From c71691659b5baee5f1524809fd827991b0474e59 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 9 Nov 2021 16:00:16 +0100 Subject: [PATCH 001/606] tl: implement suffix operator normal form * spot/tl/Makefile.am: New sonf files * spot/tl/sonf.cc, spot/tl/sonf.hh: Here. * python/spot/impl.i: include sonf.hh header * doc/spot.bib: add entry for the SONF paper * tests/Makefile.am: new python tests * tests/python/formulas.ipynb: show sample usage * tests/python/sonf.py: test automata equivalence before/after SONF * NEWS: mention the change --- NEWS | 4 + doc/spot.bib | 12 +++ python/spot/impl.i | 3 + spot/tl/Makefile.am | 2 + spot/tl/sonf.cc | 185 ++++++++++++++++++++++++++++++++++++ spot/tl/sonf.hh | 44 +++++++++ tests/Makefile.am | 1 + tests/python/formulas.ipynb | 56 +++++++++++ tests/python/sonf.py | 41 ++++++++ 9 files changed, 348 insertions(+) create mode 100644 spot/tl/sonf.cc create mode 100644 spot/tl/sonf.hh create mode 100644 tests/python/sonf.py diff --git a/NEWS b/NEWS index 928a25da2..cc0fe35a6 100644 --- a/NEWS +++ b/NEWS @@ -11,6 +11,10 @@ New in spot 2.10.4.dev (net yet released) Library: + - The new function suffix_operator_normal_form() implements + transformation of formulas to Suffix Operator Normal Form, + described in [cimatti.06.fmcad]. + - "original-classes" is a new named property similar to "original-states". It maps an each state to an unsigned integer such that if two classes are in the same class, they are expected diff --git a/doc/spot.bib b/doc/spot.bib index 9f18ad2a9..9d5d6b235 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -214,6 +214,18 @@ doi = {10.1109/DepCoS-RELCOMEX.2009.31} } +@InProceedings{ cimatti.06.fmcad, + author = {Cimatti, Alessandro and Roveri, Marco and Semprini, Simone and + Tonetta, Stefano}, + title = {From {PSL} to {NBA}: a Modular Symbolic Encoding}, + booktitle = {Proceedings of the 6th conference on Formal Methods in Computer + Aided Design (FMCAD'06)}, + pages = {125--133}, + year = {2006}, + publisher = {IEEE Computer Society}, + doi = {10.1109/FMCAD.2006.19} +} + @Article{ cimatti.08.tcad, author = {Alessandro Cimatti and Marco Roveri and Stefano Tonetta}, journal = {IEEE Transactions on Computer Aided Design of Integrated diff --git a/python/spot/impl.i b/python/spot/impl.i index 90a38a55a..7132a5cc6 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -94,6 +94,7 @@ #include #include #include +#include #include #include #include @@ -517,6 +518,7 @@ namespace std { %template(vectorbdd) vector; %template(aliasvector) vector>; %template(vectorstring) vector; + %template(pair_formula_vectorstring) pair>; %template(atomic_prop_set) set; %template(relabeling_map) map; } @@ -577,6 +579,7 @@ namespace std { %include %include %include +%include %include %include %include diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index b7362ae99..cdedddffd 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -44,6 +44,7 @@ tl_HEADERS = \ remove_x.hh \ simplify.hh \ snf.hh \ + sonf.hh \ unabbrev.hh noinst_LTLIBRARIES = libtl.la @@ -68,4 +69,5 @@ libtl_la_SOURCES = \ remove_x.cc \ simplify.cc \ snf.cc \ + sonf.cc \ unabbrev.cc diff --git a/spot/tl/sonf.cc b/spot/tl/sonf.cc new file mode 100644 index 000000000..29a319039 --- /dev/null +++ b/spot/tl/sonf.cc @@ -0,0 +1,185 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. 
+// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include + +#include +#include +#include +#include + +namespace spot +{ + namespace + { + /// Uses `extractor` to extract some parts of the formula and replace them + /// with atomic propositions. + /// + /// Returns (f & g1 & g2 & .. & gn) with g1..gn the extracted subformulas. + /// + /// `extractor` should be a lambda taking the following parameters as input: + /// + /// - `formula` the formula to process + /// - `std::vector&` the vector that stores extracted subformulas + /// - `auto&&` itself, in case it needs to call itself recursively + /// (formula::map for example) + /// - `bool` a boolean indicating whether the lambda is currently being + /// called at the formula's "root" + /// - `bool` a boolean indicating whether the lambda is currently being + /// called inside a toplevel `and` construct. + /// + /// Note that the last 2 boolean arguments can be used as you see fit in + /// your recursive calls, the first one being set to true in the original + /// call, and the second one to false. + /// + /// `extractor` should return the new rewritten formula. + /// + /// auto sample_extractor = [](formula f, + /// std::vector& extracted, + /// auto&& extractor, + /// bool top_level, + /// bool in_top_level_and) -> formula + template + static formula + extract(formula f, Ext extractor) + { + std::vector extracted; + formula new_f = extractor(f, extracted, extractor, true, false); + extracted.push_back(new_f); + return formula::And(extracted); + } + } + + std::pair> + suffix_operator_normal_form(formula f, const std::string prefix) + { + // SONF can only be applied to formulas in negative normal form + f = negative_normal_form(f); + + std::unordered_set used_aps; + std::vector added_aps; + size_t count = 0; + + // identify all used ap names to avoid them when generating new ones + auto ap_indexer = [&used_aps](formula f) noexcept { + if (f.is(op::ap)) + { + used_aps.insert(f.ap_name()); + return true; + } + + return false; + }; + + f.traverse(ap_indexer); + + auto new_ap_name = + [&used_aps, &added_aps, &prefix, &count]() noexcept -> std::string + { + std::string new_name = prefix + std::to_string(count++); + while (used_aps.find(new_name) != used_aps.end()) + new_name = prefix + std::to_string(count++); + used_aps.insert(new_name); + added_aps.push_back(new_name); + return new_name; + }; + + // extracts the SERE part and replaces it with an atomic proposition, + // storing the extracted formula in `extracted` and returning the rewritten + // original formula + auto sonf_extract = [&](formula f, + std::vector& extracted, + auto&& extractor, + bool top_level, + bool in_top_level_and) noexcept -> formula + { + const auto kind = f.kind(); + + switch (kind) + { + case op::G: + { + // skip if shape is G(!ap | (regex []-> formula)) and at top level + if ((top_level || in_top_level_and) + && f[0].is(op::Or) // G(_ | _) + && f[0][0].is(op::Not) // G(!_ | _) + && 
f[0][0][0].is(op::ap) // G(!ap | _) + && f[0][1].is(op::EConcat, op::UConcat)) // G(!ap | (_ []-> _)) + return f; + else + return f.map(extractor, extracted, extractor, false, false); + } + case op::EConcat: + case op::UConcat: + { + // recurse into rhs first (_ []-> rhs) + formula rhs = + f[1].map(extractor, extracted, extractor, false, false); + f = formula::binop(kind, f[0], rhs); + + formula ap = formula::ap(new_ap_name()); + extracted.push_back(formula::G(formula::Or({formula::Not(ap), f}))); + return ap; + } + default: + // tracking if we're in a op::And at the formula root + in_top_level_and = top_level && f.is(op::And); + return f.map(extractor, extracted, extractor, + false, in_top_level_and); + } + }; + + f = extract(f, sonf_extract); + + auto ltl_extract = [&](formula f, + std::vector& extracted, + auto&& extractor, + [[maybe_unused]] + bool top_level, + [[maybe_unused]] + bool in_top_level_and) noexcept -> formula + { + switch (f.kind()) + { + case op::EConcat: + case op::UConcat: + { + formula rhs = f[1]; + + if (rhs.is(op::ap)) + return f; + + formula ap = formula::ap(new_ap_name()); + extracted.push_back( + formula::G(formula::Or({formula::Not(ap), rhs}))); + + return formula::binop(f.kind(), f[0], ap); + } + default: + return f.map(extractor, extracted, extractor, false, false); + } + }; + + f = extract(f, ltl_extract); + + return {f, added_aps}; + } +} diff --git a/spot/tl/sonf.hh b/spot/tl/sonf.hh new file mode 100644 index 000000000..37ef5d05d --- /dev/null +++ b/spot/tl/sonf.hh @@ -0,0 +1,44 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include +#include + +#include + +namespace spot +{ + /// \ingroup tl_rewriting + /// \brief Helper to rewrite a PSL formula in Suffix Operator Normal Form. + /// + /// SONF is described in section 4 of \cite cimatti.06.fmcad + /// + /// The formula output by this function is guaranteed to be in Negative Normal + /// Form. 
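+  ///
+  /// For example (this mirrors the sample shown in
+  /// tests/python/formulas.ipynb; the names of the fresh atomic
+  /// propositions depend on \a prefix), calling the function with prefix
+  /// "sonf_" rewrites G({x[*]}[]-> Fa) into
+  /// Gsonf_0 & G(!sonf_1 | Fa) & G(!sonf_0 | ({x[*]}[]-> sonf_1)),
+  /// up to the order of the conjuncts, and reports sonf_0 and sonf_1 as
+  /// the newly introduced propositions.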
+ /// + /// \param f the PSL formula to rewrite + /// \param prefix the prefix to use to name newly introduced aps + /// \return a pair with the rewritten formula, and a vector containing the + /// names of newly introduced aps + SPOT_API std::pair> + suffix_operator_normal_form(formula f, const std::string prefix); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 1b5d63fee..a6f4ab56c 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -445,6 +445,7 @@ TESTS_python = \ python/setxor.py \ python/simplacc.py \ python/simstate.py \ + python/sonf.py \ python/split.py \ python/streett_totgba.py \ python/streett_totgba2.py \ diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 95241be9d..7075cf653 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -976,6 +976,62 @@ "print(ap) # print as a string\n", "display(ap) # LaTeX-style, for notebooks" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Converting to Suffix Operator Normal Form:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/latex": [ + "$\\mathsf{G} (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} \\mathsf{F} a)$" + ], + "text/plain": [ + "spot.formula(\"G({x[*]}[]-> Fa)\")" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/latex": [ + "$\\mathsf{G} \\mathit{sonf\\_}_{0} \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{1} \\lor \\mathsf{F} a) \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{0} \\lor (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} \\mathit{sonf\\_}_{1}))$" + ], + "text/plain": [ + "spot.formula(\"Gsonf_0 & G(!sonf_1 | Fa) & G(!sonf_0 | ({x[*]}[]-> sonf_1))\")" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "('sonf_0', 'sonf_1')" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "f = spot.formula('G({x*} []-> Fa)')\n", + "display(f)\n", + "\n", + "# In addition to the formula, returns a list of newly introduced APs\n", + "f, aps = spot.suffix_operator_normal_form(f, 'sonf_')\n", + "display(f)\n", + "display(aps)" + ] } ], "metadata": { diff --git a/tests/python/sonf.py b/tests/python/sonf.py new file mode 100644 index 000000000..558f90c63 --- /dev/null +++ b/tests/python/sonf.py @@ -0,0 +1,41 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita +# (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
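+
+# Check suffix_operator_normal_form(): each formula below is translated
+# to an automaton, the formula is rewritten to SONF, the rewriting is
+# translated as well, the newly introduced "sonf_" propositions are then
+# removed with remove_ap(), and the two automata are checked for
+# equivalence.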
+ +import spot + +formulas = """\ +{x[*]}[]-> F({y[*]}<>-> GFz) +<>(({{p12}[*0..3]}[]-> ((p9) || (!(p17)))) V ((true) U (p17))) +{{true} || {[*0]}}[]-> (false) +{{p14} & {{p0}[*]}}[]-> (p11) +{{{!{p6}} -> {!{p3}}}[*]}[]-> ((p3)V((p3) || ((X((false))) && ((p2)V(p18))))) +""" + +for f1 in formulas.splitlines(): + f1 = spot.formula(f1) + a1 = spot.translate(f1) + + f2, aps = spot.suffix_operator_normal_form(f1, 'sonf_') + a2 = spot.translate(f2) + rm = spot.remove_ap() + for ap in aps: + rm.add_ap(ap) + a2 = rm.strip(a2) + + assert(spot.are_equivalent(a1, a2)) From 93fb11017b78ff9dc8fae5e9f4eb7b981bf4bcf5 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 8 Dec 2021 11:31:54 +0100 Subject: [PATCH 002/606] ltlfilt: add --sonf and --sonf-aps flags * bin/ltlfilt.cc: Here. * NEWS: Mention new ltlfilt flags. * tests/Makefile.am, tests/core/sonf.test: Test these flags. --- NEWS | 5 +++ bin/ltlfilt.cc | 35 ++++++++++++++++++ tests/Makefile.am | 3 +- tests/core/sonf.test | 85 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 tests/core/sonf.test diff --git a/NEWS b/NEWS index cc0fe35a6..5e7816b7b 100644 --- a/NEWS +++ b/NEWS @@ -9,6 +9,11 @@ New in spot 2.10.4.dev (net yet released) - autfilt has a new --to-finite option, illustrated on https://spot.lrde.epita.fr/tut12.html + - ltlfilt has a new --sonf option to produce a formula's Suffix + Operator Normal Form, described in [cimatti.06.fmcad]. The + associated option --sonf-aps allows listing the newly introduced + atomic propositions. + Library: - The new function suffix_operator_normal_form() implements diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index cc9e0f02b..af9316192 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -100,6 +101,8 @@ enum { OPT_SIZE_MAX, OPT_SIZE_MIN, OPT_SKIP_ERRORS, + OPT_SONF, + OPT_SONF_APS, OPT_STUTTER_INSENSITIVE, OPT_SUSPENDABLE, OPT_SYNTACTIC_GUARANTEE, @@ -127,6 +130,11 @@ static const argp_option options[] = { "negate", OPT_NEGATE, nullptr, 0, "negate each formula", 0 }, { "nnf", OPT_NNF, nullptr, 0, "rewrite formulas in negative normal form", 0 }, + { "sonf", OPT_SONF, "PREFIX", OPTION_ARG_OPTIONAL, + "rewrite formulas in suffix operator normal form", 0 }, + { "sonf-aps", OPT_SONF_APS, "FILENAME", OPTION_ARG_OPTIONAL, + "when used with --sonf, output the newly introduced atomic " + "propositions", 0 }, { "relabel", OPT_RELABEL, "abc|pnn", OPTION_ARG_OPTIONAL, "relabel all atomic propositions, alphabetically unless " \ "specified otherwise", 0 }, @@ -316,6 +324,7 @@ static range opt_nth = { 0, std::numeric_limits::max() }; static int opt_max_count = -1; static long int match_count = 0; static const char* from_ltlf = nullptr; +static const char* sonf = nullptr; // We want all these variables to be destroyed when we exit main, to @@ -327,6 +336,7 @@ static struct opt_t spot::bdd_dict_ptr dict = spot::make_bdd_dict(); spot::exclusive_ap excl_ap; std::unique_ptr output_define = nullptr; + std::unique_ptr output_sonf = nullptr; spot::formula implied_by = nullptr; spot::formula imply = nullptr; spot::formula equivalent_to = nullptr; @@ -460,6 +470,12 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_NNF: nnf = true; break; + case OPT_SONF: + sonf = arg ? arg : "sonf_"; + break; + case OPT_SONF_APS: + opt->output_sonf.reset(new output_file(arg ? 
arg : "-")); + break; case OPT_OBLIGATION: obligation = true; break; @@ -650,6 +666,25 @@ namespace if (nnf) f = simpl.negative_normal_form(f); + if (sonf != nullptr) + { + std::vector new_aps; + std::tie(f, new_aps) = suffix_operator_normal_form(f, sonf); + + if (opt->output_sonf + && output_format != count_output + && output_format != quiet_output) + { + for (size_t i = 0; i < new_aps.size(); ++i) + { + if (i > 0) + opt->output_sonf->ostream() << ' '; + opt->output_sonf->ostream() << new_aps[i]; + } + opt->output_sonf->ostream() << '\n'; + } + } + switch (relabeling) { case ApRelabeling: diff --git a/tests/Makefile.am b/tests/Makefile.am index a6f4ab56c..afcd0c8d2 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -198,7 +198,8 @@ TESTS_tl = \ core/stutter-ltl.test \ core/hierarchy.test \ core/mempool.test \ - core/format.test + core/format.test \ + core/sonf.test TESTS_graph = \ core/graph.test \ diff --git a/tests/core/sonf.test b/tests/core/sonf.test new file mode 100644 index 000000000..0febfc342 --- /dev/null +++ b/tests/core/sonf.test @@ -0,0 +1,85 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2021 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
./defs +set -e + +cat >input < Fa) & G(b -> ({x[*]}[]-> c)) +{x[*]}[]-> F({y[*]}<>-> GFz) +<>(({{p12}[*0..3]}[]-> ((p9) || (!(p17)))) V ((true) U (p17))) +{{true} || {[*0]}}[]-> (false) +{{p14} & {{p0}[*]}}[]-> (p11) +{{{!{p6}} -> {!{p3}}}[*]}[]-> ((p3)V((p3) || ((X((false))) && ((p2)V(p18))))) +X({{true} || {[*0]}}[]-> ((p17) U ((p8) && (p17)))) +({{{p4} || {p5} || {{p16} <-> {{p15} -> {p11}}}}[*]}[]-> (false)) -> (p8) +{[*1..6]}[]-> ((p9) V ((p9) || (!((p4) && (p19))))) +X({{{[*0]} || {{{p10};{p14}}[:*2..3]}}[:*]}<>-> (p8)) +{{true} && {{p8}[*]}}<>-> (!(p10)) +<>(!(({{p7}[*1..2]}<>-> (p11)) V ((!(p9)) && ([]((p11) || (X(p10))))))) +<>({{!{{p5} || {{!{p2}} <-> {p7}}}} & {[*]}}<>-> (p17)) +{{p0} || {{{[*0..2]}[:*2]}[*]}}<>-> ((p1) && (p6)) +EOF + +cat >expected < c)) +s1&G(!s2|GFz)&G(!s0|({y[*]}<>-> s2))&G(!s3|Fs0)&G(!s1|({x[*]}[]-> s3)) +F(s0 R (1 U p17))&G(p9|!p17|!s1)&G(!s0|({p12[*0..3]}[]-> s1)) +s0&G!s1&G(!s0|({1|[*0]}[]-> s1)) +s0&G(!s0|({p14&p0[*]}[]-> p11)) +s0&G(!s1|(p3 R (p3|(X(0)&(p2 R p18)))))&G(!s0|({{!p3|p6}[*]}[]-> s1)) +Xs0&G(!s1|(p17 U (p8&p17)))&G(!s0|({1|[*0]}[]-> s1)) +(p8|s0)&G(!s0|({{p4|p5|{p16 && {p11|!p15}}|{!p11 && p15 && !p16}}[*]}<>-> s1)) +s0&G(!s1|(p9 R (!p4|p9|!p19)))&G(!s0|({[*1..6]}[]-> s1)) +G(!s0|({{[*0]|{p10;p14}[:*2..3]}[:*]}<>-> p8))&Xs0 +s0&G(!p10|!s1)&G(!s0|({1 && p8[*]}<>-> s1)) +F(s0 U (p9|F(!p11&X!p10)))&G(!p11|!s1)&G(!s0|({p7[*1..2]}[]-> s1)) +G(!s0|({{!p5 && {{!p2 && !p7}|{p2 && p7}}}&[*]}<>-> p17))&Fs0 +s0&G(!s1|(p1&p6))&G(!s0|({p0|[*0..2][:*2][*]}<>-> s1)) +EOF + +cat >expected-aps < stdout +diff expected stdout +diff expected-aps stdout-aps + +# check idempotence +ltlfilt -F expected --sonf=s --sonf-aps=stdout-aps \ + | sed 's/ \([|&]\) /\1/g' > stdout +diff expected stdout +# should be 14 empty lines, no new aps introduced this time +test "$(wc -l -m stdout-aps | awk '{print $1 " " $2}')" = "14 14" From 7b7e1b254b8fb699a2844dfa337d77c17c346294 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 3 Mar 2022 18:01:11 +0100 Subject: [PATCH 003/606] tests: avoid seq Partial fix for #501. * tests/core/prodchain.test: Hardcode the seq output. * tests/core/bricks.test: Use $AWK instead of seq. * tests/core/defs.in: Define $AWK. * NEWS: Mention the bug. --- NEWS | 3 +++ tests/core/bricks.test | 11 ++++++----- tests/core/defs.in | 3 ++- tests/core/prodchain.test | 5 +++-- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 5e7816b7b..89a1d52a2 100644 --- a/NEWS +++ b/NEWS @@ -74,6 +74,9 @@ New in spot 2.10.4.dev (net yet released) - work around a portability issue in Flex 2.6.4 preventing compilation on OpenBSD. + - Do not use the seq command in test cases, it is not available + everywhere. + New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/tests/core/bricks.test b/tests/core/bricks.test index b98c7e856..37ff57cb0 100644 --- a/tests/core/bricks.test +++ b/tests/core/bricks.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -21,12 +21,13 @@ . ./defs set -e -seq 0 1999 > expected +# The seq command is not always available, but we assume awk is. 
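+# (The awk one-liner below prints the integers 0..1999, one per line,
+# exactly as "seq 0 1999" used to do.)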
+$AWK 'BEGIN{for(x=0;x<2000;++x) print x;}' >expected ../bricks > stdout -cat stdout | head -n 2000 | awk '{print $2}' | sed 's/{//g' | \ - awk -F',' '{print $1}' | sort -n > map +cat stdout | head -n 2000 | $AWK '{print $2}' | sed 's/{//g' | \ + $AWK -F',' '{print $1}' | sort -n > map diff expected map diff --git a/tests/core/defs.in b/tests/core/defs.in index 7df6fdf77..d06a3b67d 100644 --- a/tests/core/defs.in +++ b/tests/core/defs.in @@ -1,5 +1,5 @@ # -*- mode: shell-script; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2013, 2015 Laboratoire de Recherche +# Copyright (C) 2009, 2010, 2012, 2013, 2015, 2022 Laboratoire de Recherche # et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -57,6 +57,7 @@ case $srcdir in *) srcdir=../$srcdir esac +AWK='@AWK@' DOT='@DOT@' LBTT="@LBTT@" LBTT_TRANSLATE="@LBTT_TRANSLATE@" diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index b5037782f..0a7f1a1d9 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -23,7 +23,8 @@ set -e set x shift -for i in `seq 1 42`; do +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do ltl2tgba "{a[*$i]}[]->GFb" > $i.hoa done for i in *.hoa; do From 530cf7ca47c942211ee42118f0cc567102e6a86f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 4 Mar 2022 16:59:04 +0100 Subject: [PATCH 004/606] tests: replace all "assert" by unittest assertions If the assert fails because of a comparison, it is useful that the test suite log contains a comparison of these values. unittest.assertEqual() and friends do that for us. * HACKING: Add a section about Python tests. * tests/sanity/style.test: Forbid the use of "assert" in Python tests. 
* tests/python/298.py, tests/python/341.py, tests/python/471.py, tests/python/accparse2.py, tests/python/aiger.py, tests/python/aliases.py, tests/python/alternating.py, tests/python/bdddict.py, tests/python/bdditer.py, tests/python/bugdet.py, tests/python/complement_semidet.py, tests/python/declenv.py, tests/python/decompose_scc.py, tests/python/det.py, tests/python/dualize.py, tests/python/ecfalse.py, tests/python/except.py, tests/python/game.py, tests/python/gen.py, tests/python/genem.py, tests/python/implies.py, tests/python/intrun.py, tests/python/kripke.py, tests/python/langmap.py, tests/python/ltl2tgba.py, tests/python/ltlf.py, tests/python/ltlparse.py, tests/python/ltlsimple.py, tests/python/mealy.py, tests/python/merge.py, tests/python/mergedge.py, tests/python/misc-ec.py, tests/python/optionmap.py, tests/python/origstate.py, tests/python/otfcrash.py, tests/python/parity.py, tests/python/parsetgba.py, tests/python/pdegen.py, tests/python/prodexpt.py, tests/python/randgen.py, tests/python/relabel.py, tests/python/remfin.py, tests/python/removeap.py, tests/python/rs_like.py, tests/python/satmin.py, tests/python/sbacc.py, tests/python/sccfilter.py, tests/python/sccinfo.py, tests/python/sccsplit.py, tests/python/semidet.py, tests/python/setacc.py, tests/python/setxor.py, tests/python/simplacc.py, tests/python/simstate.py, tests/python/sonf.py, tests/python/split.py, tests/python/streett_totgba.py, tests/python/streett_totgba2.py, tests/python/stutter.py, tests/python/sum.py, tests/python/synthesis.py, tests/python/toparity.py, tests/python/toweak.py, tests/python/tra2tba.py, tests/python/trival.py, tests/python/twagraph.py, tests/python/zlktree.py: Replace all occurrences of "assert" by calls to unittest.TestCase methods. --- HACKING | 44 +++++- tests/python/298.py | 22 +-- tests/python/341.py | 9 +- tests/python/471.py | 9 +- tests/python/accparse2.py | 126 +++++++-------- tests/python/aiger.py | 83 +++++----- tests/python/aliases.py | 12 +- tests/python/alternating.py | 52 +++--- tests/python/bdddict.py | 10 +- tests/python/bdditer.py | 22 +-- tests/python/bugdet.py | 14 +- tests/python/complement_semidet.py | 6 +- tests/python/declenv.py | 23 +-- tests/python/decompose_scc.py | 16 +- tests/python/det.py | 12 +- tests/python/dualize.py | 86 +++++----- tests/python/ecfalse.py | 26 +-- tests/python/except.py | 86 +++++----- tests/python/game.py | 10 +- tests/python/gen.py | 65 ++++---- tests/python/genem.py | 12 +- tests/python/implies.py | 38 ++--- tests/python/intrun.py | 10 +- tests/python/kripke.py | 25 +-- tests/python/langmap.py | 15 +- tests/python/ltl2tgba.py | 12 +- tests/python/ltlf.py | 6 +- tests/python/ltlparse.py | 48 +++--- tests/python/ltlsimple.py | 36 +++-- tests/python/mealy.py | 28 ++-- tests/python/merge.py | 96 +++++------ tests/python/mergedge.py | 36 +++-- tests/python/misc-ec.py | 13 +- tests/python/optionmap.py | 54 ++++--- tests/python/origstate.py | 20 +-- tests/python/otfcrash.py | 6 +- tests/python/parity.py | 54 ++++--- tests/python/parsetgba.py | 8 +- tests/python/pdegen.py | 140 ++++++++-------- tests/python/prodexpt.py | 24 +-- tests/python/randgen.py | 12 +- tests/python/relabel.py | 21 +-- tests/python/remfin.py | 24 +-- tests/python/removeap.py | 16 +- tests/python/rs_like.py | 9 +- tests/python/satmin.py | 246 +++++++++++++++-------------- tests/python/sbacc.py | 24 +-- tests/python/sccfilter.py | 6 +- tests/python/sccinfo.py | 48 +++--- tests/python/sccsplit.py | 9 +- tests/python/semidet.py | 16 +- tests/python/setacc.py | 86 +++++----- 
 tests/python/setxor.py | 22 +--
 tests/python/simplacc.py | 16 +-
 tests/python/simstate.py | 66 ++++----
 tests/python/sonf.py | 6 +-
 tests/python/split.py | 24 +--
 tests/python/streett_totgba.py | 14 +-
 tests/python/streett_totgba2.py | 14 +-
 tests/python/stutter.py | 18 ++-
 tests/python/sum.py | 10 +-
 tests/python/synthesis.py | 6 +-
 tests/python/toparity.py | 31 ++--
 tests/python/toweak.py | 10 +-
 tests/python/tra2tba.py | 56 +++----
 tests/python/trival.py | 42 ++---
 tests/python/twagraph.py | 76 ++++-----
 tests/python/zlktree.py | 65 ++++----
 tests/sanity/style.test | 23 ++-
 69 files changed, 1314 insertions(+), 1116 deletions(-)

diff --git a/HACKING b/HACKING
index 8841b033c..c6e127a70 100644
--- a/HACKING
+++ b/HACKING
@@ -290,8 +290,8 @@ would understand with:
 
     make check LOG_DRIVER=$PWD/tools/test-driver-teamcity
 
-Coding conventions
-==================
+C++ Coding conventions
+======================
 
 Here some of the conventions we follow in Spot, so that the code
 looks homogeneous. Please follow these strictly. Since this is free
@@ -682,3 +682,43 @@ Other style recommandations
 
 * Always code as if the person who ends up maintaining your code
   is a violent psychopath who knows where you live.
+
+
+Coding conventions for Python Tests
+===================================
+
+Unless you have some specific reason to write test cases in C++ (for
+instance to test some specific C++ constructions, or to use valgrind),
+prefer writing test cases in Python. Writing test cases in C++
+requires some compilation, which slows down the test suite. Doing the
+same test in Python is therefore faster, and it has the added benefit
+of ensuring that the Python bindings work.
+
+We have two types of Python tests: Python scripts and Jupyter
+notebooks. Jupyter notebooks are usually used for a sequence of
+examples and comments that can also serve as part of the
+documentation. Such Jupyter notebooks should be added to the list of
+code examples in doc/org/tut.org. Testing a notebook is done by the
+tests/python/ipnbdoctest.py script, which evaluates each cell and
+checks that the obtained result is equivalent to the result saved in
+the notebook. The process is a bit slow, so plain Python scripts
+should be preferred for most tests.
+
+If you do need a notebook to test Jupyter-specific code but this
+notebook should not be shown in the documentation, use a filename
+starting with '_'.
+
+Tests written as Python scripts should follow the same convention as
+shell scripts: exit 0 for PASS, exit 77 for SKIP, and any other exit
+code for FAIL.
+
+Do not use assert() in those scripts, as (1) asserts can be disabled,
+and (2) they provide poor insight in case of failures. Instead do
+
+  from unittest import TestCase
+  tc = TestCase()
+
+and then use tc.assertTrue(...), tc.assertEqual(..., ...),
+tc.assertIn(..., ...), etc. In case of failures, those will print
+useful messages in the trace of the tests. For instance multiline
+strings that should have been equal will be presented with a diff.
diff --git a/tests/python/298.py b/tests/python/298.py
index d4865c440..89ddbdb0c 100644
--- a/tests/python/298.py
+++ b/tests/python/298.py
@@ -1,6 +1,6 @@
 # -*- mode: python; coding: utf-8 -*-
-# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita
-# (LRDE).
+# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement
+# de l'Epita (LRDE).
 #
 # This file is part of Spot, a model checking library.
 #
@@ -20,21 +20,23 @@
 # Test for parts of Issue #298.
import spot +from unittest import TestCase +tc = TestCase() a1 = spot.automaton("""genltl --dac=51 | ltl2tgba --med |""") a1 = spot.degeneralize_tba(a1) r1 = spot.tgba_determinize(a1, True, False, False) -assert r1.num_sets() == 3 -assert a1.prop_complete().is_false(); +tc.assertEqual(r1.num_sets(), 3) +tc.assertTrue(a1.prop_complete().is_false()) # This used to fail in 2.9.5 and earlier. -assert r1.prop_complete().is_maybe(); -assert spot.is_complete(r1) +tc.assertTrue(r1.prop_complete().is_maybe()) +tc.assertTrue(spot.is_complete(r1)) a2 = spot.automaton("""genltl --dac=51 | ltl2tgba --high |""") a2 = spot.degeneralize_tba(a2) r2 = spot.tgba_determinize(a2, True, False, False) # This used to fail in 2.9.5 and earlier. -assert r2.num_sets() == 3 -assert a2.prop_complete().is_false(); -assert r2.prop_complete().is_maybe(); -assert spot.is_complete(r2) +tc.assertEqual(r2.num_sets(), 3) +tc.assertTrue(a2.prop_complete().is_false()) +tc.assertTrue(r2.prop_complete().is_maybe()) +tc.assertTrue(spot.is_complete(r2)) diff --git a/tests/python/341.py b/tests/python/341.py index 4c5937149..e828ab07c 100644 --- a/tests/python/341.py +++ b/tests/python/341.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,7 +19,8 @@ import spot from subprocess import _active - +from unittest import TestCase +tc = TestCase() def two_intersecting_automata(): """return two random automata with a non-empty intersection""" @@ -34,4 +35,4 @@ for i in range(5): n = len(_active) print(n, "active processes") -assert(n == 0) +tc.assertEqual(n, 0) diff --git a/tests/python/471.py b/tests/python/471.py index 6fee3a2d3..0fe180554 100644 --- a/tests/python/471.py +++ b/tests/python/471.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de l'Epita +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de l'Epita # (LRDE). # # This file is part of Spot, a model checking library. @@ -20,9 +20,12 @@ # Test for Issue #471. import spot +from unittest import TestCase +tc = TestCase() + a = spot.translate('Fa') a = spot.to_generalized_rabin(a, False) r1 = a.intersecting_run(a) r2 = a.accepting_run() -assert str(r1) == str(r2) -assert a.prop_weak().is_true() +tc.assertEqual(str(r1), str(r2)) +tc.assertTrue(a.prop_weak().is_true()) diff --git a/tests/python/accparse2.py b/tests/python/accparse2.py index 4e6eb1cb3..d9c7274a0 100644 --- a/tests/python/accparse2.py +++ b/tests/python/accparse2.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018 Laboratoire de Recherche et Développement +# Copyright (C) 2015, 2017-2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,99 +18,101 @@ # along with this program. If not, see . 
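+# This test exercises acc_cond/acc_code: it parses various acceptance
+# formulas and checks their classification (is_parity, is_rabin,
+# is_streett, maybe_accepting, symmetries).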
import spot +from unittest import TestCase +tc = TestCase() a = spot.acc_cond(5) a.set_acceptance(spot.acc_code('parity min odd 5')) -assert(a.is_parity() == [True, False, True]) +tc.assertEqual(a.is_parity(), [True, False, True]) a.set_acceptance('parity max even 5') -assert(a.is_parity() == [True, True, False]) +tc.assertEqual(a.is_parity(), [True, True, False]) a.set_acceptance('generalized-Buchi 5') -assert(a.is_parity()[0] == False) -assert(a.is_parity(True)[0] == False) +tc.assertEqual(a.is_parity()[0], False) +tc.assertEqual(a.is_parity(True)[0], False) a.set_acceptance('Inf(4) | (Fin(3)&Inf(2)) | (Fin(3)&Fin(1)&Inf(0))') -assert(a.is_parity()[0] == False) -assert(a.is_parity(True) == [True, True, False]) +tc.assertEqual(a.is_parity()[0], False) +tc.assertEqual(a.is_parity(True), [True, True, False]) -assert a.maybe_accepting([1, 2, 3], [0, 4]).is_true() -assert a.maybe_accepting([0], []).is_true() -assert a.maybe_accepting([0], [3]).is_false() -assert a.maybe_accepting([0, 3], []).is_maybe() -assert a.maybe_accepting([2, 3], [3]).is_false() -assert a.maybe_accepting([2, 3], []).is_maybe() -assert a.maybe_accepting([2], []).is_true() -assert a.maybe_accepting([0, 1], []).is_maybe() -assert a.maybe_accepting([0, 1], [1]).is_false() +tc.assertTrue(a.maybe_accepting([1, 2, 3], [0, 4]).is_true()) +tc.assertTrue(a.maybe_accepting([0], []).is_true()) +tc.assertTrue(a.maybe_accepting([0], [3]).is_false()) +tc.assertTrue(a.maybe_accepting([0, 3], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([2, 3], [3]).is_false()) +tc.assertTrue(a.maybe_accepting([2, 3], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([2], []).is_true()) +tc.assertTrue(a.maybe_accepting([0, 1], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([0, 1], [1]).is_false()) a.set_acceptance('Fin(0)|Fin(1)') -assert a.maybe_accepting([0, 1], [1]).is_maybe() -assert a.maybe_accepting([0, 1], [0, 1]).is_false() -assert a.maybe_accepting([0], []).is_true() -assert a.maybe_accepting([], [0]).is_true() +tc.assertTrue(a.maybe_accepting([0, 1], [1]).is_maybe()) +tc.assertTrue(a.maybe_accepting([0, 1], [0, 1]).is_false()) +tc.assertTrue(a.maybe_accepting([0], []).is_true()) +tc.assertTrue(a.maybe_accepting([], [0]).is_true()) a = spot.acc_cond(0) a.set_acceptance('all') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 0) -assert(a.is_parity() == [True, True, True]) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 0) +tc.assertEqual(a.is_parity(), [True, True, True]) a.set_acceptance('none') -assert(a.is_rabin() == 0) -assert(a.is_streett() == -1) -assert(a.is_parity() == [True, True, False]) +tc.assertEqual(a.is_rabin(), 0) +tc.assertEqual(a.is_streett(), -1) +tc.assertEqual(a.is_parity(), [True, True, False]) a = spot.acc_cond('(Fin(0)&Inf(1))') -assert(a.is_rabin() == 1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('Inf(1)&Fin(0)') -assert(a.is_rabin() == 1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 1) a.set_acceptance('Inf(1)|Fin(0)') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 1) a = spot.acc_cond('(Fin(0)&Inf(1))|(Fin(2)&Inf(3))') -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) 
+tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(3)&Fin(2))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(2)&Fin(3))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(3)&Fin(2))|(Fin(2)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(1)&Fin(0))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)&Inf(1))|(Inf(1)&Fin(0))|(Inf(3)&Fin(2))') -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))&(Fin(2)|Inf(3))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a.set_acceptance('(Inf(3)|Fin(2))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a.set_acceptance('(Inf(2)|Fin(3))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Inf(3)|Fin(2))&(Fin(2)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Inf(1)|Fin(0))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))&(Inf(1)|Fin(0))&(Inf(3)|Fin(2))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a = spot.acc_code('Inf(0)&Inf(1)&Inf(3) | Fin(0)&(Fin(1)|Fin(3))') u = a.symmetries() -assert u[0] == 0 -assert u[1] == 1 -assert u[2] == 2 -assert u[3] == 1 +tc.assertEqual(u[0], 0) +tc.assertEqual(u[1], 1) +tc.assertEqual(u[2], 2) +tc.assertEqual(u[3], 1) diff --git a/tests/python/aiger.py b/tests/python/aiger.py index 5148fef5f..f490465b0 100644 --- a/tests/python/aiger.py +++ b/tests/python/aiger.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et +# Copyright (C) 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
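+# This test exercises AIG circuits: strategies turned into AIGER form
+# (validated by re-reading the circuit as an automaton and by stepwise
+# simulation), the naming of variables, and the error messages reported
+# by aiger_circuit() on malformed input.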
import spot, buddy +from unittest import TestCase +tc = TestCase() strats = (("""HOA: v1 States: 4 @@ -3346,7 +3348,7 @@ for strat_string, (ins_str, outs_str) in strats: print(f"Mode is {m+ss+ddx+uud}") print(f"""Strat is \n{strat_s.to_str("hoa")}""") print(f"""Aig as aut is \n{strat2_s.to_str("hoa")}""") - assert 0 + raise AssertionError("not a specialization") # Check stepwise simulation @@ -3386,7 +3388,7 @@ for (i, e_latch) in zip(ins, exp_latches): # Variable names -assert(spot.aiger_circuit("""aag 2 2 0 2 0 +tc.assertEqual(spot.aiger_circuit("""aag 2 2 0 2 0 2 4 2 @@ -3394,9 +3396,9 @@ assert(spot.aiger_circuit("""aag 2 2 0 2 0 i0 a i1 b c c -""").to_str() == 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 a\ni1 b c\no0 o0\no1 o1') +""").to_str(), 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 a\ni1 b c\no0 o0\no1 o1') -assert(spot.aiger_circuit("""aag 2 2 0 2 0 +tc.assertEqual(spot.aiger_circuit("""aag 2 2 0 2 0 2 4 2 @@ -3404,7 +3406,7 @@ assert(spot.aiger_circuit("""aag 2 2 0 2 0 o0 x o1 y c -""").to_str() == 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 i0\ni1 i1\no0 x\no1 y') +""").to_str(), 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 i0\ni1 i1\no0 x\no1 y') def report_missing_exception(): @@ -3415,7 +3417,7 @@ try: 0 """) except SyntaxError as e: - assert str(e) == "\n:1: invalid header line" + tc.assertEqual(str(e), "\n:1: invalid header line") else: report_missing_exception() @@ -3423,14 +3425,15 @@ try: spot.aiger_circuit("""aag 2 2 3 2 0 """) except SyntaxError as e: - assert str(e) == "\n:1: more variables than indicated by max var" + tc.assertEqual(str(e), + "\n:1: more variables than indicated by max var") else: report_missing_exception() try: spot.aiger_circuit("""aag 2 2 0 2 0\n""") except SyntaxError as e: - assert str(e) == "\n:2: expecting input number 2" + tc.assertEqual(str(e), "\n:2: expecting input number 2") else: report_missing_exception() @@ -3439,7 +3442,7 @@ try: 3 """) except SyntaxError as e: - assert str(e) == "\n:2: expecting input number 2" + tc.assertEqual(str(e), "\n:2: expecting input number 2") else: report_missing_exception() @@ -3448,7 +3451,7 @@ try: 3 4 5 """) except SyntaxError as e: - assert str(e) == "\n:2: invalid format for an input" + tc.assertEqual(str(e), "\n:2: invalid format for an input") else: report_missing_exception() @@ -3457,7 +3460,7 @@ try: 2 """) except SyntaxError as e: - assert str(e) == "\n:3: expecting input number 4" + tc.assertEqual(str(e), "\n:3: expecting input number 4") else: report_missing_exception() @@ -3468,7 +3471,7 @@ try: 1 """) except SyntaxError as e: - assert str(e) == "\n:4: invalid format for a latch" + tc.assertEqual(str(e), "\n:4: invalid format for a latch") else: report_missing_exception() @@ -3479,7 +3482,7 @@ try: 1 1 """) except SyntaxError as e: - assert str(e) == "\n:4: expecting latch number 6" + tc.assertEqual(str(e), "\n:4: expecting latch number 6") else: report_missing_exception() @@ -3490,7 +3493,7 @@ try: 6 1 """) except SyntaxError as e: - assert str(e) == "\n:5: expecting latch number 8" + tc.assertEqual(str(e), "\n:5: expecting latch number 8") else: report_missing_exception() @@ -3502,7 +3505,7 @@ try: 8 7 """) except SyntaxError as e: - assert str(e) == "\n:6: expecting an output" + tc.assertEqual(str(e), "\n:6: expecting an output") else: report_missing_exception() @@ -3515,7 +3518,7 @@ try: 9 9 9 """) except SyntaxError as e: - assert str(e) == "\n:6: invalid format for an output" + tc.assertEqual(str(e), "\n:6: invalid format for an output") else: report_missing_exception() @@ -3528,7 +3531,7 @@ try: 9 9 9 """) except SyntaxError as e: - 
assert str(e) == "\n:6: invalid format for an output" + tc.assertEqual(str(e), "\n:6: invalid format for an output") else: report_missing_exception() @@ -3541,7 +3544,7 @@ try: 9 """) except SyntaxError as e: - assert str(e) == "\n:7: expecting AND gate number 10" + tc.assertEqual(str(e), "\n:7: expecting AND gate number 10") else: report_missing_exception() @@ -3555,7 +3558,7 @@ try: 10 3 8 9 """) except SyntaxError as e: - assert str(e) == "\n:7: invalid format for an AND gate" + tc.assertEqual(str(e), "\n:7: invalid format for an AND gate") else: report_missing_exception() @@ -3569,7 +3572,7 @@ try: 10 3 """) except SyntaxError as e: - assert str(e) == "\n:7: invalid format for an AND gate" + tc.assertEqual(str(e), "\n:7: invalid format for an AND gate") else: report_missing_exception() @@ -3583,7 +3586,7 @@ try: 10 3 8 """) except SyntaxError as e: - assert str(e) == "\n:8: expecting AND gate number 12" + tc.assertEqual(str(e), "\n:8: expecting AND gate number 12") else: report_missing_exception() @@ -3599,7 +3602,7 @@ try: i0 """) except SyntaxError as e: - assert str(e) == "\n:9: could not parse as input name" + tc.assertEqual(str(e), "\n:9: could not parse as input name") else: report_missing_exception() @@ -3616,7 +3619,7 @@ i0 foo i3 bar """) except SyntaxError as e: - assert str(e) == "\n:10: value 3 exceeds input count" + tc.assertEqual(str(e), "\n:10: value 3 exceeds input count") else: report_missing_exception() @@ -3633,7 +3636,7 @@ i1 bar i0 foo """) except SyntaxError as e: - assert str(e) == "\n:9: expecting name for input 0" + tc.assertEqual(str(e), "\n:9: expecting name for input 0") else: report_missing_exception() @@ -3650,8 +3653,8 @@ i0 name with spaces i1 name with spaces """) except SyntaxError as e: - assert str(e) == \ - "\n:10: name 'name with spaces' already used" + tc.assertEqual(str(e), \ + "\n:10: name 'name with spaces' already used") else: report_missing_exception() @@ -3669,7 +3672,7 @@ i1 bar o0 """) except SyntaxError as e: - assert str(e) == "\n:11: could not parse as output name" + tc.assertEqual(str(e), "\n:11: could not parse as output name") else: report_missing_exception() @@ -3689,7 +3692,7 @@ o1 hmm o0 foo bar baz """) except SyntaxError as e: - assert str(e) == "\n:12: expecting name for output 0" + tc.assertEqual(str(e), "\n:12: expecting name for output 0") else: report_missing_exception() @@ -3709,7 +3712,7 @@ o0 hmm o2 foo bar baz """) except SyntaxError as e: - assert str(e) == "\n:13: value 2 exceeds output count" + tc.assertEqual(str(e), "\n:13: value 2 exceeds output count") else: report_missing_exception() @@ -3729,7 +3732,7 @@ o0 foo o1 foo """) except SyntaxError as e: - assert str(e) == "\n:13: name 'foo' already used" + tc.assertEqual(str(e), "\n:13: name 'foo' already used") else: report_missing_exception() @@ -3749,7 +3752,7 @@ o0 foo o1 bar """) except SyntaxError as e: - assert str(e) == "\n:13: name 'bar' already used" + tc.assertEqual(str(e), "\n:13: name 'bar' already used") else: report_missing_exception() @@ -3770,7 +3773,7 @@ o1 baz this is a bug """) except SyntaxError as e: - assert str(e) == "\n:14: unsupported line type" + tc.assertEqual(str(e), "\n:14: unsupported line type") else: report_missing_exception() @@ -3791,8 +3794,8 @@ c this is not a bug """) except SyntaxError as e: - assert str(e) == \ - "\n:10: either all or none of the inputs should be named" + tc.assertEqual(str(e), \ + "\n:10: either all or none of the inputs should be named") else: report_missing_exception() @@ -3815,8 +3818,8 @@ c this is not a 
bug """) except SyntaxError as e: - assert str(e) == \ - "\n:11-12: either all or none of the inputs should be named" + tc.assertEqual(str(e), \ + "\n:11-12: either all or none of the inputs should be named") else: report_missing_exception() @@ -3841,8 +3844,8 @@ c this is not a bug """) except SyntaxError as e: - assert str(e) == \ - "\n:14-16: either all or none of the outputs should be named" + tc.assertEqual(str(e), \ + "\n:14-16: either all or none of the outputs should be named") else: report_missing_exception() @@ -3866,4 +3869,4 @@ o2 bar c this is not a bug """).to_str() -assert x == spot.aiger_circuit(x).to_str() +tc.assertEqual(x, spot.aiger_circuit(x).to_str()) diff --git a/tests/python/aliases.py b/tests/python/aliases.py index 6f861a880..40dd4d0ec 100644 --- a/tests/python/aliases.py +++ b/tests/python/aliases.py @@ -20,6 +20,8 @@ # Test for parts of Issue #497. import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -63,11 +65,11 @@ State: 0 --END--""") s = aut.to_str('hoa') aut2 = spot.automaton(s) -assert aut.equivalent_to(aut2) +tc.assertTrue(aut.equivalent_to(aut2)) s2 = aut.to_str('hoa') -assert s == s2 +tc.assertEqual(s, s2) -assert s == """HOA: v1 +tc.assertEqual(s, """HOA: v1 States: 1 Start: 0 AP: 3 "x" "y" "z" @@ -105,7 +107,7 @@ State: 0 [@a&2 | @p1&@p0&2] 0 [@a&2] 0 [@p0&2 | @p1&2] 0 ---END--""" +--END--""") # Check what happens to aliases when an AP has been removed, but # the aliases have been preserved... @@ -115,7 +117,7 @@ aut3 = rem.strip(aut) spot.set_aliases(aut3, spot.get_aliases(aut)) s2 = aut3.to_str('hoa') # Aliases based on "x" should have disappeared. -assert(s2 == """HOA: v1 +tc.assertEqual(s2, """HOA: v1 States: 1 Start: 0 AP: 2 "y" "z" diff --git a/tests/python/alternating.py b/tests/python/alternating.py index 7b3a5d713..5b38ca378 100755 --- a/tests/python/alternating.py +++ b/tests/python/alternating.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016-2017, 2021-2022 Laboratoire de Recherche +# et Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
# @@ -20,6 +20,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() aut = spot.make_twa_graph(spot._bdd_dict) @@ -38,9 +40,8 @@ aut.new_edge(2, 2, p1 | p2) tr = [(s, [[x for x in aut.univ_dests(i)] for i in aut.out(s)]) for s in range(3)] -print(tr) -assert [(0, [[1, 2], [0, 1]]), (1, [[0, 2, 1]]), (2, [[2]])] == tr -assert not aut.is_existential() +tc.assertEqual([(0, [[1, 2], [0, 1]]), (1, [[0, 2, 1]]), (2, [[2]])], tr) +tc.assertFalse(aut.is_existential()) received = False try: @@ -49,11 +50,10 @@ try: pass except RuntimeError: received = True -assert received +tc.assertTrue(received) h = aut.to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 2 "p1" "p2" @@ -68,22 +68,20 @@ State: 1 [0&1] 0&2&1 State: 2 [0 | 1] 2 ---END--""" +--END--""") aut2 = spot.automaton(h) h2 = aut2.to_str('hoa') -print(h2) -assert h != h2 +tc.assertNotEqual(h, h2) # This will sort destination groups aut.merge_univ_dests() h = aut.to_str('hoa') -assert h == h2 +tc.assertEqual(h, h2) aut2.set_univ_init_state([0, 1]) h3 = aut2.to_str('hoa') -print(h3) -assert h3 == """HOA: v1 +tc.assertEqual(h3, """HOA: v1 States: 3 Start: 0&1 AP: 2 "p1" "p2" @@ -98,23 +96,22 @@ State: 1 [0&1] 0&1&2 State: 2 [0 | 1] 2 ---END--""" +--END--""") st = spot.states_and(aut, [0, 2]) st2 = spot.states_and(aut, [1, st]) st3 = spot.states_and(aut, [0, 1, 2]) -assert (st, st2, st3) == (3, 4, 5) +tc.assertEqual((st, st2, st3), (3, 4, 5)) received = False try: st4 = spot.states_and(aut, []) except RuntimeError: received = True -assert received +tc.assertTrue(received) h = aut.to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 6 Start: 0 AP: 2 "p1" "p2" @@ -136,11 +133,10 @@ State: 4 [0&1] 0&1&2 State: 5 [0&1] 0&1&2 ---END--""" +--END--""") h = spot.split_edges(aut).to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 6 Start: 0 AP: 2 "p1" "p2" @@ -168,7 +164,7 @@ State: 4 [0&1] 0&1&2 State: 5 [0&1] 0&1&2 ---END--""" +--END--""") # remove_univ_otf @@ -206,11 +202,11 @@ State: 2 --END--""" desalt = spot.remove_univ_otf(aut) -assert(desalt.to_str('hoa') == out) +tc.assertEqual(desalt.to_str('hoa'), out) -assert aut.num_states() == 3 -assert aut.num_edges() == 3 +tc.assertEqual(aut.num_states(), 3) +tc.assertEqual(aut.num_edges(), 3) aut.edge_storage(3).cond = buddy.bddfalse aut.purge_dead_states() -assert aut.num_states() == 1 -assert aut.num_edges() == 0 +tc.assertEqual(aut.num_states(), 1) +tc.assertEqual(aut.num_edges(), 0) diff --git a/tests/python/bdddict.py b/tests/python/bdddict.py index d6222b58f..0172bd050 100644 --- a/tests/python/bdddict.py +++ b/tests/python/bdddict.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2019, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -33,6 +33,8 @@ else: gc.collect() import spot +from unittest import TestCase +tc = TestCase() class bdd_holder: @@ -64,7 +66,7 @@ class bdd_holder3: def check_ok(): - assert type(bdict.varnum(spot.formula.ap("a"))) is int + tc.assertIs(type(bdict.varnum(spot.formula.ap("a"))), int) def check_nok(): @@ -123,7 +125,7 @@ debug("h2") h3 = bdd_holder3(h2) var = bdict.register_anonymous_variables(1, h3) debug("h3") -assert var == 2 +tc.assertEqual(var, 2) del h2 gcollect() debug("-h2") diff --git a/tests/python/bdditer.py b/tests/python/bdditer.py index 3d3bb7894..95cc441b3 100644 --- a/tests/python/bdditer.py +++ b/tests/python/bdditer.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2017, 2018, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -24,11 +24,13 @@ import spot import buddy import sys +from unittest import TestCase +tc = TestCase() run = spot.translate('a & !b').accepting_run() b = run.prefix[0].label c = buddy.bdd_satone(b) -assert c != buddy.bddfalse +tc.assertNotEqual(c, buddy.bddfalse) res = [] while c != buddy.bddtrue: var = buddy.bdd_var(c) @@ -40,23 +42,23 @@ while c != buddy.bddtrue: res.append(var) c = h -assert res == [0, -1] +tc.assertEqual(res, [0, -1]) res2 = [] for i in run.aut.ap(): res2.append((str(i), run.aut.register_ap(i))) -assert str(res2) == "[('a', 0), ('b', 1)]" +tc.assertEqual(str(res2), "[('a', 0), ('b', 1)]") f = spot.bdd_to_formula(b) -assert f._is(spot.op_And) -assert f[0]._is(spot.op_ap) -assert f[1]._is(spot.op_Not) -assert f[1][0]._is(spot.op_ap) -assert str(f) == 'a & !b' +tc.assertTrue(f._is(spot.op_And)) +tc.assertTrue(f[0]._is(spot.op_ap)) +tc.assertTrue(f[1]._is(spot.op_Not)) +tc.assertTrue(f[1][0]._is(spot.op_ap)) +tc.assertEqual(str(f), 'a & !b') try: f = spot.bdd_to_formula(b, spot.make_bdd_dict()) sys.exit(2) except RuntimeError as e: - assert "not in the dictionary" in str(e) + tc.assertIn("not in the dictionary", str(e)) diff --git a/tests/python/bugdet.py b/tests/python/bugdet.py index 9e06e0db3..19434c967 100644 --- a/tests/python/bugdet.py +++ b/tests/python/bugdet.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -22,6 +22,8 @@ # sent to the Spot mailing list on 2016-10-31. import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1 @@ -80,12 +82,12 @@ State: 7 {0} # was fine. 
print("use_simulation=True") b1 = spot.tgba_determinize(b, False, True, True, True) -assert b1.num_states() == 5 +tc.assertEqual(b1.num_states(), 5) b1 = spot.remove_fin(spot.dualize(b1)) -assert not a.intersects(b1) +tc.assertFalse(a.intersects(b1)) print("\nuse_simulation=False") b2 = spot.tgba_determinize(b, False, True, False, True) -assert b2.num_states() == 5 +tc.assertEqual(b2.num_states(), 5) b2 = spot.remove_fin(spot.dualize(b2)) -assert not a.intersects(b2) +tc.assertFalse(a.intersects(b2)) diff --git a/tests/python/complement_semidet.py b/tests/python/complement_semidet.py index 5ab4557bc..da06749a3 100644 --- a/tests/python/complement_semidet.py +++ b/tests/python/complement_semidet.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() def complement(aut): @@ -35,4 +37,4 @@ for aut in spot.automata( comp = complement(aut) semidet_comp = spot.complement_semidet(aut, True) - assert(comp.equivalent_to(semidet_comp)) + tc.assertTrue(comp.equivalent_to(semidet_comp)) diff --git a/tests/python/declenv.py b/tests/python/declenv.py index 868f6ca1d..3ab47736b 100644 --- a/tests/python/declenv.py +++ b/tests/python/declenv.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -21,6 +21,8 @@ # This file tests various error conditions on the twa API import spot +from unittest import TestCase +tc = TestCase() env = spot.declarative_environment() env.declare("a") @@ -28,26 +30,27 @@ env.declare("b") f1a = spot.parse_infix_psl("a U b") f1b = spot.parse_infix_psl("a U b", env) -assert not f1a.errors -assert not f1b.errors +tc.assertFalse(f1a.errors) +tc.assertFalse(f1b.errors) + # In the past, atomic propositions requires via different environments were # never equal, but this feature was never used and we changed that in Spot 2.0 # for the sake of simplicity. -assert f1a.f == f1b.f +tc.assertEqual(f1a.f, f1b.f) f2 = spot.parse_infix_psl("(a U b) U c", env) -assert f2.errors +tc.assertTrue(f2.errors) ostr = spot.ostringstream() f2.format_errors(ostr) err = ostr.str() -assert "unknown atomic proposition `c'" in err +tc.assertIn("unknown atomic proposition `c'", err) f3 = spot.parse_prefix_ltl("R a d", env) -assert f3.errors +tc.assertTrue(f3.errors) ostr = spot.ostringstream() f3.format_errors(ostr) err = ostr.str() -assert "unknown atomic proposition `d'" in err +tc.assertIn("unknown atomic proposition `d'", err) f4 = spot.parse_prefix_ltl("R a b", env) -assert not f4.errors +tc.assertFalse(f4.errors) diff --git a/tests/python/decompose_scc.py b/tests/python/decompose_scc.py index 5f6ad46cb..47741fb72 100644 --- a/tests/python/decompose_scc.py +++ b/tests/python/decompose_scc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et +# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('(Ga -> Gb) W c') si = spot.scc_info(aut) @@ -26,10 +28,10 @@ si = spot.scc_info(aut) # if the generation of the automaton changes, so just scan # for it. rej = [j for j in range(si.scc_count()) if si.is_rejecting_scc(j)] -assert len(rej) == 1 +tc.assertEqual(len(rej), 1) s = spot.decompose_scc(si, rej[0]).to_str('hoa', '1.1') -assert (s == """HOA: v1.1 +tc.assertEqual(s, """HOA: v1.1 States: 3 Start: 0 AP: 3 "b" "a" "c" @@ -56,7 +58,8 @@ except RuntimeError: else: raise AssertionError -assert (spot.decompose_scc(si, 0, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.decompose_scc(si, 0, True).to_str('hoa', '1.1'), +"""HOA: v1.1 States: 4 Start: 0 AP: 3 "b" "a" "c" @@ -81,7 +84,8 @@ State: 3 [1] 3 --END--""") -assert (spot.decompose_scc(si, 2, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.decompose_scc(si, 2, True).to_str('hoa', '1.1'), +"""HOA: v1.1 States: 2 Start: 0 AP: 3 "b" "a" "c" @@ -103,4 +107,4 @@ try: except RuntimeError: pass else: - raise AssertionError + raise AssertionError("missing exception") diff --git a/tests/python/det.py b/tests/python/det.py index 03f07c096..36fa31ff3 100644 --- a/tests/python/det.py +++ b/tests/python/det.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() a = spot.translate('FGa | FGb') @@ -26,10 +28,10 @@ a = spot.translate('FGa | FGb') d = spot.tgba_determinize(a, False, True, True, True, None, -1, True) cld = list(d.get_original_classes()) -assert [0, 1, 2, 3, 3] == cld +tc.assertEqual([0, 1, 2, 3, 3], cld) e = spot.sbacc(d) -assert e.get_original_states() is None +tc.assertIsNone(e.get_original_states()) cle = list(e.get_original_classes()) -assert len(cle) == e.num_states() -assert set(cle) == set(cld) +tc.assertEqual(len(cle), e.num_states()) +tc.assertEqual(set(cle), set(cld)) diff --git a/tests/python/dualize.py b/tests/python/dualize.py index 81d2a2b23..b870e1e5e 100755 --- a/tests/python/dualize.py +++ b/tests/python/dualize.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
# @@ -20,6 +20,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() match_strings = [('is_buchi', 'is_co_buchi'), ('is_generalized_buchi', 'is_generalized_co_buchi'), @@ -79,19 +81,19 @@ def test_aut(aut, d=None): def test_complement(aut): - assert aut.is_deterministic() + tc.assertTrue(aut.is_deterministic()) d = spot.dualize(aut) s = spot.product_or(aut, d) - assert spot.dualize(s).is_empty() + tc.assertTrue(spot.dualize(s).is_empty()) def test_assert(a, d=None): t = test_aut(a, d) if not t[0]: - print (t[1]) - print (a.to_str('hoa')) - print (spot.dualize(a).to_str('hoa')) - assert False + print(t[1]) + print(a.to_str('hoa')) + print(spot.dualize(a).to_str('hoa')) + tc.assertTrue(t[0]) aut = spot.translate('a') @@ -101,7 +103,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 1 AP: 1 "a" @@ -117,7 +119,7 @@ State: 1 [!0] 2 State: 2 [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -141,7 +143,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -161,7 +163,7 @@ State: 2 {0} [!1] 3 State: 3 [t] 3 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -186,7 +188,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" @@ -198,7 +200,7 @@ State: 0 [t] 0 State: 1 [!0 | !1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -219,10 +221,10 @@ State: 3 {1} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" @@ -234,7 +236,7 @@ State: 0 [t] 0 State: 1 [!0 | !1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -255,10 +257,10 @@ State: 3 {0} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 5 Start: 0 AP: 2 "a" "b" @@ -280,7 +282,7 @@ State: 3 {0} [t] 3 State: 4 [t] 4 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -302,10 +304,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -327,7 +329,7 @@ State: 2 [!0&!1] 0&2 State: 3 [t] 3 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -348,10 +350,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -362,7 +364,7 @@ properties: deterministic terminal --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -382,10 +384,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -396,7 +398,7 @@ properties: deterministic terminal --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -419,7 +421,7 @@ State: 2 dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 
States: 3 Start: 0 AP: 2 "a" "b" @@ -435,7 +437,7 @@ State: 1 {0} [t] 1 State: 2 [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -456,10 +458,10 @@ State: 2 dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 1 "a" @@ -471,7 +473,7 @@ State: 0 [!0] 1 State: 1 {0} [t] 1 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -495,10 +497,10 @@ State: 3 {0} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 1 "a" @@ -515,7 +517,7 @@ State: 1 [0] 2 State: 2 {0} [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -536,10 +538,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 1 "a" @@ -555,14 +557,14 @@ State: 1 {0} [t] 0 State: 2 {1} [t] 0 ---END--""" +--END--""") aut = spot.translate('G!a R XFb') test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 5 Start: 0 AP: 2 "a" "b" @@ -589,7 +591,7 @@ State: 3 {0} [0] 4 State: 4 [t] 4 ---END--""" +--END--""") opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) diff --git a/tests/python/ecfalse.py b/tests/python/ecfalse.py index 36301914b..ccbaa2693 100644 --- a/tests/python/ecfalse.py +++ b/tests/python/ecfalse.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -19,6 +19,8 @@ import spot from buddy import bddfalse, bddtrue +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1 @@ -43,8 +45,8 @@ for e in a.out(1): if e.dst == 0: e.cond = bddfalse -assert a.accepting_run() is None -assert a.is_empty() +tc.assertIsNone(a.accepting_run()) +tc.assertTrue(a.is_empty()) for name in ['SE05', 'CVWY90', 'GV04', 'Cou99(shy)', 'Cou99', 'Tau03']: print(name) @@ -52,13 +54,13 @@ for name in ['SE05', 'CVWY90', 'GV04', 'Cou99(shy)', 'Cou99', 'Tau03']: res = ec.check() if res is not None: print(res.accepting_run()) - assert res is None + tc.assertIsNone(res) si = spot.scc_info(a) -assert si.scc_count() == 1 # only one accessible SCC +tc.assertEqual(si.scc_count(), 1) # only one accessible SCC a.set_init_state(0) si = spot.scc_info(a) -assert si.scc_count() == 2 +tc.assertEqual(si.scc_count(), 2) a = spot.automaton("""HOA: v1 States: 11 Start: 0 AP: 2 "a" "b" Acceptance: 8 (Fin(0) | Inf(1)) & (Fin(2) | Inf(3)) & ((Fin(4) & Inf(5)) | (Fin(6) & Inf(7))) @@ -71,16 +73,16 @@ State: 5 State: 6 State: 7 [!0&!1] 1 {4 6 7} [!0&!1] 2 {5 6} State: 8 [!0&!1] 2 {4} State: 9 [!0&!1] 2 {0 4} [!0&!1] 4 {3 4} State: 10 --END-- """) r = a.accepting_run() -assert r is not None -assert r.replay(spot.get_cout()) +tc.assertIsNotNone(r) +tc.assertTrue(r.replay(spot.get_cout())) for e in a.out(7): if e.dst == 2: e.cond = bddfalse s = a.accepting_run() -assert s is not None -assert s.replay(spot.get_cout()) +tc.assertIsNotNone(s) +tc.assertTrue(s.replay(spot.get_cout())) for e in a.out(2): if e.dst == 1: e.cond = bddfalse s = a.accepting_run() -assert s is None +tc.assertIsNone(s) diff --git a/tests/python/except.py b/tests/python/except.py index 178e419b4..76f17f76c 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -24,6 +24,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() def report_missing_exception(): @@ -35,7 +37,7 @@ aut.set_acceptance(spot.acc_cond("parity min even 4")) try: spot.iar(aut) except RuntimeError as e: - assert 'iar() expects Rabin-like or Streett-like input' in str(e) + tc.assertIn('iar() expects Rabin-like or Streett-like input', str(e)) else: report_missing_exception() @@ -43,7 +45,7 @@ alt = spot.dualize(spot.translate('FGa | FGb')) try: spot.tgba_determinize(alt) except RuntimeError as e: - assert 'tgba_determinize() does not support alternation' in str(e) + tc.assertIn('tgba_determinize() does not support alternation', str(e)) else: report_missing_exception() @@ -52,18 +54,18 @@ aps = aut.ap() rem = spot.remove_ap() rem.add_ap('"a"=0,b') aut = rem.strip(aut) -assert aut.ap() == aps[2:] +tc.assertEqual(aut.ap(), aps[2:]) try: rem.add_ap('"a=0,b') except ValueError as e: - assert """missing closing '"'""" in str(e) + tc.assertIn("""missing closing '"'""", str(e)) else: report_missing_exception() try: rem.add_ap('a=0=b') except ValueError as e: - assert """unexpected '=' at position 3""" in str(e) + tc.assertIn("""unexpected '=' at position 3""", str(e)) else: report_missing_exception() @@ -73,7 +75,7 @@ for meth in ('scc_has_rejecting_cycle', 'is_inherently_weak_scc', try: getattr(spot, meth)(si, 20) except ValueError as e: - assert "invalid SCC number" in str(e) + tc.assertIn("invalid SCC number", str(e)) else: report_missing_exception() @@ -89,14 +91,15 @@ si = spot.scc_info(alt) try: si.determine_unknown_acceptance() except RuntimeError as e: - assert "scc_info::determine_unknown_acceptance() does not supp" in str(e) + tc.assertIn("scc_info::determine_unknown_acceptance() does not supp", 
+ str(e)) else: report_missing_exception() try: alt.set_init_state(999) except ValueError as e: - assert "set_init_state()" in str(e) + tc.assertIn("set_init_state()", str(e)) else: report_missing_exception() @@ -107,7 +110,7 @@ alt.set_init_state(u) try: alt.set_init_state(u - 1) except ValueError as e: - assert "set_init_state()" in str(e) + tc.assertIn("set_init_state()", str(e)) else: report_missing_exception() @@ -116,21 +119,21 @@ r = spot.twa_run(aut) try: a = r.as_twa() except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() try: a = r.replay(spot.get_cout()) except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() try: a = r.reduce() except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() @@ -138,12 +141,12 @@ a = spot.translate('Fa') a = spot.to_generalized_rabin(a, False) r = a.accepting_run() r = r.reduce() -assert r.cycle[0].acc == spot.mark_t([1]) +tc.assertEqual(r.cycle[0].acc, spot.mark_t([1])) r.cycle[0].acc = spot.mark_t([0]) try: r.reduce(); except RuntimeError as e: - assert "expects an accepting cycle" in str(e) + tc.assertIn("expects an accepting cycle", str(e)) else: report_missing_exception() @@ -151,7 +154,7 @@ f = spot.formula('GF(a | Gb)') try: spot.gf_guarantee_to_ba(f, spot._bdd_dict) except RuntimeError as e: - assert "guarantee" in str(e) + tc.assertIn("guarantee", str(e)) else: report_missing_exception() @@ -159,7 +162,7 @@ f = spot.formula('FG(a | Fb)') try: spot.fg_safety_to_dca(f, spot._bdd_dict) except RuntimeError as e: - assert "safety" in str(e) + tc.assertIn("safety", str(e)) else: report_missing_exception() @@ -168,28 +171,28 @@ m = spot.mark_t([n - 1]) try: m = spot.mark_t([0]) << n except RuntimeError as e: - assert "Too many acceptance sets" in str(e) + tc.assertIn("Too many acceptance sets", str(e)) else: report_missing_exception() try: m.set(n) except RuntimeError as e: - assert "bit index is out of bounds" in str(e) + tc.assertIn("bit index is out of bounds", str(e)) else: report_missing_exception() try: m = spot.mark_t([0, n, 1]) except RuntimeError as e: - assert "Too many acceptance sets used. The limit is" in str(e) + tc.assertIn("Too many acceptance sets used. 
The limit is", str(e)) else: report_missing_exception() try: spot.complement_semidet(spot.translate('Gb R a', 'ba')) except RuntimeError as e: - assert "requires a semi-deterministic input" in str(e) + tc.assertIn("requires a semi-deterministic input", str(e)) else: report_missing_exception() @@ -197,52 +200,55 @@ try: spot.translate('F(G(a | !a) & ((b <-> c) W d))', 'det', 'any') except ValueError as e: s = str(e) - assert 'det' in s - assert 'any' in s + tc.assertIn('det', s) + tc.assertIn('any', s) else: report_missing_exception() a1 = spot.translate('FGa') a2 = spot.translate('Gb') -assert not spot.is_deterministic(a1) -assert spot.is_deterministic(a2) +tc.assertFalse(spot.is_deterministic(a1)) +tc.assertTrue(spot.is_deterministic(a2)) try: spot.product_xor(a1, a2) except RuntimeError as e: - assert "product_xor() only works with deterministic automata" in str(e) + tc.assertIn("product_xor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xor(a2, a1) except RuntimeError as e: - assert "product_xor() only works with deterministic automata" in str(e) + tc.assertIn("product_xor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xnor(a1, a2) except RuntimeError as e: - assert "product_xnor() only works with deterministic automata" in str(e) + tc.assertIn("product_xnor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xnor(a2, a1) except RuntimeError as e: - assert "product_xnor() only works with deterministic automata" in str(e) + tc.assertIn("product_xnor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.solve_safety_game(a1) except RuntimeError as e: - assert "solve_safety_game(): arena should have true acceptance" in str(e) + tc.assertIn( + "solve_safety_game(): arena should have true acceptance", + str(e)) else: report_missing_exception() try: spot.solve_parity_game(a1) except RuntimeError as e: - assert "solve_parity_game(): arena must have max-odd acceptance condition" \ - in str(e) + tc.assertIn( + "solve_parity_game(): arena must have max-odd acceptance condition", + str(e)) else: report_missing_exception() @@ -250,16 +256,16 @@ else: try: spot.formula_Star(spot.formula("a"), 10, 333) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() try: spot.formula_FStar(spot.formula("a"), 333, 400) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() @@ -267,15 +273,15 @@ try: spot.formula_nested_unop_range(spot.op_F, spot.op_Or, 333, 400, spot.formula("a")) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() try: spot.formula_FStar(spot.formula("a"), 50, 40) except OverflowError as e: - assert "reversed" in str(e) + tc.assertIn("reversed", str(e)) else: report_missing_exception() @@ -287,5 +293,5 @@ try: a.to_str() except RuntimeError as e: se = str(e) - assert "synthesis-outputs" in se - assert "unregistered proposition" in se + tc.assertIn("synthesis-outputs", se) + tc.assertIn("unregistered proposition", se) diff --git a/tests/python/game.py b/tests/python/game.py index 9d77c153d..d7aec2f38 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ 
-1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() g = spot.automaton("""HOA: v1 States: 9 Start: 0 AP: 2 "a" "b" acc-name: Streett 1 Acceptance: 2 Fin(0) | Inf(1) properties: @@ -27,10 +29,10 @@ trans-labels explicit-labels state-acc spot-state-player: 0 1 0 1 0 1 {1} [0] 8 State: 3 {1} [1] 4 State: 4 {1} [0] 5 State: 5 {1} [0] 6 State: 6 {1} [1] 7 State: 7 State: 8 {1} [0] 2 --END--""") -assert spot.solve_parity_game(g) == False +tc.assertFalse(spot.solve_parity_game(g)) s = spot.highlight_strategy(g).to_str("HOA", "1.1") -assert s == """HOA: v1.1 +tc.assertEqual(s, """HOA: v1.1 States: 9 Start: 0 AP: 2 "a" "b" @@ -60,4 +62,4 @@ State: 6 {1} State: 7 State: 8 {1} [0] 2 ---END--""" +--END--""") diff --git a/tests/python/gen.py b/tests/python/gen.py index dd844741c..a9fed6890 100644 --- a/tests/python/gen.py +++ b/tests/python/gen.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -23,63 +23,66 @@ import spot.gen as gen from sys import exit +from unittest import TestCase +tc = TestCase() k2 = gen.aut_pattern(gen.AUT_KS_NCA, 2) -assert k2.prop_state_acc() -assert k2.num_states() == 5 -assert k2.prop_universal().is_false() -assert k2.prop_inherently_weak().is_false() -assert k2.prop_stutter_invariant().is_false() -assert k2.prop_semi_deterministic().is_false() -assert k2.prop_deterministic().is_false() -assert k2.prop_terminal().is_false() +tc.assertTrue(k2.prop_state_acc()) +tc.assertEqual(k2.num_states(), 5) +tc.assertTrue(k2.prop_universal().is_false()) +tc.assertTrue(k2.prop_inherently_weak().is_false()) +tc.assertTrue(k2.prop_stutter_invariant().is_false()) +tc.assertTrue(k2.prop_semi_deterministic().is_false()) +tc.assertTrue(k2.prop_deterministic().is_false()) +tc.assertTrue(k2.prop_terminal().is_false()) # to_str is defined in the spot package, so this makes sure # the type returned by spot.gen.ks_nca() is the correct one. 
-assert 'to_str' in dir(k2) +tc.assertIn('to_str', dir(k2)) k3 = gen.aut_pattern(gen.AUT_L_NBA, 3) -assert k3.num_states() == 10 -assert k3.prop_state_acc() -assert k3.prop_universal().is_false() -assert k3.prop_inherently_weak().is_false() -assert k3.prop_stutter_invariant().is_false() -assert k3.prop_semi_deterministic().is_false() -assert k3.prop_deterministic().is_false() -assert k3.prop_terminal().is_false() +tc.assertEqual(k3.num_states(), 10) +tc.assertTrue(k3.prop_state_acc()) +tc.assertTrue(k3.prop_universal().is_false()) +tc.assertTrue(k3.prop_inherently_weak().is_false()) +tc.assertTrue(k3.prop_stutter_invariant().is_false()) +tc.assertTrue(k3.prop_semi_deterministic().is_false()) +tc.assertTrue(k3.prop_deterministic().is_false()) +tc.assertTrue(k3.prop_terminal().is_false()) -assert k2.get_dict() == k3.get_dict() +tc.assertEqual(k2.get_dict(), k3.get_dict()) try: gen.aut_pattern(gen.AUT_KS_NCA, 0) except RuntimeError as e: - assert 'positive argument' in str(e) + tc.assertIn('positive argument', str(e)) else: exit(2) f = gen.ltl_pattern(gen.LTL_AND_F, 3) -assert f.size() == 3 -assert gen.ltl_pattern_name(gen.LTL_AND_F) == "and-f" +tc.assertEqual(f.size(), 3) +tc.assertEqual(gen.ltl_pattern_name(gen.LTL_AND_F), "and-f") try: gen.ltl_pattern(1000, 3) except RuntimeError as e: - assert 'unsupported pattern' in str(e) + tc.assertIn('unsupported pattern', str(e)) else: exit(2) try: gen.ltl_pattern(gen.LTL_OR_G, -10) except RuntimeError as e: - assert 'or-g' in str(e) - assert 'positive' in str(e) + tc.assertIn('or-g', str(e)) + tc.assertIn('positive', str(e)) else: exit(2) -assert 40 == sum(p.size() for p in gen.ltl_patterns((gen.LTL_OR_G, 1, 5), - (gen.LTL_GH_Q, 3), - gen.LTL_EH_PATTERNS)) +tc.assertEqual(40, sum(p.size() + for p in gen.ltl_patterns((gen.LTL_OR_G, 1, 5), + (gen.LTL_GH_Q, 3), + gen.LTL_EH_PATTERNS))) -assert 32 == sum(p.num_states() - for p in gen.aut_patterns((gen.AUT_L_NBA, 1, 3), - (gen.AUT_KS_NCA, 5))) +tc.assertEqual(32, sum(p.num_states() + for p in gen.aut_patterns((gen.AUT_L_NBA, 1, 3), + (gen.AUT_KS_NCA, 5)))) diff --git a/tests/python/genem.py b/tests/python/genem.py index 5da9ce85c..0c9d0809a 100644 --- a/tests/python/genem.py +++ b/tests/python/genem.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -22,6 +22,8 @@ # are usable with methods from the spot package. import spot +from unittest import TestCase +tc = TestCase() a1 = spot.automaton(''' HOA: v1 name: "aut" States: 4 Start: 0 AP: 0 @@ -179,7 +181,7 @@ def generic_emptiness2_rec(aut): # Find some Fin set, we necessarily have one, otherwise the SCC # would have been found to be either rejecting or accepting. 
fo = acc.fin_one() - assert fo >= 0, acc + tc.assertTrue(fo >= 0, acc) for part in si.split_on_sets(scc, [fo]): if not generic_emptiness2(part): return False @@ -309,10 +311,10 @@ def run_bench(automata): + str(res3b)[0] + str(res3c)[0] + str(res3d)[0] + str(res4)[0] + str(res5)[0]) print(res) - assert res in ('TTTTTTTT', 'FFFFFFFF') + tc.assertIn(res, ('TTTTTTTT', 'FFFFFFFF')) if res == 'FFFFFFFF': run3 = spot.generic_accepting_run(aut) - assert run3.replay(spot.get_cout()) is True + tc.assertTrue(run3.replay(spot.get_cout())) run_bench([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a360, act]) diff --git a/tests/python/implies.py b/tests/python/implies.py index 2e4e64ddd..24d74b720 100755 --- a/tests/python/implies.py +++ b/tests/python/implies.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012 Laboratoire de Recherche et Développement +# Copyright (C) 2012, 2022 Laboratoire de Recherche et Développement # de l'EPITA. # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import sys from buddy import * +from unittest import TestCase +tc = TestCase() bdd_init(10000, 10000) bdd_setvarnum(5) @@ -33,26 +35,26 @@ e = V[1] & V[2] & -V[3] & V[4] f = V[0] & -V[3] & V[4] g = -V[0] | V[1] -assert(bdd_implies(b, a)) -assert(not bdd_implies(a, b)) -assert(not bdd_implies(c, a)) -assert(bdd_implies(a, d)) -assert(bdd_implies(b, d)) -assert(bdd_implies(c, d)) -assert(bdd_implies(d, d)) -assert(not bdd_implies(e, d)) -assert(not bdd_implies(d, e)) -assert(not bdd_implies(f, e)) -assert(not bdd_implies(e, f)) -assert(bdd_implies(bddfalse, f)) -assert(not bdd_implies(bddtrue, f)) -assert(bdd_implies(f, bddtrue)) -assert(not bdd_implies(f, bddfalse)) -assert(bdd_implies(a, g)) +tc.assertTrue(bdd_implies(b, a)) +tc.assertFalse(bdd_implies(a, b)) +tc.assertFalse(bdd_implies(c, a)) +tc.assertTrue(bdd_implies(a, d)) +tc.assertTrue(bdd_implies(b, d)) +tc.assertTrue(bdd_implies(c, d)) +tc.assertTrue(bdd_implies(d, d)) +tc.assertFalse(bdd_implies(e, d)) +tc.assertFalse(bdd_implies(d, e)) +tc.assertFalse(bdd_implies(f, e)) +tc.assertFalse(bdd_implies(e, f)) +tc.assertTrue(bdd_implies(bddfalse, f)) +tc.assertFalse(bdd_implies(bddtrue, f)) +tc.assertTrue(bdd_implies(f, bddtrue)) +tc.assertFalse(bdd_implies(f, bddfalse)) +tc.assertTrue(bdd_implies(a, g)) a = (-V[2] & (-V[1] | V[0])) | (-V[0] & V[1] & V[2]) b = V[1] | -V[2] -assert(bdd_implies(a, b)) +tc.assertTrue(bdd_implies(a, b)) # Cleanup all BDD variables before calling bdd_done(), otherwise # bdd_delref will be called after bdd_done() and this is unsafe in diff --git a/tests/python/intrun.py b/tests/python/intrun.py index c86c6d643..e3b708a95 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # This issue was reported by Florian Renkin. 
The reduce() call used in # intersecting_run() was bogus, and could incorrectly reduce a word @@ -34,5 +36,5 @@ trans-labels explicit-labels trans-acc complete properties: deterministic State: 3 [t] 1 {1 2} State: 4 [!0&1] 4 {2} [!0&!1] 3 {2} [0] 2 {0 2} --END--""") r = b.intersecting_run(spot.complement(a)); c = spot.twa_word(r).as_automaton() -assert c.intersects(b) -assert not c.intersects(a) +tc.assertTrue(c.intersects(b)) +tc.assertFalse(c.intersects(a)) diff --git a/tests/python/kripke.py b/tests/python/kripke.py index f3ce218b2..fa92b3fa9 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,6 +19,9 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() + bdict = spot.make_bdd_dict() k = spot.make_kripke_graph(bdict) p1 = buddy.bdd_ithvar(k.register_ap("p1")) @@ -51,25 +54,25 @@ State: [0&1] 1 "0" State: [!0&!1] 2 "2" 2 1 --END--""" -assert hoa == k.to_str('HOA') -assert k.num_states() == 3 -assert k.num_edges() == 5 +tc.assertEqual(hoa, k.to_str('HOA')) +tc.assertEqual(k.num_states(), 3) +tc.assertEqual(k.num_edges(), 5) res = [] for e in k.out(s1): res.append((e.src, e.dst)) -assert res == [(1, 0), (1, 2)] +tc.assertEqual(res, [(1, 0), (1, 2)]) res = [] for e in k.edges(): res.append((e.src, e.dst)) -assert res == [(1, 0), (0, 0), (1, 2), (2, 2), (2, 0)] +tc.assertEqual(res, [(1, 0), (0, 0), (1, 2), (2, 2), (2, 0)]) res = [] for s in k.states(): res.append(s.cond()) -assert res == [cond1, cond2, cond3] +tc.assertEqual(res, [cond1, cond2, cond3]) -assert k.states()[0].cond() == cond1 -assert k.states()[1].cond() == cond2 -assert k.states()[2].cond() == cond3 +tc.assertEqual(k.states()[0].cond(), cond1) +tc.assertEqual(k.states()[1].cond(), cond2) +tc.assertEqual(k.states()[2].cond(), cond3) diff --git a/tests/python/langmap.py b/tests/python/langmap.py index 6fd860986..723a5c0d5 100644 --- a/tests/python/langmap.py +++ b/tests/python/langmap.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE) +# Copyright (C) 2016, 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE) # # This file is part of Spot, a model checking library. 
# @@ -19,6 +19,8 @@ import spot import sys +from unittest import TestCase +tc = TestCase() def hstates(txt): @@ -31,13 +33,10 @@ def hstates(txt): def test(f, opt, expected): aut = spot.translate(f, *opt, 'deterministic') v = spot.language_map(aut) - assert len(v) == aut.num_states() + tc.assertEqual(len(v), aut.num_states()) spot.highlight_languages(aut) l = hstates(aut.to_str('hoa', '1.1')) - if l != expected: - print('for {}\nexpected: {}\n but got: {}'.format(f, expected, l), - file=sys.stderr) - exit(1) + tc.assertEqual(l, expected) test('GF(a) & GFb & c', ['Buchi', 'SBAcc'], '1 0 2 0 3 0') @@ -50,6 +49,6 @@ test('Xa', ['Buchi', 'SBAcc'], '') try: test('FGa', ['Buchi'], '') except RuntimeError as e: - assert 'language_map only works with deterministic automata'in str(e) + tc.assertIn('language_map only works with deterministic automata', str(e)) else: exit(1) diff --git a/tests/python/ltl2tgba.py b/tests/python/ltl2tgba.py index 25fff4566..913c557be 100755 --- a/tests/python/ltl2tgba.py +++ b/tests/python/ltl2tgba.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2016, 2021 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) 2009, 2010, 2012, 2014-2016, 2021-2022 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -98,7 +98,7 @@ if f: elif taa_opt: a = concrete = spot.ltl_to_taa(f, dict) else: - assert "unspecified translator" + raise RuntimeError("unspecified translator") if wdba: a = spot.ensure_digraph(a) @@ -117,7 +117,7 @@ if f: elif output == 6: spot.print_lbtt(cout, a) else: - assert "unknown output option" + raise RuntimeError("unknown output option") if degeneralize_opt: del degeneralized @@ -137,4 +137,6 @@ del dict # not necessary in other implementations. from platform import python_implementation if python_implementation() == 'CPython': - assert spot.fnode_instances_check() + from unittest import TestCase + tc = TestCase() + tc.assertTrue(spot.fnode_instances_check()) diff --git a/tests/python/ltlf.py b/tests/python/ltlf.py index 5676a2a1b..b13432d3e 100644 --- a/tests/python/ltlf.py +++ b/tests/python/ltlf.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de # l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() lcc = spot.language_containment_checker() @@ -43,5 +45,5 @@ for f in formulas: f4 = spot.formula_And([spot.from_ltlf(f2), cst]) print("{}\t=>\t{}".format(f1, f3)) print("{}\t=>\t{}".format(f2, f4)) - assert lcc.equal(f3, f4) + tc.assertTrue(lcc.equal(f3, f4)) print() diff --git a/tests/python/ltlparse.py b/tests/python/ltlparse.py index 98562743c..208e0c321 100755 --- a/tests/python/ltlparse.py +++ b/tests/python/ltlparse.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2017, 2019, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2009-2012, 2014-2017, 2019, 2021-2022 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). 
# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -22,6 +22,8 @@ import sys import spot +from unittest import TestCase +tc = TestCase() e = spot.default_environment.instance() @@ -41,11 +43,11 @@ for str1, isl in l: pf = spot.parse_infix_psl(str2, e) if pf.format_errors(spot.get_cout()): sys.exit(1) - assert isl == pf.f.is_leaf() + tc.assertEqual(isl, pf.f.is_leaf()) del pf -assert spot.formula('a').is_leaf() -assert spot.formula('0').is_leaf() +tc.assertTrue(spot.formula('a').is_leaf()) +tc.assertTrue(spot.formula('0').is_leaf()) for str1 in ['a * b', 'a xor b', 'a <-> b']: pf = spot.parse_infix_boolean(str1, e, False) @@ -66,21 +68,21 @@ for (x, op) in [('a* <-> b*', "`<->'"), ('a*[=2]', "[=...]"), ('a*[->2]', "[->...]")]: f5 = spot.parse_infix_sere(x) - assert f5.errors + tc.assertTrue(f5.errors) ostr = spot.ostringstream() f5.format_errors(ostr) err = ostr.str() - assert "not a Boolean expression" in err - assert op in err - assert "SERE" in err + tc.assertIn("not a Boolean expression", err) + tc.assertIn(op, err) + tc.assertIn("SERE", err) del f5 f6 = spot.parse_infix_sere('(a <-> b -> c ^ "b\n\n\rc")[=2] & c[->2]') -assert not f6.errors +tc.assertFalse(f6.errors) del f6 f6 = spot.parse_infix_sere('-') -assert f6.errors +tc.assertTrue(f6.errors) del f6 for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"), @@ -150,12 +152,12 @@ for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"), ('{"X}', "missing closing brace"), ]: f7 = spot.parse_infix_psl(x) - assert f7.errors + tc.assertTrue(f7.errors) ostr = spot.ostringstream() f7.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f7 for (x, msg) in [('a&', "missing right operand for \"and operator\""), @@ -174,12 +176,12 @@ for (x, msg) in [('a&', "missing right operand for \"and operator\""), ('!', "missing right operand for \"not operator\""), ]: f8 = spot.parse_infix_boolean(x) - assert f8.errors + tc.assertTrue(f8.errors) ostr = spot.ostringstream() f8.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f8 for (x, msg) in [('a->', "missing right operand for \"implication operator\""), @@ -191,12 +193,12 @@ for (x, msg) in [('a->', "missing right operand for \"implication operator\""), ]: f9 = spot.parse_infix_psl(x, spot.default_environment.instance(), False, True) - assert f9.errors + tc.assertTrue(f9.errors) ostr = spot.ostringstream() f9.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f9 # force GC before fnode_instances_check(), unless it's CPython @@ -205,15 +207,15 @@ if python_implementation() != 'CPython': import gc gc.collect() -assert spot.fnode_instances_check() +tc.assertTrue(spot.fnode_instances_check()) f = spot.formula_F(2, 4, spot.formula_ap("a")) -assert f == spot.formula("XX(a | X(a | X(a)))") +tc.assertEqual(f, spot.formula("XX(a | X(a | X(a)))")) f = spot.formula_G(2, 4, spot.formula_ap("a")) -assert f == spot.formula("XX(a & X(a & X(a)))") +tc.assertEqual(f, spot.formula("XX(a & X(a & X(a)))")) f = spot.formula_X(2, spot.formula_ap("a")) -assert f == spot.formula("XX(a)") +tc.assertEqual(f, spot.formula("XX(a)")) f = spot.formula_G(2, spot.formula_unbounded(), spot.formula_ap("a")) -assert f == spot.formula("XXG(a)") +tc.assertEqual(f, spot.formula("XXG(a)")) f = spot.formula_F(2, spot.formula_unbounded(), spot.formula_ap("a")) -assert f == 
spot.formula("XXF(a)") +tc.assertEqual(f, spot.formula("XXF(a)")) diff --git a/tests/python/ltlsimple.py b/tests/python/ltlsimple.py index 7b88f07dc..c21c3b7f1 100755 --- a/tests/python/ltlsimple.py +++ b/tests/python/ltlsimple.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2015, 2018, 2021 Laboratoire de +# Copyright (C) 2009, 2010, 2012, 2015, 2018, 2021-2022 Laboratoire de # Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systemes Répartis Coopératifs (SRC), Université Pierre @@ -22,6 +22,8 @@ import spot import sys +from unittest import TestCase +tc = TestCase() # Some of the tests here assume timely destructor calls, as they occur # in the the reference-counted CPython implementation. Other @@ -35,13 +37,13 @@ b = spot.formula.ap('b') c = spot.formula.ap('c') c2 = spot.formula.ap('c') -assert c == c2 +tc.assertEqual(c, c2) op = spot.formula.And([a, b]) op2 = spot.formula.And([op, c]) op3 = spot.formula.And([a, c, b]) -assert op2 == op3 +tc.assertEqual(op2, op3) # The symbol for a subformula which hasn't been cloned is better # suppressed, so we don't attempt to reuse it elsewhere. @@ -52,12 +54,12 @@ sys.stdout.write('op2 = %s\n' % str(op2)) del a, b, c2 sys.stdout.write('op3 = %s\n' % str(op3)) -assert op2 == op3 +tc.assertEqual(op2, op3) op4 = spot.formula.Or([op2, op3]) sys.stdout.write('op4 = %s\n' % str(op4)) -assert op4 == op2 +tc.assertEqual(op4, op2) del op2, op3, op4 @@ -78,10 +80,11 @@ f5 = spot.formula.Xor(F, c) del a, b, c, T, F, f1, f2, f4, f5 if is_cpython: - assert spot.fnode_instances_check() + tc.assertTrue(spot.fnode_instances_check()) # ---------------------------------------------------------------------- -assert str([str(x) for x in spot.formula('a &b & c')]) == "['a', 'b', 'c']" +tc.assertEqual(str([str(x) for x in spot.formula('a &b & c')]), + "['a', 'b', 'c']") def switch_g_f(x): @@ -93,7 +96,7 @@ def switch_g_f(x): f = spot.formula('GFa & XFGb & Fc & G(a | b | Fd)') -assert str(switch_g_f(f)) == 'FGa & XGFb & Gc & F(a | b | Gd)' +tc.assertEqual(str(switch_g_f(f)), 'FGa & XGFb & Gc & F(a | b | Gd)') x = 0 @@ -105,7 +108,7 @@ def count_g(f): f.traverse(count_g) -assert x == 3 +tc.assertEqual(x, 3) # ---------------------------------------------------------------------- @@ -121,14 +124,14 @@ LBT for shell: echo {f:lq} | ... Default for CSV: ...,{f:c},... Wring, centered: {f:w:~^50}""".format(f=formula) -assert res == """\ +tc.assertEqual(res, """\ Default output: a U (b U "$strange[0]=name") Spin syntax: a U (b U ($strange[0]=name)) (Spin syntax): (a) U ((b) U ($strange[0]=name)) Default for shell: echo 'a U (b U "$strange[0]=name")' | ... LBT for shell: echo 'U "a" U "b" "$strange[0]=name"' | ... Default for CSV: ...,"a U (b U ""$strange[0]=name"")",... 
-Wring, centered: ~~~~~(a=1) U ((b=1) U ("$strange[0]=name"=1))~~~~~""" +Wring, centered: ~~~~~(a=1) U ((b=1) U ("$strange[0]=name"=1))~~~~~""") opt = spot.tl_simplifier_options(False, True, True, @@ -144,9 +147,8 @@ for (input, output) in [('(a&b)<->b', 'b->(a&b)'), ('b xor (!(a&b))', 'b->(a&b)'), ('!b xor (a&b)', 'b->(a&b)')]: f = spot.tl_simplifier(opt).simplify(input) - print(input, f, output) - assert(f == output) - assert(spot.are_equivalent(input, output)) + tc.assertEqual(f, output) + tc.assertTrue(spot.are_equivalent(input, output)) def myparse(input): @@ -157,7 +159,7 @@ def myparse(input): # This used to fail, because myparse would return a pointer # to pf.f inside the destroyed pf. -assert myparse('a U b') == spot.formula('a U b') +tc.assertEqual(myparse('a U b'), spot.formula('a U b')) -assert spot.is_liveness('a <-> GFb') -assert not spot.is_liveness('a & GFb') +tc.assertTrue(spot.is_liveness('a <-> GFb')) +tc.assertFalse(spot.is_liveness('a & GFb')) diff --git a/tests/python/mealy.py b/tests/python/mealy.py index da71d1bfb..71c7739f9 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot, buddy +from unittest import TestCase +tc = TestCase() # Testing Sat-based approach @@ -42,8 +44,8 @@ spot.set_state_players(a, [False,True,False,True,False,True]) spot.set_synthesis_outputs(a, o1&o2) b = spot.minimize_mealy(a) -assert(list(spot.get_state_players(b)).count(False) == 2) -assert(spot.is_split_mealy_specialization(a, b)) +tc.assertEqual(list(spot.get_state_players(b)).count(False), 2) +tc.assertTrue(spot.is_split_mealy_specialization(a, b)) test_auts = [ ("""HOA: v1 @@ -371,21 +373,21 @@ for (mealy_str, nenv_min) in test_auts: elif aap.ap_name().startswith("i"): ins = ins & buddy.bdd_ithvar(mealy.register_ap(aap.ap_name())) else: - assert("""Aps must start with either "i" or "o".""") + raise AssertionError("""Aps must start with either "i" or "o".""") spot.set_synthesis_outputs(mealy, outs) mealy_min_ks = spot.minimize_mealy(mealy, -1) n_e = sum([s == 0 for s in spot.get_state_players(mealy_min_ks)]) - assert(n_e == nenv_min) - assert(spot.is_split_mealy_specialization(mealy, mealy_min_ks)) + tc.assertEqual(n_e, nenv_min) + tc.assertTrue(spot.is_split_mealy_specialization(mealy, mealy_min_ks)) # Test un- and resplit tmp = spot.unsplit_2step(mealy_min_ks) mealy_min_rs = spot.split_2step(tmp, spot.get_synthesis_outputs(tmp), False) - assert(spot.is_split_mealy_specialization(mealy, mealy_min_rs, True)) - assert(spot.are_equivalent(mealy_min_ks, mealy_min_rs)) + tc.assertTrue(spot.is_split_mealy_specialization(mealy, mealy_min_rs, True)) + tc.assertTrue(spot.are_equivalent(mealy_min_ks, mealy_min_rs)) # Testing bisimulation (with output assignment) @@ -515,15 +517,15 @@ spot.set_synthesis_outputs(aut, & buddy.bdd_ithvar( aut.register_ap("u02alarm29control0f1d2alarm29turn2off1b"))) min_equiv = spot.reduce_mealy(aut, False) -assert min_equiv.num_states() == 6 -assert spot.are_equivalent(min_equiv, aut) +tc.assertEqual(min_equiv.num_states(), 6) +tc.assertTrue(spot.are_equivalent(min_equiv, aut)) # Build an automaton that recognizes a subset of the language of the original # automaton min_sub = spot.reduce_mealy(aut, True) -assert min_sub.num_states() == 5 +tc.assertEqual(min_sub.num_states(), 5) prod = spot.product(spot.complement(aut), min_sub) -assert spot.generic_emptiness_check(prod) +tc.assertTrue(spot.generic_emptiness_check(prod)) aut = spot.automaton(""" HOA: v1 @@ -564,7 +566,7 @@ State: 0 # An 
example that shows that we should not build a tree when we use inclusion. res = spot.reduce_mealy(aut, True) -assert res.to_str() == exp +tc.assertEqual(res.to_str(), exp) aut = spot.automaton(""" HOA: v1 @@ -608,4 +610,4 @@ State: 1 --END--""" res = spot.reduce_mealy(aut, True) -assert res.to_str() == exp +tc.assertEqual(res.to_str(), exp) diff --git a/tests/python/merge.py b/tests/python/merge.py index c56d8f309..893916953 100644 --- a/tests/python/merge.py +++ b/tests/python/merge.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -39,7 +41,7 @@ State: 2 out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -54,8 +56,8 @@ State: 1 [1] 2 {0} State: 2 [1] 0 ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 3 @@ -75,7 +77,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -90,7 +92,7 @@ State: 1 [1] 2 {0} State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -111,7 +113,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -126,7 +128,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -146,7 +148,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -161,7 +163,7 @@ State: 1 [1] 2 {1} State: 2 [1] 0 {0} ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -182,7 +184,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -197,7 +199,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -217,7 +219,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -232,7 +234,7 @@ State: 1 {0} [1] 2 State: 2 {0} [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -252,7 +254,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -267,7 +269,7 @@ State: 1 {0} [1] 2 State: 2 {0} [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -287,7 +289,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -301,7 +303,7 @@ State: 1 {1} [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 4 @@ -335,7 +337,7 @@ State: 3 {1 3} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 
+tc.assertEqual(hoa, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -364,7 +366,7 @@ State: 3 {1} [0&!1] 0 [!0&1] 3 [0&1] 2 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -388,7 +390,7 @@ State: 2 out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -406,8 +408,8 @@ State: 1 State: 2 [0] 2 {0} [!0] 1 {0} ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 4 @@ -435,7 +437,7 @@ State: 3 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 4 Start: 0 AP: 2 "p0" "p1" @@ -457,7 +459,7 @@ State: 3 [0&1] 0 {1} [0&!1] 3 {1 2} [!0] 1 {3} ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 1 @@ -475,7 +477,7 @@ State: 0 {1 2} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -486,7 +488,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -506,7 +508,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -519,7 +521,7 @@ State: 0 [!0] 1 {2} State: 1 [t] 1 {1 2} ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 1 @@ -536,7 +538,7 @@ State: 0 {0 1 3} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -547,7 +549,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -568,7 +570,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -583,7 +585,7 @@ State: 0 State: 1 [0] 1 [!0] 0 {1} ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -602,7 +604,7 @@ State: 1 {1} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -615,7 +617,7 @@ State: 0 [t] 1 State: 1 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -636,7 +638,7 @@ State: 2 {2} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -650,7 +652,7 @@ State: 1 {0} [t] 1 State: 2 {2} [t] 1 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -672,7 +674,7 @@ State: 2 {1 2 3} out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -687,8 +689,8 @@ State: 1 {1} [t] 2 State: 2 {0 1} [t] 1 ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 2 @@ -708,7 +710,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -722,7 +724,7 @@ State: 0 State: 1 [0] 1 [!0] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -740,7 +742,7 @@ State: 2 --END--""") spot.simplify_acceptance_here(aut) hoa = 
aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -755,7 +757,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -773,7 +775,7 @@ State: 2 --END--""") spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -788,4 +790,4 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index e55bdabf2..4e97abe23 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2020-2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -20,12 +20,14 @@ import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") -assert aut.num_edges() == 2 +tc.assertEqual(aut.num_edges(), 2) aut.merge_edges() -assert aut.num_edges() == 1 +tc.assertEqual(aut.num_edges(), 1) aut = spot.automaton(""" HOA: v1 @@ -44,15 +46,15 @@ State: 1 [0 | 1] 1 [0&!1] 1 {0} --END--""") -assert aut.num_edges() == 5 +tc.assertEqual(aut.num_edges(), 5) aut.merge_edges() -assert aut.num_edges() == 5 -assert not spot.is_deterministic(aut) +tc.assertEqual(aut.num_edges(), 5) +tc.assertFalse(spot.is_deterministic(aut)) aut = spot.split_edges(aut) -assert aut.num_edges() == 9 +tc.assertEqual(aut.num_edges(), 9) aut.merge_edges() -assert aut.num_edges() == 5 -assert spot.is_deterministic(aut) +tc.assertEqual(aut.num_edges(), 5) +tc.assertTrue(spot.is_deterministic(aut)) aut = spot.automaton(""" HOA: v1 @@ -74,15 +76,15 @@ State: 2 [0] 1 --END--""") aut.merge_states() -assert aut.num_edges() == 4 -assert aut.num_states() == 2 -assert spot.is_deterministic(aut) -assert aut.prop_complete() +tc.assertEqual(aut.num_edges(), 4) +tc.assertEqual(aut.num_states(), 2) +tc.assertTrue(spot.is_deterministic(aut)) +tc.assertTrue(aut.prop_complete()) aut.merge_states() -assert aut.num_edges() == 4 -assert aut.num_states() == 2 -assert spot.is_deterministic(aut) -assert aut.prop_complete() +tc.assertEqual(aut.num_edges(), 4) +tc.assertEqual(aut.num_states(), 2) +tc.assertTrue(spot.is_deterministic(aut)) +tc.assertTrue(aut.prop_complete()) aa = spot.automaton(""" diff --git a/tests/python/misc-ec.py b/tests/python/misc-ec.py index d1234bd69..85d4aaa47 100644 --- a/tests/python/misc-ec.py +++ b/tests/python/misc-ec.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,9 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() + aut = spot.translate("G(p0 | (p0 R Xp0) | XF(!p0 & p1))", 'Buchi', 'SBAcc') ec = spot.make_emptiness_check_instantiator('SE05')[0].instantiate(aut) n = 0 @@ -27,7 +30,7 @@ while True: break print(res.accepting_run()) n += 1 -assert n == 2 +tc.assertEqual(n, 2) for name in ['SE05', 'CVWY90', 'GV04']: aut = spot.translate("GFa && GFb") @@ -35,13 +38,13 @@ for name in ['SE05', 'CVWY90', 'GV04']: ec = spot.make_emptiness_check_instantiator(name)[0].instantiate(aut) print(ec.check().accepting_run()) except RuntimeError as e: - assert "Büchi or weak" in str(e) + tc.assertIn("Büchi or weak", str(e)) aut = spot.translate("a", 'monitor') try: ec = spot.make_emptiness_check_instantiator('Tau03')[0].instantiate(aut) except RuntimeError as e: - assert "at least one" in str(e) + tc.assertIn("at least one", str(e)) aut = spot.translate("a", 'ba') ec = spot.make_emptiness_check_instantiator('Tau03')[0].instantiate(aut) diff --git a/tests/python/optionmap.py b/tests/python/optionmap.py index 667ef0b19..ad526f510 100755 --- a/tests/python/optionmap.py +++ b/tests/python/optionmap.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2018 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) 2010, 2012, 2018, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -21,65 +21,67 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() o = spot.option_map() res = o.parse_options("optA, opta=2M, optb =4 ; optB = 7\ , optC= 10") -assert not res +tc.assertFalse(res) -assert o.get('optA') == 1 -assert o.get('opta') == 2*1024*1024 -assert o.get('optb') == 4 -assert o.get('optB') == 7 -assert o.get('optC') == 10 -assert o.get('none') == 0 -assert o.get('none', 16) == 16 +tc.assertEqual(o.get('optA'), 1) +tc.assertEqual(o.get('opta'), 2*1024*1024) +tc.assertEqual(o.get('optb'), 4) +tc.assertEqual(o.get('optB'), 7) +tc.assertEqual(o.get('optC'), 10) +tc.assertEqual(o.get('none'), 0) +tc.assertEqual(o.get('none', 16), 16) o.set('optb', 40) -assert o.get('optb') == 40 +tc.assertEqual(o.get('optb'), 40) res = o.parse_options("!optA !optb optC, !optB") -assert not res -assert o.get('optA') == 0 -assert o.get('opta') == 2*1024*1024 -assert o.get('optb') == 0 -assert o.get('optB') == 0 -assert o.get('optC') == 1 +tc.assertFalse(res) +tc.assertEqual(o.get('optA'), 0) +tc.assertEqual(o.get('opta'), 2*1024*1024) +tc.assertEqual(o.get('optb'), 0) +tc.assertEqual(o.get('optB'), 0) +tc.assertEqual(o.get('optC'), 1) res = o.parse_options("!") -assert res == "!" 
+tc.assertEqual(res, "!") res = o.parse_options("foo, !opt = 1") -assert res == "!opt = 1" +tc.assertEqual(res, "!opt = 1") res = o.parse_options("foo=3, opt == 1") -assert res == "opt == 1" +tc.assertEqual(res, "opt == 1") res = o.parse_options("foo=3opt == 1") -assert res == "3opt == 1" +tc.assertEqual(res, "3opt == 1") aut1 = spot.translate('GF(a <-> XXa)', 'det') -assert aut1.num_states() == 4 +tc.assertEqual(aut1.num_states(), 4) aut2 = spot.translate('GF(a <-> XXa)', 'det', xargs='gf-guarantee=0') -assert aut2.num_states() == 9 +tc.assertEqual(aut2.num_states(), 9) try: spot.translate('GF(a <-> XXa)', 'det', xargs='foobar=1') except RuntimeError as e: - assert "option 'foobar' was not used" in str(e) + tc.assertIn("option 'foobar' was not used", str(e)) else: raise RuntimeError("missing exception") try: spot.translate('GF(a <-> XXa)').postprocess('det', xargs='gf-guarantee=0') except RuntimeError as e: - assert "option 'gf-guarantee' was not used" in str(e) + tc.assertIn("option 'gf-guarantee' was not used", str(e)) else: raise RuntimeError("missing exception") try: spot.translate('GF(a <-> XXa)').postprocess('det', xargs='gf-guarantee=x') except RuntimeError as e: - assert "failed to parse option at: 'gf-guarantee=x'" in str(e) + tc.assertIn("failed to parse option at: 'gf-guarantee=x'", str(e)) else: raise RuntimeError("missing exception") diff --git a/tests/python/origstate.py b/tests/python/origstate.py index 0ca013889..15a7ab0ad 100644 --- a/tests/python/origstate.py +++ b/tests/python/origstate.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2017, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import spot from sys import exit +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -38,7 +40,7 @@ State: 1 """) aut2 = spot.degeneralize(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -56,10 +58,10 @@ State: 1 [1] 2 State: 2 {0} [1] 2 ---END--""" +--END--""") aut2.copy_state_names_from(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -77,7 +79,7 @@ State: 1 "0#0" [1] 2 State: 2 "1#1" {0} [1] 2 ---END--""" +--END--""") aut2.set_init_state(2) aut2.purge_unreachable_states() @@ -93,16 +95,16 @@ properties: deterministic State: 0 "1#1" {0} [1] 0 --END--""" -assert aut2.to_str() == ref +tc.assertEqual(aut2.to_str(), ref) # This makes sure that the original-states vector has also been renamed. aut2.copy_state_names_from(aut) -assert aut2.to_str() == ref +tc.assertEqual(aut2.to_str(), ref) aut2 = spot.degeneralize(aut) aut2.release_named_properties() try: aut2.copy_state_names_from(aut) except RuntimeError as e: - assert "state does not exist in source automaton" in str(e) + tc.assertIn("state does not exist in source automaton", str(e)) else: exit(1) diff --git a/tests/python/otfcrash.py b/tests/python/otfcrash.py index 69acbcb1a..8e30cb501 100644 --- a/tests/python/otfcrash.py +++ b/tests/python/otfcrash.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. 
@@ -23,6 +23,8 @@ import spot.aux import tempfile import shutil import sys +from unittest import TestCase +tc = TestCase() spot.ltsmin.require('divine') @@ -51,4 +53,4 @@ system async; p = spot.otf_product(k, a) return p.is_empty() - assert(modelcheck('X "R.found"', m) == True) + tc.assertTrue(modelcheck('X "R.found"', m)) diff --git a/tests/python/parity.py b/tests/python/parity.py index b0389c40e..6ced51c40 100644 --- a/tests/python/parity.py +++ b/tests/python/parity.py @@ -19,36 +19,38 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() max_even_5 = spot.acc_code.parity(True, False, 5) -assert max_even_5 == spot.acc_code.parity_max_even(5) -assert max_even_5 == spot.acc_code.parity_max(False, 5) +tc.assertEqual(max_even_5, spot.acc_code.parity_max_even(5)) +tc.assertEqual(max_even_5, spot.acc_code.parity_max(False, 5)) min_even_5 = spot.acc_code.parity(False, False, 5) -assert min_even_5 == spot.acc_code.parity_min_even(5) -assert min_even_5 == spot.acc_code.parity_min(False, 5) +tc.assertEqual(min_even_5, spot.acc_code.parity_min_even(5)) +tc.assertEqual(min_even_5, spot.acc_code.parity_min(False, 5)) max_odd_5 = spot.acc_code.parity(True, True, 5) -assert max_odd_5 == spot.acc_code.parity_max_odd(5) -assert max_odd_5 == spot.acc_code.parity_max(True, 5) +tc.assertEqual(max_odd_5, spot.acc_code.parity_max_odd(5)) +tc.assertEqual(max_odd_5, spot.acc_code.parity_max(True, 5)) min_odd_5 = spot.acc_code.parity(False, True, 5) -assert min_odd_5 == spot.acc_code.parity_min_odd(5) -assert min_odd_5 == spot.acc_code.parity_min(True, 5) +tc.assertEqual(min_odd_5, spot.acc_code.parity_min_odd(5)) +tc.assertEqual(min_odd_5, spot.acc_code.parity_min(True, 5)) for f in ('FGa', 'GFa & GFb & FGc', 'XXX(a U b)'): a1 = spot.translate(f, 'parity') - assert a1.acc().is_parity() + tc.assertTrue(a1.acc().is_parity()) a2 = spot.translate(f).postprocess('parity') - assert a2.acc().is_parity() + tc.assertTrue(a2.acc().is_parity()) a3 = spot.translate(f, 'det').postprocess('parity', 'colored') - assert a3.acc().is_parity() - assert spot.is_colored(a3) + tc.assertTrue(a3.acc().is_parity()) + tc.assertTrue(spot.is_colored(a3)) a = spot.translate('GFa & GFb') try: spot.change_parity_here(a, spot.parity_kind_same, spot.parity_style_even) except RuntimeError as e: - assert 'input should have parity acceptance' in str(e) + tc.assertIn('input should have parity acceptance', str(e)) else: exit(2) @@ -64,7 +66,7 @@ State: 0 --END-- """) spot.cleanup_parity_here(a) -assert a.to_str() == """HOA: v1 +tc.assertEqual(a.to_str(), """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -75,7 +77,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") a = spot.automaton(""" HOA: v1 @@ -89,7 +91,7 @@ State: 0 --END-- """) spot.cleanup_parity_here(a) -assert a.to_str() == """HOA: v1 +tc.assertEqual(a.to_str(), """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -100,7 +102,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") a = spot.automaton("""HOA: v1 States: 3 @@ -120,39 +122,39 @@ State: 2 try: spot.get_state_players(a) except RuntimeError as e: - assert "not a game" in str(e) + tc.assertIn("not a game", str(e)) else: report_missing_exception() try: spot.set_state_player(a, 1, True) except RuntimeError as e: - assert "Can only" in str(e) + tc.assertIn("Can only", str(e)) else: report_missing__exception() spot.set_state_players(a, (False, True, False)) -assert spot.get_state_player(a, 0) == False -assert spot.get_state_player(a, 1) == True 
-assert spot.get_state_player(a, 2) == False +tc.assertEqual(spot.get_state_player(a, 0), False) +tc.assertEqual(spot.get_state_player(a, 1), True) +tc.assertEqual(spot.get_state_player(a, 2), False) try: spot.set_state_players(a, [True, False, False, False]) except RuntimeError as e: - assert "many owners as states" in str(e) + tc.assertIn("many owners as states", str(e)) else: report_missing_exception() try: spot.get_state_player(a, 4) except RuntimeError as e: - assert "invalid state number" in str(e) + tc.assertIn("invalid state number", str(e)) else: report_missing_exception() try: spot.set_state_player(a, 4, True) except RuntimeError as e: - assert "invalid state number" in str(e) + tc.assertIn("invalid state number", str(e)) else: report_missing_exception() @@ -168,4 +170,4 @@ oi.erase() # postprocess used to call reduce_parity that did not # work correctly on automata with deleted edges. sm = a.postprocess("gen", "small") -assert sm.num_states() == 3 +tc.assertEqual(sm.num_states(), 3) diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index cbcacb183..038b33a19 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2012, 2014, 2015, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import os import spot +from unittest import TestCase +tc = TestCase() contents = ''' HOA: v1 name: "a U b" States: 2 Start: 1 AP: 2 "a" "b" acc-name: Buchi @@ -34,7 +36,7 @@ out.close() a = spot.parse_aut(filename, spot.make_bdd_dict()) -assert not a.errors +tc.assertFalse(a.errors) spot.print_dot(spot.get_cout(), a.aut) diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 02150d375..12bc9e39a 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2020, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2019, 2020, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -23,6 +23,8 @@ import spot +from unittest import TestCase +tc = TestCase() a, b, d, f = spot.automata(""" HOA: v1 @@ -73,19 +75,19 @@ State: 1 --END-- """) -assert spot.is_partially_degeneralizable(a) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(a), [0, 1]) da = spot.partial_degeneralize(a, [0, 1]) -assert da.equivalent_to(a) -assert da.num_states() == 2 +tc.assertTrue(da.equivalent_to(a)) +tc.assertEqual(da.num_states(), 2) -assert spot.is_partially_degeneralizable(b) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(b), [0, 1]) db = spot.partial_degeneralize(b, [0, 1]) -assert db.equivalent_to(b) -assert db.num_states() == 3 +tc.assertTrue(db.equivalent_to(b)) +tc.assertEqual(db.num_states(), 3) db.copy_state_names_from(b) dbhoa = db.to_str('hoa') -assert dbhoa == """HOA: v1 +tc.assertEqual(dbhoa, """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -99,28 +101,28 @@ State: 1 "0#0" {0 1} [0] 2 State: 2 "1#0" {1} [0] 1 ---END--""" +--END--""") c = spot.automaton("randaut -A'(Fin(0)&Inf(1)&Inf(2))|Fin(2)' 1 |") -assert spot.is_partially_degeneralizable(c) == [1, 2] +tc.assertEqual(spot.is_partially_degeneralizable(c), [1, 2]) dc = spot.partial_degeneralize(c, [1, 2]) -assert dc.equivalent_to(c) -assert str(dc.get_acceptance()) == '(Fin(0) & Inf(2)) | Fin(1)' +tc.assertTrue(dc.equivalent_to(c)) +tc.assertEqual(str(dc.get_acceptance()), '(Fin(0) & Inf(2)) | Fin(1)') -assert spot.is_partially_degeneralizable(d) == [] +tc.assertEqual(spot.is_partially_degeneralizable(d), []) dd = spot.partial_degeneralize(d, []) -assert dd.equivalent_to(d) -assert dd.num_states() == 1 -assert str(dd.get_acceptance()) == 'Inf(1) & Fin(0)' +tc.assertTrue(dd.equivalent_to(d)) +tc.assertEqual(dd.num_states(), 1) +tc.assertEqual(str(dd.get_acceptance()), 'Inf(1) & Fin(0)') e = spot.dualize(b) de = spot.partial_degeneralize(e, [0, 1]) -assert de.equivalent_to(e) -assert de.num_states() == 4 +tc.assertTrue(de.equivalent_to(e)) +tc.assertEqual(de.num_states(), 4) de.copy_state_names_from(e) dehoa = de.to_str('hoa') -assert dehoa == """HOA: v1 +tc.assertEqual(dehoa, """HOA: v1 States: 4 Start: 0 AP: 1 "p0" @@ -140,18 +142,18 @@ State: 2 "3#0" State: 3 "2#0" [0] 1 {0} [!0] 2 ---END--""" +--END--""") -assert spot.is_partially_degeneralizable(de) == [] +tc.assertEqual(spot.is_partially_degeneralizable(de), []) df = spot.partial_degeneralize(f, [0, 1]) df.equivalent_to(f) -assert str(df.acc()) == '(1, Fin(0))' +tc.assertEqual(str(df.acc()), '(1, Fin(0))') try: df = spot.partial_degeneralize(f, [0, 1, 2]) except RuntimeError as e: - assert 'partial_degeneralize(): {0,1,2} does not' in str(e) + tc.assertIn('partial_degeneralize(): {0,1,2} does not', str(e)) else: raise RuntimeError("missing exception") @@ -165,13 +167,13 @@ State: 2 [0&!1&2] 3 {1 4 9} State: 3 [0&!1&2] 4 {0 1 5 9} State: 4 [!0&!1&2] 1 State: 7 [0&!1&!2] 0 {4 7} --END--""") daut5 = spot.degeneralize_tba(aut5) -assert daut5.equivalent_to(aut5) +tc.assertTrue(daut5.equivalent_to(aut5)) sets = list(range(aut5.num_sets())) -assert spot.is_partially_degeneralizable(aut5) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut5), sets) pdaut5 = spot.partial_degeneralize(aut5, sets) -assert pdaut5.equivalent_to(aut5) -assert daut5.num_states() == 9 -assert pdaut5.num_states() == 8 +tc.assertTrue(pdaut5.equivalent_to(aut5)) +tc.assertEqual(daut5.num_states(), 9) +tc.assertEqual(pdaut5.num_states(), 8) aut6 = spot.automaton("""HOA: v1 States: 6 Start: 0 AP: 3 "p0" "p1" "p2" acc-name: generalized-Buchi 3 Acceptance: 3 Inf(0)&Inf(1)&Inf(2) 
properties: @@ -180,13 +182,13 @@ trans-labels explicit-labels trans-acc deterministic --BODY-- State: 0 [0&1&!2] 5 {1} State: 4 [!0&1&!2] 0 {1 2} [0&!1&!2] 3 {0} State: 5 [!0&1&2] 1 --END-- """) daut6 = spot.degeneralize_tba(aut6) -assert daut6.equivalent_to(aut6) +tc.assertTrue(daut6.equivalent_to(aut6)) sets = list(range(aut6.num_sets())) -assert spot.is_partially_degeneralizable(aut6) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut6), sets) pdaut6 = spot.partial_degeneralize(aut6, sets) -assert pdaut6.equivalent_to(aut6) -assert daut6.num_states() == 8 -assert pdaut6.num_states() == 8 +tc.assertTrue(pdaut6.equivalent_to(aut6)) +tc.assertEqual(daut6.num_states(), 8) +tc.assertEqual(pdaut6.num_states(), 8) aut7 = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "p0" "p1" "p2" @@ -197,13 +199,13 @@ State: 0 [0&!1&2] 1 {2 3} State: 1 [0&!1&2] 0 {0 2} [0&!1&!2] 6 State: 2 [!0&!1&!2] 3 State: 5 [0&1&!2] 0 [!0&1&2] 7 State: 6 [0&1&2] 2 {1} State: 7 [!0&!1&2] 0 {0} [!0&1&!2] 4 --END--""") daut7 = spot.degeneralize_tba(aut7) -assert daut7.equivalent_to(aut7) +tc.assertTrue(daut7.equivalent_to(aut7)) sets = list(range(aut7.num_sets())) -assert spot.is_partially_degeneralizable(aut7) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut7), sets) pdaut7 = spot.partial_degeneralize(aut7, sets) -assert pdaut7.equivalent_to(aut7) -assert daut7.num_states() == 10 -assert pdaut7.num_states() == 10 +tc.assertTrue(pdaut7.equivalent_to(aut7)) +tc.assertEqual(daut7.num_states(), 10) +tc.assertEqual(pdaut7.num_states(), 10) aut8 = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "p0" "p1" "p2" acc-name: generalized-Buchi 5 Acceptance: 5 Inf(0)&Inf(1)&Inf(2)&Inf(3)&Inf(4) @@ -213,19 +215,19 @@ State: 0 [!0&1&!2] 7 {0} State: 1 [!0&1&2] 1 {4} [0&!1&2] 6 {1 2} State: 2 5 [!0&1&!2] 0 {1 3} State: 6 [0&1&2] 4 [0&1&!2] 6 State: 7 [!0&!1&!2] 1 --END--""") daut8 = spot.degeneralize_tba(aut8) -assert daut8.equivalent_to(aut8) +tc.assertTrue(daut8.equivalent_to(aut8)) sets = list(range(aut8.num_sets())) -assert spot.is_partially_degeneralizable(aut8) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut8), sets) pdaut8 = spot.partial_degeneralize(aut8, sets) -assert pdaut8.equivalent_to(aut8) -assert daut8.num_states() == 22 -assert pdaut8.num_states() == 9 +tc.assertTrue(pdaut8.equivalent_to(aut8)) +tc.assertEqual(daut8.num_states(), 22) +tc.assertEqual(pdaut8.num_states(), 9) aut9 = spot.dualize(aut8) pdaut9 = spot.partial_degeneralize(aut9, sets) -assert pdaut9.equivalent_to(aut9) +tc.assertTrue(pdaut9.equivalent_to(aut9)) # one more state than aut9, because dualize completed the automaton. 
-assert pdaut9.num_states() == 10 +tc.assertEqual(pdaut9.num_states(), 10) aut10 = spot.automaton("""HOA: v1 States: 3 @@ -242,10 +244,10 @@ State: 2 [0] 0 {1} [!0] 1 --END--""") -assert spot.is_partially_degeneralizable(aut10) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(aut10), [0, 1]) pdaut10 = spot.partial_degeneralize(aut10, [0, 1]) -assert pdaut10.equivalent_to(aut10) -assert pdaut10.to_str() == """HOA: v1 +tc.assertTrue(pdaut10.equivalent_to(aut10)) +tc.assertEqual(pdaut10.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -260,7 +262,7 @@ State: 1 State: 2 [0] 0 {1} [!0] 1 ---END--""" +--END--""") aut11 = spot.automaton("""HOA: v1 States: 3 @@ -277,10 +279,10 @@ State: 2 [0] 0 {1} [!0] 1 --END--""") -assert spot.is_partially_degeneralizable(aut11) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(aut11), [0, 1]) pdaut11 = spot.partial_degeneralize(aut11, [0, 1]) -assert pdaut11.equivalent_to(aut11) -assert pdaut11.to_str() == """HOA: v1 +tc.assertTrue(pdaut11.equivalent_to(aut11)) +tc.assertEqual(pdaut11.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -295,7 +297,7 @@ State: 1 State: 2 [0] 0 {2} [!0] 1 ---END--""" +--END--""") aut12 = spot.automaton("""HOA: v1 States: 3 @@ -313,24 +315,24 @@ State: 2 [0] 0 [!0] 1 {3} --END--""") -assert spot.is_partially_degeneralizable(aut12) == [0,1] +tc.assertEqual(spot.is_partially_degeneralizable(aut12), [0,1]) aut12b = spot.partial_degeneralize(aut12, [0,1]) aut12c = spot.partial_degeneralize(aut12b, [1,2]) -assert aut12c.equivalent_to(aut12) -assert aut12c.num_states() == 9 +tc.assertTrue(aut12c.equivalent_to(aut12)) +tc.assertEqual(aut12c.num_states(), 9) aut12d = spot.partial_degeneralize(aut12, [0,1,3]) aut12e = spot.partial_degeneralize(aut12d, [0,1]) -assert aut12e.equivalent_to(aut12) -assert aut12e.num_states() == 9 +tc.assertTrue(aut12e.equivalent_to(aut12)) +tc.assertEqual(aut12e.num_states(), 9) aut12f = spot.partial_degeneralize(aut12) -assert aut12f.equivalent_to(aut12) -assert aut12f.num_states() == 9 +tc.assertTrue(aut12f.equivalent_to(aut12)) +tc.assertEqual(aut12f.num_states(), 9) # Check handling of original-states dot = aut12f.to_str('dot', 'd') -assert dot == """digraph "" { +tc.assertEqual(dot, """digraph "" { rankdir=LR label="Inf(2) | (Inf(1) & Fin(0))\\n[Rabin-like 2]" labelloc="t" @@ -367,10 +369,10 @@ assert dot == """digraph "" { 8 -> 4 [label="p0\\n{1,2}"] 8 -> 7 [label="p0"] } -""" +""") aut12g = spot.partial_degeneralize(aut12f) -assert aut12f == aut12g +tc.assertEqual(aut12f, aut12g) aut13 = spot.automaton("""HOA: v1 States: 2 @@ -390,8 +392,8 @@ State: 1 [!0&!1&2&3] 1 {0 2} --END--""") aut13g = spot.partial_degeneralize(aut13) -assert aut13g.equivalent_to(aut13) -assert aut13g.num_states() == 3 +tc.assertTrue(aut13g.equivalent_to(aut13)) +tc.assertEqual(aut13g.num_states(), 3) aut14 = spot.automaton("""HOA: v1 @@ -412,8 +414,8 @@ State: 1 --END-- """) aut14g = spot.partial_degeneralize(aut14) -assert aut14g.equivalent_to(aut14) -assert aut14g.num_states() == 3 +tc.assertTrue(aut14g.equivalent_to(aut14)) +tc.assertEqual(aut14g.num_states(), 3) # Extracting an SCC from this large automaton will produce an automaton A in # which original-states refers to states larger than those in A. 
Some version @@ -439,4 +441,4 @@ State: 10 [!0&1] 4 [0&1] 8 [!0&!1] 10 {0 1 2 3 5} [0&!1] 13 {1 2 3} State: 11 si = spot.scc_info(aut15) aut15b = si.split_on_sets(2, [])[0]; d aut15c = spot.partial_degeneralize(aut15b) -assert aut15c.equivalent_to(aut15b) +tc.assertTrue(aut15c.equivalent_to(aut15b)) diff --git a/tests/python/prodexpt.py b/tests/python/prodexpt.py index 098bafb26..4d00b4dae 100644 --- a/tests/python/prodexpt.py +++ b/tests/python/prodexpt.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2017, 2019-2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2016-2017, 2019-2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # make sure that we are not allowed to build the product of two automata with # different dictionaries. @@ -94,14 +96,14 @@ State: 60 40 38 60 68 State: 61 40 41 57 61 State: 62 40 59 44 62 State: State: 70 40 59 57 70 State: 71 40 63 57 71 State: 72 40 69 57 72 --END-- ''') res = spot.product(left, right) -assert res.num_states() == 977 -assert res.num_edges() == 8554 +tc.assertEqual(res.num_states(), 977) +tc.assertEqual(res.num_edges(), 8554) res = spot.product(left, right, spot.output_aborter(1000, 6000)) -assert res is None +tc.assertIsNone(res) res = spot.product(left, right, spot.output_aborter(900, 9000)) -assert res is None +tc.assertIsNone(res) res = spot.product(left, right, spot.output_aborter(1000, 9000)) -assert res is not None +tc.assertIsNotNone(res) a, b = spot.automata("""HOA: v1 States: 1 Start: 0 AP: 0 acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc complete @@ -110,7 +112,7 @@ properties: deterministic stutter-invariant weak --BODY-- State: 0 [t] 0 properties: trans-labels explicit-labels state-acc complete properties: deterministic stutter-invariant weak --BODY-- State: 0 [t] 0 --END--""") out = spot.product(a, b).to_str() -assert out == """HOA: v1 +tc.assertEqual(out, """HOA: v1 States: 1 Start: 0 AP: 0 @@ -120,9 +122,9 @@ properties: trans-labels explicit-labels state-acc deterministic properties: stutter-invariant terminal --BODY-- State: 0 ---END--""" +--END--""") out = spot.product_susp(a, b).to_str() -assert out == """HOA: v1 +tc.assertEqual(out, """HOA: v1 States: 1 Start: 0 AP: 0 @@ -132,4 +134,4 @@ properties: trans-labels explicit-labels state-acc deterministic properties: stutter-invariant terminal --BODY-- State: 0 ---END--""" +--END--""") diff --git a/tests/python/randgen.py b/tests/python/randgen.py index 094ddcb3f..32762d02e 100755 --- a/tests/python/randgen.py +++ b/tests/python/randgen.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,9 +18,11 @@ # along with this program. If not, see . 
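A minimal standalone sketch (not part of the patch) of the product()/output_aborter() behaviour that prodexpt.py checks above; the formulas are arbitrary examples.

import spot
from unittest import TestCase
tc = TestCase()

left = spot.translate('GFa & GFb')
right = spot.translate('FGc')
prod = spot.product(left, right)        # synchronized product, always returned
tc.assertIsNotNone(prod)
# An output_aborter bounds the size of the result: product() returns None
# instead of an automaton once the state/edge budget is exceeded.
tc.assertIsNotNone(spot.product(left, right, spot.output_aborter(1000, 9000)))
maybe_aborted = spot.product(left, right, spot.output_aborter(1, 1))
print(prod.num_states(), maybe_aborted is None)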
import spot +from unittest import TestCase +tc = TestCase() o = spot.option_map() g = spot.randltlgenerator(0, o) -assert str(g.next()) == '1' -assert str(g.next()) == '0' -assert str(g.next()) == 'None' +tc.assertEqual(str(g.next()), '1') +tc.assertEqual(str(g.next()), '0') +tc.assertEqual(str(g.next()), 'None') diff --git a/tests/python/relabel.py b/tests/python/relabel.py index 5a4a370eb..0de668b12 100644 --- a/tests/python/relabel.py +++ b/tests/python/relabel.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2019 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2017-2019, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() f = spot.formula('GF(a & b) -> (FG(a & b) & Gc)') m = spot.relabeling_map() @@ -26,19 +28,18 @@ res = "" for old, new in m.items(): res += "#define {} {}\n".format(old, new) res += str(g) -print(res) -assert(res == """#define p0 a & b +tc.assertEqual(res, """#define p0 a & b #define p1 c GFp0 -> (FGp0 & Gp1)""") h = spot.relabel_apply(g, m) -assert h == f +tc.assertEqual(h, f) autg = g.translate() spot.relabel_here(autg, m) -assert str(autg.ap()) == \ - '(spot.formula("a"), spot.formula("b"), spot.formula("c"))' -assert spot.isomorphism_checker.are_isomorphic(autg, f.translate()) +tc.assertEqual(str(autg.ap()), \ + '(spot.formula("a"), spot.formula("b"), spot.formula("c"))') +tc.assertTrue(spot.isomorphism_checker.are_isomorphic(autg, f.translate())) a = spot.formula('a') u = spot.formula('a U b') @@ -46,11 +47,11 @@ m[a] = u try: spot.relabel_here(autg, m) except RuntimeError as e: - assert "new labels" in str(e) + tc.assertIn("new labels", str(e)) m = spot.relabeling_map() m[u] = a try: spot.relabel_here(autg, m) except RuntimeError as e: - assert "old labels" in str(e) + tc.assertIn("old labels", str(e)) diff --git a/tests/python/remfin.py b/tests/python/remfin.py index 20115a14f..ffff3e22a 100644 --- a/tests/python/remfin.py +++ b/tests/python/remfin.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et Développement de -# l'Epita +# Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import spot +from unittest import TestCase +tc = TestCase() # This test used to trigger an assertion (or a segfault) # in scc_filter_states(). 
@@ -41,7 +43,7 @@ State: 2 aut.prop_inherently_weak(True) aut = spot.dualize(aut) aut1 = spot.scc_filter_states(aut) -assert(aut1.to_str('hoa') == """HOA: v1 +tc.assertEqual(aut1.to_str('hoa'), """HOA: v1 States: 2 Start: 0 AP: 1 "a" @@ -56,17 +58,17 @@ State: 1 [t] 1 --END--""") -assert(aut.scc_filter_states().to_str() == aut1.to_str()) -assert(aut1.get_name() == None) +tc.assertEqual(aut.scc_filter_states().to_str(), aut1.to_str()) +tc.assertIsNone(aut1.get_name()) aut1.set_name("test me") -assert(aut1.get_name() == "test me") +tc.assertEqual(aut1.get_name(), "test me") # The method is the same as the function a = spot.translate('true', 'low', 'any') -assert(a.prop_universal().is_maybe()) -assert(a.prop_unambiguous().is_maybe()) -assert(a.is_deterministic() == True) -assert(a.is_unambiguous() == True) +tc.assertTrue(a.prop_universal().is_maybe()) +tc.assertTrue(a.prop_unambiguous().is_maybe()) +tc.assertTrue(a.is_deterministic()) +tc.assertTrue(a.is_unambiguous()) a = spot.automaton(""" HOA: v1 @@ -92,4 +94,4 @@ State: 2 """) b = spot.remove_fin(a) size = (b.num_states(), b.num_edges()) -assert size == (5, 13); +tc.assertEqual(size, (5, 13)) diff --git a/tests/python/removeap.py b/tests/python/removeap.py index 7a9268c85..ba656ac89 100644 --- a/tests/python/removeap.py +++ b/tests/python/removeap.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement +# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,16 +18,18 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('a U (c & Gb)') -assert not spot.is_terminal_automaton(aut) -assert aut.prop_terminal().is_false() +tc.assertFalse(spot.is_terminal_automaton(aut)) +tc.assertTrue(aut.prop_terminal().is_false()) rem = spot.remove_ap() rem.add_ap("b") aut = rem.strip(aut) -assert not aut.prop_terminal().is_false() -assert spot.is_terminal_automaton(aut) -assert aut.prop_terminal().is_true() +tc.assertFalse(aut.prop_terminal().is_false()) +tc.assertTrue(spot.is_terminal_automaton(aut)) +tc.assertTrue(aut.prop_terminal().is_true()) aut = rem.strip(aut) -assert aut.prop_terminal().is_true() +tc.assertTrue(aut.prop_terminal().is_true()) diff --git a/tests/python/rs_like.py b/tests/python/rs_like.py index 7b4ee75cf..669af5885 100644 --- a/tests/python/rs_like.py +++ b/tests/python/rs_like.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
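A small self-contained sketch of the Fin-removal step tested by remfin.py above; the input formula is only an example.

import spot
from unittest import TestCase
tc = TestCase()

a = spot.translate('GFa & GFb')   # Inf-only (generalized Buchi) acceptance
d = spot.dualize(a)               # dualizing this automaton introduces Fin(...) acceptance
f = spot.remove_fin(d)            # equivalent automaton without any Fin(...)
inf, fin = f.get_acceptance().used_inf_fin_sets()
tc.assertEqual(fin, [])           # no Fin(...) sets remain after remove_fin()
tc.assertTrue(f.equivalent_to(d))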
import spot +from unittest import TestCase +tc = TestCase() a = spot.vector_rs_pair() @@ -30,12 +32,13 @@ mall = spot.mark_t() def test_rs(acc, rs, expected_res, expected_pairs): res, p = getattr(acc, 'is_' + rs + '_like')() - assert res == expected_res + tc.assertEqual(res, expected_res) if expected_res: expected_pairs.sort() p = sorted(p) for a, b in zip(p, expected_pairs): - assert a.fin == b.fin and a.inf == b.inf + tc.assertEqual(a.fin, b.fin) + tc.assertEqual(a.inf, b.inf) def switch_pairs(pairs): diff --git a/tests/python/satmin.py b/tests/python/satmin.py index 2d28dd405..f9fa466f8 100644 --- a/tests/python/satmin.py +++ b/tests/python/satmin.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2020, 2021 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2020, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,232 +18,234 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('GFa & GFb', 'Buchi', 'SBAcc') -assert aut.num_sets() == 1 -assert aut.num_states() == 3 -assert aut.is_deterministic() +tc.assertEqual(aut.num_sets(), 1) +tc.assertEqual(aut.num_states(), 3) +tc.assertTrue(aut.is_deterministic()) min1 = spot.sat_minimize(aut, acc='Rabin 1') -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_langmap=True) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=0) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=50) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=-1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=0) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) 
+tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=50) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_naive=True) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min2 = spot.sat_minimize(aut, acc='Streett 2') -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_langmap=True) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=0) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=50) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=-1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=0) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=50) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_naive=True) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min3 = spot.sat_minimize(aut, 
acc='Rabin 2', state_based=True, max_states=5) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_langmap=True) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=0) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=50) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=-1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=0) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=50) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_naive=True) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_langmap=True) -assert 
min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=0) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=50) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=-1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=0) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=50) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_naive=True) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) aut = spot.translate('GFa') -assert aut.num_sets() == 1 -assert aut.num_states() == 1 -assert aut.is_deterministic() +tc.assertEqual(aut.num_sets(), 1) +tc.assertEqual(aut.num_states(), 1) +tc.assertTrue(aut.is_deterministic()) out = spot.sat_minimize(aut, state_based=True) -assert out.num_states() == 2 +tc.assertEqual(out.num_states(), 2) out = spot.sat_minimize(aut, state_based=True, max_states=1) -assert out is None +tc.assertTrue(out is None) diff --git a/tests/python/sbacc.py b/tests/python/sbacc.py index 445845dbc..22d937014 100644 --- a/tests/python/sbacc.py +++ b/tests/python/sbacc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2018, 2021 
Laboratoire de Recherche et +# Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -18,13 +18,15 @@ # along with this program. If not, see . import spot -aut = spot.translate('GFa') -assert aut.num_states() == 1 -assert not aut.prop_state_acc().is_true() -aut = spot.sbacc(aut) -assert aut.num_states() == 2 -assert aut.prop_state_acc().is_true() +from unittest import TestCase +tc = TestCase() +aut = spot.translate('GFa') +tc.assertEqual(aut.num_states(), 1) +tc.assertFalse(aut.prop_state_acc().is_true()) +aut = spot.sbacc(aut) +tc.assertEqual(aut.num_states(), 2) +tc.assertTrue(aut.prop_state_acc().is_true()) aut = spot.automaton("""HOA: v1 States: 3 @@ -48,7 +50,7 @@ s = spot.sbacc(aut) s.copy_state_names_from(aut) h = s.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -59,7 +61,7 @@ State: 0 "0" [0] 1 State: 1 "2" {1} [t] 1 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -83,7 +85,7 @@ d = spot.degeneralize(aut) d.copy_state_names_from(aut) h = d.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -95,4 +97,4 @@ State: 0 "0#0" [0] 1 State: 1 "2#0" {0} [t] 1 ---END--""" +--END--""") diff --git a/tests/python/sccfilter.py b/tests/python/sccfilter.py index 6edd33e9f..7728b70a6 100644 --- a/tests/python/sccfilter.py +++ b/tests/python/sccfilter.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -22,6 +22,8 @@ # Major) import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1.1 @@ -43,7 +45,7 @@ State: 1 "bar" --END-- """) -assert (spot.scc_filter(a, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.scc_filter(a, True).to_str('hoa', '1.1'), """HOA: v1.1 States: 2 Start: 0 AP: 1 "a" diff --git a/tests/python/sccinfo.py b/tests/python/sccinfo.py index 0ac645726..f8ade7e4b 100644 --- a/tests/python/sccinfo.py +++ b/tests/python/sccinfo.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. If not, see . 
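A minimal sketch of the state-based-acceptance conversion exercised by sbacc.py above, reusing the same 'GFa' example.

import spot
from unittest import TestCase
tc = TestCase()

a = spot.translate('GFa')                  # one state, transition-based acceptance
tc.assertEqual(a.num_states(), 1)
tc.assertFalse(a.prop_state_acc().is_true())
s = spot.sbacc(a)                          # move acceptance marks onto states
tc.assertEqual(s.num_states(), 2)
tc.assertTrue(s.prop_state_acc().is_true())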
import spot +from unittest import TestCase +tc = TestCase() a = spot.translate('(Ga -> Gb) W c') @@ -26,11 +28,11 @@ try: si = spot.scc_info(a, 10) exit(2) except RuntimeError as e: - assert "initial state does not exist" in str(e) + tc.assertIn("initial state does not exist", str(e)) si = spot.scc_info(a) n = si.scc_count() -assert n == 4 +tc.assertEqual(n, 4) acc = 0 rej = 0 @@ -39,24 +41,24 @@ for i in range(n): acc += si.is_accepting_scc(i) rej += si.is_rejecting_scc(i) triv += si.is_trivial(i) -assert acc == 3 -assert rej == 1 -assert triv == 0 +tc.assertEqual(acc, 3) +tc.assertEqual(rej, 1) +tc.assertEqual(triv, 0) for scc in si: acc -= scc.is_accepting() rej -= scc.is_rejecting() triv -= scc.is_trivial() -assert acc == 0 -assert rej == 0 -assert triv == 0 +tc.assertEqual(acc, 0) +tc.assertEqual(rej, 0) +tc.assertEqual(triv, 0) l0 = si.states_of(0) l1 = si.states_of(1) l2 = si.states_of(2) l3 = si.states_of(3) l = sorted(list(l0) + list(l1) + list(l2) + list(l3)) -assert l == [0, 1, 2, 3, 4] +tc.assertEqual(l, [0, 1, 2, 3, 4]) i = si.initial() todo = [i] @@ -73,14 +75,14 @@ while todo: if s not in seen: seen.add(s) todo.append(s) -assert seen == {0, 1, 2, 3} -assert trans == [(0, 0), (0, 1), (0, 2), (0, 3), - (2, 0), (2, 1), (2, 2), (2, 4), - (3, 3), (4, 1), (4, 4), (1, 1)] -assert transi == [(0, 0, 1), (0, 2, 3), (2, 0, 6), - (2, 2, 8), (3, 3, 10), (4, 4, 12), (1, 1, 5)] +tc.assertEqual(seen, {0, 1, 2, 3}) +tc.assertEqual(trans, [(0, 0), (0, 1), (0, 2), (0, 3), + (2, 0), (2, 1), (2, 2), (2, 4), + (3, 3), (4, 1), (4, 4), (1, 1)]) +tc.assertEqual(transi, [(0, 0, 1), (0, 2, 3), (2, 0, 6), + (2, 2, 8), (3, 3, 10), (4, 4, 12), (1, 1, 5)]) -assert not spot.is_weak_automaton(a, si) +tc.assertFalse(spot.is_weak_automaton(a, si)) a = spot.automaton(""" @@ -107,8 +109,8 @@ State: 3 """) si = spot.scc_info(a) si.determine_unknown_acceptance() -assert si.scc_count() == 2 -assert si.is_accepting_scc(0) -assert not si.is_rejecting_scc(0) -assert si.is_rejecting_scc(1) -assert not si.is_accepting_scc(1) +tc.assertEqual(si.scc_count(), 2) +tc.assertTrue(si.is_accepting_scc(0)) +tc.assertFalse(si.is_rejecting_scc(0)) +tc.assertTrue(si.is_rejecting_scc(1)) +tc.assertFalse(si.is_accepting_scc(1)) diff --git a/tests/python/sccsplit.py b/tests/python/sccsplit.py index 9095a1a29..4a1781475 100644 --- a/tests/python/sccsplit.py +++ b/tests/python/sccsplit.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,9 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() + aut = spot.translate('GF(a <-> Xa) & GF(b <-> XXb)') si = spot.scc_info(aut) @@ -27,4 +30,4 @@ for aut2 in si.split_on_sets(0, [0]): # This call to to_str() used to fail because split_on_sets had not # registered the atomic propositions of aut s += aut2.to_str() -assert spot.automaton(s).num_states() == 8 +tc.assertEqual(spot.automaton(s).num_states(), 8) diff --git a/tests/python/semidet.py b/tests/python/semidet.py index 856b3b7d2..9072f5917 100644 --- a/tests/python/semidet.py +++ b/tests/python/semidet.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). 
# # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() formulas = [('(Gp0 | Fp1) M 1', False, True), ('(!p1 U p1) U X(!p0 -> Fp1)', False, True), @@ -31,9 +33,9 @@ for f, isd, issd in formulas: aut = spot.translate(f) # The formula with isd=True, issd=True is the only one # for which both properties are already set. - assert (aut.prop_deterministic().is_maybe() or - aut.prop_semi_deterministic().is_maybe() or - isd == issd) + tc.assertTrue(aut.prop_deterministic().is_maybe() or + aut.prop_semi_deterministic().is_maybe() or + isd == issd) spot.check_determinism(aut) - assert aut.prop_deterministic() == isd - assert aut.prop_semi_deterministic() == issd + tc.assertEqual(aut.prop_deterministic(), isd) + tc.assertEqual(aut.prop_semi_deterministic(), issd) diff --git a/tests/python/setacc.py b/tests/python/setacc.py index 8d20b6a49..7246bf5cc 100644 --- a/tests/python/setacc.py +++ b/tests/python/setacc.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016, 2018, 2021, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,54 +19,56 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # Test case reduced from a report from Juraj Major . a = spot.make_twa_graph(spot._bdd_dict) a.set_acceptance(0, spot.acc_code("t")) -assert(a.prop_state_acc() == True) +tc.assertTrue(a.prop_state_acc()) a.set_acceptance(1, spot.acc_code("Fin(0)")) -assert(a.prop_state_acc() == spot.trival.maybe()) +tc.assertEqual(a.prop_state_acc(), spot.trival.maybe()) # Some tests for used_inf_fin_sets(), which return a pair of mark_t. 
(inf, fin) = a.get_acceptance().used_inf_fin_sets() -assert inf == [] -assert fin == [0] +tc.assertEqual(inf, []) +tc.assertEqual(fin, [0]) (inf, fin) = spot.acc_code("(Fin(0)|Inf(1))&Fin(2)&Inf(0)").used_inf_fin_sets() -assert inf == [0, 1] -assert fin == [0, 2] +tc.assertEqual(inf, [0, 1]) +tc.assertEqual(fin, [0, 2]) # is_rabin_like() returns (bool, [(inf, fin), ...]) (b, v) = spot.acc_cond("(Fin(0)&Inf(1))|(Fin(2)&Inf(0))").is_rabin_like() -assert b == True -assert len(v) == 2 -assert v[0].fin == [0] -assert v[0].inf == [1] -assert v[1].fin == [2] -assert v[1].inf == [0] +tc.assertTrue(b) +tc.assertEqual(len(v), 2) +tc.assertEqual(v[0].fin, [0]) +tc.assertEqual(v[0].inf, [1]) +tc.assertEqual(v[1].fin, [2]) +tc.assertEqual(v[1].inf, [0]) (b, v) = spot.acc_cond("(Fin(0)|Inf(1))&(Fin(2)|Inf(0))").is_rabin_like() -assert b == False -assert len(v) == 0 +tc.assertFalse(b) +tc.assertEqual(len(v), 0) (b, v) = spot.acc_cond("(Fin(0)|Inf(1))&(Fin(2)|Inf(0))").is_streett_like() -assert b == True -assert repr(v) == \ - '(spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0]))' +tc.assertTrue(b) +tc.assertEqual(repr(v), \ + '(spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0]))') v2 = (spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0])) -assert v == v2 +tc.assertEqual(v, v2) acc = spot.acc_cond("generalized-Rabin 1 2") (b, v) = acc.is_generalized_rabin() -assert b == True -assert v == (2,) +tc.assertTrue(b) +tc.assertEqual(v, (2,)) (b, v) = acc.is_generalized_streett() -assert b == False -assert v == () +tc.assertFalse(b) +tc.assertEqual(v, ()) (b, v) = acc.is_streett_like() -assert b == True +tc.assertTrue(b) ve = (spot.rs_pair([0], []), spot.rs_pair([], [1]), spot.rs_pair([], [2])) -assert v == ve -assert acc.name() == "generalized-Rabin 1 2" +tc.assertEqual(v, ve) +tc.assertEqual(acc.name(), "generalized-Rabin 1 2") # At the time of writting, acc_cond does not yet recognize # "generalized-Streett", as there is no definition for that in the HOA format, @@ -74,23 +76,23 @@ assert acc.name() == "generalized-Rabin 1 2" # being a generalized-Streett. See issue #249. acc = spot.acc_cond("Inf(0)|Fin(1)|Fin(2)") (b, v) = acc.is_generalized_streett() -assert b == True -assert v == (2,) +tc.assertTrue(b) +tc.assertEqual(v, (2,)) (b, v) = acc.is_generalized_rabin() -assert b == False -assert v == () +tc.assertFalse(b) +tc.assertEqual(v, ()) # FIXME: We should have a way to disable the following output, as it is not # part of HOA v1. -assert acc.name() == "generalized-Streett 1 2" +tc.assertEqual(acc.name(), "generalized-Streett 1 2") # issue #469. This test is meaningful only if Spot is compiled with # --enable-max-accsets=64 or more. try: m = spot.mark_t([33]) - assert m.lowest() == m + tc.assertEqual(m.lowest(), m) n = spot.mark_t([33,34]) - assert n.lowest() == m + tc.assertEqual(n.lowest(), m) except RuntimeError as e: if "Too many acceptance sets used." 
in str(e): pass @@ -102,24 +104,24 @@ except RuntimeError as e: from gc import collect acc = spot.translate('a').acc() collect() -assert acc == spot.acc_cond('Inf(0)') +tc.assertEqual(acc, spot.acc_cond('Inf(0)')) acc = spot.translate('b').get_acceptance() collect() -assert acc == spot.acc_code('Inf(0)') +tc.assertEqual(acc, spot.acc_code('Inf(0)')) c = spot.acc_cond('Fin(0)&Fin(1)&(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [0,1] -assert m2 == [] +tc.assertEqual(m1, [0,1]) +tc.assertEqual(m2, []) c = spot.acc_cond('Inf(0)&Inf(1)&(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [] -assert m2 == [0,1] +tc.assertEqual(m1, []) +tc.assertEqual(m2, [0,1]) c = spot.acc_cond('Inf(0)&Inf(1)|(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [] -assert m2 == [] +tc.assertEqual(m1, []) +tc.assertEqual(m2, []) diff --git a/tests/python/setxor.py b/tests/python/setxor.py index 7cd1e5da1..2fe69cd99 100755 --- a/tests/python/setxor.py +++ b/tests/python/setxor.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2011 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) 2010, 2011, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import sys from buddy import * +from unittest import TestCase +tc = TestCase() bdd_init(10000, 10000) bdd_setvarnum(5) @@ -29,18 +31,18 @@ a = V[0] & -V[1] & V[2] & -V[3] b = V[0] & V[1] & V[2] & -V[3] c = -V[0] & V[1] & -V[2] & -V[3] -assert(c == bdd_setxor(a, b)) -assert(c == bdd_setxor(b, a)) -assert(a == bdd_setxor(b, c)) -assert(a == bdd_setxor(c, b)) -assert(b == bdd_setxor(a, c)) -assert(b == bdd_setxor(c, a)) +tc.assertEqual(c, bdd_setxor(a, b)) +tc.assertEqual(c, bdd_setxor(b, a)) +tc.assertEqual(a, bdd_setxor(b, c)) +tc.assertEqual(a, bdd_setxor(c, b)) +tc.assertEqual(b, bdd_setxor(a, c)) +tc.assertEqual(b, bdd_setxor(c, a)) d = V[1] & V[2] & -V[3] & V[4] e = V[0] & V[1] & -V[2] & -V[3] & V[4] -assert(e == bdd_setxor(a, d)) -assert(e == bdd_setxor(d, a)) +tc.assertEqual(e, bdd_setxor(a, d)) +tc.assertEqual(e, bdd_setxor(d, a)) # Cleanup all BDD variables before calling bdd_done(), otherwise # bdd_delref will be called after bdd_done() and this is unsafe in diff --git a/tests/python/simplacc.py b/tests/python/simplacc.py index e742d69a4..50dc2d74a 100644 --- a/tests/python/simplacc.py +++ b/tests/python/simplacc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita # (LRDE). # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
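A standalone sketch of the bdd_setxor() relation checked in setxor.py above, using the same cube values as that test.

from buddy import bdd_init, bdd_setvarnum, bdd_ithvar, bdd_setxor
from unittest import TestCase
tc = TestCase()

bdd_init(10000, 10000)
bdd_setvarnum(4)
v = [bdd_ithvar(i) for i in range(4)]
a = v[0] & -v[1] & v[2] & -v[3]
b = v[0] & v[1] & v[2] & -v[3]
# Variables on which the two cubes agree come out negative;
# the one on which they differ comes out positive.
tc.assertEqual(-v[0] & v[1] & -v[2] & -v[3], bdd_setxor(a, b))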
import spot +from unittest import TestCase +tc = TestCase() auts = list(spot.automata(""" @@ -70,19 +72,19 @@ explicit-labels trans-acc deterministic --BODY-- State: 0 [0&!1] 0 {2 3} res = [] for a in auts: b = spot.simplify_acceptance(a) - assert b.equivalent_to(a) + tc.assertTrue(b.equivalent_to(a)) res.append(str(b.get_acceptance())) c = spot.simplify_acceptance(b) - assert b.get_acceptance() == c.get_acceptance() + tc.assertEqual(b.get_acceptance(), c.get_acceptance()) a.set_acceptance(a.num_sets(), a.get_acceptance().complement()) b = spot.simplify_acceptance(a) - assert b.equivalent_to(a) + tc.assertTrue(b.equivalent_to(a)) res.append(str(b.get_acceptance())) c = spot.simplify_acceptance(b) - assert b.get_acceptance() == c.get_acceptance() + tc.assertEqual(b.get_acceptance(), c.get_acceptance()) -assert res == [ +tc.assertEqual(res, [ 'Inf(0)', 'Fin(0)', 'Inf(1) & Fin(0)', @@ -101,4 +103,4 @@ assert res == [ '(Inf(0) | Fin(2)) & Inf(1)', '(Fin(2) & (Inf(1) | Fin(0))) | (Inf(0)&Inf(2))', '(Inf(2) | (Fin(1) & Inf(0))) & (Fin(0)|Fin(2))', - ] + ]) diff --git a/tests/python/simstate.py b/tests/python/simstate.py index 6c2ca8bc3..b0b62267d 100644 --- a/tests/python/simstate.py +++ b/tests/python/simstate.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2020-2021 Laboratoire de Recherche +# Copyright (C) 2015, 2017-2018, 2020-2022 Laboratoire de Recherche # et Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot from sys import exit +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -48,7 +50,7 @@ State: 1 """) aut2 = spot.simulation(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -59,10 +61,10 @@ properties: deterministic --BODY-- State: 0 {0} [t] 0 ---END--""" +--END--""") aut2.copy_state_names_from(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -73,7 +75,7 @@ properties: deterministic --BODY-- State: 0 "[0,1]" {0} [t] 0 ---END--""" +--END--""") del aut del aut2 @@ -82,7 +84,7 @@ gcollect() aut = spot.translate('GF((p0 -> Gp0) R p1)') daut = spot.tgba_determinize(aut, True) -assert daut.to_str() == """HOA: v1 +tc.assertEqual(daut.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "p1" "p0" @@ -106,7 +108,7 @@ State: 2 "{₀[0]₀}{₁[1]₁}" [!0&1] 2 [0&!1] 0 {0} [0&1] 1 {2} ---END--""" +--END--""") del aut del daut @@ -129,7 +131,7 @@ State: 1 """) daut = spot.tgba_determinize(aut, True) -assert daut.to_str() == """HOA: v1 +tc.assertEqual(daut.to_str(), """HOA: v1 States: 12 Start: 0 AP: 2 "a" "b" @@ -185,18 +187,18 @@ State: 11 "{₀[1#1]{₁[0#0,0#1]{₂[1#0]₂}₁}₀}" [!0&1] 2 {0} [0&!1] 6 {0} [0&1] 9 {0} ---END--""" +--END--""") a = spot.translate('!Gp0 xor FG((p0 W Gp1) M p1)') a = spot.degeneralize_tba(a) -assert a.num_states() == 8 +tc.assertEqual(a.num_states(), 8) b = spot.simulation(a) -assert b.num_states() == 3 +tc.assertEqual(b.num_states(), 3) b.set_init_state(1) b.purge_unreachable_states() b.copy_state_names_from(a) -assert b.to_str() == """HOA: v1 +tc.assertEqual(b.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -208,7 +210,7 @@ properties: deterministic stutter-invariant State: 0 "[1,7]" [1] 0 [!1] 0 {0} ---END--""" +--END--""") aut = spot.automaton('''HOA: v1 States: 12 @@ -267,7 +269,7 @@ State: 11 [0&!1] 6 
{0} [0&1] 9 {0} --END--''') -assert spot.reduce_iterated(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(aut).to_str(), '''HOA: v1 States: 9 Start: 0 AP: 2 "a" "b" @@ -308,7 +310,7 @@ State: 8 [0&!1] 4 {0} [!0&1] 6 [0&1] 7 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 6 @@ -332,7 +334,7 @@ State: 4 State: 5 [0] 5 --END--''') -assert spot.reduce_iterated(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(aut).to_str(), '''HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -347,7 +349,7 @@ State: 1 [0] 2 State: 2 [1] 2 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 5 @@ -374,7 +376,7 @@ State: 4 [0&1&!2&3] 4 {0} --END--''') -assert spot.reduce_direct_cosim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim(aut).to_str(), '''HOA: v1 States: 5 Start: 0 AP: 4 "p0" "p2" "p3" "p1" @@ -395,7 +397,7 @@ State: 3 [0&!1&2&3] 3 {1} State: 4 [0&!1&2&3] 4 {0} ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 2 @@ -410,7 +412,7 @@ State: 0 State: 1 [0] 0 --END--''') -assert spot.reduce_direct_sim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_sim(aut).to_str(), '''HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -418,7 +420,7 @@ Acceptance: 2 Fin(0) & Fin(1) properties: trans-labels explicit-labels state-acc deterministic --BODY-- State: 0 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 name: "(p1 U p2) U p3" @@ -445,7 +447,7 @@ State: 3 [1] 1 [0&!1] 3 --END--''') -assert spot.reduce_direct_cosim_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim_sba(aut).to_str(), '''HOA: v1 States: 4 Start: 0 AP: 3 "p2" "p3" "p1" @@ -468,7 +470,7 @@ State: 2 State: 3 [0] 1 [!0&2] 3 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 4 @@ -488,7 +490,7 @@ State: 2 State: 3 {0} [1] 3 --END--''') -assert spot.reduce_direct_cosim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim(aut).to_str(), '''HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -502,9 +504,9 @@ State: 1 [1] 2 State: 2 {0} [1] 2 ---END--''' +--END--''') -assert spot.reduce_direct_sim_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_sim_sba(aut).to_str(), '''HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -516,7 +518,7 @@ State: 0 [0] 1 State: 1 {0} [1] 1 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 3 @@ -532,7 +534,7 @@ State: 1 State: 2 {0} [0] 2 --END--''') -assert spot.reduce_iterated_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated_sba(aut).to_str(), '''HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -542,7 +544,7 @@ properties: deterministic --BODY-- State: 0 {0} [0] 0 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 30 @@ -630,7 +632,7 @@ State: 28 State: 29 [0&!1&!2&!3] 29 --END--''') -assert spot.reduce_iterated(a).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(a).to_str(), '''HOA: v1 States: 8 Start: 0 AP: 2 "p0" "p1" @@ -669,7 +671,7 @@ State: 7 [!1] 1 {0} [0&1] 5 [1] 7 ---END--''' +--END--''') # issue #452 @@ -707,4 +709,4 @@ State: 8 [@p] 3 {0 1} --END--""") aut = spot.simulation(aut) -assert aut.num_states() == 1 +tc.assertEqual(aut.num_states(), 1) diff --git a/tests/python/sonf.py b/tests/python/sonf.py index 558f90c63..40af758b0 100644 --- a/tests/python/sonf.py +++ b/tests/python/sonf.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita # (LRDE). 
# # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() formulas = """\ {x[*]}[]-> F({y[*]}<>-> GFz) @@ -38,4 +40,4 @@ for f1 in formulas.splitlines(): rm.add_ap(ap) a2 = rm.strip(a2) - assert(spot.are_equivalent(a1, a2)) + tc.assertTrue(spot.are_equivalent(a1, a2)) diff --git a/tests/python/split.py b/tests/python/split.py index adab5a931..b916f494f 100644 --- a/tests/python/split.py +++ b/tests/python/split.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2021 Laboratoire de Recherche et +# Copyright (C) 2018-2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -51,16 +53,17 @@ def do_split(f, out_list): return aut, s aut, s = do_split('(FG !a) <-> (GF b)', ['b']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) del aut del s gcollect() aut, s = do_split('GFa && GFb', ['b']) -assert equiv(aut, spot.unsplit_2step(s)) -# FIXME see below -# assert str_diff("""HOA: v1 +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) +# FIXME s.to_str() is NOT the same on Debian stable and on Debian unstable +# we should investigate this. See Issue #502. +# tc.assertEqual("""HOA: v1 # States: 3 # Start: 0 # AP: 2 "a" "b" @@ -86,10 +89,11 @@ del s gcollect() aut, s = do_split('! ((G (req -> (F ack))) && (G (go -> (F grant))))', ['ack']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) + # FIXME s.to_str() is NOT the same on Debian stable and on Debian unstable -# we should investigate this -# assert s.to_str() == """HOA: v1 +# we should investigate this. See Issue #502. +# tc.assertEqual(s.to_str(), """HOA: v1 # States: 9 # Start: 0 # AP: 4 "ack" "req" "go" "grant" @@ -122,7 +126,7 @@ assert equiv(aut, spot.unsplit_2step(s)) # [!0] 1 # State: 8 {0} # [!3] 2 -# --END--""" +# --END--""") del aut del s @@ -131,4 +135,4 @@ gcollect() aut, s = do_split('((G (((! g_0) || (! g_1)) && ((r_0 && (X r_1)) -> (F (g_0 \ && g_1))))) && (G (r_0 -> F g_0))) && (G (r_1 -> F g_1))', ['g_0', 'g_1']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) diff --git a/tests/python/streett_totgba.py b/tests/python/streett_totgba.py index 1c0bfc13e..8a18defbc 100644 --- a/tests/python/streett_totgba.py +++ b/tests/python/streett_totgba.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017-2018, 2021-2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
# @@ -22,7 +22,8 @@ import spot import os import shutil import sys - +from unittest import TestCase +tc = TestCase() def tgba(a): if not a.is_existential(): @@ -33,11 +34,11 @@ def tgba(a): def test_aut(aut): stgba = tgba(aut) - assert stgba.equivalent_to(aut) + tc.assertTrue(stgba.equivalent_to(aut)) os.environ["SPOT_STREETT_CONV_MIN"] = '1' sftgba = tgba(aut) del os.environ["SPOT_STREETT_CONV_MIN"] - assert stgba.equivalent_to(sftgba) + tc.assertTrue(stgba.equivalent_to(sftgba)) slike = spot.simplify_acceptance(aut) @@ -45,8 +46,7 @@ def test_aut(aut): os.environ["SPOT_STREETT_CONV_MIN"] = "1" slftgba = tgba(slike) del os.environ["SPOT_STREETT_CONV_MIN"] - assert sltgba.equivalent_to(slftgba) - + tc.assertTrue(sltgba.equivalent_to(slftgba)) if shutil.which('ltl2dstar') is None: sys.exit(77) diff --git a/tests/python/streett_totgba2.py b/tests/python/streett_totgba2.py index 852eff0af..5ff97a369 100644 --- a/tests/python/streett_totgba2.py +++ b/tests/python/streett_totgba2.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # Issue 316 a = spot.automaton(""" @@ -60,11 +62,11 @@ State: 7 {1 3 4} """) tgba = spot.streett_to_generalized_buchi(a) -assert tgba.acc().is_generalized_buchi() +tc.assertTrue(tgba.acc().is_generalized_buchi()) ba = spot.simplify_acceptance(a) -assert ba.acc().is_buchi() +tc.assertTrue(ba.acc().is_buchi()) nba = spot.dualize(ba.postprocess('generic', 'deterministic')) ntgba = spot.dualize(tgba.postprocess('generic', 'deterministic')) -assert not ba.intersects(ntgba) -assert not tgba.intersects(nba) +tc.assertFalse(ba.intersects(ntgba)) +tc.assertFalse(tgba.intersects(nba)) diff --git a/tests/python/stutter.py b/tests/python/stutter.py index dafb03b7e..05c28fda9 100644 --- a/tests/python/stutter.py +++ b/tests/python/stutter.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2019-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -23,6 +23,8 @@ import spot +from unittest import TestCase +tc = TestCase() def explain_stut(f): @@ -45,20 +47,20 @@ def explain_stut(f): # Test from issue #388 w1, w2 = explain_stut('{(a:b) | (a;b)}|->Gc') -assert str(w1) == 'a & !b & !c; cycle{!a & b & !c}' -assert str(w2) == 'a & !b & !c; a & !b & !c; cycle{!a & b & !c}' +tc.assertEqual(str(w1), 'a & !b & !c; cycle{!a & b & !c}') +tc.assertEqual(str(w2), 'a & !b & !c; a & !b & !c; cycle{!a & b & !c}') # Test from issue #401 w1, w2 = explain_stut('G({x} |-> ({x[+]} <>-> ({Y1[+]} <>=> Y2)))') -assert str(w1) == 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x}' -assert str(w2) == 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x; Y1 & Y2 & x}' +tc.assertEqual(str(w1), 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x}') +tc.assertEqual(str(w2), 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x; Y1 & Y2 & x}') # Related to issue #401 as well. sl() and sl2() should upgrade # the t acceptance condition into inf(0). 
pos = spot.translate('Xa & XXb') w = pos.accepting_word().as_automaton() -assert w.acc().is_t() +tc.assertTrue(w.acc().is_t()) a = spot.sl2(w) -assert a.acc().is_buchi() +tc.assertTrue(a.acc().is_buchi()) a = spot.sl(w) -assert a.acc().is_buchi() +tc.assertTrue(a.acc().is_buchi()) diff --git a/tests/python/sum.py b/tests/python/sum.py index 7e2e74220..1f7c6e0a1 100644 --- a/tests/python/sum.py +++ b/tests/python/sum.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -20,6 +20,8 @@ import spot import sys import itertools +from unittest import TestCase +tc = TestCase() # make sure that we are not allowed to build the sum of two automata with # different dictionaries. @@ -65,8 +67,8 @@ for p in zip(phi1, phi2): p0orp1 = spot.formula.Or(p) a1ora2 = spot.remove_alternation(spot.sum(a1, a2), True) - assert p0orp1.equivalent_to(a1ora2) + tc.assertTrue(p0orp1.equivalent_to(a1ora2)) p0andp1 = spot.formula.And(p) a1anda2 = spot.remove_alternation(spot.sum_and(a1, a2), True) - assert p0andp1.equivalent_to(a1anda2) + tc.assertTrue(p0andp1.equivalent_to(a1anda2)) diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 59022624c..e1a88650a 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # A shared variable caused the 2nd call to ltl_to_game to give an incorrect # result. @@ -25,11 +27,11 @@ for i in range(0, 2): gi = spot.synthesis_info() gi.s = spot.synthesis_info.algo_LAR game = spot.ltl_to_game("(Ga) <-> (Fb)", ["b"], gi) - assert not spot.solve_game(game) + tc.assertFalse(spot.solve_game(game)) # A game can have only inputs game = spot.ltl_to_game("GFa", []) -assert(game.to_str() == """HOA: v1 +tc.assertEqual(game.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "a" diff --git a/tests/python/toparity.py b/tests/python/toparity.py index df226ebe4..37e111f9b 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -21,6 +21,8 @@ import spot from itertools import zip_longest from buddy import bddfalse +from unittest import TestCase +tc = TestCase() # Tests for the new version of to_parity @@ -114,17 +116,16 @@ def test(aut, expected_num_states=[], full=True): if opt is not None and opt.parity_prefix is False: # Reduce the number of colors to help are_equivalent spot.reduce_parity_here(p1) - assert spot.are_equivalent(aut, p1) + tc.assertTrue(spot.are_equivalent(aut, p1)) if expected_num is not None: - # print(p1.num_states(), expected_num) - assert p1.num_states() == expected_num + tc.assertEqual(p1.num_states(), expected_num) if full and opt is not None: # Make sure passing opt is the same as setting # each argument individually p2 = spot.to_parity(aut, opt) - assert p2.num_states() == p1st - assert p2.num_edges() == p1ed - assert p2.num_sets() == p1se + tc.assertEqual(p2.num_states(), p1st) + tc.assertEqual(p2.num_edges(), p1ed) + tc.assertEqual(p2.num_sets(), p1se) test(spot.automaton("""HOA: v1 name: "(FGp0 & ((XFp0 & F!p1) | F(Gp1 & XG!p0))) | G(F!p0 & (XFp0 | F!p1) & @@ -351,7 +352,7 @@ State: 0 [!0&!1] 0 --END--""") p = spot.to_parity_old(a, True) -assert spot.are_equivalent(a, p) +tc.assertTrue(spot.are_equivalent(a, p)) test(a) a = spot.automaton(""" @@ -363,8 +364,8 @@ explicit-labels trans-acc --BODY-- State: 0 [0&1] 2 {4 5} [0&1] 4 {0 4} 4 [!0&!1] 1 {2 4} State: 5 [!0&1] 4 --END-- """) p = spot.to_parity_old(a, True) -assert p.num_states() == 22 -assert spot.are_equivalent(a, p) +tc.assertEqual(p.num_states(), 22) +tc.assertTrue(spot.are_equivalent(a, p)) test(a, [8, 6, 6, 6, 6, 6, 6, 6]) # Force a few edges to false, to make sure to_parity() is OK with that. @@ -377,22 +378,22 @@ for e in a.out(3): e.cond = bddfalse break p = spot.to_parity_old(a, True) -assert p.num_states() == 22 -assert spot.are_equivalent(a, p) +tc.assertEqual(p.num_states(), 22) +tc.assertTrue(spot.are_equivalent(a, p)) test(a, [7, 6, 6, 6, 6, 6, 6, 6]) for f in spot.randltl(4, 400): d = spot.translate(f, "det", "G") p = spot.to_parity_old(d, True) - assert spot.are_equivalent(p, d) + tc.assertTrue(spot.are_equivalent(p, d)) for f in spot.randltl(5, 2000): n = spot.translate(f) p = spot.to_parity_old(n, True) - assert spot.are_equivalent(n, p) + tc.assertTrue(spot.are_equivalent(n, p)) # Issue #390. a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det') b = spot.to_parity_old(a, True) -assert a.equivalent_to(b) +tc.assertTrue(a.equivalent_to(b)) test(a, [7, 7, 3, 7, 7, 7, 3, 3]) diff --git a/tests/python/toweak.py b/tests/python/toweak.py index b2d908037..23dcf66fa 100644 --- a/tests/python/toweak.py +++ b/tests/python/toweak.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() phi1 = """GFb X(!b | GF!a) @@ -33,7 +35,7 @@ b | (a & XF(b R a)) | (!a & XG(!b U !a))""" def test_phi(phi): a = spot.translate(phi, 'GeneralizedBuchi', 'SBAcc') res = spot.to_weak_alternating(spot.dualize(a)) - assert res.equivalent_to(spot.formula.Not(spot.formula(phi))) + tc.assertTrue(res.equivalent_to(spot.formula.Not(spot.formula(phi)))) for p in phi1.split('\n'): @@ -83,4 +85,4 @@ State: 6 --END-- """) a2 = spot.to_weak_alternating(a2) -assert a2.equivalent_to(phi2) +tc.assertTrue(a2.equivalent_to(phi2)) diff --git a/tests/python/tra2tba.py b/tests/python/tra2tba.py index b303c010b..354ced630 100644 --- a/tests/python/tra2tba.py +++ b/tests/python/tra2tba.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2021 Laboratoire de Recherche +# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche # et Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -57,7 +59,7 @@ State: 1 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 2. aut = spot.automaton(""" @@ -97,7 +99,7 @@ State: 2 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 3. aut = spot.automaton(""" @@ -128,7 +130,7 @@ State: 0 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 4. aut = spot.automaton(""" @@ -168,7 +170,7 @@ State: 2 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 5. aut = spot.automaton(""" @@ -214,7 +216,7 @@ State: 3 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 6. aut = spot.automaton(""" @@ -257,7 +259,7 @@ State: 2 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 7. aut = spot.automaton(""" @@ -292,7 +294,7 @@ State: 1 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 8. aut = spot.automaton(""" @@ -372,9 +374,9 @@ State: 7 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 9. aut = spot.automaton(""" @@ -411,9 +413,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 10. aut = spot.automaton(""" @@ -453,9 +455,9 @@ State: 2 {0} res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 11. 
aut = spot.automaton(""" @@ -493,9 +495,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Different order for rabin_to_buchi_if_realizable() due to merge_edges() not # being called. This is on purpose: the edge order should match exactly the @@ -518,9 +520,9 @@ State: 1 --END--""" res = spot.rabin_to_buchi_if_realizable(aut) if is_cpython: - assert(res.to_str('hoa') == exp2) + tc.assertEqual(res.to_str('hoa'), exp2) else: - assert(res.equivalent_to(spot.automaton(exp2))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp2))) # Test 12. aut = spot.automaton(""" @@ -565,9 +567,9 @@ State: 3 {0} res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 13. aut = spot.automaton(""" @@ -615,9 +617,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # rabin_to_buchi_if_realizable() does not call merge_edges() on purpose: the # edge order should match exactly the original automaton. @@ -644,9 +646,9 @@ State: 1 res = spot.rabin_to_buchi_if_realizable(aut) if is_cpython: - assert(res.to_str('hoa') == exp2) + tc.assertEqual(res.to_str('hoa'), exp2) else: - assert(res.equivalent_to(spot.automaton(exp2))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp2))) # Test 14. aut = spot.automaton(""" @@ -681,7 +683,7 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) -assert spot.rabin_to_buchi_if_realizable(aut) is None + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) +tc.assertIsNone(spot.rabin_to_buchi_if_realizable(aut)) diff --git a/tests/python/trival.py b/tests/python/trival.py index 8fcf6a1fa..ea844e29c 100644 --- a/tests/python/trival.py +++ b/tests/python/trival.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,30 +18,32 @@ # along with this program. If not, see . 
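All of the test-suite conversions in this series follow the same pattern: a single
module-level unittest.TestCase instance replaces bare assert statements, so that a
failure also reports the values that were compared. A minimal sketch of that pattern
(the compared values below are illustrative, not taken from any of the tests):

    from unittest import TestCase
    tc = TestCase()

    computed, expected = 2 + 2, 4
    tc.assertEqual(computed, expected)    # instead of: assert computed == expected
    tc.assertTrue(computed == expected)   # instead of: assert(computed == expected)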
import spot +from unittest import TestCase +tc = TestCase() v1 = spot.trival() v2 = spot.trival(False) v3 = spot.trival(True) v4 = spot.trival_maybe() -assert v1 != v2 -assert v1 != v3 -assert v2 != v3 -assert v2 == spot.trival(spot.trival.no_value) -assert v2 != spot.trival(spot.trival.yes_value) -assert v4 != v2 -assert v4 != v3 -assert v2 == False -assert True == v3 -assert v2 != True -assert False != v3 -assert v4 == spot.trival_maybe() -assert v4 == spot.trival(spot.trival.maybe_value) -assert v3 -assert -v2 -assert not -v1 -assert not v1 -assert not -v3 +tc.assertNotEqual(v1, v2) +tc.assertNotEqual(v1, v3) +tc.assertNotEqual(v2, v3) +tc.assertEqual(v2, spot.trival(spot.trival.no_value)) +tc.assertNotEqual(v2, spot.trival(spot.trival.yes_value)) +tc.assertNotEqual(v4, v2) +tc.assertNotEqual(v4, v3) +tc.assertEqual(v2, False) +tc.assertEqual(True, v3) +tc.assertNotEqual(v2, True) +tc.assertNotEqual(False, v3) +tc.assertEqual(v4, spot.trival_maybe()) +tc.assertEqual(v4, spot.trival(spot.trival.maybe_value)) +tc.assertTrue(v3) +tc.assertTrue(-v2) +tc.assertFalse(-v1) +tc.assertFalse(v1) +tc.assertFalse(-v3) for u in (v1, v2, v3): for v in (v1, v2, v3): - assert (u & v) == -(-u | -v) + tc.assertEqual((u & v), -(-u | -v)) diff --git a/tests/python/twagraph.py b/tests/python/twagraph.py index b8834b211..1ebcb8ac5 100644 --- a/tests/python/twagraph.py +++ b/tests/python/twagraph.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2021-2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -22,6 +22,8 @@ import spot from buddy import bddtrue, bddfalse +from unittest import TestCase +tc = TestCase() aut = spot.make_twa_graph(spot.make_bdd_dict()) @@ -29,98 +31,98 @@ try: print(aut.to_str()) exit(2) except RuntimeError as e: - assert "no state" in str(e) + tc.assertIn("no state", str(e)) try: aut.set_init_state(2) except ValueError as e: - assert "nonexisting" in str(e) + tc.assertIn("nonexisting", str(e)) try: aut.set_univ_init_state([2, 1]) except ValueError as e: - assert "nonexisting" in str(e) + tc.assertIn("nonexisting", str(e)) aut.new_states(3) aut.set_init_state(2) -assert aut.get_init_state_number() == 2 +tc.assertEqual(aut.get_init_state_number(), 2) aut.set_univ_init_state([2, 1]) -assert [2, 1] == list(aut.univ_dests(aut.get_init_state_number())) +tc.assertEqual([2, 1], list(aut.univ_dests(aut.get_init_state_number()))) try: aut.get_init_state() except RuntimeError as e: s = str(e) - assert "abstract interface" in s and "alternating automata" in s + tc.assertIn("abstract interface" in s and "alternating automata", s) cpy = spot.make_twa_graph(aut, spot.twa_prop_set.all()) -assert aut.to_str() == cpy.to_str() +tc.assertEqual(aut.to_str(), cpy.to_str()) all = aut.set_buchi() -assert aut.to_str() != cpy.to_str() +tc.assertNotEqual(aut.to_str(), cpy.to_str()) cpy = spot.make_twa_graph(aut, spot.twa_prop_set.all()) aut.new_acc_edge(0, 1, bddtrue, True) -assert aut.num_edges() == 1 + cpy.num_edges() +tc.assertEqual(aut.num_edges(), 1 + cpy.num_edges()) aut.prop_universal(True) aut.set_name("some name") cpy = spot.make_twa_graph(aut, spot.twa_prop_set(False, False, False, False, False, False)) -assert cpy.prop_universal() != aut.prop_universal() -assert cpy.prop_universal() == spot.trival.maybe() -assert cpy.get_name() == None +tc.assertNotEqual(cpy.prop_universal(), aut.prop_universal()) 
+tc.assertEqual(cpy.prop_universal(), spot.trival.maybe()) +tc.assertEqual(cpy.get_name(), None) cpy = spot.make_twa_graph(aut, spot.twa_prop_set(False, False, False, False, False, False), True) -assert cpy.get_name() == "some name" +tc.assertEqual(cpy.get_name(), "some name") from copy import copy cpy = copy(aut) -assert aut.to_str() == cpy.to_str() +tc.assertEqual(aut.to_str(), cpy.to_str()) cpy.set_init_state(1) -assert [2, 1] == list(aut.univ_dests(aut.get_init_state_number())) -assert cpy.get_init_state_number() == 1 -assert cpy.get_name() == "some name" +tc.assertEqual([2, 1], list(aut.univ_dests(aut.get_init_state_number()))) +tc.assertEqual(cpy.get_init_state_number(), 1) +tc.assertEqual(cpy.get_name(), "some name") try: s = aut.state_acc_sets(0) except RuntimeError as e: - assert "state-based acceptance" in str(e) + tc.assertIn("state-based acceptance", str(e)) try: s = aut.state_is_accepting(0) except RuntimeError as e: - assert "state-based acceptance" in str(e) + tc.assertIn("state-based acceptance", str(e)) aut.prop_state_acc(True) -assert aut.state_acc_sets(0) == all -assert aut.state_is_accepting(0) == True +tc.assertEqual(aut.state_acc_sets(0), all) +tc.assertEqual(aut.state_is_accepting(0), True) aut.set_init_state(0) aut.purge_unreachable_states() i = aut.get_init_state() -assert aut.state_is_accepting(i) == True +tc.assertEqual(aut.state_is_accepting(i), True) it = aut.succ_iter(i) it.first() -assert aut.edge_number(it) == 1 -assert aut.state_number(it.dst()) == 1 -assert aut.edge_storage(it).src == 0 -assert aut.edge_storage(1).src == 0 -assert aut.edge_data(it).cond == bddtrue -assert aut.edge_data(1).cond == bddtrue +tc.assertEqual(aut.edge_number(it), 1) +tc.assertEqual(aut.state_number(it.dst()), 1) +tc.assertEqual(aut.edge_storage(it).src, 0) +tc.assertEqual(aut.edge_storage(1).src, 0) +tc.assertEqual(aut.edge_data(it).cond, bddtrue) +tc.assertEqual(aut.edge_data(1).cond, bddtrue) aut.release_iter(it) aut.purge_dead_states() i = aut.get_init_state() -assert aut.state_is_accepting(i) == False +tc.assertEqual(aut.state_is_accepting(i), False) aut = spot.translate('FGa') # Kill the edge between state 0 and 1 -assert aut.edge_storage(2).src == 0 -assert aut.edge_storage(2).dst == 1 +tc.assertEqual(aut.edge_storage(2).src, 0) +tc.assertEqual(aut.edge_storage(2).dst, 1) aut.edge_data(2).cond = bddfalse -assert aut.num_edges() == 3 -assert aut.num_states() == 2 +tc.assertEqual(aut.num_edges(), 3) +tc.assertEqual(aut.num_states(), 2) aut.purge_dead_states() -assert aut.num_edges() == 1 -assert aut.num_states() == 1 +tc.assertEqual(aut.num_edges(), 1) +tc.assertEqual(aut.num_states(), 1) diff --git a/tests/python/zlktree.py b/tests/python/zlktree.py index df8fd86f0..e1b0c9e7b 100644 --- a/tests/python/zlktree.py +++ b/tests/python/zlktree.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton("""HOA: v1 States: 5 Start: 0 AP: 2 "p0" "p1" Acceptance: 4 Inf(3) | Fin(3) properties: trans-labels explicit-labels @@ -25,8 +27,8 @@ trans-acc --BODY-- State: 0 [!0&!1] 3 [!0&!1] 4 State: 1 [!0&!1] 4 {3} [0&!1] 0 {2} [!0&1] 1 {2} State: 2 [!0&1] 0 {0 2} [!0&!1] 1 State: 3 [!0&1] 2 State: 4 [0&!1] 3 --END--""") b = spot.zielonka_tree_transform(a) -assert spot.are_equivalent(a, b) -assert b.acc().is_buchi() +tc.assertTrue(spot.are_equivalent(a, b)) +tc.assertTrue(b.acc().is_buchi()) def report_missing_exception(): raise RuntimeError("missing exception") @@ -45,95 +47,96 @@ State: 2 [0&1] 8 {3} [0&1] 2 {1} [!0&1] 4 {3 4} [!0&!1] 3 {2 5} State: [!0&!1] 2 {5} [!0&!1] 0 {3} [!0&!1] 5 --END--""") aa = spot.acd(a) try: - assert aa.has_rabin_shape() + tc.assertTrue(aa.has_rabin_shape()) except RuntimeError as e: - assert 'CHECK_RABIN' in str(e) + tc.assertIn('CHECK_RABIN', str(e)) else: report_missing_exception() try: - assert not aa.has_streett_shape() + tc.assertFalse(aa.has_streett_shape()) except RuntimeError as e: - assert 'CHECK_STREETT' in str(e) + tc.assertIn('CHECK_STREETT', str(e)) else: report_missing_exception() try: - assert not aa.has_parity_shape() + tc.assertFalse(aa.has_parity_shape()) except RuntimeError as e: - assert 'CHECK_PARITY' in str(e) + tc.assertIn('CHECK_PARITY', str(e)) else: report_missing_exception() aa = spot.acd(a, spot.acd_options_CHECK_RABIN) -assert aa.has_rabin_shape() -assert aa.node_count() == 13 +tc.assertTrue(aa.has_rabin_shape()) +tc.assertEqual(aa.node_count(), 13) try: - assert not aa.has_streett_shape() + tc.assertFalse(aa.has_streett_shape()) except RuntimeError as e: - assert 'CHECK_STREETT' in str(e) + tc.assertIn('CHECK_STREETT', str(e)) else: report_missing_exception() try: - assert aa.has_parity_shape() + tc.assertTrue(aa.has_parity_shape()) except RuntimeError as e: - assert 'CHECK_PARITY' in str(e) + tc.assertIn('CHECK_PARITY', str(e)) else: report_missing_exception() aa = spot.acd(a, (spot.acd_options_CHECK_PARITY | spot.acd_options_ABORT_WRONG_SHAPE)) -assert aa.has_rabin_shape() -assert not aa.has_streett_shape() -assert not aa.has_parity_shape() -assert aa.node_count() == 0 +tc.assertTrue(aa.has_rabin_shape()) +tc.assertFalse(aa.has_streett_shape()) +tc.assertFalse(aa.has_parity_shape()) +tc.assertEqual(aa.node_count(), 0) + try: aa.first_branch(0) except RuntimeError as e: - assert 'ABORT_WRONG_SHAPE' in str(e) + tc.assertIn('ABORT_WRONG_SHAPE', str(e)) else: report_missing_exception() try: aa.step(0, 0) except RuntimeError as e: - assert 'incorrect branch number' in str(e) + tc.assertIn('incorrect branch number', str(e)) else: report_missing_exception() try: aa.node_acceptance(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() try: aa.edges_of_node(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() try: aa.node_level(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() a = spot.translate('true') a.set_acceptance(spot.acc_cond('f')) b = spot.acd_transform(a) -assert a.equivalent_to(b) +tc.assertTrue(a.equivalent_to(b)) a = spot.translate('true') a.set_acceptance(spot.acc_cond('f')) b = spot.zielonka_tree_transform(a) -assert a.equivalent_to(b) +tc.assertTrue(a.equivalent_to(b)) a = spot.automaton("""HOA: v1 name: "^ G F 
p0 G F p1" States: 5 Start: 2 AP: 2 "a" "b" acc-name: Rabin 2 Acceptance: 4 (Fin(0) & Inf(1)) | @@ -144,8 +147,8 @@ complete properties: deterministic --BODY-- State: 0 {0} [!0&!1] 0 2} [!0&!1] 1 [0&!1] 4 [!0&1] 3 [0&1] 2 State: 4 {0 3} [!0&!1] 0 [0&!1] 4 [!0&1] 3 [0&1] 2 --END--""") b = spot.acd_transform_sbacc(a, True) -assert str(b.acc()) == '(3, Fin(0) & (Inf(1) | Fin(2)))' -assert a.equivalent_to(b) +tc.assertEqual(str(b.acc()), '(3, Fin(0) & (Inf(1) | Fin(2)))') +tc.assertTrue(a.equivalent_to(b)) b = spot.acd_transform_sbacc(a, False) -assert str(b.acc()) == '(2, Fin(0) & Inf(1))' -assert a.equivalent_to(b) +tc.assertEqual(str(b.acc()), '(2, Fin(0) & Inf(1))') +tc.assertTrue(a.equivalent_to(b)) diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 8f157014d..85ef359b0 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -392,6 +392,27 @@ for dir in "${INCDIR-..}" "${INCDIR-..}/../bin" "${INCDIR-..}/../tests"; do done || : # Make sure sh does not abort when read exits with false. done +# Rules for Python tests +for dir in "${INCDIR-..}/../tests"; do + + find "$dir" -name "*.py" -a -type f -a -print | + while read file; do + fail=false + + # Strip comments. + sed 's,[ ]*#.*,,' < $file > $tmp + + $GREP '[ ]$' $tmp && + diag 'Trailing whitespace.' + + $GREP -E '([ ]|^)assert[ (]' $tmp && + diag "replace assert keywords by unittest assertion tests" + + $fail && echo "$file" >>failures.style + done || : # Make sure sh does not abort when read exits with false. +done + + if test -f failures.style; then echo "The following files contain style errors:" cat failures.style From 187bacc25402c1673437c2b0ea83459c512dc263 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 10 Mar 2022 15:49:46 +0100 Subject: [PATCH 005/606] tests: don't wipe python environment * tests/run.in: keep original PYTHONPATH contents * NEWS: mention the bug --- NEWS | 3 +++ tests/run.in | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 89a1d52a2..345ec7fdd 100644 --- a/NEWS +++ b/NEWS @@ -77,6 +77,9 @@ New in spot 2.10.4.dev (net yet released) - Do not use the seq command in test cases, it is not available everywhere. + - Do not erase the previous contents of the PYTHONPATH environment + variable when running tests, prepend to it instead. 
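In Python terms, the fix described in the entry above amounts to prepending the build
tree to whatever search path the user already had, rather than replacing it. A rough
sketch of the intended behaviour (the build path is a placeholder; tests/run.in does
the equivalent in shell):

    import os
    build_pypath = '/path/to/build/python'               # placeholder
    previous = os.environ.get('PYTHONPATH', '')
    os.environ['PYTHONPATH'] = \
        build_pypath + (':' + previous if previous else '')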
+ New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/tests/run.in b/tests/run.in index d14bf52a9..7eaa7732c 100755 --- a/tests/run.in +++ b/tests/run.in @@ -104,18 +104,21 @@ export srcdir case $1 in *.ipynb) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ PYTHONIOENCODING=utf-8:surrogateescape \ exec $PREFIXCMD @PYTHON@ @abs_srcdir@/python/ipnbdoctest.py "$@";; *.py) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ "$@";; *.test) exec sh -x "$@";; *.pl) exec $PERL "$@";; *python*|*jupyter*|*pypy*) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD "$@";; *) echo "Unknown extension" >&2 From 0745e735bb8bf90610bc297009cd07fa78a51205 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 10 Mar 2022 10:53:18 +0100 Subject: [PATCH 006/606] fix typos and make formula_from_bdd more usable in Python * python/spot/impl.i (formula_from_bdd): Instantiate for twa_graph. * spot/twa/twa.hh (register_aps_from_dict): Typo in exception. * tests/python/except.py: More tests for the above. * tests/python/bdddict.py: Typo in comment. --- python/spot/impl.i | 2 ++ spot/twa/twa.hh | 4 ++-- tests/python/bdddict.py | 4 ++-- tests/python/except.py | 36 ++++++++++++++++++++++++++++++++++++ 4 files changed, 42 insertions(+), 4 deletions(-) diff --git a/python/spot/impl.i b/python/spot/impl.i index 7132a5cc6..b7f116201 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -533,6 +533,8 @@ namespace std { %include %include %include +%template(formula_to_bdd) spot::formula_to_bdd; + %include /* These operators may raise exceptions, and we do not want Swig4 to convert those exceptions to NotImplemented. */ diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index cb1e208ec..819a90962 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2013-2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011, 2013-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -761,7 +761,7 @@ namespace spot void register_aps_from_dict() { if (!aps_.empty()) - throw std::runtime_error("register_ap_from_dict() may not be" + throw std::runtime_error("register_aps_from_dict() may not be" " called on an automaton that has already" " registered some AP"); auto& m = get_dict()->bdd_map; diff --git a/tests/python/bdddict.py b/tests/python/bdddict.py index 0172bd050..b7b442b1f 100644 --- a/tests/python/bdddict.py +++ b/tests/python/bdddict.py @@ -17,8 +17,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# Make sure we can leep track of BDD association in Python using bdd_dict, as -# discussed in issue #372. +# Make sure we can keep track of BDD association in Python using bdd_dict, as +# discussed in (deleted???) issue #372. # CPython use reference counting, so that automata are destructed # when we expect them to be. 
However other implementations like diff --git a/tests/python/except.py b/tests/python/except.py index 76f17f76c..8674721c9 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -295,3 +295,39 @@ except RuntimeError as e: se = str(e) tc.assertIn("synthesis-outputs", se) tc.assertIn("unregistered proposition", se) +else: + report_missing_exception() + + +a = spot.make_twa_graph() +s = a.new_state() +b = spot.formula_to_bdd("a & b", a.get_dict(), a) +a.new_edge(s, s, b, []) +try: + print(a.to_str('hoa')) +except RuntimeError as e: + tc.assertIn("unregistered atomic propositions", str(e)) +else: + report_missing_exception() + +a.register_aps_from_dict() +tc.assertEqual(a.to_str('hoa'), """HOA: v1 +States: 1 +Start: 0 +AP: 2 "a" "b" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0&1] 0 +--END--""") + +try: + a.register_aps_from_dict() +except RuntimeError as e: + se = str(e) + tc.assertIn("register_aps_from_dict", se) + tc.assertIn("already registered", se) +else: + report_missing_exception() From e248f4500d9ed1c0fca03a606a9a6442fa0ac560 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 15 Mar 2022 14:01:25 +0100 Subject: [PATCH 007/606] ltlsynt: typo in help * bin/ltlsynt.cc: here --- bin/ltlsynt.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 45fd3b159..73ec6b2b1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -65,7 +65,7 @@ static const argp_option options[] = "comma-separated list of controllable (a.k.a. output) atomic" " propositions", 0}, { "ins", OPT_INPUT, "PROPS", 0, - "comma-separated list of controllable (a.k.a. output) atomic" + "comma-separated list of uncontrollable (a.k.a. input) atomic" " propositions", 0}, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, From 4f69e99c453af747a16a852bb96c25f97bfaf3e0 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Thu, 17 Mar 2022 11:38:23 +0100 Subject: [PATCH 008/606] synthesis.ipynb: correct typos * tests/python/synthesis.ipynb: here --- tests/python/synthesis.ipynb | 3402 +++++++++++++++++----------------- 1 file changed, 1685 insertions(+), 1717 deletions(-) diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index be001a9b3..e290f02b5 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,7 +3,6 @@ { "cell_type": "code", "execution_count": 1, - "id": "452c00ae", "metadata": {}, "outputs": [], "source": [ @@ -14,13 +13,12 @@ }, { "cell_type": "markdown", - "id": "7545ab74", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", - "If you are not familiar with how Spot represent games, please read the `games` notebook first.\n", + "If you are not familiar with how Spot represents games, please read the `games` notebook first.\n", "\n", - "In Reactive Synthesis, the goal is to build an electronic circuit that reacts to some input signals by producing some output signals, under some LTL constraints that tie both input and output. Of course the input signals are not controlable, so only job is to decide what output signal to produce.\n", + "In Reactive Synthesis, the goal is to build an electronic circuit that reacts to some input signals by producing some output signals, under some LTL constraints that tie both input and output. 
Of course the input signals are not controllable, so only job is to decide what output signal to produce.\n", "\n", "# Reactive synthesis in four steps\n", "\n", @@ -33,13 +31,12 @@ "\n", "Each of these steps is parametrized by a structure called `synthesis_info`. This structure stores some additional data needed to pass fine-tuning options or to store statistics.\n", "\n", - "The `ltl_to_game` function takes the LTL specification, and the list of controlable atomic propositions (or output signals). It returns a two-player game, where player 0 plays the input variables (and wants to invalidate the acceptance condition), and player 1 plays the output variables (and wants to satisfy the output condition). The conversion from LTL to parity automata can use one of many algorithms, and can be specified in the `synthesis_info` structure (this works like the `--algo=` option of `ltlsynt`)." + "The `ltl_to_game` function takes the LTL specification, and the list of controllable atomic propositions (or output signals). It returns a two-player game, where player 0 plays the input variables (and wants to invalidate the acceptance condition), and player 1 plays the output variables (and wants to satisfy the output condition). The conversion from LTL to parity automata can use one of many algorithms, and can be specified in the `synthesis_info` structure (this works like the `--algo=` option of `ltlsynt`)." ] }, { "cell_type": "code", "execution_count": 2, - "id": "fb49e681", "metadata": {}, "outputs": [ { @@ -56,649 +53,649 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "[parity max odd 4]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", 
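Condensed into code, the game-construction step described above looks as follows
(a sketch only: the formula, the output proposition, and the choice of algorithm
are illustrative, not the ones used in this notebook):

    si = spot.synthesis_info()
    si.s = spot.synthesis_info.algo_LAR      # LTL->parity translation to use
    # 'o0' is assumed to be the only controllable (output) proposition.
    game = spot.ltl_to_game('G(i0 -> F(o0))', ['o0'], si)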
"1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + 
"\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27d57dc90> >" + " *' at 0x7f44ac249ed0> >" ] }, "metadata": {}, @@ -717,7 +714,6 @@ }, { "cell_type": "markdown", - "id": "3797307f", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." 
@@ -726,7 +722,6 @@ { "cell_type": "code", "execution_count": 3, - "id": "62fb169f", "metadata": {}, "outputs": [ { @@ -742,588 +737,588 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + " viewBox=\"0.00 0.00 650.40 360.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "[parity max odd 4]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - 
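As a sketch of the solving step just described, continuing from a game built with
ltl_to_game: solve_game reports whether the output player wins, and the overload
taking a synthesis_info additionally records statistics such as run time:

    realizable = spot.solve_game(game, si)   # same verdict as spot.solve_game(game)
    if not realizable:
        print('the specification is unrealizable')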
"\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1345,16 +1340,14 @@ }, { "cell_type": "markdown", - "id": "d5a53d3f", "metadata": {}, "source": [ - "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). 
The output should be interpreted as a mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible possibles inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." + "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." ] }, { "cell_type": "code", "execution_count": 4, - "id": "cdf8f5f1", "metadata": {}, "outputs": [ { @@ -1370,303 +1363,303 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + 
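To make the (ins)&(outs) shape concrete, here is a purely illustrative edge label
built from the propositions of this example; in a separated machine every edge
condition is such a conjunction of an input part and an output part:

    ins = spot.formula('!i0 & i1')          # input half: what the environment plays
    outs = spot.formula('o0')               # output half: what the controller answers
    label = spot.formula.And([ins, outs])   # the edge condition encodes both at once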
"6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -1691,169 +1684,169 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + 
"\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" @@ -1869,7 +1862,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "simplification lvl 2 : bisimulation-based reduction with output output assignement\n" + "simplification lvl 2 : bisimulation-based reduction with output assignement\n" ] }, { @@ -1878,119 +1871,119 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2015,75 +2008,75 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2108,75 +2101,75 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", 
"\n", "\n", - "\n", + "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", - "\n", + "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" @@ -2201,119 +2194,119 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2330,14 +2323,14 @@ "# We have different levels of simplification:\n", "# 0 : No simplification\n", "# 1 : bisimulation-based reduction\n", - "# 2 : bisimulation-based reduction with output output assignement\n", + "# 2 : bisimulation-based reduction with output assignement\n", "# 3 : SAT-based exact minimization\n", "# 4 : First 1 then 3 (exact)\n", "# 5 : First 2 then 3 (not exact)\n", "\n", "descr = [\"0 : No simplification\", \n", " \"1 : bisimulation-based reduction\", \n", - " \"2 : bisimulation-based reduction with output output assignement\",\n", + " \"2 : bisimulation-based reduction with output assignement\",\n", " \"3 : SAT-based exact minimization\",\n", " \"4 : First 1 then 3 (exact)\",\n", " \"5 : First 2 then 3 (not exact)\"]\n", @@ -2352,7 +2345,6 @@ }, { "cell_type": "markdown", - "id": "511093c3", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2361,7 +2353,6 @@ { "cell_type": "code", "execution_count": 5, - "id": "cc977286", "metadata": {}, "outputs": [ { @@ -2370,260 +2361,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2643,12 +2634,11 @@ }, { "cell_type": "markdown", - "id": "aa6fe484", "metadata": {}, "source": [ - "# Converting the separated mealy machine to AIGER\n", + "# Converting the separated mealy machine to AIG\n", "\n", - "A separated mealy machine can be converted to a circuit in the [AIGER format](http://fmv.jku.at/aiger/FORMAT.aiger) using `mealy_machine_to_aig()`. This takes a second argument specifying what type of encoding to use (exactly like `ltlsynt`'s `--aiger=...` option). \n", + "A separated Mealy machine can be converted to a circuit in the [AIGER format](http://fmv.jku.at/aiger/FORMAT.aiger) using `mealy_machine_to_aig()`. This takes a second argument specifying what type of encoding to use (exactly like `ltlsynt`'s `--aiger=...` option). \n", "\n", "In this case, the circuit is quite simple: `o0` should be the negation of previous value of `i1`. This is done by storing the value of `i1` in a latch. And the value if `i0` can be ignored." 
] @@ -2656,7 +2646,6 @@ { "cell_type": "code", "execution_count": 6, - "id": "78261ec4", "metadata": {}, "outputs": [ { @@ -2665,60 +2654,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2732,7 +2721,6 @@ }, { "cell_type": "markdown", - "id": "f95dc6b7", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2741,7 +2729,6 @@ { "cell_type": "code", "execution_count": 7, - "id": "14410565", "metadata": {}, "outputs": [ { @@ -2750,54 +2737,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2817,16 +2804,14 @@ }, { "cell_type": "markdown", - "id": "9ccbc2e2", "metadata": {}, "source": [ - "To encode the circuit in the aig format (ASCII version) use:" + "To encode the circuit in the AIGER format (ASCII version) use:" ] }, { "cell_type": "code", "execution_count": 8, - "id": "06e485d0", "metadata": {}, "outputs": [ { @@ -2850,7 +2835,6 @@ }, { "cell_type": "markdown", - "id": "5f006648", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2858,7 +2842,6 @@ }, { "cell_type": "markdown", - "id": "9905208f", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). 
In that case those \n", @@ -2870,7 +2853,6 @@ { "cell_type": "code", "execution_count": 9, - "id": "560a7e46", "metadata": {}, "outputs": [ { @@ -2879,167 +2861,167 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))))\n", - "[parity max odd 6]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))))\n", + "[parity max odd 6]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27ca90390> >" + " *' at 0x7f44ac1d6750> >" ] }, "metadata": {}, @@ -3051,70 +3033,70 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27d57dd20> >" + " *' at 0x7f44ac249ed0> >" ] }, "metadata": {}, @@ -3126,72 +3108,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 143.20 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - 
"o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3211,7 +3193,6 @@ }, { "cell_type": "markdown", - "id": "06d42ec3", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3220,7 +3201,6 @@ { "cell_type": "code", "execution_count": 10, - "id": "6ea759ea", "metadata": {}, "outputs": [ { @@ -3229,96 +3209,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3331,22 +3311,20 @@ }, { "cell_type": "markdown", - "id": "4135f43e", "metadata": {}, "source": [ - "# Combining mealy machines\n", + "# Combining Mealy machines\n", "\n", - "It can happen that the complet specification of the controller can be separated into sub-specifications with DISJOINT output propositions, see Finkbeiner et al. Specification Decomposition for Reactive Synthesis.\n", - "This results in multiple mealy machines which have to be converted into one single aiger circuit.\n", + "It can happen that the complete specification of the controller can be separated into sub-specifications with DISJOINT output propositions, see Finkbeiner et al. Specification Decomposition for Reactive Synthesis.\n", + "This results in multiple Mealy machines which have to be converted into one single AIG circuit.\n", "\n", - "This can be done using the function `mealy_machines_to_aig()`, which takes a vector of separated mealy machines as argument.\n", - "In order for this to work, all mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options strucuture." + "This can be done using the function `mealy_machines_to_aig()`, which takes a vector of separated Mealy machines as argument.\n", + "In order for this to work, all Mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options strucuture." ] }, { "cell_type": "code", "execution_count": 11, - "id": "4f9be142", "metadata": {}, "outputs": [ { @@ -3362,158 +3340,158 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "[parity max odd 4]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "[parity max odd 4]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", + "\n", "\n", "\n", "\n", @@ -3539,94 +3517,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3652,108 +3630,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3780,7 +3758,6 @@ }, { "cell_type": "markdown", - "id": "b3985f04", "metadata": {}, "source": [ "# Reading an AIGER-file\n", @@ -3795,7 +3772,6 @@ { "cell_type": "code", "execution_count": 12, - "id": "3bc0b1f2", "metadata": {}, "outputs": [], "source": [ @@ -3816,7 +3792,6 @@ { "cell_type": "code", "execution_count": 13, - "id": "1455e6ab", "metadata": {}, "outputs": [ { @@ -3825,108 +3800,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3941,7 +3916,6 @@ { "cell_type": "code", "execution_count": 14, - "id": "0c418256", "metadata": {}, "outputs": [ { @@ -3970,7 +3944,6 @@ { "cell_type": "code", "execution_count": 15, - "id": "bd4b6aa2", "metadata": {}, "outputs": [ { @@ -3987,16 +3960,14 @@ }, { "cell_type": "markdown", - "id": "94fd22a1", "metadata": {}, "source": [ - "An aiger circuit can be transformed into a monitor/mealy machine. 
This can be used for instance to check that it does not intersect the negation of the specification." + "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." ] }, { "cell_type": "code", "execution_count": 16, - "id": "b157ec16", "metadata": {}, "outputs": [ { @@ -4005,52 +3976,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27ca90cf0> >" + " *' at 0x7f44ac2649c0> >" ] }, "execution_count": 16, @@ -4064,17 +4035,15 @@ }, { "cell_type": "markdown", - "id": "671b849d", "metadata": {}, "source": [ - "Note that the generation of aiger circuits from mealy machines is flexible and accepts separated mealy machines\n", - "as well as split mealy machines." + "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", + "as well as split Mealy machines." ] }, { "cell_type": "code", "execution_count": 17, - "id": "fcf3b73e", "metadata": {}, "outputs": [ { @@ -4083,114 +4052,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4222,7 +4191,6 @@ { "cell_type": "code", "execution_count": 18, - "id": "cd06f9ab", "metadata": {}, "outputs": [ { @@ -4231,180 +4199,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4425,9 +4393,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "'Python Interactive'", "language": "python", - "name": "python3" + "name": "748aac80-c5a9-4430-8d88-15820461ebdf" }, "language_info": { "codemirror_mode": { @@ -4439,7 +4407,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.3" } }, "nbformat": 4, From c1e6340228303ba5ca0766597f815119f0fefa29 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 17 Mar 2022 10:55:30 +0100 Subject: [PATCH 009/606] optionmap: set_if_unset and simplifications * spot/misc/optionmap.hh (set_if_unset): New method. * spot/misc/optionmap.cc (set_if_unset, set, set_str): Implement set_if_unset, and simplify set and set_str to not perform two lookups. * spot/twaalgos/synthesis.cc (create_translator): Use set_if_unset to simplify the code. --- spot/misc/optionmap.cc | 36 +++++++++++++++++++++++++++++------- spot/misc/optionmap.hh | 7 +++++-- spot/twaalgos/synthesis.cc | 12 ++++-------- 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/spot/misc/optionmap.cc b/spot/misc/optionmap.cc index 3349f0f0d..4db5235eb 100644 --- a/spot/misc/optionmap.cc +++ b/spot/misc/optionmap.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2013-2016, 2018 Laboratoire de Recherche +// Copyright (C) 2008, 2013-2016, 2018, 2022 Laboratoire de Recherche // et Développement de l'Epita (LRDE). 
// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -158,17 +158,39 @@ namespace spot int option_map::set(const char* option, int val, int def) { - int old = get(option, def); - set_(option, val); - return old; + if (auto [p, b] = options_.emplace(option, val); b) + { + unused_.insert(option); + return def; + } + else + { + int old = p->second; + p->second = val; + return old; + } + } + + void + option_map::set_if_unset(const char* option, int val) + { + if (options_.emplace(option, val).second) + unused_.insert(option); } std::string option_map::set_str(const char* option, std::string val, std::string def) { - std::string old = get_str(option, def); - set_str_(option, val); - return old; + if (auto [p, b] = options_str_.emplace(option, val); b) + { + unused_.insert(option); + return def; + } + else + { + std::swap(val, p->second); + return val; + } } void diff --git a/spot/misc/optionmap.hh b/spot/misc/optionmap.hh index ea06c62f9..229733a18 100644 --- a/spot/misc/optionmap.hh +++ b/spot/misc/optionmap.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2016-2017 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE) +// Copyright (C) 2013, 2015, 2016-2017, 2022 Laboratoire de Recherche +// et Developpement de l'Epita (LRDE) // Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. @@ -80,6 +80,9 @@ namespace spot /// or \a def otherwise. int set(const char* option, int val, int def = 0); + /// \brief Set the value of \a option to \a val if it is unset. + void set_if_unset(const char* option, int val); + /// \brief Set the value of a string \a option to \a val. /// /// \return The previous value associated to \a option if declared, diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 9025bb303..ec2defb4f 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020, 2021 Laboratoire de Recherche et +// Copyright (C) 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -768,13 +768,9 @@ namespace spot auto sol = gi.s; const bdd_dict_ptr& dict = gi.dict; - for (auto&& p : std::vector> - {{"simul", 0}, - {"ba-simul", 0}, - {"det-simul", 0}, - {"tls-impl", 1}, - {"wdba-minimize", 2}}) - extra_options.set(p.first, extra_options.get(p.first, p.second)); + extra_options.set_if_unset("simul", 0); + extra_options.set_if_unset("tls-impl", 1); + extra_options.set_if_unset("wdba-minimize", 2); translator trans(dict, &extra_options); switch (sol) From 75818fde1361203c2b63c717f914fc589306cc10 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 17 Mar 2022 14:30:05 +0100 Subject: [PATCH 010/606] synthesis: fix suboptimal colorization after LAR * spot/twaalgos/synthesis.cc (ltl_to_game): In LAR and LAR_OLD mode, for max odd and colorize the game after the split, not before. The previous code used to colorize twice, and could waste up to 4 colors in the process. * tests/core/ltlsynt.test, tests/python/_mealy.ipynb, tests/python/games.ipynb, tests/python/synthesis.ipynb, tests/python/synthesis.py: Adjust all test cases to reflect the fact that the game uses fewer colors. 
--- spot/twaalgos/synthesis.cc | 6 +- tests/core/ltlsynt.test | 64 +- tests/python/_mealy.ipynb | 89 +- tests/python/games.ipynb | 119 +- tests/python/synthesis.ipynb | 3338 +++++++++++++++++----------------- tests/python/synthesis.py | 10 +- 6 files changed, 1820 insertions(+), 1806 deletions(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index ec2defb4f..6d4537206 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -974,16 +974,13 @@ namespace spot if (gi.s == algo::LAR) { dpa = to_parity(aut); - // reduce_parity is called by to_parity(), - // but with colorization turned off. - colorize_parity_here(dpa, true); + // reduce_parity is called by to_parity() } else { dpa = to_parity_old(aut); dpa = reduce_parity_here(dpa, true); } - change_parity_here(dpa, parity_kind_max, parity_style_odd); if (bv) bv->paritize_time += sw.stop(); if (vs) @@ -995,6 +992,7 @@ namespace spot if (bv) sw.start(); dpa = split_2step(dpa, outs, true); + change_parity_here(dpa, parity_kind_max, parity_style_odd); colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 24a53556a..215143df2 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -59,11 +59,11 @@ parity 13; 1 1 1 6,3; parity 5; 1 1 0 4,5 "INIT"; -5 4 1 1,1; -4 5 1 0,1; +5 2 1 1,1; +4 3 1 0,1; 0 1 0 2,3; -3 5 1 1; -2 3 1 0,0; +3 3 1 1; +2 1 1 0,0; EOF : > out @@ -230,10 +230,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 3 colors LAR construction done in X seconds -DPA has 4 states, 3 colors +DPA has 4 states, 1 colors split inputs and outputs done in X seconds automaton has 12 states -solving game with acceptance: parity max odd 5 +solving game with acceptance: parity max odd 3 game solved in X seconds EOF ltlsynt -f "G(Fi0 && Fi1 && Fi2) -> G(i1 <-> o0)" --outs="o0" --algo=lar \ @@ -569,10 +569,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 2 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds EOF ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out @@ -646,10 +646,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 4 states and 1 colors LAR construction done in X seconds -DPA has 4 states, 4 colors +DPA has 4 states, 1 colors split inputs and outputs done in X seconds automaton has 9 states -solving game with acceptance: parity max odd 6 +solving game with acceptance: Streett 1 game solved in X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -663,20 +663,20 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds trying to create strategy directly for (a | x) -> x direct strategy might exist but was not found. 
translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -692,20 +692,20 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 2 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds trying to create strategy directly for Gy direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 2 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -720,10 +720,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: parity max odd 3 game solved in X seconds EOF ltlsynt -f '!F(a|b)' --outs=b --decompose=yes --aiger --verbose 2> out || true @@ -737,10 +737,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: parity max odd 3 game solved in X seconds EOF ltlsynt -f 'G!(a -> b)' --outs=b --decompose=yes --aiger\ @@ -755,10 +755,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 4 colors +DPA has 2 states, 1 colors split inputs and outputs done in X seconds automaton has 5 states -solving game with acceptance: parity max odd 6 +solving game with acceptance: Streett 1 game solved in X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -775,30 +775,30 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds trying to create strategy directly for a -> c direct strategy might exist but was not found. 
translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds trying to create strategy directly for a -> d direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -814,10 +814,10 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 1 states and 1 colors LAR construction done in X seconds -DPA has 1 states, 2 colors +DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: parity max odd 3 game solved in X seconds EOF ltlsynt -f '!(F(a | b))' --outs=b, --decompose=yes \ diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index 0fbad3d08..4e7374852 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -3,6 +3,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "8bca10b8", "metadata": {}, "outputs": [], "source": [ @@ -12,6 +13,7 @@ }, { "cell_type": "markdown", + "id": "c73e997a", "metadata": {}, "source": [ "Test the Mealy printer." @@ -20,6 +22,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "f8eff7ed", "metadata": {}, "outputs": [], "source": [ @@ -29,6 +32,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "ad3c80bc", "metadata": {}, "outputs": [ { @@ -49,6 +53,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "50130d85", "metadata": {}, "outputs": [ { @@ -60,82 +65,78 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !c\n", - "\n", + "\n", + "\n", + "!a & !c\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a | c\n", - "\n", + "\n", + "\n", + "a | c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", + "\n", + "\n", + "!b & !d\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "b | d\n", - "\n", + "\n", + "\n", + "b | d\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f300caabba0> >" + " *' at 0x7f32ec50ce40> >" ] }, "execution_count": 4, @@ -150,6 +151,7 @@ { "cell_type": "code", "execution_count": 5, + "id": "3d56cda6", "metadata": {}, "outputs": [], "source": [ @@ -159,6 +161,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "c24548a1", "metadata": {}, "outputs": [ { @@ -213,7 +216,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f300c179300> >" + " *' at 0x7f32ec571c30> >" ] }, "execution_count": 6, @@ -228,6 +231,7 @@ { "cell_type": 
"code", "execution_count": 7, + "id": "88f2c0e0", "metadata": {}, "outputs": [], "source": [ @@ -237,6 +241,7 @@ { "cell_type": "code", "execution_count": 8, + "id": "e626997e", "metadata": {}, "outputs": [ { @@ -285,7 +290,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f300c179300> >" + " *' at 0x7f32ec571c30> >" ] }, "execution_count": 8, @@ -300,7 +305,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -314,7 +319,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.9.2" } }, "nbformat": 4, diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index 324aab546..028ed0372 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -693,18 +693,14 @@ " viewBox=\"0.00 0.00 566.58 353.20\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))))\n", - "[parity max odd 5]\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", @@ -786,7 +782,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -800,7 +796,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -830,7 +826,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -858,7 +854,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -886,7 +882,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -894,7 +890,7 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -902,7 +898,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -910,7 +906,7 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -918,7 +914,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -926,13 +922,13 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5c143ec630> >" + " *' at 0x7f80642eee70> >" ] }, "execution_count": 8, @@ -967,8 +963,8 @@ "States: 12\n", "Start: 4\n", "AP: 2 \"b\" \"a\"\n", - "acc-name: parity max odd 5\n", - "Acceptance: 5 Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0))))\n", + "acc-name: parity max odd 3\n", + "Acceptance: 3 Fin(2) & (Inf(1) | Fin(0))\n", "properties: trans-labels explicit-labels trans-acc colored complete\n", "properties: deterministic\n", "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1 1\n", @@ -988,22 +984,22 @@ "[!1] 10 {1}\n", "[1] 11 {1}\n", "State: 5\n", - "[t] 0 {3}\n", + "[t] 0 {1}\n", "State: 6\n", - "[t] 1 {4}\n", + "[t] 1 {2}\n", "State: 7\n", - "[t] 0 {4}\n", + "[t] 0 {2}\n", "State: 8\n", - "[t] 2 {3}\n", + "[t] 2 {1}\n", "State: 9\n", - "[!0] 2 {3}\n", - "[0] 3 {4}\n", + "[!0] 2 {1}\n", + "[0] 3 {2}\n", "State: 10\n", - "[!0] 0 {3}\n", - "[0] 3 {3}\n", + "[!0] 0 {1}\n", + "[0] 3 {1}\n", "State: 11\n", - "[!0] 1 {3}\n", - "[0] 3 {3}\n", + "[!0] 1 {1}\n", + "[0] 3 {1}\n", "--END--\n" ] } @@ -1057,18 +1053,14 @@ " viewBox=\"0.00 0.00 566.58 353.20\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))))\n", - "[parity max odd 5]\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", @@ -1150,7 +1142,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ 
-1164,7 +1156,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1194,7 +1186,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1222,7 +1214,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1250,7 +1242,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1258,7 +1250,7 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1266,7 +1258,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1274,7 +1266,7 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1282,7 +1274,7 @@ "\n", "\n", "!b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1290,13 +1282,13 @@ "\n", "\n", "b\n", - "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5c143fdf30> >" + " *' at 0x7f806443b1b0> >" ] }, "execution_count": 11, @@ -1307,11 +1299,18 @@ "source": [ "spot.highlight_strategy(game)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1325,7 +1324,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.9.2" } }, "nbformat": 4, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index e290f02b5..8c706c481 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,6 +3,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "1b9b5964", "metadata": {}, "outputs": [], "source": [ @@ -13,6 +14,7 @@ }, { "cell_type": "markdown", + "id": "b465c6ba", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", @@ -37,6 +39,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "8576bdad", "metadata": {}, "outputs": [ { @@ -53,649 +56,647 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", 
+ "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", 
"\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f44ac249ed0> >" + " *' at 0x7fc5ec2bf930> >" ] }, "metadata": {}, @@ -714,6 +715,7 @@ }, { "cell_type": "markdown", + "id": "fbd7095b", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." 
@@ -722,6 +724,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "02d7525e", "metadata": {}, "outputs": [ { @@ -737,588 +740,586 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + " viewBox=\"0.00 0.00 650.45 360.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + 
"\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1340,6 +1341,7 @@ }, { "cell_type": "markdown", + "id": "d66f3da1", "metadata": {}, "source": [ "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). 
The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." @@ -1348,6 +1350,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "89342e18", "metadata": {}, "outputs": [ { @@ -1363,303 +1366,303 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + 
"\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -1684,169 +1687,169 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" @@ -1871,119 +1874,119 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - 
"\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2008,75 +2011,75 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2101,75 +2104,75 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" @@ -2194,119 +2197,119 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - 
"\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" @@ -2345,6 +2348,7 @@ }, { "cell_type": "markdown", + "id": "435c9bae", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2353,6 +2357,7 @@ { "cell_type": "code", "execution_count": 5, + "id": "688a1ced", "metadata": {}, "outputs": [ { @@ -2361,260 +2366,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2634,6 +2639,7 @@ }, { "cell_type": "markdown", + "id": "e3bb2d7d", "metadata": {}, "source": [ "# Converting the separated mealy machine to AIG\n", @@ -2646,6 +2652,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "b5fea2d1", "metadata": {}, "outputs": [ { @@ -2654,60 +2661,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2721,6 +2728,7 @@ }, { "cell_type": "markdown", + "id": "2a1a6fc9", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2729,6 +2737,7 @@ { "cell_type": "code", "execution_count": 7, + "id": "f909d578", "metadata": {}, "outputs": [ { @@ -2737,54 +2746,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2804,6 +2813,7 @@ }, { "cell_type": "markdown", + "id": "2c87313f", "metadata": {}, "source": [ "To encode the circuit in the AIGER format (ASCII version) use:" @@ -2812,6 +2822,7 @@ { "cell_type": "code", 
"execution_count": 8, + "id": "1b787f50", "metadata": {}, "outputs": [ { @@ -2835,6 +2846,7 @@ }, { "cell_type": "markdown", + "id": "72038258", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2842,6 +2854,7 @@ }, { "cell_type": "markdown", + "id": "3fbb3c2f", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). In that case those \n", @@ -2853,6 +2866,7 @@ { "cell_type": "code", "execution_count": 9, + "id": "8ed4e382", "metadata": {}, "outputs": [ { @@ -2861,167 +2875,159 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))))\n", - "[parity max odd 6]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f44ac1d6750> >" + " *' at 0x7fc5dc8463f0> >" ] }, "metadata": {}, @@ -3033,70 +3039,70 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f44ac249ed0> >" + " *' at 0x7fc5ec2bf990> >" ] }, "metadata": {}, @@ -3108,72 +3114,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 142.70 352.00\" xmlns=\"http://www.w3.org/2000/svg\" 
xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3193,6 +3199,7 @@ }, { "cell_type": "markdown", + "id": "a82ca6c6", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3201,6 +3208,7 @@ { "cell_type": "code", "execution_count": 10, + "id": "a86436a7", "metadata": {}, "outputs": [ { @@ -3209,96 +3217,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3311,6 +3319,7 @@ }, { "cell_type": "markdown", + "id": "9af5c9b9", "metadata": {}, "source": [ "# Combining Mealy machines\n", @@ -3325,6 +3334,7 @@ { "cell_type": "code", "execution_count": 11, + "id": "750e55f5", "metadata": {}, "outputs": [ { @@ -3340,158 +3350,150 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", + "\n", "\n", "\n", "\n", @@ -3517,94 +3519,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3630,108 +3632,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3758,6 +3760,7 @@ }, { "cell_type": "markdown", + "id": "5102e762", "metadata": {}, "source": [ "# Reading an AIGER-file\n", @@ -3772,6 +3775,7 @@ { "cell_type": "code", "execution_count": 12, + "id": "29d37752", "metadata": {}, "outputs": [], "source": [ @@ -3792,6 +3796,7 @@ { "cell_type": "code", "execution_count": 13, + "id": "8989722d", "metadata": {}, "outputs": [ { @@ -3800,108 +3805,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3916,6 +3921,7 @@ { "cell_type": "code", "execution_count": 14, + "id": "9560f368", "metadata": {}, "outputs": [ { @@ -3944,6 +3950,7 @@ { "cell_type": "code", "execution_count": 15, + "id": "9dadee6a", "metadata": {}, "outputs": [ { @@ -3960,6 +3967,7 @@ }, { "cell_type": "markdown", + "id": "734f10f1", "metadata": {}, "source": [ "An AIG circuit can be transformed into a monitor/Mealy machine. 
This can be used for instance to check that it does not intersect the negation of the specification." @@ -3968,6 +3976,7 @@ { "cell_type": "code", "execution_count": 16, + "id": "b29c95b4", "metadata": {}, "outputs": [ { @@ -3976,52 +3985,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f44ac2649c0> >" + " *' at 0x7fc5dc846690> >" ] }, "execution_count": 16, @@ -4035,6 +4044,7 @@ }, { "cell_type": "markdown", + "id": "09cad9f5", "metadata": {}, "source": [ "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", @@ -4044,6 +4054,7 @@ { "cell_type": "code", "execution_count": 17, + "id": "62ebedae", "metadata": {}, "outputs": [ { @@ -4052,114 +4063,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4191,6 +4202,7 @@ { "cell_type": "code", "execution_count": 18, + "id": "4a0bb1a7", "metadata": {}, "outputs": [ { @@ -4199,180 +4211,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4393,9 +4405,9 @@ ], "metadata": { "kernelspec": { - "display_name": "'Python Interactive'", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "748aac80-c5a9-4430-8d88-15820461ebdf" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -4407,7 +4419,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.9.2" } }, "nbformat": 4, diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index e1a88650a..1b1cf4fbb 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -35,18 +35,18 @@ tc.assertEqual(game.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "a" -acc-name: parity max odd 6 -Acceptance: 6 Inf(5) | (Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0))))) +acc-name: Streett 1 +Acceptance: 2 Fin(0) | Inf(1) properties: trans-labels explicit-labels state-acc colored complete properties: deterministic spot-state-player: 0 1 1 controllable-AP: --BODY-- -State: 0 {1} +State: 0 {0} [!0] 1 [0] 2 -State: 1 {4} +State: 1 {0} [t] 0 -State: 2 {5} +State: 2 {1} [t] 0 --END--""") From 5cd0ce14b0de9c822c4dd32a2fe793778a4e106c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 17 Mar 2022 14:37:57 +0100 Subject: [PATCH 011/606] fix mempool test to use __has_include This follows 6b88d6f35b2e. * tests/core/mempool.cc: Use __has_include too. --- tests/core/mempool.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/core/mempool.cc b/tests/core/mempool.cc index 0dae6ce0e..9d3610df7 100644 --- a/tests/core/mempool.cc +++ b/tests/core/mempool.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -103,7 +103,7 @@ namespace int main() { -#ifndef HAVE_VALGRIND_MEMCHECK_H +#if !__has_include() return 77; #endif @@ -186,4 +186,3 @@ int main() return 0; } - From 86de4d40529b61cd93bfcbe53ade5fa774c46450 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Fri, 18 Mar 2022 01:02:45 +0100 Subject: [PATCH 012/606] Introduce mealy_prod Product between mealy machines with propagation of synthesis outputs and additional assertions. 
Currently it only supports input complete machines * spot/twaalgos/mealy_machine.cc, spot/twaalgos/mealy_machine.hh: Here * bin/ltlsynt.cc: Use * tests/python/except.py, tests/python/synthesis.ipynb: Test --- bin/ltlsynt.cc | 3 +- spot/twaalgos/mealy_machine.cc | 54 ++++++++ spot/twaalgos/mealy_machine.hh | 10 ++ tests/python/except.py | 17 +++ tests/python/synthesis.ipynb | 220 ++++++++++++++++++++++++++++++--- 5 files changed, 288 insertions(+), 16 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 73ec6b2b1..4d118dd46 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -545,7 +545,8 @@ namespace && "ltlsynt: Cannot handle TGBA as strategy."); tot_strat = mealy_machines.front().mealy_like; for (size_t i = 1; i < mealy_machines.size(); ++i) - tot_strat = spot::product(tot_strat, mealy_machines[i].mealy_like); + tot_strat = spot::mealy_product(tot_strat, + mealy_machines[i].mealy_like); printer.print(tot_strat, timer_printer_dummy); } diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 99b762f16..36c162402 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,7 @@ namespace { + using namespace spot; bool is_deterministic_(const std::vector& ins) { const unsigned n_ins = ins.size(); @@ -64,6 +66,24 @@ namespace return false; return true; } + + bool is_complete_(const const_twa_graph_ptr& m, + const bdd& outs) + { + auto* sp = m->get_named_prop("state-player"); + const auto N = m->num_states(); + for (auto s = 0u; s < N; ++s) + { + if (sp && sp->at(s)) + continue; // No need tpo check player states + bdd all_cond = bddfalse; + for (const auto& e : m->out(s)) + all_cond |= bdd_exist(e.cond, outs); + if (all_cond != bddtrue) + return false; + } + return true; + } } @@ -3843,4 +3863,38 @@ namespace spot return true; } + twa_graph_ptr + mealy_product(const const_twa_graph_ptr& left, + const const_twa_graph_ptr& right) + { + bdd outs[] = {get_synthesis_outputs(left), + get_synthesis_outputs(right)}; + +#ifndef NDEBUG + for (const auto& [m, n, o] : {std::tuple{left, "left", outs[0]}, + {right, "right", outs[1]}}) + { + if (!is_mealy(m)) + throw std::runtime_error(std::string("mealy_prod(): ") + n + + " is not a mealy machine"); + if (!is_complete_(m, o)) + throw std::runtime_error(std::string("mealy_prod(): ") + n + + " is not input complete"); + } +#endif + + auto p = product(left, right); + bdd pouts = outs[0] & outs[1]; + set_synthesis_outputs(p, pouts); + +#ifndef NDEBUG + if (!is_mealy(p)) + throw std::runtime_error("mealy_prod(): Prooduct is not mealy"); + if (!is_complete_(p, pouts)) + throw std::runtime_error("mealy_prod(): Prooduct is not input complete. " + "Incompatible machines?"); +#endif + + return p; + } } diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 139f7cce2..77c3968ab 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -128,4 +128,14 @@ namespace spot is_split_mealy_specialization(const_twa_graph_ptr left, const_twa_graph_ptr right, bool verbose = false); + + /// \brief Product between two mealy machines \a left and \a right. + /// \pre The machines have to be both either split or unsplit, + /// input complete and compatible. All of this is check by assertion + /// \result The mealy machine representing the shared behaviour. 
+ /// The resulting machine has the same class (mealy/separated/split) + /// as the input machines + SPOT_API twa_graph_ptr + mealy_product(const const_twa_graph_ptr& left, + const const_twa_graph_ptr& right); } \ No newline at end of file diff --git a/tests/python/except.py b/tests/python/except.py index 8674721c9..3aeee7a3e 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -331,3 +331,20 @@ except RuntimeError as e: tc.assertIn("already registered", se) else: report_missing_exception() + + +si = spot.synthesis_info() +si.s = spot.synthesis_info.algo_LAR +g1 = spot.ltl_to_game("G((i0 xor i1) <-> o0)", ["o0"], si) +g2 = spot.ltl_to_game("G((i0 xor i1) <-> (!o0 & !o1))", ["o0", "o1"], si) +spot.solve_game(g1) +spot.solve_game(g2) +strat1 = spot.solved_game_to_separated_mealy(g1) +strat2 = spot.solved_game_to_separated_mealy(g2) +try: + stratcomp = spot.mealy_product(strat1, strat2) +except RuntimeError as e: + se = str(e) + tc.assertIn("Incompatible", se) +else: + report_missing_exception() \ No newline at end of file diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 8c706c481..7ec181ebe 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -696,7 +696,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc5ec2bf930> >" + " *' at 0x7f05083f22a0> >" ] }, "metadata": {}, @@ -2714,7 +2714,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3027,7 +3027,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc5dc8463f0> >" + " *' at 0x7f05082c7a20> >" ] }, "metadata": {}, @@ -3102,7 +3102,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc5ec2bf990> >" + " *' at 0x7f05083f2300> >" ] }, "metadata": {}, @@ -3179,7 +3179,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3306,7 +3306,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3327,8 +3327,13 @@ "It can happen that the complete specification of the controller can be separated into sub-specifications with DISJOINT output propositions, see Finkbeiner et al. Specification Decomposition for Reactive Synthesis.\n", "This results in multiple Mealy machines which have to be converted into one single AIG circuit.\n", "\n", - "This can be done using the function `mealy_machines_to_aig()`, which takes a vector of separated Mealy machines as argument.\n", - "In order for this to work, all Mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options strucuture." + "This can be done in two ways:\n", + "\n", + "1. Using the function `mealy_machines_to_aig()`, which takes a vector of separated mealy machines as argument.\n", + "2. Combine the mealy machines into one before passing it to `mealy_machine_to aig(). This currently only supports input complete machines of the same type (mealy/separated mealy/split mealy)\n", + "\n", + "Note that the method version is usually preferable as it is faster.\n", + "Also note that in order for this to work, all mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options strucuture." 
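One caveat of the second approach is worth spelling out: `mealy_product()` requires the machines to be compatible (and input complete) and raises an error otherwise. The following sketch mirrors the new test added to `tests/python/except.py` in this very patch.

```python
si = spot.synthesis_info()
si.s = spot.synthesis_info.algo_LAR
g1 = spot.ltl_to_game("G((i0 xor i1) <-> o0)", ["o0"], si)
g2 = spot.ltl_to_game("G((i0 xor i1) <-> (!o0 & !o1))", ["o0", "o1"], si)
spot.solve_game(g1)
spot.solve_game(g2)
s1 = spot.solved_game_to_separated_mealy(g1)
s2 = spot.solved_game_to_separated_mealy(g2)
try:
    spot.mealy_product(s1, s2)   # the two strategies disagree on o0
except RuntimeError as e:
    assert "Incompatible" in str(e)
```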
] }, { @@ -3623,7 +3628,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Circuit implementing both machines:\n" + "Circuit implementing both machines from a vector of machines:\n" ] }, { @@ -3733,7 +3738,185 @@ "\n" ], "text/plain": [ - " >" + " >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Combining the two machines into one.\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f05082c7ba0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "10->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "10->o1:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " >" ] }, "metadata": {}, @@ -3753,9 +3936,16 @@ "strat2 = spot.solved_game_to_separated_mealy(g2)\n", "print(\"Reduced strategies:\")\n", "display_inline(strat1, strat2)\n", - "print(\"Circuit implementing both machines:\")\n", + "#Method 1\n", + "print(\"Circuit implementing both machines from a vector of machines:\")\n", "aig = spot.mealy_machines_to_aig([strat1, strat2], \"isop\")\n", - "display(aig)" + "display(aig)\n", + "#Method 2\n", + "strat_comb = spot.mealy_product(strat1, strat2)\n", + "print(\"Combining the two machines into one.\")\n", + "display(strat_comb)\n", + "aig_comb = spot.mealy_machine_to_aig(strat_comb, \"isop\")\n", + "display(aig_comb)" ] }, { @@ -3906,7 +4096,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4030,7 +4220,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc5dc846690> >" + " *' at 0x7f05082c7690> >" ] }, "execution_count": 16, @@ -4419,7 +4609,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.8.10" } }, "nbformat": 4, From 97fc3f6c0bd9732e9ec16fdb3fd34c7973155ced Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Fri, 18 Mar 2022 15:27:46 +0100 Subject: [PATCH 013/606] Introduce simplify_mealy Convenience function dispatching to minimize_mealy and reduce_mealy. 
Change tests accordingly * spot/twaalgos/mealy_machine.cc, spot/twaalgos/mealy_machine.hh: Here * bin/ltlsynt.cc: Use simplify * spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh: Remove minimization, Update options * tests/core/ltlsynt.test, tests/python/synthesis.ipynb, tests/python/_synthesis.ipynb: Adapt --- bin/ltlsynt.cc | 64 +--- spot/twaalgos/mealy_machine.cc | 167 +++++++-- spot/twaalgos/mealy_machine.hh | 23 +- spot/twaalgos/synthesis.cc | 26 +- spot/twaalgos/synthesis.hh | 3 + tests/core/ltlsynt.test | 39 +- tests/python/_synthesis.ipynb | 660 +++++++++++++++++++++++---------- tests/python/synthesis.ipynb | 246 ++++++++++-- 8 files changed, 901 insertions(+), 327 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 4d118dd46..305c7a2f4 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -408,14 +408,13 @@ namespace spot::mealy_like ml; ml.success = spot::mealy_like::realizability_code::REALIZABLE_REGULAR; - if (opt_print_aiger) - // we do not care about the type, - // machine to aiger can handle it - ml.mealy_like = - spot::solved_game_to_mealy(arena, *gi); - else - ml.mealy_like = - spot::solved_game_to_separated_mealy(arena, *gi); + // By default this produces a split machine + ml.mealy_like = + spot::solved_game_to_mealy(arena, *gi); + // Keep the machine split for aiger + // else -> separated + spot::simplify_mealy_here(ml.mealy_like, *gi, + opt_print_aiger); ml.glob_cond = bddfalse; mealy_machines.push_back(ml); } @@ -429,51 +428,10 @@ namespace assert(m_like.mealy_like && "Expected success but found no mealy!"); if (!opt_real) { - spot::stopwatch sw_direct; - sw_direct.start(); - - if ((0 < gi->minimize_lvl) && (gi->minimize_lvl < 3)) - // Uses reduction or not, - // both work with mealy machines (non-separated) - reduce_mealy_here(m_like.mealy_like, gi->minimize_lvl == 2); - - auto delta = sw_direct.stop(); - - sw_direct.start(); - // todo better algo here? 
- m_like.mealy_like = - split_2step(m_like.mealy_like, - spot::get_synthesis_outputs(m_like.mealy_like), - false); - if (gi->bv) - gi->bv->split_time += sw_direct.stop(); - - sw_direct.start(); - if (gi->minimize_lvl >= 3) - { - sw_direct.start(); - // actual minimization, works on split mealy - m_like.mealy_like = minimize_mealy(m_like.mealy_like, - gi->minimize_lvl - 4); - delta = sw_direct.stop(); - } - - // If our goal is to have an aiger, - // we can use split or separated machines - if (!opt_print_aiger) - // Unsplit to have separated mealy - m_like.mealy_like = unsplit_mealy(m_like.mealy_like); - - if (gi->bv) - gi->bv->strat2aut_time += delta; - if (gi->verbose_stream) - *gi->verbose_stream << "final strategy has " - << m_like.mealy_like->num_states() - << " states and " - << m_like.mealy_like->num_edges() - << " edges\n" - << "minimization took " << delta - << " seconds\n"; + // Keep the machine split for aiger + // else -> separated + spot::simplify_mealy_here(m_like.mealy_like, *gi, + opt_print_aiger); } SPOT_FALLTHROUGH; } diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 36c162402..f985da506 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -135,11 +135,12 @@ namespace spot if (!is_mealy(m)) return false; - if (m->get_named_prop("state-player") == nullptr) + if (!m->get_named_prop("state-player")) { trace << "is_split_mealy(): Split mealy machine must define the named " "property \"state-player\"!\n"; } + auto sp = get_state_players(m); if (sp.size() != m->num_states()) @@ -1027,6 +1028,28 @@ namespace std::pair reorganize_mm(const_twa_graph_ptr mm, const std::vector& sp) { + // Check if the twa_graph already has the correct form + { + auto sp = get_state_players(mm); + // All player states mus be at the end + bool is_ok = true; + bool seen_player = false; + for (const auto& p : sp) + { + if (seen_player & !p) + { + is_ok = false; + break; + } + seen_player |= p; + } + if (is_ok) + return {mm, + mm->num_states() + - std::accumulate(sp.begin(), sp.end(), 0)}; + } + // We actually need to generate a new graph with the correct + // form // Purge unreachable and reorganize the graph std::vector renamed(mm->num_states(), -1u); const unsigned n_old = mm->num_states(); @@ -3607,7 +3630,7 @@ namespace spot twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, int premin) { - assert(is_split_mealy(mm)); + assert(is_mealy(mm)); stopwatch sw; sw.start(); @@ -3615,38 +3638,33 @@ namespace spot if ((premin < -1) || (premin > 1)) throw std::runtime_error("premin has to be -1, 0 or 1"); - auto orig_spref = get_state_players(mm); - - // Check if finite traces exist - // If so, deactivate fast minimization - // todo : this is overly conservative - // If unreachable states have no outgoing edges we do not care - // but testing this as well starts to be expensive... 
- if (premin != -1 - && [&]() - { - for (unsigned s = 0; s < mm->num_states(); ++s) - { - auto eit = mm->out(s); - if (eit.begin() == eit.end()) - return true; - } - return false; - }()) - premin = -1; - auto do_premin = [&]()->const_twa_graph_ptr { if (premin == -1) - return mm; + { + if (!mm->get_named_prop("state-player")) + return split_2step(mm, false); + else + return mm; + } else { + bool is_split = mm->get_named_prop("state-player"); // We have a split machine -> unsplit then resplit, // as reduce mealy works on separated - auto mms = unsplit_mealy(mm); - reduce_mealy_here(mms, premin == 1); - split_separated_mealy_here(mms); - return mms; + twa_graph_ptr mms; + if (is_split) + { + auto mmi = unsplit_2step(mm); + reduce_mealy_here(mmi, premin == 1); + split_separated_mealy_here(mmi); + return mmi; + } + else + { + auto mms = reduce_mealy(mm, premin == 1); + return split_2step(mms, false); + } } }; @@ -3689,9 +3707,13 @@ namespace spot auto early_exit = [&]() { // Always keep machines split - assert(is_split_mealy_specialization(mm, mmw)); + if (mm->get_named_prop("state-player")) + assert(is_split_mealy_specialization(mm, mmw)); + else + assert(is_split_mealy_specialization(split_2step(mm, false), + mmw)); return std::const_pointer_cast(mmw); - }; + }; // If the partial solution has the same number of // states as the original automaton -> we are done @@ -3897,4 +3919,91 @@ namespace spot return p; } + + + void + simplify_mealy_here(twa_graph_ptr& m, int minimize_lvl, + bool split_out) + { + auto si = synthesis_info(); + si.minimize_lvl = minimize_lvl; + return simplify_mealy_here(m, si, split_out); + } + + void + simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, + bool split_out) + { + const auto minimize_lvl = si.minimize_lvl; + assert(is_mealy(m) + && "simplify_mealy_here(): m is not a mealy machine!"); + if (minimize_lvl < 0 || 5 < minimize_lvl) + throw std::runtime_error("simplify_mealy_here(): minimize_lvl " + "must be between 0 and 5."); + + stopwatch sw; + if (si.bv) + sw.start(); + + bool is_separated = false; + if (0 < minimize_lvl && minimize_lvl < 3) + { + // unsplit if necessary + if (m->get_named_prop("state-player")) + { + m = unsplit_mealy(m); + is_separated = true; + } + reduce_mealy_here(m, minimize_lvl == 2); + } + else if (3 <= minimize_lvl) + m = minimize_mealy(m, minimize_lvl - 4); + + // Convert to demanded output format + bool is_split = m->get_named_prop("state-player"); + if (minimize_lvl == 0) + { + if (is_split && !split_out) + m = unsplit_mealy(m); + else if (!is_split && split_out) + m = split_2step(m, false); + } + else if (0 < minimize_lvl && minimize_lvl < 3 && split_out) + { + if (is_separated) + split_separated_mealy_here(m); + else + m = split_2step(m, false); + } + else if (3 <= minimize_lvl && !split_out) + m = unsplit_mealy(m); + + if (si.bv) + { + if (si.verbose_stream) + *si.verbose_stream << "simplification took " << sw.stop() + << " seconds\n"; + si.bv->simplify_strat_time += sw.stop(); + auto n_s_env = 0u; + auto n_e_env = 0u; + if (auto sp = m->get_named_prop("state-player")) + { + n_s_env = sp->size() - std::accumulate(sp->begin(), + sp->end(), + 0u); + std::for_each(m->edges().begin(), m->edges().end(), + [&n_e_env, &sp](const auto& e) + { + n_e_env += (*sp)[e.src]; + }); + } + else + { + n_s_env = m->num_states(); + n_e_env = m->num_edges(); + } + si.bv->nb_simpl_strat_states += n_s_env; + si.bv->nb_simpl_strat_edges += n_e_env; + } + } } diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 
77c3968ab..7406cb61d 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -23,6 +23,9 @@ namespace spot { + // Forward decl + struct synthesis_info; + /// todo /// Comment je faire au mieux pour expliquer mealy dans les doc @@ -104,7 +107,7 @@ namespace spot bool output_assignment = false); /// @} - /// \brief Minimizes a split (in)completely specified mealy machine + /// \brief Minimizes an (in)completely specified mealy machine /// The approach is described in \todo TACAS /// \param premin Use reduce_mealy before applying the /// main algorithm if demanded AND @@ -138,4 +141,22 @@ namespace spot SPOT_API twa_graph_ptr mealy_product(const const_twa_graph_ptr& left, const const_twa_graph_ptr& right); + + /// \brief Convenience function to call minimize_mealy or reduce_mealy. + /// Uses the same convention as ltlsynt for \a minimize_lvl: + /// 0: no reduction + /// 1: bisimulation based reduction + /// 2: bisimulation with output assignment + /// 3: SAT minimization + /// 4: 1 then 3 + /// 5: 2 then 3 + /// Minimizes the given machine \a m inplace, the parameter + /// \a split_out defines whether it is split or not + SPOT_API void + simplify_mealy_here(twa_graph_ptr& m, int minimize_lvl, + bool split_out); + + SPOT_API void + simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, + bool split_out); } \ No newline at end of file diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 6d4537206..7620a1098 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1051,22 +1051,25 @@ namespace spot if (!get_state_winner(arena, arena->get_init_state_number())) return nullptr; - // If we use minimizations 0,1 or 2 -> unsplit - const bool do_unsplit = gi.minimize_lvl < 3; - auto m = apply_strategy(arena, do_unsplit, false); + auto m = apply_strategy(arena, false, false); m->prop_universal(true); - if ((0 < gi.minimize_lvl) && (gi.minimize_lvl < 3)) - reduce_mealy_here(m, gi.minimize_lvl == 2); - else if (gi.minimize_lvl >= 3) - m = minimize_mealy(m, gi.minimize_lvl - 4); - if (gi.bv) { + auto sp = get_state_players(m); + auto n_s_env = sp.size() - std::accumulate(sp.begin(), + sp.end(), + 0u); + auto n_e_env = 0u; + std::for_each(m->edges().begin(), m->edges().end(), + [&n_e_env, &sp](const auto& e) + { + n_e_env += sp[e.src]; + }); gi.bv->strat2aut_time += sw.stop(); - gi.bv->nb_strat_states += m->num_states(); - gi.bv->nb_strat_edges += m->num_edges(); + gi.bv->nb_strat_states += n_s_env; + gi.bv->nb_strat_edges += n_e_env; } assert(is_mealy(m)); @@ -1200,7 +1203,8 @@ namespace spot { *vs << "direct strategy was found.\n" << "direct strat has " << strat->num_states() - << " states and " << strat->num_sets() << " colors\n"; + << " states, " << strat->num_edges() + << " edges and " << strat->num_sets() << " colors\n"; } return mealy_like{ mealy_like::realizability_code::REALIZABLE_REGULAR, diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 95590504c..46c0bc2bd 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -96,11 +96,14 @@ namespace spot double paritize_time = 0.0; double solve_time = 0.0; double strat2aut_time = 0.0; + double simplify_strat_time = 0.0; double aig_time = 0.0; unsigned nb_states_arena = 0; unsigned nb_states_arena_env = 0; unsigned nb_strat_states = 0; unsigned nb_strat_edges = 0; + unsigned nb_simpl_strat_states = 0; + unsigned nb_simpl_strat_edges = 0; unsigned nb_latches = 0; unsigned nb_gates = 0; bool realizable = false; diff --git 
a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 215143df2..335c1b01e 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -195,7 +195,7 @@ cat >exp < GFb tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors +direct strat has 1 states, 2 edges and 0 colors EOF ltlsynt --ins='a' --outs='b' -f 'GFa <-> GFb' --verbose --realizability 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -205,9 +205,8 @@ cat >exp < GFb tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds EOF ltlsynt --ins=a --outs=b -f 'GFa <-> GFb' --verbose --algo=ps 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -217,7 +216,7 @@ cat >exp < GFe tanslating formula done in X seconds direct strategy was found. -direct strat has 16 states and 0 colors +direct strat has 16 states, 81 edges and 0 colors EOF ltlsynt --ins='a,b,c,d' --outs='e' -f '(Fa & Fb & Fc & Fd) <-> GFe' \ --verbose --realizability --algo=lar 2> out @@ -561,9 +560,8 @@ cat >exp < GFb tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds trying to create strategy directly for Gc direct strategy might exist but was not found. translating formula done in X seconds @@ -574,6 +572,7 @@ split inputs and outputs done in X seconds automaton has 2 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds EOF ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -588,9 +587,8 @@ cat >exp < out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -615,9 +613,8 @@ cat >exp < GFa) & G((a & c) | (!a & !c)) tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds EOF ltlsynt -f '(GFb <-> GFa) && (G((a&c)|(!a&!c)))' --outs=b,c --verbose\ --verify --decompose=0 2> out @@ -630,9 +627,8 @@ cat >exp < FGb tanslating formula done in X seconds direct strategy was found. -direct strat has 2 states and 0 colors -final strategy has 2 states and 3 edges -minimization took X seconds +direct strat has 2 states, 3 edges and 0 colors +simplification took X seconds EOF ltlsynt -f "Fa <-> FGb" --outs=b,c --verbose --decompose=0 --verify 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -651,6 +647,7 @@ split inputs and outputs done in X seconds automaton has 9 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f "Ga <-> Gb" --outs=b --verbose --decompose=0 --verify --aiger 2> out @@ -668,6 +665,7 @@ split inputs and outputs done in X seconds automaton has 4 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds trying to create strategy directly for (a | x) -> x direct strategy might exist but was not found. 
translating formula done in X seconds @@ -678,6 +676,7 @@ split inputs and outputs done in X seconds automaton has 4 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f '((a|x) & (b | y) & b) => (x & y)' --outs="x,y" --aiger=ite\ @@ -697,6 +696,7 @@ split inputs and outputs done in X seconds automaton has 2 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds trying to create strategy directly for Gy direct strategy might exist but was not found. translating formula done in X seconds @@ -707,6 +707,7 @@ split inputs and outputs done in X seconds automaton has 2 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f 'G!(!x | !y)' --outs="x, y" --aiger=ite --verify --verbose 2> out @@ -760,6 +761,7 @@ split inputs and outputs done in X seconds automaton has 5 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f '(a & b) U (b & c)' --outs=b,c --decompose=yes --aiger --verbose\ @@ -780,6 +782,7 @@ split inputs and outputs done in X seconds automaton has 4 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds trying to create strategy directly for a -> c direct strategy might exist but was not found. translating formula done in X seconds @@ -790,6 +793,7 @@ split inputs and outputs done in X seconds automaton has 4 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds trying to create strategy directly for a -> d direct strategy might exist but was not found. 
translating formula done in X seconds @@ -800,6 +804,7 @@ split inputs and outputs done in X seconds automaton has 4 states solving game with acceptance: Streett 1 game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f 'a => (b & c & d)' --outs=b,c,d, --decompose=yes\ diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 3a91415e9..5866057a1 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -3,6 +3,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "c54c43ba", "metadata": {}, "outputs": [], "source": [ @@ -12,6 +13,7 @@ }, { "cell_type": "markdown", + "id": "0576f64a", "metadata": {}, "source": [ "Additional testing for synthesis" @@ -19,6 +21,7 @@ }, { "cell_type": "markdown", + "id": "e25b7989", "metadata": {}, "source": [ "Testing the different methods to solve" @@ -27,6 +30,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "007107a6", "metadata": {}, "outputs": [ { @@ -50,6 +54,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "a7859f19", "metadata": {}, "outputs": [ { @@ -57,43 +62,72 @@ "output_type": "stream", "text": [ "HOA: v1\n", - "States: 7\n", + "States: 21\n", "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[!0&!1] 1\n", - "[!0&1] 2\n", - "[0&!1] 3\n", - "[0&1] 4\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0&1&!2] 4\n", - "[0&!1&!2] 3\n", - "[!0&1&!2] 2\n", - "[!0&!1&!2] 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[0&!2] 4\n", - "[!0&!2] 2\n", + "[t] 1\n", "State: 3\n", - "[!0&1&2] 5\n", - "[0&1&2] 4\n", - "[!0&!1&2] 6\n", - "[0&!1&2] 3\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[!0&2] 5\n", - "[0&2] 4\n", + "[t] 3\n", "State: 5\n", - "[!0&!2] 5\n", - "[0&!2] 4\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", "State: 6\n", - "[!0&1&!2] 5\n", - "[0&1&!2] 4\n", - "[!0&!1&!2] 6\n", - "[0&!1&!2] 3\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", "States: 7\n", @@ -141,163 +175,137 @@ "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[!0&!1] 7\n", - "[!0&1] 8\n", - "[0&!1] 9\n", - "[0&1] 10\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0&1] 11\n", - "[0&!1] 12\n", - "[!0&1] 13\n", - "[!0&!1] 14\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[0] 11\n", - "[!0] 13\n", + "[t] 1\n", "State: 3\n", - "[!0&1] 15\n", - "[0&1] 16\n", + "[0] 13\n", + "[!0] 19\n", 
+ "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", "[!0&!1] 17\n", "[0&!1] 18\n", - "State: 4\n", - "[!0] 15\n", - "[0] 16\n", - "State: 5\n", - "[!0] 19\n", - "[0] 11\n", - "State: 6\n", - "[!0&1] 19\n", - "[0&1] 11\n", - "[!0&!1] 20\n", - "[0&!1] 12\n", - "State: 7\n", - "[t] 1\n", - "State: 8\n", - "[t] 2\n", - "State: 9\n", - "[t] 3\n", - "State: 10\n", - "[t] 4\n", - "State: 11\n", - "[!2] 4\n", - "State: 12\n", - "[!2] 3\n", - "State: 13\n", - "[!2] 2\n", - "State: 14\n", - "[!2] 1\n", "State: 15\n", - "[2] 5\n", + "[2] 14\n", "State: 16\n", - "[2] 4\n", + "[2] 5\n", "State: 17\n", - "[2] 6\n", + "[!2] 14\n", "State: 18\n", - "[2] 3\n", - "State: 19\n", "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", "State: 20\n", - "[!2] 6\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", - "States: 2\n", - "Start: 1\n", + "States: 21\n", + "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0&1&!2] 1\n", - "[0&!1&!2] 1\n", - "[!0&1&!2] 0\n", - "[!0&!1&!2] 0\n", - "State: 1\n", - "[!0&1&2] 0\n", - "[0&1&2] 1\n", - "[!0&!1&2] 0\n", - "[0&!1&2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 2\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0&1&!2] 1\n", - "[0&!1&!2] 1\n", - "[!0&1&!2] 0\n", - "[!0&!1&!2] 0\n", - "State: 1\n", - "[!0&1&2] 0\n", - "[0&1&2] 1\n", - "[!0&!1&2] 0\n", - "[0&!1&2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 6\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0&1] 2\n", - "[0&!1] 2\n", - "[!0&1] 3\n", - "[!0&!1] 3\n", - "State: 1\n", + "[!0&!1] 2\n", "[!0&1] 4\n", - "[0&1] 5\n", - "[!0&!1] 4\n", - "[0&!1] 5\n", - "State: 2\n", - "[!2] 1\n", - "State: 3\n", - "[!2] 0\n", - "State: 4\n", - "[2] 0\n", - "State: 5\n", - "[2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 6\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0] 2\n", - "[!0] 3\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0] 4\n", - "[!0] 5\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[!2] 1\n", + "[t] 1\n", "State: 3\n", - "[!2] 0\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[2] 1\n", + "[t] 3\n", "State: 5\n", - "[2] 0\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + 
"[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", - "States: 2\n", + "States: 7\n", "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", @@ -306,36 +314,277 @@ "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0&2] 0\n", - "[!0&2] 1\n", + "[!0&!1] 1\n", + "[!0&1] 2\n", + "[0&!1] 3\n", + "[0&1] 4\n", "State: 1\n", - "[0&!2] 0\n", - "[!0&!2] 1\n", + "[0&1&!2] 4\n", + "[0&!1&!2] 3\n", + "[!0&1&!2] 2\n", + "[!0&!1&!2] 1\n", + "State: 2\n", + "[0&!2] 4\n", + "[!0&!2] 2\n", + "State: 3\n", + "[!0&1&2] 5\n", + "[0&1&2] 4\n", + "[!0&!1&2] 6\n", + "[0&!1&2] 3\n", + "State: 4\n", + "[!0&2] 5\n", + "[0&2] 4\n", + "State: 5\n", + "[!0&!2] 5\n", + "[0&!2] 4\n", + "State: 6\n", + "[!0&1&!2] 5\n", + "[0&1&!2] 4\n", + "[!0&!1&!2] 6\n", + "[0&!1&!2] 3\n", "--END--\n", "HOA: v1\n", - "States: 6\n", - "Start: 1\n", + "States: 21\n", + "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0] 2\n", - "[!0] 3\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0] 4\n", - "[!0] 5\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[!2] 1\n", + "[t] 1\n", "State: 3\n", - "[!2] 0\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[2] 1\n", + "[t] 3\n", "State: 5\n", - "[2] 0\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", + "--END--\n", + "HOA: v1\n", + "States: 21\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", + "State: 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", + "State: 2\n", + "[t] 1\n", + "State: 3\n", + "[0] 13\n", + "[!0] 19\n", + "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 
16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", + "--END--\n", + "HOA: v1\n", + "States: 7\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 1\n", + "[!0&1] 2\n", + "[0&!1] 3\n", + "[0&1] 4\n", + "State: 1\n", + "[0&1&!2] 4\n", + "[0&!1&!2] 3\n", + "[!0&1&!2] 2\n", + "[!0&!1&!2] 1\n", + "State: 2\n", + "[0&!2] 4\n", + "[!0&!2] 2\n", + "State: 3\n", + "[!0&1&2] 5\n", + "[0&1&2] 4\n", + "[!0&!1&2] 6\n", + "[0&!1&2] 3\n", + "State: 4\n", + "[!0&2] 5\n", + "[0&2] 4\n", + "State: 5\n", + "[!0&!2] 5\n", + "[0&!2] 4\n", + "State: 6\n", + "[!0&1&!2] 5\n", + "[0&1&!2] 4\n", + "[!0&!1&!2] 6\n", + "[0&!1&!2] 3\n", + "--END--\n", + "HOA: v1\n", + "States: 21\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", + "State: 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", + "State: 2\n", + "[t] 1\n", + "State: 3\n", + "[0] 13\n", + "[!0] 19\n", + "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n" ] } @@ -345,7 +594,7 @@ "mm0 = spot.solved_game_to_mealy(game, si)\n", "msep0 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit0 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_separated_mealy(mm0)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm0))\n", "assert(spot.is_separated_mealy(msep0))\n", "assert(spot.is_split_mealy(msplit0))\n", "print(mm0.to_str(\"hoa\"))\n", @@ -355,7 +604,7 @@ "mm2 = spot.solved_game_to_mealy(game, si)\n", "msep2 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit2 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_separated_mealy(mm2)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm2))\n", "assert(spot.is_separated_mealy(msep2))\n", "assert(spot.is_split_mealy(msplit2))\n", "print(mm2.to_str(\"hoa\"))\n", @@ -365,7 +614,7 @@ "mm3 = spot.solved_game_to_mealy(game, si)\n", "msep3 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit3 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_split_mealy(mm3)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm3))\n", "assert(spot.is_separated_mealy(msep3))\n", "assert(spot.is_split_mealy(msplit3))\n", "print(mm3.to_str(\"hoa\"))\n", @@ -376,31 
+625,48 @@ { "cell_type": "code", "execution_count": 4, + "id": "fb57ac53", "metadata": {}, "outputs": [], "source": [ "mus0 = spot.unsplit_mealy(msplit0)\n", "mus2 = spot.unsplit_mealy(msplit2)\n", - "mus3 = spot.unsplit_mealy(msplit3)\n", - "mmus3 = spot.unsplit_mealy(mm3)" + "mus3 = spot.unsplit_mealy(msplit3)" ] }, { "cell_type": "code", "execution_count": 5, + "id": "40fc65b5", "metadata": {}, "outputs": [], "source": [ - "assert(mm0.equivalent_to(msep0))\n", - "assert(mm0.equivalent_to(mus0))\n", - "assert(mm2.equivalent_to(msep2))\n", - "assert(mm2.equivalent_to(mus2))\n", - "assert(mmus3.equivalent_to(msep3))\n", - "assert(mmus3.equivalent_to(mus3))" + "assert(mus0.equivalent_to(msep0))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f6d8b29c", + "metadata": {}, + "outputs": [], + "source": [ + "assert(mus2.equivalent_to(msep2))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "db8d47f2", + "metadata": {}, + "outputs": [], + "source": [ + "assert(mus3.equivalent_to(msep3))" ] }, { "cell_type": "markdown", + "id": "c19beeb0", "metadata": {}, "source": [ "Testing related to #495" @@ -408,7 +674,8 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, + "id": "3736cd1b", "metadata": {}, "outputs": [ { @@ -470,10 +737,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f7b0> >" + " *' at 0x7f7458055570> >" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -486,7 +753,8 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, + "id": "da6a7802", "metadata": {}, "outputs": [ { @@ -552,10 +820,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f7b0> >" + " *' at 0x7f7458055570> >" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -567,7 +835,8 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, + "id": "987219a4", "metadata": {}, "outputs": [ { @@ -675,10 +944,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f630> >" + " *' at 0x7f74580553c0> >" ] }, - "execution_count": 8, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -691,7 +960,8 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, + "id": "958d81f2", "metadata": {}, "outputs": [ { @@ -772,10 +1042,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee36703f60> >" + " *' at 0x7f743a5ca6c0> >" ] }, - "execution_count": 9, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -798,7 +1068,8 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, + "id": "078bb43e", "metadata": {}, "outputs": [ { @@ -939,10 +1210,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f930> >" + " *' at 0x7f7458059f90> >" ] }, - "execution_count": 10, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -955,7 +1226,8 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, + "id": "05b4a138", "metadata": {}, "outputs": [ { @@ -1147,10 +1419,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716fa20> >" + " *' at 0x7f7458055870> >" ] }, - "execution_count": 11, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -1164,7 +1436,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 7ec181ebe..47cafbf41 100644 
--- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -9,7 +9,8 @@ "source": [ "import spot\n", "spot.setup()\n", - "from spot.jupyter import display_inline" + "from spot.jupyter import display_inline\n", + "import time" ] }, { @@ -696,7 +697,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f05083f22a0> >" + " *' at 0x7f7a0c2640c0> >" ] }, "metadata": {}, @@ -1668,7 +1669,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c264e40> >" ] }, "metadata": {}, @@ -1855,7 +1856,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c264d50> >" ] }, "metadata": {}, @@ -1992,7 +1993,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c2643c0> >" ] }, "metadata": {}, @@ -2085,7 +2086,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c264a20> >" ] }, "metadata": {}, @@ -2178,7 +2179,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c264ea0> >" ] }, "metadata": {}, @@ -2315,7 +2316,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7f7a0c2646f0> >" ] }, "metadata": {}, @@ -2342,8 +2343,9 @@ "for i in range(6):\n", " print(\"simplification lvl \", descr[i])\n", " si.minimize_lvl = i\n", - " mealy = spot.solved_game_to_separated_mealy(game, si)\n", - " display(mealy.show())" + " mealy = spot.solved_game_to_mealy(game, si)\n", + " spot.simplify_mealy_here(mealy, si.minimize_lvl, False)\n", + " display(mealy)" ] }, { @@ -2714,7 +2716,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3027,7 +3029,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f05082c7a20> >" + " *' at 0x7f7a0c009f00> >" ] }, "metadata": {}, @@ -3042,6 +3044,203 @@ "\n", "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f7a0c009180> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", "\n", "\n", @@ -3099,10 +3298,11 @@ "!o0\n", "\n", "\n", - "\n" + "\n", + "
" ], "text/plain": [ - " *' at 0x7f05083f2300> >" + "" ] }, "metadata": {}, @@ -3179,7 +3379,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3191,8 +3391,10 @@ "spot.solve_game(game)\n", "spot.highlight_strategy(game)\n", "display(game)\n", - "mealy = spot.solved_game_to_separated_mealy(game)\n", + "mealy = spot.solved_game_to_mealy(game)\n", "display(mealy)\n", + "spot.simplify_mealy_here(mealy, 2, True)\n", + "display_inline(mealy, spot.unsplit_mealy(mealy))\n", "aig = spot.mealy_machine_to_aig(mealy, \"isop\")\n", "display(aig)" ] @@ -3306,7 +3508,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3738,7 +3940,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3803,7 +4005,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f05082c7ba0> >" + " *' at 0x7f7a0c009de0> >" ] }, "metadata": {}, @@ -3916,7 +4118,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4096,7 +4298,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4220,7 +4422,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f05082c7690> >" + " *' at 0x7f7a0c2640c0> >" ] }, "execution_count": 16, From bb7072402a4eb9865baaa0a424a6f8613caa214b Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Mon, 21 Mar 2022 10:51:35 +0100 Subject: [PATCH 014/606] Removing eeroneaus test * tests/python/except.py: Here --- tests/python/except.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/python/except.py b/tests/python/except.py index 3aeee7a3e..8674721c9 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -331,20 +331,3 @@ except RuntimeError as e: tc.assertIn("already registered", se) else: report_missing_exception() - - -si = spot.synthesis_info() -si.s = spot.synthesis_info.algo_LAR -g1 = spot.ltl_to_game("G((i0 xor i1) <-> o0)", ["o0"], si) -g2 = spot.ltl_to_game("G((i0 xor i1) <-> (!o0 & !o1))", ["o0", "o1"], si) -spot.solve_game(g1) -spot.solve_game(g2) -strat1 = spot.solved_game_to_separated_mealy(g1) -strat2 = spot.solved_game_to_separated_mealy(g2) -try: - stratcomp = spot.mealy_product(strat1, strat2) -except RuntimeError as e: - se = str(e) - tc.assertIn("Incompatible", se) -else: - report_missing_exception() \ No newline at end of file From 3ed337ec4642603b1ab7fad7566a7f0d3f274fe8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 22 Mar 2022 12:18:25 +0100 Subject: [PATCH 015/606] graph: fix invalid read Reported by Florian Renkin. * spot/graph/graph.hh (sort_edges_of): Fix invalid read when sorting a state without successor. Seen on core/tgbagraph.test. --- spot/graph/graph.hh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 75e0977b7..fa276131d 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2021 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -1243,14 +1243,19 @@ namespace spot //dump_storage(std::cerr); auto pi = [&](unsigned t1, unsigned t2) {return p(edges_[t1], edges_[t2]); }; + + // Sort the outgoing edges of each selected state according + // to predicate p. Do that in place. 
std::vector sort_idx_; - for (unsigned i = 0; i < num_states(); ++i) + unsigned ns = num_states(); + for (unsigned i = 0; i < ns; ++i) { if (to_sort_ptr && !(*to_sort_ptr)[i]) continue; - - sort_idx_.clear(); unsigned t = states_[i].succ; + if (t == 0) + continue; + sort_idx_.clear(); do { sort_idx_.push_back(t); From e9c1aeaa54b7ab0c872f4de0f77f18abc034b6c3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 22 Mar 2022 12:22:48 +0100 Subject: [PATCH 016/606] * spot/twaalgos/gfguarantee.hh: Typos in comments. --- spot/twaalgos/gfguarantee.hh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/gfguarantee.hh b/spot/twaalgos/gfguarantee.hh index 5124667f4..40cb16f97 100644 --- a/spot/twaalgos/gfguarantee.hh +++ b/spot/twaalgos/gfguarantee.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -48,7 +48,7 @@ namespace spot /// \brief Convert GF(φ) into a (D)BA if φ is a guarantee property. /// /// If the formula \a gf has the form GΦ where Φ matches either F(φ) - /// or F(φ₁)|F(φ₂)|...|F(φₙ), we translate Φ into A_Φ and attempt to + /// or F(φ₁)&F(φ₂)&...&F(φₙ), we translate Φ into A_Φ and attempt to /// minimize it to a WDBA W_Φ. If the resulting automaton is /// terminal, we then call g_f_terminal_inplace(W_Φ). If \a /// deterministic is not set, we keep the minimized automaton only From 0dd36e9a53a4c94ece27a95b6884cde6d8c18911 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Mon, 21 Mar 2022 10:46:42 +0100 Subject: [PATCH 017/606] ltlsynt: don't fail if --outs or --ins is set to empty * bin/ltlsynt.cc: here * tests/core/ltlsynt.test: add tests --- bin/ltlsynt.cc | 58 +++++++++++++++++++++++------------------ tests/core/ltlsynt.test | 8 ++++++ 2 files changed, 40 insertions(+), 26 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 305c7a2f4..25b8bb04a 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -135,8 +135,8 @@ Exit status:\n\ 1 if the input problem is not realizable\n\ 2 if any error has been reported"; -static std::vector all_output_aps; -static std::vector all_input_aps; +static std::optional> all_output_aps; +static std::optional> all_input_aps; static const char* opt_csv = nullptr; static bool opt_print_pg = false; @@ -545,12 +545,12 @@ namespace class ltl_processor final : public job_processor { private: - std::vector input_aps_; - std::vector output_aps_; + std::optional> input_aps_; + std::optional> output_aps_; public: - ltl_processor(std::vector input_aps_, - std::vector output_aps_) + ltl_processor(std::optional> input_aps_, + std::optional> output_aps_) : input_aps_(std::move(input_aps_)), output_aps_(std::move(output_aps_)) { @@ -560,11 +560,13 @@ namespace const char* filename, int linenum) override { auto unknown_aps = [](spot::formula f, - const std::vector& known, - const std::vector* known2 = nullptr) + const std::optional>& known, + const std::optional>& known2 = {}) { std::vector unknown; std::set seen; + // If we don't have --ins and --outs, we must not find an AP. 
+ bool can_have_ap = known.has_value(); f.traverse([&](const spot::formula& s) { if (s.is(spot::op::ap)) @@ -572,10 +574,11 @@ namespace if (!seen.insert(s).second) return false; const std::string& a = s.ap_name(); - if (std::find(known.begin(), known.end(), a) == known.end() - && (!known2 + if (!can_have_ap + || (std::find(known->begin(), known->end(), a) == known->end() + && (!known2.has_value() || std::find(known2->begin(), - known2->end(), a) == known2->end())) + known2->end(), a) == known2->end()))) unknown.push_back(a); } return false; @@ -585,30 +588,30 @@ namespace // Decide which atomic propositions are input or output. int res; - if (input_aps_.empty() && !output_aps_.empty()) + if (!input_aps_.has_value() && output_aps_.has_value()) { - res = solve_formula(f, unknown_aps(f, output_aps_), output_aps_); + res = solve_formula(f, unknown_aps(f, output_aps_), *output_aps_); } - else if (output_aps_.empty() && !input_aps_.empty()) + else if (!output_aps_.has_value() && input_aps_.has_value()) { - res = solve_formula(f, input_aps_, unknown_aps(f, input_aps_)); + res = solve_formula(f, *input_aps_, unknown_aps(f, input_aps_)); } - else if (output_aps_.empty() && input_aps_.empty()) + else if (!output_aps_.has_value() && !input_aps_.has_value()) { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "one of --ins or --outs should list '%s'", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } else { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "both --ins and --outs are specified, " "but '%s' is unlisted", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } if (opt_csv) @@ -639,23 +642,25 @@ parse_opt(int key, char *arg, struct argp_state *) break; case OPT_INPUT: { + all_input_aps.emplace(std::vector{}); std::istringstream aps(arg); std::string ap; while (std::getline(aps, ap, ',')) { ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_input_aps.push_back(str_tolower(ap)); + all_input_aps->push_back(str_tolower(ap)); } break; } case OPT_OUTPUT: { + all_output_aps.emplace(std::vector{}); std::istringstream aps(arg); std::string ap; while (std::getline(aps, ap, ',')) { ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_output_aps.push_back(str_tolower(ap)); + all_output_aps->push_back(str_tolower(ap)); } break; } @@ -716,10 +721,11 @@ main(int argc, char **argv) check_no_formula(); // Check if inputs and outputs are distinct - for (const std::string& ai : all_input_aps) - if (std::find(all_output_aps.begin(), all_output_aps.end(), ai) - != all_output_aps.end()) - error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); + if (all_input_aps.has_value() && all_output_aps.has_value()) + for (const std::string& ai : *all_input_aps) + if (std::find(all_output_aps->begin(), all_output_aps->end(), ai) + != all_output_aps->end()) + error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); ltl_processor processor(all_input_aps, all_output_aps); if (int res = processor.run(); res == 0 || res == 1) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 335c1b01e..c22cd4e6b 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ 
-829,3 +829,11 @@ ltlsynt -f '!(F(a | b))' --outs=b, --decompose=yes \ --verbose --aiger 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +ltlsynt --ins="" -f "GFa" +ltlsynt --outs="" -f "GFb" | grep "UNREALIZABLE" + +ltlsynt --outs="" -f "1" + +ltlsynt --outs="" --ins="" -f "GFa" 2>&1 | \ + grep "both --ins and --outs are specified" \ No newline at end of file From dd587476593a8b179d1fafaf24d7dfccf517e355 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Mon, 21 Mar 2022 13:56:50 +0100 Subject: [PATCH 018/606] synthesis.ipynb: remove useless import * tests/python/synthesis.ipynb: here. --- tests/python/synthesis.ipynb | 3589 +++++++++++++++++----------------- 1 file changed, 1778 insertions(+), 1811 deletions(-) diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 47cafbf41..3738e6f72 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,19 +3,16 @@ { "cell_type": "code", "execution_count": 1, - "id": "1b9b5964", "metadata": {}, "outputs": [], "source": [ "import spot\n", "spot.setup()\n", - "from spot.jupyter import display_inline\n", - "import time" + "from spot.jupyter import display_inline" ] }, { "cell_type": "markdown", - "id": "b465c6ba", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", @@ -40,7 +37,6 @@ { "cell_type": "code", "execution_count": 2, - "id": "8576bdad", "metadata": {}, "outputs": [ { @@ -57,647 +53,647 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + 
"i1\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "i0 & !i1\n", 
- "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c2640c0> >" + " *' at 0x7fb964737d50> >" ] }, "metadata": {}, @@ -716,7 +712,6 @@ }, { "cell_type": "markdown", - "id": "fbd7095b", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." 
@@ -725,7 +720,6 @@ { "cell_type": "code", "execution_count": 3, - "id": "02d7525e", "metadata": {}, "outputs": [ { @@ -741,586 +735,586 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + " viewBox=\"0.00 0.00 650.40 360.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", 
"5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1342,7 +1336,6 @@ }, { "cell_type": "markdown", - "id": "d66f3da1", "metadata": {}, "source": [ "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). 
Mealy machines with this type of labels are called \"separated\" in Spot." @@ -1351,7 +1344,6 @@ { "cell_type": "code", "execution_count": 4, - "id": "89342e18", "metadata": {}, "outputs": [ { @@ -1367,309 +1359,309 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - 
"!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c264e40> >" + " *' at 0x7fb9646b7870> >" ] }, "metadata": {}, @@ -1688,175 +1680,175 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c264d50> >" + " *' at 0x7fb9646b7de0> >" ] }, "metadata": {}, @@ -1875,125 +1867,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + 
"\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c2643c0> >" + " *' at 0x7fb9646b7630> >" ] }, "metadata": {}, @@ -2012,81 +2004,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c264a20> >" + " *' at 0x7fb9646b77b0> >" ] }, "metadata": {}, @@ -2105,81 +2097,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c264ea0> >" + " *' at 0x7fb9646b7f00> >" ] }, "metadata": {}, @@ -2198,125 +2190,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", 
"\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c2646f0> >" + " *' at 0x7fb9646b74b0> >" ] }, "metadata": {}, @@ -2350,7 +2342,6 @@ }, { "cell_type": "markdown", - "id": "435c9bae", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2359,7 +2350,6 @@ { "cell_type": "code", "execution_count": 5, - "id": "688a1ced", "metadata": {}, "outputs": [ { @@ -2368,260 +2358,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2641,10 +2631,9 @@ }, { "cell_type": "markdown", - "id": "e3bb2d7d", "metadata": {}, "source": [ - "# Converting the separated mealy machine to AIG\n", + "# Converting the separated Mealy machine to AIG\n", "\n", "A separated Mealy machine can be converted to a circuit in the [AIGER format](http://fmv.jku.at/aiger/FORMAT.aiger) using `mealy_machine_to_aig()`. This takes a second argument specifying what type of encoding to use (exactly like `ltlsynt`'s `--aiger=...` option). 
\n", "\n", @@ -2654,7 +2643,6 @@ { "cell_type": "code", "execution_count": 6, - "id": "b5fea2d1", "metadata": {}, "outputs": [ { @@ -2663,60 +2651,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2730,7 +2718,6 @@ }, { "cell_type": "markdown", - "id": "2a1a6fc9", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2739,7 +2726,6 @@ { "cell_type": "code", "execution_count": 7, - "id": "f909d578", "metadata": {}, "outputs": [ { @@ -2748,54 +2734,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2815,7 +2801,6 @@ }, { "cell_type": "markdown", - "id": "2c87313f", "metadata": {}, "source": [ "To encode the circuit in the AIGER format (ASCII version) use:" @@ -2824,7 +2809,6 @@ { "cell_type": "code", "execution_count": 8, - "id": "1b787f50", "metadata": {}, "outputs": [ { @@ -2848,7 +2832,6 @@ }, { "cell_type": "markdown", - "id": "72038258", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2856,7 +2839,6 @@ }, { "cell_type": "markdown", - "id": "3fbb3c2f", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). 
In that case those \n", @@ -2868,7 +2850,6 @@ { "cell_type": "code", "execution_count": 9, - "id": "8ed4e382", "metadata": {}, "outputs": [ { @@ -2877,159 +2858,159 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c009f00> >" + " *' at 0x7fb9646b7570> >" ] }, "metadata": {}, @@ -3041,112 +3022,112 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c009180> >" + " *' at 0x7fb9646b7630> >" ] }, "metadata": {}, @@ -3158,144 +3139,144 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "t\n", - "[all]\n", + " viewBox=\"0.00 0.00 282.00 148.79\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", @@ -3314,72 +3295,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 143.20 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3401,7 +3382,6 @@ }, { "cell_type": "markdown", - "id": "a82ca6c6", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3410,7 +3390,6 @@ { "cell_type": "code", "execution_count": 10, - "id": "a86436a7", "metadata": {}, "outputs": [ { @@ -3419,96 +3398,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3521,7 +3500,6 @@ }, { "cell_type": "markdown", - "id": "9af5c9b9", "metadata": {}, "source": [ "# Combining Mealy machines\n", @@ -3531,7 +3509,7 @@ "\n", "This can be done in two ways:\n", "\n", - "1. Using the function `mealy_machines_to_aig()`, which takes a vector of separated mealy machines as argument.\n", + "1. Using the function `mealy_machines_to_aig()`, which takes a vector of separated Mealy machines as argument.\n", "2. Combine the mealy machines into one before passing it to `mealy_machine_to aig(). 
This currently only supports input complete machines of the same type (mealy/separated mealy/split mealy)\n", "\n", "Note that the method version is usually preferable as it is faster.\n", @@ -3541,7 +3519,6 @@ { "cell_type": "code", "execution_count": 11, - "id": "750e55f5", "metadata": {}, "outputs": [ { @@ -3557,150 +3534,150 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", + "\n", "\n", "\n", "\n", @@ -3726,94 +3703,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3839,108 +3816,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3959,53 +3936,53 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0 & o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0 & !o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c009de0> >" + " *' at 0x7fb9646b7c60> >" ] }, "metadata": {}, @@ -4017,108 +3994,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4152,7 +4129,6 @@ }, { "cell_type": "markdown", - "id": "5102e762", "metadata": {}, 
"source": [ "# Reading an AIGER-file\n", @@ -4167,7 +4143,6 @@ { "cell_type": "code", "execution_count": 12, - "id": "29d37752", "metadata": {}, "outputs": [], "source": [ @@ -4188,7 +4163,6 @@ { "cell_type": "code", "execution_count": 13, - "id": "8989722d", "metadata": {}, "outputs": [ { @@ -4197,108 +4171,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4313,7 +4287,6 @@ { "cell_type": "code", "execution_count": 14, - "id": "9560f368", "metadata": {}, "outputs": [ { @@ -4342,7 +4315,6 @@ { "cell_type": "code", "execution_count": 15, - "id": "9dadee6a", "metadata": {}, "outputs": [ { @@ -4359,7 +4331,6 @@ }, { "cell_type": "markdown", - "id": "734f10f1", "metadata": {}, "source": [ "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." @@ -4368,7 +4339,6 @@ { "cell_type": "code", "execution_count": 16, - "id": "b29c95b4", "metadata": {}, "outputs": [ { @@ -4377,52 +4347,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f7a0c2640c0> >" + " *' at 0x7fb9646c8360> >" ] }, "execution_count": 16, @@ -4436,7 +4406,6 @@ }, { "cell_type": "markdown", - "id": "09cad9f5", "metadata": {}, "source": [ "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", @@ -4446,7 +4415,6 @@ { "cell_type": "code", "execution_count": 17, - "id": "62ebedae", "metadata": {}, "outputs": [ { @@ -4455,114 +4423,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4594,7 +4562,6 @@ { "cell_type": "code", "execution_count": 18, - "id": "4a0bb1a7", "metadata": {}, "outputs": [ { @@ -4603,180 +4570,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4797,7 +4764,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -4811,7 +4778,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.3" } }, "nbformat": 4, From 8d9597d80dcaa1ef98f65985a7c8a0964e2631df Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Mon, 21 Mar 2022 15:58:58 +0100 Subject: [PATCH 019/606] ltlsynt: add --algo=acd * bin/ltlsynt.cc: Add "acd" to the list of possible paritization algorithms used by ltlsynt * spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh: Add ACD as paritisation algorithm * tests/core/ltlsynt.test: add tests --- bin/ltlsynt.cc | 12 +++++++++--- spot/twaalgos/synthesis.cc | 14 ++++++++++++-- spot/twaalgos/synthesis.hh | 1 + tests/core/ltlsynt.test | 12 +++++++++++- 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 25b8bb04a..8dcd9511a 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -69,7 +69,7 @@ static const argp_option options[] = " propositions", 0}, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, - { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old", 0, + { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old|acd", 0, "choose the algorithm for synthesis:" " \"sd\": translate to tgba, split, then determinize;" " \"ds\": translate to tgba, determinize, then split;" @@ -77,7 +77,10 @@ static const argp_option options[] = " \"lar\": translate to a deterministic automaton with arbitrary" " acceptance condition, then use LAR to turn to parity," " then split (default);" - " \"lar.old\": old version of LAR, for benchmarking.\n", 0 }, + " \"lar.old\": old version of LAR, for benchmarking;" + " \"acd\": translate to a deterministic automaton with arbitrary" + " acceptance condition, then use ACD to turn to parity," + " then split.\n", 0 }, { "decompose", OPT_DECOMPOSE, "yes|no", 0, "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, @@ -154,7 +157,8 @@ static char const *const algo_names[] = "sd", "ps", "lar", - "lar.old" + "lar.old", + "acd", }; static char const *const algo_args[] = @@ -164,6 +168,7 @@ static char const *const algo_args[] = "dpasplit", "ps", "lar", "lar.old", + "acd", nullptr }; static spot::synthesis_info::algo const algo_types[] = @@ -173,6 +178,7 @@ static spot::synthesis_info::algo const algo_types[] = spot::synthesis_info::algo::DPA_SPLIT, spot::synthesis_info::algo::DPA_SPLIT, spot::synthesis_info::algo::LAR, spot::synthesis_info::algo::LAR_OLD, 
+ spot::synthesis_info::algo::ACD, }; ARGMATCH_VERIFY(algo_args, algo_types); diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 7620a1098..95e0725d9 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -741,6 +742,9 @@ namespace spot case (algo::LAR_OLD): name = "lar.old"; break; + case (algo::ACD): + name = "acd"; + break; } return os << name; } @@ -775,6 +779,8 @@ namespace spot translator trans(dict, &extra_options); switch (sol) { + case algo::ACD: + SPOT_FALLTHROUGH; case algo::LAR: SPOT_FALLTHROUGH; case algo::LAR_OLD: @@ -965,6 +971,8 @@ namespace spot alternate_players(dpa); break; } + case algo::ACD: + SPOT_FALLTHROUGH; case algo::LAR: SPOT_FALLTHROUGH; case algo::LAR_OLD: @@ -976,11 +984,13 @@ namespace spot dpa = to_parity(aut); // reduce_parity is called by to_parity() } - else + else if (gi.s == algo::LAR_OLD) { dpa = to_parity_old(aut); - dpa = reduce_parity_here(dpa, true); + reduce_parity_here(dpa, true); } + else + dpa = acd_transform(aut); if (bv) bv->paritize_time += sw.stop(); if (vs) diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 46c0bc2bd..a5fced429 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -86,6 +86,7 @@ namespace spot DPA_SPLIT, LAR, LAR_OLD, + ACD, }; struct bench_var diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index c22cd4e6b..742f05a5e 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -836,4 +836,14 @@ ltlsynt --outs="" -f "GFb" | grep "UNREALIZABLE" ltlsynt --outs="" -f "1" ltlsynt --outs="" --ins="" -f "GFa" 2>&1 | \ - grep "both --ins and --outs are specified" \ No newline at end of file + grep "both --ins and --outs are specified" + +LTL='(((((G (((((((g_0) && (G (! (r_0)))) -> (F (! (g_0)))) && (((g_0) && +(X ((! (r_0)) && (! (g_0))))) -> (X ((r_0) R (! (g_0)))))) && (((g_1) && +(G (! (r_1)))) -> (F (! (g_1))))) && (((g_1) && (X ((! (r_1)) && (! (g_1))))) -> +(X ((r_1) R (! (g_1)))))) && (((! (g_0)) && (true)) || ((true) && (! (g_1)))))) +&& ((r_0) R (! (g_0)))) && (G ((r_0) -> (F (g_0))))) && ((r_1) R (! (g_1)))) && +(G ((r_1) -> (F (g_1)))))' +OUT='g_0, g_1' +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=acd | grep "aag 8 2 2 2 4" +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=lar | grep "aag 34 2 3 2 29" From 0a6b627914eee3c8c2465a2c5b427455008239c0 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 22 Mar 2022 14:46:31 +0100 Subject: [PATCH 020/606] option_map: Don't report unused options if option_map is not used * spot/misc/optionmap.cc, spot/misc/optionmap.hh: here. --- spot/misc/optionmap.cc | 38 ++++++++++++++++++++++---------------- spot/misc/optionmap.hh | 1 + 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/spot/misc/optionmap.cc b/spot/misc/optionmap.cc index 4db5235eb..8be8d1adc 100644 --- a/spot/misc/optionmap.cc +++ b/spot/misc/optionmap.cc @@ -130,6 +130,7 @@ namespace spot int option_map::get(const char* option, int def) const { + is_used_ = true; unused_.erase(option); auto it = options_.find(option); return (it == options_.end()) ? def : it->second; @@ -138,6 +139,7 @@ namespace spot std::string option_map::get_str(const char* option, std::string def) const { + is_used_ = true; unused_.erase(option); auto it = options_str_.find(option); return (it == options_str_.end()) ? 
def : it->second; @@ -226,21 +228,25 @@ namespace spot void option_map::report_unused_options() const { - auto s = unused_.size(); - if (s == 0U) - return; - std::ostringstream os; - if (s == 1U) - { - os << "option '" << *unused_.begin() - << "' was not used (possible typo?)"; - } - else - { - os << "the following options where not used (possible typos?):"; - for (auto opt: unused_) - os << "\n\t- '" << opt << '\''; - } - throw std::runtime_error(os.str()); + // We don't consider that an unused map has unused options. + if (is_used_) + { + auto s = unused_.size(); + if (s == 0U) + return; + std::ostringstream os; + if (s == 1U) + { + os << "option '" << *unused_.begin() + << "' was not used (possible typo?)"; + } + else + { + os << "the following options where not used (possible typos?):"; + for (auto opt: unused_) + os << "\n\t- '" << opt << '\''; + } + throw std::runtime_error(os.str()); + } } } diff --git a/spot/misc/optionmap.hh b/spot/misc/optionmap.hh index 229733a18..11ec8c456 100644 --- a/spot/misc/optionmap.hh +++ b/spot/misc/optionmap.hh @@ -110,6 +110,7 @@ namespace spot // will be erased as they are used. The resulting set can be used // for diagnosing errors. mutable std::set unused_; + mutable bool is_used_ = false; void set_(const std::string&, int val); void set_str_(const std::string&, const std::string& val); From 328cf9581629f29a168b376be0e4309c86a7f258 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 22 Mar 2022 14:50:49 +0100 Subject: [PATCH 021/606] ltlsynt: generalization of the bypass * spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh: generalize the bypass and avoid to construct a strategy when we want realizability. * bin/ltlsynt.cc: adapt for realizability * tests/core/ltlsynt.test: update tests --- bin/ltlsynt.cc | 5 +- spot/twaalgos/synthesis.cc | 325 ++++++++++++++++++++++--------------- spot/twaalgos/synthesis.hh | 4 +- tests/core/ltlsynt.test | 75 ++------- 4 files changed, 212 insertions(+), 197 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 8dcd9511a..3d9f46900 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -376,7 +376,7 @@ namespace // we never use the direct approach if (!want_game) m_like = - spot::try_create_direct_strategy(*sub_f, *sub_o, *gi); + spot::try_create_direct_strategy(*sub_f, *sub_o, *gi, !opt_real); switch (m_like.success) { @@ -431,7 +431,8 @@ namespace // the direct approach yielded a strategy // which can now be minimized // We minimize only if we need it - assert(m_like.mealy_like && "Expected success but found no mealy!"); + assert(opt_real || + (m_like.mealy_like && "Expected success but found no mealy!")); if (!opt_real) { // Keep the machine split for aiger diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 95e0725d9..dc79f8cce 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1180,16 +1180,19 @@ namespace spot mealy_like try_create_direct_strategy(formula f, const std::vector& output_aps, - synthesis_info &gi) + synthesis_info &gi, bool want_strategy) { auto vs = gi.verbose_stream; auto& bv = gi.bv; + bdd_dict_ptr& dict = gi.dict; + int tmp; if (vs) *vs << "trying to create strategy directly for " << f << '\n'; - auto ret_sol_maybe = [&vs]() + auto ret_sol_maybe = [&vs, &tmp, &dict]() { + dict->unregister_all_my_variables(&tmp); if (vs) *vs << "direct strategy might exist but was not found.\n"; return mealy_like{ @@ -1197,8 +1200,9 @@ namespace spot nullptr, bddfalse}; }; - auto ret_sol_none = [&vs]() + auto ret_sol_none = [&vs, &tmp, &dict]() 
{ + dict->unregister_all_my_variables(&tmp); if (vs) *vs << "no strategy exists.\n"; return mealy_like{ @@ -1207,15 +1211,23 @@ namespace spot bddfalse}; }; - auto ret_sol_exists = [&vs](auto strat) + auto ret_sol_exists = + [&vs, &want_strategy, &tmp, &dict](twa_graph_ptr strat) { + dict->unregister_all_my_variables(&tmp); if (vs) { - *vs << "direct strategy was found.\n" - << "direct strat has " << strat->num_states() + *vs << "direct strategy was found.\n"; + if (want_strategy) + { + *vs << "direct strat has " << strat->num_states() << " states, " << strat->num_edges() << " edges and " << strat->num_sets() << " colors\n"; + + } } + if (strat) + strat->merge_edges(); return mealy_like{ mealy_like::realizability_code::REALIZABLE_REGULAR, strat, @@ -1223,88 +1235,142 @@ namespace spot }; formula_2_inout_props form2props(output_aps); - auto output_aps_set = std::set(output_aps.begin(), - output_aps.end()); - - formula f_g = formula::tt(), f_left, f_right; - - // If we have a formula like G(b₁) ∧ (φ ↔ GFb₂), we extract b₁ and - // continue the construction for (φ ↔ GFb₂). + formula f_g, f_other; + // If it is G(α) ∧ G(β) ∧ … if (f.is(op::And)) { - if (f.size() != 2) - return ret_sol_maybe(); - if (f[0].is(op::G) && f[0][0].is_boolean()) - { - f_g = f[0]; - f = f[1]; - } - else if (f[1].is(op::G) && f[1][0].is_boolean()) - { - f_g = f[1]; - f = f[0]; - } - else - return ret_sol_maybe(); - } - if (f.is(op::Equiv)) - { - auto [left_ins, left_outs] = form2props.aps_of(f[0]); - auto [right_ins, right_outs] = form2props.aps_of(f[1]); + std::vector gs; + std::vector others; + for (auto child : f) + if (child.is(op::G) && child[0].is_boolean()) + gs.push_back(child[0]); + else + others.push_back(child); - auto properties_vector = [](const formula& f, - const std::set& ins, - const std::set& outs) + f_g = formula::And(gs); + f_other = formula::And(others); + } + else if (f.is(op::G) && f[0].is_boolean()) + { + f_g = f[0]; + f_other = formula::tt(); + } + else + { + f_g = formula::tt(); + f_other = f; + } + + // We have to check if the content of G is realizable (input-complete) + bdd output_bdd_tmp = bddtrue; + for (auto& out : output_aps) + output_bdd_tmp &= bdd_ithvar( + dict->register_proposition(formula::ap(out), &tmp)); + + if (!f_g.is_tt()) + { + auto g_bdd = formula_to_bdd(f_g, dict, &tmp); + if (bdd_exist(g_bdd, output_bdd_tmp) != bddtrue) + return ret_sol_none(); + } + + if (f_other.is(op::Equiv)) + { + // Check if FG or GF + auto is_general = [&tmp, &output_bdd_tmp, &dict](const formula &f, + op first, op second) { - return std::vector - { - f.is({op::G, op::F}) && f[0][0].is_boolean() && ins.empty(), - f.is_syntactic_recurrence() && outs.empty(), - // f is FG(bool) - f.is({op::F, op::G}) && f[0][0].is_boolean() && ins.empty(), - f.is_syntactic_persistence() && outs.empty() - }; + if (!f.is({first, second}) || !f[0][0].is_boolean()) + return false; + auto f_bdd = formula_to_bdd(f[0][0], dict, &tmp); + if (bdd_exist(f_bdd, output_bdd_tmp) != bddtrue) + return false; + f_bdd = formula_to_bdd(formula::Not(f[0][0]), dict, &tmp); + bool res = (bdd_exist(f_bdd, output_bdd_tmp) == bddtrue); + return res; }; - // We need to detect - // GF(outs) ↔ recurrence(ins), - // recurrence(ins) ↔ GF(outs), - // FG(outs) ↔ persistence(ins), - // persistence(ins) ↔ FG(outs) - const auto left_properties = properties_vector(f[0], left_ins, left_outs), - right_properties = properties_vector(f[1], right_ins, right_outs); + + auto is_gf = [is_general](const formula& f) + { + return is_general(f, op::G, op::F); + }; 
+ + auto is_fg = [is_general](const formula& f) + { + return is_general(f, op::F, op::G); + }; + + auto is_co_bu = [](const formula &f, const std::set& outs) + { + return outs.empty() && f.is_syntactic_obligation(); + }; + + auto is_buchi = [](const formula &f, const std::set& outs) + { + return outs.empty() && f.is_syntactic_recurrence(); + }; + + auto properties_vector = [&](const formula &f, + const std::set &outs) + { + auto is_lgf = is_gf(f); + auto is_lfg = is_fg(f); + return std::vector{ + // f is GF(ins + outs) <-> buchi(ins) + is_lgf, + is_buchi(f, outs), + // f is FG(ins + outs) <-> co-buchi(ins) + is_lfg, + is_co_bu(f, outs)}; + }; + + + auto [left_ins, left_outs] = form2props.aps_of(f_other[0]); + auto [right_ins, right_outs] = form2props.aps_of(f_other[1]); + + auto left_properties = properties_vector(f_other[0], left_outs); + auto right_properties = properties_vector(f_other[1], right_outs); + unsigned combin = -1U; for (unsigned i = 0; i < 4; ++i) - { - if (left_properties[i] && right_properties[(i%2) ? (i-1) : (i+1)]) + if (left_properties[i] && right_properties[(i % 2) ? (i - 1) : (i + 1)]) { combin = i; break; } - } + + // If we don't match, we don't know if (combin == -1U) return ret_sol_maybe(); - // left is the recurrence (resp. persistence) - // right is GF(outs) (resp. GF(outs)) - // If f[0] is GF or FG - f_left = f[(combin+1)%2]; - f_right = f[combin%2]; - if (!(combin%2)) + // We know that a strategy exists and we don't want to construct it. + if (!want_strategy) + return ret_sol_exists(nullptr); + + formula f_left = f_other[(combin + 1) % 2]; + formula f_right = f_other[combin % 2]; + if (!(combin % 2)) { std::swap(left_ins, right_ins); std::swap(left_outs, right_outs); } auto trans = create_translator(gi); - trans.set_type(combin < 2 ? postprocessor::Buchi - : postprocessor::CoBuchi); + trans.set_pref(postprocessor::Deterministic | postprocessor::Complete); + if (combin < 2) + trans.set_type(postprocessor::Buchi); + else + trans.set_type(postprocessor::CoBuchi); stopwatch sw; if (bv) sw.start(); auto res = trans.run(f_left); + if (!is_deterministic(res)) + return ret_sol_maybe(); + if (bv) { auto delta = sw.stop(); @@ -1312,79 +1378,76 @@ namespace spot if (vs) *vs << "tanslating formula done in " << delta << " seconds\n"; } - - if (!is_deterministic(res)) - return ret_sol_maybe(); - for (auto& out : right_outs) - res->register_ap(out.ap_name()); - - // The BDD that describes the content of the G in a conjunction - bdd g_bdd = bddtrue; - - // Convert the set of outputs to a BDD - bdd output_bdd = bddtrue; - for (auto &out : output_aps_set) - output_bdd &= bdd_ithvar(res->register_ap(out)); - - if (!f_g.is_tt()) - { - g_bdd = formula_to_bdd(f_g[0], res->get_dict(), res); - // If the content of G is not input-complete, a simple strategy for - // env is to play this missing value. - if (bdd_exist(g_bdd, output_bdd) != bddtrue) - { - return ret_sol_none(); - } - } - - // For the GF(outs) (resp. GF(outs)), the content and its negation can be - // converted to a BDD. - bdd right_bdd, neg_right_bdd; - if (combin < 2) - { - right_bdd = formula_to_bdd(f_right[0][0], res->get_dict(), res); - neg_right_bdd = bdd_not(right_bdd); - } - else - { - neg_right_bdd = formula_to_bdd(f_right[0][0], res->get_dict(), res); - right_bdd = bdd_not(neg_right_bdd); - } - // Monitor is a special case. As we color accepting transitions, if the - // acceptance is true, we cannot say that a transition is accepting if - // a color is seen. 
- const bool is_true = res->acc().is_t(); - scc_info si(res, scc_info_options::NONE); - for (auto& e : res->edges()) - { - // Here the part describing the outputs is based on the fact that - // they must be seen infinitely often. As these edges are seen - // finitely often, we can let the minimization choose the value. - if (si.scc_of(e.src) == si.scc_of(e.dst)) - { - if (e.acc || is_true) - e.cond &= right_bdd; - else - e.cond &= neg_right_bdd; - } - // g_bdd has to be true all the time. So we cannot only do it - // between SCCs. - e.cond &= g_bdd; - if (e.cond == bddfalse) - return ret_sol_maybe(); - // The recurrence is Büchi but the strategy is a monitor. We need - // to remove the color. - e.acc = {}; - } - - set_synthesis_outputs(res, output_bdd); - res->set_acceptance(acc_cond::acc_code::t()); - res->prop_complete(trival::maybe()); + + bdd output_bdd = bddtrue; + auto [is, os] = form2props.aps_of(f); + for (auto i : is) + res->register_ap(i); + for (auto o : os) + output_bdd &= bdd_ithvar(res->register_ap(o)); + + bdd right_bdd = formula_to_bdd(f_right[0][0], dict, res); + bdd neg_right_bdd = bdd_not(right_bdd); + bdd g_bdd = formula_to_bdd(f_g, dict, res); + + if (combin > 1) + std::swap(right_bdd, neg_right_bdd); + + right_bdd = bdd_and(right_bdd, g_bdd); + neg_right_bdd = bdd_and(neg_right_bdd, g_bdd); + + scc_info si(res, scc_info_options::NONE); + + bool is_true_acc = ((combin < 2) && res->acc().is_t()) + || ((combin > 1) && res->acc().is_f()); + auto prop_vector = propagate_marks_vector(res); + auto& ev = res->edge_vector(); + for (unsigned i = 1; i < ev.size(); ++i) + { + auto &edge = ev[i]; + if (si.scc_of(edge.src) == si.scc_of(edge.dst)) + { + if (edge.acc || is_true_acc) + edge.cond &= right_bdd; + // If we have a GF and an edge is not colored but prop_vector says + // that this edge could be colored, it means that we can do what we + // want + else if (!prop_vector[i]) + edge.cond &= neg_right_bdd; + else + edge.cond &= g_bdd; + } + else + edge.cond &= g_bdd; + edge.acc = {}; + } + res->set_acceptance(acc_cond::acc_code::t()); + res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); + return ret_sol_exists(res); } - else - return ret_sol_maybe(); + else if (f_other.is_tt()) + { + if (!want_strategy) + return ret_sol_exists(nullptr); + auto res = make_twa_graph(dict); + + bdd output_bdd = bddtrue; + auto [ins_f, _] = form2props.aps_of(f_g); + for (auto &out : output_aps) + output_bdd &= bdd_ithvar(res->register_ap(out)); + + for (auto &in : ins_f) + res->register_ap(in); + + res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); + bdd g_bdd = formula_to_bdd(f_g, dict, res); + res->new_state(); + res->new_edge(0, 0, g_bdd); + return ret_sol_exists(res); + } + return ret_sol_maybe(); } } // spot diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index a5fced429..115b8097c 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -241,10 +241,12 @@ namespace spot /// \param f The formula to synthesize a strategy for /// \param output_aps A vector with the name of all output properties. /// All APs not named in this vector are treated as inputs + /// \param want_strategy Set to false if we don't want to construct the + /// strategy but only test realizability. 
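+  ///
+  /// A minimal usage sketch, assuming \a f, \a outs, and \a gi are
+  /// already set up, and assuming the first two members of the result
+  /// are named success and mealy_like:
+  /// \code
+  ///   spot::mealy_like ml =
+  ///     spot::try_create_direct_strategy(f, outs, gi, false);
+  ///   if (ml.success ==
+  ///       spot::mealy_like::realizability_code::REALIZABLE_REGULAR)
+  ///     {
+  ///       // realizable; since want_strategy was false, the strategy
+  ///       // itself was not built and ml.mealy_like may be null
+  ///     }
+  /// \endcode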
SPOT_API mealy_like try_create_direct_strategy(formula f, const std::vector& output_aps, - synthesis_info& gi); + synthesis_info& gi, bool want_strategy = false); /// \ingroup synthesis /// \brief Solve a game, and update synthesis_info diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 742f05a5e..f0cda98af 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -193,9 +193,7 @@ diff out exp cat >exp < GFb -tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states, 2 edges and 0 colors EOF ltlsynt --ins='a' --outs='b' -f 'GFa <-> GFb' --verbose --realizability 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -214,9 +212,7 @@ diff outx exp cat >exp < GFe -tanslating formula done in X seconds direct strategy was found. -direct strat has 16 states, 81 edges and 0 colors EOF ltlsynt --ins='a,b,c,d' --outs='e' -f '(Fa & Fb & Fc & Fd) <-> GFe' \ --verbose --realizability --algo=lar 2> out @@ -526,15 +522,14 @@ REALIZABLE HOA: v1 States: 1 Start: 0 -AP: 3 "a" "b" "c" +AP: 3 "c" "a" "b" acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic -controllable-AP: 2 +controllable-AP: 0 --BODY-- State: 0 -[!0&!2 | !1&!2] 0 -[0&1&2] 0 +[!0&!1 | !0&!2 | 0&1&2] 0 --END-- EOF ltlsynt --ins=a,b -f 'G (a & b <=> c)' >stdout @@ -563,15 +558,8 @@ direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds trying to create strategy directly for Gc -direct strategy might exist but was not found. -translating formula done in X seconds -automaton has 1 states and 1 colors -LAR construction done in X seconds -DPA has 1 states, 0 colors -split inputs and outputs done in X seconds -automaton has 2 states -solving game with acceptance: Streett 1 -game solved in X seconds +direct strategy was found. +direct strat has 1 states, 1 edges and 0 colors simplification took X seconds EOF ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out @@ -599,7 +587,6 @@ done # # impossible to find a strategy. cat >exp < GFa) & G(a & c) -tanslating formula done in X seconds no strategy exists. EOF ltlsynt -f '(GFb <-> GFa) && G(a&c)' --outs=b,c --verbose\ @@ -687,26 +674,12 @@ diff outx exp # Here, G!(!x | !y) should be Gx & Gy cat >exp <exp < out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -734,15 +699,7 @@ diff outx exp # Here, G!(a -> b) should be G(a) & G(!b) cat >exp < b)' --outs=b --decompose=yes --aiger\ --verbose 2> out || true @@ -815,15 +772,7 @@ diff outx exp # Here, !(F(a | b)) should be G!a & G!b cat >exp < out || true From 7abcf4e38bfb97c884adf2e6e3977432261314c9 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 22 Mar 2022 15:08:40 +0100 Subject: [PATCH 022/606] ltlsynt: create a "bypass" option * bin/ltlsynt.cc: here. 
* tests/core/ltlsynt.test: add tests --- bin/ltlsynt.cc | 23 ++++++++++++++++++++++- tests/core/ltlsynt.test | 38 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 3 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 3d9f46900..0e5d765a1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -44,6 +44,7 @@ enum { OPT_ALGO = 256, + OPT_BYPASS, OPT_CSV, OPT_DECOMPOSE, OPT_INPUT, @@ -81,6 +82,9 @@ static const argp_option options[] = " \"acd\": translate to a deterministic automaton with arbitrary" " acceptance condition, then use ACD to turn to parity," " then split.\n", 0 }, + { "bypass", OPT_BYPASS, "yes|no", 0, + "whether to try to avoid to construct a parity game " + "(enabled by default)", 0}, { "decompose", OPT_DECOMPOSE, "yes|no", 0, "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, @@ -182,6 +186,20 @@ static spot::synthesis_info::algo const algo_types[] = }; ARGMATCH_VERIFY(algo_args, algo_types); +static const char* const bypass_args[] = + { + "yes", "true", "enabled", "1", + "no", "false", "disabled", "0", + nullptr + }; +static bool bypass_values[] = + { + true, true, true, true, + false, false, false, false, + }; +ARGMATCH_VERIFY(bypass_args, bypass_values); +bool opt_bypass = true; + static const char* const decompose_args[] = { "yes", "true", "enabled", "1", @@ -374,7 +392,7 @@ namespace }; // If we want to print a game, // we never use the direct approach - if (!want_game) + if (!want_game && opt_bypass) m_like = spot::try_create_direct_strategy(*sub_f, *sub_o, *gi, !opt_real); @@ -638,6 +656,9 @@ parse_opt(int key, char *arg, struct argp_state *) case OPT_ALGO: gi->s = XARGMATCH("--algo", arg, algo_args, algo_types); break; + case OPT_BYPASS: + opt_bypass = XARGMATCH("--bypass", arg, bypass_args, bypass_values); + break; case OPT_CSV: opt_csv = arg ? arg : "-"; if (not gi->bv) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index f0cda98af..60b96bb46 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -566,7 +566,7 @@ ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp -# Try to find a direct strategy for (GFa <-> GFb) & Gc. THe order should not +# Try to find a direct strategy for (GFa <-> GFb) & Gc. The order should not # impact the result for f in "(GFa <-> GFb) & Gc" "(GFb <-> GFa) & Gc" \ "Gc & (GFa <-> GFb)" "Gc & (GFb <-> GFa)" @@ -594,7 +594,7 @@ ltlsynt -f '(GFb <-> GFa) && G(a&c)' --outs=b,c --verbose\ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp -# # Ltlsynt should be able to create a strategy when the last G +# # ltlsynt should be able to create a strategy when the last G # is input-complete. cat >exp < GFa) & G((a & c) | (!a & !c)) @@ -796,3 +796,37 @@ LTL='(((((G (((((((g_0) && (G (! (r_0)))) -> (F (! (g_0)))) && (((g_0) && OUT='g_0, g_1' ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=acd | grep "aag 8 2 2 2 4" ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=lar | grep "aag 34 2 3 2 29" + +ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ + --verbose --realizability 2> out +cat >exp < GFb +direct strategy was found. 
+EOF +diff out exp + +ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ + --verbose --realizability --bypass=no 2> out +cat >exp < outx +diff outx exp From 46f3f5aaf42d96cc68826758837bc3e4a6eef5b2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 25 Mar 2022 09:25:04 +0100 Subject: [PATCH 023/606] * doc/org/tut40.org: Clarify, as suggested by a CAV'22 reviewer. --- doc/org/tut40.org | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/org/tut40.org b/doc/org/tut40.org index b68efe558..8d9b004da 100644 --- a/doc/org/tut40.org +++ b/doc/org/tut40.org @@ -144,9 +144,11 @@ states. We now look at how to create such a game in Python. -Essentially, a game in Spot is just an automaton equiped with a -special property to indicate the owner of each states. So it can be -created using the usual interface: +Essentially, a game in Spot is just an automaton equiped with a [[file:concepts.org::#named-properties][named +property "state-player"]] that hold a Boolean vector indicating the +owner of each state. The game can be created using the usual +automaton interface, and the owners are set by calling +=game.set_state_players()= with a vector of Boolean at the very end. #+NAME: build_game #+BEGIN_SRC python :exports code @@ -173,7 +175,7 @@ created using the usual interface: todo = [] # Create the state (i, j) for a player if it does not exist yet and - # returns the state's number in the game. + # return the state's number in the game. def get_game_state(player, i, j): orig_state = s_orig_states if player else d_orig_states if (i, j) in orig_state: From 9c6a09890ea2820771bc08df96d4d1fca8d9dc75 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 26 Mar 2022 15:57:56 +0100 Subject: [PATCH 024/606] parsetl: speedup parsing of n-ary operators with many operands Issue #500, reported by Yann Thierry-Mieg. * spot/parsetl/parsetl.yy, spot/parsetl/scantl.ll: Use variant to store a new pnode objects that delays the construction of n-ary operators. * spot/parsetl/Makefile.am: Do not distribute stack.hh anymore. * spot/tl/formula.cc: Fix detection of overflow in Star and FStar. * HACKING: Update Bison requirements to 3.3. * tests/core/500.test: New test case. * tests/Makefile.am: Add it. * tests/core/ltl2tgba2.test, tests/core/ltlsynt.test, tests/core/tostring.test: Adjust to new expected order. * NEWS: Mention the change. --- HACKING | 2 +- NEWS | 9 + spot/parsetl/Makefile.am | 3 +- spot/parsetl/parsetl.yy | 416 +++++++++++++++++++++++++------------- spot/parsetl/scantl.ll | 55 ++--- spot/tl/formula.cc | 12 +- tests/Makefile.am | 1 + tests/core/500.test | 43 ++++ tests/core/ltl2tgba2.test | 6 +- tests/core/ltlsynt.test | 2 +- tests/core/tostring.test | 6 +- 11 files changed, 374 insertions(+), 181 deletions(-) create mode 100755 tests/core/500.test diff --git a/HACKING b/HACKING index c6e127a70..de461376b 100644 --- a/HACKING +++ b/HACKING @@ -25,7 +25,7 @@ since the generated files they produce are distributed.) GNU Automake >= 1.11 GNU Libtool >= 2.4 GNU Flex >= 2.6 - GNU Bison >= 3.0 + GNU Bison >= 3.3 GNU Emacs (preferably >= 24 but it may work with older versions) org-mode >= 9.1 (the version that comes bundled with your emacs version is likely out-of-date; but distribution often have diff --git a/NEWS b/NEWS index 345ec7fdd..81aaf9a22 100644 --- a/NEWS +++ b/NEWS @@ -66,6 +66,15 @@ New in spot 2.10.4.dev (net yet released) - purge_dead_states() will now also remove edges labeled by false (except self-loops). 
+ - When parsing formulas with a huge number of operands for an n-ary + operator (for instance 'p1 | p2 | ... | p1000') the LTL parser + would construct that formula two operand at a time, and the + formula constructor for that operator would be responsible for + inlining, sorting, deduplicating, ... all operands at each step. + This resulted in a worst-than-quadratic slowdown. This is now + averted in the parser by delaying the construction of such n-ary + nodes until all children are known. + Bugs fixed: - reduce_parity() produced incorrect results when applied to diff --git a/spot/parsetl/Makefile.am b/spot/parsetl/Makefile.am index d98c9ebab..f218ca067 100644 --- a/spot/parsetl/Makefile.am +++ b/spot/parsetl/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2015, 2018 Laboratoire de Recherche et +## Copyright (C) 2008-2015, 2018, 2022 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris ## 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -30,7 +30,6 @@ noinst_LTLIBRARIES = libparsetl.la PARSETL_YY = parsetl.yy FROM_PARSETL_YY_MAIN = parsetl.cc FROM_PARSETL_YY_OTHERS = \ - stack.hh \ parsetl.hh FROM_PARSETL_YY = $(FROM_PARSETL_YY_MAIN) $(FROM_PARSETL_YY_OTHERS) diff --git a/spot/parsetl/parsetl.yy b/spot/parsetl/parsetl.yy index bbcdedcb5..e6defffb3 100644 --- a/spot/parsetl/parsetl.yy +++ b/spot/parsetl/parsetl.yy @@ -1,7 +1,6 @@ /* -*- coding: utf-8 -*- - -** Copyright (C) 2009-2019, 2021 Laboratoire de Recherche et Développement -** de l'Epita (LRDE). +** Copyright (C) 2009-2019, 2021, 2022 Laboratoire de Recherche et +** Développement de l'Epita (LRDE). ** Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 ** (LIP6), département Systèmes Répartis Coopératifs (SRC), Université ** Pierre et Marie Curie. @@ -21,11 +20,13 @@ ** You should have received a copy of the GNU General Public License ** along with this program. If not, see . */ -%require "3.0" +%require "3.3" %language "C++" %locations %defines %define api.prefix {tlyy} +%define api.value.type variant +%define api.value.automove true %debug %define parse.error verbose %expect 0 @@ -37,25 +38,164 @@ #include "config.h" #include #include +#include #include #include #include struct minmax_t { unsigned min, max; }; + + // pnode (parsing node) is simular to fnode (formula node) except + // that n-ary operators will delay their construction until all + // children are known; this is a hack to speedup the parsing, + // because n-ary operator usually do a lot of work on construction + // (sorting all children if the operator is commutative, removing + // duplicates if applicable, etc.). Building n-ary nodes by + // repeatedly calling the binary constructor as we did in the past + // has a prohibitive cost. See issue #500. + + struct nary + { + std::vector children; + spot::op kind; + }; + + struct pnode + { + // Hold either a constructed formula, or an n-ary operator that we + // will construct only when it is combined with a different + // operator. + std::variant data; + // Record whether this pnode has been transformed into a fnode( or + // moved to another pnode). If that occurred, the ownership of + // any fnode we store has been transfered to the constructed fnode + // (or to the other pnode), and our destructor has nothing to do. + // This is the usual case while parsing a formula without error. 
+ // However during error recovering, the parser may have to discard + // unused pnode, in which case we have to remember to free fnode + // during destruction. + // + // We have to track this used status because pnode are destructed + // whenever the parser pops a token, and as of Bison 3.7.6, the + // handling of "%destructor" is broken when + // "%define api.value.type variant" is used. See + // https://lists.gnu.org/archive/html/bug-bison/2022-03/msg00000.html + bool used = false; + + pnode() + : data(nullptr) + { + } + + pnode(const spot::fnode* ltl) + : data(ltl) + { + } + + // We only support move construction. + pnode(const pnode& other) = delete; + pnode& operator=(const pnode& other) = delete; + + pnode(pnode&& other) + : data(std::move(other.data)) + { + other.used = true; + } + + pnode& operator=(pnode&& other) + { + data = std::move(other.data); + other.used = true; + return *this; + } + + ~pnode() + { + if (used) + return; + if (auto* n = std::get_if(&data)) + { + for (auto f: n->children) + f->destroy(); + } + else + { + auto* f = std::get(data); + // The only case where we expect f to be nullptr, is if + // parse_ap() return nullptr: then $$ is unset when YYERROR + // is called. + if (f) + f->destroy(); + } + } + + // Create a new n-ary node from left and right. + // This will empty left and right so that their + // destructor do nothing. + pnode(spot::op o, pnode&& left, pnode&& right) + : data(nary{}) + { + nary& n = std::get(data); + n.kind = o; + if (auto* nleft = std::get_if(&left.data); + nleft && nleft->kind == o) + std::swap(n.children, nleft->children); + else + n.children.push_back(left); + if (auto* nright = std::get_if(&right.data); + nright && nright->kind == o) + { + auto& rch = nright->children; + n.children.insert(n.children.end(), rch.begin(), rch.end()); + rch.clear(); + } + else + { + n.children.push_back(right); + } + } + + operator const spot::fnode*() + { + used = true; + if (auto* n = std::get_if(&data)) + { + return spot::fnode::multop(n->kind, n->children); + } + else + { + return std::get(data); + } + } + + // Convert to a temporary formula, for printing, do not mark as + // used. + const spot::formula tmp() const + { + const spot::fnode* f; + if (auto* n = std::get_if(&data)) + { + for (auto c: n->children) + c->clone(); + f = spot::fnode::multop(n->kind, n->children); + } + else + { + f = std::get(data); + assert(f != nullptr); + f->clone(); + } + return spot::formula(f); + } + }; + + } %parse-param {spot::parse_error_list &error_list} %parse-param {spot::environment &parse_environment} %parse-param {spot::formula &result} -%union -{ - std::string* str; - const spot::fnode* ltl; - unsigned num; - minmax_t minmax; -} - %code { /* parsetl.hh and parsedecl.hh include each other recursively. We mut ensure that YYSTYPE is declared (by the above %union) @@ -84,28 +224,20 @@ using namespace spot; } \ while (0); -// right is missing, so complain and use false. 
-#define missing_right_binop_hard(res, left, op, str) \ - do \ - { \ - left->destroy(); \ - missing_right_op(res, op, str); \ - } \ - while (0); - - static bool + static const fnode* sere_ensure_bool(const fnode* f, const spot::location& loc, const char* oper, spot::parse_error_list& error_list) { if (f->is_boolean()) - return true; + return f; + f->destroy(); std::string s; s.reserve(80); s = "not a Boolean expression: in a SERE "; s += oper; s += " can only be applied to a Boolean expression"; error_list.emplace_back(loc, s); - return false; + return nullptr; } static const fnode* @@ -196,9 +328,9 @@ using namespace spot; %token START_SERE "SERE start marker" %token START_BOOL "BOOLEAN start marker" %token PAR_OPEN "opening parenthesis" PAR_CLOSE "closing parenthesis" -%token PAR_BLOCK "(...) block" -%token BRA_BLOCK "{...} block" -%token BRA_BANG_BLOCK "{...}! block" +%token PAR_BLOCK "(...) block" +%token BRA_BLOCK "{...} block" +%token BRA_BANG_BLOCK "{...}! block" %token BRACE_OPEN "opening brace" BRACE_CLOSE "closing brace" %token BRACE_BANG_CLOSE "closing brace-bang" %token OP_OR "or operator" OP_XOR "xor operator" @@ -221,7 +353,7 @@ using namespace spot; %token OP_GOTO_OPEN "opening bracket for goto operator" %token OP_SQBKT_CLOSE "closing bracket" %token OP_SQBKT_STRONG_CLOSE "closing !]" -%token OP_SQBKT_NUM "number for square bracket operator" +%token OP_SQBKT_NUM "number for square bracket operator" %token OP_UNBOUNDED "unbounded mark" %token OP_SQBKT_SEP "separator for square bracket operator" %token OP_UCONCAT "universal concat operator" @@ -229,12 +361,12 @@ using namespace spot; %token OP_UCONCAT_NONO "universal non-overlapping concat operator" %token OP_ECONCAT_NONO "existential non-overlapping concat operator" %token OP_FIRST_MATCH "first_match" -%token ATOMIC_PROP "atomic proposition" +%token ATOMIC_PROP "atomic proposition" %token OP_CONCAT "concat operator" OP_FUSION "fusion operator" %token CONST_TRUE "constant true" CONST_FALSE "constant false" %token END_OF_INPUT "end of formula" %token OP_POST_NEG "negative suffix" OP_POST_POS "positive suffix" -%token OP_DELAY_N "SVA delay operator" +%token OP_DELAY_N "SVA delay operator" %token OP_DELAY_OPEN "opening bracket for SVA delay operator" %token OP_DELAY_PLUS "##[+] operator" %token OP_DELAY_STAR "##[*] operator" @@ -276,19 +408,16 @@ using namespace spot; need any precedence). */ %precedence OP_NOT -%type subformula atomprop booleanatom sere lbtformula boolformula -%type bracedsere parenthesedsubformula -%type starargs fstarargs equalargs sqbracketargs gotoargs delayargs -%type sqbkt_num +%type subformula atomprop booleanatom sere lbtformula +%type boolformula bracedsere parenthesedsubformula +%type starargs fstarargs equalargs sqbracketargs gotoargs delayargs +%type sqbkt_num -%destructor { delete $$; } -%destructor { $$->destroy(); } - -%printer { debug_stream() << *$$; } -%printer { print_psl(debug_stream(), formula($$->clone())); } -%printer { print_sere(debug_stream(), formula($$->clone())); } sere bracedsere -%printer { debug_stream() << $$; } -%printer { debug_stream() << $$.min << ".." << $$.max; } +%printer { debug_stream() << $$; } +%printer { print_psl(debug_stream(), $$.tmp()); } +%printer { print_sere(debug_stream(), $$.tmp()); } sere bracedsere +%printer { debug_stream() << $$; } +%printer { debug_stream() << $$.min << ".." 
<< $$.max; } %% result: START_LTL subformula END_OF_INPUT @@ -380,18 +509,19 @@ error_opt: %empty sqbkt_num: OP_SQBKT_NUM { - if ($1 >= fnode::unbounded()) + auto n = $1; + if (n >= fnode::unbounded()) { auto max = fnode::unbounded() - 1; std::ostringstream s; - s << $1 << " exceeds maximum supported repetition (" + s << n << " exceeds maximum supported repetition (" << max << ")"; error_list.emplace_back(@1, s.str()); $$ = max; } else { - $$ = $1; + $$ = n; } } @@ -484,10 +614,10 @@ delayargs: OP_DELAY_OPEN sqbracketargs atomprop: ATOMIC_PROP { - $$ = parse_ap(*$1, @1, parse_environment, error_list); - delete $1; - if (!$$) + auto* f = parse_ap($1, @1, parse_environment, error_list); + if (!f) YYERROR; + $$ = f; } booleanatom: atomprop @@ -504,13 +634,12 @@ booleanatom: atomprop sere: booleanatom | OP_NOT sere { - if (sere_ensure_bool($2, @2, "`!'", error_list)) + if (auto f = sere_ensure_bool($2, @2, "`!'", error_list)) { - $$ = fnode::unop(op::Not, $2); + $$ = fnode::unop(op::Not, f); } else { - $2->destroy(); $$ = error_false_block(@$, error_list); } } @@ -518,9 +647,8 @@ sere: booleanatom | PAR_BLOCK { $$ = - try_recursive_parse(*$1, @1, parse_environment, + try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; } @@ -543,134 +671,142 @@ sere: booleanatom $$ = fnode::ff(); } | sere OP_AND sere - { $$ = fnode::multop(op::AndRat, {$1, $3}); } + { $$ = pnode(op::AndRat, $1, $3); } | sere OP_AND error { missing_right_binop($$, $1, @2, "length-matching and operator"); } | sere OP_SHORT_AND sere - { $$ = fnode::multop(op::AndNLM, {$1, $3}); } + { $$ = pnode(op::AndNLM, $1, $3); } | sere OP_SHORT_AND error { missing_right_binop($$, $1, @2, "non-length-matching and operator"); } | sere OP_OR sere - { $$ = fnode::multop(op::OrRat, {$1, $3}); } + { $$ = pnode(op::OrRat, $1, $3); } | sere OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | sere OP_CONCAT sere - { $$ = fnode::multop(op::Concat, {$1, $3}); } + { $$ = pnode(op::Concat, $1, $3); } | sere OP_CONCAT error { missing_right_binop($$, $1, @2, "concat operator"); } | sere OP_FUSION sere - { $$ = fnode::multop(op::Fusion, {$1, $3}); } + { $$ = pnode(op::Fusion, $1, $3); } | sere OP_FUSION error { missing_right_binop($$, $1, @2, "fusion operator"); } | OP_DELAY_N sere - { $$ = formula::sugar_delay(formula($2), $1, $1).to_node_(); } + { unsigned n = $1; $$ = formula::sugar_delay(formula($2), n, n).to_node_(); } | OP_DELAY_N error { missing_right_binop($$, fnode::tt(), @1, "SVA delay operator"); } | sere OP_DELAY_N sere - { $$ = formula::sugar_delay(formula($1), formula($3), - $2, $2).to_node_(); } + { unsigned n = $2; + $$ = formula::sugar_delay(formula($1), formula($3), + n, n).to_node_(); } | sere OP_DELAY_N error { missing_right_binop($$, $1, @2, "SVA delay operator"); } | delayargs sere %prec OP_DELAY_OPEN { - if ($1.max < $1.min) + auto [min, max] = $1; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($1.max, $1.min); + std::swap(max, min); } $$ = formula::sugar_delay(formula($2), - $1.min, $1.max).to_node_(); + min, max).to_node_(); } | delayargs error { missing_right_binop($$, fnode::tt(), @1, "SVA delay operator"); } | sere delayargs sere %prec OP_DELAY_OPEN { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } $$ = formula::sugar_delay(formula($1), formula($3), - $2.min, $2.max).to_node_(); + min, max).to_node_(); } 
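+      /* The delayargs rules above repair a reversed range (min > max):
+         the problem is reported, then the bounds are swapped so that
+         parsing can continue with a well-formed delay.  */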
| sere delayargs error { missing_right_binop($$, $1, @2, "SVA delay operator"); } | starargs { - if ($1.max < $1.min) + auto [min, max] = $1; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($1.max, $1.min); + std::swap(max, min); } - $$ = fnode::bunop(op::Star, fnode::tt(), $1.min, $1.max); + $$ = fnode::bunop(op::Star, fnode::tt(), min, max); } | sere starargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - $$ = fnode::bunop(op::Star, $1, $2.min, $2.max); + $$ = fnode::bunop(op::Star, $1, min, max); } | sere fstarargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - $$ = fnode::bunop(op::FStar, $1, $2.min, $2.max); + $$ = fnode::bunop(op::FStar, $1, min, max); } | sere equalargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - if (sere_ensure_bool($1, @1, "[=...]", error_list)) + if (auto f = sere_ensure_bool($1, @1, "[=...]", error_list)) { - $$ = formula::sugar_equal(formula($1), - $2.min, $2.max).to_node_(); + $$ = formula::sugar_equal(formula(f), + min, max).to_node_(); } else { - $1->destroy(); $$ = error_false_block(@$, error_list); } } | sere gotoargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - if (sere_ensure_bool($1, @1, "[->...]", error_list)) + if (auto f = sere_ensure_bool($1, @1, "[->...]", error_list)) { - $$ = formula::sugar_goto(formula($1), - $2.min, $2.max).to_node_(); + $$ = formula::sugar_goto(formula(f), min, max).to_node_(); } else { - $1->destroy(); $$ = error_false_block(@$, error_list); } } | sere OP_XOR sere { - if (sere_ensure_bool($1, @1, "`^'", error_list) - && sere_ensure_bool($3, @3, "`^'", error_list)) + auto left = sere_ensure_bool($1, @1, "`^'", error_list); + auto right = sere_ensure_bool($3, @3, "`^'", error_list); + if (left && right) { - $$ = fnode::binop(op::Xor, $1, $3); + $$ = fnode::binop(op::Xor, left, right); } else { - $1->destroy(); - $3->destroy(); + if (left) + left->destroy(); + else if (right) + right->destroy(); $$ = error_false_block(@$, error_list); } } @@ -678,14 +814,13 @@ sere: booleanatom { missing_right_binop($$, $1, @2, "xor operator"); } | sere OP_IMPLIES sere { - if (sere_ensure_bool($1, @1, "`->'", error_list)) + auto left = sere_ensure_bool($1, @1, "`->'", error_list); + if (left) { - $$ = fnode::binop(op::Implies, $1, $3); + $$ = fnode::binop(op::Implies, left, $3); } else { - $1->destroy(); - $3->destroy(); $$ = error_false_block(@$, error_list); } } @@ -693,15 +828,18 @@ sere: booleanatom { missing_right_binop($$, $1, @2, "implication operator"); } | sere OP_EQUIV sere { - if (sere_ensure_bool($1, @1, "`<->'", error_list) - && sere_ensure_bool($3, @3, "`<->'", error_list)) + auto left = sere_ensure_bool($1, @1, "`<->'", error_list); + auto right = sere_ensure_bool($3, @3, "`<->'", error_list); + if (left && right) { - $$ = fnode::binop(op::Equiv, $1, $3); + $$ = fnode::binop(op::Equiv, left, right); } else { - $1->destroy(); - $3->destroy(); + if (left) + left->destroy(); + else if (right) + right->destroy(); $$ = error_false_block(@$, error_list); } } @@ -739,19 +877,17 @@ bracedsere: BRACE_OPEN sere 
BRACE_CLOSE } | BRA_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; } parenthesedsubformula: PAR_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_ltl, error_list); - delete $1; if (!$$) YYERROR; } @@ -786,10 +922,9 @@ parenthesedsubformula: PAR_BLOCK boolformula: booleanatom | PAR_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_bool, error_list); - delete $1; if (!$$) YYERROR; } @@ -821,19 +956,19 @@ boolformula: booleanatom $$ = fnode::ff(); } | boolformula OP_AND boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_AND error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_SHORT_AND boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_SHORT_AND error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_STAR boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_STAR error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_OR boolformula - { $$ = fnode::multop(op::Or, {$1, $3}); } + { $$ = pnode(op::Or, $1, $3); } | boolformula OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | boolformula OP_XOR boolformula @@ -856,19 +991,19 @@ boolformula: booleanatom subformula: booleanatom | parenthesedsubformula | subformula OP_AND subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_AND error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_SHORT_AND subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_SHORT_AND error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_STAR subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_STAR error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_OR subformula - { $$ = fnode::multop(op::Or, {$1, $3}); } + { $$ = pnode(op::Or, $1, $3); } | subformula OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | subformula OP_XOR subformula @@ -904,13 +1039,15 @@ subformula: booleanatom | OP_F error { missing_right_op($$, @1, "sometimes operator"); } | OP_FREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_FREP - { $$ = fnode::nested_unop_range(op::X, op::Or, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::Or, n, n, $4); error_list.emplace_back(@1 + @3, "F[n:m] expects two parameters"); } | OP_FREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_FREP - { $$ = fnode::nested_unop_range(op::strong_X, op::Or, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, op::Or, n, n, $4); error_list.emplace_back(@1 + @3, "F[n:m!] 
expects two parameters"); } @@ -966,14 +1103,16 @@ subformula: booleanatom { $$ = fnode::nested_unop_range(op::strong_X, op::And, $2, fnode::unbounded(), $5); } | OP_GREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_GREP - { $$ = fnode::nested_unop_range(op::X, op::And, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::And, n, n, $4); error_list.emplace_back(@1 + @3, "G[n:m] expects two parameters"); } | OP_GREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_GREP - { $$ = fnode::nested_unop_range(op::strong_X, op::And, - $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, op::And, + n, n, $4); error_list.emplace_back(@1 + @3, "G[n:m!] expects two parameters"); } @@ -1003,7 +1142,8 @@ subformula: booleanatom | OP_STRONG_X error { missing_right_op($$, @1, "strong next operator"); } | OP_XREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_XREP - { $$ = fnode::nested_unop_range(op::X, op::Or, $2, $2, $4); } + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::Or, n, n, $4); } | OP_XREP sqbkt_num OP_SQBKT_CLOSE error { missing_right_op($$, @1 + @3, "X[.] operator"); } | OP_XREP error OP_SQBKT_CLOSE subformula %prec OP_XREP @@ -1013,8 +1153,9 @@ subformula: booleanatom { $$ = fnode::unop(op::strong_X, $3); } | OP_XREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_XREP - { $$ = fnode::nested_unop_range(op::strong_X, - op::Or, $2, $2, $4); } + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, + op::Or, n, n, $4); } | OP_XREP error OP_SQBKT_STRONG_CLOSE subformula %prec OP_XREP { error_list.emplace_back(@$, "treating this X[.!] as a simple X[!]"); $$ = fnode::unop(op::strong_X, $4); } @@ -1032,41 +1173,40 @@ subformula: booleanatom | bracedsere parenthesedsubformula { $$ = fnode::binop(op::UConcat, $1, $2); } | bracedsere OP_UCONCAT error - { missing_right_binop_hard($$, $1, @2, - "universal overlapping concat operator"); } + { missing_right_op($$, @2, + "universal overlapping concat operator"); } | bracedsere OP_ECONCAT subformula { $$ = fnode::binop(op::EConcat, $1, $3); } | bracedsere OP_ECONCAT error - { missing_right_binop_hard($$, $1, @2, - "existential overlapping concat operator"); + { missing_right_op($$, @2, + "existential overlapping concat operator"); } | bracedsere OP_UCONCAT_NONO subformula /* {SERE}[]=>EXP = {SERE;1}[]->EXP */ { $$ = fnode::binop(op::UConcat, - fnode::multop(op::Concat, {$1, fnode::tt()}), + pnode(op::Concat, $1, fnode::tt()), $3); } | bracedsere OP_UCONCAT_NONO error - { missing_right_binop_hard($$, $1, @2, - "universal non-overlapping concat operator"); + { missing_right_op($$, @2, + "universal non-overlapping concat operator"); } | bracedsere OP_ECONCAT_NONO subformula /* {SERE}<>=>EXP = {SERE;1}<>->EXP */ { $$ = fnode::binop(op::EConcat, - fnode::multop(op::Concat, {$1, fnode::tt()}), + pnode(op::Concat, $1, fnode::tt()), $3); } | bracedsere OP_ECONCAT_NONO error - { missing_right_binop_hard($$, $1, @2, - "existential non-overlapping concat operator"); + { missing_right_op($$, @2, + "existential non-overlapping concat operator"); } | BRACE_OPEN sere BRACE_BANG_CLOSE /* {SERE}! = {SERE} <>-> 1 */ { $$ = fnode::binop(op::EConcat, $2, fnode::tt()); } | BRA_BANG_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; $$ = fnode::binop(op::EConcat, $$, fnode::tt()); @@ -1076,9 +1216,9 @@ lbtformula: atomprop | '!' 
lbtformula { $$ = fnode::unop(op::Not, $2); } | '&' lbtformula lbtformula - { $$ = fnode::multop(op::And, {$2, $3}); } + { $$ = pnode(op::And, $2, $3); } | '|' lbtformula lbtformula - { $$ = fnode::multop(op::Or, {$2, $3}); } + { $$ = pnode(op::Or, $2, $3); } | '^' lbtformula lbtformula { $$ = fnode::binop(op::Xor, $2, $3); } | 'i' lbtformula lbtformula diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index 34fbfef32..871f1300d 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -130,26 +130,26 @@ eol2 (\n\r)+|(\r\n)+ recursively. */ BEGIN(in_par); parent_level = 1; - yylval->str = new std::string(); + yylval->emplace(); } { "(" { ++parent_level; - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } ")" { if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::PAR_BLOCK; } } - [^()]+ yylval->str->append(yytext, yyleng); + [^()]+ yylval->as().append(yytext, yyleng); <> { unput(')'); if (!missing_parent) @@ -172,38 +172,38 @@ eol2 (\n\r)+|(\r\n)+ recursively. */ BEGIN(in_bra); parent_level = 1; - yylval->str = new std::string(); + yylval->emplace(); } { "{" { ++parent_level; - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } "}"[ \t]*"!" { if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::BRA_BANG_BLOCK; } } "}" { if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::BRA_BLOCK; } } - [^{}]+ yylval->str->append(yytext, yyleng); + [^{}]+ yylval->as().append(yytext, yyleng); <> { unput('}'); if (!missing_parent) @@ -231,35 +231,36 @@ eol2 (\n\r)+|(\r\n)+ /* SVA operators */ "##"[0-9] { - yylval->num = yytext[2] - '0'; + yylval->emplace(yytext[2] - '0'); return token::OP_DELAY_N; } "##"[0-9][0-9] { - yylval->num = - yytext[2] * 10 + yytext[3] - '0' * 11; + yylval->emplace(yytext[2] * 10 + + yytext[3] + - '0' * 11); return token::OP_DELAY_N; } "##"[0-9]{3,} { errno = 0; unsigned long n = strtoul(yytext + 2, 0, 10); - yylval->num = n; - if (errno || yylval->num != n) + yylval->emplace(n); + if (errno || yylval->as() != n) { error_list.push_back( spot::one_parse_error(*yylloc, "value too large ignored")); - yylval->num = 1; + yylval->emplace(1); } - if (yylval->num >= spot::fnode::unbounded()) + if (yylval->as() >= spot::fnode::unbounded()) { auto max = spot::fnode::unbounded() - 1; std::ostringstream s; - s << yylval->num + s << yylval->as() << (" exceeds maximum supported " "repetition (") << max << ")"; error_list.emplace_back(*yylloc, s.str()); - yylval->num = max; + yylval->emplace(max); } return token::OP_DELAY_N; } @@ -288,8 +289,8 @@ eol2 (\n\r)+|(\r\n)+ [0-9]+ { errno = 0; unsigned long n = strtoul(yytext, 0, 10); - yylval->num = n; - if (errno || yylval->num != n) + yylval->emplace(n); + if (errno || yylval->as() != n) { error_list.push_back( spot::one_parse_error(*yylloc, @@ -380,7 +381,7 @@ eol2 (\n\r)+|(\r\n)+ */ [a-zA-EH-LN-QSTYZ_.][a-zA-EH-WYZ0-9_.]* | [a-zA-EH-LN-QSTYZ_.][a-zA-EH-WYZ0-9_.][a-zA-Z0-9_.]* { - yylval->str = new std::string(yytext, yyleng); + yylval->emplace(yytext, yyleng); BEGIN(not_prop); return token::ATOMIC_PROP; } @@ -401,7 +402,7 @@ eol2 (\n\r)+|(\r\n)+ { \" { 
BEGIN(orig_cond); - yylval->str = new std::string(s); + yylval->emplace(s); return token::ATOMIC_PROP; } {eol} { @@ -419,7 +420,7 @@ eol2 (\n\r)+|(\r\n)+ spot::one_parse_error(*yylloc, "unclosed string")); BEGIN(orig_cond); - yylval->str = new std::string(s); + yylval->emplace(s); return token::ATOMIC_PROP; } } @@ -430,7 +431,7 @@ eol2 (\n\r)+|(\r\n)+ for compatibility with ltl2dstar we also accept any alphanumeric string that is not an operator. */ [a-zA-Z._][a-zA-Z0-9._]* { - yylval->str = new std::string(yytext, yyleng); + yylval->emplace(yytext, yyleng); return token::ATOMIC_PROP; } diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 8fe91bf70..fb4ab0d49 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2015-2019, 2021, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -136,7 +136,7 @@ namespace spot // - AndRat(Exps1...,Bool1,Exps2...,Bool2,Exps3...) = // AndRat(And(Bool1,Bool2),Exps1...,Exps2...,Exps3...) // - OrRat(Exps1...,Bool1,Exps2...,Bool2,Exps3...) = - // AndRat(Or(Bool1,Bool2),Exps1...,Exps2...,Exps3...) + // OrRat(Or(Bool1,Bool2),Exps1...,Exps2...,Exps3...) if (!b.empty()) v.insert(v.begin(), fnode::multop(o, std::move(b))); } @@ -588,9 +588,9 @@ namespace spot } else if (min != unbounded()) { - min += min2; - if (SPOT_UNLIKELY(min >= unbounded())) + if (SPOT_UNLIKELY(min + min2 >= unbounded())) break; + min += min2; } if (max2 == unbounded()) { @@ -598,9 +598,9 @@ namespace spot } else if (max != unbounded()) { - max += max2; - if (SPOT_UNLIKELY(max >= unbounded())) + if (SPOT_UNLIKELY(max + max2 >= unbounded())) break; + max += max2; } (*i)->destroy(); i = v.erase(i); diff --git a/tests/Makefile.am b/tests/Makefile.am index afcd0c8d2..c8a722f5c 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -165,6 +165,7 @@ TESTS_tl = \ core/parse.test \ core/parseerr.test \ core/utf8.test \ + core/500.test \ core/length.test \ core/equals.test \ core/tostring.test \ diff --git a/tests/core/500.test b/tests/core/500.test new file mode 100755 index 000000000..60d5c6365 --- /dev/null +++ b/tests/core/500.test @@ -0,0 +1,43 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# The LTL parser used to exhibit a worse-than-quadratic behavior on +# n-ary operators with many children. See issue #500. Before the +# fix, this test would run for ages. 
+ +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x " | s" i; print x;}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x " & s" i; print x;}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x ";s" i; print "{" x "}";}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x ":s" i; print "{" x "}";}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 79a07a17a..8397bbc85 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -375,8 +375,8 @@ diff output expected cat >formulas < outx diff outx exp cat >exp < GFe +trying to create strategy directly for GFe <-> (Fa & Fb & Fc & Fd) direct strategy was found. EOF ltlsynt --ins='a,b,c,d' --outs='e' -f '(Fa & Fb & Fc & Fd) <-> GFe' \ diff --git a/tests/core/tostring.test b/tests/core/tostring.test index e559ea198..7067a8b2c 100755 --- a/tests/core/tostring.test +++ b/tests/core/tostring.test @@ -1,7 +1,7 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2013, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2009-2011, 2013, 2016, 2022 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. 
@@ -63,7 +63,7 @@ X"R" {a;b;{c && d[*]};[+]}[]-> G{a[*]:b[*]} GF!(b & (a | c)) GF!({b && {a | c[*]}}<>-> {{!a}[*]}) -GF({{a | c[*]} & b[*]}[]-> d) +GF({b[*] & {a | c[*]}}[]-> d) {a[*2..3]} {a[*0..1]} {a[*]} From 27d455389efc3272213193c6e493bb2f0f5ae3ac Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Tue, 29 Mar 2022 15:51:31 +0200 Subject: [PATCH 025/606] Correct bug in zielonka Optimization in Zielonka failed under certain circumstances todo: Devise a specialized test for direct attr computation * spot/twaalgos/game.cc: Correction * tests/python/game.py: Test --- spot/twaalgos/game.cc | 46 ++++++--- tests/python/game.py | 211 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 246 insertions(+), 11 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 9b8fdcee9..6bb62500d 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -309,16 +309,21 @@ namespace spot { auto scc_acc = info_->acc_sets_of(c_scc_idx_); // We will override all parities of edges leaving the scc + // Currently game is colored max odd + // So there is at least one color bool added[] = {false, false}; unsigned par_pair[2]; unsigned scc_new_par = std::max(scc_acc.max_set(), 1u); + bool player_color_larger; if (scc_new_par&1) { + player_color_larger = false; par_pair[1] = scc_new_par; par_pair[0] = scc_new_par+1; } else { + player_color_larger = true; par_pair[1] = scc_new_par+1; par_pair[0] = scc_new_par; } @@ -331,6 +336,7 @@ namespace spot for (unsigned v : c_states()) { assert(subgame_[v] == unseen_mark); + bool owner = (*owner_ptr_)[v]; for (auto &e : arena_->out(v)) { // The outgoing edges are taken finitely often @@ -342,14 +348,20 @@ namespace spot e.dst, e.acc); if (w_.winner(e.dst)) { - // Winning region of player -> odd - e.acc = odd_mark; + // Winning region off player -> + // odd mark if player + // else 1 (smallest loosing for env) + e.acc = owner ? odd_mark + : acc_cond::mark_t({1}); added[1] = true; } else { - // Winning region of env -> even - e.acc = even_mark; + // Winning region of env -> + // even mark for env, + // else 0 (smallest loosing for player) + e.acc = !owner ? even_mark + : acc_cond::mark_t({0}); added[0] = true; } // Replace with self-loop @@ -360,13 +372,22 @@ namespace spot // Compute the attractors of the self-loops/transitions leaving scc // These can be directly added to the winning states - // Note: attractors can not intersect therefore the order in which - // they are computed does not matter + // To avoid disregarding edges in attr computation we + // need to start with the larger color + // Todo come up with a test for this unsigned dummy_rd; - for (bool p : {false, true}) - if (added[p]) - attr(dummy_rd, p, par_pair[p], true, par_pair[p]); + for (bool p : {player_color_larger, + !player_color_larger}) + { + if (added[p]) + { + // Always take the larger, + // Otherwise states with an transition to a winning AND + // a loosing scc are treated incorrectly + attr(dummy_rd, p, par_pair[p], true, par_pair[p]); + } + } if (added[0] || added[1]) // Fix "negative" strategy @@ -379,8 +400,11 @@ namespace spot inline bool attr(unsigned &rd, bool p, unsigned max_par, - bool acc_par, unsigned min_win_par) + bool acc_par, unsigned min_win_par, + bool no_check=false) { + // In fix_scc, the attr computation is + // abused so we can not check ertain things // Computes the attractor of the winning set of player p within a // subgame given as rd. 
// If acc_par is true, max_par transitions are also accepting and @@ -394,7 +418,7 @@ namespace spot // As proposed in Oink! / PGSolver // Needs the transposed graph however - assert((!acc_par) || (acc_par && (max_par&1) == p)); + assert((no_check || !acc_par) || (acc_par && (max_par&1) == p)); assert(!acc_par || (0 < min_win_par)); assert((min_win_par <= max_par) && (max_par <= max_abs_par_)); diff --git a/tests/python/game.py b/tests/python/game.py index d7aec2f38..cea09f295 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -63,3 +63,214 @@ State: 7 State: 8 {1} [0] 2 --END--""") + +# Testing case where parity_game optimization +# lead to wrong results +si = spot.synthesis_info() + +game = spot.automaton("""HOA: v1 +States: 27 +Start: 7 +AP: 11 "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" +acc-name: parity max odd 3 +Acceptance: 3 Fin(2) & (Inf(1) | Fin(0)) +properties: trans-labels explicit-labels trans-acc colored +properties: deterministic +spot-state-player: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +controllable-AP: 0 1 2 3 4 5 6 7 +--BODY-- +State: 0 +[t] 8 {0} +State: 1 +[8&9] 8 {0} +[!8&!10 | !9&!10] 9 {0} +[!8&10 | !9&10] 10 {0} +State: 2 +[8&9] 8 {0} +[!8&!10 | !9&!10] 11 {0} +[!8&10 | !9&10] 12 {0} +State: 3 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&10 | !9&10] 14 {0} +[!8&9&!10] 15 {0} +State: 4 +[8&9] 8 {0} +[!8&!10 | !9&!10] 16 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 5 +[8&9] 8 {0} +[!9&!10] 20 {0} +[!8&10 | !9&10] 21 {0} +[!8&9&!10] 22 {0} +State: 6 +[8&9] 8 {0} +[!8&!10 | !9&!10] 23 {0} +[!8&!9&10] 24 {0} +[!8&9&10] 25 {0} +[8&!9&10] 26 {0} +State: 7 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&9&!10] 15 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 8 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | +!0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | +!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | + 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +State: 9 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {2} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 10 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 11 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +State: 12 +[!0&!1&2&!3&!4&5&!6&7 | 
!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 13 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 3 {1} +[!0&!1&2&3&!4&!5&!6&7] 5 {1} +State: 14 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 15 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 4 {1} +[!0&!1&2&3&!4&!5&!6&7] 6 {1} +State: 16 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 17 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 18 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 19 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&6&!7] 6 
{1} +State: 20 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 3 {1} +State: 21 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 22 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 4 {1} +State: 23 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +State: 24 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 4 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 25 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 3 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 26 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&6&!7] 4 {1} +[!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7] 6 {1} +--END--""") + +tc.assertTrue(spot.solve_game(game, si)) + +games = spot.split_edges(game) +spot.set_state_players(games, spot.get_state_players(game)) +tc.assertTrue(spot.solve_game(games, si)) + From 5e1b75197122cdf3d3a461354f42dc577c3e017c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 29 Mar 2022 11:13:19 +0200 Subject: [PATCH 026/606] debian: simplify LTO configuration 
to work around newer libtool Libtool 2.4.7 breaks if AR_FLAGS contains a space. See https://lists.gnu.org/archive/html/bug-libtool/2022-03/msg00009.html * debian/rules: Use gcc-{nm,ar,ranlib} so we do not have to pass the plugin explicitly. --- debian/rules | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/debian/rules b/debian/rules index 0193e9a62..51daf21ed 100755 --- a/debian/rules +++ b/debian/rules @@ -20,20 +20,16 @@ include /usr/share/dpkg/default.mk %: dh $@ --with=python3 -# Find the LTO plugin, which we need to pass to ar, nm, and ranlib. -LTOPLUG := $(shell gcc -v 2>&1 | \ - sed -n 's:COLLECT_LTO_WRAPPER=\(/.*/\)[^/]*:\1:p')liblto_plugin.so - # ARFLAGS is for Automake -# AR_FLAGS is for Libtool -# These activate the LTO pluggin, but also remove the 'u' option -# from ar, since its now ignored with Debian's default to 'D'. -LTOSETUP = \ - LDFLAGS='-fuse-linker-plugin' \ - NM='nm --plugin $(LTOPLUG)' \ - ARFLAGS='cr --plugin $(LTOPLUG)' \ - AR_FLAGS='cr --plugin $(LTOPLUG)' \ - RANLIB='ranlib --plugin $(LTOPLUG)' \ +# AR_FLAGS is for Libtool, (but libtool 2.4.7 will now use ARFLAGS as well) +# The gcc-tools activate the LTO plugin. +LTOSETUP = \ + LDFLAGS='-fuse-linker-plugin' \ + NM='gcc-nm' \ + AR='gcc-ar' \ + ARFLAGS='cr' \ + AR_FLAGS='cr' \ + RANLIB='gcc-ranlib' \ VALGRIND=false GCDADIR := $(shell pwd)/gcda From a211bace688577fe62160ab22b49fabcbdef922b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Apr 2022 15:25:44 +0200 Subject: [PATCH 027/606] autcross: implement --language-complemented MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested by Ondřej Lengál. Fixes #504. * bin/autcross.cc: Implement the --language-complemented option. * NEWS, doc/org/autcross.org: Document it. * tests/core/autcross.test: Test it. * THANKS: Add Ondřej. --- NEWS | 3 +++ THANKS | 1 + bin/autcross.cc | 45 +++++++++++++++++++++++++++++++--------- doc/org/autcross.org | 9 +++++++- tests/core/autcross.test | 16 +++++++++++++- 5 files changed, 62 insertions(+), 12 deletions(-) diff --git a/NEWS b/NEWS index 81aaf9a22..78a14561b 100644 --- a/NEWS +++ b/NEWS @@ -14,6 +14,9 @@ New in spot 2.10.4.dev (net yet released) associated option --sonf-aps allows listing the newly introduced atomic propositions. + - autcross learned a --language-complemented option to assist in the + case one is testing tools that complement automata. (issue #504). + Library: - The new function suffix_operator_normal_form() implements diff --git a/THANKS b/THANKS index 9eb566483..b49b3eb95 100644 --- a/THANKS +++ b/THANKS @@ -41,6 +41,7 @@ Michael Weber Mikuláš Klokočka Ming-Hsien Tsai Nikos Gorogiannis +Ondřej Lengál Paul Guénézan Reuben Rowe Roei Nahum diff --git a/bin/autcross.cc b/bin/autcross.cc index 81b6bcef5..2aade5e49 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020 Laboratoire de Recherche et Développement de +// Copyright (C) 2017-2020, 2022 Laboratoire de Recherche et Développement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -64,6 +64,7 @@ Exit status:\n\ enum { OPT_BOGUS = 256, + OPT_COMPLEMENTED, OPT_CSV, OPT_HIGH, OPT_FAIL_ON_TIMEOUT, @@ -94,6 +95,8 @@ static const argp_option options[] = "consider timeouts as errors", 0 }, { "language-preserved", OPT_LANG, nullptr, 0, "expect that each tool preserves the input language", 0 }, + { "language-complemented", OPT_COMPLEMENTED, nullptr, 0, + "expect that each tool complements the input language", 0 }, { "no-checks", OPT_NOCHECKS, nullptr, 0, "do not perform any sanity checks", 0 }, /**************************************************/ @@ -144,6 +147,7 @@ static bool fail_on_timeout = false; static bool stop_on_error = false; static bool no_checks = false; static bool opt_language_preserved = false; +static bool opt_language_complemented = false; static bool opt_omit = false; static const char* csv_output = nullptr; static unsigned round_num = 0; @@ -170,6 +174,9 @@ parse_opt(int key, char* arg, struct argp_state*) bogus_output_filename = arg; break; } + case OPT_COMPLEMENTED: + opt_language_complemented = true; + break; case OPT_CSV: csv_output = arg ? arg : "-"; break; @@ -533,25 +540,32 @@ namespace const spot::const_twa_graph_ptr& aut_j, size_t i, size_t j) { + auto is_really_comp = [lc = opt_language_complemented, + ts = tools.size()](unsigned i) { + return lc && i == ts; + }; + if (aut_i->num_sets() + aut_j->num_sets() > spot::acc_cond::mark_t::max_accsets()) { if (!quiet) - std::cerr << "info: building " << autname(i) - << '*' << autname(j, true) + std::cerr << "info: building " << autname(i, is_really_comp(i)) + << '*' << autname(j, true ^ is_really_comp(j)) << " requires more acceptance sets than supported\n"; return false; } if (verbose) std::cerr << "info: check_empty " - << autname(i) << '*' << autname(j, true) << '\n'; + << autname(i, is_really_comp(i)) + << '*' << autname(j, true ^ is_really_comp(j)) << '\n'; auto w = aut_i->intersecting_word(aut_j); if (w) { std::ostream& err = global_error(); - err << "error: " << autname(i) << '*' << autname(j, true) + err << "error: " << autname(i, is_really_comp(i)) + << '*' << autname(j, true ^ is_really_comp(j)) << (" is nonempty; both automata accept the infinite word:\n" " "); example() << *w << '\n'; @@ -621,12 +635,15 @@ namespace int problems = 0; size_t m = tools.size(); - size_t mi = m + opt_language_preserved; + size_t mi = m + opt_language_preserved + opt_language_complemented; std::vector pos(mi); std::vector neg(mi); vector_tool_statistics stats(m); - if (opt_language_preserved) + // For --language-complemented, we store the input automata in + // pos and will compute its complement in neg. Before running + // checks we will swap both automata. 
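+      // (Sketch, assuming two tools whose outputs are A0 and A1: after
+      //  that swap, the last slot of pos/neg plays the role of
+      //  Comp(input), so the circular implication check below becomes
+      //  A0 <= A1, A1 <= Comp(input), Comp(input) <= A0.)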
+      if (opt_language_preserved || opt_language_complemented)
         pos[mi - 1] = input;
 
       if (verbose)
@@ -718,6 +735,9 @@ namespace
             };
         }
 
+      if (opt_language_complemented)
+        std::swap(pos[mi - 1], neg[mi - 1]);
+
       // Just make a circular implication check
       // A0 <= A1, A1 <= A2, ..., AN <= A0
       unsigned ok = 0;
@@ -824,10 +844,15 @@ main(int argc, char** argv)
 
   check_no_automaton();
 
-  if (s == 1 && !opt_language_preserved && !no_checks)
-    error(2, 0, "Since --language-preserved is not used, you need "
-          "at least two tools to compare.");
+  if (s == 1 && !no_checks
+      && !opt_language_preserved
+      && !opt_language_complemented)
+    error(2, 0, "Since --language-preserved and --language-complemented "
+          "are not used, you need at least two tools to compare.");
 
+  if (opt_language_preserved && opt_language_complemented)
+    error(2, 0, "Options --language-preserved and --language-complemented "
+          "are incompatible.");
   setup_color();
   setup_sig_handler();
 
diff --git a/doc/org/autcross.org b/doc/org/autcross.org
index 90a268b44..9e4972cf6 100644
--- a/doc/org/autcross.org
+++ b/doc/org/autcross.org
@@ -249,7 +249,7 @@ EOF
 | -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | AF | ok | 0 | 0.0211636 | 2 | 21 | 66 | 84 | 2 | 4 | 0 | 0 | 0 |
 | -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | L2D | ok | 0 | 0.0028508 | 2 | 24 | 74 | 96 | 2 | 4 | 0 | 0 | 0 |
 
-* Language preserving transformation
+* Transformations that preserve or complement languages
 
 By default =autcross= assumes that for a given input the automata
 produced by all tools should be equivalent.  However it does not
@@ -261,6 +261,13 @@ automaton, it is worth to pass the =--language-preserved= option to
 =autfilt=.  Doing so a bit like adding =cat %H>%O= as another tool:
 it will also ensure that the output is equivalent to the input.
 
+Similarly, if the tools being tested implement a complementation
+algorithm, adding the =--language-complemented= option will additionally
+check that each output automaton is the complement of the input.  Using
+this option is more efficient than passing =autfilt --complement= as an
+extra tool, since =autcross= can save on complementation by reusing
+the input automaton.
+
 * Detecting problems
 :PROPERTIES:
 :CUSTOM_ID: checks
diff --git a/tests/core/autcross.test b/tests/core/autcross.test
index 2ac14eb34..b3d27ec0a 100755
--- a/tests/core/autcross.test
+++ b/tests/core/autcross.test
@@ -1,6 +1,6 @@
 #!/bin/sh
 # -*- coding: utf-8 -*-
-# Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement
+# Copyright (C) 2017, 2018, 2022 Laboratoire de Recherche et Développement
 # de l'Epita (LRDE).
 #
 # This file is part of Spot, a model checking library.
@@ -46,3 +46,17 @@ for f in out.csv out2.csv; do
   sed 's/,[0-9]*\.[0-9]*,/,TIME,/' $f > _$f
 done
 diff _out.csv _out2.csv
+
+
+# The {autfilt {complement}} name makes sure we can nest braces.
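+# With --language-complemented and two tools that both complement their
+# input, each of the 10 random automata below should trigger a
+# "check_empty Comp(input)*Comp(A0)" line in the verbose output (checked
+# by the grep), and combining the option with --language-preserved
+# should be rejected as incompatible.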
+randaut -n10 2 | + autcross 'ltl2dstar --complement-input=yes' 'autfilt --complement' \ + --language-complemented --csv=out3.csv --verbose 2>stderr +test 10 = `grep 'check_empty Comp(input)\*Comp(A0)' stderr | wc -l` + + +randaut -n1 2 | + autcross 'ltl2dstar --complement-input=yes' 'autfilt --complement' \ + --language-complemented --language-preserved 2> stderr && exit 1 +cat stderr +grep 'preserved.*complemented.*incompatible' stderr From dfb75632ba50f2243abb12147986fa73079a9db6 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Thu, 24 Mar 2022 09:45:33 +0100 Subject: [PATCH 028/606] Update merge_states Current implementation of merge_states fails on certain self-loops. Updated implementation to take them into account and use a hashbased implementation to speed up calculations. Moreover, merge_states() is now aware of "state-player", just like defrag_states_ * spot/twa/twagraph.cc: Here * spot/twaalgos/game.cc: Fix odd cycle for sink * spot/twaalgos/synthesis.cc: Adapt split_det pipeline * tests/python/_synthesis.ipynb: Tests --- spot/twa/twagraph.cc | 413 ++- spot/twaalgos/game.cc | 11 +- spot/twaalgos/synthesis.cc | 7 +- tests/python/_synthesis.ipynb | 4541 ++++++++++++++++++++++++++++++++- 4 files changed, 4919 insertions(+), 53 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 051514550..b11ca12c5 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -21,13 +21,89 @@ #include #include #include +#include #include #include +#include #include #include using namespace std::string_literals; +namespace +{ + using namespace spot; + // If LAST is false, + // it is guaranteed that there will be another src state + template + void treat(std::vector>& e_idx, + const twa_graph::graph_t::edge_vector_t& e_vec, + std::vector& e_chain, + std::vector& use_for_hash, + unsigned& idx, + unsigned s, + unsigned n_e) + { + assert(s < e_idx.size()); + assert(idx < e_vec.size()); + assert(e_chain.size() == e_vec.size()); + + //std::cout << s << "; " << idx << std::endl; + + // Check if this state has outgoing transitions + if (s != e_vec[idx].src) + // Nothing to do + { + assert(!LAST); + return; + } + + auto& s_idx = e_idx[s]; + s_idx[0] = idx; + + // helper + unsigned sub_idx[] = {-1u, -1u}; + + // All transitions of this state + while (true) + { + assert(idx < e_vec.size() + LAST); + if constexpr (!LAST) + { + if (e_vec[idx].src != s) + break; + } + else + { + if (idx == n_e) + break; + } + + // Argh so many ifs + unsigned which = e_vec[idx].src == e_vec[idx].dst; + if (sub_idx[which] == -1u) + { + // First non-selflooping + sub_idx[which] = idx; + s_idx[1u+which] = idx; + } + else + { + // Continue the chained list + e_chain[sub_idx[which]] = idx; + sub_idx[which] = idx; + } + ++idx; + } + s_idx[3] = idx; + + // Check if self-loops appeared + // If so -> do not use for hash + if constexpr (!SPE) + use_for_hash[s] = s_idx[2] == -1u; + } +} + namespace spot { @@ -306,68 +382,287 @@ namespace spot return true; if (lhs.acc > rhs.acc) return false; + // compare with id? if (bdd_less_than_stable lt; lt(lhs.cond, rhs.cond)) return true; if (rhs.cond != lhs.cond) return false; - // The destination must be sorted last - // for our self-loop optimization to work. 
return lhs.dst < rhs.dst; }); g_.chain_edges_(); + const auto n_states = num_states(); + + // Edges are nicely chained and there are no erased edges + // -> We can work with the edge_vector + + // Check if it is a game <-> "state-player" is defined + // if so, the graph alternates between env and player vertices, + // so there are, by definition, no self-loops + auto sp = get_named_prop>("state-player"); + const auto spe = (bool) sp; + + // The hashing is a bit delicat: We may only use the dst + // if it has no self-loop + auto use_for_hash = spe ? std::vector() + : std::vector(n_states); + + const auto& e_vec = edge_vector(); + const auto n_edges = e_vec.size(); + + // For each state we need 4 indices of the edge vector + // [first, first_non_sfirst_selflooplfloop, first_selfloop, end] + // The init value makes sure nothing is done for dead end states + auto e_idx = + std::vector>(n_states, {-1u, -1u, + -1u, -1u}); + // Like a linked list holding the non-selfloop and selfloop transitions + auto e_chain = std::vector(e_vec.size(), -1u); + + unsigned idx = 1; + + // Edges are sorted with repected to src first + const unsigned n_high = e_vec.back().src; + if (spe) + for (auto s = 0u; s < n_high; ++s) + treat(e_idx, e_vec, e_chain, + use_for_hash, idx, s, n_edges); + else + for (auto s = 0u; s < n_high; ++s) + treat(e_idx, e_vec, e_chain, + use_for_hash, idx, s, n_edges); + // Last one + if (spe) + treat(e_idx, e_vec, e_chain, + use_for_hash, idx, n_high, n_edges); + else + treat(e_idx, e_vec, e_chain, + use_for_hash, idx, n_high, n_edges); + + assert(idx == e_vec.size() && "Something went wrong during indexing"); + + auto n_players = 0u; + if (sp) + n_players = std::accumulate(sp->begin(), sp->end(), 0u); + + // Represents which states share a hash + // Head is in the unordered_map, + // hash_linked_list is like a linked list structure + // of false pointers + + auto hash_linked_list = std::vector(n_states, -1u); + auto s_to_hash = std::vector(n_states, 0); + auto env_map = + robin_hood::unordered_flat_map>(); + auto player_map = + robin_hood::unordered_flat_map>(); + env_map.reserve(n_states - n_players); + player_map.reserve(n_players); + + // Sadly we need to loop the edges twice since we have + // to check for self-loops before hashing + + auto emplace = [&hash_linked_list](auto& m, auto h, auto s) + { + auto it = m.find(h); + if (it == m.end()) + m.emplace(h, std::make_pair(s, s)); + else + { + // "tail" + auto idx = it->second.second; + assert(idx < s && "Must be monotone"); + hash_linked_list[idx] = s; + it->second.second = s; + } + }; + + // Hash all states + constexpr auto SHIFT = sizeof(size_t)/2 * CHAR_BIT; + for (auto s = 0u; s != n_states; ++s) + { + auto h = fnv::init; + const auto e = e_idx[s][3]; + for (auto i = e_idx[s][0]; i != e; ++i) + { + // If size_t has 8byte and unsigned has 4byte + // then this works fine, otherwise there might be more collisions + size_t hh = spe || use_for_hash[e_vec[i].dst] + ? 
e_vec[i].dst + : fnv::init; + hh <<= SHIFT; + hh += e_vec[i].cond.id(); + h ^= hh; + h *= fnv::prime; + h ^= e_vec[i].acc.hash(); + h *= fnv::prime; + } + s_to_hash[s] = h; + if (spe && (*sp)[s]) + emplace(player_map, h, s); + else + emplace(env_map, h, s); + } + // All states that might possible be merged share the same hash + // Info hash coll + //std::cout << "Hash collission rate pre merge: " + // << ((env_map.size()+player_map.size())/((float)n_states)) + // << '\n'; + + // Check whether we can merge two states + // and takes into account the self-loops + auto state_equal = [&](unsigned s1, unsigned s2) + { + auto edge_data_comp = [](const auto& lhs, + const auto& rhs) + { + if (lhs.acc < rhs.acc) + return true; + if (lhs.acc > rhs.acc) + return false; + // todo compare with id + if (bdd_less_than_stable lt; lt(lhs.cond, rhs.cond)) + return true; + return false; + }; + + + static auto checked1 = std::vector(); + static auto checked2 = std::vector(); + + auto [i1, nsl1, sl1, e1] = e_idx[s1]; + auto [i2, nsl2, sl2, e2] = e_idx[s2]; + + if ((e2-i2) != (e1-i1)) + return false; // Different number of outgoing trans + + // checked1/2 is one element larger than necessary + // the last element is always false + // and acts like a nulltermination + checked1.resize(e1-i1+1); + std::fill(checked1.begin(), checked1.end(), false); + checked2.resize(e2-i2+1); + std::fill(checked2.begin(), checked2.end(), false); + + // Try to match self-loops + // Not entirely sure when this helps exactly + while ((sl1 < e1) & (sl2 < e2)) + { + // Like a search in ordered array + if (e_vec[sl1].data() == e_vec[sl2].data()) + { + // Matched + checked1[sl1 - i1] = true; //never touches last element + checked2[sl2 - i2] = true; + // Advance both + sl1 = e_chain[sl1]; + sl2 = e_chain[sl2]; + } + else if (edge_data_comp(e_vec[sl1].data(), + e_vec[sl2].data())) + // sl1 needs to advance + sl1 = e_chain[sl1]; + else + // sl2 needs to advance + sl2 = e_chain[sl2]; + } + + // If there are no non-self-loops, in s1 + // Check if all have been correctly treated + if ((nsl1 > e1) + && std::all_of(checked1.begin(), checked1.end(), + [](const auto& e){return e; })) + return true; + + // The remaining edges need to match exactly + auto idx1 = i1; + auto idx2 = i2; + while (((idx1 < e1) & (idx2 < e2))) + { + // More efficient version? + // Skip checked edges + // Last element serves as break + for (; checked1[idx1 - i1]; ++idx1) + { + } + for (; checked2[idx2 - i2]; ++idx2) + { + } + // If one is out of bounds, so is the other + if (idx1 == e1) + { + assert(idx2 == e2); + break; + } + + + if ((e_vec[idx1].dst != e_vec[idx2].dst) + || !(e_vec[idx1].data() == e_vec[idx2].data())) + return false; + + // Advance + ++idx1; + ++idx2; + } + // All edges have bee paired + return true; + }; + const unsigned nb_states = num_states(); std::vector remap(nb_states, -1U); + for (unsigned i = 0; i != nb_states; ++i) { - auto out1 = out(i); - for (unsigned j = 0; j != i; ++j) + auto j = spe && (*sp)[i] ? 
player_map.at(s_to_hash[i]).first + : env_map.at(s_to_hash[i]).first; + for (; jat(e.dst) == sp->at(remap[e.dst]))) + && "States do not have the same owner"); + e.dst = remap[e.dst]; + } + if (remap[get_init_state_number()] != -1U) set_init_state(remap[get_init_state_number()]); @@ -382,6 +677,10 @@ namespace spot unsigned merged = num_states() - st; if (merged) defrag_states(remap, st); + // Info hash coll 2 + //std::cout << "Hash collission rate post merge: " + // << ((env_map.size()+player_map.size())/((float)num_states())) + // << '\n'; return merged; } @@ -942,8 +1241,36 @@ namespace spot s = newst[s]; } } + // Reassign the state-players + if (auto sp = get_named_prop>("state-player")) + { + const auto ns = (unsigned) used_states; + const auto sps = (unsigned) sp->size(); + assert(ns <= sps); + assert(sps == newst.size()); + + for (unsigned i = 0; i < sps; ++i) + { + if (newst[i] == -1u) + continue; + (*sp)[newst[i]] = (*sp)[i]; + } + sp->resize(ns); + } init_number_ = newst[init_number_]; g_.defrag_states(newst, used_states); + // Make sure we did not mess up the structure + assert([&]() + { + if (auto sp = get_named_prop>("state-player")) + { + for (const auto& e : edges()) + if (sp->at(e.src) == sp->at(e.dst)) + return false; + return true; + } + return true; + }() && "Game not alternating!"); } void twa_graph::remove_unused_ap() diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 6bb62500d..6d319eea8 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -896,10 +896,19 @@ namespace spot arena->new_edge(sink_con, sink_env, bddtrue, um.second); arena->new_edge(sink_env, sink_con, bddtrue, um.second); } - arena->new_edge(src, sink_con, missing, um.second); + arena->new_edge(src, sink_env, missing, um.second); + assert(owner->at(src) != owner->at(sink_env)); } } + assert([&]() + { + for (const auto& e : arena->edges()) + if (owner->at(e.src) == owner->at(e.dst)) + return false; + return true; + }() && "Not alternating"); + arena->set_named_prop("state-player", owner); } diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index dc79f8cce..1708923c6 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -958,6 +958,10 @@ namespace spot *vs << "determinization done\nDPA has " << dpa->num_states() << " states, " << dpa->num_sets() << " colors\n"; + // The named property "state-player" is set in split_2step + // but not propagated by ntgba2dpa + alternate_players(dpa); + // Merge states knows about players dpa->merge_states(); if (bv) bv->paritize_time += sw.stop(); @@ -966,9 +970,6 @@ namespace spot << dpa->num_states() << " states\n" << "determinization and simplification took " << bv->paritize_time << " seconds\n"; - // The named property "state-player" is set in split_2step - // but not propagated by ntgba2dpa - alternate_players(dpa); break; } case algo::ACD: diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 5866057a1..b9e065c18 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -737,7 +737,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f7458055570> >" + " *' at 0x7fcc883a7720> >" ] }, "execution_count": 8, @@ -820,7 +820,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f7458055570> >" + " *' at 0x7fcc883a7720> >" ] }, "execution_count": 9, @@ -944,7 +944,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f74580553c0> >" + " *' at 0x7fcc8833aa80> >" ] }, "execution_count": 10, @@ -1042,7 +1042,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f743a5ca6c0> >" + " *' at 0x7fcc880c2ab0> >" ] 
}, "execution_count": 11, @@ -1210,7 +1210,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f7458059f90> >" + " *' at 0x7fcc8833ae70> >" ] }, "execution_count": 12, @@ -1419,7 +1419,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f7458055870> >" + " *' at 0x7fcc880c2240> >" ] }, "execution_count": 13, @@ -1432,6 +1432,4535 @@ "print(a_s.acc())\n", "a_s" ] + }, + { + "cell_type": "markdown", + "id": "0ee90b2a", + "metadata": {}, + "source": [ + "## A problematic case for merge\n", + "\n", + "This is an example graph for which the self-loop optimisation in merge_states does not work" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "06b20a8c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880bdc30> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "# OK, edge conditions ensure \"correct\" ordering\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "8a2f2e4d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + 
"\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880bdc30> >" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "b40f8ce7", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5210> >" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "# Not OK, all edge equal -> sorted by destination\n", + "# Fails to merge\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, a)\n", + "aut.new_edge(1, 3, a)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, a)\n", + "aut.new_edge(4, 3, a)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "1f596284", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5210> >" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": 
"code", + "execution_count": 18, + "id": "761b4c96", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "11\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "13\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "0\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + 
"Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5d50> >" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(8)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut.new_edge(5, 1, a)\n", + "aut.new_edge(5, 2, b)\n", + "aut.new_edge(5, 3, c)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "d4e09261", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5d50> >" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "4a8ace82", + 
"metadata": {}, + "source": [ + "## Splitting can inhibit merging\n", + "\n", + "In split automata, no self-loops exist.\n", + "Therefore states that can be merged pre-split can not be merged in a split automaton" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c9e38db9", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880cd090> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "22\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "25\n", + "\n", + "\n", + "27\n", + "\n", + "\n", + "29\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "35\n", + "\n", + "\n", + "37\n", + "\n", + "\n", + "39\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "19\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "23\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "26\n", + "\n", + "\n", + "28\n", + "\n", + "\n", + "31\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "36\n", + "\n", + "\n", + "38\n", + "\n", + "\n", + "41\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + 
"12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "19\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "22\n", + "\n", + "\n", + "23\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "25\n", + "\n", + "\n", + "26\n", + "\n", + "\n", + "27\n", + "\n", + "\n", + "28\n", + "\n", + "\n", + "29\n", + "\n", + "\n", + "30\n", + "\n", + "\n", + "31\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "35\n", + "\n", + "\n", + "36\n", + "\n", + "\n", + "37\n", + "\n", + "\n", + "38\n", + "\n", + "\n", + "39\n", + "\n", + "\n", + "40\n", + "\n", + "\n", + "41\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "!a & !b & c\n", + "\n", + "!a & b & !c\n", + "\n", + "!a & b & c\n", + "\n", + "a & !b & !c\n", + "\n", + "a & !b & c\n", + "\n", + "a & b & !c\n", + "\n", + "a & b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "!a & !b & c\n", + "\n", + "!a & b & !c\n", + "\n", + "!a & b & c\n", + "\n", + "a & !b & !c\n", + "\n", + "a & !b & c\n", + "\n", + "a & b & !c\n", + "\n", + "a & b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "next_succ\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + 
"6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "0\n", + "\n", + "\n", + "19\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "23\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "26\n", + "\n", + "0\n", + "\n", + "\n", + "28\n", + "\n", + "0\n", + "\n", + "\n", + "30\n", + "\n", + "\n", + "31\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "36\n", + "\n", + "0\n", + "\n", + "\n", + "38\n", + "\n", + "0\n", + "\n", + "\n", + "40\n", + "\n", + "\n", + "41\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n", + "namedprops\n", + "named properties:\n", + "state-player\n", + "synthesis-outputs\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + 
"\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880bdc00> >" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 1, a)\n", + 
"aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "display(aut)\n", + "\n", + "aut = spot.split_2step(aut, x, False)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "2009f279", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + 
"16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880bdc00> >" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(aut.merge_states())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "17c8d6bc", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880cd2a0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + 
"9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "!b & c\n", + "\n", + "b & !c\n", + "\n", + "b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "!b & c\n", + "\n", + "b & !c\n", + "\n", + "b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "0\n", + "\n", + "\n", + "11\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "15\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n", + "namedprops\n", + "named properties:\n", + "state-player\n", + "synthesis-outputs\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + 
"\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "3->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c55a0> >" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Merging possible even in split case\n", + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "display(aut)\n", + "\n", + "aut = spot.split_2step(aut, x, False)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "b3e90235", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + 
"\n", + "7->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c55a0> >" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(aut.merge_states())\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "05785bb1", + "metadata": {}, + "source": [ + "Fail case for alternate_players" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "df4aa681", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5a50> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc880c5a50> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o = buddy.bdd_ithvar(aut.register_ap(\"o\"))\n", + "\n", + "spot.set_synthesis_outputs(aut, o)\n", + "\n", + "aut.new_states(2)\n", + "aut.new_edge(0,1,i)\n", + "aut.new_edge(1,0,o,spot.mark_t([0]))\n", + "display(aut)\n", + "spot.alternate_players(aut)\n", + "display(aut)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3b2d981", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 524edea8da75486402481ec33c51763374852a3c Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Mon, 4 Apr 2022 08:47:11 +0200 Subject: [PATCH 029/606] Propagate colors in split_2step Reduce the amount of uncolored transitions after split_2step by trying to color the env transitions. This is currently only supported for parity like acceptance conditions. * spot/twaalgos/game.cc: Determinizatio of "colored" game can created trivial self-loops. 
Fix them
* spot/twaalgos/synthesis.cc: Here
* tests/core/ltlsynt.test, tests/python/_synthesis.ipynb,
  tests/python/games.ipynb, tests/python/synthesis.ipynb,
  tests/python/synthesis.py: New and adjusted tests
---
 spot/twaalgos/game.cc         |   17 +-
 spot/twaalgos/synthesis.cc    |   47 +-
 tests/core/ltlsynt.test       |   28 +-
 tests/python/_synthesis.ipynb |    3 +-
 tests/python/games.ipynb      |   24 +-
 tests/python/synthesis.ipynb  | 3582 +++++++++++++++++----------------
 tests/python/synthesis.py     |   16 +-
 7 files changed, 1896 insertions(+), 1821 deletions(-)

diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc
index 6d319eea8..e1d23e381 100644
--- a/spot/twaalgos/game.cc
+++ b/spot/twaalgos/game.cc
@@ -867,7 +867,7 @@ namespace spot
         todo.pop_back();
         seen[src] = true;
         bdd missing = bddtrue;
-        for (const auto& e: arena->out(src))
+        for (auto& e: arena->out(src))
           {
             bool osrc = (*owner)[src];
             if (complete0 && !osrc)
@@ -878,6 +878,21 @@ namespace spot
                 (*owner)[e.dst] = !osrc;
                 todo.push_back(e.dst);
               }
+            else if (e.src == e.dst)
+              {
+                if (e.cond == bddtrue)
+                  {
+                    // Fix trivial self-loop
+                    // No need to add it to seen
+                    auto inter = arena->new_state();
+                    owner->push_back(!osrc);
+                    e.dst = inter;
+                    arena->new_edge(inter, src, bddtrue, e.acc);
+                  }
+                else
+                  throw std::runtime_error("alternate_players(): "
+                                           "Nontrivial selfloop");
+              }
             else if ((*owner)[e.dst] == osrc)
               {
                 delete owner;
diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc
index 1708923c6..ed4915c4b 100644
--- a/spot/twaalgos/synthesis.cc
+++ b/spot/twaalgos/synthesis.cc
@@ -447,9 +447,13 @@ namespace spot
   split_2step(const const_twa_graph_ptr& aut,
               const bdd& output_bdd, bool complete_env)
   {
+    assert(!aut->get_named_prop("state-player")
+           && "aut is already split!");
     auto split = make_twa_graph(aut->get_dict());
     auto [has_unsat, unsat_mark] = aut->acc().unsat_mark();
+    bool max_par, odd_par, color_env;
+    color_env = aut->acc().is_parity(max_par, odd_par, true);

     split->copy_ap_of(aut);
     split->new_states(aut->num_states());
@@ -457,6 +461,7 @@ namespace spot
     set_synthesis_outputs(split, output_bdd);

     const auto use_color = has_unsat;
+    color_env &= use_color;
     if (has_unsat)
       split->copy_acceptance_of(aut);
     else
@@ -490,8 +495,10 @@ namespace spot
     // So we can first loop over the aut
     // and then deduce the owner

-    // a sort of hash-map for all new intermediate states
-    std::unordered_multimap<size_t, unsigned> env_hash;
+    // a sort of hash-map for all new intermediate states
+    // second is the color of the incoming env trans
+    std::unordered_multimap<size_t,
+                            std::pair<unsigned, acc_cond::mark_t>> env_hash;
     env_hash.reserve((int) (1.5 * aut->num_states()));
     // a local map for edges leaving the current src
     // this avoids creating and then combining edges for each minterm
@@ -590,7 +597,7 @@ namespace spot
         auto range_h = env_hash.equal_range(h);
         for (auto it_h = range_h.first; it_h != range_h.second; ++it_h)
           {
-            unsigned i = it_h->second;
+            const auto& [i, this_color] = it_h->second;
             auto out = split->out(i);
             if (std::equal(out.begin(), out.end(),
                            dests.begin(), dests.end(),
@@ -612,9 +619,10 @@ namespace spot
                 if (it != env_edge_hash.end())
                   it->second.second |= one_letter;
                 else
-                  // Uncolored
                   env_edge_hash.emplace(i,
-                    eeh_t(split->new_edge(src, i, bddtrue), one_letter));
+                    eeh_t(split->new_edge(src, i, bddtrue,
+                                          this_color),
+                          one_letter));
                 break;
               }
           }
@@ -622,12 +630,31 @@ namespace spot
         if (to_add)
          {
             unsigned d = split->new_state();
-            unsigned n_e = split->new_edge(src, d, bddtrue);
-            env_hash.emplace(h, d);
+            auto this_color = acc_cond::mark_t({});
+            bool has_uncolored = false;
+            for (const auto &t: dests)
+
{ + split->new_edge(d, t->dst, t->econdout, + use_color ? t->acc + : acc_cond::mark_t({})); + this_color |= t->acc; + has_uncolored |= !t->acc; + } + + if (!color_env | has_uncolored) + this_color = acc_cond::mark_t({}); + else if (max_par) + this_color = + acc_cond::mark_t({this_color.min_set()-1}); + else // min_par + this_color = + acc_cond::mark_t({this_color.max_set()-1}); + + unsigned n_e = split->new_edge(src, d, bddtrue, this_color); + env_hash.emplace(std::piecewise_construct, + std::forward_as_tuple(h), + std::forward_as_tuple(d, this_color)); env_edge_hash.emplace(d, eeh_t(n_e, one_letter)); - for (const auto &t: dests) - split->new_edge(d, t->dst, t->econdout, - use_color ? t->acc : acc_cond::mark_t({})); } } // letters // save locally stored condition diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 8e63344b2..03f7598c9 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -25,23 +25,23 @@ set -e cat >exp <\n", "\n", "a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -804,7 +804,7 @@ "\n", "\n", "a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -818,7 +818,7 @@ "\n", "\n", "!a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -928,7 +928,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f80642eee70> >" + " *' at 0x7f202420db10> >" ] }, "execution_count": 8, @@ -972,10 +972,10 @@ "--BODY--\n", "State: 0\n", "[!1] 5 {1}\n", - "[1] 6 {1}\n", + "[1] 6 {2}\n", "State: 1\n", - "[1] 6 {1}\n", - "[!1] 7 {1}\n", + "[1] 6 {2}\n", + "[!1] 7 {2}\n", "State: 2\n", "[t] 8 {1}\n", "State: 3\n", @@ -1134,7 +1134,7 @@ "\n", "\n", "a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1164,7 +1164,7 @@ "\n", "\n", "a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1178,7 +1178,7 @@ "\n", "\n", "!a\n", - "\n", + "\n", "\n", "\n", "\n", @@ -1288,7 +1288,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f806443b1b0> >" + " *' at 0x7f202420df90> >" ] }, "execution_count": 11, @@ -1324,7 +1324,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 3738e6f72..08af437e2 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,6 +3,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "7a864ea1", "metadata": {}, "outputs": [], "source": [ @@ -13,6 +14,7 @@ }, { "cell_type": "markdown", + "id": "9a294cae", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", @@ -37,6 +39,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "70429a41", "metadata": {}, "outputs": [ { @@ -53,647 +56,647 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & 
i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "1\n", - "\n", + 
"\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb964737d50> >" + " *' at 0x7f01fc12f030> >" ] }, "metadata": {}, @@ -712,6 +715,7 @@ }, { "cell_type": "markdown", + "id": "c02b2d8f", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any 
game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." @@ -720,6 +724,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "d08e7b9f", "metadata": {}, "outputs": [ { @@ -735,586 +740,586 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + " viewBox=\"0.00 0.00 650.45 360.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", 
"\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1336,6 +1341,7 @@ }, { "cell_type": "markdown", + "id": "9590cf55", "metadata": {}, "source": [ "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). 
The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." @@ -1344,6 +1350,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "d6cb467d", "metadata": {}, "outputs": [ { @@ -1359,309 +1366,309 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + 
"\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7870> >" + " *' at 0x7f01fc1b5f00> >" ] }, "metadata": {}, @@ -1680,175 +1687,175 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7de0> >" + " *' at 0x7f01fc12fd80> >" ] }, "metadata": {}, @@ -1867,125 +1874,125 @@ "\n", "\n", - "\n", "\n", - "\n", - 
"\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7630> >" + " *' at 0x7f01fc1b5bd0> >" ] }, "metadata": {}, @@ -2004,81 +2011,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b77b0> >" + " *' at 0x7f01fc1b5cc0> >" ] }, "metadata": {}, @@ -2097,81 +2104,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ 
- " *' at 0x7fb9646b7f00> >" + " *' at 0x7f01fc2a8b70> >" ] }, "metadata": {}, @@ -2190,125 +2197,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b74b0> >" + " *' at 0x7f01fc1b5de0> >" ] }, "metadata": {}, @@ -2342,6 +2349,7 @@ }, { "cell_type": "markdown", + "id": "7ee86443", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2350,6 +2358,7 @@ { "cell_type": "code", "execution_count": 5, + "id": "80510b01", "metadata": {}, "outputs": [ { @@ -2358,260 +2367,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2631,6 +2640,7 @@ }, { "cell_type": "markdown", + "id": "8f97aa04", "metadata": {}, "source": [ "# Converting the separated Mealy machine to AIG\n", @@ -2643,6 +2653,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "9c6d9e8b", "metadata": {}, "outputs": [ { @@ -2651,60 +2662,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2718,6 +2729,7 @@ }, { "cell_type": "markdown", + "id": "d67f8bce", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2726,6 +2738,7 @@ { "cell_type": "code", "execution_count": 7, + "id": "3a363374", "metadata": {}, "outputs": [ { @@ -2734,54 +2747,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2801,6 +2814,7 @@ }, { "cell_type": "markdown", + "id": "e4f607c3", "metadata": {}, "source": [ "To encode the circuit in the AIGER format (ASCII version) use:" @@ -2809,6 +2823,7 @@ { "cell_type": "code", 
"execution_count": 8, + "id": "564f7d0b", "metadata": {}, "outputs": [ { @@ -2832,6 +2847,7 @@ }, { "cell_type": "markdown", + "id": "cf2d4831", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2839,6 +2855,7 @@ }, { "cell_type": "markdown", + "id": "874a108e", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). In that case those \n", @@ -2850,6 +2867,7 @@ { "cell_type": "code", "execution_count": 9, + "id": "1fc4c566", "metadata": {}, "outputs": [ { @@ -2858,159 +2876,159 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7570> >" + " *' at 0x7f01fc14bb10> >" ] }, "metadata": {}, @@ -3022,112 +3040,112 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->4\n", - 
"\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7630> >" + " *' at 0x7f01fc12f090> >" ] }, "metadata": {}, @@ -3139,144 +3157,144 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "t\n", - "[all]\n", + " viewBox=\"0.00 0.00 282.00 148.80\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", @@ -3295,72 +3313,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 142.70 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3382,6 +3400,7 @@ }, { "cell_type": "markdown", + "id": "f8dab019", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3390,6 +3409,7 @@ { "cell_type": "code", "execution_count": 10, + "id": "091d7c97", "metadata": {}, "outputs": [ { @@ -3398,96 +3418,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3500,6 +3520,7 @@ }, { "cell_type": "markdown", + "id": "364c8d76", "metadata": {}, "source": [ "# Combining Mealy machines\n", @@ -3519,6 +3540,7 @@ { "cell_type": "code", "execution_count": 11, + "id": "57b3b51d", "metadata": {}, "outputs": [ { @@ -3534,150 +3556,150 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", + "\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", + "\n", "\n", "\n", "\n", @@ -3703,94 +3725,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3816,108 +3838,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3936,53 +3958,53 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0 & o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0 & !o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646b7c60> >" + " *' at 0x7f01fc14bae0> >" ] }, "metadata": {}, @@ -3994,108 +4016,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4129,6 +4151,7 @@ }, { "cell_type": "markdown", + "id": "7d5a8a32", "metadata": {}, 
"source": [ "# Reading an AIGER-file\n", @@ -4143,6 +4166,7 @@ { "cell_type": "code", "execution_count": 12, + "id": "9da1f39e", "metadata": {}, "outputs": [], "source": [ @@ -4163,6 +4187,7 @@ { "cell_type": "code", "execution_count": 13, + "id": "7295f20a", "metadata": {}, "outputs": [ { @@ -4171,108 +4196,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4287,6 +4312,7 @@ { "cell_type": "code", "execution_count": 14, + "id": "730952f7", "metadata": {}, "outputs": [ { @@ -4315,6 +4341,7 @@ { "cell_type": "code", "execution_count": 15, + "id": "38b5b8a1", "metadata": {}, "outputs": [ { @@ -4331,6 +4358,7 @@ }, { "cell_type": "markdown", + "id": "6bde5eac", "metadata": {}, "source": [ "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." @@ -4339,6 +4367,7 @@ { "cell_type": "code", "execution_count": 16, + "id": "14f89c9b", "metadata": {}, "outputs": [ { @@ -4347,52 +4376,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fb9646c8360> >" + " *' at 0x7f01fc1b5f90> >" ] }, "execution_count": 16, @@ -4406,6 +4435,7 @@ }, { "cell_type": "markdown", + "id": "e1f01aa0", "metadata": {}, "source": [ "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", @@ -4415,6 +4445,7 @@ { "cell_type": "code", "execution_count": 17, + "id": "93e1fc70", "metadata": {}, "outputs": [ { @@ -4423,114 +4454,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4562,6 +4593,7 @@ { "cell_type": "code", "execution_count": 18, + "id": "6cb96c81", "metadata": {}, "outputs": [ { @@ -4570,180 +4602,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4764,7 +4796,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -4778,7 +4810,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 1b1cf4fbb..559dc2d24 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -37,16 +37,16 @@ Start: 0 AP: 1 "a" acc-name: Streett 1 Acceptance: 2 Fin(0) | Inf(1) -properties: trans-labels explicit-labels state-acc colored complete +properties: trans-labels explicit-labels trans-acc colored complete properties: deterministic spot-state-player: 0 1 1 controllable-AP: --BODY-- -State: 0 {0} -[!0] 1 -[0] 2 -State: 1 {0} -[t] 0 -State: 2 {1} -[t] 0 +State: 0 +[!0] 1 {0} +[0] 2 {1} +State: 1 +[t] 0 {0} +State: 2 +[t] 0 {1} --END--""") From 06b73c39faedcb02016bea2e12d99569b6fb5c98 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 6 Apr 2022 21:16:35 +0200 Subject: [PATCH 030/606] +ud option of mealy_machine_to_aig received wrong value Also aiger received a tracing option for debugging * spot/twaalgos/aiger.cc: Here * tests/core/ltlsynt.test: Test --- spot/twaalgos/aiger.cc | 127 +++++++++++++++++++++++++++------------- tests/core/ltlsynt.test | 81 +++++++++++++++++++++++-- 2 files changed, 164 insertions(+), 44 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index d846e678c..660d5b46a 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -41,6 +42,13 @@ #define STR_(x) STR(x) #define STR_LINE STR_(__LINE__) +//#define TRACE +#ifdef TRACE +# define trace std::cerr +#else +# define trace while (0) std::cerr +#endif + namespace { using namespace spot; @@ -459,6 +467,7 @@ namespace spot aig::roll_back_(safe_point sf, bool do_stash) { // todo specialise for safe_all? + trace << "Roll back to sf: " << sf.first << "; " << sf.second << '\n'; safe_stash ss; auto& [gates, vardict, negs] = ss; if (do_stash) @@ -480,6 +489,7 @@ namespace spot // Copy the gates std::copy(and_gates_.begin()+sf.second, and_gates_.end(), gates.begin()); + trace << "Safed " << gates.size() << '\n'; } // 1. Delete all literals // max_var_old was used before @@ -489,6 +499,8 @@ namespace spot // 2. 
Set back the gates and_gates_.erase(and_gates_.begin() + sf.second, and_gates_.end()); max_var_ = sf.first; + trace << "After rollback: \n" << and_gates_.size() << " gates and\n" + << max_var_ << " variables\n\n"; return ss; } @@ -497,6 +509,8 @@ namespace spot { // Do some check_ups auto& [gates, vardict, _] = ss; + trace << "Reapplying sf: " << sf.first << "; " << sf.second + << "\nwith " << gates.size() << " additional gates.\n\n"; assert(gates.size() == vardict.size()); assert(sf.first == max_var_); assert(sf.second == and_gates_.size()); @@ -511,6 +525,7 @@ namespace spot and_gates_.insert(and_gates_.end(), gates.begin(), gates.end()); max_var_ = new_max_var_; + trace << "New Ngates: " << num_gates() << '\n'; } void aig::set_output(unsigned i, unsigned v) @@ -698,7 +713,6 @@ namespace spot while ((prod = cond.next()) != bddfalse) plus_vars_.push_back(cube2var_(prod, use_split_off == 2 ? 0 : use_split_off)); - // Done building -> sum them return aig_or(plus_vars_); } @@ -709,11 +723,20 @@ namespace spot { // Before doing anything else, let us check if one the variables // already exists in which case we are done +#ifdef TRACE + trace << "encoding one of \n"; + for (const auto& c: c_alt) + trace << c << '\n'; +#endif + for (const bdd& cond : c_alt) { auto it = bdd2var_.find(cond.id()); if (it != bdd2var_.end()) - return it->second; + { + trace << "Condition already encoded -> Direct return\n\n"; + return it->second; + } } safe_point sf = get_safe_point_(); @@ -732,9 +755,6 @@ namespace spot && "Cannot convert the given method. " "Only 0,1 and 2 are currently supported"); - const auto negate = use_dual ? std::vector{false} - : std::vector{false, true}; - auto enc_1 = [&](const bdd& b, const char m) { @@ -751,41 +771,60 @@ namespace spot std::vector cond_parts; std::vector cond_vars; - for (bool do_negate : negate) - for (const bdd& b : c_alt) - { - bdd b_used = do_negate ? bdd_not(b) : b; - cond_parts.clear(); - split_cond_(b_used, - use_split_off != 1 ? use_split_off : 0, cond_parts); + //for (bool do_negate : (use_dual ? std::initializer_list{false, true} + // : std::initializer_list{false})) + for (unsigned neg_counter = 0; neg_counter <= 0 + use_dual; ++neg_counter) + { + bool do_negate = neg_counter; + for (const bdd& b : c_alt) + { + bdd b_used = do_negate ? bdd_not(b) : b; + cond_parts.clear(); + split_cond_(b_used, + use_split_off != 1 ? use_split_off : 0, cond_parts); - for (auto m : used_m) - { - cond_vars.clear(); - for (const bdd& cpart : cond_parts) - { - cond_vars.push_back(enc_1(cpart, m)); - if (num_gates() >= ngates_min) - break; // Cannot be optimal - } - // Compute the and if there is still hope - unsigned this_res = -1u; - if (num_gates() < ngates_min) - this_res = aig_and(cond_vars); - - if (num_gates() < ngates_min) - { - // This is the new best - res_var = do_negate ? 
aig_not(this_res) : this_res; - ngates_min = num_gates(); - ss_min = roll_back_(sf, true); - } - else - // Reset the computations - roll_back_(sf, false); - } // Encoding styles - } // alternatives - // end do_negate + for (auto m : used_m) + { + cond_vars.clear(); + for (const bdd& cpart : cond_parts) + { + cond_vars.push_back(enc_1(cpart, m)); + if (num_gates() >= ngates_min) + break; // Cannot be optimal + } + // Compute the and if there is still hope + auto this_res = -1u; + if (num_gates() < ngates_min) + this_res = aig_and(cond_vars); + // Check if after adding these gates + // the circuit is still smaller + if (num_gates() < ngates_min) + { + // This is the new best + assert(this_res != -1u); + res_var = do_negate ? aig_not(this_res) : this_res; + ngates_min = num_gates(); + trace << "Found new best encoding with\nneg: " + << do_negate << "\nmethod: " << (m == 0 ? "INF" + : "ISOP") + << "\nalt: " << b + << "\nNgates: " << num_gates() << "\n\n"; + ss_min = roll_back_(sf, true); + } + else + // Reset the computations + { + trace << "Method \nneg: " + << do_negate << "\nmethod: " << (m == 0 ? "INF" + : "ISOP") + << "\nalt: " << b + << "\nNgates: " << num_gates() + << " discarded.\n\n"; + roll_back_(sf, false); + } + } // Encoding styles + } // alternatives + } // end do_negate // Reapply the best result reapply_(sf, ss_min); @@ -1753,6 +1792,7 @@ namespace bool use_dual = false; bool use_dontcare = false; int use_split_off = 0; + std::string s; }; auto to_treat = [&mode]() @@ -1766,6 +1806,8 @@ namespace while (std::getline(s, buffer, ',')) { tr_opt this_opt; + // Store raw info + this_opt.s = buffer; std::stringstream s2; s2 << buffer; std::getline(s2, buffer2, '+'); @@ -1865,15 +1907,16 @@ namespace }; // Create the vars - std::vector alt_conds(amodedescr.use_dontcare ? 
1 : 2); for (unsigned i = 0; i < n_outs; ++i) { + trace << "Assign out " << i << '\n'; if (circuit.num_gates() > min_gates) break; circuit.set_output(i, bdd2var(out[i], out_dc[i])); } for (unsigned i = 0; i < n_latches; ++i) { + trace << "Assign latch " << i << '\n'; if (circuit.num_gates() > min_gates) break; circuit.set_next_latch(i, bdd2var(latch[i], bddfalse)); @@ -1883,6 +1926,8 @@ namespace // Overwrite the stash if we generated less gates if (circuit.num_gates() < min_gates) { + trace << "New best mode: " << amodedescr.s + << " with Ngates: " << circuit.num_gates() << '\n'; min_gates = circuit.num_gates(); ss = circuit.roll_back_(sf, true); bdd2var_min = bdd2var; @@ -1892,6 +1937,8 @@ namespace } //Use the best sol circuit.reapply_(sf, ss); + trace << "Finished encoding, reasssigning\n" + << "Final gate count is " << circuit.num_gates() << '\n'; // Reset them for (unsigned i = 0; i < n_outs; ++i) circuit.set_output(i, bdd2var_min(out[i], out_dc[i])); diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 03f7598c9..07e2690e7 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -470,10 +470,81 @@ i3 i3 o0 o0 o1 o1 EOF +ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ + --aiger=isop+ud --algo=lar --decompose=no --simpl=no >out +diff out exp + +cat >exp <X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ --aiger=isop --algo=lar --decompose=no --simpl=no >out diff out exp + cat >exp <X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop --algo=lar --decompose=yes --simpl=no >out + --aiger=isop+ud --algo=lar --decompose=yes --simpl=no >out diff out exp ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop --algo=lar --simpl=no >out + --aiger=isop+ud --algo=lar --simpl=no >out diff out exp # Issue #477 @@ -794,8 +865,10 @@ LTL='(((((G (((((((g_0) && (G (! (r_0)))) -> (F (! (g_0)))) && (((g_0) && && ((r_0) R (! (g_0)))) && (G ((r_0) -> (F (g_0))))) && ((r_1) R (! (g_1)))) && (G ((r_1) -> (F (g_1)))))' OUT='g_0, g_1' -ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=acd | grep "aag 8 2 2 2 4" -ltlsynt --outs="$OUT" -f "$LTL" --aiger=both --algo=lar | grep "aag 34 2 3 2 29" +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both+ud\ + --algo=acd | grep "aag 8 2 2 2 4" +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both+ud\ + --algo=lar | grep "aag 34 2 3 2 29" ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ --verbose --realizability 2> out From 5f43c9bfcecabde80fb195e5ab47946b2345893e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 8 Apr 2022 18:50:13 +0200 Subject: [PATCH 031/606] ltlsynt: implement --tlsf to call syfco automatically Fixes #473. * NEWS, doc/org/ltlsynt.org: Mention it. * bin/common_trans.cc, bin/common_trans.hh (read_stdout_of_command): New function. * bin/ltlsynt.cc: Implement the --tlsf option. * tests/core/syfco.test: New file. * tests/Makefile.am: Add it. 
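
For illustration (a usage sketch, not part of this patch): assuming
syfco is installed and SPEC.tlsf is some TLSF specification (the file
name here is hypothetical), the new option is intended to be used as

    ltlsynt --tlsf SPEC.tlsf --realizability

which should behave like the manual pipeline

    ltlsynt --formula="$(syfco -f ltlxba -m fully SPEC.tlsf)" \
            --outs="$(syfco --print-output-signals SPEC.tlsf)" \
            --realizability

(output signals are only queried from syfco when neither --ins nor
--outs is given on the command line).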
--- NEWS | 4 ++ bin/common_trans.cc | 89 +++++++++++++++++++++++++++++++++++++++++++ bin/common_trans.hh | 8 +++- bin/ltlsynt.cc | 73 ++++++++++++++++++++++++++--------- doc/org/ltlsynt.org | 16 +++++--- tests/Makefile.am | 1 + tests/core/syfco.test | 48 +++++++++++++++++++++++ 7 files changed, 214 insertions(+), 25 deletions(-) create mode 100755 tests/core/syfco.test diff --git a/NEWS b/NEWS index 78a14561b..0d165bd95 100644 --- a/NEWS +++ b/NEWS @@ -17,6 +17,10 @@ New in spot 2.10.4.dev (net yet released) - autcross learned a --language-complemented option to assist in the case one is testing tools that complement automata. (issue #504). + - ltlsynt as a new option --tlsf that takes the filename of a TLSF + specification and calls syfco (which must be installed) to convert + it into an LTL formula. + Library: - The new function suffix_operator_normal_form() implements diff --git a/bin/common_trans.cc b/bin/common_trans.cc index a9b823ff4..9ab719a5b 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -27,6 +27,7 @@ #include #include #include +#include #if __has_include() #define HAVE_SPAWN_H 1 #include @@ -461,6 +462,93 @@ autproc_runner::round_automaton(spot::const_twa_graph_ptr aut, unsigned serial) filename_automaton.new_round(aut, serial); } + +std::string +read_stdout_of_command(char* const* args) +{ +#if HAVE_SPAWN_H + int cout_pipe[2]; + if (int err = pipe(cout_pipe)) + error(2, err, "pipe() failed"); + + posix_spawn_file_actions_t actions; + if (int err = posix_spawn_file_actions_init(&actions)) + error(2, err, "posix_spawn_file_actions_init() failed"); + + posix_spawn_file_actions_addclose(&actions, STDIN_FILENO); + posix_spawn_file_actions_addclose(&actions, cout_pipe[0]); + posix_spawn_file_actions_adddup2(&actions, cout_pipe[1], STDOUT_FILENO); + posix_spawn_file_actions_addclose(&actions, cout_pipe[1]); + + pid_t pid; + if (int err = posix_spawnp(&pid, args[0], &actions, nullptr, args, environ)) + error(2, err, "failed to run '%s'", args[0]); + + if (int err = posix_spawn_file_actions_destroy(&actions)) + error(2, err, "posix_spawn_file_actions_destroy() failed"); + + if (close(cout_pipe[1]) < 0) + error(2, errno, "closing write-side of pipe failed"); + + std::string buffer(32, 0); + std::string results; + int bytes_read; + for (;;) + { + static char buffer[512]; + bytes_read = read(cout_pipe[0], buffer, sizeof(buffer)); + if (bytes_read > 0) + results.insert(results.end(), buffer, buffer + bytes_read); + else + break; + } + if (bytes_read < 0) + error(2, bytes_read, "failed to read from pipe"); + + if (cout_pipe[0] < 0) + error(2, errno, "closing read-side of pipe failed"); + + int exit_code = 0; + if (waitpid(pid, &exit_code, 0) == -1) + error(2, errno, "waitpid() failed"); + + if (exit_code) + error(2, 0, "'%s' exited with status %d", args[0], exit_code); + + return results; +#else + // We could provide a pipe+fork+exec alternative implementation, but + // systems without posix_spawn() might also not have fork and exec. + // For instance MinGW does not. So let's fallback to system+tmpfile + // instead for maximum portability. 
+ char prefix[30]; + snprintf(prefix, sizeof prefix, "spot-tmp"); + spot::temporary_file* tmpfile = spot::create_tmpfile(prefix); + std::string tmpname = tmpfile->name(); + std::ostringstream cmd; + for (auto t = args; *t != nullptr; ++t) + spot::quote_shell_string(cmd, *t) << ' '; + cmd << '>'; + spot::quote_shell_string(cmd, tmpfile->name()); + std::string cmdstr = cmd.str(); + int exit_code = system(cmdstr.c_str()); + if (exit_code < 0) + error(2, errno, "failed to execute %s", cmdstr.c_str()); + if (exit_code > 0) + error(2, 0, "'%s' exited with status %d", args[0], exit_code); + + std::ifstream ifs(tmpname, std::ifstream::in); + if (!ifs) + error(2, 0, "failed to open %s (output of %s)", tmpname.c_str(), args[0]); + ifs.exceptions(std::ifstream::failbit | std::ifstream::badbit); + std::stringstream buffer; + buffer << ifs.rdbuf(); + delete tmpfile; + return buffer.str(); +#endif +} + + std::atomic timed_out{false}; unsigned timeout_count = 0; @@ -706,6 +794,7 @@ parse_simple_command(const char* cmd) return res; } + #ifndef HAVE_SPAWN_H static void exec_command(const char* cmd) diff --git a/bin/common_trans.hh b/bin/common_trans.hh index e01131350..31c88c80c 100644 --- a/bin/common_trans.hh +++ b/bin/common_trans.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -175,3 +175,9 @@ int exec_with_timeout(const char* cmd); #define exec_with_timeout(cmd) system(cmd) #define setup_sig_handler() while (0); #endif // !ENABLE_TIMEOUT + +// Run a command (whose args[0], args[1], etc. are given by args), and +// return its captured stdout. Stderr is not captured. Will abort +// with an error message if the command is not found, or if it exit +// with a non-zero status code. +std::string read_stdout_of_command(char* const* args); diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 0e5d765a1..a6abb7c81 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -25,6 +25,7 @@ #include "common_finput.hh" #include "common_setup.hh" #include "common_sys.hh" +#include "common_trans.hh" #include #include @@ -54,6 +55,7 @@ enum OPT_PRINT_HOA, OPT_REAL, OPT_SIMPLIFY, + OPT_TLSF, OPT_VERBOSE, OPT_VERIFY }; @@ -64,10 +66,13 @@ static const argp_option options[] = { nullptr, 0, nullptr, 0, "Input options:", 1 }, { "outs", OPT_OUTPUT, "PROPS", 0, "comma-separated list of controllable (a.k.a. output) atomic" - " propositions", 0}, + " propositions", 0 }, { "ins", OPT_INPUT, "PROPS", 0, "comma-separated list of uncontrollable (a.k.a. 
input) atomic" - " propositions", 0}, + " propositions", 0 }, + { "tlsf", OPT_TLSF, "FILENAME", 0, + "Read a TLSF specification from FILENAME, and call syfco to " + "convert it into LTL", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old|acd", 0, @@ -152,6 +157,8 @@ static const char* opt_print_hoa_args = nullptr; static bool opt_real = false; static bool opt_do_verify = false; static const char* opt_print_aiger = nullptr; +static char* opt_tlsf = nullptr; +static std::string opt_tlsf_string; static spot::synthesis_info* gi; @@ -486,8 +493,8 @@ namespace // If we reach this line // a strategy was found for each subformula assert(mealy_machines.size() == sub_form.size() - && "There are subformula for which no mealy like object" - "has been created."); + && ("There are subformula for which no mealy like object" + " has been created.")); spot::aig_ptr saig = nullptr; spot::twa_graph_ptr tot_strat = nullptr; @@ -646,6 +653,18 @@ namespace }; } +static void +split_aps(std::string arg, std::vector& where) +{ + std::istringstream aps(arg); + std::string ap; + while (std::getline(aps, ap, ',')) + { + ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); + where.push_back(str_tolower(ap)); + } +} + static int parse_opt(int key, char *arg, struct argp_state *) { @@ -671,25 +690,13 @@ parse_opt(int key, char *arg, struct argp_state *) case OPT_INPUT: { all_input_aps.emplace(std::vector{}); - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) - { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_input_aps->push_back(str_tolower(ap)); - } + split_aps(arg, *all_input_aps); break; } case OPT_OUTPUT: { all_output_aps.emplace(std::vector{}); - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) - { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_output_aps->push_back(str_tolower(ap)); - } + split_aps(arg, *all_output_aps); break; } case OPT_PRINT: @@ -710,6 +717,11 @@ parse_opt(int key, char *arg, struct argp_state *) gi->minimize_lvl = XARGMATCH("--simplify", arg, simplify_args, simplify_values); break; + case OPT_TLSF: + if (opt_tlsf) + error(2, 0, "option --tlsf may only be used once"); + opt_tlsf = arg; + break; case OPT_VERBOSE: gi->verbose_stream = &std::cerr; if (not gi->bv) @@ -746,6 +758,29 @@ main(int argc, char **argv) argp_program_doc, children, nullptr, nullptr }; if (int err = argp_parse(&ap, argc, argv, ARGP_NO_HELP, nullptr, nullptr)) exit(err); + + if (opt_tlsf) + { + static char arg0[] = "syfco"; + static char arg1[] = "-f"; + static char arg2[] = "ltlxba"; + static char arg3[] = "-m"; + static char arg4[] = "fully"; + char* command[] = { arg0, arg1, arg2, arg3, arg4, opt_tlsf, nullptr }; + opt_tlsf_string = read_stdout_of_command(command); + jobs.emplace_back(opt_tlsf_string.c_str(), false); + + if (!all_input_aps.has_value() && !all_output_aps.has_value()) + { + static char arg5[] = "--print-output-signals"; + char* command[] = { arg0, arg5, opt_tlsf, nullptr }; + std::string res = read_stdout_of_command(command); + + all_output_aps.emplace(std::vector{}); + split_aps(res, *all_output_aps); + } + } + check_no_formula(); // Check if inputs and outputs are distinct diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index 45b4b2b1c..f05d58309 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -104,14 +104,20 @@ specification language created for the purpose of this 
competition. Fortunately, the SYNTCOMP organizers also provide a tool called [[https://github.com/reactive-systems/syfco][=syfco=]] which can translate a TLSF specification to an LTL formula. -The following four steps show you how a TLSF specification called =FILE= can +The following line shows how a TLSF specification called =FILE= can be synthesized using =syfco= and =ltlsynt=: #+BEGIN_SRC sh :export code -LTL=$(syfco FILE -f ltlxba -m fully) -IN=$(syfco FILE --print-input-signals) -OUT=$(syfco FILE --print-output-signals) -ltlsynt --formula="$LTL" --ins="$IN" --outs="$OUT" +ltlsynt --tlsf FILE +#+END_SRC + +The above =--tlsf= option will call =syfco= to perform the conversion +and extract output signals, as if you had used: + +#+BEGIN_SRC sh :export code +LTL=$(syfco -f ltlxba -m fully FILE) +OUT=$(syfco --print-output-signals FILE) +ltlsynt --formula="$LTL" --outs="$OUT" #+END_SRC * Internal details diff --git a/tests/Makefile.am b/tests/Makefile.am index c8a722f5c..3582a8493 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -342,6 +342,7 @@ TESTS_twa = \ core/parity.test \ core/parity2.test \ core/ltlsynt.test \ + core/syfco.test \ core/rabin2parity.test \ core/twacube.test diff --git a/tests/core/syfco.test b/tests/core/syfco.test new file mode 100755 index 000000000..b63f729a8 --- /dev/null +++ b/tests/core/syfco.test @@ -0,0 +1,48 @@ +#! /bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs || exit 1 +set -e + +# Test that we can pass a tlsf specification to ltlsynt. This +# only work if syfco is installed. + +(syfco --version) || exit 77 + +cat >test.tlsf < X(out)); } +} +EOF + +test REALIZABLE = `ltlsynt --tlsf test.tlsf --realizability` +test UNREALIZABLE = `ltlsynt --tlsf test.tlsf --outs=foo --realizability` +test UNREALIZABLE = `ltlsynt --outs=foo --tlsf test.tlsf --realizability` + +ltlsynt --tlsf test.tlsf --tlsf test.tlsf 2>stderr && exit 0 +grep 'option --tlsf may only be used once' stderr From 55aac8e107a9ebe9c9b416b91d1c1f5e180feffc Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 12 Apr 2022 11:19:49 +0200 Subject: [PATCH 032/606] ltlsynt: display ACD instead of LAR when needed * spot/twaalgos/synthesis.cc: here * tests/core/ltlsynt.test: add test --- spot/twaalgos/synthesis.cc | 3 ++- tests/core/ltlsynt.test | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index ed4915c4b..b249acce9 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1022,7 +1022,8 @@ namespace spot if (bv) bv->paritize_time += sw.stop(); if (vs) - *vs << "LAR construction done in " << bv->paritize_time + *vs << (gi.s == algo::ACD ? 
"ACD" : "LAR") + << " construction done in " << bv->paritize_time << " seconds\nDPA has " << dpa->num_states() << " states, " << dpa->num_sets() << " colors\n"; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 07e2690e7..7a7084099 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -903,3 +903,29 @@ EOF sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +# ACD verbose +cat >exp < GFb) && (Gc)' --outs=b,c --verbose --bypass=no\ + --algo=acd 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp From 62725fb507eb77edab751410c5cba81f273f4092 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 12 Apr 2022 11:21:14 +0200 Subject: [PATCH 033/606] ltlsynt: don't solve games when we want to display them * bin/ltlsynt.cc: here --- bin/ltlsynt.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index a6abb7c81..2c5141557 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -426,6 +426,8 @@ namespace && "Env needs first turn"); } print_game(arena); + if (want_game) + continue; if (!spot::solve_game(arena, *gi)) { std::cout << "UNREALIZABLE" << std::endl; From 355c5ffeb109a2758f10c578369cd3382bcef5ef Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Wed, 13 Apr 2022 10:51:52 +0200 Subject: [PATCH 034/606] ltlsynt: display the number of subformulas * bin/ltlsynt.cc: here * tests/core/ltlsynt.test: ajust tests --- bin/ltlsynt.cc | 6 ++++++ tests/core/ltlsynt.test | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 2c5141557..50bae5f9e 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -336,6 +336,12 @@ namespace if (opt_decompose_ltl) { auto subs = split_independant_formulas(f, output_aps); + if (gi->verbose_stream) + { + *gi->verbose_stream << "there are " + << subs.first.size() + << " subformulas\n"; + } if (subs.first.size() > 1) { sub_form = subs.first; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 7a7084099..1badb9b4b 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -192,6 +192,7 @@ ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ diff out exp cat >exp < GFb direct strategy was found. EOF @@ -200,6 +201,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < GFb tanslating formula done in X seconds direct strategy was found. @@ -211,6 +213,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < (Fa & Fb & Fc & Fd) direct strategy was found. EOF @@ -220,6 +223,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < G(i1 <-> o0) direct strategy might exist but was not found. translating formula done in X seconds @@ -623,6 +627,7 @@ grep "one of --ins or --outs" stderr # Try to find a direct strategy for GFa <-> GFb and a direct strategy for # Gc cat >exp < GFb tanslating formula done in X seconds direct strategy was found. @@ -713,6 +718,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < y direct strategy might exist but was not found. translating formula done in X seconds @@ -744,6 +750,7 @@ diff outx exp # Here, G!(!x | !y) should be Gx & Gy cat >exp <exp < b) should be G(a) & G(!b) cat >exp <exp < (b & c & d) should be # (a => b) & (a => c) & (a => d) cat >exp < b direct strategy might exist but was not found. 
translating formula done in X seconds @@ -842,6 +853,7 @@ diff outx exp # Here, !(F(a | b)) should be G!a & G!b cat >exp < GFb)' --outs=b,c --decompose=yes\ --verbose --realizability 2> out cat >exp < GFb @@ -883,6 +896,7 @@ diff out exp ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ --verbose --realizability --bypass=no 2> out cat >exp <exp < Date: Mon, 7 Feb 2022 16:10:40 +0100 Subject: [PATCH 035/606] remove uses of unary_function and binary_function These were deprecated in C++11, and are supposed to be removed from C++17, however gcc-snapshot just started warning about those. * spot/misc/bddlt.hh, spot/misc/hash.hh, spot/misc/ltstr.hh, spot/twa/taatgba.hh, spot/twaalgos/ltl2tgba_fm.cc: Here. --- spot/misc/bddlt.hh | 11 ++++------- spot/misc/hash.hh | 8 +++----- spot/misc/ltstr.hh | 5 ++--- spot/twa/taatgba.hh | 8 +++----- spot/twaalgos/ltl2tgba_fm.cc | 7 +++---- 5 files changed, 15 insertions(+), 24 deletions(-) diff --git a/spot/misc/bddlt.hh b/spot/misc/bddlt.hh index 327fcd00a..46e24ed33 100644 --- a/spot/misc/bddlt.hh +++ b/spot/misc/bddlt.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2017, 2021 Laboratoire de Recherche et +// Copyright (C) 2011, 2014, 2017, 2021, 2022 Laboratoire de Recherche et // Developpement de l'Epita (LRDE). // Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -33,8 +33,7 @@ namespace spot /// This comparison function use BDD ids for efficiency. An /// algorithm depending on this order may return different results /// depending on how the BDD library has been used before. - struct bdd_less_than : - public std::binary_function + struct bdd_less_than { bool operator()(const bdd& left, const bdd& right) const @@ -50,8 +49,7 @@ namespace spot /// long as the variable order is the same, the output of this /// comparison will be stable and independent on previous BDD /// operations. - struct bdd_less_than_stable : - public std::binary_function + struct bdd_less_than_stable { bool operator()(const bdd& left, const bdd& right) const @@ -62,8 +60,7 @@ namespace spot /// \ingroup misc_tools /// \brief Hash functor for BDDs. - struct bdd_hash : - public std::unary_function + struct bdd_hash { size_t operator()(const bdd& b) const noexcept diff --git a/spot/misc/hash.hh b/spot/misc/hash.hh index b99c4ab53..cad845a68 100644 --- a/spot/misc/hash.hh +++ b/spot/misc/hash.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011, 2014, 2015-2018, 2021 Laboratoire de +// Copyright (C) 2008, 2011, 2014, 2015-2018, 2021, 2022 Laboratoire de // Recherche et Développement de l'Epita (LRDE). // Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de // Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -36,8 +36,7 @@ namespace spot /// \ingroup hash_funcs /// \brief A hash function for pointers. template - struct ptr_hash : - public std::unary_function + struct ptr_hash { // A default constructor is needed if the ptr_hash object is // stored in a const member. This occur with the clang version @@ -59,8 +58,7 @@ namespace spot /// \ingroup hash_funcs /// \brief A hash function that returns identity template - struct identity_hash: - public std::unary_function + struct identity_hash { // A default constructor is needed if the identity_hash object is // stored in a const member. 
diff --git a/spot/misc/ltstr.hh b/spot/misc/ltstr.hh index 25b561b3b..15f7d11ef 100644 --- a/spot/misc/ltstr.hh +++ b/spot/misc/ltstr.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015 Laboratoire de Recherche et Développement +// Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE) // Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -39,8 +39,7 @@ namespace spot /// \code /// std::map seen; /// \endcode - struct char_ptr_less_than: - public std::binary_function + struct char_ptr_less_than { bool operator()(const char* left, const char* right) const diff --git a/spot/twa/taatgba.hh b/spot/twa/taatgba.hh index 9daa4975d..6a5b1c470 100644 --- a/spot/twa/taatgba.hh +++ b/spot/twa/taatgba.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2009, 2011-2019, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -121,9 +121,7 @@ namespace spot std::vector, state_ptr_hash, state_ptr_equal> seen_map; - struct distance_sort : - public std::binary_function + struct distance_sort { bool operator()(const iterator_pair& lhs, const iterator_pair& rhs) const diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 9ad149c1c..3566abc97 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2019, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2008-2019, 2021-2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université // Pierre et Marie Curie. @@ -199,8 +199,7 @@ namespace spot } }; - struct flagged_formula_hash: - public std::unary_function + struct flagged_formula_hash { size_t operator()(const flagged_formula& that) const From 64020279cb9393d7783d72c2c8ae9bcb19923301 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 13 Feb 2022 18:37:34 +0100 Subject: [PATCH 036/606] reduce_parity: fix to work on automata with deleted edges * spot/twaalgos/parity.cc (reduce_parity): Use the size of the edge vector to initialize piprime1 and piprime2, not the number of edges. * tests/python/parity.py: Add test case, based on a report by Yann Thierry-Mieg. --- NEWS | 5 +++++ spot/twaalgos/parity.cc | 9 +++++---- tests/python/parity.py | 16 +++++++++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/NEWS b/NEWS index 6d5fd9fc3..99a239a81 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,11 @@ New in spot 2.10.4.dev (net yet released) Nothing yet. + Bugs fixed: + + - reduce_parity() produced incorrect results when applied to + automata with deleted edges. + New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 35ec10a90..94c7bd922 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2016, 2018, 2019, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -465,8 +465,9 @@ namespace spot // using k=0 or k=1. // // -2 means the edge was never assigned a color. - std::vector piprime1(aut->num_edges() + 1, -2); // k=1 - std::vector piprime2(aut->num_edges() + 1, -2); // k=0 + unsigned evs = aut->edge_vector().size(); + std::vector piprime1(evs, -2); // k=1 + std::vector piprime2(evs, -2); // k=0 bool sba = aut->prop_state_acc().is_true(); auto rec = diff --git a/tests/python/parity.py b/tests/python/parity.py index dfeb24d3d..b0389c40e 100644 --- a/tests/python/parity.py +++ b/tests/python/parity.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018, 2019 Laboratoire de Recherche et Développement de +# Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -155,3 +155,17 @@ except RuntimeError as e: assert "invalid state number" in str(e) else: report_missing_exception() + + +a = spot.automaton("""HOA: v1 name: "F(!p0 | X!p1)" States: 3 +Start: 1 AP: 2 "p0" "p1" acc-name: Buchi Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc complete +properties: deterministic terminal --BODY-- State: 0 [t] 0 {0} State: +1 [!0] 0 [0] 2 State: 2 [!0 | !1] 0 [0&1] 2 --END--""") +# Erase the first edge of state 1 +oi = a.out_iteraser(1) +oi.erase() +# postprocess used to call reduce_parity that did not +# work correctly on automata with deleted edges. +sm = a.postprocess("gen", "small") +assert sm.num_states() == 3 From 7d9fddadceb51b512e294eed7f088aff9e78a4ad Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 16 Feb 2022 11:42:06 +0100 Subject: [PATCH 037/606] work around an issue in Flex 2.6.4 The fallback definition of SIZE_MAX supplied by flex was not preprocessor friendly and prevented robin_hood.hh from doing "#if SIZE_MAX == UINT64_MAX", as observed by Marc Espie on OpenBSD. * spot/parseaut/scanaut.ll, spot/parsetl/scantl.ll: Define __STDC_VERSION__ just so that the code generated by Flex include . * NEWS: Mention the bug. * THANKS: Add Marc. --- NEWS | 3 +++ THANKS | 1 + spot/parseaut/scanaut.ll | 8 ++++++++ spot/parsetl/scantl.ll | 12 ++++++++++-- 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 99a239a81..f2b95e7ba 100644 --- a/NEWS +++ b/NEWS @@ -7,6 +7,9 @@ New in spot 2.10.4.dev (net yet released) - reduce_parity() produced incorrect results when applied to automata with deleted edges. + - work around a portability issue in Flex 2.6.4 preventing + compilation on OpenBSD. + New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/THANKS b/THANKS index 193dccaf7..9eb566483 100644 --- a/THANKS +++ b/THANKS @@ -31,6 +31,7 @@ Joachim Klein Juan Tzintzun Juraj Major Kristin Y. Rozier +Marc Espie Martin Dieguez Lodeiro Matthias Heizmann Maxime Bouton diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index c1c4fb44f..bf35810ed 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -26,6 +26,14 @@ /* %option debug */ %top{ #include "config.h" +/* Flex 2.6.4's test for relies on __STDC_VERSION__ + which is undefined in C++. So without that, it will define + its own integer types, including a broken SIZE_MAX definition. + So let's define __STDC_VERSION__ to make sure gets + included. 
*/ +#if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) +# define __STDC_VERSION__ 199901L +#endif } %{ #include diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index ff15685f8..554c28298 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -1,6 +1,6 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2010-2015, 2017-2019, 2021, Laboratoire de Recherche -** et Développement de l'Epita (LRDE). +** Copyright (C) 2010-2015, 2017-2019, 2021-2022, Laboratoire de +** Recherche et Développement de l'Epita (LRDE). ** Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 ** (LIP6), département Systèmes Répartis Coopératifs (SRC), Université ** Pierre et Marie Curie. @@ -28,6 +28,14 @@ %top{ #include "config.h" +/* Flex 2.6.4's test for relies on __STDC_VERSION__ + which is undefined in C++. So without that, it will define + its own integer types, including a broken SIZE_MAX definition. + So let's define __STDC_VERSION__ to make sure gets + included. */ +#if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) +# define __STDC_VERSION__ 199901L +#endif } %{ #include From 7149e5a34d4dc5374f81833e2f49f77c3a214e1e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 3 Mar 2022 09:10:53 +0100 Subject: [PATCH 038/606] * .gitlab-ci.yml (alpine-gcc): Fix path for logs. --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 583774da0..3c66af0b7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -115,8 +115,9 @@ alpine-gcc: artifacts: when: always paths: - - tests/*/*.log + - ./spot-*/_build/sub/tests/*/*.log - ./*.log + - ./*.tar.gz arch-clang: stage: build From d61d6e5e2fa91046a20acad8820c88d2448a408e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 3 Mar 2022 18:01:11 +0100 Subject: [PATCH 039/606] tests: avoid seq Partial fix for #501. * tests/core/prodchain.test: Hardcode the seq output. * tests/core/bricks.test: Use $AWK instead of seq. * tests/core/defs.in: Define $AWK. * NEWS: Mention the bug. --- NEWS | 3 +++ tests/core/bricks.test | 11 ++++++----- tests/core/defs.in | 3 ++- tests/core/prodchain.test | 5 +++-- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index f2b95e7ba..7f3429991 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,9 @@ New in spot 2.10.4.dev (net yet released) - work around a portability issue in Flex 2.6.4 preventing compilation on OpenBSD. + - Do not use the seq command in test cases, it is not available + everywhere. + New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/tests/core/bricks.test b/tests/core/bricks.test index b98c7e856..37ff57cb0 100644 --- a/tests/core/bricks.test +++ b/tests/core/bricks.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -21,12 +21,13 @@ . ./defs set -e -seq 0 1999 > expected +# The seq command is not always available, but we assume awk is. 
+$AWK 'BEGIN{for(x=0;x<2000;++x) print x;}' >expected ../bricks > stdout -cat stdout | head -n 2000 | awk '{print $2}' | sed 's/{//g' | \ - awk -F',' '{print $1}' | sort -n > map +cat stdout | head -n 2000 | $AWK '{print $2}' | sed 's/{//g' | \ + $AWK -F',' '{print $1}' | sort -n > map diff expected map diff --git a/tests/core/defs.in b/tests/core/defs.in index 7df6fdf77..d06a3b67d 100644 --- a/tests/core/defs.in +++ b/tests/core/defs.in @@ -1,5 +1,5 @@ # -*- mode: shell-script; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2013, 2015 Laboratoire de Recherche +# Copyright (C) 2009, 2010, 2012, 2013, 2015, 2022 Laboratoire de Recherche # et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -57,6 +57,7 @@ case $srcdir in *) srcdir=../$srcdir esac +AWK='@AWK@' DOT='@DOT@' LBTT="@LBTT@" LBTT_TRANSLATE="@LBTT_TRANSLATE@" diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index b5037782f..0a7f1a1d9 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -23,7 +23,8 @@ set -e set x shift -for i in `seq 1 42`; do +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do ltl2tgba "{a[*$i]}[]->GFb" > $i.hoa done for i in *.hoa; do From 734de00bfd54de20ac142aabb90f1fca0cde0d7d Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 10 Mar 2022 15:49:46 +0100 Subject: [PATCH 040/606] tests: don't wipe python environment * tests/run.in: keep original PYTHONPATH contents * NEWS: mention the bug --- NEWS | 3 +++ tests/run.in | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 7f3429991..301711588 100644 --- a/NEWS +++ b/NEWS @@ -13,6 +13,9 @@ New in spot 2.10.4.dev (net yet released) - Do not use the seq command in test cases, it is not available everywhere. + - Do not erase the previous contents of the PYTHONPATH environment + variable when running tests, prepend to it instead. 
+ New in spot 2.10.4 (2022-02-01) Bug fixed: diff --git a/tests/run.in b/tests/run.in index d14bf52a9..7eaa7732c 100755 --- a/tests/run.in +++ b/tests/run.in @@ -104,18 +104,21 @@ export srcdir case $1 in *.ipynb) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ PYTHONIOENCODING=utf-8:surrogateescape \ exec $PREFIXCMD @PYTHON@ @abs_srcdir@/python/ipnbdoctest.py "$@";; *.py) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ "$@";; *.test) exec sh -x "$@";; *.pl) exec $PERL "$@";; *python*|*jupyter*|*pypy*) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD "$@";; *) echo "Unknown extension" >&2 From 2aecf9a79e64c55e8cac803591f73d93552a46e4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 10 Mar 2022 10:53:18 +0100 Subject: [PATCH 041/606] fix typos and make formula_from_bdd more usable in Python * python/spot/impl.i (formula_from_bdd): Instantiate for twa_graph. * spot/twa/twa.hh (register_aps_from_dict): Typo in exception. * tests/python/except.py: More tests for the above. * tests/python/bdddict.py: Typo in comment. --- python/spot/impl.i | 2 ++ spot/twa/twa.hh | 4 ++-- tests/python/bdddict.py | 4 ++-- tests/python/except.py | 38 +++++++++++++++++++++++++++++++++++++- 4 files changed, 43 insertions(+), 5 deletions(-) diff --git a/python/spot/impl.i b/python/spot/impl.i index 12cae6311..21a9a68b8 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -530,6 +530,8 @@ namespace std { %include %include %include +%template(formula_to_bdd) spot::formula_to_bdd; + %include /* These operators may raise exceptions, and we do not want Swig4 to convert those exceptions to NotImplemented. */ diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index cb1e208ec..819a90962 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2013-2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011, 2013-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -761,7 +761,7 @@ namespace spot void register_aps_from_dict() { if (!aps_.empty()) - throw std::runtime_error("register_ap_from_dict() may not be" + throw std::runtime_error("register_aps_from_dict() may not be" " called on an automaton that has already" " registered some AP"); auto& m = get_dict()->bdd_map; diff --git a/tests/python/bdddict.py b/tests/python/bdddict.py index d6222b58f..45c505385 100644 --- a/tests/python/bdddict.py +++ b/tests/python/bdddict.py @@ -17,8 +17,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# Make sure we can leep track of BDD association in Python using bdd_dict, as -# discussed in issue #372. +# Make sure we can keep track of BDD association in Python using bdd_dict, as +# discussed in (deleted???) issue #372. # CPython use reference counting, so that automata are destructed # when we expect them to be. 
However other implementations like diff --git a/tests/python/except.py b/tests/python/except.py index ef6ec3cc5..76ae88b0a 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -24,6 +24,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() def report_missing_exception(): @@ -278,3 +280,37 @@ except OverflowError as e: assert "reversed" in str(e) else: report_missing_exception() + + +a = spot.make_twa_graph() +s = a.new_state() +b = spot.formula_to_bdd("a & b", a.get_dict(), a) +a.new_edge(s, s, b, []) +try: + print(a.to_str('hoa')) +except RuntimeError as e: + tc.assertIn("unregistered atomic propositions", str(e)) +else: + report_missing_exception() + +a.register_aps_from_dict() +tc.assertEqual(a.to_str('hoa'), """HOA: v1 +States: 1 +Start: 0 +AP: 2 "a" "b" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0&1] 0 +--END--""") + +try: + a.register_aps_from_dict() +except RuntimeError as e: + se = str(e) + tc.assertIn("register_aps_from_dict", se) + tc.assertIn("already registered", se) +else: + report_missing_exception() From 968ef0f7b89431f44f6c90c4ed5b2c7b92bb345a Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 15 Mar 2022 14:01:25 +0100 Subject: [PATCH 042/606] ltlsynt: typo in help * bin/ltlsynt.cc: here --- bin/ltlsynt.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 57b67bcc0..c6e53258c 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -65,7 +65,7 @@ static const argp_option options[] = "comma-separated list of controllable (a.k.a. output) atomic" " propositions", 0}, { "ins", OPT_INPUT, "PROPS", 0, - "comma-separated list of controllable (a.k.a. output) atomic" + "comma-separated list of uncontrollable (a.k.a. input) atomic" " propositions", 0}, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, From 96e051d2bb971ebecb27b21db3ea1d9c65a9f3bd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 22 Mar 2022 12:18:25 +0100 Subject: [PATCH 043/606] graph: fix invalid read Reported by Florian Renkin. * spot/graph/graph.hh (sort_edges_of): Fix invalid read when sorting a state without successor. Seen on core/tgbagraph.test. --- spot/graph/graph.hh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 75e0977b7..fa276131d 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2021 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -1243,14 +1243,19 @@ namespace spot //dump_storage(std::cerr); auto pi = [&](unsigned t1, unsigned t2) {return p(edges_[t1], edges_[t2]); }; + + // Sort the outgoing edges of each selected state according + // to predicate p. Do that in place. 
std::vector sort_idx_; - for (unsigned i = 0; i < num_states(); ++i) + unsigned ns = num_states(); + for (unsigned i = 0; i < ns; ++i) { if (to_sort_ptr && !(*to_sort_ptr)[i]) continue; - - sort_idx_.clear(); unsigned t = states_[i].succ; + if (t == 0) + continue; + sort_idx_.clear(); do { sort_idx_.push_back(t); From 53118d9314783ce97a0069e14d390fba9f7f7eed Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 22 Mar 2022 12:22:48 +0100 Subject: [PATCH 044/606] * spot/twaalgos/gfguarantee.hh: Typos in comments. --- spot/twaalgos/gfguarantee.hh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/gfguarantee.hh b/spot/twaalgos/gfguarantee.hh index 5124667f4..40cb16f97 100644 --- a/spot/twaalgos/gfguarantee.hh +++ b/spot/twaalgos/gfguarantee.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -48,7 +48,7 @@ namespace spot /// \brief Convert GF(φ) into a (D)BA if φ is a guarantee property. /// /// If the formula \a gf has the form GΦ where Φ matches either F(φ) - /// or F(φ₁)|F(φ₂)|...|F(φₙ), we translate Φ into A_Φ and attempt to + /// or F(φ₁)&F(φ₂)&...&F(φₙ), we translate Φ into A_Φ and attempt to /// minimize it to a WDBA W_Φ. If the resulting automaton is /// terminal, we then call g_f_terminal_inplace(W_Φ). If \a /// deterministic is not set, we keep the minimized automaton only From d1f49c721a45c58f24c085dfb14e3f70f1c7089d Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Mon, 21 Mar 2022 10:46:42 +0100 Subject: [PATCH 045/606] ltlsynt: don't fail if --outs or --ins is set to empty * bin/ltlsynt.cc: here * tests/core/ltlsynt.test: add tests --- bin/ltlsynt.cc | 58 +++++++++++++++++++++++------------------ tests/core/ltlsynt.test | 8 ++++++ 2 files changed, 40 insertions(+), 26 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index c6e53258c..f5b2ade51 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -135,8 +135,8 @@ Exit status:\n\ 1 if the input problem is not realizable\n\ 2 if any error has been reported"; -static std::vector all_output_aps; -static std::vector all_input_aps; +static std::optional> all_output_aps; +static std::optional> all_input_aps; static const char* opt_csv = nullptr; static bool opt_print_pg = false; @@ -577,12 +577,12 @@ namespace class ltl_processor final : public job_processor { private: - std::vector input_aps_; - std::vector output_aps_; + std::optional> input_aps_; + std::optional> output_aps_; public: - ltl_processor(std::vector input_aps_, - std::vector output_aps_) + ltl_processor(std::optional> input_aps_, + std::optional> output_aps_) : input_aps_(std::move(input_aps_)), output_aps_(std::move(output_aps_)) { @@ -592,11 +592,13 @@ namespace const char* filename, int linenum) override { auto unknown_aps = [](spot::formula f, - const std::vector& known, - const std::vector* known2 = nullptr) + const std::optional>& known, + const std::optional>& known2 = {}) { std::vector unknown; std::set seen; + // If we don't have --ins and --outs, we must not find an AP. 
+ bool can_have_ap = known.has_value(); f.traverse([&](const spot::formula& s) { if (s.is(spot::op::ap)) @@ -604,10 +606,11 @@ namespace if (!seen.insert(s).second) return false; const std::string& a = s.ap_name(); - if (std::find(known.begin(), known.end(), a) == known.end() - && (!known2 + if (!can_have_ap + || (std::find(known->begin(), known->end(), a) == known->end() + && (!known2.has_value() || std::find(known2->begin(), - known2->end(), a) == known2->end())) + known2->end(), a) == known2->end()))) unknown.push_back(a); } return false; @@ -617,30 +620,30 @@ namespace // Decide which atomic propositions are input or output. int res; - if (input_aps_.empty() && !output_aps_.empty()) + if (!input_aps_.has_value() && output_aps_.has_value()) { - res = solve_formula(f, unknown_aps(f, output_aps_), output_aps_); + res = solve_formula(f, unknown_aps(f, output_aps_), *output_aps_); } - else if (output_aps_.empty() && !input_aps_.empty()) + else if (!output_aps_.has_value() && input_aps_.has_value()) { - res = solve_formula(f, input_aps_, unknown_aps(f, input_aps_)); + res = solve_formula(f, *input_aps_, unknown_aps(f, input_aps_)); } - else if (output_aps_.empty() && input_aps_.empty()) + else if (!output_aps_.has_value() && !input_aps_.has_value()) { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "one of --ins or --outs should list '%s'", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } else { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "both --ins and --outs are specified, " "but '%s' is unlisted", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } if (opt_csv) @@ -671,23 +674,25 @@ parse_opt(int key, char *arg, struct argp_state *) break; case OPT_INPUT: { + all_input_aps.emplace(std::vector{}); std::istringstream aps(arg); std::string ap; while (std::getline(aps, ap, ',')) { ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_input_aps.push_back(str_tolower(ap)); + all_input_aps->push_back(str_tolower(ap)); } break; } case OPT_OUTPUT: { + all_output_aps.emplace(std::vector{}); std::istringstream aps(arg); std::string ap; while (std::getline(aps, ap, ',')) { ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_output_aps.push_back(str_tolower(ap)); + all_output_aps->push_back(str_tolower(ap)); } break; } @@ -748,10 +753,11 @@ main(int argc, char **argv) check_no_formula(); // Check if inputs and outputs are distinct - for (const std::string& ai : all_input_aps) - if (std::find(all_output_aps.begin(), all_output_aps.end(), ai) - != all_output_aps.end()) - error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); + if (all_input_aps.has_value() && all_output_aps.has_value()) + for (const std::string& ai : *all_input_aps) + if (std::find(all_output_aps->begin(), all_output_aps->end(), ai) + != all_output_aps->end()) + error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); ltl_processor processor(all_input_aps, all_output_aps); if (int res = processor.run(); res == 0 || res == 1) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 4d318dbae..99d1e92da 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ 
-821,3 +821,11 @@ ltlsynt -f '!(F(a | b))' --outs=b, --decompose=yes \ --verbose --aiger 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +ltlsynt --ins="" -f "GFa" +ltlsynt --outs="" -f "GFb" | grep "UNREALIZABLE" + +ltlsynt --outs="" -f "1" + +ltlsynt --outs="" --ins="" -f "GFa" 2>&1 | \ + grep "both --ins and --outs are specified" \ No newline at end of file From 58f39ec2874c34a7f7e71c5fb683e89ab41ca095 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 25 Mar 2022 09:25:04 +0100 Subject: [PATCH 046/606] * doc/org/tut40.org: Clarify, as suggested by a CAV'22 reviewer. --- doc/org/tut40.org | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/org/tut40.org b/doc/org/tut40.org index b68efe558..8d9b004da 100644 --- a/doc/org/tut40.org +++ b/doc/org/tut40.org @@ -144,9 +144,11 @@ states. We now look at how to create such a game in Python. -Essentially, a game in Spot is just an automaton equiped with a -special property to indicate the owner of each states. So it can be -created using the usual interface: +Essentially, a game in Spot is just an automaton equiped with a [[file:concepts.org::#named-properties][named +property "state-player"]] that hold a Boolean vector indicating the +owner of each state. The game can be created using the usual +automaton interface, and the owners are set by calling +=game.set_state_players()= with a vector of Boolean at the very end. #+NAME: build_game #+BEGIN_SRC python :exports code @@ -173,7 +175,7 @@ created using the usual interface: todo = [] # Create the state (i, j) for a player if it does not exist yet and - # returns the state's number in the game. + # return the state's number in the game. def get_game_state(player, i, j): orig_state = s_orig_states if player else d_orig_states if (i, j) in orig_state: From add2fced4430e01304a2cfae1b81675c7fe8c51f Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Tue, 29 Mar 2022 15:51:31 +0200 Subject: [PATCH 047/606] Correct bug in zielonka Optimization in Zielonka failed under certain circumstances todo: Devise a specialized test for direct attr computation * spot/twaalgos/game.cc: Correction * tests/python/game.py: Test --- spot/twaalgos/game.cc | 46 ++++++--- tests/python/game.py | 212 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 246 insertions(+), 12 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 9b8fdcee9..6bb62500d 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -309,16 +309,21 @@ namespace spot { auto scc_acc = info_->acc_sets_of(c_scc_idx_); // We will override all parities of edges leaving the scc + // Currently game is colored max odd + // So there is at least one color bool added[] = {false, false}; unsigned par_pair[2]; unsigned scc_new_par = std::max(scc_acc.max_set(), 1u); + bool player_color_larger; if (scc_new_par&1) { + player_color_larger = false; par_pair[1] = scc_new_par; par_pair[0] = scc_new_par+1; } else { + player_color_larger = true; par_pair[1] = scc_new_par+1; par_pair[0] = scc_new_par; } @@ -331,6 +336,7 @@ namespace spot for (unsigned v : c_states()) { assert(subgame_[v] == unseen_mark); + bool owner = (*owner_ptr_)[v]; for (auto &e : arena_->out(v)) { // The outgoing edges are taken finitely often @@ -342,14 +348,20 @@ namespace spot e.dst, e.acc); if (w_.winner(e.dst)) { - // Winning region of player -> odd - e.acc = odd_mark; + // Winning region off player -> + // odd mark if player + // else 1 (smallest loosing for env) + e.acc = 
owner ? odd_mark + : acc_cond::mark_t({1}); added[1] = true; } else { - // Winning region of env -> even - e.acc = even_mark; + // Winning region of env -> + // even mark for env, + // else 0 (smallest loosing for player) + e.acc = !owner ? even_mark + : acc_cond::mark_t({0}); added[0] = true; } // Replace with self-loop @@ -360,13 +372,22 @@ namespace spot // Compute the attractors of the self-loops/transitions leaving scc // These can be directly added to the winning states - // Note: attractors can not intersect therefore the order in which - // they are computed does not matter + // To avoid disregarding edges in attr computation we + // need to start with the larger color + // Todo come up with a test for this unsigned dummy_rd; - for (bool p : {false, true}) - if (added[p]) - attr(dummy_rd, p, par_pair[p], true, par_pair[p]); + for (bool p : {player_color_larger, + !player_color_larger}) + { + if (added[p]) + { + // Always take the larger, + // Otherwise states with an transition to a winning AND + // a loosing scc are treated incorrectly + attr(dummy_rd, p, par_pair[p], true, par_pair[p]); + } + } if (added[0] || added[1]) // Fix "negative" strategy @@ -379,8 +400,11 @@ namespace spot inline bool attr(unsigned &rd, bool p, unsigned max_par, - bool acc_par, unsigned min_win_par) + bool acc_par, unsigned min_win_par, + bool no_check=false) { + // In fix_scc, the attr computation is + // abused so we can not check ertain things // Computes the attractor of the winning set of player p within a // subgame given as rd. // If acc_par is true, max_par transitions are also accepting and @@ -394,7 +418,7 @@ namespace spot // As proposed in Oink! / PGSolver // Needs the transposed graph however - assert((!acc_par) || (acc_par && (max_par&1) == p)); + assert((no_check || !acc_par) || (acc_par && (max_par&1) == p)); assert(!acc_par || (0 < min_win_par)); assert((min_win_par <= max_par) && (max_par <= max_abs_par_)); diff --git a/tests/python/game.py b/tests/python/game.py index 9d77c153d..f45bed532 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -61,3 +61,213 @@ State: 7 State: 8 {1} [0] 2 --END--""" + +# Testing case where parity_game optimization +# lead to wrong results +si = spot.synthesis_info() + +game = spot.automaton("""HOA: v1 +States: 27 +Start: 7 +AP: 11 "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" +acc-name: parity max odd 3 +Acceptance: 3 Fin(2) & (Inf(1) | Fin(0)) +properties: trans-labels explicit-labels trans-acc colored +properties: deterministic +spot-state-player: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +controllable-AP: 0 1 2 3 4 5 6 7 +--BODY-- +State: 0 +[t] 8 {0} +State: 1 +[8&9] 8 {0} +[!8&!10 | !9&!10] 9 {0} +[!8&10 | !9&10] 10 {0} +State: 2 +[8&9] 8 {0} +[!8&!10 | !9&!10] 11 {0} +[!8&10 | !9&10] 12 {0} +State: 3 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&10 | !9&10] 14 {0} +[!8&9&!10] 15 {0} +State: 4 +[8&9] 8 {0} +[!8&!10 | !9&!10] 16 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 5 +[8&9] 8 {0} +[!9&!10] 20 {0} +[!8&10 | !9&10] 21 {0} +[!8&9&!10] 22 {0} +State: 6 +[8&9] 8 {0} +[!8&!10 | !9&!10] 23 {0} +[!8&!9&10] 24 {0} +[!8&9&10] 25 {0} +[8&!9&10] 26 {0} +State: 7 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&9&!10] 15 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 8 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | +!0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | +!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | + 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +State: 9 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {2} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 10 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 11 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +State: 12 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 13 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | 
!0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 3 {1} +[!0&!1&2&3&!4&!5&!6&7] 5 {1} +State: 14 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 15 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 4 {1} +[!0&!1&2&3&!4&!5&!6&7] 6 {1} +State: 16 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 17 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 18 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 19 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&6&!7] 6 {1} +State: 20 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 3 {1} +State: 21 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | 
+!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 22 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 4 {1} +State: 23 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +State: 24 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 4 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 25 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 3 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 26 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&6&!7] 4 {1} +[!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7] 6 {1} +--END--""") + +assert spot.solve_game(game, si) + +games = spot.split_edges(game) +spot.set_state_players(games, spot.get_state_players(game)) +assert spot.solve_game(games, si) From fa6912a5745ed02f883fba03406949080a5a997b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 29 Mar 2022 11:13:19 +0200 Subject: [PATCH 048/606] debian: simplify LTO configuration to work around newer libtool Libtool 2.4.7 breaks if AR_FLAGS contains a space. See https://lists.gnu.org/archive/html/bug-libtool/2022-03/msg00009.html * debian/rules: Use gcc-{nm,ar,ranlib} so we do not have to pass the plugin explicitly. 
--- debian/rules | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/debian/rules b/debian/rules index 0193e9a62..51daf21ed 100755 --- a/debian/rules +++ b/debian/rules @@ -20,20 +20,16 @@ include /usr/share/dpkg/default.mk %: dh $@ --with=python3 -# Find the LTO plugin, which we need to pass to ar, nm, and ranlib. -LTOPLUG := $(shell gcc -v 2>&1 | \ - sed -n 's:COLLECT_LTO_WRAPPER=\(/.*/\)[^/]*:\1:p')liblto_plugin.so - # ARFLAGS is for Automake -# AR_FLAGS is for Libtool -# These activate the LTO pluggin, but also remove the 'u' option -# from ar, since its now ignored with Debian's default to 'D'. -LTOSETUP = \ - LDFLAGS='-fuse-linker-plugin' \ - NM='nm --plugin $(LTOPLUG)' \ - ARFLAGS='cr --plugin $(LTOPLUG)' \ - AR_FLAGS='cr --plugin $(LTOPLUG)' \ - RANLIB='ranlib --plugin $(LTOPLUG)' \ +# AR_FLAGS is for Libtool, (but libtool 2.4.7 will now use ARFLAGS as well) +# The gcc-tools activate the LTO plugin. +LTOSETUP = \ + LDFLAGS='-fuse-linker-plugin' \ + NM='gcc-nm' \ + AR='gcc-ar' \ + ARFLAGS='cr' \ + AR_FLAGS='cr' \ + RANLIB='gcc-ranlib' \ VALGRIND=false GCDADIR := $(shell pwd)/gcda From 0f3ffd59cec1f152733c07458078f8a21ca1edc8 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 12 Apr 2022 11:21:14 +0200 Subject: [PATCH 049/606] ltlsynt: don't solve games when we want to display them * bin/ltlsynt.cc: here --- bin/ltlsynt.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index f5b2ade51..5ffa22f39 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -395,6 +395,8 @@ namespace && "Env needs first turn"); } print_game(arena); + if (want_game) + continue; if (!spot::solve_game(arena, *gi)) { std::cout << "UNREALIZABLE" << std::endl; From 385da8ebd0c8ec01c3e69c76830a7a1462996c74 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 May 2022 17:47:53 +0200 Subject: [PATCH 050/606] update NEWS for upcoming release * NEWS: Here. --- NEWS | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 301711588..f857fb97d 100644 --- a/NEWS +++ b/NEWS @@ -1,13 +1,20 @@ New in spot 2.10.4.dev (net yet released) - Nothing yet. - Bugs fixed: - reduce_parity() produced incorrect results when applied to automata with deleted edges. - - work around a portability issue in Flex 2.6.4 preventing + - An optimization of Zielonka could result in incorrect results + in some cases. + + - ltlsynt --print-pg incorrectly solved the game in addition to + printing it. + + - ltlsynt would fail if only one of --ins or --outs was set, and + if it was set empty. + + - Work around a portability issue in Flex 2.6.4 preventing compilation on OpenBSD. - Do not use the seq command in test cases, it is not available @@ -16,6 +23,12 @@ New in spot 2.10.4.dev (net yet released) - Do not erase the previous contents of the PYTHONPATH environment variable when running tests, prepend to it instead. + - Simplify Debian instructions for LTO build to work around newer + libtool version. + + - Fix invalid read in digraph::sort_edges_of_(), currently unused in + Spot. + New in spot 2.10.4 (2022-02-01) Bug fixed: From c70a06ae0adbd16ca9f16f67b6892f7e4306a2e8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 3 May 2022 08:59:17 +0200 Subject: [PATCH 051/606] Release Spot 2.10.5 * NEWS, configure.ac, doc/org/setup.org: Update. 
--- NEWS | 2 +- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index f857fb97d..8f512fcb7 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.10.4.dev (net yet released) +New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/configure.ac b/configure.ac index 31002ccda..5815a2c13 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.4.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.10.5], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 3b8b1b404..c1b7e9235 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.10.4 -#+MACRO: LASTRELEASE 2.10.4 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.4.tar.gz][=spot-2.10.4.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-4/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-02-01 +#+MACRO: SPOTVERSION 2.10.5 +#+MACRO: LASTRELEASE 2.10.5 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.5.tar.gz][=spot-2.10.5.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-5/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-05-03 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From 56666e0db592c3528ddf015f783a81bf77f74fb2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 3 May 2022 09:01:03 +0200 Subject: [PATCH 052/606] * NEWS, configure.ac: Bump version to 2.10.5.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 8f512fcb7..031df5b71 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.10.5.dev (not yet released) + + Nothing yet. + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/configure.ac b/configure.ac index 5815a2c13..6527900ce 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.5], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.10.5.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 4a2bdd6e86e526e976a8c06d11fdf875899a3d7e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 May 2022 17:40:17 +0200 Subject: [PATCH 053/606] Fix link to parity game example Reported by Florian Renkin. * doc/org/index.org: Here. --- doc/org/index.org | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/org/index.org b/doc/org/index.org index f676b8aa4..9af23dba4 100644 --- a/doc/org/index.org +++ b/doc/org/index.org @@ -25,7 +25,7 @@ checking. It has the following notable features: weak-DBA, removal of useless SCCs, acceptance-condition transformations, determinization, [[file:satmin.org][SAT-based minimization of deterministic automata]], [[https://spot.lrde.epita.fr/ipynb/zlktree.html][Alternating Cycle Decomposition]], etc. -- Support for [[file:tut40.org][Safety]] and [[https://spot-dev.lrde.epita.fr/ipynb/games.html][parity games]]. +- Support for [[file:tut40.org][Safety]] and [[https://spot.lrde.epita.fr/ipynb/games.html][parity games]]. 
- Applications to [[file:ltlsynt.org][reactive synthesis]] and [[https://spot.lrde.epita.fr/ipynb/atva16-fig2b.html][model checking]]. - In addition to the C++ interface, most of its algorithms are usable via [[file:tools.org][command-line tools]], and via [[file:tut.org][Python bindings]]. From ef9267a58ef71659c37f94c2c7458c84c51bb195 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 6 May 2022 13:58:52 +0200 Subject: [PATCH 054/606] parsetl: remove a superfluous diagnostic on some erroneous input * tests/core/neverclaimread.test: Adjust and remove FIXME. * spot/parsetl/parsetl.yy (try_recursive_parse): Return false on empty string. --- spot/parsetl/parsetl.yy | 2 +- tests/core/neverclaimread.test | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/spot/parsetl/parsetl.yy b/spot/parsetl/parsetl.yy index e6defffb3..117695404 100644 --- a/spot/parsetl/parsetl.yy +++ b/spot/parsetl/parsetl.yy @@ -296,7 +296,7 @@ using namespace spot; if (str.empty()) { error_list.emplace_back(location, "unexpected empty block"); - return nullptr; + return fnode::ff(); } spot::parsed_formula pf; diff --git a/tests/core/neverclaimread.test b/tests/core/neverclaimread.test index bf736f55d..09af8af58 100755 --- a/tests/core/neverclaimread.test +++ b/tests/core/neverclaimread.test @@ -350,10 +350,8 @@ digraph "-" { } EOF diff stdout expected -# FIXME: the "ignoring trailing garbage" is unwanted cat >expected.err < Date: Mon, 9 May 2022 13:42:20 +0200 Subject: [PATCH 055/606] twagraph: improve copy of kripke_graph Fix #505, Reported by Edmond Irani Liu. * spot/twa/twagraph.cc (copy): Deal with kripke_graph in a better way. * spot/twaalgos/hoa.cc: Do not force the use of named-states since when the input is a kripke_graph. * tests/python/kripke.py: Adjust test cases. * NEWS: Mention the change. * THANKS: Add Edmund. --- NEWS | 5 +++++ THANKS | 1 + spot/twa/twagraph.cc | 13 ++++++++++++- spot/twaalgos/hoa.cc | 7 ++++++- tests/python/kripke.py | 42 ++++++++++++++++++++++++++++++------------ 5 files changed, 54 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index 098a4948b..5bc21f22d 100644 --- a/NEWS +++ b/NEWS @@ -82,6 +82,11 @@ New in spot 2.10.5.dev (not yet released) averted in the parser by delaying the construction of such n-ary nodes until all children are known. + - make_twa_graph() will now preserve state number when copying a + kripke_graph object. As a consequence, print_dot() and + print_hoa() will now use state numbers matching those of the + kripke_graph (issue #505). + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/THANKS b/THANKS index b49b3eb95..4a7259d46 100644 --- a/THANKS +++ b/THANKS @@ -11,6 +11,7 @@ Christian Dax Christopher Ziegler Clément Tamines David Müller +Edmond Irani Liu Ernesto Posse Étienne Renault Fabrice Kordon diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index b11ca12c5..c2bdd5650 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -1600,6 +1601,17 @@ namespace spot return p.first->second; }; + // If the input is a kripke_graph and the number of states is + // not restricted, predeclare all states to keep their + // numbering, and also copy unreachable states. 
+ if (max_states == -1U) + if (auto kg = std::dynamic_pointer_cast(aut)) + { + unsigned ns = kg->num_states(); + for (unsigned s = 0; s < ns; ++s) + new_state(kg->state_from_number(s)); + } + out->set_init_state(new_state(aut->get_init_state())); while (!todo.empty()) { @@ -1638,7 +1650,6 @@ namespace spot } } - auto s = seen.begin(); while (s != seen.end()) { diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 1865a6d49..0e03b07f5 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -31,6 +31,7 @@ #include #include #include +#include using namespace std::string_literals; @@ -973,7 +974,11 @@ namespace spot strcpy(tmpopt, opt); tmpopt[n] = 'k'; tmpopt[n + 1] = 0; - preserve_names = true; + // Preserve names if we have some state names, or if we are + // not a kripke_graph. + auto sn = aut->get_named_prop>("state-names"); + preserve_names = + !!sn || !std::dynamic_pointer_cast(aut); } auto a = std::dynamic_pointer_cast(aut); diff --git a/tests/python/kripke.py b/tests/python/kripke.py index fa92b3fa9..b1669bb78 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -29,35 +29,53 @@ p2 = buddy.bdd_ithvar(k.register_ap("p2")) cond1 = p1 & p2 cond2 = p1 & -p2 cond3 = -p1 & -p2 -s2 = k.new_state(cond1) +s0 = k.new_state(cond1) s1 = k.new_state(cond2) -s3 = k.new_state(cond3) +s2 = k.new_state(cond3) +k.new_edge(s1, s0) +k.new_edge(s0, s0) k.new_edge(s1, s2) k.new_edge(s2, s2) -k.new_edge(s1, s3) -k.new_edge(s3, s3) -k.new_edge(s3, s2) +k.new_edge(s2, s0) k.set_init_state(s1) hoa = """HOA: v1 States: 3 -Start: 0 +Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t properties: state-labels explicit-labels state-acc --BODY-- -State: [0&!1] 0 "1" -1 2 -State: [0&1] 1 "0" -1 -State: [!0&!1] 2 "2" -2 1 +State: [0&1] 0 +0 +State: [0&!1] 1 +0 2 +State: [!0&!1] 2 +2 0 --END--""" tc.assertEqual(hoa, k.to_str('HOA')) tc.assertEqual(k.num_states(), 3) tc.assertEqual(k.num_edges(), 5) +k.set_state_names(["s0", "s1", "s2"]) +hoa = """HOA: v1 +States: 3 +Start: 1 +AP: 2 "p1" "p2" +acc-name: all +Acceptance: 0 t +properties: state-labels explicit-labels state-acc +--BODY-- +State: [0&1] 0 "s0" +0 +State: [0&!1] 1 "s1" +0 2 +State: [!0&!1] 2 "s2" +2 0 +--END--""" +tc.assertEqual(hoa, k.to_str('HOA')) + res = [] for e in k.out(s1): res.append((e.src, e.dst)) From 3b809c0a1471d68d4c42500c6362653ad88efafb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 09:27:30 +0200 Subject: [PATCH 056/606] Fix compilation on MacOS X Patch by Shachar Itzhaky. * spot/parseaut/scanaut.ll, spot/parsetl/scantl.ll: Include libc-config.h instead of config.h. * NEWS: Mention the fix. * THANKS: Add Shachar. --- NEWS | 4 ++++ THANKS | 5 +++-- spot/parseaut/scanaut.ll | 11 +++++++---- spot/parsetl/scantl.ll | 11 +++++++---- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index 5bc21f22d..a9804623f 100644 --- a/NEWS +++ b/NEWS @@ -87,6 +87,10 @@ New in spot 2.10.5.dev (not yet released) print_hoa() will now use state numbers matching those of the kripke_graph (issue #505). + Bug fixed: + + - Fix compilation error on MacOS X. + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/THANKS b/THANKS index 4a7259d46..8a9c1b630 100644 --- a/THANKS +++ b/THANKS @@ -47,10 +47,11 @@ Paul Guénézan Reuben Rowe Roei Nahum Rüdiger Ehlers -Silien Hong -Simon Jantsch +Shachar Itzhaky Shengping Shaw Shufang Zhu +Silien Hong +Simon Jantsch Sonali Dutta Tereza Šťastná Tobias Meggendorfer. 
diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index 8cccaec0e..711c74c64 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -25,12 +25,15 @@ /* %option debug */ %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index 871f1300d..33667a849 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -27,12 +27,15 @@ %option never-interactive %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif From d697f57a97c586afada6ff90f17bdc2cbc4cb408 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 10:49:06 +0200 Subject: [PATCH 057/606] bin: introduce a non-binary job_type This will later help improve the handling of different input types of ltlsynt. * bin/common_finput.hh (job_type): New enum. (job::type): Use it. * bin/autcross.cc, bin/autfilt.cc, bin/common_finput.cc, bin/dstar2tgba.cc, bin/ltl2tgba.cc, bin/ltl2tgta.cc, bin/ltlcross.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlgrind.cc, bin/ltlsynt.cc: Adjust to use the job_type enum instead of a boolean. 
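
For illustration, a minimal self-contained sketch of the pattern adopted
here (simplified: the run_jobs driver and main below are invented and are
not part of the bin/ sources):

#include <iostream>
#include <stdexcept>
#include <vector>

// A scoped enumeration instead of the old "bool file_p", so that a third
// kind of job (an automaton filename) can be told apart from an LTL
// filename without overloading the meaning of a Boolean.
enum class job_type : char { LTL_STRING, LTL_FILENAME, AUT_FILENAME };

struct job
{
  const char* str;
  job_type type;

  job(const char* str, job_type type) noexcept
    : str(str), type(type)
  {
  }
};

// Hypothetical driver, analogous to job_processor::run(): dispatch on the
// job kind with a switch rather than on a true/false flag.
static int run_jobs(const std::vector<job>& jobs)
{
  for (const job& j : jobs)
    switch (j.type)
      {
      case job_type::LTL_STRING:
        std::cout << "formula: " << j.str << '\n';
        break;
      case job_type::LTL_FILENAME:
      case job_type::AUT_FILENAME:
        std::cout << "file: " << j.str << '\n';
        break;
      default:
        throw std::runtime_error("unexpected job type");
      }
  return 0;
}

int main()
{
  return run_jobs({{"GFa", job_type::LTL_STRING},
                   {"aut.hoa", job_type::AUT_FILENAME}});
}
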
--- bin/autcross.cc | 4 ++-- bin/autfilt.cc | 4 ++-- bin/common_finput.cc | 28 ++++++++++++++++++---------- bin/common_finput.hh | 14 +++++++++----- bin/dstar2tgba.cc | 6 +++--- bin/ltl2tgba.cc | 11 +++++------ bin/ltl2tgta.cc | 11 +++++------ bin/ltlcross.cc | 4 ++-- bin/ltldo.cc | 4 ++-- bin/ltlfilt.cc | 6 +++--- bin/ltlgrind.cc | 4 ++-- bin/ltlsynt.cc | 2 +- 12 files changed, 54 insertions(+), 44 deletions(-) diff --git a/bin/autcross.cc b/bin/autcross.cc index 2aade5e49..e2224643d 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -162,7 +162,7 @@ parse_opt(int key, char* arg, struct argp_state*) switch (key) { case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'q': quiet = true; @@ -216,7 +216,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); else tools_push_autproc(arg); break; diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 8fe95c396..74fe44220 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -761,7 +761,7 @@ parse_opt(int key, char* arg, struct argp_state*) automaton_format = Count; break; case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'n': opt_max_count = to_pos_int(arg, "-n/--max-count"); @@ -1252,7 +1252,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case ARGP_KEY_ARG: - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; default: diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 8df1fb028..559a5f312 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2017, 2019, 2021, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -68,10 +68,10 @@ parse_opt_finput(int key, char* arg, struct argp_state*) switch (key) { case 'f': - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, job_type::LTL_STRING); break; case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_LBT: lbt_input = true; @@ -358,10 +358,18 @@ job_processor::run() int error = 0; for (auto& j: jobs) { - if (!j.file_p) - error |= process_string(j.str); - else - error |= process_file(j.str); + switch (j.type) + { + case job_type::LTL_STRING: + error |= process_string(j.str); + break; + case job_type::LTL_FILENAME: + case job_type::AUT_FILENAME: + error |= process_file(j.str); + break; + default: + throw std::runtime_error("unexpected job type"); + } if (abort_run) break; } @@ -376,7 +384,7 @@ void check_no_formula() error(2, 0, "No formula to translate? Run '%s --help' for help.\n" "Use '%s -' to force reading formulas from the standard " "input.", program_name, program_name); - jobs.emplace_back("-", true); + jobs.emplace_back("-", job_type::LTL_FILENAME); } void check_no_automaton() @@ -387,5 +395,5 @@ void check_no_automaton() error(2, 0, "No automaton to process? 
Run '%s --help' for help.\n" "Use '%s -' to force reading automata from the standard " "input.", program_name, program_name); - jobs.emplace_back("-", true); + jobs.emplace_back("-", job_type::AUT_FILENAME); } diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 5d8feb3ed..54ced7f7b 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2017, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -25,13 +25,17 @@ #include #include +enum class job_type : char { LTL_STRING, + LTL_FILENAME, + AUT_FILENAME }; + struct job { const char* str; - bool file_p; // true if str is a filename, false if it is a formula + job_type type; - job(const char* str, bool file_p) noexcept - : str(str), file_p(file_p) + job(const char* str, job_type type) noexcept + : str(str), type(type) { } }; diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 3bf5b9393..1d5cf8762 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -89,7 +89,7 @@ parse_opt(int key, char* arg, struct argp_state*) switch (key) { case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'x': { @@ -99,7 +99,7 @@ parse_opt(int key, char* arg, struct argp_state*) } break; case ARGP_KEY_ARG: - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; default: return ARGP_ERR_UNKNOWN; diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index f3de65a56..ee3d9f777 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -105,10 +105,9 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - if (*arg == '-' && !arg[1]) - jobs.emplace_back(arg, true); - else - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, ((*arg == '-' && !arg[1]) + ? job_type::LTL_FILENAME + : job_type::LTL_STRING)); break; default: diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index ad3a64299..e3f241385 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -148,10 +148,9 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - if (*arg == '-' && !arg[1]) - jobs.emplace_back(arg, true); - else - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, ((*arg == '-' && !arg[1]) + ? 
job_type::LTL_FILENAME + : job_type::LTL_STRING)); break; default: diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index d36478837..396806f96 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -484,7 +484,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); else tools_push_trans(arg); break; diff --git a/bin/ltldo.cc b/bin/ltldo.cc index f57a528b2..705e71105 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -193,7 +193,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); else tools_push_trans(arg); break; diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index af9316192..b74f7bc0c 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -387,7 +387,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_ACCEPT_WORD: try @@ -876,7 +876,7 @@ main(int argc, char** argv) exit(err); if (jobs.empty()) - jobs.emplace_back("-", 1); + jobs.emplace_back("-", job_type::LTL_FILENAME); if (boolean_to_isop && simplification_level == 0) simplification_level = 1; diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index 393656b00..b59569a59 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016, 2017, 2018, 2019 Laboratoire de Recherche et +// Copyright (C) 2014-2019, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -143,7 +143,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? 
- jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_AP2CONST: opt_all = 0; diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 50bae5f9e..0d89c2fc5 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -776,7 +776,7 @@ main(int argc, char **argv) static char arg4[] = "fully"; char* command[] = { arg0, arg1, arg2, arg3, arg4, opt_tlsf, nullptr }; opt_tlsf_string = read_stdout_of_command(command); - jobs.emplace_back(opt_tlsf_string.c_str(), false); + jobs.emplace_back(opt_tlsf_string.c_str(), job_type::LTL_STRING); if (!all_input_aps.has_value() && !all_output_aps.has_value()) { From d35f7bd37cc10e40ae539969e237212def7be566 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:11:23 +0200 Subject: [PATCH 058/606] bin: reset column specification between files * bin/common_finput.cc (job_processor::process_file): Reset col_to_read. * tests/core/ltlfilt.test: Test it. * NEWS: Mention the bug. --- NEWS | 5 ++++- bin/common_finput.cc | 2 ++ tests/core/ltlfilt.test | 16 +++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index a9804623f..204ae4a3b 100644 --- a/NEWS +++ b/NEWS @@ -87,10 +87,13 @@ New in spot 2.10.5.dev (not yet released) print_hoa() will now use state numbers matching those of the kripke_graph (issue #505). - Bug fixed: + Bugs fixed: - Fix compilation error on MacOS X. + - Using -Ffile/N to read column N of a CSV file would not reset the + /N specification for the next file. + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 559a5f312..6f09601e0 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -305,6 +305,8 @@ job_processor::process_stream(std::istream& is, int job_processor::process_file(const char* filename) { + col_to_read = 0; + // Special case for stdin. if (filename[0] == '-' && filename[1] == 0) return process_stream(std::cin, filename); diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 501ae94b2..43d50ce06 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2020 Laboratoire de Recherche et Développement de +# Copyright (C) 2013-2020, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -562,3 +562,17 @@ f1=`genltl --pps-arbiter-standard=2` f2=`genltl --pps-arbiter-strict=2` run 1 ltlfilt -f "$f2" --implied-by "$f1" run 0 ltlfilt -f "$f1" --implied-by "$f2" + + +# Reading two different columns of the same file +echo a,b > file +run 0 ltlfilt -Ffile/1 -Ffile/2 --stats=%f >out +cat >expected < Date: Tue, 17 May 2022 11:17:37 +0200 Subject: [PATCH 059/606] * spot/twa/bdddict.hh (bdd_info): Add noexcept, suggested by gcc 12. --- spot/twa/bdddict.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spot/twa/bdddict.hh b/spot/twa/bdddict.hh index c9b39d8a5..f9c2ed6df 100644 --- a/spot/twa/bdddict.hh +++ b/spot/twa/bdddict.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2011-2017, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), // Université Pierre et Marie Curie. 
@@ -78,7 +78,7 @@ namespace spot enum var_type { anon = 0, var, acc }; struct bdd_info { - bdd_info() : type(anon) {} + bdd_info() noexcept: type(anon) {} var_type type; formula f; // Used unless t==anon. ref_set refs; From 5dab2ede416e8e66583984988e857006b8e76baa Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:37:29 +0200 Subject: [PATCH 060/606] [buddy] remove some unused variables * src/reorder.c (reorder_win2ite): Do not set c=1. --- buddy/src/reorder.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/buddy/src/reorder.c b/buddy/src/reorder.c index d61630801..b107c8b6a 100644 --- a/buddy/src/reorder.c +++ b/buddy/src/reorder.c @@ -210,7 +210,6 @@ static BddTree *reorder_win2ite(BddTree *t) { BddTree *this, *first=t; int lastsize; - int c=1; if (t == NULL) return t; @@ -246,7 +245,6 @@ static BddTree *reorder_win2ite(BddTree *t) if (verbose > 1) printf(" %d nodes\n", reorder_nodenum()); - c++; } while (reorder_nodenum() != lastsize); From 3a234e24ae487fd75175a27c4b80b11cd154bbb8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:43:55 +0200 Subject: [PATCH 061/606] fix warnings from clang-15 * spot/twa/acc.cc (acc_cond::acc_code::symmetries): Fix weird loop. * spot/twaalgos/aiger.cc (aig::circ_step): Replace & by &&. --- spot/twa/acc.cc | 4 ++-- spot/twaalgos/aiger.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 5b7985d70..ce5d463aa 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1029,7 +1029,7 @@ namespace spot int base = ba.allocate_variables(umax+2); assert(base == 0); std::vector r; - for (unsigned i = 0; r.size() < umax; ++i) + while (r.size() < umax) r.emplace_back(bdd_ithvar(base++)); bdd bddcode = to_bdd(&r[0]); bdd tmp; diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 660d5b46a..e3c3bb6c5 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -1398,7 +1398,7 @@ namespace spot { unsigned var_g = gate_var(i); state_[var_g] = state_[and_gates_[i].first] - & state_[and_gates_[i].second]; + && state_[and_gates_[i].second]; state_[aig_not(var_g)] = !state_[var_g]; } // Update latches From e8f496bb6c77d29b241dcaed9d265472cec5f25e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 12:01:11 +0200 Subject: [PATCH 062/606] Fix a clang++15 warning * spot/parseaut/parseaut.yy: Move the try-block inside the code of the constructors, so that they can refer to non-static members. 
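
For illustration, a minimal sketch of the problem (with invented names, not
the real parser classes): in the handler of a constructor function-try-block
the non-static members have already been destroyed, so using one there (as
the old code did with scanner_) is undefined behavior, which is what
clang++ 15 warns about.  Putting the try/catch inside the constructor body
keeps the member alive in the handler.

#include <stdexcept>

// Stand-ins for the flex scanner API used by the real parser.
struct scanner { };
static int open_scanner(const char* name, scanner** s)
{
  (void) name;
  *s = new scanner;
  return 0;
}
static void close_scanner(scanner* s)
{
  delete s;
}

class stream_parser
{
  scanner* scanner_ = nullptr;
public:
  // Problematic shape, kept as a comment: the handler of the constructor
  // function-try-block refers to the non-static member scanner_.
  //
  //   stream_parser(const char* name) try
  //   {
  //     if (open_scanner(name, &scanner_))
  //       throw std::runtime_error("cannot open input");
  //   }
  //   catch (...)
  //   {
  //     close_scanner(scanner_);   // undefined behavior, clang++ 15 warns
  //     throw;
  //   }

  // Fixed shape, as in the patch: try/catch inside the body.
  explicit stream_parser(const char* name)
  {
    try
      {
        if (open_scanner(name, &scanner_))
          throw std::runtime_error("cannot open input");
      }
    catch (...)
      {
        close_scanner(scanner_);
        throw;
      }
  }

  ~stream_parser()
  {
    close_scanner(scanner_);
  }
};

int main()
{
  stream_parser p("some-input");
}
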
--- spot/parseaut/parseaut.yy | 52 ++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 1e3de6781..71ab8aaea 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -2618,45 +2618,51 @@ namespace spot { automaton_stream_parser::automaton_stream_parser(const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(name, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(name, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(int fd, const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(fd, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(fd, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(const char* data, const std::string& filename, automaton_parser_options opt) - try : filename_(filename), opts_(opt) { - hoayystring(data, &scanner_); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + hoayystring(data, &scanner_); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::~automaton_stream_parser() From e4f8226c62d4d3f6eebea4de15aa9261fcae3e00 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 15:25:17 +0200 Subject: [PATCH 063/606] work around spurious gcc 12 "potentially null dereference" The issue seems to be inside std::vector's copy constructor, but it highlighted places in Spot were we could avoid this copy. * spot/twaalgos/ltl2taa.cc: Avoid some copies of std::vector. 
--- spot/twaalgos/ltl2taa.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/ltl2taa.cc b/spot/twaalgos/ltl2taa.cc index 9c10777a9..eaba49e92 100644 --- a/spot/twaalgos/ltl2taa.cc +++ b/spot/twaalgos/ltl2taa.cc @@ -61,7 +61,8 @@ namespace spot { std::vector empty; res_->create_transition(init_, empty); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -76,7 +77,8 @@ namespace spot std::vector empty; taa_tgba::transition* t = res_->create_transition(init_, empty); res_->add_condition(t, f); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -90,7 +92,7 @@ namespace spot return; dst.emplace_back(v.init_); res_->create_transition(init_, dst); - succ_state ss = { dst, formula::tt(), a }; + succ_state ss = { std::move(dst), formula::tt(), std::move(a) }; succ_.emplace_back(ss); return; } @@ -206,7 +208,7 @@ namespace spot } t = res_->create_transition(init_, u); res_->add_condition(t, f); - succ_state ss = { u, f, a }; + succ_state ss = { std::move(u), f, std::move(a) }; succ_.emplace_back(ss); } From a23b30abdcacedcd70387194030c2fb0f072e0c4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 16:59:01 +0200 Subject: [PATCH 064/606] GCC 12 warns too much about See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105562 * m4/gccwarn.m4: Compile a small regex and add -Wno-maybe-uninitialized if needed. --- m4/gccwarn.m4 | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/m4/gccwarn.m4 b/m4/gccwarn.m4 index 4f719e55f..dc6969add 100644 --- a/m4/gccwarn.m4 +++ b/m4/gccwarn.m4 @@ -21,6 +21,7 @@ AC_DEFUN([CF_GXX_WARNINGS], cat > conftest.$ac_ext < +#include int main(int argc, char *argv[[]]) { // This string comparison is here to detect superfluous @@ -33,19 +34,26 @@ int main(int argc, char *argv[[]]) std::string a{"foo"}, b{"bar"}; if (b < a) return 1; + // GCC 12 has spurious warnings about ininialized values in regex. + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105562 + // We need -Wno-maybe-uninitialized in this case. + std::regex r{"a"}; + (void)r; return argv[[argc-1]] == nullptr; } EOF cf_save_CXXFLAGS="$CXXFLAGS" - ac_cv_prog_gxx_warn_flags="-W -Wall" + ac_cv_prog_gxx_warn_flags="-W -Werror" +dnl The following list has options of the form OPT:BAD:GOOD +dnl if -OPT fails we try -OPT -BAD. If -OPT succeeds we add -GOOD. 
for cf_opt in \ - Werror \ + Wall:Wno-maybe-uninitialized:\ Wint-to-void-pointer-cast \ Wzero-as-null-pointer-constant \ Wcast-align \ Wpointer-arith \ Wwrite-strings \ - Wcast-qual \ + Wcast-qual::DXTSTRINGDEFINES \ Wdocumentation \ Wmissing-declarations \ Wnoexcept \ @@ -58,11 +66,26 @@ EOF Wsuggest-override \ Wpedantic do - CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$cf_opt" - if AC_TRY_EVAL(ac_compile); then - ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$cf_opt" - test "$cf_opt" = Wcast-qual && ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -DXTSTRINGDEFINES" - fi + fopt=${cf_opt%%:*} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt" + case $cf_opt in + *:*:);; + *:*:*)ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -${cf_opt##*:}";; + esac + else + case $cf_opt in + *::*);; + *:*:*) + sopt=${cf_opt%:*} + sopt=${sopt#*:} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + fi;; + esac + fi done rm -f conftest* CXXFLAGS="$cf_save_CXXFLAGS"]) From 2a4e68bfae3c5a93f37c4f3ceb3cd23d3deee8e9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 17:43:27 +0200 Subject: [PATCH 065/606] more noexcept to pleace GCC * spot/bricks/brick-hashset (Row): Add noexcept. * bin/autcross.cc (out_statistics): Likewise. * bin/ltlcross.cc (statistics): Likewise. --- bin/autcross.cc | 3 +-- bin/ltlcross.cc | 2 +- spot/bricks/brick-hashset | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bin/autcross.cc b/bin/autcross.cc index e2224643d..21d21f2c7 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -345,7 +345,6 @@ struct in_statistics struct out_statistics { - // If OK is false, output statistics are not available. bool ok; const char* status_str; @@ -353,7 +352,7 @@ struct out_statistics double time; aut_statistics output; - out_statistics() + out_statistics() noexcept : ok(false), status_str(nullptr), status_code(0), diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index 396806f96..8e1005db6 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -264,7 +264,7 @@ end_error() struct statistics { - statistics() + statistics() noexcept : ok(false), alternating(false), status_str(nullptr), diff --git a/spot/bricks/brick-hashset b/spot/bricks/brick-hashset index 1c97c9618..7763d29ae 100644 --- a/spot/bricks/brick-hashset +++ b/spot/bricks/brick-hashset @@ -583,7 +583,7 @@ struct _ConcurrentHashSet : HashSetBase< Cell > return begin() + size(); } - Row() : _data( nullptr ), _size( 0 ) {} + Row() noexcept : _data( nullptr ), _size( 0 ) {} ~Row() { free(); } }; From 9ae2e9c03d1c44c455f8aac5875f9ed7b56594ff Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 May 2022 17:40:17 +0200 Subject: [PATCH 066/606] Fix link to parity game example Reported by Florian Renkin. * doc/org/index.org: Here. --- doc/org/index.org | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/org/index.org b/doc/org/index.org index f676b8aa4..9af23dba4 100644 --- a/doc/org/index.org +++ b/doc/org/index.org @@ -25,7 +25,7 @@ checking. 
It has the following notable features: weak-DBA, removal of useless SCCs, acceptance-condition transformations, determinization, [[file:satmin.org][SAT-based minimization of deterministic automata]], [[https://spot.lrde.epita.fr/ipynb/zlktree.html][Alternating Cycle Decomposition]], etc. -- Support for [[file:tut40.org][Safety]] and [[https://spot-dev.lrde.epita.fr/ipynb/games.html][parity games]]. +- Support for [[file:tut40.org][Safety]] and [[https://spot.lrde.epita.fr/ipynb/games.html][parity games]]. - Applications to [[file:ltlsynt.org][reactive synthesis]] and [[https://spot.lrde.epita.fr/ipynb/atva16-fig2b.html][model checking]]. - In addition to the C++ interface, most of its algorithms are usable via [[file:tools.org][command-line tools]], and via [[file:tut.org][Python bindings]]. From 506442450eaece31d3d8a11ca80c6b2dd85c7579 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 6 May 2022 13:58:52 +0200 Subject: [PATCH 067/606] parsetl: remove a superfluous diagnostic on some erroneous input * tests/core/neverclaimread.test: Adjust and remove FIXME. * spot/parsetl/parsetl.yy (try_recursive_parse): Return false on empty string. --- spot/parsetl/parsetl.yy | 2 +- tests/core/neverclaimread.test | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/spot/parsetl/parsetl.yy b/spot/parsetl/parsetl.yy index bbcdedcb5..dda0866f1 100644 --- a/spot/parsetl/parsetl.yy +++ b/spot/parsetl/parsetl.yy @@ -164,7 +164,7 @@ using namespace spot; if (str.empty()) { error_list.emplace_back(location, "unexpected empty block"); - return nullptr; + return fnode::ff(); } spot::parsed_formula pf; diff --git a/tests/core/neverclaimread.test b/tests/core/neverclaimread.test index bf736f55d..09af8af58 100755 --- a/tests/core/neverclaimread.test +++ b/tests/core/neverclaimread.test @@ -350,10 +350,8 @@ digraph "-" { } EOF diff stdout expected -# FIXME: the "ignoring trailing garbage" is unwanted cat >expected.err < Date: Mon, 9 May 2022 13:42:20 +0200 Subject: [PATCH 068/606] twagraph: improve copy of kripke_graph Fix #505, Reported by Edmond Irani Liu. * spot/twa/twagraph.cc (copy): Deal with kripke_graph in a better way. * spot/twaalgos/hoa.cc: Do not force the use of named-states since when the input is a kripke_graph. * tests/python/kripke.py: Adjust test cases. * NEWS: Mention the change. * THANKS: Add Edmund. --- NEWS | 5 +++++ THANKS | 1 + spot/twa/twagraph.cc | 13 ++++++++++++- spot/twaalgos/hoa.cc | 7 ++++++- tests/python/kripke.py | 42 ++++++++++++++++++++++++++++++------------ 5 files changed, 54 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index 031df5b71..86b03b39c 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,11 @@ New in spot 2.10.5.dev (not yet released) Nothing yet. + - make_twa_graph() will now preserve state number when copying a + kripke_graph object. As a consequence, print_dot() and + print_hoa() will now use state numbers matching those of the + kripke_graph (issue #505). 
+ New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/THANKS b/THANKS index 9eb566483..ea99ede61 100644 --- a/THANKS +++ b/THANKS @@ -11,6 +11,7 @@ Christian Dax Christopher Ziegler Clément Tamines David Müller +Edmond Irani Liu Ernesto Posse Étienne Renault Fabrice Kordon diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 1fbe76f77..ef1c64d2c 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -1275,6 +1276,17 @@ namespace spot return p.first->second; }; + // If the input is a kripke_graph and the number of states is + // not restricted, predeclare all states to keep their + // numbering, and also copy unreachable states. + if (max_states == -1U) + if (auto kg = std::dynamic_pointer_cast(aut)) + { + unsigned ns = kg->num_states(); + for (unsigned s = 0; s < ns; ++s) + new_state(kg->state_from_number(s)); + } + out->set_init_state(new_state(aut->get_init_state())); while (!todo.empty()) { @@ -1313,7 +1325,6 @@ namespace spot } } - auto s = seen.begin(); while (s != seen.end()) { diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 6c474fb2a..e3a378bfc 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -31,6 +31,7 @@ #include #include #include +#include using namespace std::string_literals; @@ -771,7 +772,11 @@ namespace spot strcpy(tmpopt, opt); tmpopt[n] = 'k'; tmpopt[n + 1] = 0; - preserve_names = true; + // Preserve names if we have some state names, or if we are + // not a kripke_graph. + auto sn = aut->get_named_prop>("state-names"); + preserve_names = + !!sn || !std::dynamic_pointer_cast(aut); } auto a = std::dynamic_pointer_cast(aut); diff --git a/tests/python/kripke.py b/tests/python/kripke.py index f3ce218b2..e1fe12169 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -26,35 +26,53 @@ p2 = buddy.bdd_ithvar(k.register_ap("p2")) cond1 = p1 & p2 cond2 = p1 & -p2 cond3 = -p1 & -p2 -s2 = k.new_state(cond1) +s0 = k.new_state(cond1) s1 = k.new_state(cond2) -s3 = k.new_state(cond3) +s2 = k.new_state(cond3) +k.new_edge(s1, s0) +k.new_edge(s0, s0) k.new_edge(s1, s2) k.new_edge(s2, s2) -k.new_edge(s1, s3) -k.new_edge(s3, s3) -k.new_edge(s3, s2) +k.new_edge(s2, s0) k.set_init_state(s1) hoa = """HOA: v1 States: 3 -Start: 0 +Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t properties: state-labels explicit-labels state-acc --BODY-- -State: [0&!1] 0 "1" -1 2 -State: [0&1] 1 "0" -1 -State: [!0&!1] 2 "2" -2 1 +State: [0&1] 0 +0 +State: [0&!1] 1 +0 2 +State: [!0&!1] 2 +2 0 --END--""" assert hoa == k.to_str('HOA') assert k.num_states() == 3 assert k.num_edges() == 5 +k.set_state_names(["s0", "s1", "s2"]) +hoa = """HOA: v1 +States: 3 +Start: 1 +AP: 2 "p1" "p2" +acc-name: all +Acceptance: 0 t +properties: state-labels explicit-labels state-acc +--BODY-- +State: [0&1] 0 "s0" +0 +State: [0&!1] 1 "s1" +0 2 +State: [!0&!1] 2 "s2" +2 0 +--END--""" +assert hoa == k.to_str('HOA') + res = [] for e in k.out(s1): res.append((e.src, e.dst)) From f14b0bb4bd0df63e3b002126f57324ee6f2b490c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 09:27:30 +0200 Subject: [PATCH 069/606] Fix compilation on MacOS X Patch by Shachar Itzhaky. * spot/parseaut/scanaut.ll, spot/parsetl/scantl.ll: Include libc-config.h instead of config.h. * NEWS: Mention the fix. * THANKS: Add Shachar. 
--- NEWS | 4 ++++ THANKS | 5 +++-- spot/parseaut/scanaut.ll | 11 +++++++---- spot/parsetl/scantl.ll | 11 +++++++---- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index 86b03b39c..79f1c890f 100644 --- a/NEWS +++ b/NEWS @@ -7,6 +7,10 @@ New in spot 2.10.5.dev (not yet released) print_hoa() will now use state numbers matching those of the kripke_graph (issue #505). + Bug fixed: + + - Fix compilation error on MacOS X. + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/THANKS b/THANKS index ea99ede61..0258eb7c9 100644 --- a/THANKS +++ b/THANKS @@ -46,10 +46,11 @@ Paul Guénézan Reuben Rowe Roei Nahum Rüdiger Ehlers -Silien Hong -Simon Jantsch +Shachar Itzhaky Shengping Shaw Shufang Zhu +Silien Hong +Simon Jantsch Sonali Dutta Tereza Šťastná Tobias Meggendorfer. diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index bf35810ed..71e677044 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -25,12 +25,15 @@ /* %option debug */ %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index 554c28298..b315f636c 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -27,12 +27,15 @@ %option never-interactive %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif From cb5bc38f35d70990a0e79f9e94ae5e6332d06bf1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:11:23 +0200 Subject: [PATCH 070/606] bin: reset column specification between files * bin/common_finput.cc (job_processor::process_file): Reset col_to_read. * tests/core/ltlfilt.test: Test it. * NEWS: Mention the bug. --- NEWS | 5 ++++- bin/common_finput.cc | 2 ++ tests/core/ltlfilt.test | 16 +++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 79f1c890f..1ffa26ad5 100644 --- a/NEWS +++ b/NEWS @@ -7,10 +7,13 @@ New in spot 2.10.5.dev (not yet released) print_hoa() will now use state numbers matching those of the kripke_graph (issue #505). - Bug fixed: + Bugs fixed: - Fix compilation error on MacOS X. + - Using -Ffile/N to read column N of a CSV file would not reset the + /N specification for the next file. 
+ New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 8df1fb028..aae9e5b63 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -305,6 +305,8 @@ job_processor::process_stream(std::istream& is, int job_processor::process_file(const char* filename) { + col_to_read = 0; + // Special case for stdin. if (filename[0] == '-' && filename[1] == 0) return process_stream(std::cin, filename); diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 501ae94b2..43d50ce06 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2020 Laboratoire de Recherche et Développement de +# Copyright (C) 2013-2020, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -562,3 +562,17 @@ f1=`genltl --pps-arbiter-standard=2` f2=`genltl --pps-arbiter-strict=2` run 1 ltlfilt -f "$f2" --implied-by "$f1" run 0 ltlfilt -f "$f1" --implied-by "$f2" + + +# Reading two different columns of the same file +echo a,b > file +run 0 ltlfilt -Ffile/1 -Ffile/2 --stats=%f >out +cat >expected < Date: Tue, 17 May 2022 11:17:37 +0200 Subject: [PATCH 071/606] * spot/twa/bdddict.hh (bdd_info): Add noexcept, suggested by gcc 12. --- spot/twa/bdddict.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spot/twa/bdddict.hh b/spot/twa/bdddict.hh index c9b39d8a5..f9c2ed6df 100644 --- a/spot/twa/bdddict.hh +++ b/spot/twa/bdddict.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2011-2017, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), // Université Pierre et Marie Curie. @@ -78,7 +78,7 @@ namespace spot enum var_type { anon = 0, var, acc }; struct bdd_info { - bdd_info() : type(anon) {} + bdd_info() noexcept: type(anon) {} var_type type; formula f; // Used unless t==anon. ref_set refs; From b5f8e3c75df8528a7030696c4fdb50aa33426970 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:37:29 +0200 Subject: [PATCH 072/606] [buddy] remove some unused variables * src/reorder.c (reorder_win2ite): Do not set c=1. --- buddy/src/reorder.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/buddy/src/reorder.c b/buddy/src/reorder.c index d61630801..b107c8b6a 100644 --- a/buddy/src/reorder.c +++ b/buddy/src/reorder.c @@ -210,7 +210,6 @@ static BddTree *reorder_win2ite(BddTree *t) { BddTree *this, *first=t; int lastsize; - int c=1; if (t == NULL) return t; @@ -246,7 +245,6 @@ static BddTree *reorder_win2ite(BddTree *t) if (verbose > 1) printf(" %d nodes\n", reorder_nodenum()); - c++; } while (reorder_nodenum() != lastsize); From 8a628d96bcbf0d43d5de0f9fe349777e7f70df3f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 11:43:55 +0200 Subject: [PATCH 073/606] fix warnings from clang-15 * spot/twa/acc.cc (acc_cond::acc_code::symmetries): Fix weird loop. * spot/twaalgos/aiger.cc (aig::circ_step): Replace & by &&. 
--- spot/twa/acc.cc | 4 ++-- spot/twaalgos/aiger.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 5b7985d70..ce5d463aa 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1029,7 +1029,7 @@ namespace spot int base = ba.allocate_variables(umax+2); assert(base == 0); std::vector r; - for (unsigned i = 0; r.size() < umax; ++i) + while (r.size() < umax) r.emplace_back(bdd_ithvar(base++)); bdd bddcode = to_bdd(&r[0]); bdd tmp; diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index ae7b665e9..8d730c34d 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -1359,7 +1359,7 @@ namespace spot { unsigned var_g = gate_var(i); state_[var_g] = state_[and_gates_[i].first] - & state_[and_gates_[i].second]; + && state_[and_gates_[i].second]; state_[aig_not(var_g)] = !state_[var_g]; } // Update latches From eecb9af21ea167346d897a652168c1b3e478dd58 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 12:01:11 +0200 Subject: [PATCH 074/606] Fix a clang++15 warning * spot/parseaut/parseaut.yy: Move the try-block inside the code of the constructors, so that they can refer to non-static members. --- spot/parseaut/parseaut.yy | 52 ++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index c71636bde..4f57fcd35 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -2552,45 +2552,51 @@ namespace spot { automaton_stream_parser::automaton_stream_parser(const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(name, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(name, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(int fd, const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(fd, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(fd, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(const char* data, const std::string& filename, automaton_parser_options opt) - try : filename_(filename), opts_(opt) { - hoayystring(data, &scanner_); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + hoayystring(data, &scanner_); + } + catch (...) 
+ { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::~automaton_stream_parser() From 10bc253dd855801d71765dcb0956060092c64e79 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 15:25:17 +0200 Subject: [PATCH 075/606] work around spurious gcc 12 "potentially null dereference" The issue seems to be inside std::vector's copy constructor, but it highlighted places in Spot were we could avoid this copy. * spot/twaalgos/ltl2taa.cc: Avoid some copies of std::vector. --- spot/twaalgos/ltl2taa.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/ltl2taa.cc b/spot/twaalgos/ltl2taa.cc index 9c10777a9..eaba49e92 100644 --- a/spot/twaalgos/ltl2taa.cc +++ b/spot/twaalgos/ltl2taa.cc @@ -61,7 +61,8 @@ namespace spot { std::vector empty; res_->create_transition(init_, empty); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -76,7 +77,8 @@ namespace spot std::vector empty; taa_tgba::transition* t = res_->create_transition(init_, empty); res_->add_condition(t, f); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -90,7 +92,7 @@ namespace spot return; dst.emplace_back(v.init_); res_->create_transition(init_, dst); - succ_state ss = { dst, formula::tt(), a }; + succ_state ss = { std::move(dst), formula::tt(), std::move(a) }; succ_.emplace_back(ss); return; } @@ -206,7 +208,7 @@ namespace spot } t = res_->create_transition(init_, u); res_->add_condition(t, f); - succ_state ss = { u, f, a }; + succ_state ss = { std::move(u), f, std::move(a) }; succ_.emplace_back(ss); } From 99d030f5e1328f787888e94f13a6127cdcc8f3fd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 16:59:01 +0200 Subject: [PATCH 076/606] GCC 12 warns too much about See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105562 * m4/gccwarn.m4: Compile a small regex and add -Wno-maybe-uninitialized if needed. --- m4/gccwarn.m4 | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/m4/gccwarn.m4 b/m4/gccwarn.m4 index 4f719e55f..dc6969add 100644 --- a/m4/gccwarn.m4 +++ b/m4/gccwarn.m4 @@ -21,6 +21,7 @@ AC_DEFUN([CF_GXX_WARNINGS], cat > conftest.$ac_ext < +#include int main(int argc, char *argv[[]]) { // This string comparison is here to detect superfluous @@ -33,19 +34,26 @@ int main(int argc, char *argv[[]]) std::string a{"foo"}, b{"bar"}; if (b < a) return 1; + // GCC 12 has spurious warnings about ininialized values in regex. + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105562 + // We need -Wno-maybe-uninitialized in this case. + std::regex r{"a"}; + (void)r; return argv[[argc-1]] == nullptr; } EOF cf_save_CXXFLAGS="$CXXFLAGS" - ac_cv_prog_gxx_warn_flags="-W -Wall" + ac_cv_prog_gxx_warn_flags="-W -Werror" +dnl The following list has options of the form OPT:BAD:GOOD +dnl if -OPT fails we try -OPT -BAD. If -OPT succeeds we add -GOOD. 
for cf_opt in \ - Werror \ + Wall:Wno-maybe-uninitialized:\ Wint-to-void-pointer-cast \ Wzero-as-null-pointer-constant \ Wcast-align \ Wpointer-arith \ Wwrite-strings \ - Wcast-qual \ + Wcast-qual::DXTSTRINGDEFINES \ Wdocumentation \ Wmissing-declarations \ Wnoexcept \ @@ -58,11 +66,26 @@ EOF Wsuggest-override \ Wpedantic do - CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$cf_opt" - if AC_TRY_EVAL(ac_compile); then - ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$cf_opt" - test "$cf_opt" = Wcast-qual && ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -DXTSTRINGDEFINES" - fi + fopt=${cf_opt%%:*} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt" + case $cf_opt in + *:*:);; + *:*:*)ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -${cf_opt##*:}";; + esac + else + case $cf_opt in + *::*);; + *:*:*) + sopt=${cf_opt%:*} + sopt=${sopt#*:} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + fi;; + esac + fi done rm -f conftest* CXXFLAGS="$cf_save_CXXFLAGS"]) From a6639e56cb3ce5be884c1f068479aedb67c89e9b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 May 2022 17:43:27 +0200 Subject: [PATCH 077/606] more noexcept to pleace GCC * spot/bricks/brick-hashset (Row): Add noexcept. * bin/autcross.cc (out_statistics): Likewise. * bin/ltlcross.cc (statistics): Likewise. --- bin/autcross.cc | 3 +-- bin/ltlcross.cc | 2 +- spot/bricks/brick-hashset | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bin/autcross.cc b/bin/autcross.cc index 81b6bcef5..0711562a2 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -338,7 +338,6 @@ struct in_statistics struct out_statistics { - // If OK is false, output statistics are not available. bool ok; const char* status_str; @@ -346,7 +345,7 @@ struct out_statistics double time; aut_statistics output; - out_statistics() + out_statistics() noexcept : ok(false), status_str(nullptr), status_code(0), diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index d36478837..f75075f82 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -264,7 +264,7 @@ end_error() struct statistics { - statistics() + statistics() noexcept : ok(false), alternating(false), status_str(nullptr), diff --git a/spot/bricks/brick-hashset b/spot/bricks/brick-hashset index 1c97c9618..7763d29ae 100644 --- a/spot/bricks/brick-hashset +++ b/spot/bricks/brick-hashset @@ -583,7 +583,7 @@ struct _ConcurrentHashSet : HashSetBase< Cell > return begin() + size(); } - Row() : _data( nullptr ), _size( 0 ) {} + Row() noexcept : _data( nullptr ), _size( 0 ) {} ~Row() { free(); } }; From e0de77d8a404014cbad5340ece60eccc02aa6530 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 18 May 2022 07:12:05 +0200 Subject: [PATCH 078/606] Release Spot 2.10.6 * NEWS, configure.ac, doc/org/setup.org: Set version to 2.10.6. --- NEWS | 17 +++++++++-------- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index 1ffa26ad5..07f188ac2 100644 --- a/NEWS +++ b/NEWS @@ -1,11 +1,4 @@ -New in spot 2.10.5.dev (not yet released) - - Nothing yet. - - - make_twa_graph() will now preserve state number when copying a - kripke_graph object. As a consequence, print_dot() and - print_hoa() will now use state numbers matching those of the - kripke_graph (issue #505). 
+New in spot 2.10.6 (2022-05-18) Bugs fixed: @@ -14,6 +7,14 @@ New in spot 2.10.5.dev (not yet released) - Using -Ffile/N to read column N of a CSV file would not reset the /N specification for the next file. + - make_twa_graph() will now preserve state number when copying a + kripke_graph object. As a consequence, print_dot() and + print_hoa() will now use state numbers matching those of the + kripke_graph (issue #505). + + - Fix several compilation warning introduced by newer versions + of GCC and Clang. + New in spot 2.10.5 (2022-05-03) Bugs fixed: diff --git a/configure.ac b/configure.ac index 6527900ce..e14698a3f 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.5.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.10.6], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index c1b7e9235..52aa02639 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.10.5 -#+MACRO: LASTRELEASE 2.10.5 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.5.tar.gz][=spot-2.10.5.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-5/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-05-03 +#+MACRO: SPOTVERSION 2.10.6 +#+MACRO: LASTRELEASE 2.10.6 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.6.tar.gz][=spot-2.10.6.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-6/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-05-18 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From 457839df36762ce04a257e9eb06dcbe363212d55 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 18 May 2022 07:14:58 +0200 Subject: [PATCH 079/606] * NEWS, configure.ac: Bump version to 2.10.6. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 07f188ac2..1d656e05f 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.10.6.dev (not yet released) + + Nothing yet. + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/configure.ac b/configure.ac index e14698a3f..162bf463b 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.6], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.10.6.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From f784e405489edbeb024b34ff0a7730e161c17f3d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 19 May 2022 16:38:02 +0200 Subject: [PATCH 080/606] =?UTF-8?q?complete:=20do=20not=20force=20B=C3=BCc?= =?UTF-8?q?hi=20on=20universal=20automata?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * spot/twaalgos/complete.hh: Adjust documentation. * spot/twaalgos/complete.cc: If the acceptance condition is a tautology, delay the forcing of Büchi acceptance until we are sure it is needed. * NEWS: Mention the change. 
--- NEWS | 3 +++ spot/twaalgos/complete.cc | 24 +++++++++++++++++++----- spot/twaalgos/complete.hh | 9 +++++---- 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/NEWS b/NEWS index 1cbd800e1..23739d738 100644 --- a/NEWS +++ b/NEWS @@ -82,6 +82,9 @@ New in spot 2.10.6.dev (not yet released) averted in the parser by delaying the construction of such n-ary nodes until all children are known. + - complement() used to always turn tautological acceptance conditions + into Büchi. It now only does that if the automaton is modified. + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/spot/twaalgos/complete.cc b/spot/twaalgos/complete.cc index 20be2ea06..803b3f440 100644 --- a/spot/twaalgos/complete.cc +++ b/spot/twaalgos/complete.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -29,6 +29,8 @@ namespace spot return; unsigned n = aut->num_states(); + bool need_acc_fix = false; + // UM is a pair (bool, mark). If the Boolean is false, the // acceptance is always satisfiable. Otherwise, MARK is an // example of unsatisfiable mark. @@ -36,10 +38,11 @@ namespace spot if (!um.first) { // We cannot safely complete an automaton if its - // acceptance is always satisfiable. - auto acc = aut->set_buchi(); - for (auto& t: aut->edge_vector()) - t.acc = acc; + // acceptance is always satisfiable, so we will + // have to fix the acceptance automaton. However + // postpone that until we are sure that the + // automaton really need to be completed. + need_acc_fix = true; } else { @@ -129,6 +132,8 @@ namespace spot // acceptance sets as the last outgoing edge of the // state. acc = t.acc; + // If a state already has a edge to a sink, remember it + // so we can add the missing conditions to it. if (t.dst == sink) edge_to_sink = aut->edge_number(t); } @@ -136,6 +141,15 @@ namespace spot // edge to some sink state. if (missingcond != bddfalse) { + if (need_acc_fix) + { + auto a = aut->set_buchi(); + for (auto& t: aut->edge_vector()) + t.acc = a; + if (aut->num_edges()) + acc = a; + need_acc_fix = false; + } // If we haven't found any sink, simply add one. if (sink == -1U) { diff --git a/spot/twaalgos/complete.hh b/spot/twaalgos/complete.hh index 87703dcc2..3525904be 100644 --- a/spot/twaalgos/complete.hh +++ b/spot/twaalgos/complete.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2017 Laboratoire de Recherche et +// Copyright (C) 2013-2015, 2017, 2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -25,12 +25,13 @@ namespace spot { /// \brief Complete a twa_graph in place. /// - /// If the TωA has an acceptance condition that is a tautology, - /// it will be changed into a Büchi automaton. + /// If the TωA is incomplete and has an acceptance condition that is + /// a tautology, it will be changed into a Büchi automaton. SPOT_API void complete_here(twa_graph_ptr aut); /// \brief Clone a twa and complete it. /// - /// If the twa has no acceptance set, one will be added. + /// If the TωA is incomplete and has an acceptance condition that is + /// a tautology, it will be changed into a Büchi automaton. 
SPOT_API twa_graph_ptr complete(const const_twa_ptr& aut); } From b11208440b872c1a23621d5ccb1c03179b002d8c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 20 May 2022 16:51:16 +0200 Subject: [PATCH 081/606] zlktree: use a cache in the construction of zielonka_tree This largely speeds up the computation for conditions like "Rabin n" sharing a lot of subtrees. Also implement options to stop the construction if the shape is wrong. * spot/twaalgos/zlktree.cc, spot/twaalgos/zlktree.hh: Implement the cache and the options. * tests/python/zlktree.ipynb, tests/python/zlktree.py: New tests. --- NEWS | 5 + spot/twaalgos/zlktree.cc | 74 +++++- spot/twaalgos/zlktree.hh | 67 ++++- tests/python/zlktree.ipynb | 513 +++++++++++++++++++++++++++---------- tests/python/zlktree.py | 9 + 5 files changed, 517 insertions(+), 151 deletions(-) diff --git a/NEWS b/NEWS index 23739d738..32a1b0f19 100644 --- a/NEWS +++ b/NEWS @@ -85,6 +85,11 @@ New in spot 2.10.6.dev (not yet released) - complement() used to always turn tautological acceptance conditions into Büchi. It now only does that if the automaton is modified. + - The zielonka_tree construction was optimized using the same + memoization trick that is used in ACD. Additionally it can now be + run with additional option to abort when the tree as an unwanted + shape, or to turn the tree into a DAG. + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/spot/twaalgos/zlktree.cc b/spot/twaalgos/zlktree.cc index 2f87e6352..f31c46896 100644 --- a/spot/twaalgos/zlktree.cc +++ b/spot/twaalgos/zlktree.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -109,7 +109,8 @@ namespace spot } } - zielonka_tree::zielonka_tree(const acc_cond& cond) + zielonka_tree::zielonka_tree(const acc_cond& cond, + zielonka_tree_options opt) { const acc_cond::acc_code& code = cond.get_acceptance(); auto all = cond.all_sets(); @@ -120,11 +121,47 @@ namespace spot nodes_[0].colors = all; nodes_[0].level = 0; + robin_hood::unordered_node_map nmap; + std::vector models; // This loop is a BFS over the increasing set of nodes. for (unsigned node = 0; node < nodes_.size(); ++node) { acc_cond::mark_t colors = nodes_[node].colors; + unsigned nextlvl = nodes_[node].level + 1; + + // Have we already seen this combination of colors previously? + // If yes, simply copy the children. 
+ if (auto p = nmap.emplace(colors, node); !p.second) + { + unsigned fc = nodes_[p.first->second].first_child; + if (!fc) // this is a leaf + { + ++num_branches_; + continue; + } + if (!!(opt & zielonka_tree_options::MERGE_SUBTREES)) + { + nodes_[node].first_child = fc; + continue; + } + unsigned child = fc; + unsigned first = nodes_.size(); + nodes_[node].first_child = first; + do + { + auto& c = nodes_[child]; + child = c.next_sibling; + nodes_.push_back({node, static_cast(nodes_.size() + 1), + 0, nextlvl, c.colors}); + } + while (child != fc); + nodes_.back().next_sibling = first; + // We do not have to test the shape since this is the second time + // we see these colors; + continue; + } + bool is_accepting = code.accepting(colors); if (node == 0) is_even_ = is_accepting; @@ -145,15 +182,32 @@ namespace spot nodes_.reserve(first + num_children); for (auto& m: models) nodes_.push_back({node, static_cast(nodes_.size() + 1), - 0, nodes_[node].level + 1, m.model}); + 0, nextlvl, m.model}); nodes_.back().next_sibling = first; if (num_children > 1) { + bool abort = false; if (is_accepting) - has_rabin_shape_ = false; + { + has_rabin_shape_ = false; + if (!!(opt & zielonka_tree_options::ABORT_WRONG_SHAPE) + && !!(opt & zielonka_tree_options::CHECK_RABIN)) + abort = true; + } else - has_streett_shape_ = false; + { + has_streett_shape_ = false; + if (!!(opt & zielonka_tree_options::ABORT_WRONG_SHAPE) + && !!(opt & zielonka_tree_options::CHECK_STREETT)) + abort = true; + } + if (abort) + { + nodes_.clear(); + num_branches_ = 0; + return; + } } } @@ -523,14 +577,18 @@ namespace spot do { auto& c = nodes_[child]; + // We have to read anything we need from C + // before emplace_back, which may reallocate. + acc_cond::mark_t colors = c.colors; + unsigned minstate = c.minstate; + child = c.next_sibling; nodes_.emplace_back(c.edges, c.states); auto& n = nodes_.back(); n.parent = node; n.level = lvl + 1; n.scc = ref.scc; - n.colors = c.colors; - n.minstate = c.minstate; - child = c.next_sibling; + n.colors = colors; + n.minstate = minstate; } while (child != fc); chain_children(node, before, nodes_.size()); diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index 675224682..b8e47bc2a 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -28,6 +28,68 @@ namespace spot { + /// \ingroup twa_acc_transform + /// \brief Options to alter the behavior of acd + enum class zielonka_tree_options + { + /// Build the ZlkTree, without checking its shape. + NONE = 0, + /// Check if the ZlkTree has Rabin shape. + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always check the shape. + CHECK_RABIN = 1, + /// Check if the ZlkTree has Streett shape. + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always check the shape. + CHECK_STREETT = 2, + /// Check if the ZlkTree has Parity shape + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always check the shape. + CHECK_PARITY = CHECK_RABIN | CHECK_STREETT, + /// Abort the construction of the ZlkTree if it does not have the + /// shape that is tested. When that happens, num_branches() is set + /// to 0. 
+ ABORT_WRONG_SHAPE = 4, + /// Fuse identical substree. This cannot be used with + /// zielonka_tree_transform(). However it saves memory if the + /// only use of the zielonka_tree to check the shape. + MERGE_SUBTREES = 8, + }; + +#ifndef SWIG + inline + bool operator!(zielonka_tree_options me) + { + return me == zielonka_tree_options::NONE; + } + + inline + zielonka_tree_options operator&(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t ut; + return static_cast(static_cast(left) + & static_cast(right)); + } + + inline + zielonka_tree_options operator|(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t ut; + return static_cast(static_cast(left) + | static_cast(right)); + } + + inline + zielonka_tree_options operator-(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t ut; + return static_cast(static_cast(left) + & ~static_cast(right)); + } +#endif /// \ingroup twa_acc_transform /// \brief Zielonka Tree implementation /// @@ -41,7 +103,8 @@ namespace spot { public: /// \brief Build a Zielonka tree from the acceptance condition. - zielonka_tree(const acc_cond& cond); + zielonka_tree(const acc_cond& cond, + zielonka_tree_options opt = zielonka_tree_options::NONE); /// \brief The number of branches in the Zielonka tree. /// diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index d46e2ce2c..ae44ad37d 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -216,7 +216,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -640,7 +640,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328600> >" + " *' at 0x7f14701b7510> >" ] }, "execution_count": 10, @@ -1063,7 +1063,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c493284b0> >" + " *' at 0x7f1470220960> >" ] }, "execution_count": 11, @@ -1256,7 +1256,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328f30> >" + " *' at 0x7f14701b75d0> >" ] }, "execution_count": 13, @@ -1701,7 +1701,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49330420> >" + " *' at 0x7f1470142240> >" ] }, "execution_count": 14, @@ -2096,7 +2096,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2427,7 +2427,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2513,7 +2513,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2624,7 +2624,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2662,7 +2662,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2700,7 +2700,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2717,6 +2717,237 @@ " display(tcond)" ] }, + { + "cell_type": "markdown", + "id": "77db26c3", + "metadata": {}, + "source": [ + "## `zielonka_tree_options`\n", + "\n", + "The `zielonka_tree` class accepts a few options that can alter its behaviour.\n", + "\n", + "Options `CHECK_RABIN`, `CHECK_STREETT`, `CHECK_PARITY` can be combined with\n", + "`ABORT_WRONG_SHAPE` to abort the construction as soon as it is detected that the Zielonka tree has the wrong shape. When this happens, the number of branchs of the tree is set to 0.\n", + "\n", + "For instance we can check that the original acceptance condition does not behaves like a Parity condition." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "4fa47daf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(4, (Fin(0) & Inf(1) & (Inf(2) | Fin(3))) | ((Inf(0) | Fin(1)) & Fin(2) & Inf(3)))\n", + "0\n" + ] + } + ], + "source": [ + "print(c)\n", + "z = spot.zielonka_tree(c, spot.zielonka_tree_options_ABORT_WRONG_SHAPE \n", + " | spot.zielonka_tree_options_CHECK_PARITY)\n", + "print(z.num_branches())" + ] + }, + { + "cell_type": "markdown", + "id": "4786f64c", + "metadata": {}, + "source": [ + "Option `MERGE_SUBTREE` will fuse identical nodes, turning the tree into a DAG. (Actually, because this tree is stored as a left-child right-sibling tree, only the children of identical nodes are merged.):" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "bc826090", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,3}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{3}\n", + "<7>\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{1}\n", + "<8>\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{3}\n", + "<9>\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{1}\n", + "<10>\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " >" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spot.zielonka_tree(c, spot.zielonka_tree_options_MERGE_SUBTREES)" + ] + }, + { + "cell_type": "markdown", + "id": "9d7688b3", + "metadata": {}, + "source": [ + "Such a DAG cannot be used by `zielonka_tree_transform()`, but it saves memory if we are only checking the shape of the tree/DAG." 
+ ] + }, { "cell_type": "markdown", "id": "75838579", @@ -2731,7 +2962,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "id": "ea3488b1", "metadata": {}, "outputs": [], @@ -2763,7 +2994,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 20, "id": "fad721c0", "metadata": {}, "outputs": [ @@ -3862,10 +4093,10 @@ "};$(\"#acdaut0 #E9\").addClass(\"acdN0\");$(\"#acdaut0 #E10\").addClass(\"acdN0\");$(\"#acdaut0 #E11\").addClass(\"acdN0\");$(\"#acdaut0 #E12\").addClass(\"acdN0\");$(\"#acdaut0 #E13\").addClass(\"acdN0\");$(\"#acdaut0 #E14\").addClass(\"acdN0\");$(\"#acdaut0 #E15\").addClass(\"acdN0\");$(\"#acdaut0 #E16\").addClass(\"acdN0\");$(\"#acdaut0 #E21\").addClass(\"acdN0\");$(\"#acdaut0 #E22\").addClass(\"acdN0\");$(\"#acdaut0 #E23\").addClass(\"acdN0\");$(\"#acdaut0 #E24\").addClass(\"acdN0\");$(\"#acdaut0 #E25\").addClass(\"acdN0\");$(\"#acdaut0 #E26\").addClass(\"acdN0\");$(\"#acdaut0 #E27\").addClass(\"acdN0\");$(\"#acdaut0 #E28\").addClass(\"acdN0\");$(\"#acdaut0 #E33\").addClass(\"acdN0\");$(\"#acdaut0 #E34\").addClass(\"acdN0\");$(\"#acdaut0 #E35\").addClass(\"acdN0\");$(\"#acdaut0 #E36\").addClass(\"acdN0\");$(\"#acdaut0 #E31\").addClass(\"acdN1\");$(\"#acdaut0 #E32\").addClass(\"acdN1\");$(\"#acdaut0 #E39\").addClass(\"acdN1\");$(\"#acdaut0 #E40\").addClass(\"acdN1\");$(\"#acdaut0 #E5\").addClass(\"acdN2\");$(\"#acdaut0 #E7\").addClass(\"acdN2\");$(\"#acdaut0 #E17\").addClass(\"acdN2\");$(\"#acdaut0 #E1\").addClass(\"acdN3\");$(\"#acdaut0 #E10\").addClass(\"acdN4\");$(\"#acdaut0 #E12\").addClass(\"acdN4\");$(\"#acdaut0 #E13\").addClass(\"acdN4\");$(\"#acdaut0 #E15\").addClass(\"acdN4\");$(\"#acdaut0 #E21\").addClass(\"acdN4\");$(\"#acdaut0 #E22\").addClass(\"acdN4\");$(\"#acdaut0 #E23\").addClass(\"acdN5\");$(\"#acdaut0 #E24\").addClass(\"acdN5\");$(\"#acdaut0 #E34\").addClass(\"acdN5\");$(\"#acdaut0 #E36\").addClass(\"acdN5\");$(\"#acdaut0 #E14\").addClass(\"acdN6\");$(\"#acdaut0 #E15\").addClass(\"acdN6\");$(\"#acdaut0 #E22\").addClass(\"acdN6\");$(\"#acdaut0 #E23\").addClass(\"acdN6\");$(\"#acdaut0 #E14\").addClass(\"acdN7\");$(\"#acdaut0 #E16\").addClass(\"acdN7\");$(\"#acdaut0 #E26\").addClass(\"acdN7\");$(\"#acdaut0 #E9\").addClass(\"acdN8\");$(\"#acdaut0 #E40\").addClass(\"acdN9\");$(\"#acdaut0 #E5\").addClass(\"acdN10\");$(\"#acdaut0 #E23\").addClass(\"acdN11\");$(\"#acdaut0 #E14\").addClass(\"acdN12\");$(\"#acdaut0 #E23\").addClass(\"acdN13\");$(\"#acdaut0 #E14\").addClass(\"acdN14\");$(\"#acdaut0 #E1\").click(function(){acd0_edge(1);});$(\"#acdaut0 #E2\").click(function(){acd0_edge(2);});$(\"#acdaut0 #E3\").click(function(){acd0_edge(3);});$(\"#acdaut0 #E4\").click(function(){acd0_edge(4);});$(\"#acdaut0 #E5\").click(function(){acd0_edge(5);});$(\"#acdaut0 #E6\").click(function(){acd0_edge(6);});$(\"#acdaut0 #E7\").click(function(){acd0_edge(7);});$(\"#acdaut0 #E8\").click(function(){acd0_edge(8);});$(\"#acdaut0 #E9\").click(function(){acd0_edge(9);});$(\"#acdaut0 #E10\").click(function(){acd0_edge(10);});$(\"#acdaut0 #E11\").click(function(){acd0_edge(11);});$(\"#acdaut0 #E12\").click(function(){acd0_edge(12);});$(\"#acdaut0 #E13\").click(function(){acd0_edge(13);});$(\"#acdaut0 #E14\").click(function(){acd0_edge(14);});$(\"#acdaut0 #E15\").click(function(){acd0_edge(15);});$(\"#acdaut0 #E16\").click(function(){acd0_edge(16);});$(\"#acdaut0 #E17\").click(function(){acd0_edge(17);});$(\"#acdaut0 #E18\").click(function(){acd0_edge(18);});$(\"#acdaut0 #E19\").click(function(){acd0_edge(19);});$(\"#acdaut0 
#E20\").click(function(){acd0_edge(20);});$(\"#acdaut0 #E21\").click(function(){acd0_edge(21);});$(\"#acdaut0 #E22\").click(function(){acd0_edge(22);});$(\"#acdaut0 #E23\").click(function(){acd0_edge(23);});$(\"#acdaut0 #E24\").click(function(){acd0_edge(24);});$(\"#acdaut0 #E25\").click(function(){acd0_edge(25);});$(\"#acdaut0 #E26\").click(function(){acd0_edge(26);});$(\"#acdaut0 #E27\").click(function(){acd0_edge(27);});$(\"#acdaut0 #E28\").click(function(){acd0_edge(28);});$(\"#acdaut0 #E29\").click(function(){acd0_edge(29);});$(\"#acdaut0 #E30\").click(function(){acd0_edge(30);});$(\"#acdaut0 #E31\").click(function(){acd0_edge(31);});$(\"#acdaut0 #E32\").click(function(){acd0_edge(32);});$(\"#acdaut0 #E33\").click(function(){acd0_edge(33);});$(\"#acdaut0 #E34\").click(function(){acd0_edge(34);});$(\"#acdaut0 #E35\").click(function(){acd0_edge(35);});$(\"#acdaut0 #E36\").click(function(){acd0_edge(36);});$(\"#acdaut0 #E37\").click(function(){acd0_edge(37);});$(\"#acdaut0 #E38\").click(function(){acd0_edge(38);});$(\"#acdaut0 #E39\").click(function(){acd0_edge(39);});$(\"#acdaut0 #E40\").click(function(){acd0_edge(40);});$(\"#acdaut0 #S0\").click(function(){acd0_state(0);});$(\"#acdaut0 #S1\").click(function(){acd0_state(1);});$(\"#acdaut0 #S2\").click(function(){acd0_state(2);});$(\"#acdaut0 #S3\").click(function(){acd0_state(3);});$(\"#acdaut0 #S4\").click(function(){acd0_state(4);});$(\"#acdaut0 #S5\").click(function(){acd0_state(5);});$(\"#acdaut0 #S6\").click(function(){acd0_state(6);});$(\"#acdaut0 #S7\").click(function(){acd0_state(7);});$(\"#acdaut0 #S8\").click(function(){acd0_state(8);});$(\"#acdaut0 #S9\").click(function(){acd0_state(9);});$(\"#acd0 #N0\").click(function(){acd0_node(0, 0);});$(\"#acd0 #N1\").click(function(){acd0_node(1, 1);});$(\"#acd0 #N2\").click(function(){acd0_node(2, 1);});$(\"#acd0 #N3\").click(function(){acd0_node(3, 1);});$(\"#acd0 #N4\").click(function(){acd0_node(4, 1);});$(\"#acd0 #N5\").click(function(){acd0_node(5, 1);});$(\"#acd0 #N6\").click(function(){acd0_node(6, 1);});$(\"#acd0 #N7\").click(function(){acd0_node(7, 1);});$(\"#acd0 #N8\").click(function(){acd0_node(8, 1);});$(\"#acd0 #N9\").click(function(){acd0_node(9, 0);});$(\"#acd0 #N10\").click(function(){acd0_node(10, 0);});$(\"#acd0 #N11\").click(function(){acd0_node(11, 0);});$(\"#acd0 #N12\").click(function(){acd0_node(12, 0);});$(\"#acd0 #N13\").click(function(){acd0_node(13, 0);});$(\"#acd0 #N14\").click(function(){acd0_node(14, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 18, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -3886,7 +4117,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 21, "id": "859a993a", "metadata": {}, "outputs": [ @@ -3896,7 +4127,7 @@ "False" ] }, - "execution_count": 19, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -3932,7 +4163,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 22, "id": "a8bd0844", "metadata": {}, "outputs": [ @@ -3942,7 +4173,7 @@ "(4, 1)" ] }, - "execution_count": 20, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -3953,7 +4184,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 23, "id": "93116a05", "metadata": {}, "outputs": [ @@ -3963,7 +4194,7 @@ "(4, 1)" ] }, - "execution_count": 21, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -3984,7 +4215,7 @@ }, { "cell_type": "code", - "execution_count": 22, + 
"execution_count": 24, "id": "23940b6a", "metadata": {}, "outputs": [ @@ -3994,7 +4225,7 @@ "(12, 0)" ] }, - "execution_count": 22, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -4005,7 +4236,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 25, "id": "de7cbd02", "metadata": {}, "outputs": [ @@ -4015,7 +4246,7 @@ "(8, 0)" ] }, - "execution_count": 23, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -4026,7 +4257,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 26, "id": "8b0305d4", "metadata": {}, "outputs": [ @@ -4036,7 +4267,7 @@ "(4, 0)" ] }, - "execution_count": 24, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -4047,7 +4278,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 27, "id": "4f0a10f5", "metadata": {}, "outputs": [ @@ -4057,7 +4288,7 @@ "(4, 1)" ] }, - "execution_count": 25, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -4094,7 +4325,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 28, "id": "2bd04c1f", "metadata": {}, "outputs": [ @@ -4104,7 +4335,7 @@ "4" ] }, - "execution_count": 26, + "execution_count": 28, "metadata": {}, "output_type": "execute_result" } @@ -4131,7 +4362,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 29, "id": "e28035e8", "metadata": {}, "outputs": [ @@ -4737,10 +4968,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328840> >" + " *' at 0x7f14701670f0> >" ] }, - "execution_count": 27, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } @@ -4761,7 +4992,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 30, "id": "numerical-education", "metadata": {}, "outputs": [ @@ -4771,7 +5002,7 @@ "True" ] }, - "execution_count": 28, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -4790,7 +5021,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 31, "id": "3e239a0c", "metadata": {}, "outputs": [ @@ -5376,10 +5607,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328e70> >" + " *' at 0x7f1470167210> >" ] }, - "execution_count": 29, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -5401,7 +5632,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 32, "id": "4f62e612", "metadata": {}, "outputs": [ @@ -5411,7 +5642,7 @@ "15" ] }, - "execution_count": 30, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -5422,7 +5653,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 33, "id": "20f2a45c", "metadata": {}, "outputs": [ @@ -5432,7 +5663,7 @@ "27" ] }, - "execution_count": 31, + "execution_count": 33, "metadata": {}, "output_type": "execute_result" } @@ -5461,7 +5692,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 34, "id": "7727735d", "metadata": {}, "outputs": [], @@ -5471,7 +5702,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 35, "id": "2d0bbc0b", "metadata": {}, "outputs": [ @@ -5505,7 +5736,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 36, "id": "78643aae", "metadata": {}, "outputs": [ @@ -5523,7 +5754,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 37, "id": "13a7796b", "metadata": {}, "outputs": [], @@ -5533,7 +5764,7 @@ }, { "cell_type": "code", - "execution_count": 36, + 
"execution_count": 38, "id": "3ee900b7", "metadata": {}, "outputs": [ @@ -5564,7 +5795,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 39, "id": "e12bb020", "metadata": {}, "outputs": [], @@ -5574,7 +5805,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 40, "id": "813d15ed", "metadata": {}, "outputs": [ @@ -6673,10 +6904,10 @@ "};$(\"#acdaut1 #E9\").addClass(\"acdN0\");$(\"#acdaut1 #E10\").addClass(\"acdN0\");$(\"#acdaut1 #E11\").addClass(\"acdN0\");$(\"#acdaut1 #E12\").addClass(\"acdN0\");$(\"#acdaut1 #E13\").addClass(\"acdN0\");$(\"#acdaut1 #E14\").addClass(\"acdN0\");$(\"#acdaut1 #E15\").addClass(\"acdN0\");$(\"#acdaut1 #E16\").addClass(\"acdN0\");$(\"#acdaut1 #E21\").addClass(\"acdN0\");$(\"#acdaut1 #E22\").addClass(\"acdN0\");$(\"#acdaut1 #E23\").addClass(\"acdN0\");$(\"#acdaut1 #E24\").addClass(\"acdN0\");$(\"#acdaut1 #E25\").addClass(\"acdN0\");$(\"#acdaut1 #E26\").addClass(\"acdN0\");$(\"#acdaut1 #E27\").addClass(\"acdN0\");$(\"#acdaut1 #E28\").addClass(\"acdN0\");$(\"#acdaut1 #E33\").addClass(\"acdN0\");$(\"#acdaut1 #E34\").addClass(\"acdN0\");$(\"#acdaut1 #E35\").addClass(\"acdN0\");$(\"#acdaut1 #E36\").addClass(\"acdN0\");$(\"#acdaut1 #E31\").addClass(\"acdN1\");$(\"#acdaut1 #E32\").addClass(\"acdN1\");$(\"#acdaut1 #E39\").addClass(\"acdN1\");$(\"#acdaut1 #E40\").addClass(\"acdN1\");$(\"#acdaut1 #E5\").addClass(\"acdN2\");$(\"#acdaut1 #E7\").addClass(\"acdN2\");$(\"#acdaut1 #E17\").addClass(\"acdN2\");$(\"#acdaut1 #E1\").addClass(\"acdN3\");$(\"#acdaut1 #E10\").addClass(\"acdN4\");$(\"#acdaut1 #E12\").addClass(\"acdN4\");$(\"#acdaut1 #E13\").addClass(\"acdN4\");$(\"#acdaut1 #E15\").addClass(\"acdN4\");$(\"#acdaut1 #E21\").addClass(\"acdN4\");$(\"#acdaut1 #E22\").addClass(\"acdN4\");$(\"#acdaut1 #E14\").addClass(\"acdN5\");$(\"#acdaut1 #E15\").addClass(\"acdN5\");$(\"#acdaut1 #E22\").addClass(\"acdN5\");$(\"#acdaut1 #E23\").addClass(\"acdN5\");$(\"#acdaut1 #E23\").addClass(\"acdN6\");$(\"#acdaut1 #E24\").addClass(\"acdN6\");$(\"#acdaut1 #E34\").addClass(\"acdN6\");$(\"#acdaut1 #E36\").addClass(\"acdN6\");$(\"#acdaut1 #E14\").addClass(\"acdN7\");$(\"#acdaut1 #E16\").addClass(\"acdN7\");$(\"#acdaut1 #E26\").addClass(\"acdN7\");$(\"#acdaut1 #E9\").addClass(\"acdN8\");$(\"#acdaut1 #E40\").addClass(\"acdN9\");$(\"#acdaut1 #E5\").addClass(\"acdN10\");$(\"#acdaut1 #E14\").addClass(\"acdN11\");$(\"#acdaut1 #E23\").addClass(\"acdN12\");$(\"#acdaut1 #E23\").addClass(\"acdN13\");$(\"#acdaut1 #E14\").addClass(\"acdN14\");$(\"#acdaut1 #E1\").click(function(){acd1_edge(1);});$(\"#acdaut1 #E2\").click(function(){acd1_edge(2);});$(\"#acdaut1 #E3\").click(function(){acd1_edge(3);});$(\"#acdaut1 #E4\").click(function(){acd1_edge(4);});$(\"#acdaut1 #E5\").click(function(){acd1_edge(5);});$(\"#acdaut1 #E6\").click(function(){acd1_edge(6);});$(\"#acdaut1 #E7\").click(function(){acd1_edge(7);});$(\"#acdaut1 #E8\").click(function(){acd1_edge(8);});$(\"#acdaut1 #E9\").click(function(){acd1_edge(9);});$(\"#acdaut1 #E10\").click(function(){acd1_edge(10);});$(\"#acdaut1 #E11\").click(function(){acd1_edge(11);});$(\"#acdaut1 #E12\").click(function(){acd1_edge(12);});$(\"#acdaut1 #E13\").click(function(){acd1_edge(13);});$(\"#acdaut1 #E14\").click(function(){acd1_edge(14);});$(\"#acdaut1 #E15\").click(function(){acd1_edge(15);});$(\"#acdaut1 #E16\").click(function(){acd1_edge(16);});$(\"#acdaut1 #E17\").click(function(){acd1_edge(17);});$(\"#acdaut1 #E18\").click(function(){acd1_edge(18);});$(\"#acdaut1 
#E19\").click(function(){acd1_edge(19);});$(\"#acdaut1 #E20\").click(function(){acd1_edge(20);});$(\"#acdaut1 #E21\").click(function(){acd1_edge(21);});$(\"#acdaut1 #E22\").click(function(){acd1_edge(22);});$(\"#acdaut1 #E23\").click(function(){acd1_edge(23);});$(\"#acdaut1 #E24\").click(function(){acd1_edge(24);});$(\"#acdaut1 #E25\").click(function(){acd1_edge(25);});$(\"#acdaut1 #E26\").click(function(){acd1_edge(26);});$(\"#acdaut1 #E27\").click(function(){acd1_edge(27);});$(\"#acdaut1 #E28\").click(function(){acd1_edge(28);});$(\"#acdaut1 #E29\").click(function(){acd1_edge(29);});$(\"#acdaut1 #E30\").click(function(){acd1_edge(30);});$(\"#acdaut1 #E31\").click(function(){acd1_edge(31);});$(\"#acdaut1 #E32\").click(function(){acd1_edge(32);});$(\"#acdaut1 #E33\").click(function(){acd1_edge(33);});$(\"#acdaut1 #E34\").click(function(){acd1_edge(34);});$(\"#acdaut1 #E35\").click(function(){acd1_edge(35);});$(\"#acdaut1 #E36\").click(function(){acd1_edge(36);});$(\"#acdaut1 #E37\").click(function(){acd1_edge(37);});$(\"#acdaut1 #E38\").click(function(){acd1_edge(38);});$(\"#acdaut1 #E39\").click(function(){acd1_edge(39);});$(\"#acdaut1 #E40\").click(function(){acd1_edge(40);});$(\"#acdaut1 #S0\").click(function(){acd1_state(0);});$(\"#acdaut1 #S1\").click(function(){acd1_state(1);});$(\"#acdaut1 #S2\").click(function(){acd1_state(2);});$(\"#acdaut1 #S3\").click(function(){acd1_state(3);});$(\"#acdaut1 #S4\").click(function(){acd1_state(4);});$(\"#acdaut1 #S5\").click(function(){acd1_state(5);});$(\"#acdaut1 #S6\").click(function(){acd1_state(6);});$(\"#acdaut1 #S7\").click(function(){acd1_state(7);});$(\"#acdaut1 #S8\").click(function(){acd1_state(8);});$(\"#acdaut1 #S9\").click(function(){acd1_state(9);});$(\"#acd1 #N0\").click(function(){acd1_node(0, 0);});$(\"#acd1 #N1\").click(function(){acd1_node(1, 1);});$(\"#acd1 #N2\").click(function(){acd1_node(2, 1);});$(\"#acd1 #N3\").click(function(){acd1_node(3, 1);});$(\"#acd1 #N4\").click(function(){acd1_node(4, 1);});$(\"#acd1 #N5\").click(function(){acd1_node(5, 1);});$(\"#acd1 #N6\").click(function(){acd1_node(6, 1);});$(\"#acd1 #N7\").click(function(){acd1_node(7, 1);});$(\"#acd1 #N8\").click(function(){acd1_node(8, 1);});$(\"#acd1 #N9\").click(function(){acd1_node(9, 0);});$(\"#acd1 #N10\").click(function(){acd1_node(10, 0);});$(\"#acd1 #N11\").click(function(){acd1_node(11, 0);});$(\"#acd1 #N12\").click(function(){acd1_node(12, 0);});$(\"#acd1 #N13\").click(function(){acd1_node(13, 0);});$(\"#acd1 #N14\").click(function(){acd1_node(14, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 38, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } @@ -6695,7 +6926,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 41, "id": "00acde97", "metadata": {}, "outputs": [ @@ -6705,7 +6936,7 @@ "4" ] }, - "execution_count": 39, + "execution_count": 41, "metadata": {}, "output_type": "execute_result" } @@ -6716,7 +6947,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 42, "id": "23c5f4df", "metadata": {}, "outputs": [ @@ -6726,7 +6957,7 @@ "8" ] }, - "execution_count": 40, + "execution_count": 42, "metadata": {}, "output_type": "execute_result" } @@ -6737,7 +6968,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 43, "id": "da0bbcbe", "metadata": {}, "outputs": [ @@ -6747,7 +6978,7 @@ "0" ] }, - "execution_count": 41, + "execution_count": 43, "metadata": {}, "output_type": "execute_result" } @@ -6758,7 +6989,7 @@ }, { "cell_type": 
"code", - "execution_count": 42, + "execution_count": 44, "id": "da0dc5bc", "metadata": {}, "outputs": [ @@ -6768,7 +6999,7 @@ "8" ] }, - "execution_count": 42, + "execution_count": 44, "metadata": {}, "output_type": "execute_result" } @@ -6788,7 +7019,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 45, "id": "94999c2e", "metadata": {}, "outputs": [ @@ -7586,10 +7817,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49342570> >" + " *' at 0x7f14700fe1e0> >" ] }, - "execution_count": 43, + "execution_count": 45, "metadata": {}, "output_type": "execute_result" } @@ -7611,7 +7842,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 46, "id": "b57476cf", "metadata": {}, "outputs": [ @@ -7640,7 +7871,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 47, "id": "f082b433", "metadata": {}, "outputs": [ @@ -7912,10 +8143,10 @@ "};$(\"#acdaut2 #E1\").addClass(\"acdN0\");$(\"#acdaut2 #E2\").addClass(\"acdN0\");$(\"#acdaut2 #E3\").addClass(\"acdN0\");$(\"#acdaut2 #E4\").addClass(\"acdN0\");$(\"#acdaut2 #E5\").addClass(\"acdN0\");$(\"#acdaut2 #E6\").addClass(\"acdN0\");$(\"#acdaut2 #E2\").addClass(\"acdN1\");$(\"#acdaut2 #E3\").addClass(\"acdN1\");$(\"#acdaut2 #E4\").addClass(\"acdN1\");$(\"#acdaut2 #E5\").addClass(\"acdN1\");$(\"#acdaut2 #E6\").addClass(\"acdN1\");$(\"#acdaut2 #E1\").addClass(\"acdN2\");$(\"#acdaut2 #E2\").addClass(\"acdN2\");$(\"#acdaut2 #E4\").addClass(\"acdN2\");$(\"#acdaut2 #E6\").addClass(\"acdN2\");$(\"#acdaut2 #E1\").click(function(){acd2_edge(1);});$(\"#acdaut2 #E2\").click(function(){acd2_edge(2);});$(\"#acdaut2 #E3\").click(function(){acd2_edge(3);});$(\"#acdaut2 #E4\").click(function(){acd2_edge(4);});$(\"#acdaut2 #E5\").click(function(){acd2_edge(5);});$(\"#acdaut2 #E6\").click(function(){acd2_edge(6);});$(\"#acdaut2 #S0\").click(function(){acd2_state(0);});$(\"#acdaut2 #S1\").click(function(){acd2_state(1);});$(\"#acdaut2 #S2\").click(function(){acd2_state(2);});$(\"#acd2 #N0\").click(function(){acd2_node(0, 1);});$(\"#acd2 #N1\").click(function(){acd2_node(1, 0);});$(\"#acd2 #N2\").click(function(){acd2_node(2, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 45, + "execution_count": 47, "metadata": {}, "output_type": "execute_result" } @@ -7931,7 +8162,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 48, "id": "597185c0", "metadata": {}, "outputs": [ @@ -7944,11 +8175,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8122,10 +8353,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e0690> >" + " *' at 0x7f14700feb40> >" ] }, - "execution_count": 46, + "execution_count": 48, "metadata": {}, "output_type": "execute_result" } @@ -8154,7 +8385,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 49, "id": "a4fd4105", "metadata": {}, "outputs": [ @@ -8426,10 +8657,10 @@ "};$(\"#acdaut3 #E1\").addClass(\"acdN0\");$(\"#acdaut3 #E2\").addClass(\"acdN0\");$(\"#acdaut3 #E3\").addClass(\"acdN0\");$(\"#acdaut3 #E4\").addClass(\"acdN0\");$(\"#acdaut3 #E5\").addClass(\"acdN0\");$(\"#acdaut3 #E6\").addClass(\"acdN0\");$(\"#acdaut3 #E1\").addClass(\"acdN1\");$(\"#acdaut3 #E2\").addClass(\"acdN1\");$(\"#acdaut3 #E4\").addClass(\"acdN1\");$(\"#acdaut3 #E6\").addClass(\"acdN1\");$(\"#acdaut3 #E2\").addClass(\"acdN2\");$(\"#acdaut3 #E3\").addClass(\"acdN2\");$(\"#acdaut3 #E4\").addClass(\"acdN2\");$(\"#acdaut3 #E5\").addClass(\"acdN2\");$(\"#acdaut3 
#E6\").addClass(\"acdN2\");$(\"#acdaut3 #E1\").click(function(){acd3_edge(1);});$(\"#acdaut3 #E2\").click(function(){acd3_edge(2);});$(\"#acdaut3 #E3\").click(function(){acd3_edge(3);});$(\"#acdaut3 #E4\").click(function(){acd3_edge(4);});$(\"#acdaut3 #E5\").click(function(){acd3_edge(5);});$(\"#acdaut3 #E6\").click(function(){acd3_edge(6);});$(\"#acdaut3 #S0\").click(function(){acd3_state(0);});$(\"#acdaut3 #S1\").click(function(){acd3_state(1);});$(\"#acdaut3 #S2\").click(function(){acd3_state(2);});$(\"#acd3 #N0\").click(function(){acd3_node(0, 1);});$(\"#acd3 #N1\").click(function(){acd3_node(1, 0);});$(\"#acd3 #N2\").click(function(){acd3_node(2, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 47, + "execution_count": 49, "metadata": {}, "output_type": "execute_result" } @@ -8440,7 +8671,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 50, "id": "1a68f96a", "metadata": {}, "outputs": [ @@ -8453,11 +8684,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8610,10 +8841,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e0de0> >" + " *' at 0x7f14700fea80> >" ] }, - "execution_count": 48, + "execution_count": 50, "metadata": {}, "output_type": "execute_result" } @@ -8636,7 +8867,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 51, "id": "criminal-northwest", "metadata": {}, "outputs": [ @@ -8762,10 +8993,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e21e0> >" + " *' at 0x7f1470107240> >" ] }, - "execution_count": 49, + "execution_count": 51, "metadata": {}, "output_type": "execute_result" } @@ -8796,7 +9027,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 52, "id": "63c7c062", "metadata": {}, "outputs": [ @@ -8874,10 +9105,10 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 50, + "execution_count": 52, "metadata": {}, "output_type": "execute_result" } @@ -8888,7 +9119,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 53, "id": "balanced-investing", "metadata": {}, "outputs": [ @@ -9040,10 +9271,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2360> >" + " *' at 0x7f1470107030> >" ] }, - "execution_count": 51, + "execution_count": 53, "metadata": {}, "output_type": "execute_result" } @@ -9054,7 +9285,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 54, "id": "nutritional-rugby", "metadata": {}, "outputs": [], @@ -9064,7 +9295,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 55, "id": "criminal-marking", "metadata": {}, "outputs": [ @@ -9333,10 +9564,10 @@ "};$(\"#acdaut4 #E1\").addClass(\"acdN0\");$(\"#acdaut4 #E2\").addClass(\"acdN0\");$(\"#acdaut4 #E3\").addClass(\"acdN0\");$(\"#acdaut4 #E4\").addClass(\"acdN0\");$(\"#acdaut4 #E5\").addClass(\"acdN0\");$(\"#acdaut4 #E6\").addClass(\"acdN0\");$(\"#acdaut4 #E7\").addClass(\"acdN0\");$(\"#acdaut4 #E8\").addClass(\"acdN0\");$(\"#acdaut4 #E6\").addClass(\"acdN1\");$(\"#acdaut4 #E1\").click(function(){acd4_edge(1);});$(\"#acdaut4 #E2\").click(function(){acd4_edge(2);});$(\"#acdaut4 #E3\").click(function(){acd4_edge(3);});$(\"#acdaut4 #E4\").click(function(){acd4_edge(4);});$(\"#acdaut4 #E5\").click(function(){acd4_edge(5);});$(\"#acdaut4 #E6\").click(function(){acd4_edge(6);});$(\"#acdaut4 #E7\").click(function(){acd4_edge(7);});$(\"#acdaut4 #E8\").click(function(){acd4_edge(8);});$(\"#acdaut4 #S0\").click(function(){acd4_state(0);});$(\"#acdaut4 
#S1\").click(function(){acd4_state(1);});$(\"#acd4 #N0\").click(function(){acd4_node(0, 1);});$(\"#acd4 #N1\").click(function(){acd4_node(1, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 53, + "execution_count": 55, "metadata": {}, "output_type": "execute_result" } @@ -9347,7 +9578,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 56, "id": "e7760d51", "metadata": {}, "outputs": [ @@ -9357,7 +9588,7 @@ "0" ] }, - "execution_count": 54, + "execution_count": 56, "metadata": {}, "output_type": "execute_result" } @@ -9368,7 +9599,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 57, "id": "unusual-dependence", "metadata": { "scrolled": true @@ -9477,10 +9708,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2d50> >" + " *' at 0x7f1470107b70> >" ] }, - "execution_count": 55, + "execution_count": 57, "metadata": {}, "output_type": "execute_result" } @@ -9491,7 +9722,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 58, "id": "d5440de1", "metadata": {}, "outputs": [ @@ -9504,11 +9735,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9624,10 +9855,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480ea150> >" + " *' at 0x7f147010d240> >" ] }, - "execution_count": 56, + "execution_count": 58, "metadata": {}, "output_type": "execute_result" } @@ -9638,7 +9869,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 59, "id": "9ed0bc59", "metadata": {}, "outputs": [], @@ -9658,7 +9889,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 60, "id": "deb92971", "metadata": {}, "outputs": [ @@ -9963,10 +10194,10 @@ "};$(\"#acdaut5 #E1\").addClass(\"acdN0\");$(\"#acdaut5 #E2\").addClass(\"acdN0\");$(\"#acdaut5 #E3\").addClass(\"acdN0\");$(\"#acdaut5 #E4\").addClass(\"acdN0\");$(\"#acdaut5 #E5\").addClass(\"acdN0\");$(\"#acdaut5 #E6\").addClass(\"acdN0\");$(\"#acdaut5 #E7\").addClass(\"acdN0\");$(\"#acdaut5 #E1\").addClass(\"acdN1\");$(\"#acdaut5 #E3\").addClass(\"acdN1\");$(\"#acdaut5 #E4\").addClass(\"acdN1\");$(\"#acdaut5 #E5\").addClass(\"acdN1\");$(\"#acdaut5 #E7\").addClass(\"acdN2\");$(\"#acdaut5 #E1\").click(function(){acd5_edge(1);});$(\"#acdaut5 #E2\").click(function(){acd5_edge(2);});$(\"#acdaut5 #E3\").click(function(){acd5_edge(3);});$(\"#acdaut5 #E4\").click(function(){acd5_edge(4);});$(\"#acdaut5 #E5\").click(function(){acd5_edge(5);});$(\"#acdaut5 #E6\").click(function(){acd5_edge(6);});$(\"#acdaut5 #E7\").click(function(){acd5_edge(7);});$(\"#acdaut5 #S0\").click(function(){acd5_state(0);});$(\"#acdaut5 #S1\").click(function(){acd5_state(1);});$(\"#acdaut5 #S2\").click(function(){acd5_state(2);});$(\"#acdaut5 #S3\").click(function(){acd5_state(3);});$(\"#acd5 #N0\").click(function(){acd5_node(0, 1);});$(\"#acd5 #N1\").click(function(){acd5_node(1, 0);});$(\"#acd5 #N2\").click(function(){acd5_node(2, 0);});" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 58, + "execution_count": 60, "metadata": {}, "output_type": "execute_result" } @@ -9984,7 +10215,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 61, "id": "94a02201", "metadata": {}, "outputs": [ @@ -10091,10 +10322,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2300> >" + " *' at 0x7f147010d5a0> >" ] }, - "execution_count": 59, + "execution_count": 61, "metadata": {}, "output_type": "execute_result" } @@ -10105,7 +10336,7 @@ }, { "cell_type": "code", - "execution_count": 60, + 
"execution_count": 62, "id": "d484ba8f", "metadata": {}, "outputs": [ @@ -10118,11 +10349,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10221,10 +10452,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f0420> >" + " *' at 0x7f147010d6f0> >" ] }, - "execution_count": 60, + "execution_count": 62, "metadata": {}, "output_type": "execute_result" } @@ -10235,7 +10466,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 63, "id": "3332e850", "metadata": {}, "outputs": [ @@ -10501,10 +10732,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f0840> >" + " *' at 0x7f1470116270> >" ] }, - "execution_count": 61, + "execution_count": 63, "metadata": {}, "output_type": "execute_result" } @@ -10525,7 +10756,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 64, "id": "german-vienna", "metadata": {}, "outputs": [ @@ -10595,10 +10826,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f30c0> >" + " *' at 0x7f1470116630> >" ] }, - "execution_count": 62, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" } @@ -10622,7 +10853,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 65, "id": "chemical-primary", "metadata": {}, "outputs": [ @@ -10632,7 +10863,7 @@ "(spot.trival_maybe(), spot.trival(True))" ] }, - "execution_count": 63, + "execution_count": 65, "metadata": {}, "output_type": "execute_result" } @@ -10643,7 +10874,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 66, "id": "hispanic-floor", "metadata": {}, "outputs": [ @@ -10706,10 +10937,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f3210> >" + " *' at 0x7f1470116450> >" ] }, - "execution_count": 64, + "execution_count": 66, "metadata": {}, "output_type": "execute_result" } @@ -10720,7 +10951,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 67, "id": "central-london", "metadata": {}, "outputs": [ @@ -10730,7 +10961,7 @@ "(spot.trival(True), spot.trival(True))" ] }, - "execution_count": 65, + "execution_count": 67, "metadata": {}, "output_type": "execute_result" } @@ -10750,7 +10981,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/tests/python/zlktree.py b/tests/python/zlktree.py index e1b0c9e7b..c3cb262f2 100644 --- a/tests/python/zlktree.py +++ b/tests/python/zlktree.py @@ -152,3 +152,12 @@ tc.assertTrue(a.equivalent_to(b)) b = spot.acd_transform_sbacc(a, False) tc.assertEqual(str(b.acc()), '(2, Fin(0) & Inf(1))') tc.assertTrue(a.equivalent_to(b)) + + +# This used to be very slow. 
+c = spot.acc_cond("Rabin 9") +n = spot.zielonka_tree(c).num_branches() +tc.assertEqual(n, 362880) +opt = spot.zielonka_tree_options_MERGE_SUBTREES; +n = spot.zielonka_tree(c, opt).num_branches() +tc.assertEqual(n, 9) From e064726b64155d62aeaab7994649aa54c5829e05 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Sun, 15 May 2022 23:54:18 +0200 Subject: [PATCH 082/606] Introducing a global variable to define the number of threads * NEWS: Announce * spot/Makefile.am: Add pthread to use threads * spot/misc/common.cc, spot/misc/common.hh: Add variable + getter/setter * spot/misc/Makefile.am: Add common.cc --- NEWS | 3 +++ spot/Makefile.am | 2 +- spot/misc/Makefile.am | 1 + spot/misc/common.cc | 33 +++++++++++++++++++++++++++++++++ spot/misc/common.hh | 8 ++++++++ 5 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 spot/misc/common.cc diff --git a/NEWS b/NEWS index 32a1b0f19..33b74132c 100644 --- a/NEWS +++ b/NEWS @@ -23,6 +23,9 @@ New in spot 2.10.6.dev (not yet released) Library: + - A global variable, together with its setters and getters to define the + maximal number of threads is added to common.hh/common.cc + - The new function suffix_operator_normal_form() implements transformation of formulas to Suffix Operator Normal Form, described in [cimatti.06.fmcad]. diff --git a/spot/Makefile.am b/spot/Makefile.am index 821979f1d..c7bebfe6a 100644 --- a/spot/Makefile.am +++ b/spot/Makefile.am @@ -35,7 +35,7 @@ SUBDIRS = misc priv tl graph twa twacube twaalgos ta taalgos kripke \ lib_LTLIBRARIES = libspot.la libspot_la_SOURCES = -libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined $(SYMBOLIC_LDFLAGS) +libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined -pthread $(SYMBOLIC_LDFLAGS) libspot_la_LIBADD = \ kripke/libkripke.la \ misc/libmisc.la \ diff --git a/spot/misc/Makefile.am b/spot/misc/Makefile.am index e509dbe87..623a13c87 100644 --- a/spot/misc/Makefile.am +++ b/spot/misc/Makefile.am @@ -63,6 +63,7 @@ libmisc_la_SOURCES = \ bareword.cc \ bitset.cc \ bitvect.cc \ + common.cc \ escape.cc \ formater.cc \ intvcomp.cc \ diff --git a/spot/misc/common.cc b/spot/misc/common.cc new file mode 100644 index 000000000..adf9f2da0 --- /dev/null +++ b/spot/misc/common.cc @@ -0,0 +1,33 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2018 Laboratoire de Recherche et Développement +// de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include + +static unsigned N_MAX_THREADS = 1; + +void set_nthreads(unsigned nthreads) +{ + N_MAX_THREADS = nthreads; +} + +unsigned get_nthreads() +{ + return N_MAX_THREADS; +} \ No newline at end of file diff --git a/spot/misc/common.hh b/spot/misc/common.hh index e38f9f15a..fc74a8ee7 100644 --- a/spot/misc/common.hh +++ b/spot/misc/common.hh @@ -169,3 +169,11 @@ namespace spot # define SPOT_make_shared_enabled__(TYPE, ...) 
\ std::make_shared(__VA_ARGS__) #endif + + +// Global variable to determine the maximal number of threads +SPOT_API void +set_nthreads(unsigned nthreads); + +SPOT_API unsigned +get_nthreads(); From 71c2a7b1a60e6aaaf4488d031659ebc75cc512ba Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Sun, 15 May 2022 23:56:19 +0200 Subject: [PATCH 083/606] Add a new function to sort edges sort_edge_srcfirst_ will sort the edge with respect to the src state, then sort each sub list with respect to the given predicate, possibly in parallel. * spot/graph/graph.hh: Here --- spot/graph/graph.hh | 70 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index fa276131d..dc7ffc6ae 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -28,6 +28,7 @@ #include #include #include +#include namespace spot { @@ -1226,6 +1227,75 @@ namespace spot std::stable_sort(edges_.begin() + 1, edges_.end(), p); } + /// \brief Sort all edges by src first, then, within edges of the same + /// source use the predicate + /// + /// This will invalidate all iterators, and also destroy edge + /// chains. Call chain_edges_() immediately afterwards unless you + /// know what you are doing. + /// \note: for performance this will work in parallel (if enabled) + /// and make a temporary copy of the edges (needs more ram) + /// \pre This needs the edge_vector to be in a coherent state when called + template> + void sort_edges_srcfirst_(Predicate p = Predicate()) + { + //std::cerr << "\nbefore\n"; + //dump_storage(std::cerr); + const auto N = num_states(); + // Read threads once + const unsigned nthreads = get_nthreads(); + + auto idx_list = std::vector(N+1); + auto new_edges = edge_vector_t(); + new_edges.reserve(edges_.size()); + if (SPOT_UNLIKELY(edges_.empty())) + throw std::runtime_error("Empty edge vector!"); + new_edges.resize(1); + // This causes edge 0 to be considered as dead. + new_edges[0].next_succ = 0; + // Copy the edges such that they are sorted by src + for (auto s = 0u; s < N; ++s) + { + idx_list[s] = new_edges.size(); + for (const auto& e : out(s)) + new_edges.push_back(e); + } + idx_list[N] = new_edges.size(); + // New edge sorted by source + // If we have few edge or only one threads + // Benchmark few? + auto bne = new_edges.begin(); + if (nthreads == 1 || edges_.size() < 1000) + { + for (auto s = 0u; s < N; ++s) + std::stable_sort(bne + idx_list[s], + bne + idx_list[s+1], + p); + } + else + { + static auto tv = std::vector(); + SPOT_ASSERT(tv.empty()); + tv.resize(nthreads); + for (unsigned id = 0; id < nthreads; ++id) + tv[id] = std::thread( + [bne, id, N, &idx_list, p, nthreads]() + { + for (auto s = id; s < N; s+=nthreads) + std::stable_sort(bne + idx_list[s], + bne + idx_list[s+1], + p); + return; + }); + for (auto& t : tv) + t.join(); + tv.clear(); + } + // Done + std::swap(edges_, new_edges); + // Like after normal sort_edges, they need to be chained before usage + } + /// \brief Sort edges of the given states /// /// \tparam Predicate : Comparison type From d8cc0c5acbc437fbea741b494fde06de317e9309 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Mon, 16 May 2022 00:04:04 +0200 Subject: [PATCH 084/606] Introduce a faster merge_states merge_states is now hash-based, uses the new edge-sorting with src first and can be executed in parallel. 
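For illustration only (this snippet is not part of the patch), a minimal
Python sketch of the intended use.  It relies on the spot.set_nthreads()
binding introduced two commits earlier and exercised in
tests/python/mergedge.py below; spot.translate() is used here only to
obtain some automaton to merge, and the thread count merely bounds the
parallelism that merge_states() may use internally.

    import spot
    spot.set_nthreads(4)            # thread budget read by merge_states()
    aut = spot.translate('GF(a <-> XXb)', 'parity')
    n_before = aut.num_states()
    aut.merge_states()              # hash-based merging, possibly multi-threaded
    assert aut.num_states() <= n_before
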
* spot/twa/twagraph.cc: Here * tests/python/mergedge.py: Test --- spot/twa/twagraph.cc | 161 +++++++++++++++------- tests/python/mergedge.py | 285 ++++++++++++++++++++------------------- 2 files changed, 253 insertions(+), 193 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index c2bdd5650..2fd2eb070 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -372,13 +372,12 @@ namespace spot throw std::runtime_error( "twa_graph::merge_states() does not work on alternating automata"); + const unsigned nthreads = get_nthreads(); + typedef graph_t::edge_storage_t tr_t; - g_.sort_edges_([](const tr_t& lhs, const tr_t& rhs) + g_.sort_edges_srcfirst_([](const tr_t& lhs, const tr_t& rhs) { - if (lhs.src < rhs.src) - return true; - if (lhs.src > rhs.src) - return false; + assert(lhs.src == rhs.src); if (lhs.acc < rhs.acc) return true; if (lhs.acc > rhs.acc) @@ -449,7 +448,7 @@ namespace spot // Represents which states share a hash // Head is in the unordered_map, // hash_linked_list is like a linked list structure - // of false pointers + // of fake pointers auto hash_linked_list = std::vector(n_states, -1u); auto s_to_hash = std::vector(n_states, 0); @@ -530,8 +529,8 @@ namespace spot }; - static auto checked1 = std::vector(); - static auto checked2 = std::vector(); + thread_local auto checked1 = std::vector(); + thread_local auto checked2 = std::vector(); auto [i1, nsl1, sl1, e1] = e_idx[s1]; auto [i2, nsl2, sl2, e2] = e_idx[s2]; @@ -585,12 +584,10 @@ namespace spot // More efficient version? // Skip checked edges // Last element serves as break - for (; checked1[idx1 - i1]; ++idx1) - { - } - for (; checked2[idx2 - i2]; ++idx2) - { - } + while (checked1[idx1 - i1]) + ++idx1; + while (checked2[idx2 - i2]) + ++idx2; // If one is out of bounds, so is the other if (idx1 == e1) { @@ -614,47 +611,107 @@ namespace spot const unsigned nb_states = num_states(); std::vector remap(nb_states, -1U); - for (unsigned i = 0; i != nb_states; ++i) + // Check each hash + auto check_ix = [&](unsigned ix) { - auto j = spe && (*sp)[i] ? player_map.at(s_to_hash[i]).first - : env_map.at(s_to_hash[i]).first; - for (; j(); + v.clear(); + for (auto i = ix; i != -1U; i = hash_linked_list[i]) + v.push_back(i); + const unsigned N = v.size(); - // Because of the special self-loop tests we use above, - // it's possible that i can be mapped to remap[j] even - // if j was last compatible states found. Consider the - // following cases, taken from an actual test case: - // 18 is equal to 5, 35 is equal to 18, but 35 is not - // equal to 5. - // - // State: 5 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 18 {1} - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - // - // State: 18 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 18 {1} // self-loop - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - // - // State: 35 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 35 {1} // self-loop - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - break; + for (unsigned idx = 0; idx < N; ++idx) + { + auto i = v[idx]; + for (unsigned jdx = 0; jdx < idx; ++jdx) + { + auto j = v[jdx]; + if (state_equal(j, i)) + { + remap[i] = (remap[j] != -1U) ? remap[j] : j; + + // Because of the special self-loop tests we use above, + // it's possible that i can be mapped to remap[j] even + // if j was last compatible states found. Consider the + // following cases, taken from an actual test case: + // 18 is equal to 5, 35 is equal to 18, but 35 is not + // equal to 5. 
+ // + // State: 5 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 18 {1} + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + // + // State: 18 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 18 {1} // self-loop + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + // + // State: 35 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 35 {1} // self-loop + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + break; + } } } - } + }; + + auto upd = [](auto& b, const auto&e, unsigned it) + { + while ((it > 0) & (b != e)) + { + --it; + ++b; + } + }; + + auto worker = [&upd, check_ix, nthreads](unsigned pid, auto begp, auto endp, + auto bege, auto ende) + { + upd(begp, endp, pid); + upd(bege, ende, pid); + for (; begp != endp; upd(begp, endp, nthreads)) + check_ix(begp->second.first); + for (; bege != ende; upd(bege, ende, nthreads)) + check_ix(bege->second.first); + }; + + { + auto begp = player_map.begin(); + auto endp = player_map.end(); + auto bege = env_map.begin(); + auto ende = env_map.end(); + + + if ((nthreads == 1) & (num_states() > 1000)) // Bound? + { + worker(0, begp, endp, bege, ende); + } + else + { + static auto tv = std::vector(); + assert(tv.empty()); + tv.resize(nthreads); + for (unsigned pid = 0; pid < nthreads; ++pid) + tv[pid] = std::thread( + [worker, pid, begp, endp, bege, ende]() + { + worker(pid, begp, endp, bege, ende); + return; + }); + for (auto& t : tv) + t.join(); + tv.clear(); + } + } for (auto& e: edges()) if (remap[e.dst] != -1U) @@ -765,7 +822,7 @@ namespace spot comp_classes_.clear(); // get all compatible classes // Candidate classes share a hash - // A state is compatible to a class if it is compatble + // A state is compatible to a class if it is compatible // to any of its states auto& cand_classes = equiv_class_[hi]; unsigned n_c_classes = cand_classes.size(); diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index 4e97abe23..2be4d4984 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -23,148 +23,151 @@ import spot from unittest import TestCase tc = TestCase() -aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" -Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") -tc.assertEqual(aut.num_edges(), 2) -aut.merge_edges() -tc.assertEqual(aut.num_edges(), 1) +for nthread in range(1, 16, 2): + spot.set_nthreads(nthread) + tc.assertEqual(spot.get_nthreads(), nthread) + aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" + Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") + tc.assertEqual(aut.num_edges(), 2) + aut.merge_edges() + tc.assertEqual(aut.num_edges(), 1) -aut = spot.automaton(""" -HOA: v1 -States: 2 -Start: 0 -AP: 2 "p0" "p1" -acc-name: Buchi -Acceptance: 1 Inf(0) -properties: trans-labels explicit-labels trans-acc complete ---BODY-- -State: 0 -[!0] 0 {0} -[0] 1 {0} -State: 1 -[!0&!1] 0 {0} -[0 | 1] 1 -[0&!1] 1 {0} ---END--""") -tc.assertEqual(aut.num_edges(), 5) -aut.merge_edges() -tc.assertEqual(aut.num_edges(), 5) -tc.assertFalse(spot.is_deterministic(aut)) -aut = spot.split_edges(aut) -tc.assertEqual(aut.num_edges(), 9) -aut.merge_edges() -tc.assertEqual(aut.num_edges(), 5) -tc.assertTrue(spot.is_deterministic(aut)) + aut = spot.automaton(""" + HOA: v1 + States: 2 + Start: 0 + AP: 2 "p0" "p1" + acc-name: Buchi + Acceptance: 1 Inf(0) + properties: trans-labels explicit-labels trans-acc complete + --BODY-- + State: 0 + [!0] 0 {0} + [0] 1 {0} + State: 1 + [!0&!1] 0 {0} + [0 | 1] 1 + [0&!1] 1 {0} + --END--""") + tc.assertEqual(aut.num_edges(), 5) + aut.merge_edges() + 
tc.assertEqual(aut.num_edges(), 5) + tc.assertFalse(spot.is_deterministic(aut)) + aut = spot.split_edges(aut) + tc.assertEqual(aut.num_edges(), 9) + aut.merge_edges() + tc.assertEqual(aut.num_edges(), 5) + tc.assertTrue(spot.is_deterministic(aut)) -aut = spot.automaton(""" -HOA: v1 -States: 3 -Start: 0 -AP: 1 "a" -acc-name: Buchi -Acceptance: 1 Inf(0) -properties: trans-labels explicit-labels trans-acc complete ---BODY-- -State: 0 -[!0] 1 {0} -[0] 2 {0} -State: 1 -[!0] 1 {0} -[0] 1 -State: 2 -[!0] 2 {0} -[0] 1 ---END--""") -aut.merge_states() -tc.assertEqual(aut.num_edges(), 4) -tc.assertEqual(aut.num_states(), 2) -tc.assertTrue(spot.is_deterministic(aut)) -tc.assertTrue(aut.prop_complete()) -aut.merge_states() -tc.assertEqual(aut.num_edges(), 4) -tc.assertEqual(aut.num_states(), 2) -tc.assertTrue(spot.is_deterministic(aut)) -tc.assertTrue(aut.prop_complete()) + aut = spot.automaton(""" + HOA: v1 + States: 3 + Start: 0 + AP: 1 "a" + acc-name: Buchi + Acceptance: 1 Inf(0) + properties: trans-labels explicit-labels trans-acc complete + --BODY-- + State: 0 + [!0] 1 {0} + [0] 2 {0} + State: 1 + [!0] 1 {0} + [0] 1 + State: 2 + [!0] 2 {0} + [0] 1 + --END--""") + aut.merge_states() + tc.assertEqual(aut.num_edges(), 4) + tc.assertEqual(aut.num_states(), 2) + tc.assertTrue(spot.is_deterministic(aut)) + tc.assertTrue(aut.prop_complete()) + aut.merge_states() + tc.assertEqual(aut.num_edges(), 4) + tc.assertEqual(aut.num_states(), 2) + tc.assertTrue(spot.is_deterministic(aut)) + tc.assertTrue(aut.prop_complete()) -aa = spot.automaton(""" -HOA: v1 States: 41 Start: 0 AP: 3 "allfinished" "finished_0" -"finished_1" acc-name: parity max odd 4 Acceptance: 4 Inf(3) | (Fin(2) -& (Inf(1) | Fin(0))) properties: trans-labels explicit-labels -trans-acc colored properties: deterministic --BODY-- State: 0 -[!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [!0&1&!2] 3 {1} [!0&1&2] 4 {1} -[0&!1&!2] 5 {1} [0&!1&2] 6 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} State: 1 -[!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} -[!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} -State: 2 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} -State: 3 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} -State: 4 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} -[0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 {1} -State: 5 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 -{1} [!0&1&!2] 20 {1} State: 6 [0&1&2] 8 {1} [!0&1&2] 10 {1} [!0&!1&2] -19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 21 {1} State: 7 [0&1&2] 8 {3} -[!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 22 {1} -State: 8 [!0&!1&!2] 5 {1} [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 -{1} [!0&1&!2] 20 {1} State: 9 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] 8 -{1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 {1} -[!0&!1&2] 24 {1} State: 10 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 -{3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {1} -[!0&!1&!2] 25 {1} State: 11 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 -{1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 12 [0&1&2] 8 {3} -[!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 27 {1} [!0&!1&!2] 28 {1} -State: 13 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} -State: 14 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} -[0&!1&2] 
12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} -State: 15 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} -State: 16 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 -{1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 -{1} State: 17 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] -11 {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 -{1} State: 18 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} -[!0&!1&2] 19 {1} [!0&1&!2] 20 {1} State: 19 [0&1&!2] 7 {3} [0&1&2] 8 -{3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} [!0&1&2] 32 -{1} State: 20 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 {1} [!0&1&2] -32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 21 [0&1&2] 8 {1} -[!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} -State: 22 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 -{1} [!0&!1&!2] 35 {1} State: 23 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] -8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 -{1} [!0&!1&2] 24 {1} State: 24 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] -8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 -{1} [!0&!1&!2] 25 {1} State: 25 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] -8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 -{1} [!0&!1&!2] 25 {1} State: 26 [0&1&2] 8 {3} [!0&1&2] 10 {1} -[!0&!1&2] 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 27 [0&1&2] -8 {3} [0&!1&2] 12 {3} [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 -{1} [!0&!1&2] 37 {1} State: 28 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] -19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 29 [0&!1&!2] 5 {3} -[0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] -14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} State: 30 [0&1&!2] 7 {3} -[0&1&2] 8 {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} -[!0&1&2] 32 {1} State: 31 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} -[0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} -[!0&!1&!2] 38 {1} State: 32 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 -{3} [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} -[!0&!1&!2] 39 {1} State: 33 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 -{1} [!0&1&2] 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 34 -[0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} -[!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 {1} -State: 35 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 -{1} [!0&!1&!2] 35 {1} State: 36 [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 {1} [!0&!1&2] 37 {1} -State: 37 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} -State: 38 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} [!0&!1&!2] 38 {1} -State: 39 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} -State: 40 [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 -{1} [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 -{1} --END--""") -aa.merge_states() -# This used to cause a segfault reported by Philipp. 
-print(aa.to_str()) + aa = spot.automaton(""" + HOA: v1 States: 41 Start: 0 AP: 3 "allfinished" "finished_0" + "finished_1" acc-name: parity max odd 4 Acceptance: 4 Inf(3) | (Fin(2) + & (Inf(1) | Fin(0))) properties: trans-labels explicit-labels + trans-acc colored properties: deterministic --BODY-- State: 0 + [!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [!0&1&!2] 3 {1} [!0&1&2] 4 {1} + [0&!1&!2] 5 {1} [0&!1&2] 6 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} State: 1 + [!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} + [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} + State: 2 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} + State: 3 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} + State: 4 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} + [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 {1} + State: 5 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 + {1} [!0&1&!2] 20 {1} State: 6 [0&1&2] 8 {1} [!0&1&2] 10 {1} [!0&!1&2] + 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 21 {1} State: 7 [0&1&2] 8 {3} + [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 22 {1} + State: 8 [!0&!1&!2] 5 {1} [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 + {1} [!0&1&!2] 20 {1} State: 9 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] 8 + {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 {1} + [!0&!1&2] 24 {1} State: 10 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 + {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {1} + [!0&!1&!2] 25 {1} State: 11 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 + {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 12 [0&1&2] 8 {3} + [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 27 {1} [!0&!1&!2] 28 {1} + State: 13 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} + State: 14 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} + [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} + State: 15 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} + State: 16 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 + {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 + {1} State: 17 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] + 11 {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 + {1} State: 18 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} + [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} State: 19 [0&1&!2] 7 {3} [0&1&2] 8 + {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} [!0&1&2] 32 + {1} State: 20 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 {1} [!0&1&2] + 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 21 [0&1&2] 8 {1} + [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} + State: 22 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 + {1} [!0&!1&!2] 35 {1} State: 23 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] + 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 + {1} [!0&!1&2] 24 {1} State: 24 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] + 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 + {1} [!0&!1&!2] 25 {1} State: 25 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] + 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 + {1} [!0&!1&!2] 25 {1} State: 26 [0&1&2] 8 {3} [!0&1&2] 10 {1} + 
[!0&!1&2] 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 27 [0&1&2] + 8 {3} [0&!1&2] 12 {3} [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 + {1} [!0&!1&2] 37 {1} State: 28 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] + 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 29 [0&!1&!2] 5 {3} + [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] + 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} State: 30 [0&1&!2] 7 {3} + [0&1&2] 8 {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} + [!0&1&2] 32 {1} State: 31 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} + [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} + [!0&!1&!2] 38 {1} State: 32 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 + {3} [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} + [!0&!1&!2] 39 {1} State: 33 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 + {1} [!0&1&2] 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 34 + [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} + [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 {1} + State: 35 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 + {1} [!0&!1&!2] 35 {1} State: 36 [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 {1} [!0&!1&2] 37 {1} + State: 37 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} + State: 38 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} [!0&!1&!2] 38 {1} + State: 39 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} + State: 40 [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 + {1} [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 + {1} --END--""") + aa.merge_states() + # This used to cause a segfault reported by Philipp. + print(aa.to_str()) From aca6bd90429b2c4d6828c61ba08ccd9d7e97f2b6 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 31 May 2022 13:40:58 +0200 Subject: [PATCH 085/606] synthesis: Deletion of an incorrect case in the bypass With a formula like G(b1) & (GFi <-> GF(b1)), a direct strategy was created while it is unrealizable. * spot/twaalgos/synthesis.cc: here. * tests/core/ltlsynt.test: add tests --- spot/twaalgos/synthesis.cc | 55 ++++++++++++++++++++------------------ tests/core/ltlsynt.test | 41 ++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 26 deletions(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index b249acce9..5a5d1297a 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1168,6 +1168,27 @@ namespace spot //Anonymous for try_create_strat namespace { + // Checks that 2 sets have a common element. Use it instead + // of set_intersection when we just want to check if they have a common + // element because it avoids going through the rest of the sets after an + // element is found. + static bool + are_intersecting(const std::set &v1, + const std::set &v2) + { + auto v1_pos = v1.begin(), v2_pos = v2.begin(), v1_end = v1.end(), + v2_end = v2.end(); + while (v1_pos != v1_end && v2_pos != v2_end) + { + if (*v1_pos < *v2_pos) + ++v1_pos; + else if (*v2_pos < *v1_pos) + ++v2_pos; + else + return true; + } + return false; + } class formula_2_inout_props { private: @@ -1372,10 +1393,6 @@ namespace spot if (combin == -1U) return ret_sol_maybe(); - // We know that a strategy exists and we don't want to construct it. 
- if (!want_strategy) - return ret_sol_exists(nullptr); - formula f_left = f_other[(combin + 1) % 2]; formula f_right = f_other[combin % 2]; if (!(combin % 2)) @@ -1384,6 +1401,14 @@ namespace spot std::swap(left_outs, right_outs); } + auto [_, g_outs] = form2props.aps_of(f_g); + if (are_intersecting(g_outs, right_outs)) + return ret_sol_maybe(); + + // We know that a strategy exists and we don't want to construct it. + if (!want_strategy) + return ret_sol_exists(nullptr); + auto trans = create_translator(gi); trans.set_pref(postprocessor::Deterministic | postprocessor::Complete); @@ -1484,28 +1509,6 @@ namespace spot namespace // anonymous for subsformula { using namespace spot; - // Checks that 2 sets have a common element. Use it instead - // of set_intersection when we just want to check if they have a common - // element because it avoids going through the rest of the sets after an - // element is found. - static bool - are_intersecting(const std::set &v1, - const std::set &v2) - { - auto v1_pos = v1.begin(), v2_pos = v2.begin(), v1_end = v1.end(), - v2_end = v2.end(); - while (v1_pos != v1_end && v2_pos != v2_end) - { - if (*v1_pos < *v2_pos) - ++v1_pos; - else if (*v2_pos < *v1_pos) - ++v2_pos; - else - return true; - } - return false; - } - static std::pair, std::set> algo4(const std::vector &assumptions, const std::set &outs, diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 1badb9b4b..9319d96a8 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -944,3 +944,44 @@ ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose --bypass=no\ --algo=acd 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +# Bypass: check that in G(b1) ∧ (Büchi ↔ GF(b2)), b1 and b2 don't share an AP. +# We do it because G(o1 ∨ o2) ∧ (GFi ↔ GFo1) is realizable while +# G(o1) ∧ (GFi ↔ GFo1) is not realizable. So we cannot conclude if +# they share an AP. +cat >exp < GFo1) +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 1 states and 1 colors +LAR construction done in X seconds +DPA has 1 states, 1 colors +split inputs and outputs done in X seconds +automaton has 3 states +solving game with acceptance: Streett 1 +game solved in X seconds +EOF +ltlsynt -f "G(o1) & (GFi <-> GFo1)" --outs="o1" --verbose\ + --bypass=yes 2> out || true +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +cat >exp < GFo1) +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 1 states and 2 colors +LAR construction done in X seconds +DPA has 2 states, 2 colors +split inputs and outputs done in X seconds +automaton has 6 states +solving game with acceptance: parity max odd 4 +game solved in X seconds +simplification took X seconds +EOF +ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ + --bypass=yes 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp \ No newline at end of file From 721d5695ec73ca234dce7a45fa0fd76519cb2013 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 25 May 2022 17:02:38 +0200 Subject: [PATCH 086/606] add a newer version of the generic emptiness check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As discussed with Jan Strejček. * spot/twa/acc.cc, spot/twa/acc.hh (fin_unit_one_split): New function. (fin_one_extract): Return the simplified acceptance condition as an optimization. * python/spot/impl.i: Bind this new function. 
* tests/python/acc.py: New file, to test it. * tests/Makefile.am: Add acc.py. * spot/twaalgos/genem.cc, spot/twaalgos/genem.hh: Implement the spot211 variant of the emptiness check. * tests/python/genem.py: Test it. * tests/python/acc_cond.ipynb: Adjust test for fin_one_extract. --- python/spot/impl.i | 11 +++++ spot/twa/acc.cc | 99 ++++++++++++++++++++++++++++++++++++- spot/twa/acc.hh | 75 +++++++++++++++++++++++----- spot/twaalgos/genem.cc | 27 ++++++++-- spot/twaalgos/genem.hh | 10 +++- tests/Makefile.am | 1 + tests/python/acc.py | 65 ++++++++++++++++++++++++ tests/python/acc_cond.ipynb | 41 ++++++++++++--- tests/python/genem.py | 9 ++-- 9 files changed, 308 insertions(+), 30 deletions(-) create mode 100644 tests/python/acc.py diff --git a/python/spot/impl.i b/python/spot/impl.i index b7f116201..a07709005 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -551,6 +551,17 @@ namespace std { } %apply std::vector &OUTPUT {std::vector& pairs} %apply std::vector &OUTPUT {std::vector& pairs} +// Must occur before the twa declaration +%typemap(out) SWIGTYPE spot::acc_cond::fin_unit_one_split %{ + { + auto& v = static_cast>($1); + $result = PyTuple_Pack(3, + swig::from(std::get<0>(v)), + swig::from(std::get<1>(v)), + swig::from(std::get<2>(v))); + } +%} + %include %template(pair_bool_mark) std::pair; diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index ce5d463aa..07aac36f9 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -2707,6 +2707,35 @@ namespace spot return false; } + // Check wheter pos looks like Fin(f) or Fin(f)&rest + bool is_conj_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + auto sub = pos - pos->sub.size; + do + { + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + --pos; + break; + case acc_cond::acc_op::Or: + pos -= pos->sub.size + 1; + break; + case acc_cond::acc_op::Fin: + if (pos[-1].mark & f) + return true; + SPOT_FALLTHROUGH; + case acc_cond::acc_op::Inf: + case acc_cond::acc_op::InfNeg: + case acc_cond::acc_op::FinNeg: + pos -= 2; + break; + } + } + while (sub < pos); + return false; + } + acc_cond::acc_code extract_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { @@ -2716,7 +2745,7 @@ namespace spot case acc_cond::acc_op::And: case acc_cond::acc_op::Fin: case acc_cond::acc_op::Inf: - return pos; + return strip_rec(pos, f, true, false); case acc_cond::acc_op::Or: { --pos; @@ -2725,7 +2754,7 @@ namespace spot { if (uses_fin(pos, f)) { - acc_cond::acc_code tmp(pos); + auto tmp = strip_rec(pos, f, true, false); tmp |= std::move(res); std::swap(tmp, res); } @@ -2742,6 +2771,52 @@ namespace spot SPOT_UNREACHABLE(); return {}; } + + std::pair + split_top_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + auto start = pos - pos->sub.size; + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + case acc_cond::acc_op::Fin: + if (is_conj_fin(pos, f)) + return {pos, acc_cond::acc_code::f()}; + SPOT_FALLTHROUGH; + case acc_cond::acc_op::Inf: + return {acc_cond::acc_code::f(), pos}; + case acc_cond::acc_op::Or: + { + --pos; + auto left = acc_cond::acc_code::f(); + auto right = acc_cond::acc_code::f(); + do + { + if (is_conj_fin(pos, f)) + { + auto tmp = strip_rec(pos, f, true, false); + tmp |= std::move(left); + std::swap(tmp, left); + } + else + { + acc_cond::acc_code tmp(pos); + tmp |= std::move(right); + std::swap(tmp, right); + } + pos -= pos->sub.size + 1; + } + while (pos > start); + return {std::move(left), std::move(right)}; + } + case acc_cond::acc_op::FinNeg: + case acc_cond::acc_op::InfNeg: + SPOT_UNREACHABLE(); + return 
{acc_cond::acc_code::f(), acc_cond::acc_code::f()}; + } + SPOT_UNREACHABLE(); + return {acc_cond::acc_code::f(), acc_cond::acc_code::f()}; + } } std::pair @@ -2756,6 +2831,26 @@ namespace spot return {selected_fin, extract_fin(pos, {(unsigned) selected_fin})}; } + std::tuple + acc_cond::acc_code::fin_unit_one_split() const + { + if (SPOT_UNLIKELY(is_t() || is_f())) + err: + throw std::runtime_error("fin_unit_one_split(): no Fin"); + const acc_cond::acc_word* pos = &back(); + int selected_fin = has_top_fin(pos); + if (selected_fin >= 0) + { + auto [left, right] = split_top_fin(pos, {(unsigned) selected_fin}); + return {selected_fin, std::move(left), std::move(right)}; + } + selected_fin = fin_one(); + if (selected_fin < 0) + goto err; + acc_cond::mark_t fo_m = {(unsigned) selected_fin}; + return {selected_fin, extract_fin(pos, fo_m), force_inf(fo_m)}; + } + namespace { bool diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 455850f35..905f5c40a 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1271,7 +1272,8 @@ namespace spot int fin_one() const; /// \brief Return one acceptance set i that appears as `Fin(i)` - /// in the condition, and all disjuncts containing it. + /// in the condition, and all disjuncts containing it with + /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct /// has the shape `...&Fin(i)&...`, then `i` will be prefered @@ -1282,13 +1284,34 @@ namespace spot /// `Fin(i)` have been removed. /// /// For example on - /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` - /// the output would be the pair - /// `(1, Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. - /// On that example `Fin(1)` is prefered to `Fin(7)` because - /// it appears at the top-level. + /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` the + /// output would be the pair, we would select `Fin(1)` over + /// `Fin(7)` because it appears at the top-level. Then we would + /// collect the disjuncts containing `Fin(1)`, that is, + /// `Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. Finally we would + /// replace `Fin(1)` by true and `Inf(1)` by false. The return + /// value would then be `(1, Inf(2)|Inf(5))`. std::pair fin_one_extract() const; + /// \brief Split an acceptance condition, trying to select one + /// unit-Fin. + /// + /// If the condition is a disjunction and one of the disjunct as + /// has the shape `...&Fin(i)&...`, then this will return + /// (i, left, right), where left is all disjunct of this form, and + /// right are all the others. + /// + /// If the input formula has the shape `...&Fin(i)&...` then left + /// is set to the entire formula, and right is empty. + /// + /// If no disjunct has the right shape, then a random Fin(i) is + /// searched in the formula, and the output (i, left, right). + /// is such that left contains all disjuncts containing Fin(i) + /// (at any depth), and right contains the original formlula + /// where Fin(i) has been replaced by false. + std::tuple + fin_unit_one_split() const; + /// \brief Help closing accepting or rejecting cycle. 
/// /// Assuming you have a partial cycle visiting all acceptance @@ -2203,7 +2226,8 @@ namespace spot } /// \brief Return one acceptance set i that appears as `Fin(i)` - /// in the condition, and all disjuncts containing it. + /// in the condition, and all disjuncts containing it with + /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct /// has the shape `...&Fin(i)&...`, then `i` will be prefered @@ -2214,17 +2238,42 @@ namespace spot /// `Fin(i)` have been removed. /// /// For example on - /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` - /// the output would be the pair - /// `(1, Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. - /// On that example `Fin(1)` is prefered to `Fin(7)` because - /// it appears at the top-level. + /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` the + /// output would be the pair, we would select `Fin(1)` over + /// `Fin(7)` because it appears at the top-level. Then we would + /// collect the disjuncts containing `Fin(1)`, that is, + /// `Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. Finally we would + /// replace `Fin(1)` by true and `Inf(1)` by false. The return + /// value would then be `(1, Inf(2)|Inf(5))`. std::pair fin_one_extract() const { auto [f, c] = code_.fin_one_extract(); return {f, {num_sets(), std::move(c)}}; } + /// \brief Split an acceptance condition, trying to select one + /// unit-Fin. + /// + /// If the condition is a disjunction and one of the disjunct as + /// has the shape `...&Fin(i)&...`, then this will return + /// (i, left, right), where left is all disjunct of this form, and + /// right are all the others. + /// + /// If the input formula has the shape `...&Fin(i)&...` then left + /// is set to the entire formula, and right is empty. + /// + /// If no disjunct has the right shape, then a random Fin(i) is + /// searched in the formula, and the output (i, left, right). + /// is such that left contains all disjuncts containing Fin(i) + /// (at any depth), and right contains the original formlula + /// where Fin(i) has been replaced by false. + std::tuple + fin_unit_one_split() const + { + auto [f, l, r] = code_.fin_unit_one_split(); + return {f, {num_sets(), std::move(l)}, {num_sets(), std::move(r)}}; + } + /// \brief Return the top-level disjuncts. /// /// For instance, if the formula is diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index e49f5b07c..51b2ea903 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Developpement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -25,7 +25,7 @@ namespace spot { namespace { - enum genem_version_t { spot28, atva19, spot29, spot210 }; + enum genem_version_t { spot28, atva19, spot29, spot210, spot211 }; static genem_version_t genem_version = spot29; } @@ -33,6 +33,8 @@ namespace spot { if (emversion == nullptr || !strcasecmp(emversion, "spot29")) genem_version = spot29; + else if (!strcasecmp(emversion, "spot211")) + genem_version = spot211; else if (!strcasecmp(emversion, "spot210")) genem_version = spot210; else if (!strcasecmp(emversion, "spot28")) @@ -41,7 +43,8 @@ namespace spot genem_version = atva19; else throw std::invalid_argument("generic_emptiness_check version should be " - "one of {spot28, atva19, spot29, spot210}"); + "one of {spot28, atva19, spot29, spot210, " + "spot211}"); } namespace @@ -84,6 +87,8 @@ namespace spot scc_split_check(const scc_info& si, unsigned scc, const acc_cond& acc, Extra extra, acc_cond::mark_t tocut) { + if (genem_version == spot211 || genem_version == spot210) + tocut |= acc.fin_unit(); scc_and_mark_filter filt(si, scc, tocut); filt.override_acceptance(acc); scc_info upper_si(filt, EarlyStop @@ -118,13 +123,27 @@ namespace spot // Try to accept when Fin(fo) == true acc_cond::mark_t fo_m = {(unsigned) fo}; if (!scc_split_check - (si, scc, fpart.remove(fo_m, true), extra, fo_m)) + (si, scc, fpart, extra, fo_m)) if constexpr (EarlyStop) return false; // Try to accept when Fin(fo) == false acc = acc.force_inf(fo_m); } while (!acc.is_f()); + else if (genem_version == spot211) + { + do + { + auto [fo, fpart, rest] = acc.fin_unit_one_split(); + acc_cond::mark_t fo_m = {(unsigned) fo}; + if (!scc_split_check + (si, scc, fpart, extra, fo_m)) + if constexpr (EarlyStop) + return false; + acc = rest; + } + while (!acc.is_f()); + } else if (genem_version == spot29) do { diff --git a/spot/twaalgos/genem.hh b/spot/twaalgos/genem.hh index 2d1ded4c7..3c3e5de51 100644 --- a/spot/twaalgos/genem.hh +++ b/spot/twaalgos/genem.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Developpement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -100,7 +100,13 @@ namespace spot /// - "spot29" improves upon the worst case of atva19. This is /// the default. /// - "spot210" improves upon "spot29" in a few cases where a Fin - /// is shared by multiple disjuncts. + /// is shared by multiple disjuncts. This improve the worst + /// case complexity of EL-automata in the general case, but worsen + /// the complexity of Hyper-Rabin in particular. + /// - "spot211" is another attempt at fixing worst case complexities. + /// Compared to atva19, this improves the complexities for Rabin, + /// GeneralizedRabin, and EL without worsening the complexity of + /// Hyper-Rabin. 
SPOT_API void generic_emptiness_check_select_version(const char* emversion = nullptr); diff --git a/tests/Makefile.am b/tests/Makefile.am index 3582a8493..b4627f3e6 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -391,6 +391,7 @@ TESTS_python = \ python/_altscc.ipynb \ python/_autparserr.ipynb \ python/_aux.ipynb \ + python/acc.py \ python/accparse2.py \ python/alarm.py \ python/aliases.py \ diff --git a/tests/python/acc.py b/tests/python/acc.py new file mode 100644 index 000000000..8a23dcd46 --- /dev/null +++ b/tests/python/acc.py @@ -0,0 +1,65 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement +# de l'Epita +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + +a = spot.acc_cond('parity min odd 5') +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, {}, spot.acc_cond(5, "f"))'.format(repr(a))) + +a.set_acceptance('Rabin 3') +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, spot.acc_cond(5, "Inf(1)"), ' + 'spot.acc_cond(5, "(Fin(2) & Inf(3)) | (Fin(4) & Inf(5))"))') + +a.set_acceptance('(Fin(0)|Inf(3))&(Fin(1)|Inf(4))&(Fin(2)|Inf(5)) |\ +(Fin(0)|Inf(4))&(Fin(1)|Inf(5))&(Fin(2)|Inf(3)) |\ +(Fin(0)|Inf(5))&(Fin(1)|Inf(3))&(Fin(2)|Inf(4))') + +tc.maxDiff = None +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, spot.acc_cond(5, ' + '"((Fin(1) | Inf(4)) & (Fin(2) | Inf(5))) | ' + '((Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '((Fin(1) | Inf(3)) & (Fin(2) | Inf(4)))"), ' + 'spot.acc_cond(5, ' + '"(Inf(3) & (Fin(1) | Inf(4)) & (Fin(2) | Inf(5))) | ' + '(Inf(4) & (Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '(Inf(5) & (Fin(1) | Inf(3)) & (Fin(2) | Inf(4)))"))') + +a = a.remove([4], True) +tc.assertEqual(str(a.fin_unit_one_split()), + '(1, spot.acc_cond(5, ' + '"(Fin(0) | Inf(3)) & (Fin(2) | Inf(5))"), ' + 'spot.acc_cond(5, ' + '"(Fin(0) & (Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '((Fin(0) | Inf(5)) & (Fin(1) | Inf(3)) & Fin(2))"))') + +def report_missing_exception(): + raise RuntimeError("missing exception") + +a.set_acceptance("Inf(0)") +try: + a.fin_unit_one_split() +except RuntimeError as e: + tc.assertIn('no Fin', str(e)) +else: + report_missing_exception() diff --git a/tests/python/acc_cond.ipynb b/tests/python/acc_cond.ipynb index 76580fcdd..492c416ca 100644 --- a/tests/python/acc_cond.ipynb +++ b/tests/python/acc_cond.ipynb @@ -1416,7 +1416,7 @@ "source": [ "`fin_one()` return the number of one color `x` that appears as `Fin(x)` in the formula, or `-1` if the formula is Fin-less.\n", "\n", - "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` appear. 
Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is prefered to 2." + "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` used to appear but where `Fin(x)` have been replaced by `true`, and `Inf(x)` by `false`. Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is prefered to 2." ] }, { @@ -1430,7 +1430,7 @@ "text": [ "(4, (Fin(0) | Inf(1)) & (Fin(2) | Inf(3)))\n", "0\n", - "(0, spot.acc_cond(4, \"(Fin(0) | Inf(1)) & (Fin(2) | Inf(3))\"))\n" + "(0, spot.acc_cond(4, \"Fin(2) | Inf(3)\"))\n" ] } ], @@ -1451,7 +1451,7 @@ "text": [ "(6, (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)))\n", "0\n", - "(0, spot.acc_cond(6, \"Fin(0) & Inf(1)\"))\n" + "(0, spot.acc_cond(6, \"Inf(1)\"))\n" ] } ], @@ -1473,7 +1473,7 @@ "text": [ "(6, (Inf(0) & (Fin(2) | Inf(3))) | (Inf(4) & Fin(5)) | ((Inf(0)&Inf(5)) & (Fin(0)|Fin(5))))\n", "2\n", - "(5, spot.acc_cond(6, \"(Inf(4) & Fin(5)) | ((Inf(0)&Inf(5)) & (Fin(0)|Fin(5)))\"))\n" + "(5, spot.acc_cond(6, \"Inf(4)\"))\n" ] } ], @@ -1483,11 +1483,40 @@ "print(acc3.fin_one())\n", "print(acc3.fin_one_extract())" ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(8, (Fin(1) & Inf(2)) | (Inf(3)&Inf(4)) | (Inf(5) & (Fin(1)|Fin(7))))\n", + "1\n", + "(1, spot.acc_cond(8, \"Inf(2) | Inf(5)\"))\n" + ] + } + ], + "source": [ + "acc4 = spot.acc_cond('Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))')\n", + "print(acc4)\n", + "print(acc4.fin_one())\n", + "print(acc4.fin_one_extract())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1501,7 +1530,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.10.5" } }, "nbformat": 4, diff --git a/tests/python/genem.py b/tests/python/genem.py index 0c9d0809a..962112ac0 100644 --- a/tests/python/genem.py +++ b/tests/python/genem.py @@ -305,14 +305,17 @@ def run_bench(automata): res3c = spot.generic_emptiness_check(aut) spot.generic_emptiness_check_select_version("spot210") res3d = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot211") + res3e = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot29") res2 = spot.remove_fin(aut).is_empty() res1 = generic_emptiness2(aut) res = (str(res1)[0] + str(res2)[0] + str(res3a)[0] + str(res3b)[0] + str(res3c)[0] + str(res3d)[0] - + str(res4)[0] + str(res5)[0]) + + str(res3e)[0] + str(res4)[0] + str(res5)[0]) print(res) - tc.assertIn(res, ('TTTTTTTT', 'FFFFFFFF')) - if res == 'FFFFFFFF': + tc.assertIn(res, ('TTTTTTTTT', 'FFFFFFFFF')) + if res == 'FFFFFFFFF': run3 = spot.generic_accepting_run(aut) tc.assertTrue(run3.replay(spot.get_cout())) From 23908f3d2f2854872c7f80053e3961a52d8a60f2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 Jun 2022 
23:43:50 +0200 Subject: [PATCH 087/606] Add a --enable-pthread option to activate experimental threading code * NEWS, README, doc/org/compile.org: Mention the option and its effect on compilation requirements. * configure.ac: Add the --enable-pthread option, and ENABLE_PTHREAD macro. * doc/org/g++wrap.in, spot/Makefile.am, spot/libspot.pc.in: Compile with -pthread conditionally. * spot/graph/graph.hh, spot/twa/twagraph.cc: Adjust the code to not use thread-local variables, and let the pthread code be optional. * .gitlab-ci.yml: Activate --enable-pthread in two configurations. --- .gitlab-ci.yml | 8 ++++---- NEWS | 12 ++++++++++++ README | 6 ++++++ configure.ac | 11 ++++++++++- doc/org/compile.org | 5 ++++- doc/org/g++wrap.in | 4 ++-- spot/Makefile.am | 4 ++-- spot/graph/graph.hh | 13 +++++++++---- spot/libspot.pc.in | 2 +- spot/twa/twagraph.cc | 46 ++++++++++++++++++++++++++++---------------- 10 files changed, 79 insertions(+), 32 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3c66af0b7..a1bb0ca9a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,9 +22,9 @@ debian-stable-gcc: image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable script: - autoreconf -vfi - - ./configure --enable-max-accsets=256 + - ./configure --enable-max-accsets=256 --enable-pthread - make - - make distcheck + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-max-accsets=256 --enable-pthread' artifacts: when: always paths: @@ -89,7 +89,7 @@ debian-gcc-snapshot: - autoreconf -vfi - ./configure --with-included-ltdl CXX='g++' - make - - make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl' + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl' allow_failure: true artifacts: when: always @@ -111,7 +111,7 @@ alpine-gcc: - autoreconf -vfi - ./configure - make - - make distcheck || { chmod -R u+w ./spot-*; false; } + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-pthread' || { chmod -R u+w ./spot-*; false; } artifacts: when: always paths: diff --git a/NEWS b/NEWS index 33b74132c..b8067d47a 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,17 @@ New in spot 2.10.6.dev (not yet released) + Build: + + - A new configure option --enable-pthread enable the compilation of + Spot with -pthread, and activate the parallel version of some + algorithms. If Spot is compiled with -pthread enabled, any user + linking with Spot should also link with the pthread library. In + order to not break existing build setups using Spot, this option + is currently disabled by default in this release. We plan to turn + it on by default in some future release. Third-party project + using Spot may want to start linking with -pthread in prevision + for this change. + Command-line tools: - autfilt has a new options --aliases=drop|keep to specify diff --git a/README b/README index 836bc51b0..7581df4ad 100644 --- a/README +++ b/README @@ -173,6 +173,12 @@ flags specific to Spot: client code should be compiled with -D_GLIBCXX_DEBUG as well. This options should normally only be useful to run Spot's test-suite. + --enable-pthread + Build and link with the -pthread option, and activate a few + parallel variants of the algorithms. This is currently disabled + by default, as it require all third-party tools using Spot to + build with -pthread as well. + --enable-c++20 Build everything in C++20 mode. We use that in our build farm to ensure that Spot can be used in C++20 projects as well. 
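As a concrete illustration of the new linking requirement described above for --enable-pthread, here is a minimal sketch of how a third-party program could be built against an installed Spot (demo.cc is a placeholder name; the -std flag just has to match what the installed headers expect):

    # Linking by hand: add -pthread next to the usual Spot libraries.
    g++ -std=c++17 demo.cc -lspot -lbddx -pthread -o demo

    # With pkg-config, libspot.pc advertises the flag itself once Spot
    # was configured with --enable-pthread, so it is picked up
    # automatically.
    g++ -std=c++17 demo.cc $(pkg-config --cflags --libs libspot) -o demo
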
diff --git a/configure.ac b/configure.ac index 2ac9ff616..7c0ff62ae 100644 --- a/configure.ac +++ b/configure.ac @@ -53,6 +53,15 @@ AC_ARG_ENABLE([c++20], [Compile in C++20 mode.])], [enable_20=$enableval], [enable_20=no]) +AC_ARG_ENABLE([pthread], + [AS_HELP_STRING([--enable-pthread], + [Allow libspot to use POSIX threads.])], + [enable_pthread=$enableval], [enable_pthread=no]) +if test "$enable_pthread" = yes; then + AC_DEFINE([ENABLE_PTHREAD], [1], [Whether Spot is compiled with -pthread.]) + AC_SUBST([LIBSPOT_PTHREAD], [-pthread]) +fi + AC_ARG_ENABLE([doxygen], [AS_HELP_STRING([--enable-doxygen], [enable generation of Doxygen documentation (requires Doxygen)])], @@ -150,7 +159,7 @@ AX_CHECK_BUDDY AC_CHECK_FUNCS([times kill alarm sigaction sched_getcpu]) oLIBS=$LIBS -LIBS="$LIBS -lpthread" +LIBS="$LIBS -pthread" AC_CHECK_FUNCS([pthread_setaffinity_np]) LIBS=$oLIBS diff --git a/doc/org/compile.org b/doc/org/compile.org index 8d4a3b1ca..6c2f8e6c6 100644 --- a/doc/org/compile.org +++ b/doc/org/compile.org @@ -210,11 +210,14 @@ one library requiring another, you will need to link with the =bddx= library. This should be as simple as adding =-lbddx= after =-lspot= in the first three cases. +Similarly, if Spot has been configured with =--enable-pthread=, you +will need to add =-pthread= to the compiler flags. + In the fourth case where =libtool= is used to link against =libspot.la= linking against =libbddx.la= should not be necessary because Libtool already handles such dependencies. However the version of =libtool= distributed with Debian is patched to ignore those dependencies, so in this -case you 2 +case you have to list all dependencies. * Additional suggestions diff --git a/doc/org/g++wrap.in b/doc/org/g++wrap.in index c4a61a39c..b176af28b 100755 --- a/doc/org/g++wrap.in +++ b/doc/org/g++wrap.in @@ -1,6 +1,6 @@ #!/bin/sh # This is a wrapper around the compiler, to ensure that the code -# example run from the org-mode file are all linked with Spot. +# examples run from org-mode files are all linked with Spot. # # Also we save errors to org.errors, so that we can detect issues # after org-mode has exported everything. Otherwise these errors @@ -8,7 +8,7 @@ @top_builddir@/libtool link @CXX@ @CXXFLAGS@ @CPPFLAGS@ -Wall -Werror \ -I@abs_top_builddir@ -I@abs_top_srcdir@ -I@abs_top_srcdir@/buddy/src \ "$@" @abs_top_builddir@/spot/libspot.la \ - @abs_top_builddir@/buddy/src/libbddx.la 2> errors.$$ + @abs_top_builddir@/buddy/src/libbddx.la @LIBSPOT_PTHREAD@ 2> errors.$$ code=$? if test $code -ne 0 && test -s errors.$$; then cat errors.$$ >>org.errors diff --git a/spot/Makefile.am b/spot/Makefile.am index c7bebfe6a..72cbef22d 100644 --- a/spot/Makefile.am +++ b/spot/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2016, 2017, 2020 +## Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2016, 2017, 2020, 2022 ## Laboratoire de Recherche et Développement de l'Epita (LRDE). 
## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), ## département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -35,7 +35,7 @@ SUBDIRS = misc priv tl graph twa twacube twaalgos ta taalgos kripke \ lib_LTLIBRARIES = libspot.la libspot_la_SOURCES = -libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined -pthread $(SYMBOLIC_LDFLAGS) +libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined @LIBSPOT_PTHREAD@ $(SYMBOLIC_LDFLAGS) libspot_la_LIBADD = \ kripke/libkripke.la \ misc/libmisc.la \ diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index dc7ffc6ae..6419fc27a 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -20,6 +20,7 @@ #pragma once #include +#include #include #include #include @@ -28,7 +29,9 @@ #include #include #include -#include +#ifdef SPOT_ENABLE_PTHREAD +# include +#endif // SPOT_ENABLE_PTHREAD namespace spot { @@ -1242,8 +1245,6 @@ namespace spot //std::cerr << "\nbefore\n"; //dump_storage(std::cerr); const auto N = num_states(); - // Read threads once - const unsigned nthreads = get_nthreads(); auto idx_list = std::vector(N+1); auto new_edges = edge_vector_t(); @@ -1265,13 +1266,17 @@ namespace spot // If we have few edge or only one threads // Benchmark few? auto bne = new_edges.begin(); +#ifdef SPOT_ENABLE_PTHREAD + const unsigned nthreads = get_nthreads(); if (nthreads == 1 || edges_.size() < 1000) +#endif { for (auto s = 0u; s < N; ++s) std::stable_sort(bne + idx_list[s], bne + idx_list[s+1], p); } +#ifdef SPOT_ENABLE_PTHREAD else { static auto tv = std::vector(); @@ -1291,7 +1296,7 @@ namespace spot t.join(); tv.clear(); } - // Done +#endif std::swap(edges_, new_edges); // Like after normal sort_edges, they need to be chained before usage } diff --git a/spot/libspot.pc.in b/spot/libspot.pc.in index 2dac1de5d..9cb877b34 100644 --- a/spot/libspot.pc.in +++ b/spot/libspot.pc.in @@ -8,5 +8,5 @@ Description: A library of LTL and omega-automata algorithms for model checking URL: https://spot.lrde.epita.fr/ Version: @PACKAGE_VERSION@ Cflags: -I${includedir} -Libs: -L${libdir} -lspot +Libs: -L${libdir} -lspot @LIBSPOT_PTHREAD@ Requires: libbddx diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 2fd2eb070..5b4da10a3 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -372,7 +372,11 @@ namespace spot throw std::runtime_error( "twa_graph::merge_states() does not work on alternating automata"); +#ifdef ENABLE_PTHREAD const unsigned nthreads = get_nthreads(); +#else + constexpr unsigned nthreads = 1; +#endif typedef graph_t::edge_storage_t tr_t; g_.sort_edges_srcfirst_([](const tr_t& lhs, const tr_t& rhs) @@ -511,9 +515,12 @@ namespace spot // << ((env_map.size()+player_map.size())/((float)n_states)) // << '\n'; + // Check whether we can merge two states // and takes into account the self-loops - auto state_equal = [&](unsigned s1, unsigned s2) + auto state_equal = [&e_vec, &e_chain, &e_idx](unsigned s1, unsigned s2, + std::vector& checked1, + std::vector& checked2) { auto edge_data_comp = [](const auto& lhs, const auto& rhs) @@ -528,10 +535,6 @@ namespace spot return false; }; - - thread_local auto checked1 = std::vector(); - thread_local auto checked2 = std::vector(); - auto [i1, nsl1, sl1, e1] = e_idx[s1]; auto [i2, nsl2, sl2, e2] = e_idx[s2]; @@ -612,10 +615,10 @@ namespace spot std::vector remap(nb_states, -1U); // Check each hash - auto check_ix = [&](unsigned ix) + auto check_ix = [&](unsigned ix, std::vector& v, + std::vector& checked1, + std::vector& checked2) { - // Reduce cache miss - thread_local auto v = 
std::vector(); v.clear(); for (auto i = ix; i != -1U; i = hash_linked_list[i]) v.push_back(i); @@ -627,7 +630,7 @@ namespace spot for (unsigned jdx = 0; jdx < idx; ++jdx) { auto j = v[jdx]; - if (state_equal(j, i)) + if (state_equal(j, i, checked1, checked2)) { remap[i] = (remap[j] != -1U) ? remap[j] : j; @@ -675,13 +678,19 @@ namespace spot auto worker = [&upd, check_ix, nthreads](unsigned pid, auto begp, auto endp, auto bege, auto ende) - { - upd(begp, endp, pid); - upd(bege, ende, pid); - for (; begp != endp; upd(begp, endp, nthreads)) - check_ix(begp->second.first); - for (; bege != ende; upd(bege, ende, nthreads)) - check_ix(bege->second.first); + { + // Temporary storage for list of edges to reduce cache misses + std::vector v; + // Vector reused by all invocations of state_equal to mark edges + // that have been matched already. + std::vector checked1; + std::vector checked2; + upd(begp, endp, pid); + upd(bege, ende, pid); + for (; begp != endp; upd(begp, endp, nthreads)) + check_ix(begp->second.first, v, checked1, checked2); + for (; bege != ende; upd(bege, ende, nthreads)) + check_ix(bege->second.first, v, checked1, checked2); }; { @@ -690,10 +699,12 @@ namespace spot auto bege = env_map.begin(); auto ende = env_map.end(); - +#ifdef ENABLE_PTHREAD if ((nthreads == 1) & (num_states() > 1000)) // Bound? { +#endif // ENABLE_PTHREAD worker(0, begp, endp, bege, ende); +#ifdef ENABLE_PTHREAD } else { @@ -711,6 +722,7 @@ namespace spot t.join(); tv.clear(); } +#endif // ENABLE_PTHREAD } for (auto& e: edges()) From 8161a8c53176cdbd0fd196fddc140a0416aad060 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 21 Jun 2022 14:24:08 +0200 Subject: [PATCH 088/606] tests: workaround test not failing if the Spot support more colors * configure.ac (MAX_ACCSETS): Add AC_SUBST. * tests/run.in: Define MAX_ACCSETS. * tests/core/prodchain.test: Test MAX_ACCSETS. 
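Since tests/run now exports MAX_ACCSETS, any test script run through it can apply the same kind of guard. A minimal sketch, where "$@" stands for whatever autfilt invocation is expected to need more than 32 colors (for instance a chain of --product options as in prodchain.test):

    # The 'Too many acceptance sets used' failure can only be expected
    # on a default build; a tree configured with something like
    # --enable-max-accsets=256 will process the same input just fine.
    if test "$MAX_ACCSETS" -eq 32; then
      autfilt "$@" 2> error && exit 1
      grep 'Too many acceptance sets used' error
    fi
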
--- configure.ac | 1 + tests/core/prodchain.test | 12 ++++++++---- tests/run.in | 5 ++++- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/configure.ac b/configure.ac index 7c0ff62ae..2d6b4be1f 100644 --- a/configure.ac +++ b/configure.ac @@ -79,6 +79,7 @@ if test 0 -eq `expr $enable_max_accsets % $default_max_accsets` then AC_DEFINE_UNQUOTED([MAX_ACCSETS], [$enable_max_accsets], [The maximal number of acceptance sets supported (also known as acceptance marks)]) + AC_SUBST([MAX_ACCSETS], [$enable_max_accsets]) else AC_MSG_ERROR([The argument of --enable-max-accsets must be a multiple of $default_max_accsets]) fi diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index 0a7f1a1d9..e00422148 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -32,8 +32,10 @@ for i in *.hoa; do shift done shift -autfilt "$@" 2> error && exit 1 -grep 'Too many acceptance sets used' error +if $MAX_ACCSETS -eq 32; then + autfilt "$@" 2> error && exit 1 + grep 'Too many acceptance sets used' error +fi autfilt -B "$@" > result test "127,253,508,1" = `autfilt --stats=%s,%e,%t,%a result` @@ -44,7 +46,9 @@ for i in *.hoa; do shift done shift -autfilt "$@" 2> error && exit 1 -grep 'Too many acceptance sets used' error +if $MAX_ACCSETS -eq 32; then + autfilt "$@" 2> error && exit 1 + grep 'Too many acceptance sets used' error +fi autfilt -B "$@" > result test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` diff --git a/tests/run.in b/tests/run.in index 7eaa7732c..3b9470bef 100755 --- a/tests/run.in +++ b/tests/run.in @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010-2011, 2014-2016, 2018-2021 Laboratoire de Recherche +# Copyright (C) 2010-2011, 2014-2016, 2018-2022 Laboratoire de Recherche # et Developpement de l'EPITA (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -75,6 +75,9 @@ export SPOT_DOTDEFAULT= SPOT_UNINSTALLED=1 export SPOT_UNINSTALLED +MAX_ACCSETS=@MAX_ACCSETS@ +export MAX_ACCSETS + case $1 in */*) dir=${1%/*} From df685433f473b2830cf5aae93581489b0964ad0b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 22 Jun 2022 14:33:16 +0200 Subject: [PATCH 089/606] bin: separate process_file() for aut and ltl * bin/common_finput.cc, bin/common_finput.hh, bin/common_hoaread.hh (process_file): Split into... (process_ltl_file, process_aut_filt): ... these, as we will need both in ltlsynt. 
--- bin/common_finput.cc | 12 ++++++++++-- bin/common_finput.hh | 5 ++++- bin/common_hoaread.hh | 8 +++++--- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 6f09601e0..8e78b5599 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -303,7 +303,13 @@ job_processor::process_stream(std::istream& is, } int -job_processor::process_file(const char* filename) +job_processor::process_aut_file(const char*) +{ + throw std::runtime_error("process_aut_file not defined for this tool"); +} + +int +job_processor::process_ltl_file(const char* filename) { col_to_read = 0; @@ -366,8 +372,10 @@ job_processor::run() error |= process_string(j.str); break; case job_type::LTL_FILENAME: + error |= process_ltl_file(j.str); + break; case job_type::AUT_FILENAME: - error |= process_file(j.str); + error |= process_aut_file(j.str); break; default: throw std::runtime_error("unexpected job type"); diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 54ced7f7b..44a78bada 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -72,7 +72,10 @@ public: process_stream(std::istream& is, const char* filename); virtual int - process_file(const char* filename); + process_ltl_file(const char* filename); + + virtual int + process_aut_file(const char* filename); virtual int run(); diff --git a/bin/common_hoaread.hh b/bin/common_hoaread.hh index b3cc912a5..e66967393 100644 --- a/bin/common_hoaread.hh +++ b/bin/common_hoaread.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017, 2018 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2017, 2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -80,8 +80,10 @@ public: } int - process_file(const char* filename) override + process_aut_file(const char* filename) override { + col_to_read = 0; + // If we have a filename like "foo/NN" such // that: // ① foo/NN is not a file, From 04d718ab9cd8efc8c6b7953468a9eef2ec58bd2e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 22 Jun 2022 15:20:54 +0200 Subject: [PATCH 090/606] ltlsynt: support multiple --tlsf options * bin/common_finput.cc, bin/common_finput.hh: Add support for process_tlsf_file. * bin/ltlsynt.cc: Implement it. * tests/core/syfco.test: Adjust test case. 
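In practice this makes --tlsf behave like the other input options: each TLSF file becomes its own job, so a single invocation can process several specifications in sequence. A small sketch (the .tlsf file names are placeholders, and syfco must be installed for the conversion):

    # One REALIZABLE/UNREALIZABLE verdict (or synthesized controller)
    # is printed per specification, in the order the options were given.
    ltlsynt --tlsf lift.tlsf --tlsf arbiter.tlsf --realizability
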
--- bin/common_finput.cc | 9 +++++ bin/common_finput.hh | 6 +++- bin/ltlsynt.cc | 81 +++++++++++++++++++++++-------------------- tests/core/syfco.test | 7 ++-- 4 files changed, 63 insertions(+), 40 deletions(-) diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 8e78b5599..80aca5df7 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -308,6 +308,12 @@ job_processor::process_aut_file(const char*) throw std::runtime_error("process_aut_file not defined for this tool"); } +int +job_processor::process_tlsf_file(const char*) +{ + throw std::runtime_error("process_tlsf_file not defined for this tool"); +} + int job_processor::process_ltl_file(const char* filename) { @@ -377,6 +383,9 @@ job_processor::run() case job_type::AUT_FILENAME: error |= process_aut_file(j.str); break; + case job_type::TLSF_FILENAME: + error |= process_tlsf_file(j.str); + break; default: throw std::runtime_error("unexpected job type"); } diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 44a78bada..2a5815fc3 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -27,7 +27,8 @@ enum class job_type : char { LTL_STRING, LTL_FILENAME, - AUT_FILENAME }; + AUT_FILENAME, + TLSF_FILENAME }; struct job { @@ -77,6 +78,9 @@ public: virtual int process_aut_file(const char* filename); + virtual int + process_tlsf_file(const char* filename); + virtual int run(); diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 0d89c2fc5..b5d4289f5 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -157,8 +157,6 @@ static const char* opt_print_hoa_args = nullptr; static bool opt_real = false; static bool opt_do_verify = false; static const char* opt_print_aiger = nullptr; -static char* opt_tlsf = nullptr; -static std::string opt_tlsf_string; static spot::synthesis_info* gi; @@ -582,6 +580,18 @@ namespace return 0; } + static void + split_aps(std::string arg, std::vector& where) + { + std::istringstream aps(arg); + std::string ap; + while (std::getline(aps, ap, ',')) + { + ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); + where.push_back(str_tolower(ap)); + } + } + class ltl_processor final : public job_processor { private: @@ -658,19 +668,40 @@ namespace print_csv(f); return res; } - }; -} -static void -split_aps(std::string arg, std::vector& where) -{ - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) + int + process_tlsf_file(const char* filename) override { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - where.push_back(str_tolower(ap)); + static char arg0[] = "syfco"; + static char arg1[] = "-f"; + static char arg2[] = "ltlxba"; + static char arg3[] = "-m"; + static char arg4[] = "fully"; + char* command[] = { arg0, arg1, arg2, arg3, arg4, + const_cast(filename), nullptr }; + std::string tlsf_string = read_stdout_of_command(command); + + // The set of atomic proposition will be temporary set to those + // given by syfco, unless they were forced from the command-line. 
+ bool reset_aps = false; + if (!input_aps_.has_value() && !output_aps_.has_value()) + { + reset_aps = true; + static char arg5[] = "--print-output-signals"; + char* command[] = { arg0, arg5, + const_cast(filename), nullptr }; + std::string res = read_stdout_of_command(command); + + output_aps_.emplace(std::vector{}); + split_aps(res, *output_aps_); + } + int res = process_string(tlsf_string, filename); + if (reset_aps) + output_aps_.reset(); + return res; } + + }; } static int @@ -726,9 +757,7 @@ parse_opt(int key, char *arg, struct argp_state *) simplify_args, simplify_values); break; case OPT_TLSF: - if (opt_tlsf) - error(2, 0, "option --tlsf may only be used once"); - opt_tlsf = arg; + jobs.emplace_back(arg, job_type::TLSF_FILENAME); break; case OPT_VERBOSE: gi->verbose_stream = &std::cerr; @@ -767,28 +796,6 @@ main(int argc, char **argv) if (int err = argp_parse(&ap, argc, argv, ARGP_NO_HELP, nullptr, nullptr)) exit(err); - if (opt_tlsf) - { - static char arg0[] = "syfco"; - static char arg1[] = "-f"; - static char arg2[] = "ltlxba"; - static char arg3[] = "-m"; - static char arg4[] = "fully"; - char* command[] = { arg0, arg1, arg2, arg3, arg4, opt_tlsf, nullptr }; - opt_tlsf_string = read_stdout_of_command(command); - jobs.emplace_back(opt_tlsf_string.c_str(), job_type::LTL_STRING); - - if (!all_input_aps.has_value() && !all_output_aps.has_value()) - { - static char arg5[] = "--print-output-signals"; - char* command[] = { arg0, arg5, opt_tlsf, nullptr }; - std::string res = read_stdout_of_command(command); - - all_output_aps.emplace(std::vector{}); - split_aps(res, *all_output_aps); - } - } - check_no_formula(); // Check if inputs and outputs are distinct diff --git a/tests/core/syfco.test b/tests/core/syfco.test index b63f729a8..7141f0b4c 100755 --- a/tests/core/syfco.test +++ b/tests/core/syfco.test @@ -44,5 +44,8 @@ test REALIZABLE = `ltlsynt --tlsf test.tlsf --realizability` test UNREALIZABLE = `ltlsynt --tlsf test.tlsf --outs=foo --realizability` test UNREALIZABLE = `ltlsynt --outs=foo --tlsf test.tlsf --realizability` -ltlsynt --tlsf test.tlsf --tlsf test.tlsf 2>stderr && exit 0 -grep 'option --tlsf may only be used once' stderr +# --tlsf can be used several time +ltlsynt --tlsf test.tlsf > out1 +ltlsynt --tlsf test.tlsf --tlsf test.tlsf > out2 +cat out1 out1 > out11 +diff out11 out2 From be28365db4a9c1290074f6f5a8a1005c70c9fc55 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 22 Jun 2022 19:31:24 +0200 Subject: [PATCH 091/606] ltlsynt: add --from-pgame option to read parity games * bin/common_file.cc, bin/common_file.hh (output_file): Add a force_append option. * bin/ltlsynt.cc: Implement the --from-pgame option, and fix suppot for --csv when multiple inputs are processed. * NEWS: Mention the new option. * tests/core/syfco.test: Add a test case. * tests/core/ltlsynt-pgame.test: New file. * tests/Makefile.am: Add it. --- NEWS | 5 +- bin/common_file.cc | 11 ++- bin/common_file.hh | 4 +- bin/ltlsynt.cc | 170 ++++++++++++++++++++++++++++++++-- tests/Makefile.am | 1 + tests/core/ltlsynt-pgame.test | 157 +++++++++++++++++++++++++++++++ tests/core/syfco.test | 4 + 7 files changed, 336 insertions(+), 16 deletions(-) create mode 100755 tests/core/ltlsynt-pgame.test diff --git a/NEWS b/NEWS index b8067d47a..307cc4a3a 100644 --- a/NEWS +++ b/NEWS @@ -29,10 +29,13 @@ New in spot 2.10.6.dev (not yet released) - autcross learned a --language-complemented option to assist in the case one is testing tools that complement automata. (issue #504). 
- - ltlsynt as a new option --tlsf that takes the filename of a TLSF + - ltlsynt has a new option --tlsf that takes the filename of a TLSF specification and calls syfco (which must be installed) to convert it into an LTL formula. + - ltlsynt has a new option --from-pgame that takes a parity game in + extended HOA format, as used in the Synthesis Competition. + Library: - A global variable, together with its setters and getters to define the diff --git a/bin/common_file.cc b/bin/common_file.cc index ab89fbfe4..005bb5479 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -22,15 +22,18 @@ #include -output_file::output_file(const char* name) +output_file::output_file(const char* name, bool force_append) { std::ios_base::openmode mode = std::ios_base::trunc; if (name[0] == '>' && name[1] == '>') { - mode = std::ios_base::app; append_ = true; name += 2; } + if (force_append) + append_ = true; + if (append_) + mode = std::ios_base::app; if (name[0] == '-' && name[1] == 0) { os_ = &std::cout; diff --git a/bin/common_file.hh b/bin/common_file.hh index fba62dec0..b8f9842b8 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de +// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et Développement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -33,7 +33,7 @@ public: // Open a file for output. "-" is interpreted as stdout. // Names that start with ">>" are opened for append. // The function calls error() on... error. - output_file(const char* name); + output_file(const char* name, bool force_append = false); void close(const std::string& name); diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index b5d4289f5..e7d6b73d1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -23,6 +23,7 @@ #include "common_aoutput.hh" #include "common_finput.hh" +#include "common_hoaread.hh" #include "common_setup.hh" #include "common_sys.hh" #include "common_trans.hh" @@ -48,6 +49,7 @@ enum OPT_BYPASS, OPT_CSV, OPT_DECOMPOSE, + OPT_FROM_PGAME, OPT_INPUT, OPT_OUTPUT, OPT_PRINT, @@ -73,6 +75,9 @@ static const argp_option options[] = { "tlsf", OPT_TLSF, "FILENAME", 0, "Read a TLSF specification from FILENAME, and call syfco to " "convert it into LTL", 0 }, + { "from-pgame", OPT_FROM_PGAME, "FILENAME", 0, + "Read a parity game in Extended HOA format instead of building it.", + 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old|acd", 0, @@ -250,7 +255,7 @@ namespace }; static void - print_csv(const spot::formula& f) + print_csv(const spot::formula& f, const char* filename = nullptr) { auto& vs = gi->verbose_stream; auto& bv = gi->bv; @@ -259,7 +264,9 @@ namespace if (vs) *vs << "writing CSV to " << opt_csv << '\n'; - output_file outf(opt_csv); + static bool not_first_time = false; + output_file outf(opt_csv, not_first_time); + not_first_time = true; // force append on next print. std::ostream& out = outf.ostream(); // Do not output the header line if we append to a file. 
@@ -284,10 +291,15 @@ namespace out << '\n'; } std::ostringstream os; - os << f; - spot::escape_rfc4180(out << '"', os.str()); - out << "\",\"" << algo_names[(int) gi->s] - << "\"," << bv->total_time + if (filename) + os << filename; + else + os << f; + spot::escape_rfc4180(out << '"', os.str()) << "\","; + // if a filename was given, assume the game has been read directly + if (!filename) + out << '"' << algo_names[(int) gi->s] << '"'; + out << ',' << bv->total_time << ',' << bv->trans_time << ',' << bv->split_time << ',' << bv->paritize_time; @@ -319,6 +331,8 @@ namespace const std::vector& input_aps, const std::vector& output_aps) { + if (opt_csv) // reset benchmark data + gi->bv = spot::synthesis_info::bench_var(); spot::stopwatch sw; if (gi->bv) sw.start(); @@ -386,7 +400,7 @@ namespace [](const spot::twa_graph_ptr& game)->void { if (opt_print_pg) - pg_print(std::cout, game); + spot::pg_print(std::cout, game); else spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; } @@ -438,6 +452,8 @@ namespace safe_tot_time(); return 1; } + if (gi->bv) + gi->bv->realizable = true; // Create the (partial) strategy // only if we need it if (!opt_real) @@ -701,6 +717,141 @@ namespace return res; } + int process_pgame(spot::twa_graph_ptr arena, + const std::string& location) + { + if (opt_csv) // reset benchmark data + gi->bv = spot::synthesis_info::bench_var(); + spot::stopwatch sw_global; + spot::stopwatch sw_local; + if (gi->bv) + { + sw_global.start(); + sw_local.start(); + } + if (!arena->get_named_prop("synthesis-outputs")) + { + std::cerr << location << ": controllable-AP is not specified\n"; + return 2; + } + if (!arena->get_named_prop>("state-player")) + arena = spot::split_2step(arena, true); + // FIXME: If we do not split the game, we should check that it is + // alternating. + spot::change_parity_here(arena, + spot::parity_kind_max, + spot::parity_style_odd); + spot::colorize_parity_here(arena, true); + if (gi->bv) + { + gi->bv->split_time += sw_local.stop(); + gi->bv->nb_states_arena += arena->num_states(); + auto spptr = + arena->get_named_prop>("state-player"); + assert(spptr); + gi->bv->nb_states_arena_env += + std::count(spptr->cbegin(), spptr->cend(), false); + } + if (opt_print_pg || opt_print_hoa) + { + if (opt_print_pg) + spot::pg_print(std::cout, arena); + else + spot::print_hoa(std::cout, arena, opt_print_hoa_args) << '\n'; + return 0; + } + auto safe_tot_time = [&]() { + if (gi->bv) + gi->bv->total_time = sw_global.stop(); + }; + if (!spot::solve_game(arena, *gi)) + { + std::cout << "UNREALIZABLE" << std::endl; + safe_tot_time(); + return 1; + } + if (gi->bv) + gi->bv->realizable = true; + std::cout << "REALIZABLE" << std::endl; + if (opt_real) + { + safe_tot_time(); + return 0; + } + sw_local.start(); + spot::twa_graph_ptr mealy_like = + spot::solved_game_to_mealy(arena, *gi); + // Keep the machine split for aiger otherwise, separate it. 
+ spot::simplify_mealy_here(mealy_like, *gi, opt_print_aiger); + + automaton_printer printer; + spot::process_timer timer_printer_dummy; + if (opt_print_aiger) + { + if (gi->bv) + sw_local.start(); + spot::aig_ptr saig = + spot::mealy_machine_to_aig(mealy_like, opt_print_aiger); + if (gi->bv) + { + gi->bv->aig_time = sw_local.stop(); + gi->bv->nb_latches = saig->num_latches(); + gi->bv->nb_gates = saig->num_gates(); + } + if (gi->verbose_stream) + { + *gi->verbose_stream << "AIG circuit was created in " + << gi->bv->aig_time + << " seconds and has " << saig->num_latches() + << " latches and " + << saig->num_gates() << " gates\n"; + } + spot::print_aiger(std::cout, saig) << '\n'; + } + else + { + printer.print(mealy_like, timer_printer_dummy); + } + safe_tot_time(); + return 0; + } + + int + process_aut_file(const char* filename) override + { + spot::automaton_stream_parser hp(filename); + int err = 0; + while (!abort_run) + { + spot::parsed_aut_ptr haut = hp.parse(spot::make_bdd_dict()); + if (!haut->aut && haut->errors.empty()) + break; + if (haut->format_errors(std::cerr)) + err = 2; + if (!haut->aut /*|| (err && abort_on_error_)*/) + { + error(2, 0, "failed to read automaton from %s", + haut->filename.c_str()); + } + else if (haut->aborted) + { + std::cerr << haut->filename << ':' << haut->loc + << ": aborted input automaton\n"; + err = std::max(err, 2); + } + else + { + std::ostringstream os; + os << haut->filename << ':' << haut->loc; + std::string loc = os.str(); + int res = process_pgame(haut->aut, loc); + if (res < 2 && opt_csv) + print_csv(nullptr, loc.c_str()); + err = std::max(err, res); + } + } + return err; + } }; } @@ -719,13 +870,14 @@ parse_opt(int key, char *arg, struct argp_state *) break; case OPT_CSV: opt_csv = arg ? arg : "-"; - if (not gi->bv) - gi->bv = spot::synthesis_info::bench_var(); break; case OPT_DECOMPOSE: opt_decompose_ltl = XARGMATCH("--decompose", arg, decompose_args, decompose_values); break; + case OPT_FROM_PGAME: + jobs.emplace_back(arg, job_type::AUT_FILENAME); + break; case OPT_INPUT: { all_input_aps.emplace(std::vector{}); diff --git a/tests/Makefile.am b/tests/Makefile.am index b4627f3e6..1a3d440c3 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -342,6 +342,7 @@ TESTS_twa = \ core/parity.test \ core/parity2.test \ core/ltlsynt.test \ + core/ltlsynt-pgame.test \ core/syfco.test \ core/rabin2parity.test \ core/twacube.test diff --git a/tests/core/ltlsynt-pgame.test b/tests/core/ltlsynt-pgame.test new file mode 100755 index 000000000..b4bada798 --- /dev/null +++ b/tests/core/ltlsynt-pgame.test @@ -0,0 +1,157 @@ +#! /bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
./defs || exit 1 + +set -e + +# From SYNTCOMP +cat >aut7.hoa < Xa)" +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +controllable-AP: 1 +properties: explicit-labels trans-labels +--BODY-- +State: 0 + [t] 1 + [1] 2 + [!1] 3 +State: 1 "GFa" + [0] 1 {0} + [!0] 1 +State: 2 "a & G(b <-> Xa)" {0} + [0&1] 2 + [0&!1] 3 +State: 3 "!a & G(b <-> Xa)" {0} + [!0&1] 2 + [!0&!1] 3 +--END-- +EOF + +test UNREALIZABLE = `ltlsynt --realizability --from-pgame aut7.hoa` + +grep -v controllable-AP aut7.hoa > aut7b.hoa +run 2 ltlsynt --realizability --from-pgame aut7b.hoa 2>stderr +grep 'aut7b.*controllable-AP' stderr + + +# From SYNTCOMP +cat >UnderapproxDemo2.ehoa <starve.ehoa <expect <results +diff expect results + +ltlsynt --realizability --from-pgame starve.ehoa \ + --from-pgame UnderapproxDemo2.ehoa \ + --from-pgame aut7.hoa --csv=out.csv >result || : +cat >expect <result || : +test 4 = `wc -l < out.csv` +cut -d, -f 9,10,11,12,13 right +end='"strat_num_states","strat_num_edges"' +cat >expect < out1 ltlsynt --tlsf test.tlsf --tlsf test.tlsf > out2 cat out1 out1 > out11 diff out11 out2 + +ltlsynt --tlsf test.tlsf --tlsf test.tlsf --print-game > pgame.hoa +ltlsynt --from-pgame pgame.hoa > out3 +diff out2 out3 From 288b1c79586472828d31e74dfd2de0708c0ce86a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 22 Jun 2022 23:43:07 +0200 Subject: [PATCH 092/606] contains: generalize second argument to a twa This was triggered by a question from Pierre Ganty on the mailing list. * spot/twaalgos/contains.hh, spot/twaalgos/contains.cc (contains): Generalize second argument to const_twa_ptr instead of const_twa_graph_ptr. * NEWS: Mention this. * tests/python/ltsmin-pml.ipynb: Show that it work. * THANKS: Mention Pierre. --- NEWS | 5 ++++ THANKS | 1 + spot/twaalgos/contains.cc | 6 ++--- spot/twaalgos/contains.hh | 11 +++++--- tests/python/ltsmin-pml.ipynb | 51 ++++++++++++++++++++++++++--------- 5 files changed, 56 insertions(+), 18 deletions(-) diff --git a/NEWS b/NEWS index 307cc4a3a..a1e2361f5 100644 --- a/NEWS +++ b/NEWS @@ -108,6 +108,11 @@ New in spot 2.10.6.dev (not yet released) run with additional option to abort when the tree as an unwanted shape, or to turn the tree into a DAG. + - contains() can now take a twa as a second argument, not just a + twa_graph. This allows for instance to do contains(ltl, kripke) + to obtain a simple model checker (that returns true or false, + without counterexample). + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/THANKS b/THANKS index 8a9c1b630..c53a0aafb 100644 --- a/THANKS +++ b/THANKS @@ -44,6 +44,7 @@ Ming-Hsien Tsai Nikos Gorogiannis Ondřej Lengál Paul Guénézan +Pierre Ganty Reuben Rowe Roei Nahum Rüdiger Ehlers diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index 6c76249d5..cf2680d01 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2019 Laboratoire de Recherche et Développement de +// Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de // l'Epita. // // This file is part of Spot, a model checking library. 
@@ -34,7 +34,7 @@ namespace spot } } - bool contains(const_twa_graph_ptr left, const_twa_graph_ptr right) + bool contains(const_twa_graph_ptr left, const_twa_ptr right) { return !complement(left)->intersects(right); } @@ -44,7 +44,7 @@ namespace spot return contains(left, translate(right, left->get_dict())); } - bool contains(formula left, const_twa_graph_ptr right) + bool contains(formula left, const_twa_ptr right) { return !translate(formula::Not(left), right->get_dict())->intersects(right); } diff --git a/spot/twaalgos/contains.hh b/spot/twaalgos/contains.hh index 61c53076a..a1d64f1b1 100644 --- a/spot/twaalgos/contains.hh +++ b/spot/twaalgos/contains.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement de +// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de // l'Epita. // // This file is part of Spot, a model checking library. @@ -38,10 +38,15 @@ namespace spot /// associated to the complement of \a left. It helps if \a left /// is a deterministic automaton or a formula (because in both cases /// complementation is easier). + /// + /// Complementation is only supported on twa_graph automata, so that + /// is the reason \a left must be a twa_graph. Right will be + /// explored on-the-fly if it is not a twa_graph. + /// /// @{ - SPOT_API bool contains(const_twa_graph_ptr left, const_twa_graph_ptr right); + SPOT_API bool contains(const_twa_graph_ptr left, const_twa_ptr right); SPOT_API bool contains(const_twa_graph_ptr left, formula right); - SPOT_API bool contains(formula left, const_twa_graph_ptr right); + SPOT_API bool contains(formula left, const_twa_ptr right); SPOT_API bool contains(formula left, formula right); /// @} diff --git a/tests/python/ltsmin-pml.ipynb b/tests/python/ltsmin-pml.ipynb index 120ab11f5..5d25b207f 100644 --- a/tests/python/ltsmin-pml.ipynb +++ b/tests/python/ltsmin-pml.ipynb @@ -40,8 +40,8 @@ "SpinS Promela Compiler - version 1.1 (3-Feb-2015)\n", "(C) University of Twente, Formal Methods and Tools group\n", "\n", - "Parsing tmprn9_nun3.pml...\n", - "Parsing tmprn9_nun3.pml done (0.1 sec)\n", + "Parsing tmpwot5yb9c.pml...\n", + "Parsing tmpwot5yb9c.pml done (0.0 sec)\n", "\n", "Optimizing graphs...\n", " StateMerging changed 0 states/transitions.\n", @@ -84,8 +84,8 @@ " Found 2 / 2 (100.0%) Commuting actions \n", "Generating guard dependency matrices done (0.0 sec)\n", "\n", - "Written C code to /home/adl/git/spot/tests/python/tmprn9_nun3.pml.spins.c\n", - "Compiled C code to PINS library tmprn9_nun3.pml.spins\n", + "Written C code to /home/adl/git/spot/tests/python/tmpwot5yb9c.pml.spins.c\n", + "Compiled C code to PINS library tmpwot5yb9c.pml.spins\n", "\n" ] } @@ -419,7 +419,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7d8049450> >" + " *' at 0x7f7f9849ee20> >" ] }, "execution_count": 4, @@ -1120,6 +1120,33 @@ "k.show('.1K')" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since a kripke structure is a `twa`, can be used on the right-hand side of `contains`. Here we show that every path of `k` contains a step where `P_0.a < 2`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spot.contains('F\"P_0.a < 2\"', k)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1132,7 +1159,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -1141,7 +1168,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -1173,7 +1200,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -1239,7 +1266,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -1251,7 +1278,7 @@ " P_0.b: int" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -1262,7 +1289,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -1286,7 +1313,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.10.5" } }, "nbformat": 4, From 166a26417c10e1a97f834b384c2e700b6efc5505 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Jun 2022 15:52:24 +0200 Subject: [PATCH 093/606] graph: fix creation of universal edge * spot/graph/graph.hh: Use a temporary array to store the destination vector if the passed range belong to the dests_ vector. Otherwise the passed begin/end risk being invalidated when dests_ is reallocated. * NEWS: Mention the bug. --- NEWS | 7 +++++++ spot/graph/graph.hh | 19 +++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index a1e2361f5..3b94d43ab 100644 --- a/NEWS +++ b/NEWS @@ -113,6 +113,13 @@ New in spot 2.10.6.dev (not yet released) to obtain a simple model checker (that returns true or false, without counterexample). + Bugs fixed: + + - calling twa_graph::new_univ_edge(src, begin, end, cond, acc) could + produce unexpected result if begin and end where already pointing + into the universal edge vector, since the later can be + reallocated during that process. + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 6419fc27a..06ddf0997 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -804,8 +804,23 @@ namespace spot return *dst_begin; SPOT_ASSERT(sz > 1); unsigned d = dests_.size(); - dests_.emplace_back(sz); - dests_.insert(dests_.end(), dst_begin, dst_end); + if (!dests_.empty() + && &*dst_begin >= &dests_.front() + && &*dst_begin <= &dests_.back() + && (dests_.capacity() - dests_.size()) < (sz + 1)) + { + // If dst_begin...dst_end points into dests_ and dests_ risk + // being reallocated, we have to savea the destination + // states before we lose them. + std::vector tmp(dst_begin, dst_end); + dests_.emplace_back(sz); + dests_.insert(dests_.end(), tmp.begin(), tmp.end()); + } + else + { + dests_.emplace_back(sz); + dests_.insert(dests_.end(), dst_begin, dst_end); + } return ~d; } From b4279d3a120bf555e6ec3eab3e352438600f64e9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Jun 2022 16:19:50 +0200 Subject: [PATCH 094/606] dot: quote identifiers containing a minus * spot/twaalgos/dot.cc: Quote identifiers containing a minus. * tests/core/alternating.test: Add test case. 
* NEWS: Mention the bug. --- NEWS | 4 ++++ spot/twaalgos/dot.cc | 25 +++++++++++++++++++---- tests/core/alternating.test | 40 ++++++++++++++++++++++++++++++++++++- 3 files changed, 64 insertions(+), 5 deletions(-) diff --git a/NEWS b/NEWS index 3b94d43ab..a08fa6d10 100644 --- a/NEWS +++ b/NEWS @@ -120,6 +120,10 @@ New in spot 2.10.6.dev (not yet released) into the universal edge vector, since the later can be reallocated during that process. + - Printing an alternating automaton with print_dot() using 'u' to + hide true state could produce some incorrect GraphViz output if + the automaton as a true state as part of a universal group. + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/spot/twaalgos/dot.cc b/spot/twaalgos/dot.cc index 66804f304..70b707edc 100644 --- a/spot/twaalgos/dot.cc +++ b/spot/twaalgos/dot.cc @@ -578,10 +578,27 @@ namespace spot return tmp_dst.str(); } - template - void print_true_state(U to, V from) const + void print_hidden_true_name(unsigned to, unsigned from) const { - os_ << " T" << to << 'T' << from << " [label=\"\", style=invis, "; + os_ << 'T' << to << 'T' << from; + } + + void print_hidden_true_name(unsigned to, const std::string& from) const + { + bool neg = from[0] == '-'; + if (neg) + os_ << '"'; + os_ << 'T' << to << 'T' << from; + if (neg) + os_ << '"'; + } + + template + void print_true_state(unsigned to, F from) const + { + os_ << " "; + print_hidden_true_name(to, from); + os_ << " [label=\"\", style=invis, "; os_ << (opt_vertical_ ? "height=0]\n" : "width=0]\n"); } @@ -606,7 +623,7 @@ namespace spot print_true_state(d, dest); os_ << " " << dest << " -> "; if (dst_is_hidden_true_state) - os_ << 'T' << d << 'T' << dest; + print_hidden_true_name(d, dest); else os_ << d; if ((style && *style) || opt_id_) diff --git a/tests/core/alternating.test b/tests/core/alternating.test index 17f012675..df4e47624 100755 --- a/tests/core/alternating.test +++ b/tests/core/alternating.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2021 Laboratoire de Recherche et +# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -1009,3 +1009,41 @@ test '2_0_1_1_1_1_3_3' = "`autfilt --stats=$stats in`" autfilt --stats='%[x]U' in 2>stderr && exit2 grep '%\[x\]U' stderr + +cat >in <out.dot +# T0T-1 is not a valid name for GraphViz, it has to be quoted. +cat >exp.dot < 1 + 1 [label="1"] + 1 -> -1 [label="1\n{0}", arrowhead=onormal] + -1 [label=<>,shape=point,width=0.05,height=0.05] + "T0T-1" [label="", style=invis, width=0] + -1 -> "T0T-1" + -1 -> 1 + T0T1 [label="", style=invis, width=0] + 1 -> T0T1 [label="a"] +} +EOF +diff out.dot exp.dot From 9222e9713b5764396fb3a8d479acf263044f0f88 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Jun 2022 17:19:09 +0200 Subject: [PATCH 095/606] parseaut: fix handling of [ outside HOA Fixes #509. * spot/parseaut/scanaut.ll: Reset ->str whenever a [ is read, so that we do not attempt to clear ->str while reading garbage. * NEWS: Mention the bug. * tests/core/parseaut.test: Test it. --- NEWS | 4 ++++ spot/parseaut/scanaut.ll | 5 +++++ tests/core/parseaut.test | 10 ++++++++++ 3 files changed, 19 insertions(+) diff --git a/NEWS b/NEWS index a08fa6d10..d7f3f4923 100644 --- a/NEWS +++ b/NEWS @@ -124,6 +124,10 @@ New in spot 2.10.6.dev (not yet released) hide true state could produce some incorrect GraphViz output if the automaton as a true state as part of a universal group. 
+ - Due to an optimization introduces in 2.10 to parse HOA label more + efficiently, the automaton parser could crash when parsing random + input (not HOA) containing '[' (issue #509). + New in spot 2.10.6 (2022-05-18) Bugs fixed: diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index 711c74c64..db8ae75c6 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -454,6 +454,11 @@ identifier [[:alpha:]_][[:alnum:]_.-]* } } +"[" { + yylval->str = nullptr; + return *yytext; + } + . return *yytext; %{ diff --git a/tests/core/parseaut.test b/tests/core/parseaut.test index 6501bee02..56f2d54eb 100755 --- a/tests/core/parseaut.test +++ b/tests/core/parseaut.test @@ -2961,3 +2961,13 @@ EOF # At some point, this crashed with # input buffer overflow, can't enlarge buffer because scanner uses REJECT run 0 autfilt -q bigaut + + +# This issued to segfault, because the parser assumed a '[' token was +# always attached to a string, while that was only true in HOA mode. +cat >input < Date: Tue, 29 Sep 2020 16:38:07 +0200 Subject: [PATCH 096/606] Modifying Zielonka * spot/twaalgos/game.cc: solve_parity_game now works for any of the four parity types and partially colored graphs. Also removing unnescessary steps from Zielonka. h: Update * tests/python/game.py: Update and additional tests * tests/python/except.py: Remove outdated exception --- spot/twaalgos/game.cc | 464 ++++++++++++++++++++++------------------- tests/python/except.py | 10 - tests/python/game.py | 99 ++++++++- 3 files changed, 351 insertions(+), 222 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index e1d23e381..3f041ac1c 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -30,6 +30,16 @@ namespace spot { namespace { + constexpr unsigned unseen_mark = std::numeric_limits::max(); + using par_t = int; + constexpr par_t limit_par_even = + std::numeric_limits::max() & 1 + ? std::numeric_limits::max()-3 + : std::numeric_limits::max()-2; + using strat_t = long long; + constexpr strat_t no_strat_mark = std::numeric_limits::min(); + + static const std::vector* ensure_game(const const_twa_graph_ptr& arena, const char* fnname) { @@ -48,15 +58,11 @@ namespace spot ensure_parity_game(const const_twa_graph_ptr& arena, const char* fnname) { bool max, odd; - arena->acc().is_parity(max, odd, true); - if (!(max && odd)) + bool is_par = arena->acc().is_parity(max, odd, true); + if (!is_par) throw std::runtime_error (std::string(fnname) + - ": arena must have max-odd acceptance condition"); - for (const auto& e : arena->edges()) - if (!e.acc) - throw std::runtime_error - (std::string(fnname) + ": arena must be colorized"); + ": arena must have one of the four parity acceptance conditions"); return ensure_game(arena, fnname); } @@ -71,10 +77,7 @@ namespace spot { // returns true if player p wins v // false otherwise - if (!has_winner_[v]) - return false; - - return winner_[v] == p; + return has_winner_[v] ? 
winner_[v] == p : false; } inline void set(unsigned v, bool p) @@ -95,40 +98,27 @@ namespace spot } }; // winner_t - // When using scc decomposition we need to track the - // changes made to the graph - struct edge_stash_t - { - edge_stash_t(unsigned num, unsigned dst, acc_cond::mark_t acc) noexcept - : e_num(num), - e_dst(dst), - e_acc(acc) - { - } - const unsigned e_num, e_dst; - const acc_cond::mark_t e_acc; - }; // edge_stash_t - // Internal structs used by parity_game // Struct to change recursive calls to stack struct work_t { - work_t(unsigned wstep_, unsigned rd_, unsigned min_par_, - unsigned max_par_) noexcept + work_t(unsigned wstep_, unsigned rd_, par_t min_par_, + par_t max_par_) noexcept : wstep(wstep_), rd(rd_), min_par(min_par_), max_par(max_par_) { } - const unsigned wstep, rd, min_par, max_par; + const unsigned wstep, rd; + const par_t min_par, max_par; }; // work_t // Collects information about an scc // Used to detect special cases struct subgame_info_t { - typedef std::set> all_parities_t; + typedef std::set> all_parities_t; subgame_info_t() noexcept { @@ -159,7 +149,7 @@ namespace spot { public: - bool solve(const twa_graph_ptr &arena) + bool solve(const twa_graph_ptr& arena) { // todo check if reordering states according to scc is worth it set_up(arena); @@ -167,11 +157,25 @@ namespace spot subgame_info_t subgame_info; for (c_scc_idx_ = 0; c_scc_idx_ < info_->scc_count(); ++c_scc_idx_) { + // Testing + // Make sure that every state that has a winner also + // belongs to a subgame + assert([&]() + { + for (unsigned i = 0; i < arena_->num_states(); ++i) + if (w_.has_winner_[i] + && (subgame_[i] == unseen_mark)) + return false; + return true; + }()); // Useless SCCs are winning for player 0. if (!info_->is_useful_scc(c_scc_idx_)) { + // This scc also gets its own subgame + ++rd_; for (unsigned v: c_states()) { + subgame_[v] = rd_; w_.set(v, false); // The strategy for player 0 is to take the first // available edge. @@ -197,27 +201,35 @@ namespace spot { // "Regular" solver max_abs_par_ = *subgame_info.all_parities.begin(); - w_stack_.emplace_back(0, 0, 0, max_abs_par_); + w_stack_.emplace_back(0, 0, + min_par_graph_, max_abs_par_); zielonka(); } } } - // All done -> restore graph, i.e. undo self-looping - restore(); - + // Every state needs a winner assert(std::all_of(w_.has_winner_.cbegin(), w_.has_winner_.cend(), [](bool b) { return b; })); - assert(std::all_of(s_.cbegin(), s_.cend(), - [](unsigned e_idx) - { return e_idx > 0; })); + // Only the states owned by the winner need a strategy + assert([&]() + { + for (unsigned v = 0; v < arena_->num_states(); ++v) + { + if (((*owner_ptr_)[v] == w_.winner(v)) + && ((s_[v] <= 0) || (s_[v] > arena_->num_edges()))) + return false; + } + return true; + }()); // Put the solution as named property region_t &w = *arena->get_or_set_named_prop("state-winner"); strategy_t &s = *arena->get_or_set_named_prop("strategy"); w.swap(w_.winner_); - s.resize(s_.size()); - std::copy(s_.begin(), s_.end(), s.begin()); + s.reserve(s_.size()); + for (auto as : s_) + s.push_back(as == no_strat_mark ? 
0 : (unsigned) as); clean_up(); return w[arena->get_init_state_number()]; @@ -234,7 +246,7 @@ namespace spot return info_->states_of(c_scc_idx_); } - void set_up(const twa_graph_ptr &arena) + void set_up(const twa_graph_ptr& arena) { owner_ptr_ = ensure_parity_game(arena, "solve_parity_game()"); arena_ = arena; @@ -247,22 +259,63 @@ namespace spot w_.winner_.clear(); w_.winner_.resize(n_states, 0); s_.clear(); - s_.resize(n_states, -1); + s_.resize(n_states, no_strat_mark); // Init rd_ = 0; - max_abs_par_ = arena_->get_acceptance().used_sets().max_set() - 1; info_ = std::make_unique(arena_); - // Every edge leaving an scc needs to be "fixed" - // at some point. - // We store: number of edge fixed, original dst, original acc - change_stash_.clear(); - change_stash_.reserve(info_->scc_count() * 2); + // Create all the parities + // we want zielonka to work with any of the four parity types + // and we want it to work on partially colored arenas + // However the actually algorithm still supposes max odd. + // Therefore (and in order to avoid the manipulation of the mark + // at each step) we generate a vector directly storing the + // "equivalent" parity for each edge + bool max, odd; + arena_->acc().is_parity(max, odd, true); + max_abs_par_ = arena_->acc().all_sets().max_set()-1; + // Make it the next larger odd + par_t next_max_par = max_abs_par_ + 1; + all_edge_par_.resize(arena_->edge_vector().size(), + std::numeric_limits::max()); + + // The parities are modified much like for colorize_parity + // however if the acceptance condition is "min", we negate all + // parities to get "max" + // The algorithm works on negative or positive parities alike + //| kind/style | n | empty tr. | other tr. | result | min par + //|------------+-----+---------------+------------+--------------|--------- + //| max odd | any | set to {-1} | unchanged | max odd n | -1 + //| max even | any | set to {0} | add 1 | max odd n+1 | 0 + //| min odd | any | set to {-n} | negate | max odd 0 | -n + //| min even | any | set to {-n+1} | negate + 1 | max odd +1 | -n + 1 + min_par_graph_ = -(!max*max_abs_par_) - (max*odd); + max_par_graph_ = max*(max_abs_par_ + !odd) + !max*!odd; + + // Takes an edge and returns the "equivalent" max odd parity + auto equiv_par = [max, odd, next_max_par, inv = 2*max-1](const auto& e) + { + par_t e_par = e.acc.max_set() - 1; // -1 for empty + // If "min" and empty -> set to n + if (!max & (e_par == -1)) + e_par = next_max_par; + // Negate if min + e_par *= inv; + // even -> odd + e_par += !odd; + return e_par; + }; + + for (const auto& e : arena_->edges()) + { + unsigned e_idx = arena_->edge_number(e); + all_edge_par_[e_idx] = equiv_par(e); + } } // Checks if an scc is empty and reports the occurring parities // or special cases inline subgame_info_t - inspect_scc(unsigned max_par) + inspect_scc(par_t max_par) { subgame_info_t info; info.is_empty = true; @@ -278,7 +331,7 @@ namespace spot if (subgame_[e.dst] == unseen_mark) { info.is_empty = false; - unsigned this_par = e.acc.max_set() - 1; + par_t this_par = to_par(e); if (this_par <= max_par) { info.all_parities.insert(this_par); @@ -301,107 +354,78 @@ namespace spot return info; } - // Checks if an scc can be trivially solved, - // that is, all vertices of the scc belong to the - // attractor of a transition leaving the scc + // Computes the trivially solvable part of the scc + // That is the states that can be attracted to an + // outgoing transition inline subgame_info_t fix_scc() { - auto scc_acc = info_->acc_sets_of(c_scc_idx_); - // 
We will override all parities of edges leaving the scc - // Currently game is colored max odd - // So there is at least one color - bool added[] = {false, false}; - unsigned par_pair[2]; - unsigned scc_new_par = std::max(scc_acc.max_set(), 1u); - bool player_color_larger; - if (scc_new_par&1) - { - player_color_larger = false; - par_pair[1] = scc_new_par; - par_pair[0] = scc_new_par+1; - } - else - { - player_color_larger = true; - par_pair[1] = scc_new_par+1; - par_pair[0] = scc_new_par; - } - acc_cond::mark_t even_mark({par_pair[0]}); - acc_cond::mark_t odd_mark({par_pair[1]}); + // Note that the winner of the transitions + // leaving the scc are already determined + // attr(...) will only modify the + // states within the current scc + // but we have to "trick" it into + // not disregarding the transitions leaving the scc + // dummy needed to pass asserts + max_abs_par_ = limit_par_even+2; + // The attractors should define their own subgame + // but as we want to compute the attractors of the + // leaving transitions, we need to make + // sure that + // a) no transition is excluded due to its parity + // b) no transition is considered accepting/winning + // due to its parity + // Final note: Attractors cannot intersect by definition + // therefore the order in which they are computed + // is irrelevant + unsigned dummy_rd = 0; + // Attractor of outgoing transitions winning for env + attr(dummy_rd, false, limit_par_even, true, limit_par_even, false); + // Attractor of outgoing transitions winning for player + attr(dummy_rd, true, limit_par_even+1, true, limit_par_even+1, false); - // Only necessary to pass tests - max_abs_par_ = std::max(par_pair[0], par_pair[1]); + // No strategy fix need + // assert if all winning states of the current scc have a valid strategy - for (unsigned v : c_states()) - { - assert(subgame_[v] == unseen_mark); - bool owner = (*owner_ptr_)[v]; - for (auto &e : arena_->out(v)) - { - // The outgoing edges are taken finitely often - // -> disregard parity - if (info_->scc_of(e.dst) != c_scc_idx_) - { - // Edge leaving the scc - change_stash_.emplace_back(arena_->edge_number(e), - e.dst, e.acc); - if (w_.winner(e.dst)) - { - // Winning region off player -> - // odd mark if player - // else 1 (smallest loosing for env) - e.acc = owner ? odd_mark - : acc_cond::mark_t({1}); - added[1] = true; - } - else - { - // Winning region of env -> - // even mark for env, - // else 0 (smallest loosing for player) - e.acc = !owner ? 
even_mark - : acc_cond::mark_t({0}); - added[0] = true; - } - // Replace with self-loop - e.dst = e.src; - } - } // e - } // v + assert([&]() + { + for (unsigned v : c_states()) + { + if (!w_.has_winner_[v]) + continue; + // We only need a strategy if the winner + // of the state is also the owner + if (w_.winner(v) != (*owner_ptr_)[v]) + continue; + if (s_[v] <= 0) + { + std::cerr << "state " << v << " has a winner " + << w_.winner(v) << " and owner " + << (*owner_ptr_)[v] + << " but no strategy " + << s_[v] << '\n'; + return false; + } + const auto& e = arena_->edge_storage(s_[v]); + if (!w_.has_winner_[e.dst] + || (w_.winner(e.src) != w_.winner(e.dst))) + { + std::cerr << "state " << v << " has a winner " + << w_.winner(v) + << " but no valid strategy\n"; + return false; + } + } + return true; + }()); - // Compute the attractors of the self-loops/transitions leaving scc - // These can be directly added to the winning states - // To avoid disregarding edges in attr computation we - // need to start with the larger color - // Todo come up with a test for this - unsigned dummy_rd; - - for (bool p : {player_color_larger, - !player_color_larger}) - { - if (added[p]) - { - // Always take the larger, - // Otherwise states with an transition to a winning AND - // a loosing scc are treated incorrectly - attr(dummy_rd, p, par_pair[p], true, par_pair[p]); - } - } - - if (added[0] || added[1]) - // Fix "negative" strategy - for (unsigned v : c_states()) - if (subgame_[v] != unseen_mark) - s_[v] = std::abs(s_[v]); - - return inspect_scc(unseen_mark); + auto ins = inspect_scc(limit_par_even); + return ins; } // fix_scc inline bool - attr(unsigned &rd, bool p, unsigned max_par, - bool acc_par, unsigned min_win_par, - bool no_check=false) + attr(unsigned &rd, bool p, par_t max_par, + bool acc_par, par_t min_win_par, bool respect_sg=true) { // In fix_scc, the attr computation is // abused so we can not check ertain things @@ -418,8 +442,8 @@ namespace spot // As proposed in Oink! 
/ PGSolver // Needs the transposed graph however - assert((no_check || !acc_par) || (acc_par && (max_par&1) == p)); - assert(!acc_par || (0 < min_win_par)); + assert((!acc_par) || (acc_par && to_player(max_par) == p)); + assert(!acc_par || (min_par_graph_ <= min_win_par)); assert((min_win_par <= max_par) && (max_par <= max_abs_par_)); bool grown = false; @@ -435,19 +459,16 @@ namespace spot do { - if (!to_add.empty()) + grown |= !to_add.empty(); + for (unsigned v : to_add) { - grown = true; - for (unsigned v : to_add) + // v is winning + w_.set(v, p); + // Mark if demanded + if (acc_par) { - // v is winning - w_.set(v, p); - // Mark if demanded - if (acc_par) - { - assert(subgame_[v] == unseen_mark); - subgame_[v] = rd; - } + assert(subgame_[v] == unseen_mark); + subgame_[v] = rd; } } to_add.clear(); @@ -455,7 +476,7 @@ namespace spot for (unsigned v : c_states()) { if ((subgame_[v] < rd) || (w_(v, p))) - // Not in subgame or winning + // Not in subgame or winning for p continue; bool is_owned = (*owner_ptr_)[v] == p; @@ -465,11 +486,12 @@ namespace spot // Optim: If given the choice, // we seek to go to the "oldest" subgame // That is the subgame with the lowest rd value - unsigned min_subgame_idx = -1u; + unsigned min_subgame_idx = unseen_mark; for (const auto &e: arena_->out(v)) { - unsigned this_par = e.acc.max_set() - 1; - if ((subgame_[e.dst] >= rd) && (this_par <= max_par)) + par_t this_par = to_par(e); + if ((!respect_sg || (subgame_[e.dst] >= rd)) + && (this_par <= max_par)) { // Check if winning if (w_(e.dst, p) @@ -477,7 +499,7 @@ namespace spot { assert(!acc_par || (this_par < min_win_par) || (acc_par && (min_win_par <= this_par) && - ((this_par&1) == p))); + (to_player(this_par) == p))); if (is_owned) { wins = true; @@ -528,7 +550,7 @@ namespace spot // We need to check if transitions that are accepted due // to their parity remain in the winning region of p inline bool - fix_strat_acc(unsigned rd, bool p, unsigned min_win_par, unsigned max_par) + fix_strat_acc(unsigned rd, bool p, par_t min_win_par, par_t max_par) { for (unsigned v : c_states()) { @@ -544,28 +566,28 @@ namespace spot const auto &e_s = arena_->edge_storage(s_[v]); // Optimization only for player if (!p && w_(e_s.dst, p)) - // If current strat is admissible -> nothing to do - // for env + // If current strat is admissible -> + // nothing to do for env continue; // This is an accepting edge that is no longer admissible // or we seek a more desirable edge (for player) - assert(min_win_par <= e_s.acc.max_set() - 1); - assert(e_s.acc.max_set() - 1 <= max_par); + assert(min_win_par <= to_par(e_s)); + assert(to_par(e_s) <= max_par); // Strategy heuristic : go to the oldest subgame - unsigned min_subgame_idx = -1u; + unsigned min_subgame_idx = unseen_mark; - s_[v] = -1; + s_[v] = no_strat_mark; for (const auto &e_fix : arena_->out(v)) { if (subgame_[e_fix.dst] >= rd) { - unsigned this_par = e_fix.acc.max_set() - 1; + par_t this_par = to_par(e_fix); // This edge must have less than max_par, // otherwise it would have already been attracted assert((this_par <= max_par) - || ((this_par&1) != (max_par&1))); + || (to_player(this_par) != (max_par&1))); // if it is accepting and leads to the winning region // -> valid fix if ((min_win_par <= this_par) @@ -579,7 +601,7 @@ namespace spot } } } - if (s_[v] == -1) + if (s_[v] == no_strat_mark) // NO fix found // This state is NOT won by p due to any accepting edges return true; // true -> grown @@ -600,7 +622,7 @@ namespace spot case (0): { assert(this_work.rd == 0); - 
assert(this_work.min_par == 0); + assert(this_work.min_par == min_par_graph_); unsigned rd; assert(this_work.max_par <= max_abs_par_); @@ -623,18 +645,20 @@ namespace spot // -> Priority compression // Optional, improves performance // Highest actually occurring - unsigned max_par = *subgame_info.all_parities.begin(); - unsigned min_win_par = max_par; - while ((min_win_par > 2) && - (!subgame_info.all_parities.count(min_win_par-1))) + // Attention in partially colored graphs + // the parity -1 and 0 appear + par_t max_par = *subgame_info.all_parities.begin(); + par_t min_win_par = max_par; + while ((min_win_par >= (min_par_graph_+2)) && + (!subgame_info.all_parities.count(min_win_par - 1))) min_win_par -= 2; - assert(max_par > 0); + assert(min_win_par >= min_par_graph_); + assert(max_par >= min_win_par); + assert((max_par&1) == (min_win_par&1)); assert(!subgame_info.all_parities.empty()); - assert(min_win_par > 0); // Get the player - bool p = min_win_par&1; - assert((max_par&1) == (min_win_par&1)); + bool p = to_player(min_win_par); // Attraction to highest par // This increases rd_ and passes it to rd attr(rd, p, max_par, true, min_win_par); @@ -643,17 +667,17 @@ namespace spot // Continuation w_stack_.emplace_back(1, rd, min_win_par, max_par); // Recursion - w_stack_.emplace_back(0, 0, 0, min_win_par-1); + w_stack_.emplace_back(0, 0, min_par_graph_, min_win_par - 1); // Others attracted will have higher counts in subgame break; } case (1): { unsigned rd = this_work.rd; - unsigned min_win_par = this_work.min_par; - unsigned max_par = this_work.max_par; - assert((min_win_par&1) == (max_par&1)); - bool p = min_win_par&1; + par_t min_win_par = this_work.min_par; + par_t max_par = this_work.max_par; + assert(to_player(min_win_par) == to_player(max_par)); + bool p = to_player(min_win_par); // Check if the attractor of w_[!p] is equal to w_[!p] // if so, player wins if there remain accepting transitions // for max_par (see fix_strat_acc) @@ -683,9 +707,9 @@ namespace spot // Mark as unseen subgame_[v] = unseen_mark; // Unset strat for testing - s_[v] = -1; + s_[v] = no_strat_mark; } - w_stack_.emplace_back(0, 0, 0, max_par); + w_stack_.emplace_back(0, 0, min_par_graph_, max_par); // No need to do anything else // the attractor of !p of this level is not changed } @@ -697,20 +721,6 @@ namespace spot } // while } // zielonka - // Undo change to the graph made along the way - inline void restore() - { - // "Unfix" the edges leaving the sccs - // This is called once the game has been solved - for (auto &e_stash : change_stash_) - { - auto &e = arena_->edge_storage(e_stash.e_num); - e.dst = e_stash.e_dst; - e.acc = e_stash.e_acc; - } - // Done - } - // Empty all internal variables inline void clean_up() { @@ -721,12 +731,11 @@ namespace spot s_.clear(); rd_ = 0; max_abs_par_ = 0; - change_stash_.clear(); } // Dedicated solver for special cases inline void one_par_subgame_solver(const subgame_info_t &info, - unsigned max_par) + par_t max_par) { assert(info.all_parities.size() == 1); // The entire subgame is won by the player of the only parity @@ -735,8 +744,8 @@ namespace spot // This subgame gets its own counter ++rd_; unsigned rd = rd_; - unsigned one_par = *info.all_parities.begin(); - bool winner = one_par & 1; + par_t one_par = *info.all_parities.begin(); + bool winner = to_player(one_par); assert(one_par <= max_par); for (unsigned v : c_states()) @@ -747,10 +756,10 @@ namespace spot subgame_[v] = rd; w_.set(v, winner); // Get the strategy - assert(s_[v] == -1); + assert(s_[v] == 
no_strat_mark); for (const auto &e : arena_->out(v)) { - unsigned this_par = e.acc.max_set() - 1; + par_t this_par = to_par(e); if ((subgame_[e.dst] >= rd) && (this_par <= max_par)) { assert(this_par == one_par); @@ -764,7 +773,18 @@ namespace spot // Done } - const unsigned unseen_mark = std::numeric_limits::max(); + template + inline par_t + to_par(const EDGE& e) + { + return all_edge_par_[arena_->edge_number(e)]; + } + + inline bool + to_player(par_t par) + { + return par & 1; + } twa_graph_ptr arena_; const std::vector *owner_ptr_; @@ -775,18 +795,22 @@ namespace spot // strategies for env and player; For synthesis only player is needed // We need a signed value here in order to "fix" the strategy // during construction - std::vector s_; + std::vector s_; // Informations about sccs andthe current scc std::unique_ptr info_; - unsigned max_abs_par_; // Max parity occurring in the current scc + par_t max_abs_par_; // Max parity occurring in the current scc + // Minimal and maximal parity occurring in the entire graph + par_t min_par_graph_, max_par_graph_; // Info on the current scc unsigned c_scc_idx_; - // Fixes made to the sccs that have to be undone - // before returning - std::vector change_stash_; // Change recursive calls to stack std::vector w_stack_; + // Directly store a vector of parities + // This vector will be created such + // that it takes care of the actual parity condition + // and after that zielonka can be called as if max odd + std::vector all_edge_par_; }; } // anonymous @@ -813,6 +837,24 @@ namespace spot void pg_print(std::ostream& os, const const_twa_graph_ptr& arena) { auto owner = ensure_parity_game(arena, "pg_print"); + // Ensure coloring + assert([&]() + { + bool max; + bool odd; + arena->acc().is_parity(max, odd, true); + return max && odd; + }() && "pg_printer needs max-odd parity"); + assert([&]() + { + for (unsigned ie = 0; ie < arena->num_edges(); ++ie) + { + const auto& es = arena->edge_storage(ie+1); + if (!es.acc) + return false; + } + return true; + }() && "Arena must be colorized"); unsigned ns = arena->num_states(); unsigned init = arena->get_init_state_number(); diff --git a/tests/python/except.py b/tests/python/except.py index 8674721c9..508ffd7f9 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -243,16 +243,6 @@ except RuntimeError as e: else: report_missing_exception() -try: - spot.solve_parity_game(a1) -except RuntimeError as e: - tc.assertIn( - "solve_parity_game(): arena must have max-odd acceptance condition", - str(e)) -else: - report_missing_exception() - - try: spot.formula_Star(spot.formula("a"), 10, 333) except OverflowError as e: diff --git a/tests/python/game.py b/tests/python/game.py index cea09f295..647c8d347 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -18,7 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import spot +import spot, buddy from unittest import TestCase tc = TestCase() @@ -274,3 +274,100 @@ games = spot.split_edges(game) spot.set_state_players(games, spot.get_state_players(game)) tc.assertTrue(spot.solve_game(games, si)) +g = spot.translate("GF(a&X(a)) -> GFb") +a = buddy.bdd_ithvar(g.register_ap("a")) +b = buddy.bdd_ithvar(g.register_ap("b")) +gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), + False, True, True, False) +spot.change_parity_here(gdpa, spot.parity_kind_max, spot.parity_style_odd) +gsdpa = spot.split_2step(gdpa, b, True) +spot.colorize_parity_here(gsdpa, True) +tc.assertTrue(spot.solve_parity_game(gsdpa)) +tc.assertEqual(spot.highlight_strategy(gsdpa).to_str("HOA", "1.1"), +"""HOA: v1.1 +States: 18 +Start: 0 +AP: 2 "a" "b" +acc-name: parity max odd 5 +Acceptance: 5 Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0)))) +properties: trans-labels explicit-labels trans-acc colored complete +properties: deterministic +spot.highlight.states: 0 4 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 4 """ ++"""10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 +spot.highlight.edges: 15 4 17 4 20 4 22 4 24 4 26 4 28 4 30 4 31 4 32 4 33 4 +spot.state-player: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 +controllable-AP: 1 +--BODY-- +State: 0 +[!0] 7 {0} +[0] 8 {0} +State: 1 +[!0] 9 {3} +[0] 10 {3} +State: 2 +[!0] 11 {1} +[0] 12 {1} +State: 3 +[!0] 9 {3} +[0] 13 {4} +State: 4 +[!0] 11 {1} +[0] 14 {2} +State: 5 +[!0] 15 {3} +[0] 16 {3} +State: 6 +[!0] 15 {3} +[0] 17 {4} +State: 7 +[!1] 1 {0} +[1] 2 {0} +State: 8 +[!1] 3 {0} +[1] 4 {0} +State: 9 +[!1] 1 {3} +[1] 5 {3} +State: 10 +[!1] 3 {3} +[1] 6 {3} +State: 11 +[!1] 2 {1} +[1] 2 {3} +State: 12 +[!1] 4 {1} +[1] 4 {3} +State: 13 +[!1] 3 {4} +[1] 4 {4} +State: 14 +[!1] 4 {2} +[1] 4 {3} +State: 15 +[t] 5 {3} +State: 16 +[t] 6 {3} +State: 17 +[t] 4 {4} +--END--""" +) + +# Test the different parity conditions +gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), + False, True, True, False) + +g_test = spot.change_parity(gdpa, spot.parity_kind_max, spot.parity_style_odd) +g_test_split = spot.split_2step(g_test, b, True) +sp = spot.get_state_players(g_test_split) +g_test_split_c = spot.colorize_parity(g_test_split) +spot.set_state_players(g_test_split_c, sp) +tc.assertTrue(spot.solve_parity_game(g_test_split_c)) +c_strat = spot.get_strategy(g_test_split_c) +# All versions of parity need to result in the same strategy +for kind in [spot.parity_kind_min, spot.parity_kind_max]: + for style in [spot.parity_style_even, spot.parity_style_odd]: + g_test_split1 = spot.change_parity(g_test_split, kind, style) + spot.set_state_players(g_test_split1, sp) + tc.assertTrue(spot.solve_parity_game(g_test_split1)) + c_strat1 = spot.get_strategy(g_test_split1) + tc.assertTrue(c_strat == c_strat1) From 6bc1dd0467025240e09b2d678c3d669feee899f6 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Fri, 24 Jun 2022 14:12:50 +0200 Subject: [PATCH 097/606] Use new zielonka for synthesis Remove all now unnecessary colorize_parity and change_parity calls. 
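As a rough illustration of the simplified flow (a sketch only, using the Python
bindings exercised in tests/python/game.py rather than the exact code path of
synthesis.cc; the formula and the choice of "b" as the sole controllable AP are
made up for the example):

    import spot, buddy

    g = spot.translate("GF(a&X(a)) -> GFb")
    b = buddy.bdd_ithvar(g.register_ap("b"))
    # Determinize into a parity automaton and split it into a two-player game.
    dpa = spot.tgba_determinize(spot.degeneralize_tba(g), False, True, True, False)
    game = spot.split_2step(dpa, b, True)
    # No change_parity()/colorize_parity() round-trip is needed anymore:
    # solve_parity_game() now accepts any of the four parity conditions,
    # even on a partially colored arena.
    spot.solve_parity_game(game)
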
* spot/twaalgos/synthesis.cc: Change here * spot/twaalgos/game.cc: Adjust pg-print * tests/core/ltlsynt.test, tests/python/_mealy.ipynb, tests/python/games.ipynb, tests/python/synthesis.ipynb, tests/python/synthesis.py: Adjust tests --- spot/twaalgos/game.cc | 97 +-- spot/twaalgos/synthesis.cc | 9 +- tests/core/ltlsynt.test | 28 +- tests/python/_mealy.ipynb | 76 +-- tests/python/games.ipynb | 454 ++++++------- tests/python/synthesis.ipynb | 1186 +++++++++++++++------------------- tests/python/synthesis.py | 14 +- 7 files changed, 848 insertions(+), 1016 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 3f041ac1c..419b33fe3 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -836,54 +836,63 @@ namespace spot void pg_print(std::ostream& os, const const_twa_graph_ptr& arena) { - auto owner = ensure_parity_game(arena, "pg_print"); - // Ensure coloring - assert([&]() - { - bool max; - bool odd; - arena->acc().is_parity(max, odd, true); - return max && odd; - }() && "pg_printer needs max-odd parity"); - assert([&]() - { - for (unsigned ie = 0; ie < arena->num_edges(); ++ie) - { - const auto& es = arena->edge_storage(ie+1); - if (!es.acc) - return false; - } - return true; - }() && "Arena must be colorized"); + ensure_parity_game(arena, "pg_print"); - unsigned ns = arena->num_states(); - unsigned init = arena->get_init_state_number(); - os << "parity " << ns - 1 << ";\n"; - std::vector seen(ns, false); - std::vector todo({init}); - while (!todo.empty()) + auto do_print = [&os](const const_twa_graph_ptr& arena) { - unsigned src = todo.back(); - todo.pop_back(); - if (seen[src]) - continue; - seen[src] = true; - os << src << ' '; - os << arena->out(src).begin()->acc.max_set() - 1 << ' '; - os << (*owner)[src] << ' '; - bool first = true; - for (auto& e: arena->out(src)) + const region_t& owner = get_state_players(arena); + + unsigned ns = arena->num_states(); + unsigned init = arena->get_init_state_number(); + os << "parity " << ns - 1 << ";\n"; + std::vector seen(ns, false); + std::vector todo({init}); + while (!todo.empty()) { - if (!first) - os << ','; - first = false; - os << e.dst; - if (!seen[e.dst]) - todo.push_back(e.dst); + unsigned src = todo.back(); + todo.pop_back(); + if (seen[src]) + continue; + seen[src] = true; + os << src << ' '; + os << arena->out(src).begin()->acc.max_set() - 1 << ' '; + os << owner[src] << ' '; + bool first = true; + for (auto& e: arena->out(src)) + { + if (!first) + os << ','; + first = false; + os << e.dst; + if (!seen[e.dst]) + todo.push_back(e.dst); + } + if (src == init) + os << " \"INIT\""; + os << ";\n"; } - if (src == init) - os << " \"INIT\""; - os << ";\n"; + }; + // Ensure coloring + // PGSolver format expects max odd and colored + bool is_par, max, odd; + is_par = arena->acc().is_parity(max, odd, true); + assert(is_par && "pg_printer needs parity condition"); + bool is_colored = (max & odd) ? 
std::all_of(arena->edges().begin(), + arena->edges().end(), + [](const auto& e) + { + return (bool) e.acc; + }) + : false; + if (is_colored) + do_print(arena); + else + { + auto arena2 = change_parity(arena, parity_kind_max, parity_style_odd); + colorize_parity_here(arena2, true); + set_state_players(arena2, + get_state_players(arena)); + do_print(arena2); } } diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 5a5d1297a..41aa736e2 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -837,13 +837,10 @@ namespace spot if (force_sbacc) dpa = sbacc(dpa); reduce_parity_here(dpa, true); - change_parity_here(dpa, parity_kind_max, - parity_style_odd); assert(( [&dpa]() -> bool { bool max, odd; - dpa->acc().is_parity(max, odd); - return max && odd; + return dpa->acc().is_parity(max, odd); }())); assert(is_deterministic(dpa)); return dpa; @@ -936,7 +933,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(tmp, outs, true); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) @@ -959,7 +955,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(aut, outs, true); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) @@ -1031,8 +1026,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(dpa, outs, true); - change_parity_here(dpa, parity_kind_max, parity_style_odd); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 9319d96a8..6cb449012 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -232,7 +232,7 @@ LAR construction done in X seconds DPA has 4 states, 1 colors split inputs and outputs done in X seconds automaton has 12 states -solving game with acceptance: parity max odd 3 +solving game with acceptance: co-Büchi game solved in X seconds EOF ltlsynt -f "G(Fi0 && Fi1 && Fi2) -> G(i1 <-> o0)" --outs="o0" --algo=lar \ @@ -708,7 +708,7 @@ LAR construction done in X seconds DPA has 4 states, 1 colors split inputs and outputs done in X seconds automaton has 9 states -solving game with acceptance: Streett 1 +solving game with acceptance: Büchi game solved in X seconds simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates @@ -727,7 +727,7 @@ LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds simplification took X seconds trying to create strategy directly for (a | x) -> x @@ -738,7 +738,7 @@ LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates @@ -797,7 +797,7 @@ LAR construction done in X seconds DPA has 2 states, 1 colors split inputs and outputs done in X seconds automaton has 5 states -solving game with acceptance: Streett 1 +solving game with acceptance: Büchi game solved in X seconds simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates @@ -819,7 +819,7 @@ LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: Streett 1 +solving game with 
acceptance: all game solved in X seconds simplification took X seconds trying to create strategy directly for a -> c @@ -830,7 +830,7 @@ LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds simplification took X seconds trying to create strategy directly for a -> d @@ -841,7 +841,7 @@ LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates @@ -903,7 +903,7 @@ LAR construction done in X seconds DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 2 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds translating formula done in X seconds automaton has 2 states and 2 colors @@ -911,7 +911,7 @@ LAR construction done in X seconds DPA has 2 states, 2 colors split inputs and outputs done in X seconds automaton has 5 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds EOF @@ -927,7 +927,7 @@ ACD construction done in X seconds DPA has 2 states, 2 colors split inputs and outputs done in X seconds automaton has 6 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: generalized-Streett 1 1 game solved in X seconds simplification took X seconds translating formula done in X seconds @@ -936,7 +936,7 @@ ACD construction done in X seconds DPA has 1 states, 0 colors split inputs and outputs done in X seconds automaton has 2 states -solving game with acceptance: Streett 1 +solving game with acceptance: all game solved in X seconds simplification took X seconds EOF @@ -959,7 +959,7 @@ LAR construction done in X seconds DPA has 1 states, 1 colors split inputs and outputs done in X seconds automaton has 3 states -solving game with acceptance: Streett 1 +solving game with acceptance: Büchi game solved in X seconds EOF ltlsynt -f "G(o1) & (GFi <-> GFo1)" --outs="o1" --verbose\ @@ -977,7 +977,7 @@ LAR construction done in X seconds DPA has 2 states, 2 colors split inputs and outputs done in X seconds automaton has 6 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: Streett 1 game solved in X seconds simplification took X seconds EOF diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index 4e7374852..c2aeb125c 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -65,78 +65,70 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !c\n", - "\n", + "\n", + "\n", + "!a & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a | c\n", - "\n", + "\n", + "\n", + "a | c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", + "\n", + "\n", + "!b & !d\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "b | d\n", - "\n", + "\n", + 
"\n", + "b | d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f32ec50ce40> >" + " *' at 0x7fc1244a3d50> >" ] }, "execution_count": 4, @@ -216,7 +208,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f32ec571c30> >" + " *' at 0x7fc124439570> >" ] }, "execution_count": 6, @@ -290,7 +282,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f32ec571c30> >" + " *' at 0x7fc124439570> >" ] }, "execution_count": 8, @@ -301,6 +293,14 @@ "source": [ "x" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "923a59d6", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -319,7 +319,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index ac3490b2b..891ebcd94 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -689,246 +689,230 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "I->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "4->10\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "4->11\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "5->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "6->1\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1->6\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "1->7\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "3->9\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "9->2\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "9->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "10->0\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "10->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "11->1\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "11->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n" ], 
"text/plain": [ - " *' at 0x7f202420db10> >" + " *' at 0x7f657c403180> >" ] }, "execution_count": 8, @@ -963,43 +947,43 @@ "States: 12\n", "Start: 4\n", "AP: 2 \"b\" \"a\"\n", - "acc-name: parity max odd 3\n", - "Acceptance: 3 Fin(2) & (Inf(1) | Fin(0))\n", - "properties: trans-labels explicit-labels trans-acc colored complete\n", + "acc-name: co-Buchi\n", + "Acceptance: 1 Fin(0)\n", + "properties: trans-labels explicit-labels trans-acc complete\n", "properties: deterministic\n", "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1 1\n", "controllable-AP: 0\n", "--BODY--\n", "State: 0\n", - "[!1] 5 {1}\n", - "[1] 6 {2}\n", + "[!1] 5\n", + "[1] 6 {0}\n", "State: 1\n", - "[1] 6 {2}\n", - "[!1] 7 {2}\n", + "[1] 6 {0}\n", + "[!1] 7 {0}\n", "State: 2\n", - "[t] 8 {1}\n", + "[t] 8\n", "State: 3\n", - "[t] 9 {1}\n", + "[t] 9\n", "State: 4\n", - "[!1] 10 {1}\n", - "[1] 11 {1}\n", + "[!1] 10\n", + "[1] 11\n", "State: 5\n", - "[t] 0 {1}\n", + "[t] 0\n", "State: 6\n", - "[t] 1 {2}\n", + "[t] 1 {0}\n", "State: 7\n", - "[t] 0 {2}\n", + "[t] 0 {0}\n", "State: 8\n", - "[t] 2 {1}\n", + "[t] 2\n", "State: 9\n", - "[!0] 2 {1}\n", - "[0] 3 {2}\n", + "[!0] 2\n", + "[0] 3 {0}\n", "State: 10\n", - "[!0] 0 {1}\n", - "[0] 3 {1}\n", + "[!0] 0\n", + "[0] 3\n", "State: 11\n", - "[!0] 1 {1}\n", - "[0] 3 {1}\n", + "[!0] 1\n", + "[0] 3\n", "--END--\n" ] } @@ -1049,246 +1033,230 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "I->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "4->10\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "4->11\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "5->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "6->1\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1->6\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "1->7\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "3->9\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "9->2\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "9->3\n", - "\n", - 
"\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "10->0\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "10->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "11->1\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "11->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f202420df90> >" + " *' at 0x7f658c612f30> >" ] }, "execution_count": 11, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 08af437e2..3e8b4f5ea 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -59,644 +59,587 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "\n", + "\n", + "i1\n", 
"\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", 
+ "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f01fc12f030> >" + " *' at 0x7fc9680c32d0> >" ] }, "metadata": {}, @@ -743,583 +686,526 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "I->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "9->25\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "9->26\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "10->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "1->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "1->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "12->1\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "13->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "2->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - 
"\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "17->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "4->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "4->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "18->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "5->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5->16\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5->18\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "5->19\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "19->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "6->21\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "20->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "20->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "21->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->12\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "7->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "22->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "22->7\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23->4\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->17\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "28->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1668,7 +1554,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc1b5f00> >" + " *' at 0x7fc9682230f0> >" ] }, "metadata": {}, @@ -1855,7 +1741,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc12fd80> >" + " *' at 0x7fc968069180> >" ] }, "metadata": {}, @@ -1992,7 +1878,7 @@ "\n" 
], "text/plain": [ - " *' at 0x7f01fc1b5bd0> >" + " *' at 0x7fc968069210> >" ] }, "metadata": {}, @@ -2085,7 +1971,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc1b5cc0> >" + " *' at 0x7fc968069180> >" ] }, "metadata": {}, @@ -2178,7 +2064,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc2a8b70> >" + " *' at 0x7fc968069210> >" ] }, "metadata": {}, @@ -2315,7 +2201,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc1b5de0> >" + " *' at 0x7fc968069060> >" ] }, "metadata": {}, @@ -2715,7 +2601,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2879,56 +2765,52 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", @@ -2948,7 +2830,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -2956,7 +2838,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -2976,7 +2858,7 @@ "\n", "\n", "1\n", - "\n", + "\n", "\n", "\n", "\n", @@ -2984,51 +2866,47 @@ "\n", "\n", "!o0\n", - "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f01fc14bb10> >" + " *' at 0x7fc968143d80> >" ] }, "metadata": {}, @@ -3145,7 +3023,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc12f090> >" + " *' at 0x7fc9680c3330> >" ] }, "metadata": {}, @@ -3378,7 +3256,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3507,7 +3385,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3559,72 +3437,64 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -3634,72 +3504,64 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett 1]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - 
"\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3939,7 +3801,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4004,7 +3866,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc14bae0> >" + " *' at 0x7fc968143bd0> >" ] }, "metadata": {}, @@ -4117,7 +3979,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4297,7 +4159,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4421,7 +4283,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f01fc1b5f90> >" + " *' at 0x7fc968069ed0> >" ] }, "execution_count": 16, diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 559dc2d24..98ac889d8 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -35,18 +35,18 @@ tc.assertEqual(game.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "a" -acc-name: Streett 1 -Acceptance: 2 Fin(0) | Inf(1) -properties: trans-labels explicit-labels trans-acc colored complete +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc complete properties: deterministic spot-state-player: 0 1 1 controllable-AP: --BODY-- State: 0 -[!0] 1 {0} -[0] 2 {1} +[!0] 1 +[0] 2 {0} State: 1 -[t] 0 {0} +[t] 0 State: 2 -[t] 0 {1} +[t] 0 {0} --END--""") From ddbdcd39cb4f721c75c67004fe46265aec3e17fd Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 29 Jun 2022 01:21:00 +0200 Subject: [PATCH 098/606] Adept ltlsynt pgame to new solver * bin/ltlsynt.cc: Remove change/colorize_parity, check alternating --- bin/ltlsynt.cc | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index e7d6b73d1..bcd9d41d9 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -736,12 +736,42 @@ namespace } if (!arena->get_named_prop>("state-player")) arena = spot::split_2step(arena, true); - // FIXME: If we do not split the game, we should check that it is - // alternating. - spot::change_parity_here(arena, - spot::parity_kind_max, - spot::parity_style_odd); - spot::colorize_parity_here(arena, true); + else + { + // Check if the game is alternating and fix trivial cases + const unsigned N = arena->num_states(); + // Can not use get_state_players because we need a non-const version + auto spptr = + arena->get_named_prop>("state-player"); + assert(spptr); + const bdd& outs = get_synthesis_outputs(arena); + for (unsigned n = 0; n < N; ++n) + { + const bool p = (*spptr)[n]; + for (auto& e : arena->out(n)) + { + if (p != (*spptr)[e.dst]) + continue; // All good + // Check if the condition is a simply conjunction of input and + // output. If so insert an intermediate state + // This also covers trivial self-loops + bdd cond = e.cond; + bdd i_cond = bdd_exist(cond, outs); + bdd o_cond = bdd_existcomp(cond, outs); + if ((i_cond & o_cond) == cond) + { + unsigned inter = arena->new_state(); + spptr->push_back(!p); + e.cond = p ? o_cond : i_cond; + e.dst = inter; + arena->new_edge(inter, e.dst, !p ? 
o_cond : i_cond); + } + else + throw std::runtime_error("ltlsynt: given parity game is not" + "alternating and not trivially fixable!"); + } + } + } if (gi->bv) { gi->bv->split_time += sw_local.stop(); From 99bf152673592223114d308d426d86c248d274f3 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 29 Jun 2022 14:02:25 +0200 Subject: [PATCH 099/606] propagate_marks_here can break state-acc prop * spot/twaalgos/degen.cc: Fix * tests/python/pdegen.py: Test --- spot/twaalgos/degen.cc | 4 ++++ tests/python/pdegen.py | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 11092ddac..0c07ccfa8 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -1168,5 +1168,9 @@ namespace spot unsigned idx = aut->edge_number(e); e.acc = marks[idx]; } + // If aut was state-based acc before, this might no longer + // this might no longer be the case + if (aut->prop_state_acc() == 1) + aut->prop_state_acc(0); } } diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 12bc9e39a..7df9f0878 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -442,3 +442,44 @@ si = spot.scc_info(aut15) aut15b = si.split_on_sets(2, [])[0]; d aut15c = spot.partial_degeneralize(aut15b) tc.assertTrue(aut15c.equivalent_to(aut15b)) + + +# Testing property propagation/update +# for propagate_marks_here + +s = """HOA: v1 +States: 3 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[0] 1 +[!0] 2 +State: 1 {0} +[0] 0 +State: 2 +[!0] 0 +--END--""" +aut = spot.automaton(s) +spot.propagate_marks_here(aut) +s2 = aut.to_str("hoa") + +tc.assertEqual(s2, """HOA: v1 +States: 3 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[0] 1 {0} +[!0] 2 +State: 1 +[0] 0 {0} +State: 2 +[!0] 0 +--END--""") \ No newline at end of file From db725ffaf81fd90f44b1455d517a7d65a438dc84 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 30 Jun 2022 09:18:03 +0200 Subject: [PATCH 100/606] * spot/twaalgos/degen.cc (propagate_marks_here): Cleanup previous patch. --- spot/twaalgos/degen.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 0c07ccfa8..333efe6e6 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -1170,7 +1170,7 @@ namespace spot } // If aut was state-based acc before, this might no longer // this might no longer be the case - if (aut->prop_state_acc() == 1) - aut->prop_state_acc(0); + if (aut->prop_state_acc().is_true()) + aut->prop_state_acc(false); } } From 1fc94ee6f2c0097086557416a8cad75d638285cc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 30 Jun 2022 09:36:59 +0200 Subject: [PATCH 101/606] gnulib: install the environ module This should fix compilation on OSX, as reported by Yann Thierry-Mieg. * m4/environ.m4: New file. * m4/gnulib-cache.m4, m4/gnulib-comp.m4: Update. * bin/common_trans.cc [HAVE_SPAWN_H]: Do not define environ. 
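
For reference, a minimal sketch of the pattern this enables (illustrative
only -- not the actual bin/common_trans.cc code, and the helper name is
hypothetical): the gnulib `environ' module (see m4/environ.m4 below)
arranges for <unistd.h> to declare `environ' even on platforms whose libc
does not, so the manual fallback declaration removed by this patch is no
longer needed.

  // Illustrative sketch, assuming gnulib's `environ' module is in use.
  #include <unistd.h>   // declares `environ' once the module is installed

  // Hypothetical helper returning the environment for a spawned process.
  static char** spawn_environment()
  {
    // Previously some systems needed a manual `extern char **environ;' here.
    return environ;
  }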
--- bin/common_trans.cc | 2 -- m4/environ.m4 | 46 +++++++++++++++++++++++++++++++++++++++++++++ m4/gnulib-cache.m4 | 3 ++- m4/gnulib-comp.m4 | 4 +++- 4 files changed, 51 insertions(+), 4 deletions(-) create mode 100644 m4/environ.m4 diff --git a/bin/common_trans.cc b/bin/common_trans.cc index 9ab719a5b..e34f3d77d 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -840,8 +840,6 @@ exec_command(const char* cmd) SPOT_UNREACHABLE(); return; } -#else -extern char **environ; #endif int diff --git a/m4/environ.m4 b/m4/environ.m4 new file mode 100644 index 000000000..ae5329108 --- /dev/null +++ b/m4/environ.m4 @@ -0,0 +1,46 @@ +# environ.m4 serial 8 +dnl Copyright (C) 2001-2004, 2006-2021 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN_ONCE([gl_ENVIRON], +[ + AC_REQUIRE([gl_UNISTD_H_DEFAULTS]) + dnl Persuade glibc to declare environ. + AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) + + AC_CHECK_HEADERS_ONCE([unistd.h]) + gt_CHECK_VAR_DECL( + [#if HAVE_UNISTD_H + #include + #endif + /* mingw, BeOS, Haiku declare environ in , not in . */ + #include + ], + [environ]) + if test $gt_cv_var_environ_declaration != yes; then + HAVE_DECL_ENVIRON=0 + fi +]) + +# Check if a variable is properly declared. +# gt_CHECK_VAR_DECL(includes,variable) +AC_DEFUN([gt_CHECK_VAR_DECL], +[ + define([gt_cv_var], [gt_cv_var_]$2[_declaration]) + AC_CACHE_CHECK([if $2 is properly declared], [gt_cv_var], + [AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[$1 + typedef struct { int foo; } foo_t; + extern foo_t $2;]], + [[$2.foo = 1;]])], + [gt_cv_var=no], + [gt_cv_var=yes])]) + if test $gt_cv_var = yes; then + AC_DEFINE([HAVE_]m4_translit($2, [a-z], [A-Z])[_DECL], 1, + [Define if you have the declaration of $2.]) + fi + undefine([gt_cv_var]) +]) diff --git a/m4/gnulib-cache.m4 b/m4/gnulib-cache.m4 index ad3802b82..e7f448f36 100644 --- a/m4/gnulib-cache.m4 +++ b/m4/gnulib-cache.m4 @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2020 Free Software Foundation, Inc. +# Copyright (C) 2002-2020, 2022 Free Software Foundation, Inc. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -57,6 +57,7 @@ gl_MODULES([ argmatch argp closeout + environ error isatty mkstemp diff --git a/m4/gnulib-comp.m4 b/m4/gnulib-comp.m4 index 54215ad69..66d18ca01 100644 --- a/m4/gnulib-comp.m4 +++ b/m4/gnulib-comp.m4 @@ -1,5 +1,5 @@ # DO NOT EDIT! GENERATED AUTOMATICALLY! -# Copyright (C) 2002-2020 Free Software Foundation, Inc. +# Copyright (C) 2002-2020, 2022 Free Software Foundation, Inc. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -246,6 +246,8 @@ AC_SUBST([LTALLOCA]) AC_LIBOBJ([lstat]) gl_PREREQ_LSTAT fi + gl_ENVIRON + gl_UNISTD_MODULE_INDICATOR([environ]) gl_SYS_STAT_MODULE_INDICATOR([lstat]) gl_FUNC_MALLOC_GNU if test $REPLACE_MALLOC = 1; then From 833fcdebc16551e515c3536440a2d81e787b604b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Jul 2022 10:38:51 +0200 Subject: [PATCH 102/606] work around GCC bug 106159 * m4/gccwarn.m4: Add an example of multiple inheritance of virtual classes to trigger to new -Woverloaded-virtual warning on the destructor. 
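
For reference, the probe added below boils down to compiling a snippet like
the following (shown here in a self-contained form; the `main' wrapper is
only illustrative) together with each candidate warning flag.  On the GCC
versions affected by bug 106159, -Woverloaded-virtual emits a spurious
diagnostic about the virtual destructors in this perfectly valid code, the
intent being that configure then leaves that flag out of the default
warning set instead of polluting every build with false positives.

  // Multiple inheritance of classes with virtual destructors,
  // as added to the configure warning probe below.
  struct left  { virtual ~left()  {} };
  struct right { virtual ~right() {} };
  struct both : public left, public right {};

  int main()
  {
    both b;      // involves the destructors of both bases
    (void) b;
    return 0;
  }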
--- m4/gccwarn.m4 | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/m4/gccwarn.m4 b/m4/gccwarn.m4 index dc6969add..13f770ccc 100644 --- a/m4/gccwarn.m4 +++ b/m4/gccwarn.m4 @@ -22,6 +22,13 @@ AC_DEFUN([CF_GXX_WARNINGS], #line __oline__ "configure" #include #include + +// From GCC bug 106159 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106159 +struct left { virtual ~left() {} }; +struct right { virtual ~right() {} }; +struct both: public left, public right {}; + int main(int argc, char *argv[[]]) { // This string comparison is here to detect superfluous From efee1c4130f8d9b93a2540d69053b30614c7092d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Jul 2022 10:41:24 +0200 Subject: [PATCH 103/606] * spot/twaalgos/mealy_machine.cc (is_complete_): Define in debug only. --- spot/twaalgos/mealy_machine.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index f985da506..6bbb9c4f7 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -67,6 +67,7 @@ namespace return true; } +#ifndef NDEBUG bool is_complete_(const const_twa_graph_ptr& m, const bdd& outs) { @@ -84,6 +85,7 @@ namespace } return true; } +#endif } From ff896013068e31aaa55761717b05d5525df118d1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Jul 2022 10:56:57 +0200 Subject: [PATCH 104/606] utf8: Update to match current version * utf8/LICENSE, utf8/utf8/cpp11.h, utf8/utf8/cpp17.h: New files. * Makefile.am: Distribute them. * utf8/README.md, utf8/utf8/checked.h, utf8/utf8/core.h, utf8/utf8/unchecked.h: Update to the current version of utfcpp. * README: Add a link to the upstream github. --- Makefile.am | 7 +- README | 4 +- utf8/LICENSE | 23 ++ utf8/README.md | 681 +++++++++++++++++++++++++++++++++--------- utf8/utf8/checked.h | 50 ++-- utf8/utf8/core.h | 42 +-- utf8/utf8/cpp11.h | 103 +++++++ utf8/utf8/cpp17.h | 103 +++++++ utf8/utf8/unchecked.h | 78 ++++- 9 files changed, 897 insertions(+), 194 deletions(-) create mode 100644 utf8/LICENSE create mode 100644 utf8/utf8/cpp11.h create mode 100644 utf8/utf8/cpp17.h diff --git a/Makefile.am b/Makefile.am index a0dc9a316..db7a60d9b 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2017, 2020 Laboratoire de Recherche et Développement +## Copyright (C) 2011-2017, 2020, 2022 Laboratoire de Recherche et Développement ## de l'Epita (LRDE). ## Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), ## département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -36,8 +36,9 @@ DOC_SUBDIR = doc SUBDIRS = picosat buddy lib ltdl spot bin tests $(PYTHON_SUBDIR) $(DOC_SUBDIR) \ $(NEVER_SUBDIRS) -UTF8 = utf8/README.md utf8/utf8.h \ - utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h +UTF8 = utf8/README.md utf8/LICENSE utf8/utf8.h \ + utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h \ + utf8/utf8/cpp11.h utf8/utf8/cpp17.h DEBIAN = \ debian/changelog \ diff --git a/README b/README index 7581df4ad..a0b7c9579 100644 --- a/README +++ b/README @@ -325,13 +325,13 @@ bench/ Benchmarks for ... wdba/ ... WDBA minimization (for obligation properties). 
python/ Python bindings for Spot and BuDDy -Third party software +Third-party software -------------------- buddy/ A customized version of BuDDy 2.3 (a BDD library). ltdl/ Libtool's portable dlopen() wrapper library. lib/ Gnulib's portability modules. -utf8/ Nemanja Trifunovic's utf-8 routines. +utf8/ Trifunovic's utf-8 routines. https://github.com/nemtrif/utfcpp elisp/ Related emacs modes, used for building the documentation. picosat/ A distribution of PicoSAT 965 (a satsolver library). spot/bricks/ A collection of useful C++ code provided by DiVinE diff --git a/utf8/LICENSE b/utf8/LICENSE new file mode 100644 index 000000000..36b7cd93c --- /dev/null +++ b/utf8/LICENSE @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/utf8/README.md b/utf8/README.md index d4369e85d..a519cdb96 100644 --- a/utf8/README.md +++ b/utf8/README.md @@ -3,9 +3,9 @@ ## Introduction -Many C++ developers miss an easy and portable way of handling Unicode encoded strings. The original C++ Standard (known as C++98 or C++03) is Unicode agnostic. C++11 provides some support for Unicode on core language and library level: u8, u, and U character and string literals, char16_t and char32_t character types, u16string and u32string library classes, and codecvt support for conversions between Unicode encoding forms. In the meantime, developers use third party libraries like ICU, OS specific capabilities, or simply roll out their own solutions. +C++ developers miss an easy and portable way of handling Unicode encoded strings. The original C++ Standard (known as C++98 or C++03) is Unicode agnostic. C++11 provides some support for Unicode on core language and library level: u8, u, and U character and string literals, char16_t and char32_t character types, u16string and u32string library classes, and codecvt support for conversions between Unicode encoding forms. In the meantime, developers use third party libraries like ICU, OS specific capabilities, or simply roll out their own solutions. -In order to easily handle UTF-8 encoded Unicode strings, I came up with a small, C++98 compatible generic library. For anybody used to work with STL algorithms and iterators, it should be easy and natural to use. 
The code is freely available for any purpose - check out the license at the beginning of the utf8.h file. The library has been used a lot in the past ten years both in commercial and open-source projects and is considered feature-complete now. If you run into bugs or performance issues, please let me know and I'll do my best to address them. +In order to easily handle UTF-8 encoded Unicode strings, I came up with a small, C++98 compatible generic library. For anybody used to work with STL algorithms and iterators, it should be easy and natural to use. The code is freely available for any purpose - check out the [license](./LICENSE). The library has been used a lot in the past ten years both in commercial and open-source projects and is considered feature-complete now. If you run into bugs or performance issues, please let me know and I'll do my best to address them. The purpose of this article is not to offer an introduction to Unicode in general, and UTF-8 in particular. If you are not familiar with Unicode, be sure to check out [Unicode Home Page](http://www.unicode.org/) or some other source of information for Unicode. Also, it is not my aim to advocate the use of UTF-8 encoded strings in C++ programs; if you want to handle UTF-8 encoded strings from C++, I am sure you have good reasons for it. @@ -28,50 +28,78 @@ int main(int argc, char** argv) cout << "\nUsage: docsample filename\n"; return 0; } - const char* test_file_path = argv[1]; - // Open the test file (contains UTF-8 encoded text) + // Open the test file (must be UTF-8 encoded) ifstream fs8(test_file_path); if (!fs8.is_open()) { - cout << "Could not open " << test_file_path << endl; - return 0; + cout << "Could not open " << test_file_path << endl; + return 0; } unsigned line_count = 1; string line; // Play with all the lines in the file while (getline(fs8, line)) { - // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function) + // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function) +#if __cplusplus >= 201103L // C++ 11 or later + auto end_it = utf8::find_invalid(line.begin(), line.end()); +#else string::iterator end_it = utf8::find_invalid(line.begin(), line.end()); +#endif // C++ 11 if (end_it != line.end()) { cout << "Invalid UTF-8 encoding detected at line " << line_count << "\n"; cout << "This part is fine: " << string(line.begin(), end_it) << "\n"; } - // Get the line length (at least for the valid part) int length = utf8::distance(line.begin(), end_it); cout << "Length of line " << line_count << " is " << length << "\n"; // Convert it to utf-16 +#if __cplusplus >= 201103L // C++ 11 or later + u16string utf16line = utf8::utf8to16(line); +#else vector utf16line; utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line)); - - // And back to utf-8 +#endif // C++ 11 + // And back to utf-8; +#if __cplusplus >= 201103L // C++ 11 or later + string utf8line = utf8::utf16to8(utf16line); +#else string utf8line; utf8::utf16to8(utf16line.begin(), utf16line.end(), back_inserter(utf8line)); - +#endif // C++ 11 // Confirm that the conversion went OK: if (utf8line != string(line.begin(), end_it)) cout << "Error in UTF-16 conversion at line: " << line_count << "\n"; line_count++; - } + } + return 0; } ``` In the previous code sample, for each line we performed a detection of invalid UTF-8 sequences with `find_invalid`; the number of characters (more precisely - the number of Unicode code points, including the end of line and even BOM if there is one) in each line 
was determined with a use of `utf8::distance`; finally, we have converted each line to UTF-16 encoding with `utf8to16` and back to UTF-8 with `utf16to8`. +Note a different pattern of usage for old compilers. For instance, this is how we convert +a UTF-8 encoded string to a UTF-16 encoded one with a pre - C++11 compiler: +```cpp + vector utf16line; + utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line)); +``` + +With a more modern compiler, the same operation would look like: +```cpp + u16string utf16line = utf8::utf8to16(line); +``` +If `__cplusplus` macro points to a C++ 11 or later, the library exposes API that takes into +account C++ standard Unicode strings and move semantics. With an older compiler, it is still +possible to use the same functionality, just in a little less convenient way + +In case you do not trust the `__cplusplus` macro or, for instance, do not want to include +the C++ 11 helper functions even with a modern compiler, define `UTF_CPP_CPLUSPLUS` macro +before including `utf8.h` and assign it a value for the standard you want to use - the values are the same as for the `__cplusplus` macro. This can be also useful with compilers that are conservative in setting the `__cplusplus` macro even if they have a good support for a recent standard edition - Microsoft's Visual C++ is one example. + ### Checking if a file contains valid UTF-8 text Here is a function that checks whether the content of a file is valid UTF-8 encoded text without reading the content into the memory: @@ -90,7 +118,7 @@ bool valid_utf8_file(const char* file_name) } ``` -Because the function `utf8::is_valid()` works with input iterators, we were able to pass an `istreambuf_iterator` to it and read the content of the file directly without loading it to the memory first. +Because the function `utf8::is_valid()` works with input iterators, we were able to pass an `istreambuf_iterator` to `it` and read the content of the file directly without loading it to the memory first. Note that other functions that take input iterator arguments can be used in a similar way. For instance, to read the content of a UTF-8 encoded text file and convert the text to UTF-16, just do something like: @@ -113,10 +141,56 @@ void fix_utf8_string(std::string& str) The function will replace any invalid UTF-8 sequence with a Unicode replacement character. There is an overloaded function that enables the caller to supply their own replacement character. + +## Points of interest + +#### Design goals and decisions + +The library was designed to be: + +1. Generic: for better or worse, there are many C++ string classes out there, and the library should work with as many of them as possible. +2. Portable: the library should be portable both accross different platforms and compilers. The only non-portable code is a small section that declares unsigned integers of different sizes: three typedefs. They can be changed by the users of the library if they don't match their platform. The default setting should work for Windows (both 32 and 64 bit), and most 32 bit and 64 bit Unix derivatives. Support for post C++03 language features is included for modern compilers at API level only, so the library should work even with pretty old compilers. +3. Lightweight: follow the "pay only for what you use" guideline. +4. Unintrusive: avoid forcing any particular design or even programming style on the user. This is a library, not a framework. 
+ +#### Alternatives + +In case you want to look into other means of working with UTF-8 strings from C++, here is the list of solutions I am aware of: + +1. [ICU Library](http://icu.sourceforge.net/). It is very powerful, complete, feature-rich, mature, and widely used. Also big, intrusive, non-generic, and doesn't play well with the Standard Library. I definitelly recommend looking at ICU even if you don't plan to use it. +2. C++11 language and library features. Still far from complete, and not easy to use. +3. [Glib::ustring](http://www.gtkmm.org/gtkmm2/docs/tutorial/html/ch03s04.html). A class specifically made to work with UTF-8 strings, and also feel like `std::string`. If you prefer to have yet another string class in your code, it may be worth a look. Be aware of the licensing issues, though. +4. Platform dependent solutions: Windows and POSIX have functions to convert strings from one encoding to another. That is only a subset of what my library offers, but if that is all you need it may be good enough. + + ## Reference ### Functions From utf8 Namespace +#### utf8::append + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Encodes a 32 bit code point as a UTF-8 sequence of octets and appends the sequence to a UTF-8 string. + +```cpp +void append(char32_t cp, std::string& s); +``` + +`cp`: a code point to append to the string. +`s`: a utf-8 encoded string to append the code point to. + +Example of use: + +```cpp +std::string u; +append(0x0448, u); +assert (u[0] == char(0xd1) && u[1] == char(0x88) && u.length() == 2); +``` + +In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown. + + #### utf8::append Available in version 1.0 and later. @@ -238,39 +312,6 @@ In case `start` is reached before a UTF-8 lead octet is hit, or if an invalid UT In case `start` equals `it`, a `not_enough_room` exception is thrown. -#### utf8::previous - -Deprecated in version 1.02 and later. - -Given a reference to an iterator pointing to an octet in a UTF-8 seqence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bits representation of the code point. - -```cpp -template -uint32_t previous(octet_iterator& it, octet_iterator pass_start); -``` - -`octet_iterator`: a random access iterator. -`it`: a reference pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point. -`pass_start`: an iterator to the point in the sequence where the search for the beginning of a code point is aborted if no result was reached. It is a safety measure to prevent passing the beginning of the string in the search for a UTF-8 lead octet. -Return value: the 32 bit representation of the previous code point. - -Example of use: - -```cpp -char* twochars = "\xe6\x97\xa5\xd1\x88"; -unsigned char* w = twochars + 3; -int cp = previous (w, twochars - 1); -assert (cp == 0x65e5); -assert (w == twochars); -``` - - -`utf8::previous` is deprecated, and `utf8::prior` should be used instead, although the existing code can continue using this function. The problem is the parameter `pass_start` that points to the position just before the beginning of the sequence. Standard containers don't have the concept of "pass start" and the function can not be used with their iterators. 
- -`it` will typically point to the beginning of a code point, and `pass_start` will point to the octet just before the beginning of the string to ensure we don't go backwards too far. `it` is decreased until it points to a lead UTF-8 octet, and then the UTF-8 sequence beginning with that octet is decoded to a 32 bit representation and returned. - -In case `pass_start` is reached before a UTF-8 lead octet is hit, or if an invalid UTF-8 sequence is started by the lead octet, an `invalid_utf8` exception is thrown - #### utf8::advance Available in version 1.0 and later. @@ -284,8 +325,8 @@ void advance (octet_iterator& it, distance_type n, octet_iterator end); `octet_iterator`: an input iterator. `distance_type`: an integral type convertible to `octet_iterator`'s difference type. `it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. -`n`: a positive integer that shows how many code points we want to advance. -`end`: end of the UTF-8 sequence to be processed. If `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown. +`n`: number of code points `it` should be advanced. A negative value means decrement. +`end`: limit of the UTF-8 sequence to be processed. If `n` is positive and `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown. If `n` is negative and `it` reaches `end` while `it` points t a trail byte of a UTF-8 sequence, a `utf8::invalid_code_point` exception is thrown. Example of use: @@ -294,10 +335,10 @@ char* twochars = "\xe6\x97\xa5\xd1\x88"; unsigned char* w = twochars; advance (w, 2, twochars + 6); assert (w == twochars + 5); +advance (w, -2, twochars); +assert (w == twochars); ``` -This function works only "forward". In case of a negative `n`, there is no effect. - In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown. #### utf8::distance @@ -328,6 +369,54 @@ This function is used to find the length (in code points) of a UTF-8 encoded str In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `last` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::utf16to8 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-16 encoded string to UTF-8. + +```cpp +std::string utf16to8(const std::u16string& s); +``` + +`s`: a UTF-16 encoded string. +Return value: A UTF-8 encoded string. + +Example of use: + +```cpp + u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e}; + string u = utf16to8(utf16string); + assert (u.size() == 10); +``` + +In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. + +#### utf8::utf16to8 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts a UTF-16 encoded string to UTF-8. + +```cpp +std::string utf16to8(std::u16string_view s); +``` + +`s`: a UTF-16 encoded string. +Return value: A UTF-8 encoded string. + +Example of use: + +```cpp + u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e}; + u16string_view utf16stringview(u16string); + string u = utf16to8(utf16string); + assert (u.size() == 10); +``` + +In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. + + #### utf8::utf16to8 Available in version 1.0 and later. 
@@ -357,6 +446,57 @@ assert (utf8result.size() == 10); In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. +#### utf8::utf8to16 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts an UTF-8 encoded string to UTF-16. + +```cpp +std::u16string utf8to16(const std::string& s); +``` + +`s`: an UTF-8 encoded string to convert. +Return value: A UTF-16 encoded string + +Example of use: + +```cpp +string utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e"; +u16string utf16result = utf8to16(utf8_with_surrogates); +assert (utf16result.length() == 4); +assert (utf16result[2] == 0xd834); +assert (utf16result[3] == 0xdd1e); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + +#### utf8::utf8to16 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts an UTF-8 encoded string to UTF-16. + +```cpp +std::u16string utf8to16(std::string_view s); +``` + +`s`: an UTF-8 encoded string to convert. +Return value: A UTF-16 encoded string + +Example of use: + +```cpp +string_view utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e"; +u16string utf16result = utf8to16(utf8_with_surrogates); +assert (utf16result.length() == 4); +assert (utf16result[2] == 0xd834); +assert (utf16result[3] == 0xdd1e); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + + #### utf8::utf8to16 Available in version 1.0 and later. @@ -387,6 +527,54 @@ assert (utf16result[3] == 0xdd1e); In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::utf32to8 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-32 encoded string to UTF-8. + +```cpp +std::string utf32to8(const std::u32string& s); +``` + +`s`: a UTF-32 encoded string. +Return value: a UTF-8 encoded string. + +Example of use: + +```cpp +u32string utf32string = {0x448, 0x65E5, 0x10346}; +string utf8result = utf32to8(utf32string); +assert (utf8result.size() == 9); +``` + +In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. + +#### utf8::utf32to8 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts a UTF-32 encoded string to UTF-8. + +```cpp +std::string utf32to8(std::u32string_view s); +``` + +`s`: a UTF-32 encoded string. +Return value: a UTF-8 encoded string. + +Example of use: + +```cpp +u32string utf32string = {0x448, 0x65E5, 0x10346}; +u32string_view utf32stringview(utf32string); +string utf8result = utf32to8(utf32stringview); +assert (utf8result.size() == 9); +``` + +In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. + + #### utf8::utf32to8 Available in version 1.0 and later. @@ -407,7 +595,7 @@ Return value: An iterator pointing to the place after the appended UTF-8 string. Example of use: -``` +```cpp int utf32string[] = {0x448, 0x65E5, 0x10346, 0}; vector utf8result; utf32to8(utf32string, utf32string + 3, back_inserter(utf8result)); @@ -416,6 +604,53 @@ assert (utf8result.size() == 9); In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. +#### utf8::utf8to32 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-8 encoded string to UTF-32. + +```cpp +std::u32string utf8to32(const std::string& s); +``` + +`s`: a UTF-8 encoded string. 
+Return value: a UTF-32 encoded string. + +Example of use: + +```cpp +const char* twochars = "\xe6\x97\xa5\xd1\x88"; +u32string utf32result = utf8to32(twochars); +assert (utf32result.size() == 2); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + +#### utf8::utf8to32 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts a UTF-8 encoded string to UTF-32. + +```cpp +std::u32string utf8to32(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: a UTF-32 encoded string. + +Example of use: + +```cpp +string_view twochars = "\xe6\x97\xa5\xd1\x88"; +u32string utf32result = utf8to32(twochars); +assert (utf32result.size() == 2); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + + #### utf8::utf8to32 Available in version 1.0 and later. @@ -445,6 +680,53 @@ assert (utf32result.size() == 2); In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::find_invalid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Detects an invalid sequence within a UTF-8 string. + +```cpp +std::size_t find_invalid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string::npos`. + +Example of use: + +```cpp +string utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +auto invalid = find_invalid(utf_invalid); +assert (invalid == 5); +``` + +This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. + +#### utf8::find_invalid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Detects an invalid sequence within a UTF-8 string. + +```cpp +std::size_t find_invalid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string_view::npos`. + +Example of use: + +```cpp +string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +auto invalid = find_invalid(utf_invalid); +assert (invalid == 5); +``` + +This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. + + #### utf8::find_invalid Available in version 1.0 and later. @@ -471,6 +753,53 @@ assert (invalid == utf_invalid + 5); This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. +#### utf8::is_valid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Checks whether a string object contains valid UTF-8 encoded text. + +```cpp +bool is_valid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not. 
+ +Example of use: + +```cpp +char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa"; +bool bvalid = is_valid(utf_invalid); +assert (bvalid == false); +``` + +You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid. + +#### utf8::is_valid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Checks whether a string object contains valid UTF-8 encoded text. + +```cpp +bool is_valid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not. + +Example of use: + +```cpp +string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +bool bvalid = is_valid(utf_invalid); +assert (bvalid == false); +``` + +You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid. + + #### utf8::is_valid Available in version 1.0 and later. @@ -497,6 +826,59 @@ assert (bvalid == false); `is_valid` is a shorthand for `find_invalid(start, end) == end;`. You may want to use it to make sure that a byte seqence is a valid UTF-8 string without the need to know where it fails if it is not valid. +#### utf8::replace_invalid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +std::string replace_invalid(const std::string& s, char32_t replacement); +std::string replace_invalid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd` +Return value: A UTF-8 encoded string with replaced invalid sequences. + +Example of use: + +```cpp +string invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +string replace_invalid_result = replace_invalid(invalid_sequence, '?'); +bvalid = is_valid(replace_invalid_result); +assert (bvalid); +const string fixed_invalid_sequence = "a????z"; +assert (fixed_invalid_sequence == replace_invalid_result); +``` + +#### utf8::replace_invalid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +std::string replace_invalid(std::string_view s, char32_t replacement); +std::string replace_invalid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd` +Return value: A UTF-8 encoded string with replaced invalid sequences. + +Example of use: + +```cpp +string_view invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +string replace_invalid_result = replace_invalid(invalid_sequence, '?'); +bool bvalid = is_valid(replace_invalid_result); +assert (bvalid); +const string fixed_invalid_sequence = "a????z"; +assert(fixed_invalid_sequence, replace_invalid_result); +``` + + #### utf8::replace_invalid Available in version 2.0 and later. @@ -532,11 +914,64 @@ assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), `replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range. 
-If `end` does not point to the past-of-end of a UTF-8 sequence, a `utf8::not_enough_room` exception is thrown. +#### utf8::starts_with_bom + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Checks whether a string starts with a UTF-8 byte order mark (BOM) + +```cpp +bool starts_with_bom(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not. + +Example of use: + +```cpp +string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)}; +bool bbom = starts_with_bom(byte_order_mark); +assert (bbom == true); +string threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88"; +bool no_bbom = starts_with_bom(threechars); +assert (no_bbom == false); + ``` + +The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. + #### utf8::starts_with_bom -Available in version 2.3 and later. Relaces deprecated `is_bom()` function. +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Checks whether a string starts with a UTF-8 byte order mark (BOM) + +```cpp +bool starts_with_bom(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not. + +Example of use: + +```cpp +string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)}; +string_view byte_order_mark_view(byte_order_mark); +bool bbom = starts_with_bom(byte_order_mark_view); +assert (bbom); +string_view threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88"; +bool no_bbom = starts_with_bom(threechars); +assert (!no_bbom); + ``` + +The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. + + +#### utf8::starts_with_bom + +Available in version 2.3 and later. Checks whether an octet sequence starts with a UTF-8 byte order mark (BOM) @@ -560,33 +995,6 @@ assert (bbom == true); The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. -#### utf8::is_bom - -Available in version 1.0 and later. Deprecated in version 2.3\. `starts_with_bom()` should be used instead. - -Checks whether a sequence of three octets is a UTF-8 byte order mark (BOM) - -```cpp -template -bool is_bom (octet_iterator it); // Deprecated -``` - -`octet_iterator`: an input iterator. -`it`: beginning of the 3-octet sequence to check -Return value: `true` if the sequence is UTF-8 byte order mark; `false` if not. - -Example of use: - -```cpp -unsigned char byte_order_mark[] = {0xef, 0xbb, 0xbf}; -bool bbom = is_bom(byte_order_mark); -assert (bbom == true); -``` - -The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. - -If a sequence is shorter than three bytes, an invalid iterator will be dereferenced. Therefore, this function is deprecated in favor of `starts_with_bom()`that takes the end of sequence as an argument. - ### Types From utf8 Namespace #### utf8::exception @@ -678,15 +1086,24 @@ class iterator; ##### Member functions -`iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. 
+`iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. + `explicit iterator (const octet_iterator& octet_it, const octet_iterator& range_start, const octet_iterator& range_end);` a constructor that initializes the underlying octet_iterator with octet_it and sets the range in which the iterator is considered valid. + `octet_iterator base () const;` returns the underlying octet_iterator. + `uint32_t operator * () const;` decodes the utf-8 sequence the underlying octet_iterator is pointing to and returns the code point. + `bool operator == (const iterator& rhs) const;` returns `true` if the two underlaying iterators are equal. + `bool operator != (const iterator& rhs) const;` returns `true` if the two underlaying iterators are not equal. + `iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point. + `iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one. + `iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point. + `iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one. Example of use: @@ -824,34 +1241,6 @@ assert (w == twochars); This is a faster but less safe version of `utf8::prior`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. -#### utf8::unchecked::previous (deprecated, see utf8::unchecked::prior) - -Deprecated in version 1.02 and later. - -Given a reference to an iterator pointing to an octet in a UTF-8 seqence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bits representation of the code point. - -```cpp -template -uint32_t previous(octet_iterator& it); -``` - -`it`: a reference pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point. -Return value: the 32 bit representation of the previous code point. - -Example of use: - -```cpp -char* twochars = "\xe6\x97\xa5\xd1\x88"; -char* w = twochars + 3; -int cp = unchecked::previous (w); -assert (cp == 0x65e5); -assert (w == twochars); -``` - -The reason this function is deprecated is just the consistency with the "checked" versions, where `prior` should be used instead of `previous`. In fact, `unchecked::previous` behaves exactly the same as `unchecked::prior` - -This is a faster but less safe version of `utf8::previous`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. - #### utf8::unchecked::advance Available in version 1.0 and later. @@ -863,8 +1252,8 @@ template void advance (octet_iterator& it, distance_type n); ``` -`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. -`n`: a positive integer that shows how many code points we want to advance. +`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. +`n`: number of code points `it` should be advanced. A negative value means decrement. 
Example of use: @@ -875,8 +1264,6 @@ unchecked::advance (w, 2); assert (w == twochars + 5); ``` -This function works only "forward". In case of a negative `n`, there is no effect. - This is a faster but less safe version of `utf8::advance`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. #### utf8::unchecked::distance @@ -1013,6 +1400,43 @@ assert (utf32result.size() == 2); This is a faster but less safe version of `utf8::utf8to32`. It does not check for validity of the supplied UTF-8 sequence. +#### utf8::unchecked::replace_invalid + +Available in version 3.1 and later. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +template +output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement); +template +output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out); +``` + +`octet_iterator`: an input iterator. +`output_iterator`: an output iterator. +`start`: an iterator pointing to the beginning of the UTF-8 string to look for invalid UTF-8 sequences. +`end`: an iterator pointing to pass-the-end of the UTF-8 string to look for invalid UTF-8 sequences. +`out`: An output iterator to the range where the result of replacement is stored. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd` +Return value: An iterator pointing to the place after the UTF-8 string with replaced invalid sequences. + +Example of use: + +```cpp +char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +vector replace_invalid_result; +unchecked::replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), back_inserter(replace_invalid_result), '?'); +bvalid = utf8::is_valid(replace_invalid_result.begin(), replace_invalid_result.end()); +assert (bvalid); +char* fixed_invalid_sequence = "a????z"; +assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), fixed_invalid_sequence)); +``` + +`replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range. + +Unlike `utf8::replace_invalid`, this function does not verify validity of the replacement marker. + ### Types From utf8::unchecked Namespace #### utf8::iterator @@ -1029,14 +1453,23 @@ class iterator; ##### Member functions `iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. -`explicit iterator (const octet_iterator& octet_it);` a constructor that initializes the underlying octet_iterator with `octet_it` + +`explicit iterator (const octet_iterator& octet_it);` a constructor that initializes the underlying octet_iterator with `octet_it`. + `octet_iterator base () const;` returns the underlying octet_iterator. + `uint32_t operator * () const;` decodes the utf-8 sequence the underlying octet_iterator is pointing to and returns the code point. + `bool operator == (const iterator& rhs) const;` returns `true` if the two underlaying iterators are equal. + `bool operator != (const iterator& rhs) const;` returns `true` if the two underlaying iterators are not equal. + `iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point. 
+ `iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one. + `iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point. + `iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one. Example of use: @@ -1062,26 +1495,6 @@ assert (*un_it == 0x10346); This is an unchecked version of `utf8::iterator`. It is faster in many cases, but offers no validity or range checks. -## Points of interest - -#### Design goals and decisions - -The library was designed to be: - -1. Generic: for better or worse, there are many C++ string classes out there, and the library should work with as many of them as possible. -2. Portable: the library should be portable both accross different platforms and compilers. The only non-portable code is a small section that declares unsigned integers of different sizes: three typedefs. They can be changed by the users of the library if they don't match their platform. The default setting should work for Windows (both 32 and 64 bit), and most 32 bit and 64 bit Unix derivatives. At this point I don't plan to use any post C++03 features, so the library should work even with pretty old compilers. -3. Lightweight: follow the "pay only for what you use" guideline. -4. Unintrusive: avoid forcing any particular design or even programming style on the user. This is a library, not a framework. - -#### Alternatives - -In case you want to look into other means of working with UTF-8 strings from C++, here is the list of solutions I am aware of: - -1. [ICU Library](http://icu.sourceforge.net/). It is very powerful, complete, feature-rich, mature, and widely used. Also big, intrusive, non-generic, and doesn't play well with the Standard Library. I definitelly recommend looking at ICU even if you don't plan to use it. -2. C++11 language and library features. Still far from complete, and not easy to use. -3. [Glib::ustring](http://www.gtkmm.org/gtkmm2/docs/tutorial/html/ch03s04.html). A class specifically made to work with UTF-8 strings, and also feel like `std::string`. If you prefer to have yet another string class in your code, it may be worth a look. Be aware of the licensing issues, though. -4. Platform dependent solutions: Windows and POSIX have functions to convert strings from one encoding to another. That is only a subset of what my library offers, but if that is all you need it may be good enough. - ## Links 1. [The Unicode Consortium](http://www.unicode.org/). 
diff --git a/utf8/utf8/checked.h b/utf8/utf8/checked.h index 2aef5838d..993b7f7c5 100644 --- a/utf8/utf8/checked.h +++ b/utf8/utf8/checked.h @@ -42,7 +42,7 @@ namespace utf8 uint32_t cp; public: invalid_code_point(uint32_t codepoint) : cp(codepoint) {} - virtual const char* what() const throw() { return "Invalid code point"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid code point"; } uint32_t code_point() const {return cp;} }; @@ -50,7 +50,7 @@ namespace utf8 uint8_t u8; public: invalid_utf8 (uint8_t u) : u8(u) {} - virtual const char* what() const throw() { return "Invalid UTF-8"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-8"; } uint8_t utf8_octet() const {return u8;} }; @@ -58,13 +58,13 @@ namespace utf8 uint16_t u16; public: invalid_utf16 (uint16_t u) : u16(u) {} - virtual const char* what() const throw() { return "Invalid UTF-16"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-16"; } uint16_t utf16_word() const {return u16;} }; class not_enough_room : public exception { public: - virtual const char* what() const throw() { return "Not enough space"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Not enough space"; } }; /// The library API - functions intended to be called by the users @@ -107,7 +107,9 @@ namespace utf8 *out++ = *it; break; case internal::NOT_ENOUGH_ROOM: - throw not_enough_room(); + out = utf8::append (replacement, out); + start = end; + break; case internal::INVALID_LEAD: out = utf8::append (replacement, out); ++start; @@ -174,23 +176,19 @@ namespace utf8 return utf8::peek_next(it, end); } - /// Deprecated in versions that include "prior" - template - uint32_t previous(octet_iterator& it, octet_iterator pass_start) - { - octet_iterator end = it; - while (utf8::internal::is_trail(*(--it))) - if (it == pass_start) - throw invalid_utf8(*it); // error - no lead byte in the sequence - octet_iterator temp = it; - return utf8::next(temp, end); - } - template void advance (octet_iterator& it, distance_type n, octet_iterator end) { - for (distance_type i = 0; i < n; ++i) - utf8::next(it, end); + const distance_type zero(0); + if (n < zero) { + // backward + for (distance_type i = n; i < zero; ++i) + utf8::prior(it, end); + } else { + // forward + for (distance_type i = zero; i < n; ++i) + utf8::next(it, end); + } } template @@ -265,11 +263,16 @@ namespace utf8 // The iterator class template - class iterator : public std::iterator { + class iterator { octet_iterator it; octet_iterator range_start; octet_iterator range_end; public: + typedef uint32_t value_type; + typedef uint32_t* pointer; + typedef uint32_t& reference; + typedef std::ptrdiff_t difference_type; + typedef std::bidirectional_iterator_tag iterator_category; iterator () {} explicit iterator (const octet_iterator& octet_it, const octet_iterator& rangestart, @@ -322,6 +325,11 @@ namespace utf8 } // namespace utf8 +#if UTF_CPP_CPLUSPLUS >= 201703L // C++ 17 or later +#include "cpp17.h" +#elif UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later +#include "cpp11.h" +#endif // C++ 11 or later + #endif //header guard - diff --git a/utf8/utf8/core.h b/utf8/utf8/core.h index ae0f367db..de6199f2a 100644 --- a/utf8/utf8/core.h +++ b/utf8/utf8/core.h @@ -30,6 +30,23 @@ DEALINGS IN THE SOFTWARE. #include +// Determine the C++ standard version. +// If the user defines UTF_CPP_CPLUSPLUS, use that. 
+// Otherwise, trust the unreliable predefined macro __cplusplus + +#if !defined UTF_CPP_CPLUSPLUS + #define UTF_CPP_CPLUSPLUS __cplusplus +#endif + +#if UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later + #define UTF_CPP_OVERRIDE override + #define UTF_CPP_NOEXCEPT noexcept +#else // C++ 98/03 + #define UTF_CPP_OVERRIDE + #define UTF_CPP_NOEXCEPT throw() +#endif // C++ 11 or later + + namespace utf8 { // The typedefs for 8-bit, 16-bit and 32-bit unsigned integers @@ -49,8 +66,8 @@ namespace internal const uint16_t LEAD_SURROGATE_MAX = 0xdbffu; const uint16_t TRAIL_SURROGATE_MIN = 0xdc00u; const uint16_t TRAIL_SURROGATE_MAX = 0xdfffu; - const uint16_t LEAD_OFFSET = LEAD_SURROGATE_MIN - (0x10000 >> 10); - const uint32_t SURROGATE_OFFSET = 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN; + const uint16_t LEAD_OFFSET = 0xd7c0u; // LEAD_SURROGATE_MIN - (0x10000 >> 10) + const uint32_t SURROGATE_OFFSET = 0xfca02400u; // 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN // Maximum valid value for a Unicode code point const uint32_t CODE_POINT_MAX = 0x0010ffffu; @@ -142,7 +159,7 @@ namespace internal if (!utf8::internal::is_trail(*it)) return INCOMPLETE_SEQUENCE; - + return UTF8_OK; } @@ -165,7 +182,7 @@ namespace internal { if (it == end) return NOT_ENOUGH_ROOM; - + code_point = utf8::internal::mask8(*it); UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) @@ -222,7 +239,7 @@ namespace internal template utf_error validate_next(octet_iterator& it, octet_iterator end, uint32_t& code_point) { - if (it == end) + if (it == end) return NOT_ENOUGH_ROOM; // Save the original value of it so we can go back in case of failure @@ -237,7 +254,7 @@ namespace internal // Get trail octets and calculate the code point utf_error err = UTF8_OK; switch (length) { - case 0: + case 0: return INVALID_LEAD; case 1: err = utf8::internal::get_sequence_1(it, end, cp); @@ -313,18 +330,7 @@ namespace internal ((it != end) && (utf8::internal::mask8(*it++)) == bom[1]) && ((it != end) && (utf8::internal::mask8(*it)) == bom[2]) ); - } - - //Deprecated in release 2.3 - template - inline bool is_bom (octet_iterator it) - { - return ( - (utf8::internal::mask8(*it++)) == bom[0] && - (utf8::internal::mask8(*it++)) == bom[1] && - (utf8::internal::mask8(*it)) == bom[2] - ); - } + } } // namespace utf8 #endif // header guard diff --git a/utf8/utf8/cpp11.h b/utf8/utf8/cpp11.h new file mode 100644 index 000000000..d93961b04 --- /dev/null +++ b/utf8/utf8/cpp11.h @@ -0,0 +1,103 @@ +// Copyright 2018 Nemanja Trifunovic + +/* +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +*/ + + +#ifndef UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1 +#define UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1 + +#include "checked.h" +#include + +namespace utf8 +{ + + inline void append(char32_t cp, std::string& s) + { + append(uint32_t(cp), std::back_inserter(s)); + } + + inline std::string utf16to8(const std::u16string& s) + { + std::string result; + utf16to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u16string utf8to16(const std::string& s) + { + std::u16string result; + utf8to16(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::string utf32to8(const std::u32string& s) + { + std::string result; + utf32to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u32string utf8to32(const std::string& s) + { + std::u32string result; + utf8to32(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::size_t find_invalid(const std::string& s) + { + std::string::const_iterator invalid = find_invalid(s.begin(), s.end()); + return (invalid == s.end()) ? std::string::npos : (invalid - s.begin()); + } + + inline bool is_valid(const std::string& s) + { + return is_valid(s.begin(), s.end()); + } + + inline std::string replace_invalid(const std::string& s, char32_t replacement) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement); + return result; + } + + inline std::string replace_invalid(const std::string& s) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline bool starts_with_bom(const std::string& s) + { + return starts_with_bom(s.begin(), s.end()); + } + +} // namespace utf8 + +#endif // header guard + diff --git a/utf8/utf8/cpp17.h b/utf8/utf8/cpp17.h new file mode 100644 index 000000000..7bfa86994 --- /dev/null +++ b/utf8/utf8/cpp17.h @@ -0,0 +1,103 @@ +// Copyright 2018 Nemanja Trifunovic + +/* +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +*/ + + +#ifndef UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9 +#define UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9 + +#include "checked.h" +#include + +namespace utf8 +{ + + inline void append(char32_t cp, std::string& s) + { + append(uint32_t(cp), std::back_inserter(s)); + } + + inline std::string utf16to8(std::u16string_view s) + { + std::string result; + utf16to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u16string utf8to16(std::string_view s) + { + std::u16string result; + utf8to16(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::string utf32to8(std::u32string_view s) + { + std::string result; + utf32to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u32string utf8to32(std::string_view s) + { + std::u32string result; + utf8to32(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::size_t find_invalid(std::string_view s) + { + std::string_view::const_iterator invalid = find_invalid(s.begin(), s.end()); + return (invalid == s.end()) ? std::string_view::npos : (invalid - s.begin()); + } + + inline bool is_valid(std::string_view s) + { + return is_valid(s.begin(), s.end()); + } + + inline std::string replace_invalid(std::string_view s, char32_t replacement) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement); + return result; + } + + inline std::string replace_invalid(std::string_view s) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline bool starts_with_bom(std::string_view s) + { + return starts_with_bom(s.begin(), s.end()); + } + +} // namespace utf8 + +#endif // header guard + diff --git a/utf8/utf8/unchecked.h b/utf8/utf8/unchecked.h index cb2427166..0e1b51cc7 100644 --- a/utf8/utf8/unchecked.h +++ b/utf8/utf8/unchecked.h @@ -32,13 +32,13 @@ DEALINGS IN THE SOFTWARE. 
namespace utf8 { - namespace unchecked + namespace unchecked { template octet_iterator append(uint32_t cp, octet_iterator result) { if (cp < 0x80) // one octet - *(result++) = static_cast(cp); + *(result++) = static_cast(cp); else if (cp < 0x800) { // two octets *(result++) = static_cast((cp >> 6) | 0xc0); *(result++) = static_cast((cp & 0x3f) | 0x80); @@ -57,6 +57,46 @@ namespace utf8 return result; } + template + output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement) + { + while (start != end) { + octet_iterator sequence_start = start; + internal::utf_error err_code = utf8::internal::validate_next(start, end); + switch (err_code) { + case internal::UTF8_OK : + for (octet_iterator it = sequence_start; it != start; ++it) + *out++ = *it; + break; + case internal::NOT_ENOUGH_ROOM: + out = utf8::unchecked::append (replacement, out); + start = end; + break; + case internal::INVALID_LEAD: + out = utf8::unchecked::append (replacement, out); + ++start; + break; + case internal::INCOMPLETE_SEQUENCE: + case internal::OVERLONG_SEQUENCE: + case internal::INVALID_CODE_POINT: + out = utf8::unchecked::append (replacement, out); + ++start; + // just one replacement mark for the sequence + while (start != end && utf8::internal::is_trail(*start)) + ++start; + break; + } + } + return out; + } + + template + inline output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out) + { + static const uint32_t replacement_marker = utf8::internal::mask16(0xfffd); + return utf8::unchecked::replace_invalid(start, end, out, replacement_marker); + } + template uint32_t next(octet_iterator& it) { @@ -85,13 +125,13 @@ namespace utf8 break; } ++it; - return cp; + return cp; } template uint32_t peek_next(octet_iterator it) { - return utf8::unchecked::next(it); + return utf8::unchecked::next(it); } template @@ -102,18 +142,19 @@ namespace utf8 return utf8::unchecked::next(temp); } - // Deprecated in versions that include prior, but only for the sake of consistency (see utf8::previous) - template - inline uint32_t previous(octet_iterator& it) - { - return utf8::unchecked::prior(it); - } - template void advance (octet_iterator& it, distance_type n) { - for (distance_type i = 0; i < n; ++i) - utf8::unchecked::next(it); + const distance_type zero(0); + if (n < zero) { + // backward + for (distance_type i = n; i < zero; ++i) + utf8::unchecked::prior(it); + } else { + // forward + for (distance_type i = zero; i < n; ++i) + utf8::unchecked::next(it); + } } template @@ -128,7 +169,7 @@ namespace utf8 template octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result) - { + { while (start != end) { uint32_t cp = utf8::internal::mask16(*start++); // Take care of surrogate pairs first @@ -138,7 +179,7 @@ namespace utf8 } result = utf8::unchecked::append(cp, result); } - return result; + return result; } template @@ -176,9 +217,14 @@ namespace utf8 // The iterator class template - class iterator : public std::iterator { + class iterator { octet_iterator it; public: + typedef uint32_t value_type; + typedef uint32_t* pointer; + typedef uint32_t& reference; + typedef std::ptrdiff_t difference_type; + typedef std::bidirectional_iterator_tag iterator_category; iterator () {} explicit iterator (const octet_iterator& octet_it): it(octet_it) {} // the default "big three" are OK From a66c305609a14380015bfb79bc03e2087a0cdc5c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Jul 2022 11:10:43 +0200 Subject: 
[PATCH 105/606] gen: work around a warning on red hat When n is an int, doing "new formula[n];" gives us "warning: argument 1 value '18446744073709551615' exceeds maximum object size 9223372036854775807" on Red Hat. * spot/gen/formulas.cc (pps_arbiter): Pass n as unsigned. Also fix some determinism in the strict variant. --- spot/gen/formulas.cc | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc index a94512970..3f63b07e7 100644 --- a/spot/gen/formulas.cc +++ b/spot/gen/formulas.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Developpement +// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et Developpement // de l'EPITA (LRDE). // // This file is part of Spot, a model checking library. @@ -1198,13 +1198,13 @@ namespace spot } static formula - pps_arbiter(std::string r_, std::string g_, int n, bool strict_) + pps_arbiter(std::string r_, std::string g_, unsigned n, bool strict_) { formula* r = new formula[n]; formula* g = new formula[n]; std::vector res; - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { r[i] = formula::ap(r_ + std::to_string(i + 1)); g[i] = formula::ap(g_ + std::to_string(i + 1)); @@ -1218,17 +1218,17 @@ namespace spot formula phi_s; { std::vector res; - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) res.push_back(formula::Not(r[i])); theta_e = formula::And(res); res.clear(); - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) res.push_back(formula::Not(g[i])); theta_s = formula::And(res); res.clear(); - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { formula left = formula::Xor(r[i], g[i]); formula right = formula::Equiv(r[i], formula::X(r[i])); @@ -1237,9 +1237,9 @@ namespace spot psi_e = formula::And(res); res.clear(); - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { - for (int j = 0; j < i; ++j) + for (unsigned j = 0; j < i; ++j) res.push_back(formula::Not(formula::And({g[i], g[j]}))); formula left = formula::Equiv(r[i], g[i]); formula right = formula::Equiv(g[i], formula::X(g[i])); @@ -1248,7 +1248,7 @@ namespace spot psi_s = formula::And(res); res.clear(); - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { formula f = formula::Not(formula::And({r[i], g[i]})); res.push_back(formula::G(formula::F(f))); @@ -1256,7 +1256,7 @@ namespace spot phi_e = formula::And(res); res.clear(); - for (int i = 0; i < n; ++i) + for (unsigned i = 0; i < n; ++i) { res.push_back(formula::G(formula::F(formula::Equiv(r[i], g[i])))); } @@ -1267,9 +1267,9 @@ namespace spot if (!strict_) { + formula left = formula::And({formula::G(psi_e), phi_e}); formula imp = - formula::Implies(formula::And({formula::G(psi_e), phi_e}), - formula::And({formula::G(psi_s), phi_s})); + formula::Implies(left, formula::And({formula::G(psi_s), phi_s})); return formula::Implies(theta_e, formula::And({theta_s, imp})); } else From ee55dabfaaba3754c4a7ac3c5fd22655f75612d9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Jul 2022 23:56:36 +0200 Subject: [PATCH 106/606] * spot/mc/cndfs.hh: Fix a unused variable warning in NDEBUG. 
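The local variable only exists to feed the assertion, so once NDEBUG compiles SPOT_ASSERT away nothing reads it and the compiler flags it as unused. A schematic illustration of the pattern (not the literal hunk):

    bool b = it.isnew();         // with NDEBUG, b is assigned but never read
    SPOT_ASSERT(!b);

    SPOT_ASSERT(!it.isnew());    // folding the call into the assertion avoids the warning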
--- spot/mc/cndfs.hh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/spot/mc/cndfs.hh b/spot/mc/cndfs.hh index 9b414764f..02768144b 100644 --- a/spot/mc/cndfs.hh +++ b/spot/mc/cndfs.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -191,9 +191,7 @@ namespace spot { // Try to insert the new state in the shared map. auto it = map_.insert(s); - bool b = it.isnew(); - - SPOT_ASSERT(!b); // should never be new in a red DFS + SPOT_ASSERT(!it.isnew()); // should never be new in a red DFS bool red = ((*it)).colors->red.load(); bool cyan = ((*it)).colors->l[tid_].cyan; bool in_Rp = ((*it)).colors->l[tid_].is_in_Rp; From 69eba6fd9a699829590d525c8b10382efaece8ef Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Jul 2022 16:34:01 +0200 Subject: [PATCH 107/606] bloemen: fix a unused variable warning As discussed in #510. * spot/mc/bloemen_ec.hh: Here. --- spot/mc/bloemen_ec.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spot/mc/bloemen_ec.hh b/spot/mc/bloemen_ec.hh index c31f0231f..9ffb32f5c 100644 --- a/spot/mc/bloemen_ec.hh +++ b/spot/mc/bloemen_ec.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -556,8 +556,8 @@ namespace spot { auto root = uf_.find(w.second); - std::lock_guard lock(w.second->acc_mutex_); - scc_acc = w.second->acc; + std::lock_guard lock(root->acc_mutex_); + scc_acc = root->acc; } // cycle found in SCC and it contains acceptance condition From 1cfb4a14ec0a938bba1b13a92fc160683a4e0908 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Jul 2022 17:11:55 +0200 Subject: [PATCH 108/606] bloemen: simplify style Fixes #510. * spot/mc/bloemen_ec.hh: Here. 
--- spot/mc/bloemen_ec.hh | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/spot/mc/bloemen_ec.hh b/spot/mc/bloemen_ec.hh index 9ffb32f5c..6e581a0ac 100644 --- a/spot/mc/bloemen_ec.hh +++ b/spot/mc/bloemen_ec.hh @@ -251,7 +251,7 @@ namespace spot uf_element* q; uf_element* r; - while (true) + do { a_root = find(a); b_root = find(b); @@ -261,28 +261,24 @@ namespace spot // Update acceptance condition { std::lock_guard rlock(a_root->acc_mutex_); - a_root->acc |= acc; acc |= a_root->acc; + a_root->acc = acc; } while (a_root->parent.load() != a_root) { a_root = find(a_root); std::lock_guard rlock(a_root->acc_mutex_); - a_root->acc |= acc; acc |= a_root->acc; + a_root->acc = acc; } return acc; } r = std::max(a_root, b_root); q = std::min(a_root, b_root); - - if (!lock_root(q)) - continue; - - break; } + while (!lock_root(q)); uf_element* a_list = lock_list(a); if (a_list == nullptr) @@ -329,9 +325,8 @@ namespace spot { std::lock_guard rlock(r->acc_mutex_); std::lock_guard qlock(q->acc_mutex_); - q->acc |= acc; - r->acc |= q->acc; - acc |= r->acc; + acc |= r->acc | q->acc; + r->acc = q->acc = acc; } while (r->parent.load() != r) @@ -339,8 +334,8 @@ namespace spot r = find(r); std::lock_guard rlock(r->acc_mutex_); std::lock_guard qlock(q->acc_mutex_); - r->acc |= q->acc; - acc |= r->acc; + acc |= r->acc | q->acc; + r->acc = acc; } unlock_list(a_list); @@ -360,9 +355,7 @@ namespace spot a_status = a->list_status_.load(); if (a_status == list_status::BUSY) - { - return a; - } + return a; if (a_status == list_status::DONE) break; @@ -407,9 +400,7 @@ namespace spot b_status = b->list_status_.load(); if (b_status == list_status::BUSY) - { - return b; - } + return b; if (b_status == list_status::DONE) break; From 5b8350bc9b8e5440d35636c16867e04a2e3aae10 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 7 Jul 2022 15:51:11 +0200 Subject: [PATCH 109/606] rpm: omit *.la files * spot.spec.in: It seems RedHat does not distribute *.la files anymore. --- spot.spec.in | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spot.spec.in b/spot.spec.in index 238647606..714d8589e 100755 --- a/spot.spec.in +++ b/spot.spec.in @@ -71,16 +71,16 @@ logic (LTL & PSL). %files -n libspot %{_libdir}/libbddx.a -%{_libdir}/libbddx.la +%exclude %{_libdir}/libbddx.la %{_libdir}/libbddx.so* %{_libdir}/libspot.a -%{_libdir}/libspot.la +%exclude %{_libdir}/libspot.la %{_libdir}/libspot.so* %{_libdir}/libspotgen.a -%{_libdir}/libspotgen.la +%exclude %{_libdir}/libspotgen.la %{_libdir}/libspotgen.so* %{_libdir}/libspotltsmin.a -%{_libdir}/libspotltsmin.la +%exclude %{_libdir}/libspotltsmin.la %{_libdir}/libspotltsmin.so* %license COPYING %doc AUTHORS COPYING NEWS README THANKS @@ -121,7 +121,7 @@ temporal logic (LTL & PSL). %dir %{python3_sitearch}/spot %{python3_sitearch}/spot/* %{python3_sitearch}/_buddy.*.a -%{python3_sitearch}/_buddy.*.la +%exclude %{python3_sitearch}/_buddy.*.la %{python3_sitearch}/_buddy.*.so %license COPYING %doc AUTHORS COPYING NEWS README THANKS From 3f333792ffac814d6961e2de55a95f568581a241 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Wed, 22 Jun 2022 10:25:35 +0200 Subject: [PATCH 110/606] Add a procedure that detects if an automaton is parity-type * spot/twaalgos/genem.cc, spot/twaalgos/genem.hh: add detection of edges that are in at least one accepting cycle. * spot/twaalgos/toparity.cc, spot/twaalgos/toparity.hh: add parity_type_to_parity and buchi_type_to_buchi. 
--- spot/twaalgos/genem.cc | 77 +++++++++++ spot/twaalgos/genem.hh | 30 +++++ spot/twaalgos/toparity.cc | 276 ++++++++++++++++++++++++++++++++++++++ spot/twaalgos/toparity.hh | 28 +++- 4 files changed, 410 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index 51b2ea903..237b10118 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -309,5 +309,82 @@ namespace spot return !scc_split_check_filtered(upper_si, forced_acc, callback, {}); } + // return ⊤ if there exists at least one accepting transition. + static bool + accepting_transitions_aux(const scc_info &si, unsigned scc, + const acc_cond acc, + acc_cond::mark_t removed_colors, + acc_cond::mark_t tocut, + std::vector &accepting_transitions, + const bitvect& kept) + { + bool result = false; + scc_and_mark_filter filt(si, scc, tocut, kept); + filt.override_acceptance(acc); + scc_info upper_si(filt, scc_info_options::ALL); + for (unsigned sc = 0; sc < upper_si.scc_count(); ++sc) + result |= accepting_transitions_scc(upper_si, sc, acc, removed_colors, + accepting_transitions, kept); + return result; + } + bool + accepting_transitions_scc(const scc_info &si, unsigned scc, + const acc_cond aut_acc, + acc_cond::mark_t removed_colors, + std::vector& accepting_transitions, + const bitvect& kept) + { + // The idea is the same as in is_scc_empty() + bool result = false; + acc_cond::mark_t sets = si.acc_sets_of(scc); + acc_cond acc = aut_acc.restrict_to(sets); + acc = acc.remove(si.common_sets_of(scc), false); + + auto inner_edges = si.inner_edges_of(scc); + + if (si.is_trivial(scc)) + return false; + if (acc.is_t() || acc.accepting(acc.get_acceptance().used_sets())) + { + for (auto& e : inner_edges) + if ((e.acc & removed_colors) == acc_cond::mark_t {}) + accepting_transitions[si.get_aut()->edge_number(e)] = true; + return true; + } + else if (acc.is_f()) + return false; + acc_cond::acc_code rest = acc_cond::acc_code::f(); + for (const acc_cond& disjunct: acc.top_disjuncts()) + if (acc_cond::mark_t fu = disjunct.fin_unit()) + result |= accepting_transitions_aux(si, scc, acc.remove(fu, true), + (removed_colors | fu), fu, + accepting_transitions, kept); + else + rest |= disjunct.get_acceptance(); + if (!rest.is_f()) + { + acc_cond::mark_t m = { (unsigned) acc.fin_one() }; + result |= accepting_transitions_aux(si, scc, acc.remove(m, true), + (removed_colors | m), m, accepting_transitions, + kept); + result |= accepting_transitions_scc(si, scc, acc.remove(m, false), + removed_colors, accepting_transitions, + kept); + } + return result; + } + + std::vector + accepting_transitions(const const_twa_graph_ptr aut, acc_cond cond) + { + auto aut_vector_size = aut->edge_vector().size(); + std::vector result(aut_vector_size, false); + auto kept = make_bitvect(aut_vector_size); + scc_info si(aut); + for (unsigned scc = 0; scc < si.scc_count(); ++scc) + accepting_transitions_scc(si, scc, cond, {}, result, *kept); + delete kept; + return result; + } } diff --git a/spot/twaalgos/genem.hh b/spot/twaalgos/genem.hh index 3c3e5de51..3fefcdc77 100644 --- a/spot/twaalgos/genem.hh +++ b/spot/twaalgos/genem.hh @@ -110,4 +110,34 @@ namespace spot SPOT_API void generic_emptiness_check_select_version(const char* emversion = nullptr); + /// \ingroup emptiness_check_algorithms + /// + /// Give the set of transitions contained in + /// an accepting cycle of the SCC \a scc of \a aut. 
+ /// + /// \param si scc_info used to describle the automaton + /// \param scc SCC to consider + /// \param aut_acc Acceptance condition used for this SCC + /// \param removed_colors A set of colors that can't appear on a transition + /// \param accepting_transitions The result. Must be a vector of size at least + /// the max index + 1 of a transition of the SCC scc and the value of each + /// index of a transition of this SCC must be set to false + /// \param kept A list of booleans that say if a transition is kept even if + /// it does not have an element of removed_colors + /// \return True if there is an accepting transition + SPOT_API bool + accepting_transitions_scc(const scc_info &si, unsigned scc, + const acc_cond aut_acc, + acc_cond::mark_t removed_colors, + std::vector& accepting_transitions, + const bitvect& kept); + + /// \ingroup emptiness_check_algorithms + /// + /// Give the set of transitions contained in an accepting cycle of \a aut. + /// \param aut Automaton to process + /// \param cond Acceptance condition associated + SPOT_API std::vector + accepting_transitions(const const_twa_graph_ptr aut, acc_cond cond); + } diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index 0b46e6224..3c3f03607 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,281 @@ namespace spot { + inline void + assign_color(acc_cond::mark_t &mark, unsigned col) + { + if (col < SPOT_MAX_ACCSETS) + mark.set(col); + else + acc_cond::mark_t{SPOT_MAX_ACCSETS}; + } + + // Describes if we want to test if it is a Büchi, co-Büchi,… type automaton. + enum cond_kind + { + BUCHI, + CO_BUCHI, + // A parity condition with a Inf as outermost term + INF_PARITY, + // A parity condition with a Fin as outermost term + FIN_PARITY + }; + + // This enum describes the status of an edge + enum edge_status + { + NOT_MARKED, + MARKED, + IMPOSSIBLE, + LINK_SCC + }; + + static bool + cond_type_main_aux(const twa_graph_ptr &aut, const cond_kind kind, + const bool need_equivalent, + std::vector &status, + std::vector &res_colors, + acc_cond &new_cond, bool &was_able_to_color) + { + auto &ev = aut->edge_vector(); + const auto ev_size = ev.size(); + const auto aut_init = aut->get_init_state_number(); + was_able_to_color = false; + status = std::vector(ev_size, NOT_MARKED); + res_colors = std::vector(ev_size); + // Used by accepting_transitions_scc. + auto keep = std::unique_ptr(make_bitvect(ev_size)); + keep->set_all(); + + // Number of edges colored by the procedure, used to test equivalence for + // parity + unsigned nb_colored = 0; + + // We need to say that a transition between 2 SCC doesn't have to get a + // color. + scc_info si(aut, aut_init, nullptr, nullptr, scc_info_options::NONE); + status[0] = LINK_SCC; + if (si.scc_count() > 1) + { + for (unsigned edge_number = 1; edge_number < ev_size; ++edge_number) + { + auto &e = ev[edge_number]; + if (si.scc_of(e.src) != si.scc_of(e.dst)) + { + status[edge_number] = LINK_SCC; + ++nb_colored; + } + } + } + + // If we need to convert to (co-)Büchi, we have to search one accepting + // set. With parity there is no limit. + bool want_parity = kind == cond_kind::FIN_PARITY || + kind == cond_kind::INF_PARITY; + unsigned max_iter = want_parity ? -1U : 1; + + unsigned color = want_parity ? SPOT_MAX_ACCSETS - 1 : 0; + // Do we want always accepting transitions? 
+ // Don't consider CO_BUCHI as it is done by Büchi + bool search_inf = kind != cond_kind::FIN_PARITY; + + using filter_data_t = std::pair &>; + + scc_info::edge_filter filter = + [](const twa_graph::edge_storage_t &t, unsigned, void *data) + -> scc_info::edge_filter_choice + { + auto &d = *static_cast(data); + // We only keep transitions that can be marked + if (d.second[d.first->edge_number(t)] == NOT_MARKED) + return scc_info::edge_filter_choice::keep; + else + return scc_info::edge_filter_choice::cut; + }; + std::vector not_decidable_transitions(ev_size, false); + auto aut_acc = aut->get_acceptance(); + auto aut_acc_comp = aut_acc.complement(); + for (unsigned iter = 0; iter < max_iter; ++iter) + { + // Share the code with Büchi-type + if (kind == CO_BUCHI) + std::swap(aut_acc, aut_acc_comp); + std::fill(not_decidable_transitions.begin(), + not_decidable_transitions.end(), false); + auto cond = acc_cond(search_inf ? aut_acc_comp : aut_acc); + auto filter_data = filter_data_t{aut, status}; + scc_info si(aut, aut_init, filter, &filter_data, + scc_info_options::TRACK_STATES); + bool worked = false; + unsigned ssc_size = si.scc_count(); + for (unsigned scc = 0; scc < ssc_size; ++scc) + { + // scc_info can detect that we will not be able to find an + // accepting/rejecting cycle. + if (!((search_inf && !si.is_accepting_scc(scc)) || + (!search_inf && !si.is_rejecting_scc(scc)))) + { + accepting_transitions_scc(si, scc, cond, {}, + not_decidable_transitions, *keep); + for (auto &e : si.inner_edges_of(scc)) + { + auto edge_number = aut->edge_number(e); + if (!not_decidable_transitions[edge_number]) + { + assert(!res_colors[edge_number]); + if (color != -1U) + assign_color(res_colors[edge_number], color); + was_able_to_color = true; + status[edge_number] = MARKED; + ++nb_colored; + keep->clear(edge_number); + worked = true; + } + } + } + } + + if (color-- == -1U) + break; + search_inf = !search_inf; + // If we were not able to add color, we have to add status 2 to + // remaining transitions. + if (!worked && !need_equivalent) + { + std::replace(status.begin(), status.end(), NOT_MARKED, IMPOSSIBLE); + break; + } + } + + acc_cond::acc_code new_code; + switch (kind) + { + case cond_kind::BUCHI: + new_code = acc_cond::acc_code::buchi(); + break; + case cond_kind::CO_BUCHI: + new_code = acc_cond::acc_code::cobuchi(); + break; + case cond_kind::INF_PARITY: + case cond_kind::FIN_PARITY: + new_code = acc_cond::acc_code::parity_max( + kind == cond_kind::INF_PARITY, SPOT_MAX_ACCSETS); + break; + } + + // We check parity + if (need_equivalent) + { + // For parity, it's equivalent if every transition has a color + // (status 1) or links 2 SCCs. + if (kind == cond_kind::INF_PARITY || kind == cond_kind::FIN_PARITY) + return nb_colored == ev_size - 1; + else + { + // For Büchi, we remove the transitions that have {0} in the + // result from aut and if there is an accepting cycle, res is not + // equivalent to aut. + // For co-Büchi, it's the same but we don't want to find a + // rejecting cycle. 
+ using filter_data_t = std::pair; + + scc_info::edge_filter filter = + [](const twa_graph::edge_storage_t &t, unsigned, void *data) + -> scc_info::edge_filter_choice + { + auto &d = *static_cast(data); + if (d.second.get(d.first->edge_number(t))) + return scc_info::edge_filter_choice::keep; + else + return scc_info::edge_filter_choice::cut; + }; + + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc)); + + filter_data_t filter_data = {aut, *keep}; + scc_info si(aut, aut_init, filter, &filter_data); + si.determine_unknown_acceptance(); + const auto num_scc = si.scc_count(); + for (unsigned scc = 0; scc < num_scc; ++scc) + if (si.is_accepting_scc(scc)) + { + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc_comp)); + return false; + } + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc_comp)); + } + } + new_cond = acc_cond(new_code); + return true; + } + + static twa_graph_ptr + cond_type_main(const twa_graph_ptr &aut, const cond_kind kind, + bool &was_able_to_color) + { + std::vector res_colors; + std::vector status; + acc_cond new_cond; + if (cond_type_main_aux(aut, kind, true, status, res_colors, new_cond, + was_able_to_color)) + { + auto res = make_twa_graph(aut, twa::prop_set::all()); + auto &res_vector = res->edge_vector(); + unsigned rv_size = res_vector.size(); + for (unsigned i = 1; i < rv_size; ++i) + res_vector[i].acc = res_colors[i]; + res->set_acceptance(new_cond); + return res; + } + return nullptr; + } + + twa_graph_ptr + parity_type_to_parity(const twa_graph_ptr &aut) + { + bool odd_cond, max_cond; + bool parit = aut->acc().is_parity(max_cond, odd_cond); + // If it is parity, we just copy + if (parit) + { + if (!max_cond) + return change_parity(aut, parity_kind_max, parity_style_any); + auto res = make_twa_graph(aut, twa::prop_set::all()); + res->copy_acceptance_of(aut); + return res; + } + bool was_able_to_color; + // If the automaton is parity-type with a condition that has Inf as + // outermost term + auto res = cond_type_main(aut, cond_kind::INF_PARITY, was_able_to_color); + + // If it was impossible to find an accepting edge, it is perhaps possible + // to find a rejecting transition + if (res == nullptr && !was_able_to_color) + res = cond_type_main(aut, cond_kind::FIN_PARITY, was_able_to_color); + if (res) + reduce_parity_here(res); + return res; + } + + twa_graph_ptr + buchi_type_to_buchi(const twa_graph_ptr &aut) + { + bool useless; + return cond_type_main(aut, cond_kind::BUCHI, useless); + } + + twa_graph_ptr + co_buchi_type_to_co_buchi(const twa_graph_ptr &aut) + { + bool useless; + return cond_type_main(aut, cond_kind::CO_BUCHI, useless); + } + // Old version of IAR. namespace { diff --git a/spot/twaalgos/toparity.hh b/spot/twaalgos/toparity.hh index 7d2701581..6aecf7659 100644 --- a/spot/twaalgos/toparity.hh +++ b/spot/twaalgos/toparity.hh @@ -156,4 +156,30 @@ namespace spot SPOT_API twa_graph_ptr // deprecated since Spot 2.9 iar_maybe(const const_twa_graph_ptr& aut, bool pretty_print = false); -} // namespace spot + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a parity max automaton preserving + /// structure when possible. + /// + /// Return nullptr if no such automaton is found. + /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + parity_type_to_parity(const twa_graph_ptr &aut); + + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a Büchi automaton preserving structure + /// when possible. + /// + /// Return nullptr if no such automaton is found. 
+ /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + buchi_type_to_buchi(const twa_graph_ptr &aut); + + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a co-Büchi automaton preserving structure + /// when possible. + /// + /// Return nullptr if no such automaton is found. + /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + co_buchi_type_to_co_buchi(const twa_graph_ptr &aut); +} From 6dd99e049b13ad168cbf60338e5733c8e08e803b Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Wed, 22 Jun 2022 10:27:33 +0200 Subject: [PATCH 111/606] to_parity: Rewrite the function and add new transformations * spot/twaalgos/synthesis.cc: Now needs to call reduce_parity. * spot/twaalgos/toparity.cc, spot/twaalgos/toparity.hh: here. * spot/twaalgos/zlktree.hh: make zielonka_node public * tests/core/ltlsynt.test, tests/python/games.ipynb, tests/python/synthesis.ipynb, tests/python/toparity.py: update tests --- spot/twaalgos/synthesis.cc | 2 +- spot/twaalgos/toparity.cc | 4131 ++++++++++++++++++++------------- spot/twaalgos/toparity.hh | 69 +- spot/twaalgos/zlktree.hh | 2 +- tests/core/ltlsynt.test | 22 +- tests/python/games.ipynb | 999 ++++---- tests/python/synthesis.ipynb | 4224 +++++++++++++++++----------------- tests/python/toparity.py | 133 +- 8 files changed, 5226 insertions(+), 4356 deletions(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 41aa736e2..6fb126ff8 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1005,7 +1005,7 @@ namespace spot if (gi.s == algo::LAR) { dpa = to_parity(aut); - // reduce_parity is called by to_parity() + reduce_parity_here(dpa, false); } else if (gi.s == algo::LAR_OLD) { diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index 3c3f03607..c936ef57b 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2018-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -18,28 +18,49 @@ // along with this program. If not, see . 
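// A minimal sketch of the call pattern expected after this rewrite,
// mirroring the synthesis.cc hunk above (`aut` is a placeholder
// twa_graph_ptr): reduce_parity_here() is no longer invoked by
// to_parity() itself, so callers run it explicitly.
//
//   spot::twa_graph_ptr dpa = spot::to_parity(aut);
//   spot::reduce_parity_here(dpa, false);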
#include "config.h" -#include -#include -#include -#include -#include +#include #include #include #include -#include -#include -#include #include #include -#include +#include #include -#include +#include + +#include +#include +#include + +namespace std +{ + template + inline void hash_combine(size_t &seed, T const &v) + { + seed ^= std::hash()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + } + + template + struct hash> + { + typedef vector argument_type; + typedef std::size_t result_type; + result_type operator()(argument_type const &in) const + { + size_t size = in.size(); + size_t seed = 0; + for (size_t i = 0; i < size; i++) + // Combine the hash of the current vector with the hashes of the + // previous ones + hash_combine(seed, in[i]); + return seed; + } + }; +} -#include namespace spot { - inline void assign_color(acc_cond::mark_t &mark, unsigned col) { @@ -76,7 +97,7 @@ namespace spot std::vector &res_colors, acc_cond &new_cond, bool &was_able_to_color) { - auto &ev = aut->edge_vector(); + auto& ev = aut->edge_vector(); const auto ev_size = ev.size(); const auto aut_init = aut->get_init_state_number(); was_able_to_color = false; @@ -98,7 +119,7 @@ namespace spot { for (unsigned edge_number = 1; edge_number < ev_size; ++edge_number) { - auto &e = ev[edge_number]; + auto& e = ev[edge_number]; if (si.scc_of(e.src) != si.scc_of(e.dst)) { status[edge_number] = LINK_SCC; @@ -260,7 +281,7 @@ namespace spot std::vector status; acc_cond new_cond; if (cond_type_main_aux(aut, kind, true, status, res_colors, new_cond, - was_able_to_color)) + was_able_to_color)) { auto res = make_twa_graph(aut, twa::prop_set::all()); auto &res_vector = res->edge_vector(); @@ -315,10 +336,2285 @@ namespace spot return cond_type_main(aut, cond_kind::CO_BUCHI, useless); } - // Old version of IAR. +// New version for paritizing + +// data type used in a memory for CAR and IAR. +// TAR is a particular case +#if MAX_ACCSETS < UCHAR_MAX + using memory_type = unsigned char; + #define MAX_MEM_ELEM UCHAR_MAX +#elif MAX_ACCSETS < USHRT_MAX + using memory_type = unsigned short; + #define MAX_MEM_ELEM USHRT_MAX +#else + using memory_type = unsigned; + #define MAX_MEM_ELEM UINT_MAX +#endif + + template + using memory = std::vector; + + // Maps a state of the automaton to a parity_state + class state_2_lar + { + public: + // If to_parity wants to find the newest or the oldest or both, we + // adapt the algorithms + enum memory_order + { + ONLY_NEWEST, + ONLY_OLDEST, + BOTH + }; + + class node + { + public: + // Color that lead to this node + memory_type color_; + // For a state in states_, any child can be taken. While a unique state + // could be used when we search an existing state, here we have + // to consider opt_.search_ex = False, opt_.use_last_post_process = True. + // This configuration can lead to 2 states in the same node. For example + // if we add [0 1 | 2 3] and [0 1 | 3 2] where '|' says which part of the + // memory can be reordered (right). 
+ std::vector states_; + std::vector children_; + // A timer used to detect which child is the oldest + unsigned timer_; + + node() : node(MAX_MEM_ELEM, -1U) + { + } + + node(memory_type val, unsigned timer) : color_(val), timer_(timer) + { + } + + ~node() + { + for (auto c : children_) + delete c; + } + }; + + std::vector nodes_; + memory_order order_; + unsigned timer_; + + state_2_lar() : timer_(0) + { + } + + void + init(unsigned nb_states, memory_order order) + { + order_ = order; + nodes_.reserve(nb_states); + for (unsigned i = 0; i < nb_states; ++i) + nodes_.push_back(new node()); + } + + ~state_2_lar() + { + for (auto x : nodes_) + delete x; + } + + void + add_new_path(unsigned state, const memory &vals, + unsigned res_state, unsigned nb_seen) + { + ++timer_; + node *current = nodes_[state]; + // Position in vals + int pos = vals.size() - 1; + while (true) + { + if (pos == (int)(nb_seen - 1)) + current->states_.push_back(res_state); + if (pos == -1) + break; + const unsigned current_val = vals[pos]; + auto child = std::find_if(current->children_.begin(), + current->children_.end(), + [&](const auto &child) constexpr + { return child->color_ == current_val; }); + // If we don't have a child with the corresponding color… + if (child == current->children_.end()) + { + auto nn = new node(current_val, timer_); + current->children_.push_back(nn); + current = nn; + } + else + { + // If get_compatible_state wants the most recent + // (opt_.use_last or opt_.use_last_post_process), we help this + // function by moving this node to the last position. + // Otherwise the oldest leaf will be reachable from the first child. + // If we have use_last = false and use_last_post_process = true, + // we need to access to the oldest and newest child. As the tree is + // smallest when we want to access to the oldest value, we continue + // to move the value to the last position and compute the oldest + // child in get_compatible_state. + if (order_ != memory_order::ONLY_OLDEST) + { + std::iter_swap(child, current->children_.end() - 1); + current = current->children_.back(); + } + else + current = *child; + } + --pos; + } + } + + unsigned + get_compatible_state(unsigned state, const memory &m, + unsigned seen_nb, + bool use_last) const + { + int pos = m.size() - 1; + unsigned res = -1U; + node *current = nodes_[state]; + while (true) + { + const auto ¤t_states = current->states_; + if (!current_states.empty()) + res = use_last ? current_states.back() : current_states.front(); + + const auto ¤t_children = current->children_; + if (current_children.empty()) + { + assert(current->color_ == MAX_MEM_ELEM || pos == -1); + return res; + } + // If we are in the part of the memory where the order does not matter, + // we just take the oldest/newest state. + if (pos < (int)seen_nb) + { + if (order_ == BOTH) + { + if (!use_last) + current = *std::min_element( + current_children.begin(), current_children.end(), + [](const auto &x, const auto &y) constexpr + { return x->timer_ < y->timer_; }); + else + current = current_children.back(); + } + else + { + // add_new_path constructed the tree such that the oldest/newest + // leaf is reachable from the first child. + current = use_last ? 
current_children.back() + : current_children.front(); + } + } + else + { + auto current_val = m[pos]; + auto ch = std::find_if( + current_children.begin(), current_children.end(), + [&](const auto &x) constexpr + { return x->color_ == current_val; }); + if (ch != current_children.end()) + current = *ch; + else + return -1U; + } + --pos; + } + } + }; + + class to_parity_generator + { + private: + class relation + { + public: + // Size of the matrix + unsigned size_; + // A line/column is indexed by a partial memory + const std::vector> labels_; + // Matrix such that vals_[x][y] = ⊤ ⇔ vals_[x] > vals_[y] + std::vector vals_; + + inline bool + at(unsigned i, unsigned j) const + { + return vals_.at(i * size_ + j); + } + + inline void + set(unsigned i, unsigned j, bool val) + { + vals_[i * size_ + j] = val; + } + + // Test if m1 ⊆ m2 + bool is_included(memory m1, memory m2) + { + if (m1.size() > m2.size()) + return false; + assert(std::is_sorted(m1.begin(), m1.end())); + assert(std::is_sorted(m2.begin(), m2.end())); + memory diff; + std::set_union(m1.begin(), m1.end(), m2.begin(), m2.end(), + std::inserter(diff, diff.begin())); + return diff.size() == m2.size(); + } + + // Supposes that there is no duplicates. + relation(std::vector> &labels) + : size_(labels.size()), labels_(labels) + { + unsigned long long size_vals; + if (__builtin_umulll_overflow(size_, size_, &size_vals)) + throw std::bad_alloc(); + vals_ = std::vector(size_vals); + for (unsigned i = 0; i < size_; ++i) + for (unsigned j = 0; j < size_; ++j) + // We cannot have vals_[i] > vals_[j] and vals_[j] > vals_[i] + if (!at(j, i)) + set(i, j, (i != j && is_included(labels_[j], labels_[i]))); + // Remove x > z if ∃y s.t. x > y > z + simplify_relation(); + } + + // Apply a transitive reduction + void + simplify_relation() + { + for (unsigned j = 0; j < size_; ++j) + for (unsigned i = 0; i < size_; ++i) + if (at(i, j)) + for (unsigned k = 0; k < size_; ++k) + if (at(j, k)) + set(i, k, false); + } + + template + void + add_to_res_(const memory ¤t, + const memory &other, + memory &result) + { + assert(std::is_sorted(current.begin(), current.end())); + assert(std::is_sorted(other.begin(), other.end())); + std::set_difference(current.begin(), current.end(), + other.begin(), other.end(), + std::inserter(result, result.end())); + } + + // Gives a compatible ordered partial memory for the partial memory + // partial_mem. + memory + find_order(const memory &partial_mem) + { + // Now if we want to find an order, we start from the line + // that contains partial_mem in the matrix, we find a more restrictive + // order and add the value that are used in partial_mem but not in this + // "child" value. + // The call to simplify_relation implies that we are sure we have + // used the longest possible path. + memory result; + auto elem = std::find(labels_.begin(), labels_.end(), partial_mem); + assert(elem != labels_.end()); + // Line that contains partial_mem + unsigned i = std::distance(labels_.begin(), elem); + while (true) + { + // The interval corresponding to the line i + auto vals_i_begin = vals_.begin() + (i * size_); + auto vals_i_end = vals_i_begin + size_; + // End of line i + auto child = std::find(vals_i_begin, vals_i_end, true); + // If there is a more restrictive memory, we use this "child" + if (child != vals_i_end) + { + unsigned child_pos = std::distance(vals_i_begin, child); + add_to_res_(labels_[i], labels_[child_pos], result); + i = child_pos; + } + // If there is no more restrictive memory, we just add the remaining + // memory. 
+ else + { + add_to_res_(labels_[i], {}, result); + break; + } + } + // The order want that a value that is in the lowest value is a + // the head. + std::reverse(result.begin(), result.end()); + return result; + } + }; + + class scc_info_to_parity + { + private: + scc_info si_; + + public: + scc_info_to_parity(const const_twa_graph_ptr aut, + const acc_cond::mark_t removed = {}) + : si_(scc_and_mark_filter(aut, removed)) + { + } + + scc_info_to_parity(const scc_info lower_si, + const std::shared_ptr keep) + : si_(scc_and_mark_filter(lower_si, 0, acc_cond::mark_t{}, *keep), + scc_info_options::NONE) + { + } + + std::vector + split_aut(acc_cond::mark_t mark = {}) + { + auto aut = si_.get_aut(); + const auto num_scc = si_.scc_count(); + const unsigned aut_num_states = aut->num_states(); + std::vector res(num_scc); + std::vector aut_to_res; + aut_to_res.reserve(aut_num_states); + for (auto &g : res) + { + g = make_twa_graph(aut->get_dict()); + g->copy_ap_of(aut); + g->copy_acceptance_of(aut); + g->prop_copy(aut, {true, true, false, false, false, true}); + auto orig = new std::vector(); + g->set_named_prop("original-states", orig); + } + const auto tp_orig_aut = + aut->get_named_prop>("original-states"); + for (unsigned i = 0; i < aut_num_states; ++i) + { + unsigned scc_i = si_.scc_of(i); + auto &g = res[scc_i]; + unsigned ns = g->new_state(); + unsigned ori = tp_orig_aut ? (*tp_orig_aut)[i] : i; + auto pr = g->get_named_prop>("original-states"); + pr->push_back(ori); + aut_to_res.push_back(ns); + } + + for (auto &e : aut->edges()) + { + unsigned src_scc = si_.scc_of(e.src); + unsigned dst_scc = si_.scc_of(e.dst); + if (src_scc == dst_scc && !(e.acc & mark)) + res[src_scc]->new_edge(aut_to_res[e.src], aut_to_res[e.dst], + e.cond, e.acc); + } + return res; + } + + std::vector + split_aut(const std::shared_ptr &keep) + { + auto aut = si_.get_aut(); + const auto num_scc = si_.scc_count(); + const unsigned aut_num_states = aut->num_states(); + std::vector res(num_scc); + std::vector aut_to_res; + aut_to_res.reserve(aut_num_states); + for (auto &g : res) + { + g = make_twa_graph(aut->get_dict()); + g->copy_ap_of(aut); + g->copy_acceptance_of(aut); + g->prop_copy(aut, {true, true, false, false, false, true}); + auto orig = new std::vector(); + g->set_named_prop("original-states", orig); + } + const auto tp_orig_aut = + aut->get_named_prop>("original-states"); + for (unsigned i = 0; i < aut_num_states; ++i) + { + unsigned scc_i = si_.scc_of(i); + auto &g = res[scc_i]; + unsigned ns = g->new_state(); + unsigned ori = tp_orig_aut ? (*tp_orig_aut)[i] : i; + auto pr = g->get_named_prop>("original-states"); + pr->push_back(ori); + aut_to_res.push_back(ns); + } + + const auto &ev = si_.get_aut()->edge_vector(); + auto ev_size = ev.size(); + for (unsigned i = 0; i < ev_size; ++i) + if (keep->get(i)) + { + auto &e = ev[i]; + unsigned scc_src = si_.scc_of(e.src); + if (scc_src == si_.scc_of(e.dst)) + res[scc_src]->new_edge(aut_to_res[e.src], aut_to_res[e.dst], + e.cond, e.acc); + } + return res; + } + + unsigned scc_count() + { + return si_.scc_count(); + } + + unsigned scc_of(unsigned state) + { + return si_.scc_of(state); + } + }; + + // Original automaton + const const_twa_graph_ptr aut_; + // Resulting parity automaton + twa_graph_ptr res_; + // options + to_parity_options opt_; + // nullptr if opt_.pretty_print is false + std::vector *names_ = nullptr; + // original_states. 
Is propagated if the original automaton has + // this named property + std::vector *orig_ = nullptr; + scc_info_to_parity si_; + bool need_purge_ = false; + // Tells if we are constructing a parity max odd + bool is_odd_ = false; + // min_color used in the automaton + 1 (result of max_set). + std::optional min_color_used_; + std::optional max_color_scc_; + std::optional max_color_used_; + std::vector state_to_res_; + std::vector res_to_aut_; + // Map a state of aut_ to every copy of this state. Used by a recursive call + // to to_parity by parity_prefix for example + std::vector> *state_to_nums_ = nullptr; + unsigned algo_used_ = 0; + + enum algorithm + { + CAR = 1, + IAR_RABIN = 1 << 1, + IAR_STREETT = 1 << 2, + TAR = 1 << 3, + RABIN_TO_BUCHI = 1 << 4, + STREETT_TO_COBUCHI = 1 << 5, + PARITY_TYPE = 1 << 6, + BUCHI_TYPE = 1 << 7, + CO_BUCHI_TYPE = 1 << 8, + PARITY_EQUIV = 1 << 9, + PARITY_PREFIX = 1 << 10, + PARITY_PREFIX_GENERAL = 1 << 11, + GENERIC_EMPTINESS = 1 << 12, + PARTIAL_DEGEN = 1 << 13, + ACC_CLEAN = 1 << 14, + NONE = 1 << 15 + }; + + static std::string + algorithm_to_str(const algorithm &algo) + { + switch (algo) + { + case CAR: + return "CAR"; + case IAR_RABIN: + return "IAR (Rabin)"; + case IAR_STREETT: + return "IAR (Streett)"; + case TAR: + return "TAR"; + case NONE: + return "None"; + case BUCHI_TYPE: + return "Büchi-type"; + case CO_BUCHI_TYPE: + return "co-Büchi-type"; + case PARITY_TYPE: + return "Parity-type"; + case PARITY_EQUIV: + return "Parity equivalent"; + case GENERIC_EMPTINESS: + return "Generic emptiness"; + case STREETT_TO_COBUCHI: + return "Streett to co-Büchi"; + case RABIN_TO_BUCHI: + return "Rabin to Büchi"; + case PARITY_PREFIX: + return "Parity-prefix"; + case PARITY_PREFIX_GENERAL: + return "Parity-prefix general"; + case PARTIAL_DEGEN: + return "Partial degeneralization"; + case ACC_CLEAN: + return "acceptance cleanup"; + } + SPOT_UNREACHABLE(); + } + + template + struct to_parity_state + { + unsigned state; + unsigned state_scc; + memory mem; + + to_parity_state(unsigned st, unsigned st_scc, memory m) : + state(st), state_scc(st_scc), mem(m) + {} + + to_parity_state(const to_parity_state &) = default; + to_parity_state(to_parity_state &&) noexcept = default; + + ~to_parity_state() noexcept = default; + + bool + operator<(const to_parity_state &other) const + { + if (state < other.state) + return true; + if (state > other.state) + return false; + if (state_scc < other.state_scc) + return true; + if (state_scc > other.state_scc) + return false; + if (mem < other.mem) + return true; + return false; + } + + std::string + to_str(const algorithm &algo) const + { + std::stringstream s; + s << state; + // An empty memory does not mean that we don't use LAR. For example + // if the condition is true. We don't display a useless memory. 
+ if (!mem.empty()) + { + s << ",["; + const char delim = ','; + s << ((unsigned)mem[0]); + auto mem_size = mem.size(); + for (unsigned i = 1; i < mem_size; ++i) + s << delim << ((unsigned)mem[i]); + s << ']'; + } + s << ',' << algorithm_to_str(algo); + return s.str(); + } + + bool operator==(const to_parity_state &other) const + { + return state == other.state && state_scc == other.state_scc + && mem == other.mem; + } + }; + + template + struct to_parity_hash + { + size_t operator()(to_parity_state const &tp) const + { + size_t result = std::hash>{}(tp.mem); + std::hash_combine(result, tp.state); + std::hash_combine(result, tp.state_scc); + return result; + } + }; + + template + unsigned + add_res_state(const algorithm &algo, const to_parity_state &ps) + { + if (names_) + names_->emplace_back(ps.to_str(algo)); + orig_->push_back(ps.state); + auto res = res_->new_state(); + if (opt_.datas) + { + algo_used_ |= algo; + ++opt_.datas->nb_states_created; + } + assert(ps.state < aut_->num_states()); + // state_to_res_ could be updated even if there is already a value. + // However it would lead to a result close to BSCC. + // So it is easier to show the influence of BSCC when the value is not + // changed when there is already a value. + if (state_to_res_[ps.state] == -1U) + state_to_res_[ps.state] = res; + if (state_to_nums_) + { + assert(ps.state < state_to_nums_->size()); + (*state_to_nums_)[ps.state].push_back(res); + } + res_to_aut_.push_back(ps.state); + return res; + } + + unsigned + add_res_edge(unsigned res_src, unsigned res_dst, + const acc_cond::mark_t &mark, const bdd &cond, + const bool can_merge_edge = true, + robin_hood::unordered_map* + edge_cache = nullptr) + { + // In a parity automaton we just need the maximal value + auto simax = mark.max_set(); + + const bool need_cache = edge_cache != nullptr && can_merge_edge; + long long key = 0; + if (need_cache) + { + constexpr auto unsignedsize = sizeof(unsigned) * 8; + key = (long long)simax << unsignedsize | res_dst; + auto cache_value = edge_cache->find(key); + if (cache_value != edge_cache->end()) + { + auto edge_index = cache_value->second; + auto &existing_edge = res_->edge_vector()[edge_index]; + existing_edge.cond |= cond; + return edge_index; + } + } + + auto simplified = mark ? acc_cond::mark_t{simax - 1} + : acc_cond::mark_t{}; + assert(res_src != -1U); + assert(res_dst != -1U); + + // No edge already done in the current scc. 
+ if (!max_color_scc_.has_value()) + max_color_scc_.emplace(simax); + else + max_color_scc_.emplace(std::max(*max_color_scc_, simax)); + + // If it is the first edge of the result + if (!min_color_used_.has_value()) + { + assert(!max_color_used_.has_value()); + max_color_used_.emplace(simax); + min_color_used_.emplace(simax); + } + else + { + min_color_used_.emplace(std::min(*min_color_used_, simax)); + max_color_used_.emplace(std::max(*max_color_used_, simax)); + } + + auto new_edge_num = res_->new_edge(res_src, res_dst, cond, simplified); + if (need_cache) + edge_cache->emplace(std::make_pair(key, new_edge_num)); + + if (opt_.datas) + ++opt_.datas->nb_edges_created; + return new_edge_num; + } + + // copy + using coloring_function = + std::function; + + void + apply_copy_general(const const_twa_graph_ptr &sub_automaton, + const coloring_function &col_fun, + const algorithm &algo) + { + if (opt_.datas) + algo_used_ |= algo; + auto init_states = + sub_automaton->get_named_prop>("original-states"); + assert(init_states); + std::vector state_2_res_local; + auto sub_aut_ns = sub_automaton->num_states(); + state_2_res_local.reserve(sub_aut_ns); + for (unsigned state = 0; state < sub_aut_ns; ++state) + { + to_parity_state ps = {(*init_states)[state], state, {}}; + state_2_res_local.push_back(add_res_state(algo, ps)); + } + for (auto &e : sub_automaton->edges()) + { + auto new_mark = col_fun(e); + add_res_edge(state_2_res_local[e.src], state_2_res_local[e.dst], + new_mark, e.cond); + } + } + + // Case where one color is replaced by another. + // new_colors is a vector such that new_colors[i + 1] = j means that the + // color i is replaced by j. new_colors[0] is the value for an uncolored + // edge. + void + apply_copy(const const_twa_graph_ptr &sub_aut, + const std::vector &new_colors, + const algorithm &algo) + { + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + acc_cond::mark_t res{}; + for (auto c : edge.acc.sets()) + { + auto new_col = new_colors[c + 1]; + if (new_col != -1U) + assign_color(res, new_col); + } + if (!edge.acc && new_colors[0] != -1U) + assign_color(res, new_colors[0]); + return res; + }; + apply_copy_general(sub_aut, col_fun, algo); + } + + // Case where new_color is a function such that edge_vector[i] should + // be colored by new_color[i]. + void + apply_copy_edge_index(const const_twa_graph_ptr &sub_aut, + const std::vector &new_color, + const algorithm &algo) + { + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + auto res = new_color[sub_aut->edge_number(edge)]; + if (res == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{res}; + }; + apply_copy_general(sub_aut, col_fun, algo); + } + + // Create a memory for the first state created by apply_lar. + // If the algorithm is IAR, it also fills pairs_indices that + // contains the indices of the pairs that can be moved to the head of + // the memory. 
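+    // For instance with CAR and three colors the initial memory is
+    // [0, 1, 2]; with TAR and four edges it is [1, 2, 3, 4] (edge 0 does
+    // not exist in the edge vector); with IAR it holds one entry per
+    // Rabin/Streett pair, i.e. [0, 1, ..., pairs.size()-1].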
+ template + memory + initial_memory_of(const const_twa_graph_ptr &sub_aut, + const std::vector &pairs, + std::vector, memory>> &relations) + { + unsigned init_state = sub_aut->get_init_state_number(); + if constexpr (algo == algorithm::CAR) + { + unsigned max_set = sub_aut->get_acceptance().used_sets().max_set(); + memory values(max_set); + std::iota(values.begin(), values.end(), 0); + if (opt_.force_order) + apply_move_heuristic(init_state, values, max_set, relations); + return values; + } + else if constexpr (algo == algorithm::TAR) + { + if (UINT_MAX < sub_aut->num_edges()) + { + throw std::runtime_error("Too many edges for TAR"); + } + const auto &ev = sub_aut->edge_vector(); + const auto ev_size = ev.size(); + memory values(ev_size - 1); + // 0 is not an edge number + std::iota(values.begin(), values.end(), 1); + if (opt_.force_order && sub_aut->num_states() > 1) + { + unsigned free_pos = 0; + // If a transition goes to state, it is at the head of the memory. + for (unsigned i = 1; i < ev_size; ++i) + if (ev[i].dst == init_state) + { + std::swap(values[i - 1], values[free_pos]); + ++free_pos; + } + } + return values; + } + else + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + memory values(pairs.size()); + std::iota(values.begin(), values.end(), 0); + if (opt_.force_order) + apply_move_heuristic(init_state, values, values.size(), relations); + return values; + } + } + + // LAR + algorithm + choose_lar(const acc_cond &scc_condition, + std::vector &pairs, + const unsigned num_edges) + { + std::vector pairs1, pairs2; + bool is_rabin_like = scc_condition.is_rabin_like(pairs1); + bool is_streett_like = scc_condition.is_streett_like(pairs2); + // If we cannot apply IAR and TAR and CAR are not used + if ((!(is_rabin_like || is_streett_like) || !opt_.iar) + && !(opt_.car || opt_.tar)) + throw std::runtime_error("to_parity needs CAR or TAR to process " + "a condition that is not a Rabin or Streett " + "condition or if IAR is not enabled"); + remove_duplicates(pairs1); + remove_duplicates(pairs2); + unsigned num_col = scc_condition.num_sets(); + + auto num_pairs1 = (opt_.iar && is_streett_like) ? pairs2.size() : -1UL; + auto num_pairs2 = (opt_.iar && is_rabin_like) ? pairs1.size() : -1UL; + + // In practice, if the number of pairs is bigger than the number of + // colors, it will create a color greater than SPOT_MAX_ACCSETS, so + // we don't consider that it is a Rabin condition. + // In this case, if CAR or TAR is not used, it will throw a Runtime + // Error. + + bool iar_overflow = false; + if ((num_pairs1 > MAX_MEM_ELEM) && (num_pairs2 > MAX_MEM_ELEM)) + { + num_pairs1 = num_pairs2 = -1U; + iar_overflow = true; + } + + const std::vector + number_elements = + { + (opt_.iar && is_streett_like) ? pairs2.size() : -1UL, + (opt_.iar && is_rabin_like) ? pairs1.size() : -1UL, + opt_.car ? num_col : -1UL, + opt_.tar ? num_edges : -1UL}; + constexpr std::array algos = {IAR_STREETT, IAR_RABIN, CAR, + TAR}; + int min_pos = std::distance(number_elements.begin(), + std::min_element(number_elements.begin(), + number_elements.end())); + + if (number_elements[min_pos] == -1U && iar_overflow) + throw std::runtime_error( + "Too many Rabin/Streett pairs, try to increase SPOT_MAX_ACCSETS"); + algorithm algo = algos[min_pos]; + if (algo == IAR_RABIN) + pairs = pairs1; + else if (algo == IAR_STREETT) + pairs = pairs2; + return algo; + } + + // Remove duplicates in pairs without changing the order. 
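+    // For instance [p0, p1, p0, p2] becomes [p0, p1, p2]: only the first
+    // occurrence of each pair is kept, in its original position.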
+ static void + remove_duplicates(std::vector &pairs) + { + std::vector res; + res.reserve(pairs.size()); + for (auto &elem : pairs) + if (std::find(res.begin(), res.end(), elem) == res.end()) + res.emplace_back(elem); + pairs = res; + } + + template + acc_cond::mark_t + fin(const std::vector &pairs, unsigned k) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + if constexpr (algo == IAR_RABIN) + return pairs[k].fin; + else + return pairs[k].inf; + } + + template + acc_cond::mark_t + inf(const std::vector &pairs, unsigned k) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + if constexpr (algo == IAR_RABIN) + return pairs[k].inf; + else + return pairs[k].fin; + } + + template + std::vector, memory>> + find_relations(const const_twa_graph_ptr &sub_aut, + const std::vector &pairs, + const std::set &pairs_indices) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT || algo == CAR); + const unsigned sub_aut_num_states = sub_aut->num_states(); + // Set of memory elements that can be at the head of the memory for + // a given state. + std::vector>> incomem(sub_aut_num_states); + // Add a mark with all colors/pairs to deal with the order of the + // original state + if constexpr (algo == algorithm::CAR) + { + auto ms = sub_aut->get_acceptance().used_sets().max_set(); + memory m(ms); + std::iota(m.begin(), m.end(), 0); + incomem[sub_aut->get_init_state_number()].insert(std::move(m)); + } + else if constexpr (algo == IAR_RABIN || algo == IAR_STREETT) + { + memory m(pairs_indices.begin(), pairs_indices.end()); + incomem[sub_aut->get_init_state_number()].insert(std::move(m)); + } + + for (auto &e : sub_aut->edges()) + { + auto e_sets = e.acc.sets(); + if constexpr (algo == algorithm::CAR) + incomem[e.dst].insert({e_sets.begin(), e_sets.end()}); + // IAR + else + { + memory parti; + for (unsigned k : pairs_indices) + if (e.acc & fin(pairs, k)) + parti.push_back(k); + incomem[e.dst].insert(parti); + } + } + std::vector, memory>> res; + res.reserve(sub_aut_num_states); + for (unsigned i = 0; i < sub_aut_num_states; ++i) + { + std::map, memory> ma; + // Memory incoming to state i. + std::vector> elem(incomem[i].begin(), + incomem[i].end()); + relation rel(elem); + for (auto &x : rel.labels_) + ma.insert({x, rel.find_order(x)}); + res.emplace_back(ma); + } + return res; + } + + void + apply_move_heuristic(unsigned state, memory &m, + unsigned nb_seen, + std::vector, + memory>> &relations) + { + // If we move 0 or 1 color we cannot change the order + if (nb_seen < 2) + return; + memory seen{m.begin(), m.begin() + nb_seen}; + const auto &new_prefix = relations[state][seen]; + + unsigned new_prefix_size = new_prefix.size(); + for (unsigned i = 0; i < new_prefix_size; ++i) + m[i] = new_prefix[i]; + } + + template + void + find_new_memory(unsigned state, memory &m, unsigned edge_number, + const acc_cond::mark_t &colors, + const std::vector &pairs, + const std::set &pairs_indices, + unsigned &nb_seen, + unsigned &h, + std::vector, memory>> &relations) + { + if constexpr (algo == TAR) + { + (void)state; + auto pos = std::find(m.begin(), m.end(), edge_number); + assert(pos != m.end()); + h = std::distance(m.begin(), pos); + std::rotate(m.begin(), pos, pos + 1); + } + else if constexpr (algo == CAR) + { + (void)edge_number; + for (auto k : colors.sets()) + { + auto it = std::find(m.begin(), m.end(), k); + // A color can exist in the automaton but not in the condition. 
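+          // (such a color is simply ignored).  For instance with memory
+          // [0, 1, 2] and edge colors {2}, color 2 is rotated to the
+          // front: the memory becomes [2, 0, 1] and h is 3.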
+ if (it != m.end()) + { + h = std::max(h, (unsigned)(it - m.begin()) + 1); + std::rotate(m.begin(), it, it + 1); + ++nb_seen; + } + } + if (opt_.force_order) + { + // apply_move_heuristic needs an increasing list of values + std::reverse(m.begin(), m.begin() + nb_seen); + apply_move_heuristic(state, m, nb_seen, relations); + } + } + else if constexpr (algo == IAR_RABIN || algo == IAR_STREETT) + { + (void)edge_number; + for (auto k = pairs_indices.rbegin(); k != pairs_indices.rend(); ++k) + if (colors & fin(pairs, *k)) + { + ++nb_seen; + auto it = std::find(m.begin(), m.end(), *k); + assert(it != m.end()); + // move the pair in front of the permutation + std::rotate(m.begin(), it, it + 1); + } + if (opt_.force_order) + { + // As with CAR, in relation the partial memory is sorted. That is + // why the previous loop use a reverse iterator. + assert(std::is_sorted(m.begin(), m.begin() + nb_seen)); + apply_move_heuristic(state, m, nb_seen, relations); + } + } + } + + template + void + compute_new_color_lar(const const_twa_graph_ptr &sub_aut, + const memory ¤t_mem, + const memory &new_perm, + unsigned &h, + const acc_cond::mark_t &edge_colors, + acc_cond::mark_t &acc, + const std::vector &pairs, + robin_hood::unordered_map& + acc_cache) + { + // This function should not be called with algo ∉ [CAR, IAR, TAR]. + static_assert(algo == CAR || algo == IAR_RABIN || algo == IAR_STREETT + || algo == TAR); + assert(!acc); + auto sub_aut_cond = sub_aut->acc(); + if constexpr (algo == CAR) + { + acc_cond::mark_t m(new_perm.begin(), new_perm.begin() + h); + auto cc = acc_cache.find(m); + bool rej; + if (cc != acc_cache.end()) + rej = cc->second; + else + { + rej = !sub_aut_cond.accepting(m); + acc_cache.insert({m, rej}); + } + unsigned value = 2 * h + rej - 1; + if (value != -1U) + assign_color(acc, value); + return; + } + else if constexpr (algo == TAR) + { + auto &edge_vector = sub_aut->edge_vector(); + acc_cond::mark_t acc_seen {}; + for (unsigned i = 0; i <= h; ++i) + acc_seen |= edge_vector[new_perm[i]].acc; + + auto cc = acc_cache.find(acc_seen); + bool rej; + if (cc != acc_cache.end()) + rej = cc->second; + else + { + rej = !sub_aut_cond.accepting(acc_seen); + acc_cache.insert({acc_seen, rej}); + } + + unsigned acc_col = 2 * h + rej - 1; + if (acc_col != -1U) + assign_color(acc, acc_col); + } + else + { + // IAR_RABIN produces a parity max even condition. If res_ + // is parity max odd, we add 1 to a transition to produce a parity max + // odd automaton. + unsigned delta_acc = ((algo == IAR_RABIN) && is_odd_) - 1; + + unsigned maxint = -1U; + for (int k = current_mem.size() - 1; k >= 0; --k) + { + unsigned pk = current_mem[k]; + + if (!inf(pairs, pk) || (edge_colors + & (pairs[pk].fin | pairs[pk].inf))) + { + maxint = k; + break; + } + } + + unsigned value; + if (maxint == -1U) + value = delta_acc; + else if (edge_colors & fin(pairs, current_mem[maxint])) + value = 2 * maxint + 2 + delta_acc; + else + value = 2 * maxint + 1 + delta_acc; + + if (value != -1U) + assign_color(acc, value); + } + } + + void + change_to_odd() + { + if (is_odd_) + return; + is_odd_ = true; + // We can reduce if we don't have an edge without color. + bool can_reduce = (min_color_used_.has_value() && *min_color_used_ != 0); + int shift; + + if (can_reduce) + shift = -1 * (*min_color_used_ - (*min_color_used_ % 2) + 1); + else + shift = 1; + + // If we cannot decrease and we already the the maximum color, we don't + // have to try. Constructs a mark_t to avoid to make report_too_many_sets + // public. 
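+      // (the mark_t below is built only for its side effect: creating a
+      // mark with set number SPOT_MAX_ACCSETS reports the "too many
+      // acceptance sets" error when the shifted color would not fit.)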
+ if (!can_reduce && max_color_used_.value_or(-1) + shift == MAX_ACCSETS) + acc_cond::mark_t {SPOT_MAX_ACCSETS}; + if (max_color_used_.has_value()) + *max_color_used_ += shift; + if (min_color_used_.has_value()) + *min_color_used_ += shift; + for (auto &e : res_->edges()) + { + auto new_val = e.acc.max_set() - 1 + shift; + if (new_val != -1U) + e.acc = { new_val }; + else + e.acc = {}; + } + } + + template + void + apply_lar(twa_graph_ptr &sub_aut, + const std::vector &pairs) + { + if constexpr (algo != IAR_RABIN) + change_to_odd(); + // avoids to call LAR if there is one color/pair/transition. + // LAR can work with this kind of condition but some optimizations + // like searching an existing state suppose that there is at least + // one element. + if ((algo == CAR && sub_aut->acc().num_sets() == 0) + || ((algo == IAR_RABIN || algo == IAR_STREETT) && pairs.empty()) + || (algo == TAR && sub_aut->num_edges() == 0)) + { + bool need_col = sub_aut->acc().is_t() != is_odd_; + auto col_fun = [&](const twa_graph::edge_storage_t &) + { + return need_col ? acc_cond::mark_t{0} : acc_cond::mark_t{}; + }; + apply_copy_general(sub_aut, col_fun, algo); + return; + } + // We sometimes need to have a list of the states + // of res_ constructed by this call to apply_lar. + const bool use_bscc = opt_.bscc; + const bool use_last_post_process = opt_.use_last_post_process; + constexpr bool is_tar = algo == TAR; + const bool need_tree = !is_tar + && (opt_.search_ex || use_last_post_process); + const bool need_state_list = use_last_post_process || use_bscc; + const bool is_dfs = opt_.lar_dfs; + // state_2_lar adapts add_new_state such that depending on the + // value of use_last in get_compatible_state, we will be able + // to find a compatible state faster. + state_2_lar::memory_order order; + if (!opt_.use_last) + { + if (opt_.use_last_post_process) + order = state_2_lar::memory_order::BOTH; + else + order = state_2_lar::memory_order::ONLY_OLDEST; + } + else + order = state_2_lar::memory_order::ONLY_NEWEST; + state_2_lar s2l; + if (need_tree) + s2l.init(sub_aut->num_states(), order); + std::vector states_scc_res; + if (need_state_list) + states_scc_res.reserve(sub_aut->num_states()); + auto init = + sub_aut->get_named_prop>("original-states"); + + if (opt_.propagate_col) + propagate_marks_here(sub_aut); + + auto init_state = sub_aut->get_init_state_number(); + robin_hood::unordered_map, + unsigned, to_parity_hash> ps_2_num; + unsigned lb_size; + if constexpr (algo == TAR) + lb_size = aut_->num_edges(); + else if constexpr (algo == CAR) + lb_size = aut_->num_states() * aut_->acc().num_sets(); + else + lb_size = aut_->num_states() * pairs.size(); + // num_2_ps maps a state of the result to a parity_state. As this function + // does not always create the first state, we need to add + // "- nb_states_before" to get a value. + const unsigned nb_states_before = res_->num_states(); + std::vector> num_2_ps; + // At least one copy of each state will be created. + num_2_ps.reserve(lb_size + num_2_ps.size()); + ps_2_num.reserve(lb_size + num_2_ps.size()); + + std::deque todo; + // return a pair new_state, is_new such that + // ps is associated to the state new_state in res_ + // and is_new is true if a new state was created by + // get_state + // We store 2 unsigned in a long long. 
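+      // (add_res_edge packs the edge's maximal color and its destination
+      // into a single cache key: (simax << 8*sizeof(unsigned)) | res_dst,
+      // hence the static_assert below.)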
+ static_assert(sizeof(long long) >= 2 * sizeof(unsigned)); + robin_hood::unordered_map* edge_cache = nullptr; + if (!use_last_post_process) + { + edge_cache = new robin_hood::unordered_map(); + edge_cache->reserve(sub_aut->num_edges()); + } + auto get_state = [&](const to_parity_state &&ps) constexpr + { + auto it = ps_2_num.find(ps); + if (it == ps_2_num.end()) + { + unsigned nb = add_res_state(algo, ps); + ps_2_num[ps] = nb; + assert(nb == num_2_ps.size() + nb_states_before); + num_2_ps.emplace_back(ps); + todo.push_back(nb); + if (need_state_list) + states_scc_res.push_back(nb); + return std::pair{nb, true}; + } + return std::pair{it->second, false}; + }; + + std::set pairs_indices; + std::vector, memory>> relations; + if constexpr (algo == IAR_STREETT || algo == IAR_RABIN) + { + const auto num_pairs = pairs.size(); + for (unsigned k = 0; k < num_pairs; ++k) + if (fin(pairs, k)) + pairs_indices.insert(k); + } + + if constexpr (algo != TAR) + if (opt_.force_order) + relations = find_relations(sub_aut, pairs, pairs_indices); + + auto m = initial_memory_of(sub_aut, pairs, relations); + + assert(init); + auto init_res = get_state({(*init)[init_state], init_state, m}).first; + // A path is added when it is a destination. That is why we need to + // add the initial state. + unsigned nb_edges_before = res_->num_edges(); + std::vector edge_to_seen_nb; + if (use_last_post_process && algo != TAR) + edge_to_seen_nb.reserve(sub_aut->num_edges()); + if constexpr(!is_tar) + if (need_tree) + s2l.add_new_path(init_state, m, init_res, 0); + + robin_hood::unordered_map acc_cache; + // Main loop + while (!todo.empty()) + { + if (edge_cache) + edge_cache->clear(); + // If we want to process the most recent state of the result, we + // take the last value + unsigned res_current = is_dfs ? todo.back() : todo.front(); + unsigned res_index = res_current - nb_states_before; + const auto ¤t_ps = num_2_ps[res_index]; + const auto current_mem = current_ps.mem; + if (is_dfs) + todo.pop_back(); + else + todo.pop_front(); + + // For each edge leaving the state corresponding to res_state in sub_aut + for (auto &e : sub_aut->out(current_ps.state_scc)) + { + // We create a new memory and update it + memory mem(current_mem); + unsigned nb_seen = 0, + h = 0; + find_new_memory(e.dst, mem, sub_aut->edge_number(e), e.acc, + pairs, pairs_indices, nb_seen, h, relations); + // Now we try to find a way to move the elements and obtain an + // existing memory. + unsigned res_dst = -1U; + if constexpr (algo != TAR) + if (opt_.search_ex) + res_dst = s2l.get_compatible_state(e.dst, mem, nb_seen, + opt_.use_last); + // If it doesn't exist, we create a new state… + if (res_dst == -1U) + { + auto gs = get_state({(*init)[e.dst], e.dst, mem}); + res_dst = gs.first; + // And add it to the "tree" used to find a compatible state + if constexpr (!is_tar) + { + if (need_tree && gs.second) + s2l.add_new_path(e.dst, mem, res_dst, nb_seen); + } + } + + // We compute the color assigned to the new edge. + acc_cond::mark_t new_edge_color{}; + compute_new_color_lar(sub_aut, current_mem, mem, h, e.acc, + new_edge_color, pairs, acc_cache); + + // As we can assign a new destination later when + // use_last_post_process is true, we cannot try to find a compatible + // edge. 
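+              // (hence the call below passes !use_last_post_process as
+              // can_merge_edge, and edge_cache was only allocated when
+              // merging is allowed.)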
+ auto edge_res_num = add_res_edge(res_current, res_dst, + new_edge_color, e.cond, + !use_last_post_process, + edge_cache); + (void) edge_res_num; + // We have to remember how many colors were seen if we do a post + // processing + if constexpr (algo != TAR) + if (use_last_post_process) + { + assert(edge_res_num == + edge_to_seen_nb.size() + nb_edges_before + 1); + edge_to_seen_nb.push_back(nb_seen); + } + } + } + + // We used the most recent compatible state but perhaps that another + // state was created after. We do a new search. As TAR always moves one + // element we don't need it. + if constexpr (algo != TAR) + if (use_last_post_process) + { + for (auto &res_state : states_scc_res) + for (auto &e : res_->out(res_state)) + { + auto e_dst = e.dst; + if (e.src == e_dst) + continue; + auto edge_num = res_->edge_number(e); + const auto &ps = num_2_ps[e_dst - nb_states_before]; + unsigned seen_nb = + edge_to_seen_nb[edge_num - nb_edges_before - 1]; + assert(seen_nb < SPOT_MAX_ACCSETS); + auto new_dst = s2l.get_compatible_state(ps.state_scc, ps.mem, + seen_nb, true); + if (new_dst != e_dst) + { + assert(new_dst != -1U); + need_purge_ = true; + e.dst = new_dst; + } + } + } + if (use_bscc) + { + // Contrary to the (old) implementation of IAR, adding an edge between + // 2 SCCs of the result is the last thing done. It means that + // we don't need to use a filter when we compute the BSCC. + // A state s is in the BSCC if scc_of(s) is 0. + scc_info sub_scc(res_, init_res, nullptr, nullptr, + scc_info_options::NONE); + if (sub_scc.scc_count() > 1) + { + need_purge_ = true; + for (auto &state_produced : states_scc_res) + if (sub_scc.scc_of(state_produced) == 0) + state_to_res_[res_to_aut_[state_produced]] = state_produced; + } + } + delete edge_cache; + } + + void + link_sccs() + { + if (si_.scc_count() > 1) + { + const unsigned res_num_states = res_->num_states(); + for (unsigned i = 0; i < res_num_states; ++i) + { + auto aut_i = res_to_aut_[i]; + auto aut_i_scc = si_.scc_of(aut_i); + for (auto &e : aut_->out(aut_i)) + if (aut_i_scc != si_.scc_of(e.dst)) + { + auto e_dst_repr = state_to_res_[e.dst]; + add_res_edge(i, e_dst_repr, {}, e.cond); + } + } + } + } + + bool + try_parity_equivalence(const zielonka_tree &tree, + const twa_graph_ptr &sub_aut) + { + if (tree.has_parity_shape()) + { + bool first_is_accepting = tree.is_even(); + // A vector that stores the difference between 2 levels. + std::vector colors_diff; + auto &tree_nodes = tree.nodes_; + // Supposes that the index of the root is 0. + unsigned current_index = 0; + auto current_node = tree_nodes[current_index]; + // While the current node has a child + while (current_node.first_child != 0) + { + auto child_index = current_node.first_child; + auto child = tree_nodes[child_index]; + acc_cond::mark_t diff = current_node.colors - child.colors; + colors_diff.emplace_back(diff); + current_node = child; + } + // We have to deal with the edge between the last node and ∅. + bool is_empty_accepting = sub_aut->acc().accepting({}); + bool is_current_accepting = (current_node.level % 2) != tree.is_even(); + if (is_empty_accepting != is_current_accepting) + colors_diff.emplace_back(current_node.colors); + // + 1 as we need to know which value should be given to an uncolored + // edge. 
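+          // (apply_copy expects new_colors[0] to be the color given to
+          // uncolored edges and new_colors[c + 1] the replacement for
+          // color c; -1U means "leave the edge uncolored".)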
+ std::vector new_colors( + sub_aut->get_acceptance().used_sets().max_set() + 1, -1U); + unsigned current_col = colors_diff.size() - 1; + for (auto &diff : colors_diff) + { + for (auto col : diff.sets()) + new_colors[col + 1] = current_col; + --current_col; + } + bool is_max_even = first_is_accepting == (colors_diff.size() % 2); + if (!is_max_even) + change_to_odd(); + + bool is_even_in_odd_world = is_odd_ && is_max_even; + if (is_even_in_odd_world) + for (auto &x : new_colors) + ++x; + apply_copy(sub_aut, new_colors, PARITY_EQUIV); + return true; + } + return false; + } + + bool + try_parity_prefix(const zielonka_tree &tree, const twa_graph_ptr &sub_aut) + { + unsigned index = 0; + auto current = tree.nodes_[index]; + std::vector prefixes; + bool first_is_accepting = tree.is_even(); + + acc_cond::mark_t removed_cols{}; + auto has_one_child = [&](const auto node) constexpr + { + auto fc = node.first_child; + return tree.nodes_[fc].next_sibling == fc; + }; + while (has_one_child(current)) + { + auto child = tree.nodes_[current.first_child]; + acc_cond::mark_t diff{}; + const bool is_leaf = current.first_child == 0; + if (is_leaf) + diff = current.colors; + else + diff = current.colors - child.colors; + prefixes.emplace_back(diff); + removed_cols |= diff; + if (is_leaf) + break; + current = child; + } + if (prefixes.empty()) + return false; + + if (opt_.datas) + algo_used_ |= algorithm::PARITY_PREFIX; + + // As we want to remove the prefix we need to remove it from the + // condition. As an unused color is not always removed (acc_clean false), + // we do it here. + auto used_cols = sub_aut->get_acceptance().used_sets() - removed_cols; + auto new_cond = sub_aut->acc().restrict_to(used_cols); + scc_info_to_parity sub(sub_aut, removed_cols); + // The recursive call will add some informations to help + // to add missing edges + state_to_nums_ = + new std::vector>(aut_->num_states()); + opt_.parity_prefix = false; + bool old_pp_gen = opt_.parity_prefix_general; + opt_.parity_prefix_general = false; + + auto max_scc_color_rec = max_color_scc_; + for (auto x : sub.split_aut({removed_cols})) + { + x->set_acceptance(new_cond); + process_scc(x, algorithm::PARITY_PREFIX); + if (max_color_scc_.has_value()) + { + if (!max_scc_color_rec.has_value()) + max_scc_color_rec.emplace(*max_color_scc_); + else + max_scc_color_rec.emplace( + std::max(*max_scc_color_rec, *max_color_scc_)); + } + } + opt_.parity_prefix = true; + opt_.parity_prefix_general = old_pp_gen; + + assert(max_scc_color_rec.has_value()); + auto max_used_is_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + bool last_prefix_acc = (prefixes.size() % 2) != first_is_accepting; + + unsigned m = prefixes.size() + (max_used_is_accepting != last_prefix_acc) + + *max_scc_color_rec - 1; + auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + for (auto &e : sub_aut->edges()) + if (e.acc & removed_cols) + { + auto el = std::find_if(prefixes.begin(), prefixes.end(), + [&](acc_cond::mark_t &x) + { return x & e.acc; }); + assert(el != prefixes.end()); + unsigned pos = std::distance(prefixes.begin(), el); + const unsigned col = m - pos; + // As it is a parity prefix we should never get a lower value than + // the color recursively produced. 
+ assert(!max_scc_color_rec.has_value() || *max_scc_color_rec == 0 + || col + 1 > *max_scc_color_rec); + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + if (col != -1U) + add_res_edge(src, dst, {col}, e.cond); + else + add_res_edge(src, dst, {}, e.cond); + } + // As when we need to use link_scc, a set of edges that link 2 SCC + // need to be added and don't need to have a color. + else if (sub.scc_of(e.src) != sub.scc_of(e.dst)) + { + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + add_res_edge(src, dst, {}, e.cond); + } + delete state_to_nums_; + state_to_nums_ = nullptr; + + return true; + } + + bool + try_parity_prefix_general(twa_graph_ptr &sub_aut) + { + // This function should not be applied on an "empty" automaton as + // it must create an empty SCC with the algorithm NONE. + assert(sub_aut->num_edges() > 0); + static_assert((MAX_ACCSETS % 2) == 0, + "MAX_ACCSETS is supposed to be even"); + std::vector res_colors; + std::vector status; + acc_cond new_cond; + bool was_able_to_color; + // Is the maximal color accepting? + bool start_inf = true; + cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, false, status, + res_colors, new_cond, was_able_to_color); + // Otherwise we can try to find a rejecting transition as first step + if (!was_able_to_color) + { + cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, false, status, + res_colors, new_cond, was_able_to_color); + if (!was_able_to_color) + return false; + start_inf = false; + } + + // If we have a parity-type automaton, it is just a copy. + if (std::find(status.begin(), status.end(), edge_status::IMPOSSIBLE) + == status.end()) + { + std::vector res_cols; + res_cols.reserve(res_colors.size()); + + auto min_set = + std::min_element(res_colors.begin() + 1, res_colors.end())->max_set(); + // Does the minimal color has the same parity than the maximal parity? + bool same_acceptance_min_max = (min_set % 2); + // Do we need to shift to match the parity of res_? 
+ bool odd_shift = start_inf != is_odd_; + unsigned shift_col = min_set - (same_acceptance_min_max != odd_shift); + std::transform(res_colors.begin(), res_colors.end(), + std::back_inserter(res_cols), [&](auto &x) + { return x.max_set() - 1 - shift_col; }); + apply_copy_edge_index(sub_aut, res_cols, + algorithm::PARITY_PREFIX_GENERAL); + return true; + } + + // At this moment, a prefix exists + auto& ev = sub_aut->edge_vector(); + const auto ev_size = ev.size(); + auto keep = std::shared_ptr(make_bitvect(ev_size)); + const unsigned status_size = status.size(); + for (unsigned i = 1; i < status_size; ++i) + if (status[i] == edge_status::IMPOSSIBLE) + keep->set(i); + else + keep->clear(i); + + // Avoid recursive parity prefix + opt_.parity_prefix_general = false; + bool old_pp = opt_.parity_prefix; + opt_.parity_prefix = false; + + auto max_scc_color_rec = max_color_scc_; + scc_info lower_scc(sub_aut, scc_info_options::TRACK_STATES); + scc_info_to_parity sub(lower_scc, keep); + state_to_nums_ = + new std::vector>(aut_->num_states()); + for (auto x : sub.split_aut(keep)) + { + process_scc(x, algorithm::PARITY_PREFIX_GENERAL); + if (!max_scc_color_rec.has_value()) + max_scc_color_rec = max_color_scc_; + else if (max_color_scc_.has_value()) + max_scc_color_rec.emplace( + std::max(*max_scc_color_rec, *max_color_scc_)); + } + + // restore options + opt_.parity_prefix_general = true; + opt_.parity_prefix = old_pp; + + assert(sub_aut->num_edges() > 0); + + // Compute the minimal color used by parity prefix. + unsigned min_set_prefix = -2U; + for (unsigned i = 1; i < ev_size; ++i) + if (status[i] == edge_status::MARKED) + { + auto e_mark = res_colors[i].max_set(); + if (min_set_prefix == -2U) + min_set_prefix = e_mark - 1; + else + min_set_prefix = std::min(min_set_prefix + 1, e_mark) - 1; + } + + // At least one transition should be marked here. + assert(min_set_prefix != -2U); + + // Reduce the colors used by parity_prefix. + const bool min_prefix_accepting = (min_set_prefix % 2) == start_inf; + // max_scc_color_rec has a value as the automaton is not parity-type, + // so there was a recursive paritisation + assert(max_scc_color_rec.has_value()); + const bool max_rec_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + const bool same_prio = min_prefix_accepting == max_rec_accepting; + const unsigned delta = + min_set_prefix - (*max_scc_color_rec + 1) - !same_prio; + + auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + for (unsigned e_num = 1; e_num < ev_size; ++e_num) + { + auto& e = ev[e_num]; + if (status[e_num] == edge_status::MARKED) + { + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + { + auto col = res_colors[e_num].max_set() - delta - 1; + if (col == -1U) + add_res_edge(src, dst, {}, e.cond); + else + add_res_edge(src, dst, {col}, e.cond); + } + } + } + + delete state_to_nums_; + state_to_nums_ = nullptr; + + return true; + } + + bool + try_emptiness(const const_twa_graph_ptr &sub_aut, bool &tried) + { + tried = true; + if (generic_emptiness_check(sub_aut)) + { + auto col_fun = + [col = is_odd_ ? 
acc_cond::mark_t{0} : acc_cond::mark_t{}] + (const twa_graph::edge_storage_t &) noexcept + { + return col; + }; + apply_copy_general(sub_aut, col_fun, GENERIC_EMPTINESS); + return true; + } + return false; + } + + bool + try_rabin_to_buchi(twa_graph_ptr &sub_aut) + { + algorithm algo = RABIN_TO_BUCHI; + auto buch_aut = rabin_to_buchi_if_realizable(sub_aut); + if (buch_aut == nullptr) + { + algo = STREETT_TO_COBUCHI; + auto old_cond = sub_aut->get_acceptance(); + sub_aut->set_acceptance(acc_cond(old_cond.complement())); + buch_aut = rabin_to_buchi_if_realizable(sub_aut); + sub_aut->set_acceptance(acc_cond(old_cond)); + } + if (buch_aut != nullptr) + { + if (algo == STREETT_TO_COBUCHI) + change_to_odd(); + unsigned shift = (algo == RABIN_TO_BUCHI) && is_odd_; + + auto &buch_aut_ev = buch_aut->edge_vector(); + // 0 is not an edge, so we assign -1; + std::vector colors; + colors.reserve(buch_aut_ev.size()); + colors.push_back(-1U); + std::transform( + buch_aut_ev.begin() + 1, buch_aut_ev.end(), + std::back_inserter(colors), + [&](const twa_graph::edge_storage_t &e) { + return e.acc.max_set() - 1 + shift; + }); + apply_copy_edge_index(sub_aut, colors, algo); + return true; + } + return false; + } + + bool + try_buchi_type(const twa_graph_ptr &sub_aut) + { + std::vector status; + std::vector res_colors; + acc_cond new_cond; + bool is_co_bu = false; + bool was_able_to_color; + if (!cond_type_main_aux(sub_aut, cond_kind::BUCHI, true, status, + res_colors, new_cond, was_able_to_color)) + { + is_co_bu = true; + if (!cond_type_main_aux(sub_aut, cond_kind::CO_BUCHI, true, status, + res_colors, new_cond, was_able_to_color)) + return false; + change_to_odd(); + } + // Tests if all edges are colored or all edges are uncolored + auto [min, max] = + std::minmax_element(res_colors.begin() + 1, res_colors.end()); + const bool one_color = min->max_set() == max->max_set(); + const bool is_colored = min->max_set(); + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + // If there one color in the automaton, we can simplify. + if (one_color) + { + bool z = (is_colored && !is_odd_) || (!is_colored && is_odd_); + // When we do co-buchi, we reverse + if (is_co_bu) + z = !z; + return z ? acc_cond::mark_t{0} : acc_cond::mark_t{}; + } + // Otherwise, copy the color + auto edge_number = sub_aut->edge_number(edge); + unsigned mc = res_colors[edge_number].max_set() - 1; + mc += (!is_co_bu && is_odd_); + if (mc == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{mc}; + }; + apply_copy_general(sub_aut, col_fun, is_co_bu ? 
algorithm::CO_BUCHI_TYPE + : algorithm::BUCHI_TYPE); + return true; + } + + bool + try_parity_type(const twa_graph_ptr &sub_aut) + { + std::vector status; + std::vector res_colors; + acc_cond new_cond; + bool was_able_to_color; + if (!cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, true, status, + res_colors, new_cond, was_able_to_color)) + { + if (!cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, true, status, + res_colors, new_cond, was_able_to_color)) + return false; + } + bool is_max, is_odd; + new_cond.is_parity(is_max, is_odd); + auto [min, max] = + std::minmax_element(res_colors.begin() + 1, res_colors.end()); + // cond_type_main_aux returns a parity max condition + assert(is_max); + auto col_fun = + [shift = (is_odd != is_odd_) - (min->max_set() + (min->max_set() % 2)), + &res_colors, &sub_aut] + (const twa_graph::edge_storage_t &edge) + { + auto edge_number = sub_aut->edge_number(edge); + unsigned mc = res_colors[edge_number].max_set() - 1; + mc += shift; + if (mc == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{mc}; + }; + apply_copy_general(sub_aut, col_fun, PARITY_TYPE); + return true; + } + + // Keeps the result of the partial degeneralization if it reduces the number + // of colors or it allows to apply IAR. + bool + keep_deg(const const_twa_graph_ptr &sub_aut, const const_twa_graph_ptr °) + { + if (!opt_.reduce_col_deg) + return true; + unsigned nb_col_orig = sub_aut->get_acceptance().used_sets().count(); + + if (deg->get_acceptance().used_sets().count() < nb_col_orig) + return true; + std::vector pairs; + if (deg->acc().is_rabin_like(pairs)) + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } + if (deg->acc().is_streett_like(pairs)) + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } + return false; + } + + // Process a SCC. If there is no edge in the automaton, a new state is + // created and we say (if pretty_print is true) that none_algo created + // this state. + void + process_scc(twa_graph_ptr &sub_aut, + const algorithm none_algo = algorithm::NONE) + { + // Init the maximal color produced when processing this SCC. + max_color_scc_.reset(); + // If the sub_automaton is "empty", we don't need to apply an algorithm. + if (sub_aut->num_edges() == 0) + { + apply_copy(sub_aut, {}, none_algo); + return; + } + + bool tried_emptiness = false; + bool changed_structure = true; + while (true) + { + auto cond_before_simpl = sub_aut->acc(); + if (opt_.acc_clean) + simplify_acceptance_here(sub_aut); + if (opt_.propagate_col) + { + propagate_marks_here(sub_aut); + if (opt_.acc_clean) + simplify_acceptance_here(sub_aut); + } + if (opt_.datas && sub_aut->acc() != cond_before_simpl) + algo_used_ |= algorithm::ACC_CLEAN; + + if (opt_.parity_equiv || opt_.parity_prefix) + { + // If we don't try to find a parity prefix, we can stop + // to construct the tree when it has not parity shape. 
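+            // (A Zielonka tree has "parity shape" when it is a single
+            // branch, i.e. every node has at most one child; the
+            // condition is then equivalent to a parity condition and
+            // try_parity_equivalence only needs to relabel the colors.)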
+ zielonka_tree_options zopt = zielonka_tree_options::MERGE_SUBTREES + | zielonka_tree_options::CHECK_PARITY; + if (!opt_.parity_prefix) + zopt = zopt | zielonka_tree_options::ABORT_WRONG_SHAPE; + auto tree = zielonka_tree(sub_aut->acc(), zopt); + // If it is not parity shape, tree.nodes_ will be empty + if (tree.num_branches() != 0 && opt_.parity_equiv + && try_parity_equivalence(tree, sub_aut)) + return; + if (opt_.parity_prefix && try_parity_prefix(tree, sub_aut)) + return; + } + + if (changed_structure && opt_.parity_prefix_general + && try_parity_prefix_general(sub_aut)) + return; + + if (opt_.generic_emptiness && !tried_emptiness + && try_emptiness(sub_aut, tried_emptiness)) + return; + + // Buchi_type_to_buchi is more general that Rabin_to_buchi so + // we just call rabin_to_buchi if buchi_type_to_buchi is false. + if (!opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity + && opt_.rabin_to_buchi + && try_rabin_to_buchi(sub_aut)) + return; + + // As parity_type_to_parity is stronger, we don't + // try if this option is used. + if (opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity + && try_buchi_type(sub_aut)) + return; + + // We don't do it if parity_prefix_general is true as on a parity-type + // automaton parity_prefix_general removes all the transitions and + // we also get a parity-type automaton. + if (!opt_.parity_prefix_general && opt_.parity_type_to_parity + && try_parity_type(sub_aut)) + return; + + if (opt_.partial_degen + && is_partially_degeneralizable(sub_aut, true, true)) + { + auto deg = sub_aut; + std::vector forbid; + auto m = is_partially_degeneralizable(sub_aut, true, true, forbid); + bool changed = false; + while (m) + { + auto tmp = partial_degeneralize(deg, m); + simplify_acceptance_here(tmp); + if (keep_deg(deg, tmp)) + { + algo_used_ |= algorithm::PARTIAL_DEGEN; + deg = tmp; + changed = true; + changed_structure = true; + } + else + forbid.emplace_back(m); + m = is_partially_degeneralizable(deg, true, true, forbid); + } + + if (changed) + { + sub_aut = deg; + continue; + } + } + break; + } + if (opt_.use_generalized_rabin) + { + auto gen_rab = to_generalized_rabin(sub_aut); + // to_generalized_rabin does not propagate original-states. 
+ auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + auto orig = new std::vector(); + const auto sub_aut_num_states = sub_aut->num_states(); + orig->reserve(sub_aut_num_states); + gen_rab->set_named_prop("original-states", orig); + for (unsigned i = 0; i < sub_aut_num_states; ++i) + orig->push_back((*sub_aut_orig)[i]); + sub_aut = partial_degeneralize(gen_rab); + } + std::vector pairs; + algorithm algo = choose_lar(sub_aut->acc(), pairs, sub_aut->num_edges()); + if (opt_.datas) + algo_used_ |= algo; + if (algo == CAR) + apply_lar(sub_aut, pairs); + else if (algo == IAR_STREETT) + apply_lar(sub_aut, pairs); + else if (algo == IAR_RABIN) + apply_lar(sub_aut, pairs); + else if (algo == TAR) + apply_lar(sub_aut, pairs); + else + SPOT_UNREACHABLE(); + } + + public: + twa_graph_ptr + run() + { + res_ = make_twa_graph(aut_->get_dict()); + res_->copy_ap_of(aut_); + const unsigned num_scc = si_.scc_count(); + auto orig_aut = + aut_->get_named_prop>("original-states"); + std::optional> orig_st; + if (orig_aut) + { + orig_st.emplace(std::vector{*orig_aut}); + std::const_pointer_cast(aut_) + ->set_named_prop("original-states", nullptr); + } + auto sccs = si_.split_aut(); + for (unsigned scc = 0; scc < num_scc; ++scc) + { + auto sub_automaton = sccs[scc]; + process_scc(sub_automaton); + } + + link_sccs(); + // During the execution, to_parity works on its own + // original-states and we must combine it with the property original + // states of aut_ to propagate the information. + if (orig_st) + for (unsigned i = 0; i < orig_->size(); ++i) + (*orig_)[i] = (*orig_aut)[(*orig_)[i]]; + res_->set_named_prop("original-states", orig_); + if (opt_.pretty_print) + res_->set_named_prop("state-names", names_); + if (res_->num_states() == 0) + add_res_state(NONE, {0, 0, {}}); + res_->set_init_state(state_to_res_[aut_->get_init_state_number()]); + // There is only a subset of algorithm that can create an unreachable + // state + if (need_purge_) + res_->purge_unreachable_states(); + // A special case is an automaton without edge. It implies + // max_color_used_ has not value so we need to test it. 
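+      // (such an automaton has no transition at all, hence no run and no
+      // accepting word, so the acceptance can simply be set to "f".)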
+ if (!max_color_used_.has_value()) + { + assert(aut_->num_edges() == 0); + res_->set_acceptance(acc_cond(acc_cond::acc_code::f())); + } + else + { + res_->set_acceptance(acc_cond( + acc_cond::acc_code::parity(true, is_odd_, *max_color_used_))); + } + if (opt_.datas) + { + constexpr std::array + algos = {BUCHI_TYPE, CAR, CO_BUCHI_TYPE, GENERIC_EMPTINESS, IAR_RABIN, + IAR_STREETT, NONE, PARITY_EQUIV, PARITY_PREFIX, + PARITY_PREFIX_GENERAL, PARITY_TYPE, RABIN_TO_BUCHI, + STREETT_TO_COBUCHI, TAR}; + for (auto al : algos) + if (algo_used_ & al) + opt_.datas->algorithms_used.emplace_back(algorithm_to_str(al)); + } + return res_; + } + + to_parity_generator(const const_twa_graph_ptr &aut, + const to_parity_options opt) + : aut_(aut), + opt_(opt), + si_(aut), + state_to_res_(aut->num_states(), -1U) + { + auto aut_num = aut->num_states(); + res_to_aut_.reserve(aut_num); + orig_ = new std::vector(); + orig_->reserve(aut_num); + if (opt.pretty_print) + { + names_ = new std::vector(); + names_->reserve(aut_num); + } + } + }; + + twa_graph_ptr + to_parity(const const_twa_graph_ptr &aut, + const to_parity_options options) + { + bool is_max, is_odd; + if (aut->acc().is_parity(is_max, is_odd, false)) + { + if (!is_max) + return change_parity(aut, parity_kind::parity_kind_max, + parity_style::parity_style_any); + else + { + auto res = make_twa_graph(aut, twa::prop_set::all()); + res->copy_acceptance_of(aut); + return res; + } + } + to_parity_generator gen(aut, options); + return gen.run(); + } + + // Old version of CAR + namespace { + struct lar_state + { + unsigned state; + std::vector perm; + bool operator<(const lar_state &s) const + { + return state == s.state ? perm < s.perm : state < s.state; + } + + std::string to_string() const + { + std::ostringstream s; + s << state << " ["; + unsigned ps = perm.size(); + for (unsigned i = 0; i < ps; ++i) + { + if (i > 0) + s << ','; + s << perm[i]; + } + s << ']'; + return s.str(); + } + }; + + class lar_generator + { + const const_twa_graph_ptr &aut_; + twa_graph_ptr res_; + const bool pretty_print; + + std::map lar2num; + + public: + explicit lar_generator(const const_twa_graph_ptr &a, bool pretty_print) + : aut_(a), res_(nullptr), pretty_print(pretty_print) + { + } + + twa_graph_ptr run() + { + res_ = make_twa_graph(aut_->get_dict()); + res_->copy_ap_of(aut_); + + std::deque todo; + auto get_state = [this, &todo](const lar_state &s) + { + auto it = lar2num.emplace(s, -1U); + if (it.second) // insertion took place + { + unsigned nb = res_->new_state(); + it.first->second = nb; + todo.push_back(s); + } + return it.first->second; + }; + + std::vector initial_perm(aut_->num_sets()); + std::iota(initial_perm.begin(), initial_perm.end(), 0); + { + lar_state s0{aut_->get_init_state_number(), initial_perm}; + res_->set_init_state(get_state(s0)); + } + + scc_info si(aut_, scc_info_options::NONE); + // main loop + while (!todo.empty()) + { + lar_state current = todo.front(); + todo.pop_front(); + + // TODO: todo could store this number to avoid one lookup + unsigned src_num = get_state(current); + + unsigned source_scc = si.scc_of(current.state); + for (const auto &e : aut_->out(current.state)) + { + // find the new permutation + std::vector new_perm = current.perm; + unsigned h = 0; + for (unsigned k : e.acc.sets()) + { + auto it = std::find(new_perm.begin(), new_perm.end(), k); + h = std::max(h, unsigned(new_perm.end() - it)); + std::rotate(it, it + 1, new_perm.end()); + } + + if (source_scc != si.scc_of(e.dst)) + { + new_perm = initial_perm; + h = 0; + } + + 
lar_state dst{e.dst, new_perm}; + unsigned dst_num = get_state(dst); + + // Do the h last elements satisfy the acceptance condition? + // If they do, emit 2h, if they don't emit 2h+1. + acc_cond::mark_t m(new_perm.end() - h, new_perm.end()); + bool rej = !aut_->acc().accepting(m); + res_->new_edge(src_num, dst_num, e.cond, {2 * h + rej}); + } + } + + // parity max even + unsigned sets = 2 * aut_->num_sets() + 2; + res_->set_acceptance(sets, acc_cond::acc_code::parity_max_even(sets)); + + if (pretty_print) + { + auto names = new std::vector(res_->num_states()); + for (const auto &p : lar2num) + (*names)[p.second] = p.first.to_string(); + res_->set_named_prop("state-names", names); + } + + return res_; + } + }; + } + + twa_graph_ptr + to_parity_old(const const_twa_graph_ptr &aut, bool pretty_print) + { + if (!aut->is_existential()) + throw std::runtime_error("LAR does not handle alternation"); + // if aut is already parity return it as is + if (aut->acc().is_parity()) + return std::const_pointer_cast(aut); + + lar_generator gen(aut, pretty_print); + return gen.run(); + } + + // Old version of IAR + + namespace + { using perm_t = std::vector; struct iar_state { @@ -326,18 +2622,18 @@ namespace spot perm_t perm; bool - operator<(const iar_state& other) const + operator<(const iar_state &other) const { return state == other.state ? perm < other.perm : state < other.state; } }; - template + template class iar_generator { // helper functions: access fin and inf parts of the pairs // these functions negate the Streett condition to see it as a Rabin one - const acc_cond::mark_t& + const acc_cond::mark_t & fin(unsigned k) const { if (is_rabin) @@ -353,16 +2649,15 @@ namespace spot else return pairs_[k].fin; } + public: - explicit iar_generator(const const_twa_graph_ptr& a, - const std::vector& p, + explicit iar_generator(const const_twa_graph_ptr &a, + const std::vector &p, const bool pretty_print) - : aut_(a) - , pairs_(p) - , scc_(scc_info(a)) - , pretty_print_(pretty_print) - , state2pos_iar_states(aut_->num_states(), -1U) - {} + : aut_(a), pairs_(p), scc_(scc_info(a)), pretty_print_(pretty_print), + state2pos_iar_states(aut_->num_states(), -1U) + { + } twa_graph_ptr run() @@ -386,9 +2681,6 @@ namespace spot res_->set_init_state(s); } - // there could be quite a number of unreachable states, prune them - res_->purge_unreachable_states(); - if (pretty_print_) { unsigned nstates = res_->num_states(); @@ -396,13 +2688,13 @@ namespace spot for (auto e : res_->edges()) { unsigned s = e.src; - iar_state iar = num2iar.at(s); + iar_state iar = num2iar[s]; std::ostringstream st; st << iar.state << ' '; if (iar.perm.empty()) st << '['; char sep = '['; - for (unsigned h: iar.perm) + for (unsigned h : iar.perm) { st << sep << h; sep = ','; @@ -413,6 +2705,8 @@ namespace spot res_->set_named_prop("state-names", names); } + // there could be quite a number of unreachable states, prune them + res_->purge_unreachable_states(); return res_; } @@ -423,44 +2717,44 @@ namespace spot unsigned init = scc_.one_state_of(scc_num); std::deque todo; - auto get_state = [&](const iar_state& s) + auto get_state = [&](const iar_state &s) + { + auto it = iar2num.find(s); + if (it == iar2num.end()) { - auto it = iar2num.find(s); - if (it == iar2num.end()) - { - unsigned nb = res_->new_state(); - iar2num[s] = nb; - num2iar[nb] = s; - unsigned iar_pos = iar_states.size(); - unsigned old_newest_pos = state2pos_iar_states[s.state]; - state2pos_iar_states[s.state] = iar_pos; - iar_states.push_back({s, old_newest_pos}); - 
todo.push_back(s); - return nb; - } - return it->second; - }; + unsigned nb = res_->new_state(); + iar2num[s] = nb; + num2iar[nb] = s; + unsigned iar_pos = iar_states.size(); + unsigned old_newest_pos = state2pos_iar_states[s.state]; + state2pos_iar_states[s.state] = iar_pos; + iar_states.push_back({s, old_newest_pos}); + todo.push_back(s); + return nb; + } + return it->second; + }; auto get_other_scc = [this](unsigned state) - { - auto it = state2iar.find(state); - // recursively build the destination SCC if we detect that it has - // not been already built. - if (it == state2iar.end()) - build_iar_scc(scc_.scc_of(state)); - return iar2num.at(state2iar.at(state)); - }; + { + auto it = state2iar.find(state); + // recursively build the destination SCC if we detect that it has + // not been already built. + if (it == state2iar.end()) + build_iar_scc(scc_.scc_of(state)); + return iar2num.at(state2iar.at(state)); + }; if (scc_.is_trivial(scc_num)) - { - iar_state iar_s{init, perm_t()}; - state2iar[init] = iar_s; - unsigned src_num = get_state(iar_s); - // Do not forget to connect to subsequent SCCs - for (const auto& e : aut_->out(init)) - res_->new_edge(src_num, get_other_scc(e.dst), e.cond); - return; - } + { + iar_state iar_s{init, perm_t()}; + state2iar[init] = iar_s; + unsigned src_num = get_state(iar_s); + // Do not forget to connect to subsequent SCCs + for (const auto &e : aut_->out(init)) + res_->new_edge(src_num, get_other_scc(e.dst), e.cond); + return; + } // determine the pairs that appear in the SCC auto colors = scc_.acc_sets_of(scc_num); @@ -478,109 +2772,110 @@ namespace spot // the main loop while (!todo.empty()) + { + iar_state current = todo.front(); + todo.pop_front(); + + unsigned src_num = get_state(current); + + for (const auto &e : aut_->out(current.state)) { - iar_state current = todo.front(); - todo.pop_front(); + // connect to the appropriate state + if (scc_.scc_of(e.dst) != scc_num) + res_->new_edge(src_num, get_other_scc(e.dst), e.cond); + else + { + // find the new permutation + perm_t new_perm = current.perm; + // Count pairs whose fin-part is seen on this transition + unsigned seen_nb = 0; + // consider the pairs for this SCC only + for (unsigned k : scc_pairs) + if (e.acc & fin(k)) + { + ++seen_nb; + auto it = std::find(new_perm.begin(), + new_perm.end(), + k); + // move the pair in front of the permutation + std::rotate(new_perm.begin(), it, it + 1); + } - unsigned src_num = get_state(current); + iar_state dst; + unsigned dst_num = -1U; - for (const auto& e : aut_->out(current.state)) + // Optimization: when several indices are seen in the + // transition, they move at the front of new_perm in any + // order. Check whether there already exists an iar_state + // that matches this condition. 
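+              // For instance if pairs 1 and 3 are both seen on the
+              // transition, the permutations [1,3,...] and [3,1,...]
+              // behave the same, so an existing copy of the destination
+              // is reused as long as its permutation agrees on the
+              // remaining (non-moved) suffix.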
+ + auto iar_pos = state2pos_iar_states[e.dst]; + while (iar_pos != -1U) { - // connect to the appropriate state - if (scc_.scc_of(e.dst) != scc_num) - res_->new_edge(src_num, get_other_scc(e.dst), e.cond); - else - { - // find the new permutation - perm_t new_perm = current.perm; - // Count pairs whose fin-part is seen on this transition - unsigned seen_nb = 0; - // consider the pairs for this SCC only - for (unsigned k : scc_pairs) - if (e.acc & fin(k)) - { - ++seen_nb; - auto it = std::find(new_perm.begin(), - new_perm.end(), - k); - // move the pair in front of the permutation - std::rotate(new_perm.begin(), it, it+1); - } - - iar_state dst; - unsigned dst_num = -1U; - - // Optimization: when several indices are seen in the - // transition, they move at the front of new_perm in any - // order. Check whether there already exists an iar_state - // that matches this condition. - - auto iar_pos = state2pos_iar_states[e.dst]; - while (iar_pos != -1U) - { - iar_state& tmp = iar_states[iar_pos].first; - iar_pos = iar_states[iar_pos].second; - if (std::equal(new_perm.begin() + seen_nb, - new_perm.end(), - tmp.perm.begin() + seen_nb)) - { - dst = tmp; - dst_num = iar2num[dst]; - break; - } - } - // if such a state was not found, build it - if (dst_num == -1U) - { - dst = iar_state{e.dst, new_perm}; - dst_num = get_state(dst); - } - - // find the maximal index encountered by this transition - unsigned maxint = -1U; - for (int k = current.perm.size() - 1; k >= 0; --k) - { - unsigned pk = current.perm[k]; - if (!inf(pk) || - (e.acc & (pairs_[pk].fin | pairs_[pk].inf))) { - maxint = k; - break; - } - } - - acc_cond::mark_t acc = {}; - if (maxint == -1U) - acc = {0}; - else if (e.acc & fin(current.perm[maxint])) - acc = {2*maxint+2}; - else - acc = {2*maxint+1}; - - res_->new_edge(src_num, dst_num, e.cond, acc); - } + iar_state &tmp = iar_states[iar_pos].first; + iar_pos = iar_states[iar_pos].second; + if (std::equal(new_perm.begin() + seen_nb, + new_perm.end(), + tmp.perm.begin() + seen_nb)) + { + dst = tmp; + dst_num = iar2num[dst]; + break; + } } + // if such a state was not found, build it + if (dst_num == -1U) + { + dst = iar_state{e.dst, new_perm}; + dst_num = get_state(dst); + } + + // find the maximal index encountered by this transition + unsigned maxint = -1U; + for (int k = current.perm.size() - 1; k >= 0; --k) + { + unsigned pk = current.perm[k]; + if (!inf(pk) || + (e.acc & (pairs_[pk].fin | pairs_[pk].inf))) + { + maxint = k; + break; + } + } + + acc_cond::mark_t acc{}; + if (maxint == -1U) + acc.set(0); + else if (e.acc & fin(current.perm[maxint])) + assign_color(acc, 2 * maxint + 2); + else + assign_color(acc, 2 * maxint + 1); + + res_->new_edge(src_num, dst_num, e.cond, acc); + } } + } // Optimization: find the bottom SCC of the sub-automaton we have just // built. To that end, we have to ignore edges going out of scc_num. 
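+      // Only the copies that end up in this bottom SCC are recorded in
+      // state2iar and used as entry points by the other SCCs; the
+      // remaining copies usually become unreachable and are purged at
+      // the end of run().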
- auto leaving_edge = [&](unsigned d) - { - return scc_.scc_of(num2iar.at(d).state) != scc_num; - }; - auto filter_edge = [](const twa_graph::edge_storage_t&, + auto leaving_edge = [&](unsigned d) constexpr + { + return scc_.scc_of(num2iar.at(d).state) != scc_num; + }; + auto filter_edge = [](const twa_graph::edge_storage_t &, unsigned dst, - void* filter_data) - { - decltype(leaving_edge)* data = - static_cast(filter_data); + void *filter_data) constexpr + { + decltype(leaving_edge) *data = + static_cast(filter_data); - if ((*data)(dst)) - return scc_info::edge_filter_choice::ignore; - return scc_info::edge_filter_choice::keep; - }; + if ((*data)(dst)) + return scc_info::edge_filter_choice::ignore; + return scc_info::edge_filter_choice::keep; + }; scc_info sub_scc(res_, get_state(s0), filter_edge, &leaving_edge); - // SCCs are numbered in reverse topological order, so the bottom SCC has - // index 0. + // SCCs are numbered in reverse topological order, so the bottom SCC + // has index 0. const unsigned bscc = 0; assert(sub_scc.succ(0).empty()); assert( @@ -590,23 +2885,23 @@ namespace spot if (sub_scc.succ(s).empty()) return false; return true; - } ()); + }()); assert(sub_scc.states_of(bscc).size() - >= scc_.states_of(scc_num).size()); + >= scc_.states_of(scc_num).size()); // update state2iar for (unsigned scc_state : sub_scc.states_of(bscc)) - { - iar_state& iar = num2iar.at(scc_state); - if (state2iar.find(iar.state) == state2iar.end()) - state2iar[iar.state] = iar; - } + { + iar_state &iar = num2iar.at(scc_state); + if (state2iar.find(iar.state) == state2iar.end()) + state2iar[iar.state] = iar; + } } private: - const const_twa_graph_ptr& aut_; - const std::vector& pairs_; + const const_twa_graph_ptr &aut_; + const std::vector &pairs_; const scc_info scc_; twa_graph_ptr res_; bool pretty_print_; @@ -625,1520 +2920,36 @@ namespace spot // Make this a function different from iar_maybe(), so that // iar() does not have to call a deprecated function. static twa_graph_ptr - iar_maybe_(const const_twa_graph_ptr& aut, bool pretty_print) + iar_maybe_(const const_twa_graph_ptr &aut, bool pretty_print) { std::vector pairs; if (!aut->acc().is_rabin_like(pairs)) if (!aut->acc().is_streett_like(pairs)) return nullptr; else - { - iar_generator gen(aut, pairs, pretty_print); - return gen.run(); - } - else { - iar_generator gen(aut, pairs, pretty_print); + iar_generator gen(aut, pairs, pretty_print); return gen.run(); } + else + { + iar_generator gen(aut, pairs, pretty_print); + return gen.run(); + } } } twa_graph_ptr - iar_maybe(const const_twa_graph_ptr& aut, bool pretty_print) - { - return iar_maybe_(aut, pretty_print); - } - - twa_graph_ptr - iar(const const_twa_graph_ptr& aut, bool pretty_print) + iar(const const_twa_graph_ptr &aut, bool pretty_print) { if (auto res = iar_maybe_(aut, pretty_print)) return res; throw std::runtime_error("iar() expects Rabin-like or Streett-like input"); } -// New version for paritizing -namespace -{ -struct node -{ - // A color of the permutation or a state. - unsigned label; - std::vector children; - // is_leaf is true if the label is a state of res_. - bool is_leaf; - - node() - : node(0, 0){ - } - - node(int label_, bool is_leaf_) - : label(label_) - , children(0) - , is_leaf(is_leaf_){ - } - - ~node() - { - for (auto c : children) - delete c; - } - - // Add a permutation to the tree. 
- void - add_new_perm(const std::vector& permu, int pos, unsigned state) - { - if (pos == -1) - children.push_back(new node(state, true)); - else - { - auto lab = permu[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - if (child == children.end()) - { - node* new_child = new node(lab, false); - children.push_back(new_child); - new_child->add_new_perm(permu, pos - 1, state); - } - else - (*child)->add_new_perm(permu, pos - 1, state); - } - } - - node* - get_sub_tree(const std::vector& elements, int pos) - { - if (pos < 0) - return this; - unsigned lab = elements[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - assert(child != children.end()); - return (*child)->get_sub_tree(elements, pos - 1); - } - - // Gives a state of res_ (if it exists) reachable from this node. - // If use_last is true, we take the most recent, otherwise we take - // the oldest. - unsigned - get_end(bool use_last) - { - if (children.empty()) - { - if (!is_leaf) - return -1U; - return label; - } - if (use_last) - return children[children.size() - 1]->get_end(use_last); - return children[0]->get_end(use_last); - } - - // Try to find a state compatible with the permu when seen_nb colors are - // moved. - unsigned - get_existing(const std::vector& permu, unsigned seen_nb, int pos, - bool use_last) - { - if (pos < (int) seen_nb) - return get_end(use_last); - else - { - auto lab = permu[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - if (child == children.end()) - return -1U; - return (*child)->get_existing(permu, seen_nb, pos - 1, use_last); - } - } -}; - -class state_2_car_scc -{ -std::vector nodes; - -public: -state_2_car_scc(unsigned nb_states) - : nodes(nb_states, node()){ -} - -// Try to find a state compatible with the permu when seen_nb colors are -// moved. If use_last is true, it return the last created compatible state. -// If it is false, it returns the oldest. -unsigned -get_res_state(unsigned state, const std::vector& permu, - unsigned seen_nb, bool use_last) -{ - return nodes[state].get_existing(permu, seen_nb, - permu.size() - 1, use_last); -} - -void -add_res_state(unsigned initial, unsigned state, - const std::vector& permu) -{ - nodes[initial].add_new_perm(permu, ((int) permu.size()) - 1, state); -} - -node* -get_sub_tree(const std::vector& elements, unsigned state) -{ - return nodes[state].get_sub_tree(elements, elements.size() - 1); -} -}; - -class car_generator -{ -enum algorithm { - // Try to have a Büchi condition if we have Rabin. - Rabin_to_Buchi, - Streett_to_Buchi, - // IAR - IAR_Streett, - IAR_Rabin, - // CAR - CAR, - // Changing colors transforms acceptance to max even/odd copy. - Copy_even, - Copy_odd, - // If a condition is "t" or "f", we just have to copy the automaton. 
- False_clean, - True_clean, - None -}; - - -static std::string -algorithm_to_str(algorithm algo) -{ - std::string algo_str; - switch (algo) - { - case IAR_Streett: - algo_str = "IAR (Streett)"; - break; - case IAR_Rabin: - algo_str = "IAR (Rabin)"; - break; - case CAR: - algo_str = "CAR"; - break; - case Copy_even: - algo_str = "Copy even"; - break; - case Copy_odd: - algo_str = "Copy odd"; - break; - case False_clean: - algo_str = "False clean"; - break; - case True_clean: - algo_str = "True clean"; - break; - case Streett_to_Buchi: - algo_str = "Streett to Büchi"; - break; - case Rabin_to_Buchi: - algo_str = "Rabin to Büchi"; - break; - default: - algo_str = "None"; - break; - } - return algo_str; -} - -using perm_t = std::vector; - -struct car_state -{ - // State of the original automaton - unsigned state; - // We create a new automaton for each SCC of the original automaton - // so we keep a link between a car_state and the state of the - // subautomaton. - unsigned state_scc; - // Permutation used by IAR and CAR. - perm_t perm; - - bool - operator<(const car_state &other) const - { - if (state < other.state) - return true; - if (state > other.state) - return false; - if (perm < other.perm) - return true; - if (perm > other.perm) - return false; - return state_scc < other.state_scc; - } - - std::string - to_string(algorithm algo) const - { - std::stringstream s; - s << state; - unsigned ps = perm.size(); - if (ps > 0) - { - s << " ["; - for (unsigned i = 0; i != ps; ++i) - { - if (i > 0) - s << ','; - s << perm[i]; - } - s << ']'; - } - s << ", "; - s << algorithm_to_str(algo); - return s.str(); - } -}; - -const acc_cond::mark_t & -fin(const std::vector& pairs, unsigned k, algorithm algo) -const -{ - if (algo == IAR_Rabin) - return pairs[k].fin; - else - return pairs[k].inf; -} - -acc_cond::mark_t -inf(const std::vector& pairs, unsigned k, algorithm algo) -const -{ - if (algo == IAR_Rabin) - return pairs[k].inf; - else - return pairs[k].fin; -} - -// Gives for each state a set of marks incoming to this state. -std::vector> -get_inputs_states(const twa_graph_ptr& aut) -{ - auto used = aut->acc().get_acceptance().used_sets(); - std::vector> inputs(aut->num_states()); - for (auto e : aut->edges()) - { - auto elements = e.acc & used; - if (elements.has_many()) - inputs[e.dst].insert(elements); - } - return inputs; -} - -// Gives for each state a set of pairs incoming to this state. -std::vector>> -get_inputs_iar(const twa_graph_ptr& aut, algorithm algo, - const std::set& perm_elem, - const std::vector& pairs) -{ - std::vector>> inputs(aut->num_states()); - for (auto e : aut->edges()) - { - auto acc = e.acc; - std::vector new_vect; - for (unsigned k : perm_elem) - if (acc & fin(pairs, k, algo)) - new_vect.push_back(k); - std::sort(std::begin(new_vect), std::end(new_vect)); - inputs[e.dst].insert(new_vect); - } - return inputs; -} -// Give an order from the set of marks -std::vector -group_to_vector(const std::set& group) -{ - // In this function, we have for example the marks {1, 2}, {1, 2, 3}, {2} - // A compatible order is [2, 1, 3] - std::vector group_vect(group.begin(), group.end()); - - // We sort the elements by inclusion. This function is called on a - // set of marks such that each mark is included or includes the others. - std::sort(group_vect.begin(), group_vect.end(), - [](const acc_cond::mark_t left, const acc_cond::mark_t right) - { - return (left != right) && ((left & right) == left); - }); - // At this moment, we have the vector [{2}, {1, 2}, {1, 2, 3}]. 
- // In order to create the order, we add the elements of the first element. - // Then we add the elements of the second mark (without duplication), etc. - std::vector result; - for (auto mark : group_vect) - { - for (unsigned col : mark.sets()) - if (std::find(result.begin(), result.end(), col) == result.end()) - result.push_back(col); - } - return result; -} - -// Give an order from the set of indices of pairs -std::vector -group_to_vector_iar(const std::set>& group) -{ - std::vector> group_vect(group.begin(), group.end()); - for (auto& vec : group_vect) - std::sort(std::begin(vec), std::end(vec)); - std::sort(group_vect.begin(), group_vect.end(), - [](const std::vector left, - const std::vector right) - { - return (right != left) - && std::includes(right.begin(), right.end(), - left.begin(), left.end()); - }); - std::vector result; - for (auto vec : group_vect) - for (unsigned col : vec) - if (std::find(result.begin(), result.end(), col) == result.end()) - result.push_back(col); - return result; -} - -// Give a correspondance between a mark and an order for CAR -std::map> -get_groups(const std::set& marks_input) -{ - std::map> result; - - std::vector> groups; - for (acc_cond::mark_t mark : marks_input) - { - bool added = false; - for (unsigned group = 0; group < groups.size(); ++group) - { - if (std::all_of(groups[group].begin(), groups[group].end(), - [mark](acc_cond::mark_t element) - { - return ((element | mark) == mark) - || ((element | mark) == element); - })) - { - groups[group].insert(mark); - added = true; - break; - } - } - if (!added) - groups.push_back({mark}); - } - for (auto& group : groups) - { - auto new_vector = group_to_vector(group); - for (auto mark : group) - result.insert({mark, new_vector}); - } - return result; -} - -// Give a correspondance between a mark and an order for IAR -std::map, std::vector> -get_groups_iar(const std::set>& marks_input) -{ - std::map, std::vector> result; - - std::vector>> groups; - for (auto vect : marks_input) - { - bool added = false; - for (unsigned group = 0; group < groups.size(); ++group) - if (std::all_of(groups[group].begin(), groups[group].end(), - [vect](std::vector element) - { - return std::includes(vect.begin(), vect.end(), - element.begin(), element.end()) - || std::includes(element.begin(), element.end(), - vect.begin(), vect.end()); - })) - { - groups[group].insert(vect); - added = true; - break; - } - if (!added) - groups.push_back({vect}); - } - for (auto& group : groups) - { - auto new_vector = group_to_vector_iar(group); - for (auto vect : group) - result.insert({vect, new_vector}); - } - return result; -} - -// Give for each state the correspondance between a mark and an order (CAR) -std::vector>> -get_mark_to_vector(const twa_graph_ptr& aut) -{ - std::vector>> result; - auto inputs = get_inputs_states(aut); - for (unsigned state = 0; state < inputs.size(); ++state) - result.push_back(get_groups(inputs[state])); - return result; -} - -// Give for each state the correspondance between a mark and an order (IAR) -std::vector, std::vector>> -get_iar_to_vector(const twa_graph_ptr& aut, algorithm algo, - const std::set& perm_elem, - const std::vector& pairs) -{ - std::vector, std::vector>> result; - auto inputs = get_inputs_iar(aut, algo, perm_elem, pairs); - for (unsigned state = 0; state < inputs.size(); ++state) - result.push_back(get_groups_iar(inputs[state])); - return result; -} - -public: -explicit car_generator(const const_twa_graph_ptr &a, to_parity_options options) - : aut_(a) - , scc_(scc_info(a)) - , 
is_odd(false) - , options(options) -{ - if (options.pretty_print) - names = new std::vector(); - else - names = nullptr; -} - -// During the creation of the states, we had to choose between a set of -// compatible states. But it is possible to create another compatible state -// after. This function checks if a compatible state was created after and -// use it. -void -change_transitions_destination(twa_graph_ptr& aut, -const std::vector& states, -std::map>& partial_history, -state_2_car_scc& state_2_car) -{ - for (auto s : states) - for (auto& edge : aut->out(s)) - { - unsigned - src = edge.src, - dst = edge.dst; - // We don't change loops - if (src == dst) - continue; - unsigned dst_scc = num2car[dst].state_scc; - auto cant_change = partial_history[aut->edge_number(edge)]; - edge.dst = state_2_car.get_sub_tree(cant_change, dst_scc) - ->get_end(true); - } -} - -unsigned -apply_false_true_clean(const twa_graph_ptr &sub_automaton, bool is_true, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton-> - get_named_prop>("original-states"); - - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - unsigned s_aut = (*init_states)[state]; - - car_state new_car = { s_aut, state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - if (options.pretty_print) - names->push_back( - new_car.to_string(is_true ? True_clean : False_clean)); - state2car_local[s_aut] = new_car; - } - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - unsigned s_aut = (*init_states)[state]; - car_state src = { s_aut, state, perm_t() }; - unsigned src_state = car2num_local[src]; - for (auto e : aut_->out(s_aut)) - { - auto col = is_true ^ !is_odd; - if (((unsigned)col) > max_free_color) - throw std::runtime_error("CAR needs more sets"); - if (scc_.scc_of(s_aut) == scc_.scc_of(e.dst)) - { - for (auto c : e.acc.sets()) - if (inf_fin_prefix[c] + is_odd > col) - col = inf_fin_prefix[c] + is_odd; - acc_cond::mark_t cond = { (unsigned) col }; - res_->new_edge( - src_state, car2num_local[state2car_local[e.dst]], - e.cond, cond); - } - } - } - return sub_automaton->num_states(); -} - -unsigned -apply_copy(const twa_graph_ptr &sub_automaton, - const std::vector &permut, - bool copy_odd, - const std::vector& inf_fin_prefix, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton - ->get_named_prop>("original-states"); - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - car_state new_car = { (*init_states)[state], state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - state2car_local[(*init_states)[state]] = new_car; - if (options.pretty_print) - names->push_back( - new_car.to_string(copy_odd ? 
Copy_odd : Copy_even)); - } - auto cond_col = sub_automaton->acc().get_acceptance().used_sets(); - for (unsigned s = 0; s < sub_automaton->num_states(); ++s) - { - for (auto e : sub_automaton->out(s)) - { - acc_cond::mark_t mark = { }; - int max_edge = -1; - for (auto col : e.acc.sets()) - { - if (cond_col.has(col)) - max_edge = std::max(max_edge, (int) permut[col]); - if (inf_fin_prefix[col] + (is_odd || copy_odd) > max_edge) - max_edge = inf_fin_prefix[col] + (is_odd || copy_odd); - } - if (max_edge != -1) - mark.set((unsigned) max_edge); - car_state src = { (*init_states)[s], s, perm_t() }, - dst = { (*init_states)[e.dst], e.dst, perm_t() }; - unsigned src_state = car2num_local[src], - dst_state = car2num_local[dst]; - res_->new_edge(src_state, dst_state, e.cond, mark); - } - } - return sub_automaton->num_states(); -} - -unsigned -apply_to_Buchi(const twa_graph_ptr& sub_automaton, - const twa_graph_ptr& buchi, - bool is_streett_to_buchi, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton - ->get_named_prop>("original-states"); - - for (unsigned state = 0; state < buchi->num_states(); ++state) - { - car_state new_car = { (*init_states)[state], state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - state2car_local[(*init_states)[state]] = new_car; - if (options.pretty_print) - names->push_back(new_car.to_string( - is_streett_to_buchi ? Streett_to_Buchi : Rabin_to_Buchi)); - } - auto g = buchi->get_graph(); - for (unsigned s = 0; s < buchi->num_states(); ++s) - { - unsigned b = g.state_storage(s).succ; - while (b) - { - auto& e = g.edge_storage(b); - auto acc = e.acc; - acc <<= (is_odd + is_streett_to_buchi); - if ((is_odd || is_streett_to_buchi) && acc == acc_cond::mark_t{ }) - acc = { (unsigned) (is_streett_to_buchi && is_odd) }; - car_state src = { (*init_states)[s], s, perm_t() }, - dst = { (*init_states)[e.dst], e.dst, perm_t() }; - unsigned src_state = car2num_local[src], - dst_state = car2num_local[dst]; - int col = ((int) acc.max_set()) - 1; - if (col > (int) max_free_color) - throw std::runtime_error("CAR needs more sets"); - auto& e2 = sub_automaton->get_graph().edge_storage(b); - for (auto c : e2.acc.sets()) - { - if (inf_fin_prefix[c] + is_odd > col) - col = inf_fin_prefix[c] + is_odd; - } - if (col != -1) - acc = { (unsigned) col }; - else - acc = {}; - res_->new_edge(src_state, dst_state, e.cond, acc); - b = e.next_succ; - } - } - return buchi->num_states(); -} - -// Create a permutation for the first state of a SCC (IAR) -void -initial_perm_iar(std::set &perm_elem, perm_t &p0, - algorithm algo, const acc_cond::mark_t &colors, - const std::vector &pairs) -{ - for (unsigned k = 0; k != pairs.size(); ++k) - if (!inf(pairs, k, algo) || (colors & (pairs[k].fin | pairs[k].inf))) - { - perm_elem.insert(k); - p0.push_back(k); - } -} - -// Create a permutation for the first state of a SCC (CAR) -void -initial_perm_car(perm_t &p0, const acc_cond::mark_t &colors) -{ - auto cont = colors.sets(); - p0.assign(cont.begin(), cont.end()); -} - -void -find_new_perm_iar(perm_t &new_perm, - const std::vector &pairs, - const acc_cond::mark_t &acc, - algorithm algo, const std::set &perm_elem, - unsigned &seen_nb) -{ - for (unsigned k : perm_elem) - if (acc & fin(pairs, k, algo)) - { - ++seen_nb; - auto it = std::find(new_perm.begin(), new_perm.end(), k); - - // move the pair in front of 
the permutation - std::rotate(new_perm.begin(), it, it + 1); - } -} - -// Given the set acc of colors appearing on an edge, create a new -// permutation new_perm, and give the number seen_nb of colors moved to -// the head of the permutation. -void -find_new_perm_car(perm_t &new_perm, const acc_cond::mark_t &acc, - unsigned &seen_nb, unsigned &h) -{ - for (unsigned k : acc.sets()) - { - auto it = std::find(new_perm.begin(), new_perm.end(), k); - if (it != new_perm.end()) - { - h = std::max(h, unsigned(it - new_perm.begin()) + 1); - std::rotate(new_perm.begin(), it, it + 1); - ++seen_nb; - } - } -} - -void -get_acceptance_iar(algorithm algo, const perm_t ¤t_perm, - const std::vector &pairs, - const acc_cond::mark_t &e_acc, acc_cond::mark_t &acc) -{ - unsigned delta_acc = (algo == IAR_Streett) && is_odd; - - // find the maximal index encountered by this transition - unsigned maxint = -1U; - - for (int k = current_perm.size() - 1; k >= 0; --k) - { - unsigned pk = current_perm[k]; - - if (!inf(pairs, pk, - algo) - || (e_acc & (pairs[pk].fin | pairs[pk].inf))) - { - maxint = k; - break; - } - } - unsigned value; - - if (maxint == -1U) - value = delta_acc; - else if (e_acc & fin(pairs, current_perm[maxint], algo)) - value = 2 * maxint + 2 + delta_acc; - else - value = 2 * maxint + 1 + delta_acc; - acc = { value }; -} - -void -get_acceptance_car(const acc_cond &sub_aut_cond, const perm_t &new_perm, - unsigned h, acc_cond::mark_t &acc) -{ - acc_cond::mark_t m(new_perm.begin(), new_perm.begin() + h); - bool rej = !sub_aut_cond.accepting(m); - unsigned value = 2 * h + rej + is_odd; - acc = { value }; -} - -unsigned -apply_lar(const twa_graph_ptr &sub_automaton, - unsigned init, std::vector &pairs, - algorithm algo, unsigned scc_num, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local, - unsigned max_states) -{ - auto maps = get_mark_to_vector(sub_automaton); - // For each edge e of res_, we store the elements of the permutation - // that are not moved, and we respect the order. 
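// Worked example (illustrative values): assume the record of the
// source state is [3, 1, 2, 0] and the edge carries colors {0, 1}.
// find_new_perm_car() rotates 0 and then 1 to the front,
//
//   [3, 1, 2, 0]  --0-->  [0, 3, 1, 2]  --1-->  [1, 0, 3, 2]
//
// giving new_perm = [1, 0, 3, 2], seen_nb = 2, and h = 4 because
// color 0 had to be fetched from the last position.  The "not moved"
// part remembered for the resulting edge is the suffix [3, 2], in its
// original relative order.  get_acceptance_car() then builds the mark
// {1, 0, 3, 2} from the first h entries of new_perm and emits 2*h if
// that set satisfies the acceptance condition of the SCC, and 2*h + 1
// otherwise (one more is added when an odd parity is being produced).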
- std::map> edge_to_colors; - unsigned nb_created_states = 0; - auto state_2_car = state_2_car_scc(sub_automaton->num_states()); - std::vector* init_states = sub_automaton-> - get_named_prop>("original-states"); - std::deque todo; - auto get_state = - [&](const car_state &s){ - auto it = car2num_local.find(s); - - if (it == car2num_local.end()) - { - ++nb_created_states; - unsigned nb = res_->new_state(); - if (options.search_ex) - state_2_car.add_res_state(s.state_scc, nb, s.perm); - car2num_local[s] = nb; - num2car.insert(num2car.begin() + nb, s); - - todo.push_back(s); - if (options.pretty_print) - names->push_back(s.to_string(algo)); - return nb; - } - return it->second; - }; - - auto colors = sub_automaton->acc().get_acceptance().used_sets(); - std::set perm_elem; - - perm_t p0 = { }; - switch (algo) - { - case IAR_Streett: - case IAR_Rabin: - initial_perm_iar(perm_elem, p0, algo, colors, pairs); - break; - case CAR: - initial_perm_car(p0, colors); - break; - default: - assert(false); - break; - } - - std::vector, std::vector>> - iar_maps; - if (algo == IAR_Streett || algo == IAR_Rabin) - iar_maps = get_iar_to_vector(sub_automaton, algo, perm_elem, pairs); - - car_state s0{ (*init_states)[init], init, p0 }; - get_state(s0); // put s0 in todo - - // the main loop - while (!todo.empty()) - { - car_state current = todo.front(); - todo.pop_front(); - - unsigned src_num = get_state(current); - for (const auto &e : sub_automaton->out(current.state_scc)) - { - perm_t new_perm = current.perm; - - // Count pairs whose fin-part is seen on this transition - unsigned seen_nb = 0; - - // consider the pairs for this SCC only - unsigned h = 0; - - switch (algo) - { - case IAR_Rabin: - case IAR_Streett: - find_new_perm_iar(new_perm, pairs, e.acc, algo, - perm_elem, seen_nb); - break; - case CAR: - find_new_perm_car(new_perm, e.acc, seen_nb, h); - break; - default: - assert(false); - } - - std::vector not_moved(new_perm.begin() + seen_nb, - new_perm.end()); - - if (options.force_order) - { - if (algo == CAR && seen_nb > 1) - { - auto map = maps[e.dst]; - acc_cond::mark_t first_vals( - new_perm.begin(), new_perm.begin() + seen_nb); - auto new_start = map.find(first_vals); - assert(new_start->second.size() >= seen_nb); - assert(new_start != map.end()); - for (unsigned i = 0; i < seen_nb; ++i) - new_perm[i] = new_start->second[i]; - } - else if ((algo == IAR_Streett || algo == IAR_Rabin) - && seen_nb > 1) - { - auto map = iar_maps[e.dst]; - std::vector first_vals( - new_perm.begin(), new_perm.begin() + seen_nb); - std::sort(std::begin(first_vals), std::end(first_vals)); - auto new_start = map.find(first_vals); - assert(new_start->second.size() >= seen_nb); - assert(new_start != map.end()); - for (unsigned i = 0; i < seen_nb; ++i) - new_perm[i] = new_start->second[i]; - } - } - - // Optimization: when several indices are seen in the - // transition, they move at the front of new_perm in any - // order. Check whether there already exists an car_state - // that matches this condition. 
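// For instance, if colors 0 and 1 have both just been moved to the
// front of the record, the moved colors may end up there in either
// order, so the destinations [0, 1, 3, 2] and [1, 0, 3, 2] are
// interchangeable.  get_res_state() walks the trie of states already
// created for the same destination state of the sub-automaton and
// accepts any state whose record agrees with new_perm on the positions
// >= seen_nb (the suffix [3, 2] in the example above); with use_last it
// prefers the most recently created such state, otherwise the oldest.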
- car_state dst; - unsigned dst_num = -1U; - - if (options.search_ex) - dst_num = state_2_car.get_res_state(e.dst, new_perm, seen_nb, - options.use_last); - - if (dst_num == -1U) - { - auto dst = car_state{ (*init_states)[e.dst], e.dst, new_perm }; - dst_num = get_state(dst); - if (nb_created_states > max_states) - return -1U; - } - - acc_cond::mark_t acc = { }; - - switch (algo) - { - case IAR_Rabin: - case IAR_Streett: - get_acceptance_iar(algo, current.perm, pairs, e.acc, acc); - break; - case CAR: - get_acceptance_car(sub_automaton->acc(), new_perm, h, acc); - break; - default: - assert(false); - } - - unsigned acc_col = acc.min_set() - 1; - if (options.parity_prefix) - { - if (acc_col > max_free_color) - throw std::runtime_error("CAR needs more sets"); - // parity prefix - for (auto col : e.acc.sets()) - { - if (inf_fin_prefix[col] + is_odd > (int) acc_col) - acc_col = (unsigned) inf_fin_prefix[col] + is_odd; - } - } - auto new_e = res_->new_edge(src_num, dst_num, e.cond, { acc_col }); - edge_to_colors.insert({new_e, not_moved}); - } - } - if (options.search_ex && options.use_last) - { - std::vector added_states; - std::transform(car2num_local.begin(), car2num_local.end(), - std::back_inserter(added_states), - [](std::pair pair) { - return pair.second; - }); - change_transitions_destination( - res_, added_states, edge_to_colors, state_2_car); - } - auto leaving_edge = - [&](unsigned d){ - return scc_.scc_of(num2car.at(d).state) != scc_num; - }; - auto filter_edge = - [](const twa_graph::edge_storage_t &, - unsigned dst, - void* filter_data){ - decltype(leaving_edge) *data = - static_cast(filter_data); - - if ((*data)(dst)) - return scc_info::edge_filter_choice::ignore; - - return scc_info::edge_filter_choice::keep; - }; - scc_info sub_scc(res_, get_state(s0), filter_edge, &leaving_edge); - - // SCCs are numbered in reverse topological order, so the bottom SCC has - // index 0. 
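// Keeping only the bottom SCC is the usual trimming step of this kind
// of construction: the sub-automaton was built from a single SCC of
// the input, so from every state of the copy some state of the unique
// terminal SCC is reachable, and what a run does before reaching it
// has no influence on acceptance.  The assertions below check that
// this terminal SCC is indeed unique and that it contains at least as
// many states as the SCC it was built from.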
- const unsigned bscc = 0; - assert(sub_scc.scc_count() != 0); - assert(sub_scc.succ(0).empty()); - assert( - [&](){ - for (unsigned s = 1; s != sub_scc.scc_count(); ++s) - if (sub_scc.succ(s).empty()) - return false; - - return true; - } ()); - - assert(sub_scc.states_of(bscc).size() >= sub_automaton->num_states()); - - // update state2car - for (unsigned scc_state : sub_scc.states_of(bscc)) - { - car_state &car = num2car.at(scc_state); - - if (state2car_local.find(car.state) == state2car_local.end()) - state2car_local[car.state] = car; - } - return sub_scc.states_of(bscc).size(); -} - -algorithm -chooseAlgo(twa_graph_ptr &sub_automaton, - twa_graph_ptr &rabin_aut, - std::vector &pairs, - std::vector &permut) -{ - auto scc_condition = sub_automaton->acc(); - if (options.parity_equiv) - { - if (scc_condition.is_f()) - return False_clean; - if (scc_condition.is_t()) - return True_clean; - std::vector permut_tmp(scc_condition.all_sets().max_set(), -1); - - if (!is_odd && scc_condition.is_parity_max_equiv(permut_tmp, true)) - { - for (auto c : permut_tmp) - permut.push_back((unsigned) c); - - scc_condition.apply_permutation(permut); - sub_automaton->apply_permutation(permut); - return Copy_even; - } - std::fill(permut_tmp.begin(), permut_tmp.end(), -1); - if (scc_condition.is_parity_max_equiv(permut_tmp, false)) - { - for (auto c : permut_tmp) - permut.push_back((unsigned) c); - scc_condition.apply_permutation(permut); - sub_automaton->apply_permutation(permut); - return Copy_odd; - } - } - - if (options.rabin_to_buchi) - { - auto ra = rabin_to_buchi_if_realizable(sub_automaton); - if (ra != nullptr) - { - rabin_aut = ra; - return Rabin_to_Buchi; - } - else - { - bool streett_buchi = false; - auto sub_cond = sub_automaton->get_acceptance(); - sub_automaton->set_acceptance(sub_cond.complement()); - auto ra = rabin_to_buchi_if_realizable(sub_automaton); - streett_buchi = (ra != nullptr); - sub_automaton->set_acceptance(sub_cond); - if (streett_buchi) - { - rabin_aut = ra; - return Streett_to_Buchi; - } - } - } - - auto pairs1 = std::vector(); - auto pairs2 = std::vector(); - std::sort(pairs1.begin(), pairs1.end()); - pairs1.erase(std::unique(pairs1.begin(), pairs1.end()), pairs1.end()); - std::sort(pairs2.begin(), pairs2.end()); - pairs2.erase(std::unique(pairs2.begin(), pairs2.end()), pairs2.end()); - bool is_r_like = scc_condition.is_rabin_like(pairs1); - bool is_s_like = scc_condition.is_streett_like(pairs2); - unsigned num_cols = scc_condition.get_acceptance().used_sets().count(); - if (is_r_like) - { - if ((is_s_like && pairs1.size() < pairs2.size()) || !is_s_like) - { - if (pairs1.size() > num_cols) - return CAR; - pairs = pairs1; - return IAR_Rabin; - } - else if (is_s_like) - { - if (pairs2.size() > num_cols) - return CAR; - pairs = pairs2; - return IAR_Streett; - } - } - else - { - if (is_s_like) - { - if (pairs2.size() > num_cols) - return CAR; - pairs = pairs2; - return IAR_Streett; - } - } - return CAR; -} - -unsigned -build_scc(twa_graph_ptr &sub_automaton, - unsigned scc_num, - std::map& state2car_local, - std::map&car2num_local, - algorithm& algo, - unsigned max_states = -1U) -{ - - std::vector parity_prefix_colors (SPOT_MAX_ACCSETS, - - SPOT_MAX_ACCSETS - 2); - unsigned min_prefix_color = SPOT_MAX_ACCSETS + 1; - if (options.parity_prefix) - { - auto new_acc = sub_automaton->acc(); - auto colors = std::vector(); - bool inf_start = - sub_automaton->acc().has_parity_prefix(new_acc, colors); - sub_automaton->set_acceptance(new_acc); - for (unsigned i = 0; i < colors.size(); ++i) - 
parity_prefix_colors[colors[i]] = - SPOT_MAX_ACCSETS - 4 - i - !inf_start; - if (colors.size() > 0) - min_prefix_color = - SPOT_MAX_ACCSETS - 4 - colors.size() - 1 - !inf_start; - } - --min_prefix_color; - - unsigned init = 0; - - std::vector pairs = { }; - auto permut = std::vector(); - twa_graph_ptr rabin_aut = nullptr; - algo = chooseAlgo(sub_automaton, rabin_aut, pairs, permut); - switch (algo) - { - case False_clean: - case True_clean: - return apply_false_true_clean(sub_automaton, (algo == True_clean), - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local); - break; - case IAR_Streett: - case IAR_Rabin: - case CAR: - return apply_lar(sub_automaton, init, pairs, algo, scc_num, - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local, max_states); - break; - case Copy_odd: - case Copy_even: - return apply_copy(sub_automaton, permut, algo == Copy_odd, - parity_prefix_colors, state2car_local, - car2num_local); - break; - case Rabin_to_Buchi: - case Streett_to_Buchi: - return apply_to_Buchi(sub_automaton, rabin_aut, - (algo == Streett_to_Buchi), - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local); - break; - default: - break; - } - return -1U; -} - -public: -twa_graph_ptr -run() -{ - res_ = make_twa_graph(aut_->get_dict()); - res_->copy_ap_of(aut_); - for (unsigned scc = 0; scc < scc_.scc_count(); ++scc) - { - if (!scc_.is_useful_scc(scc)) - continue; - auto sub_automata = scc_.split_on_sets(scc, { }, true); - if (sub_automata.empty()) - { - for (auto state : scc_.states_of(scc)) - { - auto new_state = res_->new_state(); - car_state new_car = { state, state, perm_t() }; - car2num[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - if (options.pretty_print) - names->push_back(new_car.to_string(None)); - state2car[state] = new_car; - } - continue; - } - - auto sub_automaton = sub_automata[0]; - auto deg = sub_automaton; - if (options.acc_clean) - simplify_acceptance_here(sub_automaton); - bool has_degeneralized = false; - if (options.partial_degen) - { - std::vector forbid; - auto m = - is_partially_degeneralizable(sub_automaton, true, - true, forbid); - while (m != acc_cond::mark_t {}) - { - auto tmp = partial_degeneralize(deg, m); - simplify_acceptance_here(tmp); - if (tmp->get_acceptance().used_sets().count() - < deg->get_acceptance().used_sets().count() || - !(options.reduce_col_deg)) - { - deg = tmp; - has_degeneralized = true; - } - else - forbid.push_back(m); - m = is_partially_degeneralizable(deg, true, true, forbid); - } - } - - if (options.propagate_col) - { - propagate_marks_here(sub_automaton); - if (deg != sub_automaton) - propagate_marks_here(deg); - } - - std::map state2car_sub, state2car_deg; - std::map car2num_sub, car2num_deg; - - unsigned nb_states_deg = -1U, - nb_states_sub = -1U; - - algorithm algo_sub, algo_deg; - unsigned max_states_sub_car = -1U; - // We try with and without degeneralization and we keep the best. - if (has_degeneralized) - { - nb_states_deg = - build_scc(deg, scc, state2car_deg, car2num_deg, algo_deg); - // We suppose that if we see nb_states_deg + 1000 states when - // when construct the version without degeneralization during the - // construction, we will not be able to have nb_states_deg after - // removing useless states. So we will stop the execution. 
- max_states_sub_car = - 10000 + nb_states_deg - 1; - } - if (!options.force_degen || !has_degeneralized) - nb_states_sub = - build_scc(sub_automaton, scc, state2car_sub, car2num_sub, - algo_sub, max_states_sub_car); - if (nb_states_deg < nb_states_sub) - { - state2car.insert(state2car_deg.begin(), state2car_deg.end()); - car2num.insert(car2num_deg.begin(), car2num_deg.end()); - algo_sub = algo_deg; - } - else - { - state2car.insert(state2car_sub.begin(), state2car_sub.end()); - car2num.insert(car2num_sub.begin(), car2num_sub.end()); - } - if ((algo_sub == IAR_Rabin || algo_sub == Copy_odd) && !is_odd) - { - is_odd = true; - for (auto &edge : res_->edges()) - { - if (scc_.scc_of(num2car[edge.src].state) != scc - && scc_.scc_of(num2car[edge.dst].state) != scc) - { - if (edge.acc == acc_cond::mark_t{}) - edge.acc = { 0 }; - else - edge.acc <<= 1; - } - } - } - } - - for (unsigned state = 0; state < res_->num_states(); ++state) - { - unsigned original_state = num2car.at(state).state; - auto state_scc = scc_.scc_of(original_state); - for (auto edge : aut_->out(original_state)) - { - if (scc_.scc_of(edge.dst) != state_scc) - { - auto car = state2car.find(edge.dst); - if (car != state2car.end()) - { - unsigned res_dst = car2num.at(car->second); - res_->new_edge(state, res_dst, edge.cond, { }); - } - } - } - } - unsigned initial_state = aut_->get_init_state_number(); - auto initial_car_ptr = state2car.find(initial_state); - car_state initial_car; - // If we take an automaton with one state and without transition, - // the SCC was useless so state2car doesn't have initial_state - if (initial_car_ptr == state2car.end()) - { - assert(res_->num_states() == 0); - auto new_state = res_->new_state(); - car_state new_car = {initial_state, 0, perm_t()}; - state2car[initial_state] = new_car; - if (options.pretty_print) - names->push_back(new_car.to_string(None)); - num2car.insert(num2car.begin() + new_state, new_car); - car2num[new_car] = new_state; - initial_car = new_car; - } - else - initial_car = initial_car_ptr->second; - auto initial_state_res = car2num.find(initial_car); - if (initial_state_res != car2num.end()) - res_->set_init_state(initial_state_res->second); - else - res_->new_state(); - if (options.pretty_print) - res_->set_named_prop("state-names", names); - - res_->purge_unreachable_states(); - // If parity_prefix is used, we use all available colors by - // default: The IAR/CAR are using lower indices, and the prefix is - // using the upper indices. So we use reduce_parity() to clear - // the mess. If parity_prefix is not used, - unsigned max_color = SPOT_MAX_ACCSETS; - if (!options.parity_prefix) - { - acc_cond::mark_t all = {}; - for (auto& e: res_->edges()) - all |= e.acc; - max_color = all.max_set(); - } - res_->set_acceptance(acc_cond::acc_code::parity_max(is_odd, max_color)); - if (options.parity_prefix) - reduce_parity_here(res_); - return res_; -} - -private: -const const_twa_graph_ptr &aut_; -const scc_info scc_; -twa_graph_ptr res_; -// Says if we constructing an odd or even max -bool is_odd; - -std::vector num2car; -std::map state2car; -std::map car2num; - -to_parity_options options; - -std::vector* names; -}; // car_generator - -}// namespace - - -twa_graph_ptr -to_parity(const const_twa_graph_ptr &aut, const to_parity_options options) -{ - return car_generator(aut, options).run(); -} - - // Old version of CAR. - namespace - { - struct lar_state - { - unsigned state; - std::vector perm; - - bool operator<(const lar_state& s) const - { - return state == s.state ? 
perm < s.perm : state < s.state; - } - - std::string to_string() const - { - std::stringstream s; - s << state << " ["; - unsigned ps = perm.size(); - for (unsigned i = 0; i != ps; ++i) - { - if (i > 0) - s << ','; - s << perm[i]; - } - s << ']'; - return s.str(); - } - }; - - class lar_generator - { - const const_twa_graph_ptr& aut_; - twa_graph_ptr res_; - const bool pretty_print; - - std::map lar2num; - public: - explicit lar_generator(const const_twa_graph_ptr& a, bool pretty_print) - : aut_(a) - , res_(nullptr) - , pretty_print(pretty_print) - {} - - twa_graph_ptr run() - { - res_ = make_twa_graph(aut_->get_dict()); - res_->copy_ap_of(aut_); - - std::deque todo; - auto get_state = [this, &todo](const lar_state& s) - { - auto it = lar2num.emplace(s, -1U); - if (it.second) // insertion took place - { - unsigned nb = res_->new_state(); - it.first->second = nb; - todo.push_back(s); - } - return it.first->second; - }; - - std::vector initial_perm(aut_->num_sets()); - std::iota(initial_perm.begin(), initial_perm.end(), 0); - { - lar_state s0{aut_->get_init_state_number(), initial_perm}; - res_->set_init_state(get_state(s0)); - } - - scc_info si(aut_, scc_info_options::NONE); - // main loop - while (!todo.empty()) - { - lar_state current = todo.front(); - todo.pop_front(); - - // TODO: todo could store this number to avoid one lookup - unsigned src_num = get_state(current); - - unsigned source_scc = si.scc_of(current.state); - for (const auto& e : aut_->out(current.state)) - { - // find the new permutation - std::vector new_perm = current.perm; - unsigned h = 0; - for (unsigned k : e.acc.sets()) - { - auto it = std::find(new_perm.begin(), new_perm.end(), k); - h = std::max(h, unsigned(new_perm.end() - it)); - std::rotate(it, it+1, new_perm.end()); - } - - if (source_scc != si.scc_of(e.dst)) - { - new_perm = initial_perm; - h = 0; - } - - lar_state dst{e.dst, new_perm}; - unsigned dst_num = get_state(dst); - - // Do the h last elements satisfy the acceptance condition? - // If they do, emit 2h, if they don't emit 2h+1. 
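// Small worked example (assuming the input acceptance is the single
// Rabin pair Fin(0) & Inf(1), so aut_->num_sets() == 2): from the
// initial record [0, 1], an edge carrying {1} finds 1 in the last
// position, so h = 1, the record is unchanged, the mark {1} is
// accepting, and the edge gets color 2*1 = 2.  An edge carrying {0}
// instead finds 0 at distance 2 from the end, so h = 2, the record
// becomes [1, 0], the mark {0, 1} violates Fin(0), and the edge gets
// color 2*2 + 1 = 5.  Under the "parity max even" condition set at the
// end of run(), a run that eventually stops seeing color 0 but keeps
// seeing color 1 has 2 as its maximal recurring color and is accepted,
// while a run that keeps seeing color 0 is dominated by an odd color
// (3 or 5) and is rejected, matching Fin(0) & Inf(1).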
- acc_cond::mark_t m(new_perm.end() - h, new_perm.end()); - bool rej = !aut_->acc().accepting(m); - res_->new_edge(src_num, dst_num, e.cond, {2*h + rej}); - } - } - - // parity max even - unsigned sets = 2*aut_->num_sets() + 2; - res_->set_acceptance(sets, acc_cond::acc_code::parity_max_even(sets)); - - if (pretty_print) - { - auto names = new std::vector(res_->num_states()); - for (const auto& p : lar2num) - (*names)[p.second] = p.first.to_string(); - res_->set_named_prop("state-names", names); - } - - return res_; - } - }; - } - twa_graph_ptr - to_parity_old(const const_twa_graph_ptr& aut, bool pretty_print) + iar_maybe(const const_twa_graph_ptr &aut, bool pretty_print) { - if (!aut->is_existential()) - throw std::runtime_error("LAR does not handle alternation"); - // if aut is already parity return it as is - if (aut->acc().is_parity()) - return std::const_pointer_cast(aut); - - lar_generator gen(aut, pretty_print); - return gen.run(); + return iar_maybe_(aut, pretty_print); } - } diff --git a/spot/twaalgos/toparity.hh b/spot/twaalgos/toparity.hh index 6aecf7659..d82403aa5 100644 --- a/spot/twaalgos/toparity.hh +++ b/spot/twaalgos/toparity.hh @@ -19,10 +19,24 @@ #pragma once -#include +#include +#include +#include namespace spot { + /// Structure used by to_parity to store some information about the + /// construction + struct to_parity_data + { + /// Total number of states created + unsigned nb_states_created = 0; + /// Total number of edges created + unsigned nb_edges_created = 0; + /// Name of algorithms used + std::vector algorithms_used; + }; + /// \ingroup twa_acc_transform /// \brief Options to control various optimizations of to_parity(). struct to_parity_options @@ -35,6 +49,9 @@ namespace spot /// most recent state when we find multiple existing state /// compatible with the current move. bool use_last = true; + /// If \c use_last_post_process is true, then when LAR ends, it tries to + /// replace the destination of an edge by the newest compatible state. + bool use_last_post_process = false; /// If \c force_order is true, we force to use an order when CAR or IAR is /// applied. Given a state s and a set E ({0}, {0 1}, {2} for example) we /// construct an order on colors. With the given example, we ask to have @@ -45,16 +62,26 @@ namespace spot /// degeneralization to remove occurrences of acceptance /// subformulas such as `Fin(x) | Fin(y)` or `Inf(x) & Inf(y)`. bool partial_degen = true; - /// If \c force_degen is false, to_parity will checks if we can - /// get a better result if we don't apply partial_degeneralize. - bool force_degen = true; /// If \c scc_acc_clean is true, to_parity() will ignore colors /// not occurring in an SCC while processing this SCC. bool acc_clean = true; /// If \c parity_equiv is true, to_parity() will check if there - /// exists a permutations of colors such that the acceptance - /// condition is a parity condition. + /// exists a way to see the acceptance condition as a parity max one. bool parity_equiv = true; + /// If \c Car is true, to_parity will try to apply CAR. It is a version of + /// LAR that tracks colors. + bool car = true; + /// If \c tar is true, to_parity will try to apply TAR. It is a version of + /// LAR that tracks transitions instead of colors. + bool tar = false; + /// If \c iar is true, to_parity will try to apply IAR. + bool iar = true; + /// if \c lar_dfs is true, then when LAR is used the next state of the + /// result that will be processed is the last created state. 
+ bool lar_dfs = true; + /// If \c bscc is true, to_parity() will only keep the bottommost automaton + /// when it applies LAR. + bool bscc = true; /// If \c parity_prefix is true, to_parity() will use a special /// handling for acceptance conditions of the form `Inf(m0) | /// (Fin(m1) & (Inf(m2) | (… β)))` that start as a parity @@ -62,30 +89,42 @@ namespace spot /// `β` can be an arbitrary formula. In this case, the paritization /// algorithm is really applied only to `β`, and the marks of the /// prefix are appended after a suitable renumbering. - /// - /// For technical reasons, activating this option (and this is the - /// default) causes reduce_parity() to be called at the end to - /// minimize the number of colors used. It is therefore - /// recommended to disable this option when one wants to follow - /// the output CAR/IAR constructions. bool parity_prefix = true; + /// If \c parity_prefix_general is true, to_parity() will rewrite the + /// acceptance condition as `Inf(m0) | (Fin(m1) & (Inf(m2) | (… β)))` before + /// applying the same construction as with the option \c parity_prefix. + bool parity_prefix_general = false; + /// If \c generic_emptiness is true, to_parity() will check if the automaton + /// does not accept any word with an emptiness check algorithm. + bool generic_emptiness = false; /// If \c rabin_to_buchi is true, to_parity() tries to convert a Rabin or /// Streett condition to Büchi or co-Büchi with /// rabin_to_buchi_if_realizable(). bool rabin_to_buchi = true; - /// Only allow degeneralization if it reduces the number of colors in the - /// acceptance condition. + /// If \c buchi_type_to_buchi is true, to_parity converts a + /// (co-)Büchi type automaton to a (co-)Büchi automaton. + bool buchi_type_to_buchi = false; + /// If \c parity_type_to_parity is true, to_parity converts a + /// parity type automaton to a parity automaton. + bool parity_type_to_parity = false; + /// Only allow partial degeneralization if it reduces the number of colors + /// in the acceptance condition or if it implies to apply IAR instead of + /// CAR. bool reduce_col_deg = false; /// Use propagate_marks_here to increase the number of marks on transition /// in order to move more colors (and increase the number of /// compatible states) when we apply LAR. bool propagate_col = true; + /// If \c use_generalized_buchi is true, each SCC will use a generalized + /// Rabin acceptance in order to avoid CAR. + bool use_generalized_rabin = false; /// If \c pretty_print is true, states of the output automaton are /// named to help debugging. bool pretty_print = false; + /// Structure used to store some information about the construction. + to_parity_data* datas = nullptr; }; - /// \ingroup twa_acc_transform /// \brief Take an automaton with any acceptance condition and return an /// equivalent parity automaton. diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index b8e47bc2a..d210033e3 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -181,7 +181,6 @@ namespace spot /// \brief Render the tree as in GraphViz format. 
void dot(std::ostream&) const; - private: struct zielonka_node { unsigned parent; @@ -191,6 +190,7 @@ namespace spot acc_cond::mark_t colors; }; std::vector nodes_; + private: unsigned one_branch_ = 0; unsigned num_branches_ = 0; bool is_even_; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 6cb449012..95e0bf4d7 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -58,11 +58,11 @@ parity 13; 13 1 1 6,10; 1 1 1 6,3; parity 5; -1 1 0 4,5 "INIT"; +0 1 0 2,3 "INIT"; +3 3 1 1; +1 1 0 4,5; 5 2 1 1,1; 4 3 1 0,1; -0 1 0 2,3; -3 3 1 1; 2 1 1 0,0; EOF @@ -414,13 +414,13 @@ grep 'DPA has 29 states' err ltlsynt --verbose -x wdba-minimize=1 --algo=ps --outs=p1 --ins=p0 -f "$f" 2>err grep 'DPA has 12 states' err -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=no | grep 'States: 5' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim | grep 'States: 5' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa | grep 'States: 4' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" | grep 'States: 4' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=sat | grep 'States: 2' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim-sat | grep 'States: 2' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa-sat | grep 'States: 4' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=no | grep 'States: 7' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim | grep 'States: 7' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa | grep 'States: 6' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" | grep 'States: 6' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=sat | grep 'States: 3' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim-sat | grep 'States: 3' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa-sat | grep 'States: 6' # The following used to raise an exception because of a bug in # split_2step_fast_here(). 
@@ -707,7 +707,7 @@ automaton has 4 states and 1 colors LAR construction done in X seconds DPA has 4 states, 1 colors split inputs and outputs done in X seconds -automaton has 9 states +automaton has 10 states solving game with acceptance: Büchi game solved in X seconds simplification took X seconds diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index 891ebcd94..f3ffd7502 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -47,153 +47,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -285,153 +285,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -501,153 +501,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - 
"t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", "\n", - "0\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", "\n", - "1\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", "\n", - "3\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", "\n", - "2\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", "\n", - "4\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", "\n", - "6\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", "\n", - "5\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", "\n", - "7\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", "\n", - "8\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -686,233 +686,218 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "I->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "10\n", - "\n", - "10\n", - "\n", - "\n", - "\n", - "4->10\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "11\n", - "\n", - "11\n", - "\n", - "\n", - "\n", - "4->11\n", - "\n", - "\n", - "a\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "5->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "a\n", "\n", "\n", - "\n", + "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", "\n", - "6->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "1->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "1->7\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "5->3\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", - "\n", - "\n", - "2->8\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "8->2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", + "\n", "\n", - 
"\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", - "\n", + "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", - "\n", + "\n", "3->9\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "9->2\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", - "9->3\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", "\n", - "10->0\n", - "\n", - "\n", - "!b\n", + "9->3\n", + "\n", + "\n", + "b\n", + "\n", "\n", - "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", "\n", - "10->3\n", - "\n", - "\n", - "b\n", + "9->4\n", + "\n", + "\n", + "!b\n", "\n", - "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", - "11->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "11->3\n", - "\n", - "\n", - "b\n", + "10->4\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f657c403180> >" + " *' at 0x7f6be431fbd0> >" ] }, "execution_count": 8, @@ -944,46 +929,44 @@ "output_type": "stream", "text": [ "HOA: v1\n", - "States: 12\n", - "Start: 4\n", + "States: 11\n", + "Start: 0\n", "AP: 2 \"b\" \"a\"\n", "acc-name: co-Buchi\n", "Acceptance: 1 Fin(0)\n", "properties: trans-labels explicit-labels trans-acc complete\n", "properties: deterministic\n", - "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1 1\n", + "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1\n", "controllable-AP: 0\n", "--BODY--\n", "State: 0\n", "[!1] 5\n", - "[1] 6 {0}\n", + "[1] 6\n", "State: 1\n", - "[1] 6 {0}\n", - "[!1] 7 {0}\n", + "[!1] 7\n", + "[1] 8 {0}\n", "State: 2\n", - "[t] 8\n", + "[!1] 7\n", + "[1] 8 {0}\n", "State: 3\n", "[t] 9\n", "State: 4\n", - "[!1] 10\n", - "[1] 11\n", + "[t] 10\n", "State: 5\n", - "[t] 0\n", - "State: 6\n", - "[t] 1 {0}\n", - "State: 7\n", - "[t] 0 {0}\n", - "State: 8\n", - "[t] 2\n", - "State: 9\n", - "[!0] 2\n", - "[0] 3 {0}\n", - "State: 10\n", - "[!0] 0\n", - "[0] 3\n", - "State: 11\n", "[!0] 1\n", "[0] 3\n", + "State: 6\n", + "[!0] 2\n", + "[0] 3\n", + "State: 7\n", + "[t] 1\n", + "State: 8\n", + "[t] 2 {0}\n", + "State: 9\n", + "[0] 3 {0}\n", + "[!0] 4\n", + "State: 10\n", + "[t] 4\n", "--END--\n" ] } @@ -1030,233 +1013,218 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "I->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "10\n", - "\n", - "10\n", - "\n", - "\n", - "\n", - "4->10\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "11\n", - "\n", - "11\n", - "\n", - "\n", - "\n", - "4->11\n", - "\n", - "\n", - "a\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "5->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "a\n", "\n", "\n", - "\n", + "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", "\n", - "6->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - 
"\n", - "\n", - "1->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "1->7\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "5->3\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", - "\n", - "\n", - "2->8\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "8->2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", + "\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", - "\n", + "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", - "\n", + "\n", "3->9\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "9->2\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", - "9->3\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", "\n", - "10->0\n", - "\n", - "\n", - "!b\n", + "9->3\n", + "\n", + "\n", + "b\n", + "\n", "\n", - "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", "\n", - "10->3\n", - "\n", - "\n", - "b\n", + "9->4\n", + "\n", + "\n", + "!b\n", "\n", - "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", - "11->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "11->3\n", - "\n", - "\n", - "b\n", + "10->4\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f658c612f30> >" + " *' at 0x7f6be431fcf0> >" ] }, "execution_count": 11, @@ -1267,18 +1235,11 @@ "source": [ "spot.highlight_strategy(game)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -1292,7 +1253,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.3" } }, "nbformat": 4, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 3e8b4f5ea..654d22873 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,7 +3,6 @@ { "cell_type": "code", "execution_count": 1, - "id": "7a864ea1", "metadata": {}, "outputs": [], "source": [ @@ -14,7 +13,6 @@ }, { "cell_type": "markdown", - "id": "9a294cae", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", @@ -39,7 +37,6 @@ { "cell_type": "code", "execution_count": 2, - "id": "70429a41", "metadata": {}, "outputs": [ { @@ -56,590 +53,590 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "9\n", - "\n", - "9\n", - "\n", - "\n", - "\n", - "I->9\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "25\n", - "\n", - "25\n", - "\n", - "\n", - "\n", - "9->25\n", - "\n", - "\n", - 
"!i0 & !i1\n", - "\n", - "\n", - "\n", - "26\n", - "\n", - "26\n", - "\n", - "\n", - "\n", - "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "27\n", - "\n", - "27\n", - "\n", - "\n", - "\n", - "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "28\n", - "\n", - "28\n", - "\n", - "\n", - "\n", - "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "10->1\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "11->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", - "\n", + "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", - "\n", + "\n", "\n", - "1->12\n", - "\n", - "\n", - "!i1\n", + "0->12\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", - "\n", + "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", - "\n", + "\n", "\n", - "1->13\n", - "\n", - "\n", - "i1\n", + "0->13\n", + "\n", + "\n", + "i0 & i1\n", "\n", - "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "10->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", "\n", - "12->1\n", - "\n", - "\n", - "!o0\n", + "12->9\n", + "\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", "\n", - "13->0\n", - "\n", - "\n", - "!o0\n", + "13->5\n", + "\n", + "\n", + "1\n", "\n", - "\n", - "\n", - "2\n", - "\n", - "2\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", - "\n", + "\n", "\n", - "2->14\n", - "\n", - "\n", - "i1\n", + "1->14\n", + "\n", + "\n", + "i0\n", "\n", "\n", - "\n", + "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", - "\n", + "\n", "\n", - "2->16\n", - "\n", - "\n", - "!i1\n", + "1->16\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", - "\n", + "\n", "\n", - "16->2\n", - "\n", - "\n", - "1\n", + "16->1\n", + "\n", + "\n", + "1\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", "\n", - "\n", + "\n", "\n", - "3->13\n", - "\n", - "\n", - "i1\n", + "2->14\n", + "\n", + "\n", + "i1\n", "\n", "\n", - "\n", + "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", - "\n", + "\n", "\n", - "3->17\n", - "\n", - "\n", - "!i1\n", + "2->17\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "3->16\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "3->17\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "3->18\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", "\n", - "17->3\n", - "\n", - "\n", - "!o0\n", + "18->3\n", + "\n", + "\n", + "1\n", 
"\n", "\n", "\n", "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "4->14\n", - "\n", - "\n", - "i0\n", - "\n", - "\n", - "\n", - "18\n", - "\n", - "18\n", - "\n", - "\n", - "\n", - "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", - "\n", - "\n", - "18->4\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "5\n", - "\n", - "5\n", - "\n", - "\n", - "\n", - "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", - "\n", - "\n", - "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "5->18\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "4\n", "\n", "\n", - "\n", + "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", - "\n", - "\n", - "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", - "\n", - "\n", - "19->5\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "6\n", - "\n", - "6\n", - "\n", - "\n", - "\n", - "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "6->11\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "4->19\n", + "\n", + "\n", + "!i1\n", "\n", "\n", - "\n", + "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", - "\n", - "\n", - "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "4->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "19->4\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "20->5\n", + "\n", + "\n", + "!o0\n", "\n", "\n", - "\n", + "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", - "\n", - "\n", - "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "20->7\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "21->6\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "7->13\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "5->21\n", + "\n", + "\n", + "!i1\n", "\n", "\n", - "\n", + "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", - "\n", - "\n", - "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "5->22\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "21->4\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "22->5\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "6->19\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "6->20\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", - "\n", + "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", - "\n", - "\n", - "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "22->4\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "23->4\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "8\n", - "\n", - "8\n", - "\n", - "\n", - "\n", - "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", - "\n", - "\n", - "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "8->23\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "6->23\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", + "\n", + "\n", + "\n", + "6->24\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "23->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "23->6\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "24->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "24->9\n", + "\n", 
+ "\n", + "!o0\n", + "\n", + "\n", + "\n", + "7->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "25\n", + "\n", + "25\n", + "\n", + "\n", + "\n", + "7->25\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "25->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "25->7\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "8->20\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", - "\n", + "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", - "\n", - "\n", - "24->5\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "8->25\n", + "\n", + "\n", + "i0 & !i1\n", "\n", - "\n", - "\n", - "24->8\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "26\n", + "\n", + "26\n", "\n", - "\n", - "\n", - "25->8\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "8->26\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "26->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "26->8\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "9->21\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "9->22\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "27\n", + "\n", + "27\n", + "\n", + "\n", + "\n", + "9->27\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "28\n", + "\n", + "28\n", + "\n", + "\n", + "\n", + "9->28\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "27->1\n", + "\n", + "\n", + "!o0\n", "\n", "\n", - "\n", + "\n", "27->6\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "o0\n", "\n", - "\n", + "\n", + "\n", + "28->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", "\n", - "28->0\n", - "\n", - "\n", - "1\n", + "28->9\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc9680c32d0> >" + " *' at 0x7f0e584de570> >" ] }, "metadata": {}, @@ -658,7 +655,6 @@ }, { "cell_type": "markdown", - "id": "c02b2d8f", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." 
@@ -667,7 +663,6 @@ { "cell_type": "code", "execution_count": 3, - "id": "d08e7b9f", "metadata": {}, "outputs": [ { @@ -683,529 +678,529 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "9\n", - "\n", - "9\n", - "\n", - "\n", - "\n", - "I->9\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "25\n", - "\n", - "25\n", - "\n", - "\n", - "\n", - "9->25\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "26\n", - "\n", - "26\n", - "\n", - "\n", - "\n", - "9->26\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "27\n", - "\n", - "27\n", - "\n", - "\n", - "\n", - "9->27\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "28\n", - "\n", - "28\n", - "\n", - "\n", - "\n", - "9->28\n", - "\n", - "\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "10->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "11->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", - "\n", + "\n", "\n", - "1->12\n", - "\n", - "\n", + "0->12\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", - "\n", + "\n", "\n", - "1->13\n", - "\n", - "\n", + "0->13\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "10->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", "\n", - "12->1\n", - "\n", - "\n", + "12->9\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", "\n", - "13->0\n", - "\n", - "\n", + "13->5\n", + "\n", + "\n", "\n", - "\n", - "\n", - "2\n", - "\n", - "2\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", - "\n", + "\n", "\n", - "2->14\n", - "\n", - "\n", + "1->14\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", - "\n", + "\n", "\n", - "2->16\n", - "\n", - "\n", + "1->16\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", - "\n", + "\n", "\n", - "16->2\n", - "\n", - "\n", + "16->1\n", + "\n", + "\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", "\n", - "\n", + "\n", "\n", - "3->13\n", - "\n", - "\n", + "2->14\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", - "\n", + "\n", "\n", - "3->17\n", - "\n", - "\n", + "2->17\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->17\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "3->18\n", + "\n", + "\n", + "\n", + "\n", "\n", - "17->3\n", - 
"\n", - "\n", + "18->3\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "4->14\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "18\n", - "\n", - "18\n", - "\n", - "\n", - "\n", - "4->18\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "18->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5\n", - "\n", - "5\n", - "\n", - "\n", - "\n", - "5->14\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5->16\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5->18\n", - "\n", - "\n", + "\n", + "4\n", "\n", "\n", - "\n", + "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", - "\n", - "\n", - "5->19\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "19->5\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "6\n", - "\n", - "6\n", - "\n", - "\n", - "\n", - "6->10\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "6->11\n", - "\n", - "\n", + "\n", + "\n", + "4->19\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", - "\n", - "\n", - "6->20\n", - "\n", - "\n", + "\n", + "\n", + "4->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "19->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "20->5\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", - "\n", - "\n", - "6->21\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "20->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "20->7\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "21->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "21->6\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7->12\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7->13\n", - "\n", - "\n", + "\n", + "\n", + "5->21\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", - "\n", - "\n", - "7->22\n", - "\n", - "\n", + "\n", + "\n", + "5->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "21->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "22->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "6->19\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->20\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", - "\n", - "\n", - "7->23\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "22->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "22->7\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "23->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "23->6\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8\n", - "\n", - "8\n", - "\n", - "\n", - "\n", - "8->13\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8->17\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8->23\n", - "\n", - "\n", + "\n", + "\n", + "6->23\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", + "\n", + "\n", + "\n", + "6->24\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25\n", + "\n", + "25\n", + "\n", + "\n", + "\n", + "7->25\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8->20\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "8->24\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", - "\n", - "24->5\n", - "\n", - "\n", + "\n", + "\n", + "8->25\n", + "\n", + "\n", "\n", 
- "\n", - "\n", - "24->8\n", - "\n", - "\n", + "\n", + "\n", + "26\n", + "\n", + "26\n", "\n", - "\n", - "\n", - "25->8\n", - "\n", - "\n", + "\n", + "\n", + "8->26\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "26->3\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "26->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->21\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "27\n", + "\n", + "27\n", + "\n", + "\n", + "\n", + "9->27\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "28\n", + "\n", + "28\n", + "\n", + "\n", + "\n", + "9->28\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "27->1\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "27->6\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "28->1\n", + "\n", + "\n", + "\n", + "\n", "\n", - "28->0\n", - "\n", - "\n", + "28->9\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1227,7 +1222,6 @@ }, { "cell_type": "markdown", - "id": "9590cf55", "metadata": {}, "source": [ "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." @@ -1236,7 +1230,6 @@ { "cell_type": "code", "execution_count": 4, - "id": "d6cb467d", "metadata": {}, "outputs": [ { @@ -1252,309 +1245,309 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + 
"!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc9682230f0> >" + " *' at 0x7f0e5855c9f0> >" ] }, "metadata": {}, @@ -1573,175 +1566,175 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + 
"\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069180> >" + " *' at 0x7f0e5855cb10> >" ] }, "metadata": {}, @@ -1760,125 +1753,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069210> >" + " *' at 0x7f0e5855ccf0> >" ] }, "metadata": {}, @@ -1897,81 +1890,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + 
"/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069180> >" + " *' at 0x7f0e5855cd80> >" ] }, "metadata": {}, @@ -1990,81 +1983,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069210> >" + " *' at 0x7f0e584defc0> >" ] }, "metadata": {}, @@ -2083,125 +2076,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069060> >" + " *' at 0x7f0e5855ca20> >" ] }, "metadata": {}, @@ -2235,7 +2228,6 @@ }, { "cell_type": "markdown", - "id": "7ee86443", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2244,7 +2236,6 @@ { "cell_type": "code", "execution_count": 5, - "id": "80510b01", "metadata": {}, "outputs": [ { @@ -2253,260 +2244,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2526,7 +2517,6 @@ }, { "cell_type": "markdown", - "id": "8f97aa04", "metadata": {}, "source": [ "# Converting the separated Mealy machine to AIG\n", @@ -2539,7 +2529,6 @@ { "cell_type": "code", "execution_count": 6, - "id": "9c6d9e8b", "metadata": {}, "outputs": [ { @@ -2548,60 +2537,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2615,7 +2604,6 @@ }, { "cell_type": "markdown", - "id": "d67f8bce", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2624,7 +2612,6 @@ { "cell_type": "code", "execution_count": 7, - "id": "3a363374", "metadata": {}, "outputs": [ { @@ -2633,54 +2620,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2700,7 +2687,6 @@ }, { "cell_type": "markdown", - "id": "e4f607c3", "metadata": {}, "source": [ "To encode the circuit in the AIGER format (ASCII version) use:" @@ -2709,7 +2695,6 @@ { "cell_type": "code", 
"execution_count": 8, - "id": "564f7d0b", "metadata": {}, "outputs": [ { @@ -2733,7 +2718,6 @@ }, { "cell_type": "markdown", - "id": "cf2d4831", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2741,7 +2725,6 @@ }, { "cell_type": "markdown", - "id": "874a108e", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). In that case those \n", @@ -2753,7 +2736,6 @@ { "cell_type": "code", "execution_count": 9, - "id": "1fc4c566", "metadata": {}, "outputs": [ { @@ -2762,151 +2744,151 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968143d80> >" + " *' at 0x7f0e5855cb70> >" ] }, "metadata": {}, @@ -2918,112 +2900,112 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc9680c3330> >" + " *' at 0x7f0e5855cc60> >" ] }, 
"metadata": {}, @@ -3035,144 +3017,144 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "t\n", - "[all]\n", + " viewBox=\"0.00 0.00 282.00 148.79\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", @@ -3191,72 +3173,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 143.20 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3278,7 +3260,6 @@ }, { "cell_type": "markdown", - "id": "f8dab019", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3287,7 +3268,6 @@ { "cell_type": "code", "execution_count": 10, - "id": "091d7c97", "metadata": {}, "outputs": [ { @@ -3296,96 +3276,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3398,7 +3378,6 @@ }, { "cell_type": "markdown", - "id": "364c8d76", "metadata": {}, "source": [ "# Combining Mealy machines\n", @@ -3418,7 +3397,6 @@ { "cell_type": "code", "execution_count": 11, - "id": "57b3b51d", "metadata": {}, "outputs": [ { @@ -3434,134 +3412,134 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", + "\n", + "\n", + "o1\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", + "\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3587,94 +3565,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3700,108 +3678,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3820,53 +3798,53 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0 & o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0 & !o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968143bd0> >" + " *' at 0x7f0e5855cd20> >" ] }, "metadata": {}, @@ -3878,108 +3856,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4013,7 +3991,6 @@ }, { "cell_type": "markdown", - "id": "7d5a8a32", "metadata": {}, 
"source": [ "# Reading an AIGER-file\n", @@ -4028,7 +4005,6 @@ { "cell_type": "code", "execution_count": 12, - "id": "9da1f39e", "metadata": {}, "outputs": [], "source": [ @@ -4049,7 +4025,6 @@ { "cell_type": "code", "execution_count": 13, - "id": "7295f20a", "metadata": {}, "outputs": [ { @@ -4058,108 +4033,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4174,7 +4149,6 @@ { "cell_type": "code", "execution_count": 14, - "id": "730952f7", "metadata": {}, "outputs": [ { @@ -4203,7 +4177,6 @@ { "cell_type": "code", "execution_count": 15, - "id": "38b5b8a1", "metadata": {}, "outputs": [ { @@ -4220,7 +4193,6 @@ }, { "cell_type": "markdown", - "id": "6bde5eac", "metadata": {}, "source": [ "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." @@ -4229,7 +4201,6 @@ { "cell_type": "code", "execution_count": 16, - "id": "14f89c9b", "metadata": {}, "outputs": [ { @@ -4238,52 +4209,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fc968069ed0> >" + " *' at 0x7f0e584ee4e0> >" ] }, "execution_count": 16, @@ -4297,7 +4268,6 @@ }, { "cell_type": "markdown", - "id": "e1f01aa0", "metadata": {}, "source": [ "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", @@ -4307,7 +4277,6 @@ { "cell_type": "code", "execution_count": 17, - "id": "93e1fc70", "metadata": {}, "outputs": [ { @@ -4316,114 +4285,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4455,7 +4424,6 @@ { "cell_type": "code", "execution_count": 18, - "id": "6cb96c81", "metadata": {}, "outputs": [ { @@ -4464,180 +4432,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4658,7 +4626,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -4672,7 +4640,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.7.3" } }, "nbformat": 4, diff --git a/tests/python/toparity.py b/tests/python/toparity.py index 37e111f9b..ad9bc6e0b 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -26,57 +26,135 @@ tc = TestCase() # Tests for the new version of to_parity +# It is no more a no_option as we now have more options (like iar, bscc, …) no_option = spot.to_parity_options() no_option.search_ex = False no_option.use_last = False -no_option.force_order = False +no_option.use_last_post_process = False no_option.partial_degen = False no_option.acc_clean = False no_option.parity_equiv = False +no_option.tar = False +no_option.iar = True +no_option.lar_dfs = True +no_option.bscc = True no_option.parity_prefix = False +no_option.parity_prefix_general = False +no_option.generic_emptiness = False no_option.rabin_to_buchi = False +no_option.buchi_type_to_buchi = False +no_option.parity_type_to_parity = False +no_option.reduce_col_deg = False no_option.propagate_col = False +no_option.use_generalized_rabin = False acc_clean_search_opt = spot.to_parity_options() -acc_clean_search_opt.force_order = False -acc_clean_search_opt.partial_degen = False -acc_clean_search_opt.parity_equiv = False -acc_clean_search_opt.parity_prefix = False -acc_clean_search_opt.rabin_to_buchi = False -acc_clean_search_opt.propagate_col = False +no_option.search_ex = False +no_option.use_last = False +no_option.use_last_post_process = False +no_option.force_order = False +no_option.partial_degen = False +no_option.acc_clean = True +no_option.parity_equiv = False +no_option.tar = False +no_option.iar = True +no_option.lar_dfs = True +no_option.bscc = True +no_option.parity_prefix = False +no_option.parity_prefix_general = False +no_option.generic_emptiness = False +no_option.rabin_to_buchi = False +no_option.buchi_type_to_buchi = False +no_option.parity_type_to_parity = False +no_option.reduce_col_deg = False +no_option.propagate_col = False +no_option.use_generalized_rabin = False partial_degen_opt = spot.to_parity_options() partial_degen_opt.search_ex = False +partial_degen_opt.use_last = False +partial_degen_opt.use_last_post_process = False partial_degen_opt.force_order = False +partial_degen_opt.partial_degen = True +partial_degen_opt.acc_clean = False partial_degen_opt.parity_equiv = False +partial_degen_opt.tar = False +partial_degen_opt.iar = True 
+partial_degen_opt.lar_dfs = True +partial_degen_opt.bscc = True partial_degen_opt.parity_prefix = False +partial_degen_opt.parity_prefix_general = False +partial_degen_opt.generic_emptiness = False partial_degen_opt.rabin_to_buchi = False +partial_degen_opt.buchi_type_to_buchi = False +partial_degen_opt.parity_type_to_parity = False +partial_degen_opt.reduce_col_deg = False partial_degen_opt.propagate_col = False +partial_degen_opt.use_generalized_rabin = False parity_equiv_opt = spot.to_parity_options() parity_equiv_opt.search_ex = False parity_equiv_opt.use_last = False -parity_equiv_opt.force_order = False +parity_equiv_opt.use_last_post_process = False parity_equiv_opt.partial_degen = False +parity_equiv_opt.acc_clean = False +parity_equiv_opt.parity_equiv = True +parity_equiv_opt.tar = False +parity_equiv_opt.iar = True +parity_equiv_opt.lar_dfs = True +parity_equiv_opt.bscc = True parity_equiv_opt.parity_prefix = False +parity_equiv_opt.parity_prefix_general = False +parity_equiv_opt.generic_emptiness = False parity_equiv_opt.rabin_to_buchi = False +parity_equiv_opt.buchi_type_to_buchi = False +parity_equiv_opt.parity_type_to_parity = False +parity_equiv_opt.reduce_col_deg = False parity_equiv_opt.propagate_col = False +parity_equiv_opt.use_generalized_rabin = False rab_to_buchi_opt = spot.to_parity_options() +rab_to_buchi_opt.search_ex = False rab_to_buchi_opt.use_last = False -rab_to_buchi_opt.force_order = False +rab_to_buchi_opt.use_last_post_process = False rab_to_buchi_opt.partial_degen = False -rab_to_buchi_opt.parity_equiv = False +rab_to_buchi_opt.acc_clean = False +rab_to_buchi_opt.parity_equiv = True +rab_to_buchi_opt.tar = False +rab_to_buchi_opt.iar = True +rab_to_buchi_opt.lar_dfs = False +rab_to_buchi_opt.bscc = False rab_to_buchi_opt.parity_prefix = False +rab_to_buchi_opt.parity_prefix_general = False +rab_to_buchi_opt.generic_emptiness = False +rab_to_buchi_opt.rabin_to_buchi = True +rab_to_buchi_opt.buchi_type_to_buchi = False +rab_to_buchi_opt.parity_type_to_parity = False +rab_to_buchi_opt.reduce_col_deg = False rab_to_buchi_opt.propagate_col = False +rab_to_buchi_opt.use_generalized_rabin = False -# Force to use CAR or IAR for each SCC +# Force to use CAR, IAR or TAR for each SCC use_car_opt = spot.to_parity_options() +use_car_opt.search_ex = True +use_car_opt.use_last = True +use_car_opt.use_last_post_process = True use_car_opt.partial_degen = False +use_car_opt.acc_clean = False use_car_opt.parity_equiv = False +use_car_opt.tar = True +use_car_opt.iar = True +use_car_opt.lar_dfs = True +use_car_opt.bscc = True use_car_opt.parity_prefix = False +use_car_opt.parity_prefix_general = False +use_car_opt.generic_emptiness = False use_car_opt.rabin_to_buchi = False +use_car_opt.buchi_type_to_buchi = False +use_car_opt.parity_type_to_parity = False +use_car_opt.reduce_col_deg = False use_car_opt.propagate_col = False +use_car_opt.use_generalized_rabin = False all_opt = spot.to_parity_options() all_opt.pretty_print = True @@ -100,15 +178,28 @@ def test(aut, expected_num_states=[], full=True): p1 = spot.to_parity(aut, search_ex = opt.search_ex, use_last = opt.use_last, + use_last_post_process = \ + opt.use_last_post_process, force_order = opt.force_order, partial_degen = opt.partial_degen, acc_clean = opt.acc_clean, parity_equiv = opt.parity_equiv, + tar = opt.tar, + iar = opt.iar, + lar_dfs = opt.lar_dfs, + bscc = opt.bscc, parity_prefix = opt.parity_prefix, + parity_prefix_general = \ + opt.parity_prefix_general, + generic_emptiness = opt.generic_emptiness, 
rabin_to_buchi = opt.rabin_to_buchi, + buchi_type_to_buchi = opt.buchi_type_to_buchi, + parity_type_to_parity = \ + opt.parity_type_to_parity, reduce_col_deg = opt.reduce_col_deg, propagate_col = opt.propagate_col, - pretty_print = opt.pretty_print, + use_generalized_rabin = \ + opt.use_generalized_rabin ) else: p1 = spot.acd_transform(aut) @@ -205,7 +296,7 @@ State: 13 [0&1] 5 [!0&!1] 10 {0 1 3 5} [0&!1] 13 {1 3} ---END--"""), [35, 30, 23, 32, 31, 28, 22, 21]) +--END--"""), [32, 22, 23, 30, 33, 45, 22, 21]) test(spot.automaton(""" HOA: v1 @@ -223,7 +314,7 @@ State: 1 [0&!1] 1 {4} [!0&1] 1 {0 1 2 3} [!0&!1] 1 {0 3} ---END--"""), [7, 5, 3, 6, 5, 5, 3, 3]) +--END--"""), [6, 3, 3, 5, 5, 26, 3, 3]) test(spot.automaton("""HOA: v1 States: 2 @@ -239,7 +330,7 @@ State: 0 State: 1 [0&1] 1 {2 3 4} [!0&!1] 0 {1 2} ---END--"""), [9, 3, 2, 3, 3, 3, 2, 2]) +--END--"""), [3, 2, 2, 9, 9, 10, 2, 2]) for i,f in enumerate(spot.randltl(10, 200)): test(spot.translate(f, "det", "G"), full=(i<50)) @@ -279,7 +370,7 @@ State: 3 [!0&1] 2 {1 4} [0&1] 3 {0} --END-- -"""), [80, 47, 104, 104, 102, 29, 6, 5]) +"""), [104, 6, 80, 23, 27, 17, 6, 5]) test(spot.automaton(""" HOA: v1 @@ -313,7 +404,7 @@ State: 4 [0&!1] 4 [0&1] 4 {1 2 4} --END-- -"""), [9, 6, 7, 7, 6, 6, 6, 6]) +"""), [6, 6, 7, 9, 9, 10, 6, 6]) test(spot.automaton(""" HOA: v1 @@ -335,7 +426,7 @@ State: 1 [0&!1] 1 {2 3} [0&1] 1 {1 2 4} --END-- -"""), [11, 3, 2, 3, 3, 3, 2, 2]) +"""), [3, 2, 2, 6, 6, 6, 2, 2]) # Tests both the old and new version of to_parity @@ -366,7 +457,7 @@ explicit-labels trans-acc --BODY-- State: 0 [0&1] 2 {4 5} [0&1] 4 {0 4} p = spot.to_parity_old(a, True) tc.assertEqual(p.num_states(), 22) tc.assertTrue(spot.are_equivalent(a, p)) -test(a, [8, 6, 6, 6, 6, 6, 6, 6]) +test(a, [6, 6, 7, 8, 6, 7, 6, 6]) # Force a few edges to false, to make sure to_parity() is OK with that. 
for e in a.out(2): @@ -380,7 +471,7 @@ for e in a.out(3): p = spot.to_parity_old(a, True) tc.assertEqual(p.num_states(), 22) tc.assertTrue(spot.are_equivalent(a, p)) -test(a, [7, 6, 6, 6, 6, 6, 6, 6]) +test(a, [6, 6, 7, 8, 6, 7, 6, 6]) for f in spot.randltl(4, 400): d = spot.translate(f, "det", "G") @@ -396,4 +487,4 @@ for f in spot.randltl(5, 2000): a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det') b = spot.to_parity_old(a, True) tc.assertTrue(a.equivalent_to(b)) -test(a, [7, 7, 3, 7, 7, 7, 3, 3]) +test(a, [7, 3, 3, 8, 8, 7, 3, 3]) From 12920c44e3b930903452ca2171c6a62b7edf2d68 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 4 May 2022 17:25:58 +0200 Subject: [PATCH 112/606] Trigger archival services on new release * .gitlab-ci.yml: curl Software Heritage and Internet Archive endpoints to trigger archival on push to stable --- .gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a1bb0ca9a..86d89f9d7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -366,6 +366,8 @@ publish-stable: - tgz=`ls spot-*.tar.* | head -n 1` - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline + - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lrde.epita.fr/spot/spot/" + - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" publish-unstable: only: From 3e2201bd80e3d995b19a75487de0309c1834e1f6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 12 Jul 2022 11:47:02 +0200 Subject: [PATCH 113/606] tests: add figures from CAV'22 paper * tests/python/cav22-figs.ipynb: New file. * doc/org/tut.org, tests/Makefile.am: Add it. --- doc/org/tut.org | 1 + tests/Makefile.am | 1 + tests/python/cav22-figs.ipynb | 1582 +++++++++++++++++++++++++++++++++ 3 files changed, 1584 insertions(+) create mode 100644 tests/python/cav22-figs.ipynb diff --git a/doc/org/tut.org b/doc/org/tut.org index 598276f38..8ae701bc5 100644 --- a/doc/org/tut.org +++ b/doc/org/tut.org @@ -89,6 +89,7 @@ real notebooks instead. automata. - [[https://spot.lrde.epita.fr/ipynb/atva16-fig2a.html][=atva16-fig2a.ipynb=]] first example from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][ATVA'16 tool paper]]. - [[https://spot.lrde.epita.fr/ipynb/atva16-fig2b.html][=atva16-fig2b.ipynb=]] second example from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][ATVA'16 tool paper]]. +- [[https://spot.lrde.epita.fr/ipynb/cav22-figs.html][=cav22-figs.ipynb=]] figures from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.22.cav.pdf][CAV'22 tool paper]]. - [[https://spot.lrde.epita.fr/ipynb/alternation.html][=alternation.ipynb=]] examples of alternating automata. - [[https://spot.lrde.epita.fr/ipynb/stutter-inv.html][=stutter-inv.ipynb=]] working with stutter-invariant formulas properties. - [[https://spot.lrde.epita.fr/ipynb/satmin.html][=satmin.ipynb=]] Python interface for [[file:satmin.org][SAT-based minimization of deterministic ω-automata]]. 
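The rewritten tests/python/toparity.py above passes one keyword argument of spot.to_parity() per field of spot.to_parity_options(), and exercises many option combinations (plus spot.acd_transform()) against expected state counts. A stand-alone version of that kind of check, reduced to the default options, could look like the following sketch; the formula is an arbitrary example and is not taken from the test file.

  import spot

  # Sketch only: a paritized automaton should stay equivalent to its
  # input, which is the property the test() calls above rely on.
  aut = spot.translate('FGa | GFb', 'det', 'G')  # generic acceptance
  p1 = spot.to_parity(aut)       # paritization with default options
  p2 = spot.acd_transform(aut)   # alternating-cycle-decomposition variant
  assert spot.are_equivalent(aut, p1)
  assert spot.are_equivalent(aut, p2)
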
diff --git a/tests/Makefile.am b/tests/Makefile.am index 1a3d440c3..a13a495b3 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -362,6 +362,7 @@ TESTS_ipython = \ python/atva16-fig2b.ipynb \ python/automata-io.ipynb \ python/automata.ipynb \ + python/cav22-figs.ipynb \ python/contains.ipynb \ python/decompose.ipynb \ python/formulas.ipynb \ diff --git a/tests/python/cav22-figs.ipynb b/tests/python/cav22-figs.ipynb new file mode 100644 index 000000000..ea84319a2 --- /dev/null +++ b/tests/python/cav22-figs.ipynb @@ -0,0 +1,1582 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4d225dd2-8b41-4bb4-9cae-136c314bbcc9", + "metadata": {}, + "source": [ + "This notebook reproduces the examples shown in our CAV'22 paper, as well as a few more. It was part of the CAV'22 artifact, but has been updated to keep up with recent version of Spot." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a1948c94-e737-4b8b-88b8-00d896c5c928", + "metadata": {}, + "outputs": [], + "source": [ + "import spot\n", + "from spot.jupyter import display_inline\n", + "from buddy import bdd_ithvar\n", + "spot.setup()" + ] + }, + { + "cell_type": "markdown", + "id": "c03f8776-c657-4f87-99b5-a56ed1fdcbe3", + "metadata": {}, + "source": [ + "# Figure 1\n", + "\n", + "Fig. 1 of the paper shows (1) how to convert an LTL formula to an automaton with arbitrary acceptance condition, and (2) how to display the internal representation of the automaton." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d7b6e2d6-7472-4136-8114-ecb03dde1edd", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb6a430f5a0> >" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.translate('GF(a <-> Xa) & FGb', 'det', 'gen')\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "18248bd4-8d80-4ae7-a466-c347e1ea5ad4", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "5\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & 
!b\n", + "\n", + "a & b\n", + "\n", + "!a & b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "2\n", + "acceptance:\n", + "Fin(0) & Inf(1)\n", + "ap_vars:\n", + "b a\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "no\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.show_storage()" + ] + }, + { + "cell_type": "markdown", + "id": "ef2e2a61-046c-42f0-b0a1-8dc7f1f2b37a", + "metadata": {}, + "source": [ + "# Figure 2\n", + "\n", + "Fig.2 shows an example of alternating automaton, represented in two different ways, along with its internal representation." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5d2dd179-470c-47c1-b9f2-c5df9f76b2b8", + "metadata": {}, + "outputs": [], + "source": [ + "# We enter the automaton using the HOA format.\n", + "aut2 = spot.automaton(\"\"\"\n", + "HOA: v1\n", + "States: 5\n", + "Start: 3\n", + "acc-name: co-Buchi\n", + "Acceptance: 1 Fin(0)\n", + "AP: 2 \"a\" \"b\"\n", + "--BODY--\n", + "State: 0 {0} \n", + "[0] 1\n", + "[!0] 2\n", + "State: 1 {0} \n", + "[0&1] 0&1\n", + "State: 2 \n", + "[t] 2 \n", + "State: 3 \n", + "[0] 4&0\n", + "State: 4 \n", + "[t] 3 \n", + "--END--\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e1517479-6947-43fd-8369-c4fcdca72e1d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "-4->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->T2T0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "-4->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_inline(aut2, aut2.show('.u'), per_row=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ef26e6e8-3206-4e51-9858-33f8d27f915c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "cond\n", + "\n", + "a\n", + "\n", + "!a\n", + "\n", + "a & b\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "#cnt/dst\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "3\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Fin(0)\n", + "ap_vars:\n", + "b a\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "yes\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "no\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut2.show_storage()" + ] + }, + { + "cell_type": "markdown", + "id": "f4f651b6-974e-4783-a54a-17a280d30782", + "metadata": {}, + "source": [ + "# Figure 3\n", + "\n", + "Fig. 3 shows an example of game generated by `ltlsynt` from the LTL specification of a reactive controler, and then how this game can be encoded into an And-Inverter-Graph.\n", + "First we retrieve the game generated by `ltlsynt` (any argument passed to `spot.automaton` is interpreted as a command if it ends with a pipe), then we solve it to compute a possible winning strategy. 
\n", + "\n", + "Player 0 plays from round states and tries to violate the acceptance condition; Player 1 plays from diamond states and tries to satisfy the acceptance condition. Once a game has been solved, the `highlight_strategy` function will decorate the automaton with winning region and computed strategies for player 0 and 1 in red and green respectively. Therefore this game is winning for player 1 from the initial state.\n", + "\n", + "Compared to the paper, the production of parity automata in `ltlsynt` has been improved, and it generates a Büchi game instead (but Büchi can be seen one case of parity)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ac90284d-2493-428b-9db7-cc7aa63384cb", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "4->12\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "8->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "2->10\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "3->11\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "11->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "5->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb6a430f300> >" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "game = spot.automaton(\"ltlsynt --outs=b -f 'F(a & Xa) <-> Fb' --print-game-hoa |\")\n", + "spot.solve_game(game)\n", + "spot.highlight_strategy(game)\n", + "game" + ] + }, + { + "cell_type": "markdown", + "id": "d8b3ad5a-fef2-498b-8fd3-2d3940dacbf5", + "metadata": {}, + "source": [ + "The `solved_game_to_mealy()` shown in 
the paper does not always produce the same type of output, so it is\n", + "better to explicitely call `solved_game_to_split_mealy()` or `solved_game_to_separated_mealy()` depending on the type of output one need. We also show how to use the `reduce_mealy()` method to simplify one." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "39156f1a-945c-46db-bac2-01565d17b82e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "6->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "mealy = spot.solved_game_to_separated_mealy(game)\n", + "mealy_min = spot.reduce_mealy(mealy, True)\n", + "aig = spot.mealy_machine_to_aig(mealy_min, \"isop\")\n", + "display_inline(mealy, mealy_min, aig)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d6b3c757d09700e1117e4167853af5dad719e535 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 12 Jul 2022 15:43:39 +0200 Subject: [PATCH 114/606] test: ignore OpenBSD's "Terminated" messages For #501. * tests/core/autcross4.test: Here. --- tests/core/autcross4.test | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/core/autcross4.test b/tests/core/autcross4.test index 9e0d68638..13f770d1c 100755 --- a/tests/core/autcross4.test +++ b/tests/core/autcross4.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018, 2019 Laboratoire de Recherche et Développement de +# Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -85,7 +85,9 @@ autcross -T3 -q --language-preserved --ignore-execution-failures \ --fail-on-timeout \ 'sleep 10; autfilt %H>%O' 'false %H %O' 2>err -Fin && exit 1 cat err -test 4 = `wc -l err && exit 1 From 444e2b5b89175e203d880a20e29f607d94bd2323 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 22 Jul 2022 10:49:23 +0200 Subject: [PATCH 115/606] parseaut: Add support for PGSolver's format * spot/parseaut/parseaut.yy, spot/parseaut/scanaut.ll: Add rules for PGSolver's format. * spot/parseaut/public.hh: PGAME is a new type of output. * tests/core/pgsolver.test: New file. * tests/Makefile.am: Add it. * tests/python/games.ipynb: More exemples. * NEWS: Mention the new feature. --- NEWS | 6 +- spot/parseaut/parseaut.yy | 110 +++- spot/parseaut/public.hh | 19 +- spot/parseaut/scanaut.ll | 47 +- tests/Makefile.am | 1 + tests/core/pgsolver.test | 265 ++++++++++ tests/python/games.ipynb | 1038 +++++++++++++++++++++++++------------ 7 files changed, 1144 insertions(+), 342 deletions(-) create mode 100755 tests/core/pgsolver.test diff --git a/NEWS b/NEWS index d7f3f4923..7e6bd6a40 100644 --- a/NEWS +++ b/NEWS @@ -65,6 +65,10 @@ New in spot 2.10.6.dev (not yet released) property of Spot to the controllable-AP header for the Extended HOA format used in SyntComp. https://arxiv.org/abs/1912.05793 + - The automaton parser learned to parse games in the PGSolver format. + See the bottom of https://spot.lrde.epita.fr/ipynb/games.html for + an example. + - "aliases" is a new named property that is filled by the HOA parser using the list of aliases declared in the HOA file, and then used by the HOA printer on a best-effort basis. Aliases can be used to @@ -105,7 +109,7 @@ New in spot 2.10.6.dev (not yet released) - The zielonka_tree construction was optimized using the same memoization trick that is used in ACD. 
Additionally it can now be - run with additional option to abort when the tree as an unwanted + run with additional options to abort when the tree as an unwanted shape, or to turn the tree into a DAG. - contains() can now take a twa as a second argument, not just a diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 71ab8aaea..52d448c16 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -44,6 +44,7 @@ #include "spot/priv/accmap.hh" #include #include +#include using namespace std::string_literals; @@ -256,6 +257,11 @@ extern "C" int strverscmp(const char *s1, const char *s2); %token ENDOFFILE 0 "end of file" %token '[' +%token LINEDIRECTIVE "#line" +%token BDD + +/**** DSTAR tokens ****/ +%token ENDDSTAR "end of DSTAR automaton" %token DRA "DRA" %token DSA "DSA" %token V2 "v2" @@ -263,14 +269,12 @@ extern "C" int strverscmp(const char *s1, const char *s2); %token ACCPAIRS "Acceptance-Pairs:" %token ACCSIG "Acc-Sig:" %token ENDOFHEADER "---" -%token LINEDIRECTIVE "#line" -%token BDD %left '|' %left '&' %precedence '!' -%type init-state-conj-2 state-conj-2 state-conj-checked +%type init-state-conj-2 state-conj-2 state-conj-checked pgame_succs %type checked-state-num state-num acc-set sign %type label-expr %type acc-sig acc-sets trans-acc_opt state-acc_opt @@ -299,10 +303,14 @@ extern "C" int strverscmp(const char *s1, const char *s2); %type nc-one-ident nc-ident-list %type acceptance-cond +/**** PGAME tokens ****/ +// Also using INT, STRING +%token PGAME "start of PGSolver game" +%token ENDPGAME "end of PGSolver game" + /**** LBTT tokens *****/ - // Also using INT, STRING +// Also using INT, STRING %token ENDAUT "-1" -%token ENDDSTAR "end of DSTAR automaton" %token LBTT "LBTT header" %token INT_S "state acceptance" %token LBTT_EMPTY "acceptance sets for empty automaton" @@ -364,6 +372,7 @@ aut-1: hoa { res.h->type = spot::parsed_aut_type::HOA; } | never { res.h->type = spot::parsed_aut_type::NeverClaim; } | lbtt { res.h->type = spot::parsed_aut_type::LBTT; } | dstar /* will set type as DSA or DRA while parsing first line */ + | pgame { res.h->type = spot::parsed_aut_type::PGAME; } /**********************************************************************/ /* Rules for HOA */ @@ -1765,7 +1774,7 @@ dstar_header: dstar_sizes if (res.states > 0) { - res.h->aut->new_states(res.states);; + res.h->aut->new_states(res.states); res.info_states.resize(res.states); } res.acc_style = State_Acc; @@ -1908,6 +1917,93 @@ dstar_states: %empty res.h->aut->new_edge(res.cur_state, i.first, i.second, $3); } +/**********************************************************************/ +/* Rules for PGSolver games */ +/**********************************************************************/ + +pgamestart: PGAME + { + if (res.opts.want_kripke) + { + error(@$, + "cannot read a Kripke structure out of a PGSolver game."); + YYABORT; + } + } + +pgame: pgamestart pgame_nodes ENDPGAME + { + unsigned n = res.accset; + auto p = spot::acc_cond::acc_code::parity_max_odd(n); + res.h->aut->set_acceptance(n, p); + res.acc_style = State_Acc; + // Pretend that we have declared all states. 
+ n = res.h->aut->num_states(); + res.info_states.resize(n); + for (auto& p: res.info_states) + p.declared = true; + } + | pgamestart error ENDPGAME + { + error(@$, "failed to parse this as a PGSolver game"); + } + +pgame_nodes: pgame_node ';' + | pgame_nodes pgame_node ';' + +pgame_succs: INT + { $$ = new std::vector{$1}; } + | pgame_succs ',' INT + { + $$ = $1; + $$->emplace_back($3); + } + +pgame_node: INT INT INT pgame_succs string_opt + { + unsigned state = $1; + unsigned owner = $3; + if (owner > 1) + { + error(@3, "node owner should be 0 or 1"); + owner = 0; + } + // Create any missing state + unsigned max_state = state; + for (unsigned s: *$4) + max_state = std::max(max_state, s); + unsigned n = res.h->aut->num_states(); + if (n <= max_state) + res.h->aut->new_states(max_state + 1 - n); + + // assume the source of the first edge is initial + if (res.start.empty()) + res.start.emplace_back(@$, std::vector{state}); + + // Create all edges with priority $2 + spot::acc_cond::mark_t m({$2}); + for (unsigned s: *$4) + res.h->aut->new_edge(state, s, bddtrue, m); + res.accset = std::max(res.accset, 1 + (int) $2); + + n = res.h->aut->num_states(); + if (!res.state_player) + res.state_player = new std::vector(n); + else if (res.state_player->size() < n) + res.state_player->resize(n); + (*res.state_player)[state] = owner; + + if (std::string* name = $5) + { + if (!res.state_names) + res.state_names = new std::vector(n); + else if (res.state_names->size() < n) + res.state_names->resize(n); + (*res.state_names)[state] = std::move(*name); + delete name; + } + } + /**********************************************************************/ /* Rules for neverclaims */ /**********************************************************************/ @@ -2487,7 +2583,7 @@ static void fix_initial_state(result_& r) start.resize(std::distance(start.begin(), res)); assert(start.size() >= 1); - if (start.size() == 1) + if (start.size() == 1) { if (r.opts.want_kripke) r.h->ks->set_init_state(start.front().front()); diff --git a/spot/parseaut/public.hh b/spot/parseaut/public.hh index d1c1793be..ec16b3ad7 100644 --- a/spot/parseaut/public.hh +++ b/spot/parseaut/public.hh @@ -44,7 +44,14 @@ namespace spot struct parse_aut_error_list {}; #endif - enum class parsed_aut_type { HOA, NeverClaim, LBTT, DRA, DSA, Unknown }; + enum class parsed_aut_type { + HOA, + NeverClaim, + LBTT, + DRA, /* DSTAR format for Rabin */ + DSA, /* DSTAR format for Streett */ + PGAME, /* PG Solver Game */ + Unknown }; /// \brief Result of the automaton parser struct SPOT_API parsed_aut final @@ -91,11 +98,11 @@ namespace spot struct automaton_parser_options final { - bool ignore_abort = false; ///< Skip aborted automata - bool debug = false; ///< Run the parser in debug mode? - bool trust_hoa = true; ///< Trust properties in HOA files - bool raise_errors = false; ///< Raise errors as exceptions. - bool want_kripke = false; ///< Parse as a Kripke structure. + bool ignore_abort = false; ///< Skip aborted automata + bool debug = false; ///< Run the parser in debug mode? + bool trust_hoa = true; ///< Trust properties in HOA files + bool raise_errors = false; ///< Raise errors as exceptions. + bool want_kripke = false; ///< Parse as a Kripke structure. 
}; /// \brief Parse a stream of automata diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index db8ae75c6..c04834975 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -65,12 +65,18 @@ eol \n+|\r+ eol2 (\n\r)+|(\r\n)+ eols ({eol}|{eol2})* identifier [[:alpha:]_][[:alnum:]_.-]* +pgameinit "parity"[ \t]+[0-9]+[ \t]*; +oldpgameinit [0-9]+[ \t]+[0-9]+[ \t]+[01]+[ \t]+[0-9,]+([ \t]+".*")?[ \t]*; +/* A pattern than match the start of an automaton, in order +to detect the end of the previous one. We do not try to match +LBTT automata here. */ +startaut {eols}("HOA:"|"never"|"DSA"|"DRA"|{pgameinit}) %x in_COMMENT in_STRING in_NEVER_PAR %s in_HOA in_NEVER in_LBTT_HEADER %s in_LBTT_STATE in_LBTT_INIT in_LBTT_TRANS %s in_LBTT_T_ACC in_LBTT_S_ACC in_LBTT_GUARD -%s in_DSTAR +%s in_DSTAR in_PGAME %% %{ @@ -127,7 +133,20 @@ identifier [[:alpha:]_][[:alnum:]_.-]* "never" BEGIN(in_NEVER); return token::NEVER; "DSA" BEGIN(in_DSTAR); return token::DSA; "DRA" BEGIN(in_DSTAR); return token::DRA; - +{pgameinit} { + BEGIN(in_PGAME); + char* end = nullptr; + errno = 0; + unsigned long n = strtoul(yytext + 7, &end, 10); + yylval->num = n; + return token::PGAME; + } +{oldpgameinit} { + BEGIN(in_PGAME); + yylval->num = 0; + yyless(0); + return token::PGAME; + } [0-9]+[ \t][0-9]+[ts]? { BEGIN(in_LBTT_HEADER); char* end = nullptr; @@ -229,10 +248,8 @@ identifier [[:alpha:]_][[:alnum:]_.-]* return token::INT; } [0-9]+ parse_int(); return token::INT; - /* The start of any automaton is the end of this one. - We do not try to detect LBTT automata, as that would - be too hard to distinguish from state numbers. */ - {eols}("HOA:"|"never"|"DSA"|"DRA") { + /* The start of any automaton is the end of this one. */ + {startaut} { yylloc->end = yylloc->begin; yyless(0); BEGIN(INITIAL); @@ -270,6 +287,24 @@ identifier [[:alpha:]_][[:alnum:]_.-]* } } +{ + /* Handle short numbers without going through parse_int() for efficiency. */ + [0-9] yylval->num = *yytext - '0'; return token::INT; + [0-9][0-9] { + yylval->num = (yytext[0] * 10) + yytext[1] - '0' * 11; + return token::INT; + } + [0-9]+ parse_int(); return token::INT; + /* The start of any automaton is the end of this one. */ + {startaut} { + yylloc->end = yylloc->begin; + yyless(0); + BEGIN(INITIAL); + return token::ENDPGAME; + } + <> return token::ENDPGAME; +} + /* Note: the LBTT format is scanf friendly, but not Bison-friendly. If we only tokenize it as a stream of INTs, the parser will have a very hard time recognizing what is a state from what is a diff --git a/tests/Makefile.am b/tests/Makefile.am index a13a495b3..91d3f10ea 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -341,6 +341,7 @@ TESTS_twa = \ core/dnfstreett.test \ core/parity.test \ core/parity2.test \ + core/pgsolver.test \ core/ltlsynt.test \ core/ltlsynt-pgame.test \ core/syfco.test \ diff --git a/tests/core/pgsolver.test b/tests/core/pgsolver.test new file mode 100755 index 000000000..e767e1953 --- /dev/null +++ b/tests/core/pgsolver.test @@ -0,0 +1,265 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# This is example 6 is the manual of pgsolver 4.1 +cat >example1.pg <out + +rest='(Fin(6) & (Inf(5) | (Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0)))))))' +cat >example1.hoa <out +diff out example1.hoa + + +# Test streaming. +cat >example2.pg < assert(!(false)) } + od; +accept_all: + skip +} +EOF +autfilt example2.pg >out +parity15=`randaut -A'parity max odd 15' -Q1 0 | grep Acceptance` +parity31=`randaut -A'parity max odd 31' -Q1 0 | grep Acceptance` +cat > example2.hoa <example3.pg <stdout 2>stderr && exit 1 +cat >expected.err< example3.hoa +diff stdout example3.hoa diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index f3ffd7502..fcc2bf12c 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -47,153 +47,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -285,153 +285,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - 
"\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -501,153 +501,153 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", "\n", - "0\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", "\n", - "1\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", "\n", - "3\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", "\n", - "2\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", "\n", - "4\n", + "4\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", "\n", - "6\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", "\n", - "5\n", + "5\n", "\n", "\n", "\n", "2->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", "\n", - "7\n", + "7\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", "\n", - "8\n", + "8\n", "\n", "\n", "\n", "7->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -670,7 +670,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Input/Output\n", + "## Input/Output in HOA format\n", "\n", "An extension of the HOA format makes it possible to store the `state-player` property. This allows us to read the parity game constructed by `ltlsynt` using `spot.automaton()` like any other automaton." 
] @@ -686,218 +686,218 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "5->3\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "1->7\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "2->7\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "3->9\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "9->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "9->4\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "4->10\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "10->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f6be431fbd0> >" + " *' at 0x7f8f1861bd20> >" ] }, "execution_count": 8, @@ -1013,218 +1013,218 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "5->3\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", 
+ "\n", + "7\n", "\n", "\n", "\n", "1->7\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "2->7\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "3->9\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "9->3\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "9->4\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "4->10\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "10->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f6be431fcf0> >" + " *' at 0x7f8f187f5ef0> >" ] }, "execution_count": 11, @@ -1235,11 +1235,405 @@ "source": [ "spot.highlight_strategy(game)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Input in PGSolver format\n", + "\n", + "The automaton parser is also able to parse the [PGSolver](https://github.com/tcsprojects/pgsolver) format. Here are two examples from the manual of PGSolver. The support for C-style comments is not part of the PGSolver format.\n", + "\n", + "Note that we use diamond node for player 1, while PGSolver use those of player 0. Also in Spot the acceptance condition is what Player 1 should satisfy; player 0 has two way to not satisfy it: leading to a rejecting cycle, or to a state without successor. In PGSolver, the graph is assumed to be total (i.e. each state has a successor), so player 0 can only win by reaching a rejecting cycle, which is equivalent to a `parity max even` acceptance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))))))))\n", + "[parity max odd 9]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "Africa\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "Antarctica\n", + "\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "Asia\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "America\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "Australia\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a,b = spot.automata(\"\"\"\n", + "parity 4; /* Example 6 in the manual for PGSolver 4.1 */\n", + "0 6 1 4,2 \"Africa\";\n", + "4 5 1 0 \"Antarctica\";\n", + "1 8 1 2,4,3 \"America\";\n", + "3 6 0 4,2 \"Australia\";\n", + "2 7 0 3,1,0,4 \"Asia\";\n", + "parity 8; /* Example 7 in the manual for PGSolver 4.1 */\n", + "0 0 0 1,2;\n", + "1 1 1 
2,3;\n", + "2 0 0 3,4;\n", + "3 1 1 4,5;\n", + "4 0 0 5,6;\n", + "5 1 1 6,7;\n", + "6 0 0 7,0;\n", + "7 1 1 0,1;\n", + "\"\"\")\n", + "spot.solve_game(a)\n", + "spot.highlight_strategy(a)\n", + "spot.solve_game(b)\n", + "spot.highlight_strategy(b)\n", + "display(a.show('.g'), b.show('.g'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1253,7 +1647,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.10.5" } }, "nbformat": 4, From b3e994c249d8b9aba80775cb06513748ba7473dd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 22 Jul 2022 10:54:15 +0200 Subject: [PATCH 116/606] * spot/twaalgos/hoa.cc: Typo in error message. --- spot/twaalgos/hoa.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 0e03b07f5..e6147afda 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -794,7 +794,7 @@ namespace spot os << (v1_1 ? "spot." : "spot-") << "state-player:"; if (player->size() != num_states) throw std::runtime_error("print_hoa(): state-player property has" - " (" + std::to_string(player->size()) + + " " + std::to_string(player->size()) + " states but automaton has " + std::to_string(num_states)); unsigned n = 0; From 8b93b6967dc40af805109c952ef3c2b43c48e127 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 22 Jul 2022 16:52:03 +0200 Subject: [PATCH 117/606] rename pg_print() as print_pg() and add it to to_str() * NEWS: Mention those change. * spot/twaalgos/game.hh (print_pg): New function. (pg_print): Mark as deprecated. * spot/twaalgos/game.cc (pg_print): Redirect to print_pg(). (print_pg): Update to output state names. * python/spot/__init__.py: Teach to_str() about print_pg(). * bin/ltlsynt.cc: Adjust to call print_pg(). * tests/python/games.ipynb: Add an example. * tests/core/ltlsynt.test: Adjust to remove the "INIT" note. --- NEWS | 11 ++++ bin/ltlsynt.cc | 4 +- python/spot/__init__.py | 8 ++- spot/twaalgos/game.cc | 115 ++++++++++++++++++++------------------- spot/twaalgos/game.hh | 22 +++++++- tests/core/ltlsynt.test | 8 +-- tests/python/games.ipynb | 45 ++++++++++++++- 7 files changed, 145 insertions(+), 68 deletions(-) diff --git a/NEWS b/NEWS index 7e6bd6a40..77c4e081f 100644 --- a/NEWS +++ b/NEWS @@ -117,6 +117,17 @@ New in spot 2.10.6.dev (not yet released) to obtain a simple model checker (that returns true or false, without counterexample). + Python bindings: + + - The to_str() method of automata can now export a parity game into + the PG-Solver format by passing option 'pg'. See + https://spot.lrde.epita.fr/ipynb/games.html for an example. + + Deprectation notice: + + - spot::pg_print() has been deprecated in favor of spot::print_pg() + for consistency with the rest of the API. 
+ Bugs fixed: - calling twa_graph::new_univ_edge(src, begin, end, cond, acc) could diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index bcd9d41d9..06c29db88 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -400,7 +400,7 @@ namespace [](const spot::twa_graph_ptr& game)->void { if (opt_print_pg) - spot::pg_print(std::cout, game); + spot::print_pg(std::cout, game); else spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; } @@ -785,7 +785,7 @@ namespace if (opt_print_pg || opt_print_hoa) { if (opt_print_pg) - spot::pg_print(std::cout, arena); + spot::print_pg(std::cout, arena); else spot::print_hoa(std::cout, arena, opt_print_hoa_args) << '\n'; return 0; diff --git a/python/spot/__init__.py b/python/spot/__init__.py index a351e9c54..340eba00a 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2014-2021 Laboratoire de +# Copyright (C) 2014-2022 Laboratoire de # Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -261,6 +261,12 @@ class twa: ostr = ostringstream() print_lbtt(ostr, a, opt) return ostr.str() + if format == 'pg': + if opt is not None: + raise ValueError("print_pg() has no option") + ostr = ostringstream() + print_pg(ostr, a) + return ostr.str() raise ValueError("unknown string format: " + format) def save(a, filename, format='hoa', opt=None, append=False): diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 419b33fe3..ccb3b818e 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2017-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -23,7 +23,7 @@ #include #include -#include +#include #include namespace spot @@ -834,66 +834,71 @@ namespace spot return solve_parity_game(arena); } + // backward compatibility void pg_print(std::ostream& os, const const_twa_graph_ptr& arena) { - ensure_parity_game(arena, "pg_print"); + print_pg(os, arena); + } - auto do_print = [&os](const const_twa_graph_ptr& arena) - { - const region_t& owner = get_state_players(arena); - - unsigned ns = arena->num_states(); - unsigned init = arena->get_init_state_number(); - os << "parity " << ns - 1 << ";\n"; - std::vector seen(ns, false); - std::vector todo({init}); - while (!todo.empty()) - { - unsigned src = todo.back(); - todo.pop_back(); - if (seen[src]) - continue; - seen[src] = true; - os << src << ' '; - os << arena->out(src).begin()->acc.max_set() - 1 << ' '; - os << owner[src] << ' '; - bool first = true; - for (auto& e: arena->out(src)) - { - if (!first) - os << ','; - first = false; - os << e.dst; - if (!seen[e.dst]) - todo.push_back(e.dst); - } - if (src == init) - os << " \"INIT\""; - os << ";\n"; - } - }; - // Ensure coloring - // PGSolver format expects max odd and colored + std::ostream& print_pg(std::ostream& os, const const_twa_graph_ptr& arena) + { bool is_par, max, odd; is_par = arena->acc().is_parity(max, odd, true); - assert(is_par && "pg_printer needs parity condition"); - bool is_colored = (max & odd) ? 
std::all_of(arena->edges().begin(), - arena->edges().end(), - [](const auto& e) - { - return (bool) e.acc; - }) - : false; - if (is_colored) - do_print(arena); - else + if (!is_par) + throw std::runtime_error("print_pg: arena must have a parity acceptance"); + const region_t& owner = *ensure_game(arena, "print_pg"); + + bool max_odd_colored = + max && odd && std::all_of(arena->edges().begin(), + arena->edges().end(), + [](const auto& e) + { + return (bool) e.acc; + }); + const_twa_graph_ptr towork = arena; + if (!max_odd_colored) { - auto arena2 = change_parity(arena, parity_kind_max, parity_style_odd); - colorize_parity_here(arena2, true); - set_state_players(arena2, - get_state_players(arena)); - do_print(arena2); + twa_graph_ptr tmp = + change_parity(arena, parity_kind_max, parity_style_odd); + colorize_parity_here(tmp, true); + towork = tmp; } + + auto sn = arena->get_named_prop>("state-names"); + unsigned ns = towork->num_states(); + unsigned init = towork->get_init_state_number(); + os << "parity " << ns - 1 << ";\n"; + std::vector seen(ns, false); + std::vector todo({init}); + while (!todo.empty()) + { + unsigned src = todo.back(); + todo.pop_back(); + if (seen[src]) + continue; + seen[src] = true; + os << src << ' '; + os << towork->out(src).begin()->acc.max_set() - 1 << ' '; + os << owner[src] << ' '; + bool first = true; + for (auto& e: arena->out(src)) + { + if (!first) + os << ','; + first = false; + os << e.dst; + if (!seen[e.dst]) + todo.push_back(e.dst); + } + if (sn && sn->size() > src && !(*sn)[src].empty()) + { + os << " \""; + escape_str(os, (*sn)[src]); + os << '"'; + } + os << ";\n"; + } + return os; } void alternate_players(spot::twa_graph_ptr& arena, diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index 64f8d52c8..df5d27439 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -112,10 +112,26 @@ namespace spot /// \ingroup games - /// \brief Print a max odd parity game using PG-solver syntax + /// \brief Print a parity game using PG-solver syntax + /// + /// The input automaton should have parity acceptance and should + /// define state owner. Since the PG solver format want player 1 to + /// solve a max odd condition, the acceptance condition will be + /// adapted to max odd if necessary. + /// + /// The output will list the initial state as first state (because + /// that is the convention of our parser), and list only reachable + /// states. + /// + /// If states are named, the names will be output as well. + /// @{ + SPOT_API + std::ostream& print_pg(std::ostream& os, const const_twa_graph_ptr& arena); + + SPOT_DEPRECATED("use print_pg() instead") SPOT_API void pg_print(std::ostream& os, const const_twa_graph_ptr& arena); - + /// @} /// \ingroup games /// \brief Highlight the edges of a strategy on an automaton. 
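
(Illustrative sketch, not part of the patch: print_pg() takes the same two arguments as the deprecated pg_print() and additionally returns the stream it was given, so migrating caller code is mechanical. This assumes `game` is a spot::const_twa_graph_ptr that already carries a parity acceptance and the "state-player" property, as the new doxygen comment above requires.

    #include <iostream>
    #include <spot/twaalgos/game.hh>

    void dump_arena(const spot::const_twa_graph_ptr& game)
    {
      // Old spelling, still compiles but is flagged by SPOT_DEPRECATED:
      //   spot::pg_print(std::cout, game);
      // New spelling; the returned std::ostream& allows chaining.
      spot::print_pg(std::cout, game) << '\n';
    }

From Python, the same output is reachable through aut.to_str('pg'), as the python/spot/__init__.py hunk above shows.)
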
diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 95e0bf4d7..33369dcde 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -24,7 +24,7 @@ set -e cat >exp < GFo1)" --outs="o1,o2" --verbose\ --bypass=yes 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx -diff outx exp \ No newline at end of file +diff outx exp diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index fcc2bf12c..a6168b07e 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -897,7 +897,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f8f1861bd20> >" + " *' at 0x7feee9b0ebb0> >" ] }, "execution_count": 8, @@ -1224,7 +1224,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f8f187f5ef0> >" + " *' at 0x7fef001c87b0> >" ] }, "execution_count": 11, @@ -1240,7 +1240,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Input in PGSolver format\n", + "# Input/Output in PGSolver format\n", "\n", "The automaton parser is also able to parse the [PGSolver](https://github.com/tcsprojects/pgsolver) format. Here are two examples from the manual of PGSolver. The support for C-style comments is not part of the PGSolver format.\n", "\n", @@ -1623,6 +1623,45 @@ "display(a.show('.g'), b.show('.g'))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To output a parity-game in PG-solver format, use `to_str('pg')`." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "parity 4;\n", + "0 6 1 4,2 \"Africa\";\n", + "2 7 0 3,1,0,4 \"Asia\";\n", + "4 5 1 0 \"Antarctica\";\n", + "1 8 1 2,4,3 \"America\";\n", + "3 6 0 4,2 \"Australia\";\n", + "parity 7;\n", + "0 0 0 1,2;\n", + "2 0 0 3,4;\n", + "4 0 0 5,6;\n", + "6 0 0 7,0;\n", + "7 1 1 0,1;\n", + "1 1 1 2,3;\n", + "3 1 1 4,5;\n", + "5 1 1 6,7;\n", + "\n" + ] + } + ], + "source": [ + "print(a.to_str('pg') + b.to_str('pg'))" + ] + }, { "cell_type": "code", "execution_count": null, From b0165cf39c726cb251d58efd1a843179083261e7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 5 Aug 2022 18:55:44 +0200 Subject: [PATCH 118/606] * doc/org/tut10.org: Use the same formula in C++ as in Python and sh. --- doc/org/tut10.org | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/org/tut10.org b/doc/org/tut10.org index 419a33197..d4c45708a 100644 --- a/doc/org/tut10.org +++ b/doc/org/tut10.org @@ -139,7 +139,7 @@ automaton. Finally, the output as a never claim is done via the int main() { - spot::parsed_formula pf = spot::parse_infix_psl("[]<>a || <>[]b"); + spot::parsed_formula pf = spot::parse_infix_psl("GFa -> GFb"); if (pf.format_errors(std::cerr)) return 1; spot::translator trans; @@ -158,22 +158,22 @@ never { T0_init: if :: (true) -> goto T0_init - :: (a) -> goto accept_S1 - :: (b) -> goto accept_S2 + :: (b) -> goto accept_S1 + :: (!(a)) -> goto accept_S2 fi; accept_S1: if - :: (a) -> goto accept_S1 - :: (!(a)) -> goto T0_S3 + :: (b) -> goto accept_S1 + :: (!(b)) -> goto T0_S3 fi; accept_S2: if - :: (b) -> goto accept_S2 + :: (!(a)) -> goto accept_S2 fi; T0_S3: if - :: (a) -> goto accept_S1 - :: (!(a)) -> goto T0_S3 + :: (b) -> goto accept_S1 + :: (!(b)) -> goto T0_S3 fi; } From de9041bb31557267d1085bc3a340617eb28bd944 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 6 Aug 2022 14:06:52 +0200 Subject: [PATCH 119/606] mealy: make output_assignment the default for reduce_mealy * spot/twaalgos/mealy_machine.hh: Here. Also cite the FORTE paper. 
* doc/spot.bib (renkin.22.forte): New entry. --- doc/spot.bib | 30 +++++++++++++++++++++++------- spot/twaalgos/mealy_machine.hh | 28 +++++++++++++++------------- 2 files changed, 38 insertions(+), 20 deletions(-) diff --git a/doc/spot.bib b/doc/spot.bib index 9d5d6b235..2a58a3031 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -214,13 +214,13 @@ doi = {10.1109/DepCoS-RELCOMEX.2009.31} } -@InProceedings{ cimatti.06.fmcad, - author = {Cimatti, Alessandro and Roveri, Marco and Semprini, Simone and - Tonetta, Stefano}, - title = {From {PSL} to {NBA}: a Modular Symbolic Encoding}, - booktitle = {Proceedings of the 6th conference on Formal Methods in Computer - Aided Design (FMCAD'06)}, - pages = {125--133}, +@InProceedings{ cimatti.06.fmcad, + author = {Cimatti, Alessandro and Roveri, Marco and Semprini, Simone + and Tonetta, Stefano}, + title = {From {PSL} to {NBA}: a Modular Symbolic Encoding}, + booktitle = {Proceedings of the 6th conference on Formal Methods in + Computer Aided Design (FMCAD'06)}, + pages = {125--133}, year = {2006}, publisher = {IEEE Computer Society}, doi = {10.1109/FMCAD.2006.19} @@ -858,6 +858,22 @@ doi = {10.1007/978-3-030-59152-6_7} } +@InProceedings{ renkin.22.forte, + author = {Florian Renkin and Philipp Schlehuber-Caissier and + Alexandre Duret-Lutz and Adrien Pommellet}, + title = {Effective Reductions of {M}ealy Machines}, + year = 2022, + booktitle = {Proceedings of the 42nd International Conference on Formal + Techniques for Distributed Objects, Components, and Systems + (FORTE'22)}, + series = {Lecture Notes in Computer Science}, + volume = 13273, + pages = {170--187}, + month = jun, + publisher = {Springer}, + doi = {10.1007/978-3-031-08679-3_8} +} + @InProceedings{ rozier.07.spin, author = {Kristin Y. Rozier and Moshe Y. Vardi}, title = {LTL Satisfiability Checking}, diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 7406cb61d..6da0f072a 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2021-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -87,28 +87,30 @@ namespace spot unsplit_mealy(const const_twa_graph_ptr& m); /// \brief reduce an (in)completely specified mealy machine - /// Based on signature inclusion or equality. This is not guaranteed - /// to find the minimal number of states but is usually faster. - /// This also comes at another drawback: - /// All applicable sequences have to be infinite. Finite - /// traces are disregarded - /// \param mm The mealy machine to be minimized, has to be unsplit + /// + /// This is a bisimulation based reduction, that optionally use + /// inclusion between signatures to force some output when there is + /// a choice in order to favor more reductions. Only infinite + /// traces are considered. See \cite renkin.22.forte for details. + /// + /// \param mm The mealy machine to be minimized, has to be unsplit. /// \param output_assignment Whether or not to use output assignment /// \return A specialization of \c mm. Note that if mm is separated, /// the returned machine is separated as well. 
- /// \note See todo TACAS22 Effective reductions of mealy machines - /// @{ + /// @{ SPOT_API twa_graph_ptr reduce_mealy(const const_twa_graph_ptr& mm, - bool output_assignment = false); + bool output_assignment = true); SPOT_API void reduce_mealy_here(twa_graph_ptr& mm, - bool output_assignment = false); + bool output_assignment = true); /// @} /// \brief Minimizes an (in)completely specified mealy machine - /// The approach is described in \todo TACAS + /// + /// The approach is described in \cite renkin.22.forte. + /// /// \param premin Use reduce_mealy before applying the /// main algorithm if demanded AND /// the original machine has no finite trace. @@ -159,4 +161,4 @@ namespace spot SPOT_API void simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, bool split_out); -} \ No newline at end of file +} From faa8fe88734c61bdb55d480f9886808951b99fd0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 6 Aug 2022 15:08:11 +0200 Subject: [PATCH 120/606] mealy: cleanup the doxygen documentation * spot/twaalgos/mealy_machine.hh: Create a new "Mealy" section for all these function, and fix some doc strings. --- spot/twaalgos/mealy_machine.hh | 143 +++++++++++++++++++++------------ 1 file changed, 93 insertions(+), 50 deletions(-) diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 6da0f072a..d603d8000 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -21,59 +21,84 @@ #include +/// \addtogroup mealy Functions related to Mealy machines +/// \ingroup twa_algorithms + namespace spot { // Forward decl struct synthesis_info; - /// todo - /// Comment je faire au mieux pour expliquer mealy dans les doc - - /// \brief Checks whether or not the automaton is a mealy machine + /// \ingroup mealy + /// \brief Checks whether the automaton is a mealy machine + /// + /// A mealy machine is an automaton with the named property + /// `"synthesis-outputs"` and and that has a "true" as acceptance + /// condition. /// /// \param m The automaton to be verified - /// \note A mealy machine must have the named property \"synthesis-outputs\" - /// and have a \"true\" as acceptance condition + /// \see is_separated_mealy + /// \see is_split_mealy + /// \see is_input_deterministic_mealy SPOT_API bool is_mealy(const const_twa_graph_ptr& m); - /// \brief Checks whether or not the automaton is a separated mealy machine + /// \ingroup mealy + /// \brief Checks whether the automaton is a separated mealy machine + /// + /// A separated mealy machine is a mealy machine with + /// all transitions having the form `(in)&(out)` where `in` and + /// `out` are BDDs over the input and output propositions. /// /// \param m The automaton to be verified - /// \note A separated mealy machine is a mealy machine machine with all - /// transitions having the form (in)&(out) where in[out] is a bdd over - /// input[output] propositions of m + /// + /// \see is_mealy + /// \see is_split_mealy SPOT_API bool is_separated_mealy(const const_twa_graph_ptr& m); + /// \ingroup mealy /// \brief Checks whether or not the automaton is a split mealy machine /// + /// A split mealy machine is a mealy machine machine that has + /// be converted into a game. It should have the named property + /// `"state-player"`, moreover the game should be alternating + /// between the two players. 
Transitions leaving states owned by + /// player 0 (the environment) should use only input propositions, + /// while transitions leaving states owned by player 1 (the + /// controller) should use only output propositions. + /// /// \param m The automaton to be verified - /// \note A split mealy machine is a mealy machine machine with the named - /// property \"state-player\". Moreover the underlying automaton - /// must be alternating between the player and the env. Transitions - /// leaving env[player] states can only be labeled by - /// input[output] propositions. + /// \see is_mealy + /// \see is_separated_mealy SPOT_API bool is_split_mealy(const const_twa_graph_ptr& m); - /// \brief Checks whether or not a mealy machine is input deterministic + /// \brief Checks whether a mealy machine is input deterministic + /// + /// A machine is input deterministic if none of the states has two + /// outgoing transitions that can agree on a common assignment of + /// the input propositions. In case the mealy machine is split, the + /// previous condition is tested only on states owned by player 0 + /// (the environment). /// /// \param m The automaton to be verified - /// \note works all mealy machines, no matter whether they are split - /// or separated or neither of neither of them. - /// \note A machine is input deterministic if none of the states - /// has two outgoing transitions that can agree on a assignement - /// of the input propositions. + /// \see is_mealy SPOT_API bool is_input_deterministic_mealy(const const_twa_graph_ptr& m); - /// \brief make each transition in a separated mealy machine a - /// 2-step transition. + /// \ingroup mealy + /// \brief split a separated mealy machine + /// + /// In a separated mealy machine, every transitions as a label of + /// the form `(in)&(out)`. This function will turn each transtion + /// into a pair of consecutive transitions labeled by `in` and + /// `out`, and turn the mealy machine into a game (what we call a + /// split mealy machine) /// /// \param m separated mealy machine to be split - /// \return returns the equivalent split mealy machine if not done inplace + /// \see is_split_mealy /// @{ SPOT_API twa_graph_ptr split_separated_mealy(const const_twa_graph_ptr& m); @@ -82,10 +107,18 @@ namespace spot split_separated_mealy_here(const twa_graph_ptr& m); /// @} + /// \ingroup mealy /// \brief the inverse of split_separated_mealy + /// + /// Take a split mealy machine \a m, and build a separated mealy machine. + /// + /// \see split_separated_mealy + /// \see is_split_mealy + /// \see is_separated_mealy SPOT_API twa_graph_ptr unsplit_mealy(const const_twa_graph_ptr& m); + /// \ingroup mealy /// \brief reduce an (in)completely specified mealy machine /// /// This is a bisimulation based reduction, that optionally use @@ -95,9 +128,11 @@ namespace spot /// /// \param mm The mealy machine to be minimized, has to be unsplit. /// \param output_assignment Whether or not to use output assignment - /// \return A specialization of \c mm. Note that if mm is separated, - /// the returned machine is separated as well. - /// @{ + /// \return A specialization of \c mm. + /// + /// \note If mm is separated, the returned machine is separated as + /// well. 
+ /// @{ SPOT_API twa_graph_ptr reduce_mealy(const const_twa_graph_ptr& mm, bool output_assignment = true); @@ -107,53 +142,60 @@ namespace spot bool output_assignment = true); /// @} + /// \ingroup mealy /// \brief Minimizes an (in)completely specified mealy machine /// /// The approach is described in \cite renkin.22.forte. /// - /// \param premin Use reduce_mealy before applying the - /// main algorithm if demanded AND - /// the original machine has no finite trace. - /// -1 : Do not use; - /// 0 : Use without output assignment; - /// 1 : Use with output assignment - /// \return Returns a split mealy machines which is a minimal - /// speciliazation of the original machine + /// \param premin Whether to use reduce_mealy as a preprocessing: + /// - -1: Do not use; + /// - 0: Use without output assignment; + /// - 1: Use with output assignment. + /// \return A split mealy machines which is a minimal + /// specialization of the original machine. + /// + /// \note Enabling \a premin will remove finite traces. + /// \see is_split_mealy_specialization SPOT_API twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, int premin = -1); + /// \ingroup mealy /// \brief Test if the split mealy machine \a right is a specialization of /// the split mealy machine \a left. /// - /// That is all input sequences valid for left - /// must be applicable for right and the induced sequence of output signals - /// of right must imply the ones of left + /// That is, all input sequences valid for left must be applicable + /// for right and the induced sequence of output signals of right + /// must imply the ones of left SPOT_API bool is_split_mealy_specialization(const_twa_graph_ptr left, const_twa_graph_ptr right, bool verbose = false); + /// \ingroup mealy /// \brief Product between two mealy machines \a left and \a right. /// \pre The machines have to be both either split or unsplit, - /// input complete and compatible. All of this is check by assertion - /// \result The mealy machine representing the shared behaviour. - /// The resulting machine has the same class (mealy/separated/split) - /// as the input machines + /// input complete and compatible. All of this is checked by assertion. + /// \result A mealy machine representing the shared behaviour, + /// with the same tyoe (mealy/separated/split) as the input machines SPOT_API twa_graph_ptr mealy_product(const const_twa_graph_ptr& left, const const_twa_graph_ptr& right); + /// \ingroup mealy /// \brief Convenience function to call minimize_mealy or reduce_mealy. - /// Uses the same convention as ltlsynt for \a minimize_lvl: - /// 0: no reduction - /// 1: bisimulation based reduction - /// 2: bisimulation with output assignment - /// 3: SAT minimization - /// 4: 1 then 3 - /// 5: 2 then 3 + /// Uses the same convention as ltlsynt for \a minimize_lvl (or the + /// field `minimize_lvl` of \a si): + /// - 0: no reduction + /// - 1: bisimulation based reduction + /// - 2: bisimulation with output assignment + /// - 3: SAT minimization + /// - 4: 1 then 3 + /// - 5: 2 then 3 + /// /// Minimizes the given machine \a m inplace, the parameter - /// \a split_out defines whether it is split or not + /// \a split_out specifies if the result should be split. 
+ /// @{ SPOT_API void simplify_mealy_here(twa_graph_ptr& m, int minimize_lvl, bool split_out); @@ -161,4 +203,5 @@ namespace spot SPOT_API void simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, bool split_out); + /// @} } From a7e87a1fc79332a81a9851f55ecd2479e426202c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 10 Aug 2022 10:07:22 +0200 Subject: [PATCH 121/606] Mention the CAV'22 paper * doc/org/citing.org: Here. * doc/org/spot.css: Add support for "begin_note...end_note". --- doc/org/citing.org | 34 ++++++++++++++++++++++++---------- doc/org/spot.css | 2 ++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/doc/org/citing.org b/doc/org/citing.org index 0704eb886..8cc1f52ef 100644 --- a/doc/org/citing.org +++ b/doc/org/citing.org @@ -6,18 +6,22 @@ * Generic reference -If you need to cite the Spot project in some academic paper, please -use the following reference: +If you need to cite the Spot project, the latest tool paper about +it is the following reference: -- *Spot 2.0 — a framework for LTL and ω-automata manipulation*, - /Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/, - /Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc. - of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016. - ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]]) +- *From Spot 2.0 to Spot 2.10: What's new?*, /Alexandre Duret-Lutz/, + /Etienne Renault/, /Maximilien Colange/, /Florian Renkin/, + /Alexandre Gbaguidi Aisse/, /Philipp Schlehuber-Caissier/, /Thomas + Medioni/, /Antoine Martin/, /Jérôme Dubois/, /Clément Gillard/, and + Henrich Lauko/. In Proc. of CAV'22, LNCS 13372, pp. 174--187. + Haifa, Israel, Aug. 2022. + ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.22.cav][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.22.cav.pdf][pdf]]) - This provides a quick overview of the entire project (the features - of the library, [[file:tools.org][the tools]], the Python bindings), and provides many - references detailing more specific aspects. +#+begin_note + Tools evolve while published papers don't. Please always specify + the version of Spot (or any other tool) you are using when citing it + in a paper. Future versions might have different behaviors. +#+end_note * Other, more specific, references @@ -83,6 +87,16 @@ be more specific about a particular aspect of Spot. * Obsolete reference +- *Spot 2.0 — a framework for LTL and ω-automata manipulation*, + /Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/, + /Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc. + of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016. + ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]]) + + This provides a quick overview of the entire project (the features + of the library, [[file:tools.org][the tools]], the Python bindings), and provides many + references detailing more specific aspects. + - *Spot: an extensible model checking library using transition-based generalized Büchi automata*, /Alexandre Duret-Lutz/ and /Denis Poitrenaud/. In Proc. of MASCOTS'04, pp. 76--83. 
Volendam, The diff --git a/doc/org/spot.css b/doc/org/spot.css index 74cbab5bf..7bbd0ef39 100644 --- a/doc/org/spot.css +++ b/doc/org/spot.css @@ -77,6 +77,8 @@ thead tr{background:#ffe35e} .org-hoa-ap-number{color:#d70079} .implem{background:#fff0a6;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#ffe35e;border-style:solid none} .implem::before{background:#ffe35e;content:"Implementation detail";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} +.note{background:#fff0a6;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#ffe35e;border-style:solid none} +.note::before{background:#ffe35e;content:"Note";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} .caveat{background:#ef99c9;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#d70079;border-style:solid none} .caveat::before{background:#d70079;content:"Caveat";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} .spotlogo{transform-origin:50% 50%;animation-duration:2s;animation-name:animspotlogo} From 2848951965ec258c225075f1ad00a3a7a337f0cc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 10 Aug 2022 10:26:30 +0200 Subject: [PATCH 122/606] * doc/spot.bib: Add entries for last two tool papers. --- doc/spot.bib | 50 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) diff --git a/doc/spot.bib b/doc/spot.bib index 2a58a3031..284bf226a 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -1,4 +1,3 @@ - @InProceedings{ babiak.12.tacas, author = {Tom{\'a}{\v{s}} Babiak and Mojm{\'i}r K{\v{r}}et{\'i}nsk{\'y} and Vojt{\v{e}}ch {\v{R}}eh{\'a}k @@ -353,6 +352,41 @@ doi = {10.1504/IJCCBS.2014.059594} } +@InProceedings{ duret.16.atva, + author = {Alexandre Duret-Lutz and Fabrice Kordon and Denis + Poitrenaud and Etienne Renault}, + title = {Heuristics for Checking Liveness Properties with Partial + Order Reductions}, + booktitle = {Proceedings of the 14th International Symposium on + Automated Technology for Verification and Analysis + (ATVA'16)}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer}, + volume = {9938}, + pages = {340--356}, + year = {2016}, + month = oct, + doi = {10.1007/978-3-319-46520-3_22} +} + +@InProceedings{ duret.22.cav, + author = {Alexandre~Duret-Lutz and Etienne Renault and Maximilien + Colange and Florian Renkin and Alexandre Gbaguidi~Aisse and + Philipp Schlehuber-Caissier and Thomas Medioni and Antoine + Martin and J{\'e}r{\^o}me Dubois and Cl{\'e}ment Gillard + and Henrich Lauko}, + title = {From {S}pot 2.0 to {S}pot 2.10: What's New?}, + booktitle = {Proceedings of the 34th International Conference on + Computer Aided Verification (CAV'22)}, + year = 2022, + volume = {13372}, + series = {Lecture Notes in Computer Science}, + pages = {174--187}, + month = aug, + publisher = {Springer}, + doi = {10.1007/978-3-031-13188-2_9} +} + @InProceedings{ dwyer.98.fmsp, author = {Matthew B. Dwyer and George S. Avrunin and James C. Corbett}, @@ -1036,7 +1070,19 @@ publisher = {Elsevier}, editor = {Rance Cleaveland and Hubert Garavel}, year = {2002}, - month = jul, + month = jul, pdf = {adl/duret.16.atva.pdf}, + abstract = {Checking liveness properties with partial-order reductions + requires a cycle proviso to ensure that an action cannot be + postponed forever. The proviso forces each cycle to contain + at least one fully expanded state. We present new + heuristics to select which state to expand, hoping to + reduce the size of the resulting graph. 
The choice of the + state to expand is done when encountering a + \emph{dangerous} edge. Almost all existing provisos expand + the source of this edge, while this paper also explores the + expansion of the destination and the use of SCC-based + information.}, + address = {M{\'a}laga, Spain}, doi = {10.1016/S1571-0661(04)80409-2} } From d1b8495510c354ff635b171af3e9ecc8078bd6fc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 12 Aug 2022 14:56:45 +0200 Subject: [PATCH 123/606] do not use a global variable to define the number of available threads * python/spot/impl.i: Make parallel_policy implicitly contractible. * spot/graph/graph.hh (sort_edges_srcfirst_): Pass a parallel_policy explicitly. * spot/twa/twagraph.hh, spot/twa/twagraph.cc (merge_states): Likewise. * spot/misc/common.cc: Remove file. * spot/misc/common.hh (set_nthreads, get_nthreads): Remove, and replace with... (parallel_policy): ... this. * spot/misc/Makefile.am, tests/python/mergedge.py, NEWS: Adjust. --- NEWS | 8 ++-- python/spot/impl.i | 1 + spot/graph/graph.hh | 47 +++++++++++----------- spot/misc/Makefile.am | 3 +- spot/misc/common.cc | 33 --------------- spot/misc/common.hh | 31 ++++++++++----- spot/twa/twagraph.cc | 86 ++++++++++++++++++++-------------------- spot/twa/twagraph.hh | 8 +++- tests/python/mergedge.py | 74 +++++++++++++++++----------------- 9 files changed, 137 insertions(+), 154 deletions(-) delete mode 100644 spot/misc/common.cc diff --git a/NEWS b/NEWS index 77c4e081f..5b55ebbdd 100644 --- a/NEWS +++ b/NEWS @@ -38,9 +38,6 @@ New in spot 2.10.6.dev (not yet released) Library: - - A global variable, together with its setters and getters to define the - maximal number of threads is added to common.hh/common.cc - - The new function suffix_operator_normal_form() implements transformation of formulas to Suffix Operator Normal Form, described in [cimatti.06.fmcad]. @@ -117,6 +114,11 @@ New in spot 2.10.6.dev (not yet released) to obtain a simple model checker (that returns true or false, without counterexample). + - spot::parallel_policy is an object that can be passed to some + algorithm to specify how many threads can be used if Spot has been + compiled with --enable-pthread. Currently, only + twa_graph::merge_states() support it. 
+ Python bindings: - The to_str() method of automata can now export a parity game into diff --git a/python/spot/impl.i b/python/spot/impl.i index a07709005..23c07c4e8 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -486,6 +486,7 @@ static void handle_any_exception() } } +%implicitconv spot::parallel_policy; %include %include %include diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 06ddf0997..531426244 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1255,56 +1255,57 @@ namespace spot /// and make a temporary copy of the edges (needs more ram) /// \pre This needs the edge_vector to be in a coherent state when called template> - void sort_edges_srcfirst_(Predicate p = Predicate()) + void sort_edges_srcfirst_(Predicate p = Predicate(), + parallel_policy ppolicy = parallel_policy()) { - //std::cerr << "\nbefore\n"; - //dump_storage(std::cerr); - const auto N = num_states(); - - auto idx_list = std::vector(N+1); - auto new_edges = edge_vector_t(); + SPOT_ASSERT(!edges_.empty()); + const unsigned ns = num_states(); + std::vector idx_list(ns+1); + edge_vector_t new_edges; new_edges.reserve(edges_.size()); - if (SPOT_UNLIKELY(edges_.empty())) - throw std::runtime_error("Empty edge vector!"); new_edges.resize(1); // This causes edge 0 to be considered as dead. new_edges[0].next_succ = 0; - // Copy the edges such that they are sorted by src - for (auto s = 0u; s < N; ++s) + // Copy all edges so that they are sorted by src + for (unsigned s = 0; s < ns; ++s) { idx_list[s] = new_edges.size(); for (const auto& e : out(s)) new_edges.push_back(e); } - idx_list[N] = new_edges.size(); + idx_list[ns] = new_edges.size(); // New edge sorted by source // If we have few edge or only one threads // Benchmark few? auto bne = new_edges.begin(); -#ifdef SPOT_ENABLE_PTHREAD - const unsigned nthreads = get_nthreads(); - if (nthreads == 1 || edges_.size() < 1000) +#ifndef SPOT_ENABLE_PTHREAD + (void) ppolicy; +#else + unsigned nthreads = ppolicy.nthreads(); + if (nthreads <= 1) #endif { - for (auto s = 0u; s < N; ++s) + for (unsigned s = 0u; s < ns; ++s) std::stable_sort(bne + idx_list[s], - bne + idx_list[s+1], - p); + bne + idx_list[s+1], p); } #ifdef SPOT_ENABLE_PTHREAD else { - static auto tv = std::vector(); + static std::vector tv; SPOT_ASSERT(tv.empty()); tv.resize(nthreads); + // FIXME: Due to the way these thread advence into the sate + // vectors, they access very close memory location. It + // would seems more cache friendly to have thread work on + // blocks of continuous states. for (unsigned id = 0; id < nthreads; ++id) tv[id] = std::thread( - [bne, id, N, &idx_list, p, nthreads]() + [bne, id, ns, &idx_list, p, nthreads]() { - for (auto s = id; s < N; s+=nthreads) + for (unsigned s = id; s < ns; s += nthreads) std::stable_sort(bne + idx_list[s], - bne + idx_list[s+1], - p); + bne + idx_list[s+1], p); return; }); for (auto& t : tv) diff --git a/spot/misc/Makefile.am b/spot/misc/Makefile.am index 623a13c87..6b771dbb5 100644 --- a/spot/misc/Makefile.am +++ b/spot/misc/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2014, 2016-2018, 2020-2021 Laboratoire de +## Copyright (C) 2011-2014, 2016-2018, 2020-2022 Laboratoire de ## Recherche et Développement de l'Epita (LRDE). 
## Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de ## Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -63,7 +63,6 @@ libmisc_la_SOURCES = \ bareword.cc \ bitset.cc \ bitvect.cc \ - common.cc \ escape.cc \ formater.cc \ intvcomp.cc \ diff --git a/spot/misc/common.cc b/spot/misc/common.cc deleted file mode 100644 index adf9f2da0..000000000 --- a/spot/misc/common.cc +++ /dev/null @@ -1,33 +0,0 @@ -// -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// -// This file is part of Spot, a model checking library. -// -// Spot is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation; either version 3 of the License, or -// (at your option) any later version. -// -// Spot is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -// License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -#include "config.h" -#include - -static unsigned N_MAX_THREADS = 1; - -void set_nthreads(unsigned nthreads) -{ - N_MAX_THREADS = nthreads; -} - -unsigned get_nthreads() -{ - return N_MAX_THREADS; -} \ No newline at end of file diff --git a/spot/misc/common.hh b/spot/misc/common.hh index fc74a8ee7..8b066b0a5 100644 --- a/spot/misc/common.hh +++ b/spot/misc/common.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -145,6 +145,27 @@ namespace spot { } }; + + /// \brief This class is used to tell parallel algorithms what + /// resources they may use. + /// + /// Currently, this simply stores an integer indicating the number + /// of threads that the algorithm may create, but in the future it + /// will probably do more. + class SPOT_API parallel_policy + { + unsigned nthreads_; + public: + parallel_policy(unsigned nthreads = 1) : nthreads_(nthreads) + { + } + + unsigned nthreads() const + { + return nthreads_; + } + }; + } // This is a workaround for the issue described in GNU GCC bug 89303. @@ -169,11 +190,3 @@ namespace spot # define SPOT_make_shared_enabled__(TYPE, ...) 
\ std::make_shared(__VA_ARGS__) #endif - - -// Global variable to determine the maximal number of threads -SPOT_API void -set_nthreads(unsigned nthreads); - -SPOT_API unsigned -get_nthreads(); diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 5b4da10a3..64f229b03 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -366,15 +366,16 @@ namespace spot g_.chain_edges_(); } - unsigned twa_graph::merge_states() + unsigned twa_graph::merge_states(parallel_policy ppolicy) { if (!is_existential()) throw std::runtime_error( "twa_graph::merge_states() does not work on alternating automata"); #ifdef ENABLE_PTHREAD - const unsigned nthreads = get_nthreads(); + const unsigned nthreads = ppolicy.nthreads(); #else + (void) ppolicy; constexpr unsigned nthreads = 1; #endif @@ -392,10 +393,10 @@ namespace spot if (rhs.cond != lhs.cond) return false; return lhs.dst < rhs.dst; - }); + }, nthreads); g_.chain_edges_(); - const auto n_states = num_states(); + const unsigned n_states = num_states(); // Edges are nicely chained and there are no erased edges // -> We can work with the edge_vector @@ -404,30 +405,28 @@ namespace spot // if so, the graph alternates between env and player vertices, // so there are, by definition, no self-loops auto sp = get_named_prop>("state-player"); - const auto spe = (bool) sp; // The hashing is a bit delicat: We may only use the dst // if it has no self-loop - auto use_for_hash = spe ? std::vector() - : std::vector(n_states); + auto use_for_hash = sp ? std::vector() + : std::vector(n_states); const auto& e_vec = edge_vector(); - const auto n_edges = e_vec.size(); + unsigned n_edges = e_vec.size(); // For each state we need 4 indices of the edge vector // [first, first_non_sfirst_selflooplfloop, first_selfloop, end] // The init value makes sure nothing is done for dead end states - auto e_idx = - std::vector>(n_states, {-1u, -1u, - -1u, -1u}); + std::vector> e_idx(n_states, {-1u, -1u, + -1u, -1u}); // Like a linked list holding the non-selfloop and selfloop transitions - auto e_chain = std::vector(e_vec.size(), -1u); + std::vector e_chain(n_edges, -1u); unsigned idx = 1; // Edges are sorted with repected to src first const unsigned n_high = e_vec.back().src; - if (spe) + if (sp) for (auto s = 0u; s < n_high; ++s) treat(e_idx, e_vec, e_chain, use_for_hash, idx, s, n_edges); @@ -436,7 +435,7 @@ namespace spot treat(e_idx, e_vec, e_chain, use_for_hash, idx, s, n_edges); // Last one - if (spe) + if (sp) treat(e_idx, e_vec, e_chain, use_for_hash, idx, n_high, n_edges); else @@ -445,7 +444,7 @@ namespace spot assert(idx == e_vec.size() && "Something went wrong during indexing"); - auto n_players = 0u; + unsigned n_players = 0u; if (sp) n_players = std::accumulate(sp->begin(), sp->end(), 0u); @@ -454,14 +453,12 @@ namespace spot // hash_linked_list is like a linked list structure // of fake pointers - auto hash_linked_list = std::vector(n_states, -1u); - auto s_to_hash = std::vector(n_states, 0); - auto env_map = - robin_hood::unordered_flat_map>(); - auto player_map = - robin_hood::unordered_flat_map>(); + std::vector hash_linked_list(n_states, -1u); + std::vector s_to_hash(n_states, 0); + robin_hood::unordered_flat_map> env_map; + robin_hood::unordered_flat_map> player_map; env_map.reserve(n_states - n_players); player_map.reserve(n_players); @@ -476,7 +473,7 @@ namespace spot else { // "tail" - auto idx = it->second.second; + unsigned idx = it->second.second; assert(idx < s && "Must be monotone"); hash_linked_list[idx] = s; it->second.second = s; @@ -484,19 
+481,19 @@ namespace spot }; // Hash all states - constexpr auto SHIFT = sizeof(size_t)/2 * CHAR_BIT; + constexpr unsigned shift = sizeof(size_t)/2 * CHAR_BIT; for (auto s = 0u; s != n_states; ++s) { - auto h = fnv::init; - const auto e = e_idx[s][3]; - for (auto i = e_idx[s][0]; i != e; ++i) + size_t h = fnv::init; + const unsigned e = e_idx[s][3]; + for (unsigned i = e_idx[s][0]; i != e; ++i) { // If size_t has 8byte and unsigned has 4byte // then this works fine, otherwise there might be more collisions - size_t hh = spe || use_for_hash[e_vec[i].dst] + size_t hh = sp || use_for_hash[e_vec[i].dst] ? e_vec[i].dst : fnv::init; - hh <<= SHIFT; + hh <<= shift; hh += e_vec[i].cond.id(); h ^= hh; h *= fnv::prime; @@ -504,7 +501,7 @@ namespace spot h *= fnv::prime; } s_to_hash[s] = h; - if (spe && (*sp)[s]) + if (sp && (*sp)[s]) emplace(player_map, h, s); else emplace(env_map, h, s); @@ -538,20 +535,20 @@ namespace spot auto [i1, nsl1, sl1, e1] = e_idx[s1]; auto [i2, nsl2, sl2, e2] = e_idx[s2]; - if ((e2-i2) != (e1-i1)) + if ((e2 - i2) != (e1 - i1)) return false; // Different number of outgoing trans // checked1/2 is one element larger than necessary // the last element is always false // and acts like a nulltermination - checked1.resize(e1-i1+1); + checked1.resize(e1 - i1 + 1); std::fill(checked1.begin(), checked1.end(), false); - checked2.resize(e2-i2+1); + checked2.resize(e2 - i2 + 1); std::fill(checked2.begin(), checked2.end(), false); // Try to match self-loops // Not entirely sure when this helps exactly - while ((sl1 < e1) & (sl2 < e2)) + while ((sl1 < e1) && (sl2 < e2)) { // Like a search in ordered array if (e_vec[sl1].data() == e_vec[sl2].data()) @@ -576,12 +573,12 @@ namespace spot // Check if all have been correctly treated if ((nsl1 > e1) && std::all_of(checked1.begin(), checked1.end(), - [](const auto& e){return e; })) + [](const auto& e){return e;})) return true; // The remaining edges need to match exactly - auto idx1 = i1; - auto idx2 = i2; + unsigned idx1 = i1; + unsigned idx2 = i2; while (((idx1 < e1) & (idx2 < e2))) { // More efficient version? @@ -600,7 +597,7 @@ namespace spot if ((e_vec[idx1].dst != e_vec[idx2].dst) - || !(e_vec[idx1].data() == e_vec[idx2].data())) + || !(e_vec[idx1].data() == e_vec[idx2].data())) return false; // Advance @@ -620,7 +617,7 @@ namespace spot std::vector& checked2) { v.clear(); - for (auto i = ix; i != -1U; i = hash_linked_list[i]) + for (unsigned i = ix; i != -1U; i = hash_linked_list[i]) v.push_back(i); const unsigned N = v.size(); @@ -699,8 +696,10 @@ namespace spot auto bege = env_map.begin(); auto ende = env_map.end(); -#ifdef ENABLE_PTHREAD - if ((nthreads == 1) & (num_states() > 1000)) // Bound? +#ifndef ENABLE_PTHREAD + (void) nthreads; +#else + if (nthreads <= 1) { #endif // ENABLE_PTHREAD worker(0, begp, endp, bege, ende); @@ -728,12 +727,11 @@ namespace spot for (auto& e: edges()) if (remap[e.dst] != -1U) { - assert((!spe || (sp->at(e.dst) == sp->at(remap[e.dst]))) + assert((!sp || (sp->at(e.dst) == sp->at(remap[e.dst]))) && "States do not have the same owner"); e.dst = remap[e.dst]; } - if (remap[get_init_state_number()] != -1U) set_init_state(remap[get_init_state_number()]); diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index c6222871e..742a4d69a 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement // de l'Epita. 
// // This file is part of Spot, a model checking library. @@ -589,8 +589,12 @@ namespace spot /// (1)-a->(1) and (1)-a->(1) if (1), (2) and (3) are merged into /// (1). /// + /// On large automaton, it might be worthwhile to use multiple + /// threads to find states that can be merged. This can be + /// requested with the \a ppolicy argument. + /// /// \return the number of states that have been merged and removed. - unsigned merge_states(); + unsigned merge_states(parallel_policy ppolicy = parallel_policy()); /// \brief Like merge states, but one can chose which states are /// candidates for merging. diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index 2be4d4984..b3e934946 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -23,42 +23,40 @@ import spot from unittest import TestCase tc = TestCase() +aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" +Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") +tc.assertEqual(aut.num_edges(), 2) +aut.merge_edges() +tc.assertEqual(aut.num_edges(), 1) + +aut = spot.automaton(""" +HOA: v1 +States: 2 +Start: 0 +AP: 2 "p0" "p1" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc complete +--BODY-- +State: 0 +[!0] 0 {0} +[0] 1 {0} +State: 1 +[!0&!1] 0 {0} +[0 | 1] 1 +[0&!1] 1 {0} +--END--""") +tc.assertEqual(aut.num_edges(), 5) +aut.merge_edges() +tc.assertEqual(aut.num_edges(), 5) +tc.assertFalse(spot.is_deterministic(aut)) +aut = spot.split_edges(aut) +tc.assertEqual(aut.num_edges(), 9) +aut.merge_edges() +tc.assertEqual(aut.num_edges(), 5) +tc.assertTrue(spot.is_deterministic(aut)) + for nthread in range(1, 16, 2): - spot.set_nthreads(nthread) - tc.assertEqual(spot.get_nthreads(), nthread) - aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" - Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") - tc.assertEqual(aut.num_edges(), 2) - aut.merge_edges() - tc.assertEqual(aut.num_edges(), 1) - - aut = spot.automaton(""" - HOA: v1 - States: 2 - Start: 0 - AP: 2 "p0" "p1" - acc-name: Buchi - Acceptance: 1 Inf(0) - properties: trans-labels explicit-labels trans-acc complete - --BODY-- - State: 0 - [!0] 0 {0} - [0] 1 {0} - State: 1 - [!0&!1] 0 {0} - [0 | 1] 1 - [0&!1] 1 {0} - --END--""") - tc.assertEqual(aut.num_edges(), 5) - aut.merge_edges() - tc.assertEqual(aut.num_edges(), 5) - tc.assertFalse(spot.is_deterministic(aut)) - aut = spot.split_edges(aut) - tc.assertEqual(aut.num_edges(), 9) - aut.merge_edges() - tc.assertEqual(aut.num_edges(), 5) - tc.assertTrue(spot.is_deterministic(aut)) - aut = spot.automaton(""" HOA: v1 States: 3 @@ -78,12 +76,12 @@ for nthread in range(1, 16, 2): [!0] 2 {0} [0] 1 --END--""") - aut.merge_states() + aut.merge_states(nthread) tc.assertEqual(aut.num_edges(), 4) tc.assertEqual(aut.num_states(), 2) tc.assertTrue(spot.is_deterministic(aut)) tc.assertTrue(aut.prop_complete()) - aut.merge_states() + aut.merge_states(nthread) tc.assertEqual(aut.num_edges(), 4) tc.assertEqual(aut.num_states(), 2) tc.assertTrue(spot.is_deterministic(aut)) @@ -168,6 +166,6 @@ for nthread in range(1, 16, 2): State: 40 [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 {1} --END--""") - aa.merge_states() + aa.merge_states(nthread) # This used to cause a segfault reported by Philipp. 
print(aa.to_str()) From cd21521bfe3bae5d8dfd65c6df2bb4bec61ff80b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 12 Aug 2022 17:24:02 +0200 Subject: [PATCH 124/606] * spot/twa/twagraph.cc (merge_states): Some cleanup and simplifications. --- spot/twa/twagraph.cc | 194 +++++++++++++++++++++---------------------- 1 file changed, 95 insertions(+), 99 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 64f229b03..4d0009e93 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -36,11 +36,11 @@ namespace using namespace spot; // If LAST is false, // it is guaranteed that there will be another src state - template + template void treat(std::vector>& e_idx, const twa_graph::graph_t::edge_vector_t& e_vec, std::vector& e_chain, - std::vector& use_for_hash, + std::vector& hash_of_state, unsigned& idx, unsigned s, unsigned n_e) @@ -98,10 +98,10 @@ namespace } s_idx[3] = idx; - // Check if self-loops appeared - // If so -> do not use for hash - if constexpr (!SPE) - use_for_hash[s] = s_idx[2] == -1u; + // Check if self-loops appeared. We cannot hash + // states with self-loops. + if (s_idx[2] != -1u) + hash_of_state[s] = fnv::init; } } @@ -401,15 +401,19 @@ namespace spot // Edges are nicely chained and there are no erased edges // -> We can work with the edge_vector - // Check if it is a game <-> "state-player" is defined - // if so, the graph alternates between env and player vertices, - // so there are, by definition, no self-loops + // Check if it is a game <-> "state-player" is defined. If + // so, we can only merge states that belong to the same player. + // (We will use two hash maps in this case.) auto sp = get_named_prop>("state-player"); - // The hashing is a bit delicat: We may only use the dst - // if it has no self-loop - auto use_for_hash = sp ? std::vector() - : std::vector(n_states); + // The hashing is a bit delicat: We may only use the dst if it has + // no self-loop. HASH_OF_STATE stores the hash associated to each + // state (by default its own number) or some common value if the + // state contains self-loop. 
+ std::vector hash_of_state; + hash_of_state.reserve(n_states); + for (unsigned i = 0; i < n_states; ++i) + hash_of_state.push_back(i); const auto& e_vec = edge_vector(); unsigned n_edges = e_vec.size(); @@ -424,29 +428,20 @@ namespace spot unsigned idx = 1; - // Edges are sorted with repected to src first + // Edges are sorted with respect to src first const unsigned n_high = e_vec.back().src; - if (sp) - for (auto s = 0u; s < n_high; ++s) - treat(e_idx, e_vec, e_chain, - use_for_hash, idx, s, n_edges); - else - for (auto s = 0u; s < n_high; ++s) - treat(e_idx, e_vec, e_chain, - use_for_hash, idx, s, n_edges); + for (auto s = 0u; s < n_high; ++s) + treat(e_idx, e_vec, e_chain, + hash_of_state, idx, s, n_edges); // Last one - if (sp) - treat(e_idx, e_vec, e_chain, - use_for_hash, idx, n_high, n_edges); - else - treat(e_idx, e_vec, e_chain, - use_for_hash, idx, n_high, n_edges); + treat(e_idx, e_vec, e_chain, + hash_of_state, idx, n_high, n_edges); assert(idx == e_vec.size() && "Something went wrong during indexing"); - unsigned n_players = 0u; + unsigned n_player1 = 0u; if (sp) - n_players = std::accumulate(sp->begin(), sp->end(), 0u); + n_player1 = std::accumulate(sp->begin(), sp->end(), 0u); // Represents which states share a hash // Head is in the unordered_map, @@ -454,26 +449,28 @@ namespace spot // of fake pointers std::vector hash_linked_list(n_states, -1u); - std::vector s_to_hash(n_states, 0); - robin_hood::unordered_flat_map> env_map; - robin_hood::unordered_flat_map> player_map; - env_map.reserve(n_states - n_players); - player_map.reserve(n_players); + typedef robin_hood::unordered_flat_map> player_map; + // If the automaton is not a game, everything is assumed to be + // owned by player 0. + player_map map0; // for player 0 + player_map map1; // for player 1 + + map0.reserve(n_states - n_player1); + map1.reserve(n_player1); // Sadly we need to loop the edges twice since we have // to check for self-loops before hashing auto emplace = [&hash_linked_list](auto& m, auto h, auto s) { - auto it = m.find(h); - if (it == m.end()) - m.emplace(h, std::make_pair(s, s)); - else + auto [it, inserted] = m.try_emplace(h, std::make_pair(s, s)); + if (!inserted) { - // "tail" - unsigned idx = it->second.second; + // We already have an entry with hash "h". Link it + // to the new state. + unsigned idx = it->second.second; // tail of the list assert(idx < s && "Must be monotone"); hash_linked_list[idx] = s; it->second.second = s; @@ -490,9 +487,7 @@ namespace spot { // If size_t has 8byte and unsigned has 4byte // then this works fine, otherwise there might be more collisions - size_t hh = sp || use_for_hash[e_vec[i].dst] - ? 
e_vec[i].dst - : fnv::init; + size_t hh = hash_of_state[e_vec[i].dst]; hh <<= shift; hh += e_vec[i].cond.id(); h ^= hh; @@ -500,16 +495,15 @@ namespace spot h ^= e_vec[i].acc.hash(); h *= fnv::prime; } - s_to_hash[s] = h; if (sp && (*sp)[s]) - emplace(player_map, h, s); + emplace(map1, h, s); else - emplace(env_map, h, s); + emplace(map0, h, s); } // All states that might possible be merged share the same hash // Info hash coll //std::cout << "Hash collission rate pre merge: " - // << ((env_map.size()+player_map.size())/((float)n_states)) + // << ((map0.size()+map1.size())/((float)n_states)) // << '\n'; @@ -535,51 +529,50 @@ namespace spot auto [i1, nsl1, sl1, e1] = e_idx[s1]; auto [i2, nsl2, sl2, e2] = e_idx[s2]; - if ((e2 - i2) != (e1 - i1)) + unsigned n_trans = e1 - i1; + if ((e2 - i2) != n_trans) return false; // Different number of outgoing trans - // checked1/2 is one element larger than necessary - // the last element is always false - // and acts like a nulltermination - checked1.resize(e1 - i1 + 1); - std::fill(checked1.begin(), checked1.end(), false); - checked2.resize(e2 - i2 + 1); - std::fill(checked2.begin(), checked2.end(), false); + // checked1/2 is one element larger than necessary; + // the last element (false) serves as a sentinel. + checked1.clear(); + checked1.resize(n_trans + 1, false); + checked2.clear(); + checked2.resize(n_trans + 1, false); // Try to match self-loops - // Not entirely sure when this helps exactly + unsigned self_loops_matched = 0; while ((sl1 < e1) && (sl2 < e2)) { - // Like a search in ordered array - if (e_vec[sl1].data() == e_vec[sl2].data()) + auto& data1 = e_vec[sl1].data(); + auto& data2 = e_vec[sl2].data(); + if (data1 == data2) { // Matched + ++self_loops_matched; checked1[sl1 - i1] = true; //never touches last element checked2[sl2 - i2] = true; // Advance both sl1 = e_chain[sl1]; sl2 = e_chain[sl2]; } - else if (edge_data_comp(e_vec[sl1].data(), - e_vec[sl2].data())) - // sl1 needs to advance + // Since edges are ordered on each side, aadvance + // the smallest side in case there is no match. + else if (edge_data_comp(data1, data2)) sl1 = e_chain[sl1]; else - // sl2 needs to advance sl2 = e_chain[sl2]; } - // If there are no non-self-loops, in s1 - // Check if all have been correctly treated - if ((nsl1 > e1) - && std::all_of(checked1.begin(), checked1.end(), - [](const auto& e){return e;})) + // If the matched self-loops cover all transitions, we can + // stop here. + if (self_loops_matched == n_trans) return true; // The remaining edges need to match exactly unsigned idx1 = i1; unsigned idx2 = i2; - while (((idx1 < e1) & (idx2 < e2))) + while (((idx1 < e1) && (idx2 < e2))) { // More efficient version? 
// Skip checked edges @@ -595,7 +588,6 @@ namespace spot break; } - if ((e_vec[idx1].dst != e_vec[idx2].dst) || !(e_vec[idx1].data() == e_vec[idx2].data())) return false; @@ -611,32 +603,35 @@ namespace spot const unsigned nb_states = num_states(); std::vector remap(nb_states, -1U); - // Check each hash + // Check all pair of states with compatible hash auto check_ix = [&](unsigned ix, std::vector& v, std::vector& checked1, std::vector& checked2) { + if (hash_linked_list[ix] == -1U) // no compatible state + return; + v.clear(); for (unsigned i = ix; i != -1U; i = hash_linked_list[i]) v.push_back(i); - const unsigned N = v.size(); + const unsigned vs = v.size(); - for (unsigned idx = 0; idx < N; ++idx) + for (unsigned idx = 0; idx < vs; ++idx) { - auto i = v[idx]; + unsigned i = v[idx]; for (unsigned jdx = 0; jdx < idx; ++jdx) { - auto j = v[jdx]; + unsigned j = v[jdx]; if (state_equal(j, i, checked1, checked2)) { remap[i] = (remap[j] != -1U) ? remap[j] : j; - // Because of the special self-loop tests we use above, - // it's possible that i can be mapped to remap[j] even - // if j was last compatible states found. Consider the - // following cases, taken from an actual test case: - // 18 is equal to 5, 35 is equal to 18, but 35 is not - // equal to 5. + // Because of the special self-loop tests we use + // above, it's possible that i can be mapped to + // remap[j] even if j was the last compatible + // state found. Consider the following cases, + // taken from an actual test case: 18 is equal to + // 5, 35 is equal to 18, but 35 is not equal to 5. // // State: 5 // [0&1&2] 8 {3} @@ -673,8 +668,9 @@ namespace spot } }; - auto worker = [&upd, check_ix, nthreads](unsigned pid, auto begp, auto endp, - auto bege, auto ende) + auto worker = [&upd, check_ix, nthreads](unsigned pid, + auto beg1, auto end1, + auto beg0, auto end0) { // Temporary storage for list of edges to reduce cache misses std::vector v; @@ -682,19 +678,19 @@ namespace spot // that have been matched already. 
std::vector checked1; std::vector checked2; - upd(begp, endp, pid); - upd(bege, ende, pid); - for (; begp != endp; upd(begp, endp, nthreads)) - check_ix(begp->second.first, v, checked1, checked2); - for (; bege != ende; upd(bege, ende, nthreads)) - check_ix(bege->second.first, v, checked1, checked2); + upd(beg1, end1, pid); + upd(beg0, end0, pid); + for (; beg1 != end1; upd(beg1, end1, nthreads)) + check_ix(beg1->second.first, v, checked1, checked2); + for (; beg0 != end0; upd(beg0, end0, nthreads)) + check_ix(beg0->second.first, v, checked1, checked2); }; { - auto begp = player_map.begin(); - auto endp = player_map.end(); - auto bege = env_map.begin(); - auto ende = env_map.end(); + auto beg1 = map1.begin(); + auto end1 = map1.end(); + auto beg0 = map0.begin(); + auto end0 = map0.end(); #ifndef ENABLE_PTHREAD (void) nthreads; @@ -702,7 +698,7 @@ namespace spot if (nthreads <= 1) { #endif // ENABLE_PTHREAD - worker(0, begp, endp, bege, ende); + worker(0, beg1, end1, beg0, end0); #ifdef ENABLE_PTHREAD } else @@ -712,9 +708,9 @@ namespace spot tv.resize(nthreads); for (unsigned pid = 0; pid < nthreads; ++pid) tv[pid] = std::thread( - [worker, pid, begp, endp, bege, ende]() + [worker, pid, beg1, end1, beg0, end0]() { - worker(pid, begp, endp, bege, ende); + worker(pid, beg1, end1, beg0, end0); return; }); for (auto& t : tv) @@ -747,7 +743,7 @@ namespace spot defrag_states(remap, st); // Info hash coll 2 //std::cout << "Hash collission rate post merge: " - // << ((env_map.size()+player_map.size())/((float)num_states())) + // << ((map0.size()+map1.size())/((float)num_states())) // << '\n'; return merged; } From 925ac6bbe496d5ec6844f427c677b321c30fc458 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 30 Aug 2022 23:37:12 +0200 Subject: [PATCH 125/606] * .gitlab-ci.yml: Use gcovr and produce an XML report for gitlab. --- .gitlab-ci.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 86d89f9d7..3950ea523 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -44,10 +44,8 @@ debian-unstable-gcc-coverage: - ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen - make - make check - - lcov --capture --directory . --no-external --output spot.info - - lcov --remove spot.info '*/bin/spot.cc' '*/bin/spot-x.cc' '*/spot/parse*/scan*.cc' '*/spot/parse*/parse*.cc' '*/utf8/*' '*/python/*' '*/buddy/*' '*/doc/org/tmp/*' --output spot2.info - - lcov --summary spot2.info - - genhtml --legend --demangle-cpp --output-directory coverage spot2.info + - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 + coverage: /^\s*lines:\s*\d+.\d+\%/ artifacts: when: always paths: @@ -55,9 +53,12 @@ debian-unstable-gcc-coverage: - ./*.log - doc/spot.html/ - doc/userdoc/ - - coverage/ + - coverage*.html - ./*.tar.gz - - spot2.info + reports: + coverage_report: + coverage_format: cobertura + path: coverage.xml debian-unstable-gcc-pypy: stage: build From 2e32793ed12b5fde9f745747d7106cee627cf0e5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 1 Sep 2022 20:47:31 +0200 Subject: [PATCH 126/606] * .gitlab-ci.yml (debian-unstable-gcc-coverage): Export coverage.css. 
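The `coverage:` regex introduced in the previous patch is what GitLab applies,
line by line, to the job log in order to extract the total line coverage from
gcovr's --print-summary output.  A minimal Python sketch of that matching
follows; the sample log lines are hypothetical, but gcovr's summary normally
starts with a "lines: NN.N% (...)" line:

    import re

    # Regex copied verbatim from the .gitlab-ci.yml hunk of the previous patch;
    # GitLab applies it to each line of the job log.
    COVERAGE_RE = re.compile(r'^\s*lines:\s*\d+.\d+\%')

    # Hypothetical lines as printed by `gcovr --print-summary`.
    sample_log = [
        'lines: 91.4% (52310 out of 57230)',
        'branches: 47.2% (121034 out of 256431)',
    ]

    for line in sample_log:
        match = COVERAGE_RE.match(line)
        if match:
            print('GitLab would report:', match.group(0))   # lines: 91.4%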
--- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3950ea523..5f5a9c662 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -54,6 +54,7 @@ debian-unstable-gcc-coverage: - doc/spot.html/ - doc/userdoc/ - coverage*.html + - coverage*.css - ./*.tar.gz reports: coverage_report: From 7cf580a9c53edffcff2c3a384117c39707ce80cf Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 1 Sep 2022 21:27:18 +0200 Subject: [PATCH 127/606] we want the tarball we distribute to be built on Debian unstable See issue #512. * .gitlab-ci.yml (make-dist): New build. (debian-unstable-gcc-pypy, arch-gcc-glibcxxdebug, mingw-shared, mingw-static, publish-stable): Depend upon make-dist. --- .gitlab-ci.yml | 86 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 73 insertions(+), 13 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5f5a9c662..c381793b0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,6 +32,30 @@ debian-stable-gcc: - ./*.log - ./*.tar.gz +# We build on Debian unstable because we want an up-to-date Automake. +# (See issue #512.) We do not run distcheck here to speedup this build +# that several other builds depend upon. Other builds will run distcheck. +make-dist: + stage: build + only: + - branches + except: + - /wip/ + image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + script: + - autoreconf -vfi + - ./configure --disable-static --enable-doxygen + - make + - make dist + - autoconf --trace='AC_INIT:$2' > VERSION + artifacts: + when: always + paths: + - spot-*/_build/sub/tests/*/*.log + - ./*.log + - ./*.tar.gz + - VERSION + debian-unstable-gcc-coverage: stage: build only: @@ -62,22 +86,29 @@ debian-unstable-gcc-coverage: path: coverage.xml debian-unstable-gcc-pypy: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian script: - - autoreconf -vfi + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - cd spot-$VERSION - ./configure PYTHON=/usr/bin/pypy3 --disable-static - make - make check TESTS='$(TESTS_python) $(TESTS_ipython)' artifacts: when: always paths: - - tests/*/*.log - - ./*.log + - spot-*/tests/*/*.log + - spot-*/*.log debian-gcc-snapshot: stage: build @@ -140,22 +171,30 @@ arch-clang: - ./*.log arch-gcc-glibcxxdebug: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch script: - - autoreconf -vfi - - ./configure --enable-devel --enable-c++20 --enable-glibcxx-debug + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - mkdir build-$VERSION + - cd build-$VERSION + - ../spot-$VERSION/configure --enable-devel --enable-c++20 --enable-glibcxx-debug - make - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-devel --enable-c++20 --enable-glibcxx-debug' artifacts: when: on_failure paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log + - build-*/spot-*/_build/sub/tests/*/*.log + - build-*/*.log mingw-shared: stage: build2 @@ -163,15 +202,17 @@ mingw-shared: # We start from the tarball generated from a non-cross-compiling # job, so that all generated files are included, especially those # built from the executables. 
- - job: debian-stable-gcc + - job: make-dist artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian script: - - VERSION=`autoconf --trace='AC_INIT:$2'` + - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz - cd spot-$VERSION - ./configure CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++-posix --host i686-w64-mingw32 --disable-python @@ -188,15 +229,17 @@ mingw-static: # We start from the tarball generated from a non-cross-compiling # job, so that all generated files are included, especially those # built from the executables. - - job: debian-stable-gcc + - job: make-dist artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian script: - - VERSION=`autoconf --trace='AC_INIT:$2'` + - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz - cd spot-$VERSION - mkdir install_dir @@ -216,6 +259,8 @@ mingw-static: debpkg-stable: stage: build + variables: + GIT_STRATEGY: none only: - /-deb$/ - master @@ -240,6 +285,8 @@ debpkg-stable: debpkg-stable-i386: stage: build2 + variables: + GIT_STRATEGY: none only: - /-deb$/ - master @@ -269,6 +316,8 @@ debpkg-stable-i386: debpkg-unstable: stage: build + variables: + GIT_STRATEGY: none only: - /-deb$/ - next @@ -291,6 +340,8 @@ debpkg-unstable: debpkg-unstable-i386: stage: build2 + variables: + GIT_STRATEGY: none only: - /-deb$/ - next @@ -342,6 +393,8 @@ rpm-pkg: publish-rpm: stage: publish + variables: + GIT_STRATEGY: none only: - /-rpm$/ - next @@ -359,12 +412,17 @@ publish-stable: tags: - dput stage: publish + variables: + GIT_STRATEGY: none dependencies: - debpkg-stable-i386 + - make-dist script: - cd _build_stable - ls -l - dput lrde *.changes + - cd .. + - ls -l - tgz=`ls spot-*.tar.* | head -n 1` - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline @@ -377,6 +435,8 @@ publish-unstable: tags: - dput stage: publish + variables: + GIT_STRATEGY: none dependencies: - debpkg-unstable-i386 script: From 0f131f2eee8e71113c6298016d94aba1e540f21b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Sep 2022 18:05:52 +0200 Subject: [PATCH 128/606] =?UTF-8?q?product:=20B=C3=BCchi|B=C3=BCchi=3DB?= =?UTF-8?q?=C3=BCchi,=20CoB=C3=BCchi&CoB=C3=BCchi=3DCoB=C3=BCchi?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improve the construction of the above constructions, saving colors. * spot/twaalgos/product.cc: Here. * spot/twaalgos/product.hh, NEWS: Mention it. * tests/core/prodchain.test, tests/core/prodor.test, tests/python/_product_weak.ipynb: Adjust. --- NEWS | 6 +- spot/twaalgos/product.cc | 22 +- spot/twaalgos/product.hh | 43 +- tests/core/prodchain.test | 42 +- tests/core/prodor.test | 8 +- tests/python/_product_weak.ipynb | 8468 ++++++++++++++++++++++-------- 6 files changed, 6449 insertions(+), 2140 deletions(-) diff --git a/NEWS b/NEWS index 5b55ebbdd..66f56e75d 100644 --- a/NEWS +++ b/NEWS @@ -114,10 +114,14 @@ New in spot 2.10.6.dev (not yet released) to obtain a simple model checker (that returns true or false, without counterexample). + - product() learned that the product of two co-Büchi automata + is a co-Büchi automaton. And product_or() learned that the + "or"-product of two Büchi automata is a Büchi automaton. 
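The effect described in this entry can be observed from the Python bindings.
The following sketch is only an illustration: it assumes that spot.translate()
accepts the 'coBuchi' and 'BA' option strings, and that printing
get_acceptance() yields the acceptance formula of the automaton.

    import spot

    # Two co-Büchi automata: with this change their product stays co-Büchi.
    a = spot.translate('FGa', 'coBuchi')
    b = spot.translate('FGb', 'coBuchi')
    print(spot.product(a, b).get_acceptance())      # expected: Fin(0)

    # Two Büchi automata: their "or"-product stays Büchi.
    c = spot.translate('GFa', 'BA')
    d = spot.translate('GFb', 'BA')
    print(spot.product_or(c, d).get_acceptance())   # expected: Inf(0)

Without this optimization, both products would carry two acceptance sets
(Fin(0)&Fin(1), resp. Inf(0)|Inf(1)) for no benefit.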
+ - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been compiled with --enable-pthread. Currently, only - twa_graph::merge_states() support it. + twa_graph::merge_states() supports it. Python bindings: diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index 7fb70ddd6..243f3768c 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -122,8 +122,23 @@ namespace spot res->copy_ap_of(left); res->copy_ap_of(right); + auto& lacc = left->acc(); + auto& racc = right->acc(); + bool leftweak = left->prop_weak().is_true(); bool rightweak = right->prop_weak().is_true(); + + // The conjunction of two co-Büchi automata is a co-Büchi automaton. + // The disjunction of two Büchi automata is a Büchi automaton. + // + // The code to handle this case is similar to the weak_weak case, + // except we do not set the weak property on the result. + if (!leftweak + && !rightweak + && ((aop == and_acc && lacc.is_co_buchi() && racc.is_co_buchi()) + || (aop == or_acc && lacc.is_buchi() && racc.is_buchi()))) + goto and_cobuchi_or_buchi; + // We have optimization to the standard product in case one // of the arguments is weak. if (leftweak || rightweak) @@ -132,14 +147,13 @@ namespace spot // t, f, Büchi or co-Büchi. We use co-Büchi only when // t and f cannot be used, and both acceptance conditions // are in {t,f,co-Büchi}. - if (leftweak && rightweak) + if ((leftweak && rightweak)) { weak_weak: res->prop_weak(true); + and_cobuchi_or_buchi: acc_cond::mark_t accmark = {0}; acc_cond::mark_t rejmark = {}; - auto& lacc = left->acc(); - auto& racc = right->acc(); if ((lacc.is_co_buchi() && (racc.is_co_buchi() || racc.num_sets() == 0)) || (lacc.num_sets() == 0 && racc.is_co_buchi())) diff --git a/spot/twaalgos/product.hh b/spot/twaalgos/product.hh index 49ee9acdf..784a3cb49 100644 --- a/spot/twaalgos/product.hh +++ b/spot/twaalgos/product.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2015, 2018-2020 Laboratoire de Recherche et +// Copyright (C) 2014-2015, 2018-2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -37,10 +37,14 @@ namespace spot /// The resulting automaton will accept the intersection of both /// languages and have an acceptance condition that is the /// conjunction of the acceptance conditions of the two input - /// automata. In case one of the left or right automaton is weak, - /// the acceptance condition of the result is made simpler: it - /// usually is the acceptance condition of the other argument, + /// automata. + /// + /// As an optionmization, in case one of the left or right automaton + /// is weak, the acceptance condition of the result is made simpler: + /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. + /// Similarly, the product of two co-Büchi automata will be a + /// co-Büchi automaton. /// /// The algorithm also defines a named property called /// "product-states" with type spot::product_states. 
This stores
@@ -64,10 +68,14 @@ namespace spot
   /// languages recognized by each input automaton (with its initial
   /// state changed) and have an acceptance condition that is the
   /// conjunction of the acceptance conditions of the two input
-  /// automata. In case one of the left or right automaton is weak,
-  /// the acceptance condition of the result is made simpler: it
-  /// usually is the acceptance condition of the other argument,
+  /// automata.
+  ///
+  /// As an optimization, in case one of the left or right automata
+  /// is weak, the acceptance condition of the result is made simpler:
+  /// it usually is the acceptance condition of the other argument,
   /// therefore avoiding the need to introduce new accepting sets.
+  /// Similarly, the product of two co-Büchi automata will be a
+  /// co-Büchi automaton.
   ///
   /// The algorithm also defines a named property called
   /// "product-states" with type spot::product_states. This stores
@@ -89,10 +97,15 @@ namespace spot
   /// The resulting automaton will accept the union of both
   /// languages and have an acceptance condition that is the
   /// disjunction of the acceptance conditions of the two input
-  /// automata. In case one of the left or right automaton is weak,
-  /// the acceptance condition of the result is made simpler: it
-  /// usually is the acceptance condition of the other argument,
+  /// automata.
+  ///
+  /// As an optimization, in case one of the left or right automata
+  /// is weak, the acceptance condition of the result is made simpler:
+  /// it usually is the acceptance condition of the other argument,
   /// therefore avoiding the need to introduce new accepting sets.
+  /// Similarly, the product_or of two Büchi automata will be a
+  /// Büchi automaton.
+  ///
   ///
   /// The algorithm also defines a named property called
   /// "product-states" with type spot::product_states. This stores
@@ -112,10 +125,14 @@ namespace spot
   /// recognized by each input automaton (with its initial state
   /// changed) and have an acceptance condition that is the
   /// disjunction of the acceptance conditions of the two input
-  /// automata. In case one of the left or right automaton is weak,
-  /// the acceptance condition of the result is made simpler: it
-  /// usually is the acceptance condition of the other argument,
+  /// automata.
+  ///
+  /// As an optimization, in case one of the left or right automata
+  /// is weak, the acceptance condition of the result is made simpler:
+  /// it usually is the acceptance condition of the other argument,
   /// therefore avoiding the need to introduce new accepting sets.
+  /// Similarly, the product_or of two Büchi automata will be a
+  /// Büchi automaton.
   ///
   /// The algorithm also defines a named property called
   /// "product-states" with type spot::product_states.
This stores diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index e00422148..9a9c74648 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -32,12 +32,12 @@ for i in *.hoa; do shift done shift -if $MAX_ACCSETS -eq 32; then +if [ $MAX_ACCSETS -eq 32 ]; then autfilt "$@" 2> error && exit 1 grep 'Too many acceptance sets used' error fi -autfilt -B "$@" > result -test "127,253,508,1" = `autfilt --stats=%s,%e,%t,%a result` +autfilt -B --low "$@" > result +test "4,7,16,1" = `autfilt --stats=%s,%e,%t,%a result` set x shift @@ -46,9 +46,37 @@ for i in *.hoa; do shift done shift -if $MAX_ACCSETS -eq 32; then - autfilt "$@" 2> error && exit 1 +autfilt -B --low "$@" > result +test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` + + +set x +shift +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + ltl2tgba -D --cobuchi -S "{a[*$i]}<>->FGb" > $i.hoa +done +for i in *.hoa; do + set x "$@" --product $i + shift +done +shift +autfilt -D --cobuchi --low -S "$@" > result +test "85,170,174,1" = `autfilt --stats=%s,%e,%t,%a result` + +set x +shift +for i in *.hoa; do + set x "$@" --product-or $i + shift +done +shift +if [ $MAX_ACCSETS -eq 32 ]; then + autfilt --cobuchi -S "$@" 2> error && exit 1 grep 'Too many acceptance sets used' error fi -autfilt -B "$@" > result -test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` +# FIXME: implement degeneralization for generalized-co-Büchi +# autfilt --cobuchi --low -S "$@" > result +# test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` + +true diff --git a/tests/core/prodor.test b/tests/core/prodor.test index 03127508b..03d8cd458 100755 --- a/tests/core/prodor.test +++ b/tests/core/prodor.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2015, 2017-2018, 2021-2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -154,8 +154,8 @@ diff por.hoa exp ltl2tgba -BDH 'GFa' > gfa.hoa ltl2tgba -x '!wdba-minimize' -DH 'Xb' > xb.hoa -autfilt --product-or gfa.hoa xb.hoa -H > por.hoa -cat por.hoa +autfilt --product-or gfa.hoa xb.hoa -H > por2.hoa +cat por2.hoa cat >exp <\n" ], "text/plain": [ - " *' at 0x7fd90c347ba0> >" + " *' at 0x7f26743d3720> >" ] }, "metadata": {}, @@ -299,11 +313,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -379,12 +393,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -434,11 +448,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -508,11 +522,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -644,12 +658,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -699,11 +713,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -774,11 +788,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1127,6 +1141,226 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -1586,7 +1820,7 @@ "# In a previous version we used to iterate over all possible left automata with \"for left in auts:\"\n", "# however we had trouble with Jupyter on i386, where running the full loop abort with some low-level \n", "# exeptions from Jupyter client. Halving the loop helped for some times, but then the timeout\n", - "# came back. So we do one left automaton at at time.\n", + "# came back. So we do one left automaton at a time.\n", "left = auts[0]\n", "display(left)\n", "for right in auts:\n", @@ -1609,12 +1843,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1660,7 +1894,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347bd0> >" + " *' at 0x7f26743d36c0> >" ] }, "metadata": {}, @@ -1723,11 +1957,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1797,11 +2031,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1933,12 +2167,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1988,11 +2222,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2042,11 +2276,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2108,12 +2342,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -2163,11 +2397,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2217,11 +2451,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2490,6 +2724,224 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -2949,12 +3401,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3000,7 +3452,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347cf0> >" + " *' at 0x7f26743d37e0> >" ] }, "metadata": {}, @@ -3063,11 +3515,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3138,11 +3590,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3274,12 +3726,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3329,11 +3781,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3383,11 +3835,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3449,12 +3901,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3504,11 +3956,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3558,11 +4010,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3831,6 +4283,224 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -4331,7 +5001,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d50> >" + " *' at 0x7f26743d3870> >" ] }, "metadata": {}, @@ -4575,12 +5245,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -4801,12 +5471,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -5119,44 +5789,192 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Inf(\n", - "\n", - ")\n", - "[Fin-less 2]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c\n", + "\n", + "\n", + "!c\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c\n", - "\n", - "\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[gen. Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", "\n", "\n", "\n", @@ -5559,6 +6377,1431 @@ "metadata": { "scrolled": false }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f26743d3900> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")\n", + "[Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")\n", + "[gen. co-Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[Streett-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "[Rabin-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "left = auts[4]\n", + "display(left)\n", + "for right in auts:\n", + " display_inline(right, spot.product(left, right), spot.product_or(left, right))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -5611,7 +7854,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d80> >" + " *' at 0x7f26743d3990> >" ] }, "metadata": {}, @@ -5856,12 +8099,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -6082,12 +8325,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -6486,6 +8729,193 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | Fin(\n", + "\n", + ")\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -6807,1266 +9237,6 @@ "output_type": "display_data" } ], - "source": [ - "left = auts[4]\n", - "display(left)\n", - "for right in auts:\n", - " display_inline(right, spot.product(left, right), spot.product_or(left, right))" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n" - ], - "text/plain": [ - " *' at 0x7fd90c347e40> >" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "a\n", - "\n", - "t\n", - "[all]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "0,2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & !d\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Fb\n", - "\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Fb\n", - "\n", - "[co-Büchi]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "GFc\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ")\n", - "[Streett-like 2]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ") | Inf(\n", - "\n", - ")\n", - "[Rabin-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")\n", - "[Rabin-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")\n", - "[Streett-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ") | (Inf(\n", - "\n", - ") & Fin(\n", - "\n", - "))\n", - "[Rabin-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[Streett-like 2]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ")|Fin(\n", - "\n", - ")) | Inf(\n", - "\n", - ")\n", - "[Rabin-like 4]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], "source": [ "left = auts[5]\n", "display(left)\n", @@ -8134,7 +9304,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347ba0> >" + " *' at 0x7f26743d3720> >" ] }, "metadata": {}, @@ -8197,11 +9367,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8263,11 +9433,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8344,12 +9514,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8399,11 +9569,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8522,11 +9692,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8656,12 +9826,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -8711,11 +9881,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8834,11 +10004,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9241,6 +10411,288 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -9785,12 +11237,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9836,7 +11288,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347bd0> >" + " *' at 0x7f26743d36c0> >" ] }, "metadata": {}, @@ -9899,11 +11351,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10022,11 +11474,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10156,12 +11608,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10211,11 +11663,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10264,11 +11716,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10331,12 +11783,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -10386,11 +11838,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10439,11 +11891,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10729,6 +12181,238 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -11189,12 +12873,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11240,7 +12924,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347cf0> >" + " *' at 0x7f26743d37e0> >" ] }, "metadata": {}, @@ -11303,11 +12987,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11426,11 +13110,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11560,12 +13244,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11615,11 +13299,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11668,11 +13352,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11735,12 +13419,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11790,11 +13474,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11845,11 +13529,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -12137,6 +13821,242 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -12638,7 +14558,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d50> >" + " *' at 0x7f26743d3870> >" ] }, "metadata": {}, @@ -12938,12 +14858,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -13180,12 +15100,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -13575,6 +15495,164 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -13985,6 +16063,1562 @@ "metadata": { "scrolled": false }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f26743d3900> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "left = auts[4]\n", + "display(left)\n", + "for right in auts:\n", + " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -14037,7 +17671,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d80> >" + " *' at 0x7f26743d3990> >" ] }, "metadata": {}, @@ -14314,12 +17948,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -14541,12 +18175,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -14956,6 +18590,203 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + ")) | ((Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | ((Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -15306,7 +19137,7 @@ } ], "source": [ - "left = auts[4]\n", + "left = auts[5]\n", "display(left)\n", "for right in auts:\n", " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" @@ -15314,7 +19145,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "metadata": { "scrolled": false }, @@ -15370,7 +19201,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347e40> >" + " *' at 0x7f26743d3a20> >" ] }, "metadata": {}, @@ -15433,46 +19264,115 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,1\n", + "\n", + "0,1\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & !d\n", + "\n", + "\n", + "a & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & d\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !d\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & d\n", + "\n", + "\n", + "a & d\n", "\n", "\n", "\n", @@ -15483,30 +19383,30 @@ "\n", "\n", "0->2\n", - "\n", - "\n", - "!a & !d\n", + "\n", + "\n", + "!a & !d\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!a & d\n", + "\n", + "\n", + "!a & d\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!d\n", + "\n", + "\n", + "!d\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -15521,111 +19421,7 @@ "2->2\n", "\n", "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "0,2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "d\n", + "d\n", "\n", "\n", "\n", @@ -15647,12 +19443,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -15702,87 +19498,6 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", "\n", "\n", @@ -15856,6 +19571,87 @@ "\n", "\n", "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", "
" ], "text/plain": [ @@ -15874,12 +19670,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -15929,87 +19725,6 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", "\n", "\n", @@ -16083,6 +19798,87 @@ "\n", "\n", "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", "
" ], "text/plain": [ @@ -16146,67 +19942,62 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Fin(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Inf(\n", + "\n", + ")\n", + "[Streett-like 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", + "\n", + "\n", + "!c & !d\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & d\n", + "\n", + "\n", + "!c & d\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", + "\n", + "\n", + "c & d\n", + "\n", "\n", "\n", "\n", @@ -16216,67 +20007,249 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")\n", + "[Rabin-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", + "\n", + "\n", + "!c & !d\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & d\n", + "\n", + "\n", + "!c & d\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Fin(\n", + "\n", + ")\n", + "[Streett-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + "))\n", + "[Rabin-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", "\n", "\n", "\n", @@ -16344,55 +20317,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Fin(\n", - "\n", - ") | Inf(\n", - "\n", - "))) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")) & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16402,55 +20368,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & (Fin(\n", - "\n", - ") | Inf(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ") | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16518,55 +20477,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[Streett-like 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16576,55 +20528,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")) & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "[Rabin-like 4]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", 
"\n", "\n", @@ -16639,16 +20584,317 @@ } ], "source": [ - "left = auts[5]\n", + "left = auts[6]\n", "display(left)\n", "for right in auts:\n", - " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" + " display_inline(right, spot.product(left, right), spot.product_or(left, right))" ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a1 = spot.translate('GFa')\n", + "a2 = spot.translate('GFb')\n", + "display_inline(spot.product(a1,a2), spot.product_or(a1, a2))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")\n", + "[gen. co-Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a1 = spot.dualize(a1)\n", + "a2 = spot.dualize(a2)\n", + "display_inline(spot.product(a1,a2), spot.product_or(a1, a2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -16662,7 +20908,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3rc1" + "version": "3.10.6" } }, "nbformat": 4, From fe3ebd370b5fae9cf3ae07dc3154497348a5c707 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 7 Sep 2022 09:59:31 +0200 Subject: [PATCH 129/606] add the TACAS'22 reference * doc/org/citing.org, doc/spot.bib: There. --- doc/org/citing.org | 10 ++++++++-- doc/spot.bib | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/doc/org/citing.org b/doc/org/citing.org index 8cc1f52ef..8d669ae69 100644 --- a/doc/org/citing.org +++ b/doc/org/citing.org @@ -80,12 +80,18 @@ be more specific about a particular aspect of Spot. - *Generic Emptiness Check for Fun and Profit*, /Christel Baier/, /František Blahoudek/, /Alexandre Duret-Lutz/, /Joachim Klein/, /David Müller/, and /Jan Strejček/. - In. Proc. of ATVA'19, LNCS 11781, pp. 11781, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] | + In. Proc. of ATVA'19, LNCS 11781, pp. 445--461, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.mefosyloma.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.pdf][slides2]]) Presents the generic emptiness-check implemented in Spot. -* Obsolete reference +- *Practical Applications of the Alternating Cycle Decomposition*, + /Antonio Casares/, /Alexandre Duret-Lutz/, /Klara J. Meyer/, /Florian Renkin/, + and /Salomon Sickert/. + In. Proc. of TACAS'22, LNCS 13244, pp. 99--117, Apr 2022. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#casares.22.tacas][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.pdf][pdf]] | + [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides2.pdf][slides2]]) + +* Obsolete references - *Spot 2.0 — a framework for LTL and ω-automata manipulation*, /Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/, diff --git a/doc/spot.bib b/doc/spot.bib index 284bf226a..6193cb1a2 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -172,6 +172,22 @@ doi = {10.4230/LIPIcs.ICALP.2021.123} } +@InProceedings{ casares.22.tacas, + author = {Antonio Casares and Alexandre Duret-Lutz and Klara J. 
+ Meyer and Florian Renkin and Salomon Sickert}, + title = {Practical Applications of the {A}lternating {C}ycle + {D}ecomposition}, + booktitle = {Proceedings of the 28th International Conference on Tools + and Algorithms for the Construction and Analysis of + Systems}, + year = {2022}, + series = {Lecture Notes in Computer Science}, + month = apr, + volume = {13244}, + pages = {99--117}, + doi = {10.1007/978-3-030-99527-0_6}, +} + @InProceedings{ cerna.03.mfcs, author = {Ivana {\v{C}}ern{\'a} and Radek Pel{\'a}nek}, title = {Relating Hierarchy of Temporal Properties to Model From bdac53511addb0890f7425d37e7e50854af45e96 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 7 Sep 2022 14:36:23 +0200 Subject: [PATCH 130/606] =?UTF-8?q?degen:=20learn=20to=20work=20on=20gener?= =?UTF-8?q?alized-Co-B=C3=BCchi=20as=20well?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * spot/twaalgos/degen.hh, spot/twaalgos/degen.cc: Adjust degeneralize() and degeneralize_tba() to work on generalized-co-Büchi. * NEWS: Mention this. * spot/twaalgos/cobuchi.hh, spot/twaalgos/cobuchi.cc (to_nca): Use degeneralization on generalized-co-Büchi. * spot/twaalgos/postproc.cc: Use degeneralization for generalized co-Büchi as well. * bin/autfilt.cc: Improve chain products of co-Büchi automata by using generalization if too many colors are needed. * tests/core/prodchain.test, tests/python/pdegen.py: Add test cases. --- NEWS | 3 +++ bin/autfilt.cc | 32 +++++++++++++--------- spot/twaalgos/cobuchi.cc | 16 +++++++---- spot/twaalgos/cobuchi.hh | 9 ++++--- spot/twaalgos/degen.cc | 56 ++++++++++++++++++++++++++++++--------- spot/twaalgos/degen.hh | 55 +++++++++++++++++++++++--------------- spot/twaalgos/postproc.cc | 29 +++++++++++++++----- tests/core/prodchain.test | 35 ++++++++++++------------ tests/python/pdegen.py | 15 ++++++++++- 9 files changed, 169 insertions(+), 81 deletions(-) diff --git a/NEWS b/NEWS index 66f56e75d..38beaa062 100644 --- a/NEWS +++ b/NEWS @@ -114,6 +114,9 @@ New in spot 2.10.6.dev (not yet released) to obtain a simple model checker (that returns true or false, without counterexample). + - degeneralize() and degeneralize_tba() learned to work on + generalized-co-Büchi as well. + - product() learned that the product of two co-Büchi automata is a co-Büchi automaton. And product_or() learned that the "or"-product of two Büchi automata is a Büchi automaton. diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 74fe44220..49543e596 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -713,10 +713,12 @@ ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false) return p.run(aut); } -static spot::twa_graph_ptr ensure_tba(spot::twa_graph_ptr aut) +static spot::twa_graph_ptr +ensure_tba(spot::twa_graph_ptr aut, + spot::postprocessor::output_type type = spot::postprocessor::Buchi) { spot::postprocessor p; - p.set_type(spot::postprocessor::Buchi); + p.set_type(type); p.set_pref(spot::postprocessor::Any); p.set_level(spot::postprocessor::Low); return p.run(aut); @@ -726,12 +728,14 @@ static spot::twa_graph_ptr ensure_tba(spot::twa_graph_ptr aut) static spot::twa_graph_ptr product(spot::twa_graph_ptr left, spot::twa_graph_ptr right) { - if ((type == spot::postprocessor::Buchi) - && (left->num_sets() + right->num_sets() > - spot::acc_cond::mark_t::max_accsets())) + // Are we likely to fail because of too many colors? 
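+  // (If both operands kept their current acceptance, the product would
+  // need left->num_sets() + right->num_sets() colors.  When that exceeds
+  // mark_t::max_accsets() and the requested output type is Büchi or
+  // co-Büchi anyway, reduce each operand to at most one color first so
+  // that the product itself stays representable.)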
+ if ((left->num_sets() + right->num_sets() > + spot::acc_cond::mark_t::max_accsets()) + && (type == spot::postprocessor::Buchi + || type == spot::postprocessor::CoBuchi)) { - left = ensure_tba(left); - right = ensure_tba(right); + left = ensure_tba(left, type); + right = ensure_tba(right, type); } return spot::product(left, right); } @@ -739,12 +743,14 @@ product(spot::twa_graph_ptr left, spot::twa_graph_ptr right) static spot::twa_graph_ptr product_or(spot::twa_graph_ptr left, spot::twa_graph_ptr right) { - if ((type == spot::postprocessor::Buchi) - && (left->num_sets() + right->num_sets() > - spot::acc_cond::mark_t::max_accsets())) + // Are we likely to fail because of too many colors? + if ((left->num_sets() + right->num_sets() > + spot::acc_cond::mark_t::max_accsets()) + && (type == spot::postprocessor::Buchi + || type == spot::postprocessor::CoBuchi)) { - left = ensure_tba(left); - right = ensure_tba(right); + left = ensure_tba(left, type); + right = ensure_tba(right, type); } return spot::product_or(left, right); } @@ -988,7 +994,7 @@ parse_opt(int key, char* arg, struct argp_state*) if (!opt->included_in) opt->included_in = aut; else - opt->included_in = spot::product_or(opt->included_in, aut); + opt->included_in = ::product_or(opt->included_in, aut); } break; case OPT_INHERENTLY_WEAK_SCCS: diff --git a/spot/twaalgos/cobuchi.cc b/spot/twaalgos/cobuchi.cc index 783cd0903..23d4871a0 100644 --- a/spot/twaalgos/cobuchi.cc +++ b/spot/twaalgos/cobuchi.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -338,23 +339,26 @@ namespace spot twa_graph_ptr to_nca(const_twa_graph_ptr aut, bool named_states) { - if (aut->acc().is_co_buchi()) + const acc_cond& acc = aut->acc(); + if (acc.is_co_buchi()) return make_twa_graph(aut, twa::prop_set::all()); if (auto weak = weak_to_cobuchi(aut)) return weak; + if (acc.is_generalized_co_buchi()) + return degeneralize_tba(aut); + const acc_cond::acc_code& code = aut->get_acceptance(); std::vector pairs; - if (aut->acc().is_streett_like(pairs) || aut->acc().is_parity()) + if (acc.is_streett_like(pairs) || acc.is_parity()) return nsa_to_nca(aut, named_states); else if (code.is_dnf()) return dnf_to_nca(aut, named_states); auto tmp = make_twa_graph(aut, twa::prop_set::all()); - tmp->set_acceptance(aut->acc().num_sets(), - aut->get_acceptance().to_dnf()); + tmp->set_acceptance(acc.num_sets(), code.to_dnf()); return to_nca(tmp, named_states); } @@ -683,6 +687,8 @@ namespace spot return make_twa_graph(aut, twa::prop_set::all()); if (auto weak = weak_to_cobuchi(aut)) return weak; + if (aut->acc().is_generalized_co_buchi()) + return degeneralize_tba(aut); } const acc_cond::acc_code& code = aut->get_acceptance(); diff --git a/spot/twaalgos/cobuchi.hh b/spot/twaalgos/cobuchi.hh index 5c8d85e59..b02c0535d 100644 --- a/spot/twaalgos/cobuchi.hh +++ b/spot/twaalgos/cobuchi.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
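(Side note for readers of this patch, not part of the change itself: the following is a rough usage sketch of the new behaviour through the Python bindings.  It assumes translate(), dualize(), degeneralize(), degeneralize_tba(), and are_equivalent() are exposed by the spot module as in the notebook above; the acceptance conditions shown in comments are only indicative.)

  import spot

  # A generalized co-Büchi automaton (Fin(0)|Fin(1)): the translation of
  # GFa & GFb is a single deterministic state, so dualizing it simply
  # negates the generalized Büchi acceptance.
  gcb = spot.dualize(spot.translate('GFa & GFb'))
  print(gcb.get_acceptance())          # expected: Fin(0)|Fin(1)

  # With this patch, degeneralization accepts generalized co-Büchi input
  # instead of raising an exception, and to_nca() reuses it internally.
  tcb = spot.degeneralize_tba(gcb)     # transition-based co-Büchi
  scb = spot.degeneralize(gcb)         # state-based co-Büchi
  assert spot.are_equivalent(gcb, tcb)
  assert spot.are_equivalent(gcb, scb)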
@@ -92,8 +92,8 @@ namespace spot /// original language, and is a superset iff the original language /// can not be expressed using a co-Büchi acceptance condition. /// - /// The implementation dispatches between dnf_to_nca, nsa_to_nca, - /// and a trivial implementation for weak automata. + /// The implementation dispatches between dnf_to_nca(), nsa_to_nca(), + /// degeneralize_tba(), and a trivial implementation for weak automata. SPOT_API twa_graph_ptr to_nca(const_twa_graph_ptr aut, bool named_states = false); @@ -126,7 +126,8 @@ namespace spot /// can not be expressed using a co-Büchi acceptance condition. /// /// The implementation dispatches between dnf_to_dca, nsa_to_dca, - /// and a trivial implementation for deterministic weak automata. + /// degeneralize(), and a trivial implementation for deterministic + /// weak automata. SPOT_API twa_graph_ptr to_dca(const_twa_graph_ptr aut, bool named_states = false); } diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 333efe6e6..d79844b84 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -80,7 +80,8 @@ namespace spot void fill_cache(unsigned s) { unsigned s1 = scc_of(s); - acc_cond::mark_t common = a_->acc().all_sets(); + acc_cond::mark_t all_colors = a_->acc().all_sets(); + acc_cond::mark_t common = all_colors; acc_cond::mark_t union_ = {}; bool has_acc_self_loop = false; bool is_true_state = false; @@ -97,7 +98,7 @@ namespace spot std::get<2>(cache_[d]) &= t.acc; // an accepting self-loop? - if ((t.dst == s) && a_->acc().accepting(t.acc)) + if ((t.dst == s) && t.acc == all_colors) { has_acc_self_loop = true; if (t.cond == bddtrue) @@ -330,9 +331,10 @@ namespace spot bool skip_levels, bool ignaccsl, bool remove_extra_scc) { - if (!a->acc().is_generalized_buchi()) + bool input_is_gba = a->acc().is_generalized_buchi(); + if (!(input_is_gba || a->acc().is_generalized_co_buchi())) throw std::runtime_error - ("degeneralize() only works with generalized Büchi acceptance"); + ("degeneralize() only works with generalized (co)Büchi acceptance"); if (!a->is_existential()) throw std::runtime_error ("degeneralize() does not support alternation"); @@ -347,7 +349,11 @@ namespace spot // The result automaton is an SBA. auto res = make_twa_graph(dict); res->copy_ap_of(a); - res->set_buchi(); + if (input_is_gba) + res->set_buchi(); + else + res->set_co_buchi(); + acc_cond::mark_t all_colors = a->get_acceptance().used_sets(); if (want_sba) res->prop_state_acc(true); // Preserve determinism, weakness, and stutter-invariance @@ -396,9 +402,32 @@ namespace spot std::vector> lvl_cache(a->num_states()); // Compute SCCs in order to use any optimization. - std::unique_ptr m = use_scc - ? std::make_unique(a, scc_info_options::NONE) - : nullptr; + std::unique_ptr m = nullptr; + if (use_scc) + { + if (!input_is_gba) + { + // If the input is gen-co-Büchi, temporary pretend its + // generalized Büchi. + unsigned n = a->num_sets(); + twa_graph_ptr amut = std::const_pointer_cast(a); + amut->set_generalized_buchi(n); + try + { + m = std::make_unique(a, scc_info_options::NONE); + } + catch (...) + { + amut->set_generalized_co_buchi(n); + throw; + } + amut->set_generalized_co_buchi(n); + } + else + { + m = std::make_unique(a, scc_info_options::NONE); + } + } // Initialize scc_orders std::unique_ptr orders = use_cust_acc_orders @@ -674,7 +703,7 @@ namespace spot { d.second = 0; // Make it go to the first level. // Skip as many levels as possible. 
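+              // Here a transition counts as "accepting" iff it carries all
+              // colors of the acceptance condition; unlike acc().accepting(),
+              // this reading remains correct when the input acceptance is
+              // generalized co-Büchi.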
- if (!a->acc().accepting(acc) && skip_levels)
+ if (acc != all_colors && skip_levels)
 {
 if (use_cust_acc_orders)
 {
@@ -723,9 +752,10 @@ namespace spot
 int use_lvl_cache, bool skip_levels, bool ignaccsl, bool remove_extra_scc)
 {
- // If this already a degeneralized digraph, there is nothing we
+ // If this is already a degeneralized twa, there is nothing we
 // can improve.
- if (a->is_sba())
+ if (const acc_cond& acc = a->acc();
+ a->prop_state_acc() && (acc.is_buchi() || acc.is_co_buchi()))
 return std::const_pointer_cast<twa_graph>(a);
 return degeneralize_aux(a, use_z_lvl, use_cust_acc_orders,
@@ -739,9 +769,9 @@ namespace spot
 int use_lvl_cache, bool skip_levels, bool ignaccsl, bool remove_extra_scc)
 {
- // If this already a degeneralized digraph, there is nothing we
+ // If this is already a degeneralized twa, there is nothing we
 // can improve.
- if (a->acc().is_buchi())
+ if (a->acc().is_buchi() || a->acc().is_co_buchi())
 return std::const_pointer_cast<twa_graph>(a);
 return degeneralize_aux(a, use_z_lvl, use_cust_acc_orders,
diff --git a/spot/twaalgos/degen.hh b/spot/twaalgos/degen.hh
index 281ba2ef5..e9ae13021 100644
--- a/spot/twaalgos/degen.hh
+++ b/spot/twaalgos/degen.hh
@@ -1,5 +1,5 @@
 // -*- coding: utf-8 -*-
-// Copyright (C) 2012-2015, 2017-2020 Laboratoire de
+// Copyright (C) 2012-2015, 2017-2020, 2022 Laboratoire de
 // Recherche et Développement de l'Epita.
 //
 // This file is part of Spot, a model checking library.
@@ -26,33 +26,36 @@ namespace spot
 class scc_info;
 /// \ingroup twa_acc_transform
- /// \brief Degeneralize a spot::tgba into an equivalent sba with
- /// only one acceptance condition.
+ /// \brief Degeneralize a generalized (co)Büchi automaton into an
+ /// equivalent (co)Büchi automaton.
 ///
- /// This algorithm will build a new explicit automaton that has
- /// at most (N+1) times the number of states of the original automaton.
+ /// There are two variants of the function. If the generalized
+ /// (co)Büchi acceptance uses N colors, the degeneralize() algorithm
+ /// will build a state-based (co)Büchi automaton that has at most
+ /// (N+1) times the number of states of the original automaton.
+ /// degeneralize_tba() builds a transition-based (co)Büchi automaton
+ /// that has at most N times the number of states of the original
+ /// automaton.
 ///
- /// When \a use_z_lvl is set, the level of the degeneralized
- /// automaton is reset everytime an SCC is exited. If \a
- /// use_cust_acc_orders is set, the degeneralization will compute a
- /// custom acceptance order for each SCC (this option is disabled by
- /// default because our benchmarks show that it usually does more
- /// harm than good). If \a use_lvl_cache is set, everytime an SCC
- /// is entered on a state that as already been associated to some
- /// level elsewhere, reuse that level (set it to 2 to keep the
- /// smallest number, 3 to keep the largest level, and 1 to keep the
- /// first level found). If \a ignaccsl is set, we do not directly
- /// jump to the accepting level if the entering state has an
- /// accepting self-loop. If \a remove_extra_scc is set (the default)
- /// we ensure that the output automaton has as many SCCs as the input
- /// by removing superfluous SCCs.
+ /// Additional options control optimizations described in
+ /// \cite babiak.13.spin . When \a use_z_lvl is set, the level of
+ /// the degeneralized automaton is reset every time an SCC is exited. 
+ /// If \a use_cust_acc_orders is set, the degeneralization will
+ /// compute a custom acceptance order for each SCC (this option is
+ /// disabled by default because our benchmarks show that it usually
+ /// does more harm than good). If \a use_lvl_cache is set,
+ /// every time an SCC is entered on a state that has already been
+ /// associated with some level elsewhere, reuse that level (set it to
+ /// 2 to keep the smallest number, 3 to keep the largest level, and
+ /// 1 to keep the first level found). If \a ignaccsl is set, we do
+ /// not directly jump to the accepting level if the entering state
+ /// has an accepting self-loop. If \a remove_extra_scc is set (the
+ /// default), we ensure that the output automaton has as many SCCs as
+ /// the input by removing superfluous SCCs.
 ///
 /// Any of these three options will cause the SCCs of the automaton
 /// \a a to be computed prior to its actual degeneralization.
 ///
- /// The degeneralize_tba() variant produce a degeneralized automaton
- /// with transition-based acceptance.
- ///
 /// The mapping between each state of the resulting automaton
 /// and the original state of the input automaton is stored in the
 /// "original-states" named property of the produced automaton. Call
@@ -70,6 +73,14 @@ namespace spot
 /// Similarly, the property "degen-levels" keeps track of the degeneralization
 /// levels. To retrieve it, call
 /// `aut->get_named_prop<std::vector<unsigned>>("degen-levels")`.
+ ///
+ /// As an alternative method to degeneralization, one may also
+ /// consider the ACD transform. acd_transform() will never produce
+ /// larger automata than degeneralize_tba(), and
+ /// acd_transform_sbacc() produces smaller automata than
+ /// degeneralize() on average. See \cite casares.22.tacas for
+ /// some comparisons.
+ ///
 /// \@{
 SPOT_API twa_graph_ptr
 degeneralize(const const_twa_graph_ptr& a, bool use_z_lvl = true,
diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc
index a44ac3d52..39a6c0926 100644
--- a/spot/twaalgos/postproc.cc
+++ b/spot/twaalgos/postproc.cc
@@ -236,7 +236,8 @@ namespace spot
 if (COMP_)
 tmp = complete(tmp);
 bool want_parity = type_ & Parity;
- if (want_parity && tmp->acc().is_generalized_buchi())
+ if (want_parity && (tmp->acc().is_generalized_buchi()
+ || tmp->acc().is_generalized_co_buchi()))
 tmp = choose_degen(tmp);
 assert(!!SBACC_ == state_based_);
 if (state_based_)
@@ -402,10 +403,19 @@ namespace spot
 if (PREF_ == Any)
 {
- if (type_ == Buchi)
- a = choose_degen(a);
+ if (type_ == Buchi
+ || (type_ == CoBuchi && a->acc().is_generalized_co_buchi()))
+ {
+ a = choose_degen(a);
+ }
 else if (type_ == CoBuchi)
- a = to_nca(a);
+ {
+ a = to_nca(a);
+ if (state_based_ && a->prop_state_acc().is_true())
+ a = do_sba_simul(a, simul_);
+ else
+ a = do_simul(a, simul_);
+ }
 return finalize(a);
 }
@@ -699,6 +709,8 @@ namespace spot
 if (type_ == CoBuchi)
 {
 unsigned ns = sim->num_states();
+ bool weak = sim->prop_weak().is_true();
+
 if (PREF_ == Deterministic)
 sim = to_dca(sim);
 else
@@ -706,8 +718,13 @@ namespace spot
 // if the input of to_dca/to_nca was weak, the number of
 // states has not changed, and running simulation is useless. 
- if (level_ != Low && ns < sim->num_states()) - sim = do_simul(sim, simul_); + if (!weak || (level_ != Low && ns < sim->num_states())) + { + if (state_based_ && sim->prop_state_acc().is_true()) + sim = do_sba_simul(sim, simul_); + else + sim = do_simul(sim, simul_); + } } return finalize(sim); diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index 9a9c74648..c2d6091c7 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -26,9 +26,7 @@ shift for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do ltl2tgba "{a[*$i]}[]->GFb" > $i.hoa -done -for i in *.hoa; do - set x "$@" --product $i + set x "$@" --product $i.hoa shift done shift @@ -41,8 +39,9 @@ test "4,7,16,1" = `autfilt --stats=%s,%e,%t,%a result` set x shift -for i in *.hoa; do - set x "$@" --product-or $i +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + set x "$@" --product-or $i.hoa shift done shift @@ -55,28 +54,30 @@ shift for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do ltl2tgba -D --cobuchi -S "{a[*$i]}<>->FGb" > $i.hoa -done -for i in *.hoa; do - set x "$@" --product $i + set x "$@" --product $i.hoa shift done shift -autfilt -D --cobuchi --low -S "$@" > result -test "85,170,174,1" = `autfilt --stats=%s,%e,%t,%a result` +autfilt --cobuchi --high -D -S "$@" > result +test "44,47,92,1" = `autfilt --stats=%s,%e,%t,%a result` +: > stats set x shift -for i in *.hoa; do - set x "$@" --product-or $i +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + ltl2tgba -D --cobuchi "{a[*$i]}<>->FGb" > $i.hoa + set x "$@" --product-or $i.hoa shift + test $i -eq 1 && shift # remove the first --product + test 2,3,6,1 = `autfilt --high --small --cobuchi "$@" --stats=%s,%e,%t,%a` + test 3,5,10,1 = \ + `autfilt --high --small --cobuchi "$@" | autfilt -S --stats=%s,%e,%t,%a` done -shift + if [ $MAX_ACCSETS -eq 32 ]; then - autfilt --cobuchi -S "$@" 2> error && exit 1 + autfilt "$@" 2> error && exit 1 grep 'Too many acceptance sets used' error fi -# FIXME: implement degeneralization for generalized-co-Büchi -# autfilt --cobuchi --low -S "$@" > result -# test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` true diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 7df9f0878..00f3df7e0 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -149,6 +149,9 @@ tc.assertEqual(spot.is_partially_degeneralizable(de), []) df = spot.partial_degeneralize(f, [0, 1]) df.equivalent_to(f) tc.assertEqual(str(df.acc()), '(1, Fin(0))') +df2 = spot.degeneralize(f) +df.equivalent_to(f) +tc.assertEqual(str(df2.acc()), '(1, Fin(0))') try: df = spot.partial_degeneralize(f, [0, 1, 2]) @@ -206,6 +209,16 @@ pdaut7 = spot.partial_degeneralize(aut7, sets) tc.assertTrue(pdaut7.equivalent_to(aut7)) tc.assertEqual(daut7.num_states(), 10) tc.assertEqual(pdaut7.num_states(), 10) +ddaut7 = spot.dualize(aut7) +ddaut7a = spot.scc_filter(spot.dualize(spot.degeneralize_tba(ddaut7))) +tc.assertTrue(ddaut7a.equivalent_to(aut7)) +tc.assertEqual(ddaut7a.num_states(), daut7.num_states()) +ddaut7b = spot.scc_filter(spot.dualize(spot.to_nca(ddaut7))) +tc.assertTrue(ddaut7b.equivalent_to(aut7)) +tc.assertEqual(ddaut7b.num_states(), daut7.num_states()) +ddaut7c = 
spot.scc_filter(spot.dualize(spot.to_dca(ddaut7))) +tc.assertTrue(ddaut7c.equivalent_to(aut7)) +tc.assertEqual(ddaut7c.num_states(), daut7.num_states()) aut8 = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "p0" "p1" "p2" acc-name: generalized-Buchi 5 Acceptance: 5 Inf(0)&Inf(1)&Inf(2)&Inf(3)&Inf(4) @@ -482,4 +495,4 @@ State: 1 [0] 0 {0} State: 2 [!0] 0 ---END--""") \ No newline at end of file +--END--""") From d9248e2e9725cd459139c1bde54a797f1fa54463 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Sep 2022 16:41:44 +0200 Subject: [PATCH 131/606] * doc/org/concepts.org (T-based vs. S-based acceptance): Adjust example. --- doc/org/concepts.org | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/doc/org/concepts.org b/doc/org/concepts.org index d5641e42f..a8fab8b65 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -381,13 +381,14 @@ When /transition-based acceptance/ is used, acceptance sets are now sets of /edges/ (or set of /transitions/ if you prefer), and runs are accepting if the edges they visit satisfy the acceptance condition. -Here is an example of Transition-based Generalized Büchi Automaton -(TGBA). +Here is an example of Transition-based Büchi Automaton +(TBA). #+NAME: tgba-example1 #+BEGIN_SRC sh ltl2tgba 'GF(a & X(a U b))' -d #+END_SRC + #+BEGIN_SRC dot :file concept-tgba1.svg :var txt=tgba-example1 :exports results $txt #+END_SRC @@ -399,27 +400,13 @@ This automaton accept all ω-words that infinitely often match the pattern $a^+;b$ (that is: a positive number of letters where $a$ is true are followed by one letter where $b$ is true). -Using transition-based acceptance allows for more compact automata. -The typical example is the LTL formula =GFa= (infinitely often $a$) -that can be represented using a one-state transition-based Büchi -automaton: -#+NAME: tgba-example2 -#+BEGIN_SRC sh -ltl2tgba 'GFa' -d -#+END_SRC -#+BEGIN_SRC dot :file concept-tgba2.svg :var txt=tgba-example2 :exports results -$txt -#+END_SRC - -#+RESULTS: -[[file:concept-tgba2.svg]] - -While the same property require a 2-state Büchi automaton using +Using transition-based acceptance often allows for more compact automata. +For instance the above automaton would need at least 3 states with state-based acceptance: #+NAME: tgba-example3 #+BEGIN_SRC sh -ltl2tgba 'GFa' -B -d +ltl2tgba 'GF(a & X(a U b))' -B -d #+END_SRC #+BEGIN_SRC dot :file concept-tba-vs-ba.svg :var txt=tgba-example3 :exports results $txt From b3b22388c917880703010868732ddda516230988 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 13 Sep 2022 13:53:59 +0200 Subject: [PATCH 132/606] postproc: introduce -x merge-states-min * spot/twaalgos/postproc.cc, spot/twaalgos/postproc.hh: Introduce a merge-states-min option. * bin/spot-x.cc: Document it. * spot/gen/automata.cc, spot/gen/automata.hh, bin/genaut.cc: Add option to generate cyclist test cases. * NEWS: Document the above. * tests/core/included.test: Add test cases that used to be too slow. 
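A rough usage sketch (not part of the patch itself; it reuses the commands
from the new test case and assumes the "-x merge-states-min" fine-tuning
option is honored by any tool that runs the postprocessor, as documented
in spot-x(7)):

    # Generate the CYCLIST pair for n=512 and check the intended inclusions;
    # this is the kind of check that was very slow in Spot 2.10.x.
    genaut --cyclist-trace-nba=512 > trace.hoa
    genaut --cyclist-proof-dba=512 > proof.hoa
    autfilt -q --included-in=trace.hoa proof.hoa   # succeeds: proof is in trace
    autfilt -q --included-in=proof.hoa trace.hoa   # fails: trace is not in proof
    # The threshold of the cheap state-merging pass can be changed
    # (or the pass disabled with 0) via the new option, e.g.:
    ltl2tgba -x merge-states-min=0 'GFa & GFb' --stats=%s
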
--- NEWS | 11 ++++++++++ bin/genaut.cc | 7 +++++- bin/spot-x.cc | 6 +++++- spot/gen/automata.cc | 45 +++++++++++++++++++++++++++++++++++++-- spot/gen/automata.hh | 20 ++++++++++++++++- spot/twaalgos/postproc.cc | 4 ++++ spot/twaalgos/postproc.hh | 3 ++- tests/core/included.test | 11 ++++++++-- 8 files changed, 99 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 38beaa062..93708c0d0 100644 --- a/NEWS +++ b/NEWS @@ -36,6 +36,11 @@ New in spot 2.10.6.dev (not yet released) - ltlsynt has a new option --from-pgame that takes a parity game in extended HOA format, as used in the Synthesis Competition. + - genaut learned the --cyclist-trace-nba and --cyclist-proof-dba + options. Those are used to generate pairs of automata that should + include each other, and are used to show a regression (in speed) + present in Spot 2.10.x and fixed in 2.11. + Library: - The new function suffix_operator_normal_form() implements @@ -121,6 +126,12 @@ New in spot 2.10.6.dev (not yet released) is a co-Büchi automaton. And product_or() learned that the "or"-product of two Büchi automata is a Büchi automaton. + - spot::postprocessor has a new extra option merge-states-min that + indicate above how many states twa_graph::merge_states(), which + perform a very cheap pass to fuse states with identicall + succesors, should be called before running simulation-based + reductions. + - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been compiled with --enable-pthread. Currently, only diff --git a/bin/genaut.cc b/bin/genaut.cc index eb2163cab..d7db04d98 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -62,6 +62,11 @@ static const argp_option options[] = { "m-nba", gen::AUT_M_NBA, "RANGE", 0, "An NBA with N+1 states whose determinization needs at least " "N! states", 0}, + { "cyclist-trace-nba", gen::AUT_CYCLIST_TRACE_NBA, "RANGE", 0, + "An NBA with N+2 states that should include cyclist-proof-dba=B.", 0}, + { "cyclist-proof-dba", gen::AUT_CYCLIST_PROOF_DBA, "RANGE", 0, + "A DBA with N+2 states that should be included " + "in cyclist-trace-nba=B.", 0}, RANGE_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, diff --git a/bin/spot-x.cc b/bin/spot-x.cc index c4905c2e9..a653fc926 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -164,6 +164,10 @@ Set to 1 to use only direct simulation. Set to 2 to use only reverse \ simulation. Set to 3 to iterate both direct and reverse simulations. \ The default is the value of parameter \"simul\" in --high mode, and 0 \ therwise.") }, + { DOC("merge-states-min", "Number of states above which states are \ +merged using a cheap approximation of a bisimulation quotient before \ +attempting simulation-based reductions. Defaults to 128. Set to 0 to \ +never merge states.") }, { DOC("simul-max", "Number of states above which simulation-based \ reductions are skipped. Defaults to 4096. Set to 0 to disable. 
This \ applies to all simulation-based optimization, including thoses of the \ diff --git a/spot/gen/automata.cc b/spot/gen/automata.cc index 165ab8c98..73c057a00 100644 --- a/spot/gen/automata.cc +++ b/spot/gen/automata.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et // Developpement de l'EPITA (LRDE). // // This file is part of Spot, a model checking library. @@ -220,13 +220,48 @@ namespace spot return aut; } + static twa_graph_ptr + cyclist_trace_or_proof(unsigned n, bool trace, bdd_dict_ptr dict) + { + auto aut = make_twa_graph(dict); + acc_cond::mark_t m = aut->set_buchi(); + aut->new_states(n + 2); + aut->set_init_state(0); + if (trace) + m = {}; + aut->prop_state_acc(true); + + // How many AP to we need to represent n letters + unsigned nap = ulog2(n + 1); + std::vector apvars(nap); + for (unsigned a = 0; a < nap; ++a) + apvars[a] = aut->register_ap("p" + std::to_string(a)); + + if (trace) + aut->new_edge(0, 0, bddtrue); // the only non-deterministic edge + else + aut->prop_universal(true); + + bdd zero = bdd_ibuildcube(0, nap, apvars.data()); + aut->new_edge(0, 1, zero, m); + for (unsigned letter = 1; letter <= n; ++letter) + { + bdd cond = bdd_ibuildcube(letter, nap, apvars.data()); + aut->new_acc_edge(1, letter + 1, cond); + aut->new_edge(letter + 1, 1, zero, m); + } + + return aut; + } + + twa_graph_ptr aut_pattern(aut_pattern_id pattern, int n, bdd_dict_ptr dict) { if (n < 0) { std::ostringstream err; err << "pattern argument for " << aut_pattern_name(pattern) - << " should be positive"; + << " should be non-negative"; throw std::runtime_error(err.str()); } @@ -241,6 +276,10 @@ namespace spot return l_dsa(n, dict); case AUT_M_NBA: return m_nba(n, dict); + case AUT_CYCLIST_TRACE_NBA: + return cyclist_trace_or_proof(n, true, dict); + case AUT_CYCLIST_PROOF_DBA: + return cyclist_trace_or_proof(n, false, dict); case AUT_END: break; } @@ -255,6 +294,8 @@ namespace spot "l-nba", "l-dsa", "m-nba", + "cyclist-trace-nba", + "cyclist-proof-dba", }; // Make sure we do not forget to update the above table every // time a new pattern is added. diff --git a/spot/gen/automata.hh b/spot/gen/automata.hh index d0c43d5f5..a54f75ac1 100644 --- a/spot/gen/automata.hh +++ b/spot/gen/automata.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2019 Laboratoire de Recherche et Developpement de +// Copyright (C) 2017, 2019, 2022 Laboratoire de Recherche et Developpement de // l'EPITA (LRDE). // // This file is part of Spot, a model checking library. @@ -79,6 +79,24 @@ namespace spot /// propositions to encode the $n+1$ letters used in the /// original alphabet. AUT_M_NBA, + /// \brief An NBA with (n+2) states derived from a Cyclic test + /// case. + /// + /// This familly of automata is derived from a couple of + /// examples supplied by Reuben Rowe. The task is to + /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA + /// for a given n contain the automaton generated with + /// AUT_CYCLIST_PROOF_DBA for the same n. + AUT_CYCLIST_TRACE_NBA, + /// \brief A DBA with (n+2) states derived from a Cyclic test + /// case. + /// + /// This familly of automata is derived from a couple of + /// examples supplied by Reuben Rowe. The task is to + /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA + /// for a given n contain the automaton generated with + /// AUT_CYCLIST_PROOF_DBA for the same n. 
+ AUT_CYCLIST_PROOF_DBA, AUT_END }; diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 39a6c0926..55feeb295 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -89,6 +89,7 @@ namespace spot wdba_minimize_ = opt->get("wdba-minimize", -1); gen_reduce_parity_ = opt->get("gen-reduce-parity", 1); simul_max_ = opt->get("simul-max", 4096); + merge_states_min_ = opt->get("merge-states-min", 128); wdba_det_max_ = opt->get("wdba-det-max", 4096); simul_trans_pruning_ = opt->get("simul-trans-pruning", 512); @@ -118,6 +119,9 @@ namespace spot { if (opt == 0) return a; + if (merge_states_min_ > 0 + && static_cast(merge_states_min_) < a->num_states()) + a->merge_states(); if (simul_max_ > 0 && static_cast(simul_max_) < a->num_states()) return a; diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 080cb831f..96128c531 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -268,6 +268,7 @@ namespace spot bool state_based_ = false; int wdba_minimize_ = -1; int simul_max_ = 4096; + int merge_states_min_ = 128; int wdba_det_max_ = 4096; }; /// @} diff --git a/tests/core/included.test b/tests/core/included.test index 9f39fef20..3574af9e3 100755 --- a/tests/core/included.test +++ b/tests/core/included.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -60,5 +60,12 @@ ltl2tgba true | autfilt out.hoa --equivalent-to - ltl2tgba '!(a U c)' | autfilt --product-or a1.hoa > out.hoa ltl2tgba true | autfilt out.hoa --equivalent-to - && exit 1 -: +# In Spot 2.10, the following was very slow. +for n in 1 2 4 8 16 512 1024 2048 4096 8192; do + genaut --cyclist-trace-nba=$n > trace.hoa + genaut --cyclist-proof-dba=$n > proof.hoa + autfilt -q --included-in=trace.hoa proof.hoa || exit 1 + autfilt -q --included-in=proof.hoa trace.hoa && exit 1 +done +: From ef0aeed22844bd58041c6a1e9e4c3ba9422eb7eb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 14 Sep 2022 11:29:18 +0200 Subject: [PATCH 133/606] ltlsynt: fix documentation of --aiger option * bin/ltlsynt.cc: Here. --- bin/ltlsynt.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 06c29db88..1779211ef 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -114,13 +114,13 @@ static const argp_option options[] = "realizability only, do not compute a winning strategy", 0}, { "aiger", OPT_PRINT_AIGER, "ite|isop|both[+ud][+dc]" "[+sub0|sub1|sub2]", OPTION_ARG_OPTIONAL, - "prints a winning strategy as an AIGER circuit. The first, and only " - "mandatory option defines the method to be used. \"ite\" for " - "If-then-else normal form; " + "prints a winning strategy as an AIGER circuit. The first word " + "indicates the encoding to used: \"ite\" for " + "If-Then-Else normal form; " "\"isop\" for irreducible sum of producs; " - "\"both\" tries both encodings and keeps the smaller one. " + "\"both\" tries both and keeps the smaller one. 
" "The other options further " - "refine the encoding, see aiger::encode_bdd.", 0}, + "refine the encoding, see aiger::encode_bdd. Defaults to \"ite\".", 0}, { "verbose", OPT_VERBOSE, nullptr, 0, "verbose mode", -1 }, { "verify", OPT_VERIFY, nullptr, 0, From c1c874b1a511640facedfa9ec0f4c4acecb77de6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 14 Sep 2022 15:33:46 +0200 Subject: [PATCH 134/606] ltlsynt: add options --dot and --hide-status * bin/ltlsynt.cc: Implement these options. * bin/common_aoutput.hh, bin/common_aoutput.cc (automaton_format_opt): Make extern. * NEWS: Mention the new options. * doc/org/ltlsynt.org: Use dot output in documentation. * tests/core/ltlsynt.test: Quick test of the new options. --- NEWS | 10 +++ bin/common_aoutput.cc | 4 +- bin/common_aoutput.hh | 3 +- bin/ltlsynt.cc | 97 ++++++++++++++++++---------- doc/org/ltlsynt.org | 139 +++++++++++++++++++++++++++++++--------- tests/core/ltlsynt.test | 19 ++++++ 6 files changed, 205 insertions(+), 67 deletions(-) diff --git a/NEWS b/NEWS index 93708c0d0..535dee5fa 100644 --- a/NEWS +++ b/NEWS @@ -36,6 +36,16 @@ New in spot 2.10.6.dev (not yet released) - ltlsynt has a new option --from-pgame that takes a parity game in extended HOA format, as used in the Synthesis Competition. + - ltlsynt has a new option --hide-status to hide the REALIZABLE or + UNREALIZABLE output expected by SYNTCOMP. (This line is + superfluous, because the exit status of ltlsynt already indicate + whether the formula is realizable or not.) + + - ltlsynt has a new option --dot to request GraphViz output instead + of most output. This works for displaying Mealy machines, games, + or AIG circuits. See https://spot.lrde.epita.fr/ltlsynt.html for + examples. + - genaut learned the --cyclist-trace-nba and --cyclist-proof-dba options. Those are used to generate pairs of automata that should include each other, and are used to show a regression (in speed) diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 665fafc67..f2c8691ec 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -41,7 +41,7 @@ #include automaton_format_t automaton_format = Hoa; -static const char* automaton_format_opt = nullptr; +const char* automaton_format_opt = nullptr; const char* opt_name = nullptr; static const char* opt_output = nullptr; static const char* stats = ""; diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index 1b2e7ae41..0fb2e8d7c 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -47,6 +47,7 @@ enum automaton_format_t { // The format to use in output_automaton() extern automaton_format_t automaton_format; +extern const char* automaton_format_opt; // Set to the argument of --name, else nullptr. 
extern const char* opt_name; // Output options diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 1779211ef..e0cf78c47 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -49,7 +50,9 @@ enum OPT_BYPASS, OPT_CSV, OPT_DECOMPOSE, + OPT_DOT, OPT_FROM_PGAME, + OPT_HIDE, OPT_INPUT, OPT_OUTPUT, OPT_PRINT, @@ -107,32 +110,41 @@ static const argp_option options[] = /**************************************************/ { nullptr, 0, nullptr, 0, "Output options:", 20 }, { "print-pg", OPT_PRINT, nullptr, 0, - "print the parity game in the pgsolver format, do not solve it", 0}, + "print the parity game in the pgsolver format, do not solve it", 0 }, { "print-game-hoa", OPT_PRINT_HOA, "options", OPTION_ARG_OPTIONAL, - "print the parity game in the HOA format, do not solve it", 0}, + "print the parity game in the HOA format, do not solve it", 0 }, { "realizability", OPT_REAL, nullptr, 0, - "realizability only, do not compute a winning strategy", 0}, + "realizability only, do not compute a winning strategy", 0 }, { "aiger", OPT_PRINT_AIGER, "ite|isop|both[+ud][+dc]" "[+sub0|sub1|sub2]", OPTION_ARG_OPTIONAL, - "prints a winning strategy as an AIGER circuit. The first word " - "indicates the encoding to used: \"ite\" for " + "encode the winning strategy as an AIG circuit and print it in AIGER" + " format. The first word indicates the encoding to used: \"ite\" for " "If-Then-Else normal form; " - "\"isop\" for irreducible sum of producs; " + "\"isop\" for irreducible sum of products; " "\"both\" tries both and keeps the smaller one. " - "The other options further " - "refine the encoding, see aiger::encode_bdd. Defaults to \"ite\".", 0}, - { "verbose", OPT_VERBOSE, nullptr, 0, - "verbose mode", -1 }, - { "verify", OPT_VERIFY, nullptr, 0, - "verifies the strategy or (if demanded) aiger against the spec.", -1 }, + "Other options further " + "refine the encoding, see aiger::encode_bdd. Defaults to \"ite\".", 0 }, + { "dot", OPT_DOT, "options", OPTION_ARG_OPTIONAL, + "Use dot format when printing the result (game, strategy, or " + "AIG circuit, depending on other options). The options that " + "may be passed to --dot depend on the nature of what is printed. " + "For games and strategies, standard automata rendering " + "options are supported (e.g., see ltl2tgba --dot). For AIG circuit, " + "use (h) for horizontal and (v) for vertical layouts.", 0 }, { "csv", OPT_CSV, "[>>]FILENAME", OPTION_ARG_OPTIONAL, "output statistics as CSV in FILENAME or on standard output " "(if '>>' is used to request append mode, the header line is " "not output)", 0 }, + { "hide-status", OPT_HIDE, nullptr, 0, + "Hide the REALIZABLE or UNREALIZABLE line. 
(Hint: exit status " + "is enough of an indication.)", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, { "extra-options", 'x', "OPTS", 0, "fine-tuning options (see spot-x (7))", 0 }, + { "verbose", OPT_VERBOSE, nullptr, 0, "verbose mode", 0 }, + { "verify", OPT_VERIFY, nullptr, 0, + "verify the strategy or (if demanded) AIG against the formula", 0 }, { nullptr, 0, nullptr, 0, nullptr, 0 }, }; @@ -162,8 +174,10 @@ static const char* opt_print_hoa_args = nullptr; static bool opt_real = false; static bool opt_do_verify = false; static const char* opt_print_aiger = nullptr; - +static const char* opt_dot_arg = nullptr; +static bool opt_dot = false; static spot::synthesis_info* gi; +static bool show_status = true; static char const *const algo_names[] = { @@ -254,6 +268,17 @@ namespace return s; }; + static void + dispatch_print_hoa(const spot::const_twa_graph_ptr& game) + { + if (opt_dot) + spot::print_dot(std::cout, game, opt_print_hoa_args); + else if (opt_print_pg) + spot::print_pg(std::cout, game); + else + spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; + } + static void print_csv(const spot::formula& f, const char* filename = nullptr) { @@ -326,7 +351,7 @@ namespace outf.close(opt_csv); } - int + static int solve_formula(const spot::formula& f, const std::vector& input_aps, const std::vector& output_aps) @@ -397,15 +422,8 @@ namespace std::vector mealy_machines; auto print_game = want_game ? - [](const spot::twa_graph_ptr& game)->void - { - if (opt_print_pg) - spot::print_pg(std::cout, game); - else - spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; - } - : - [](const spot::twa_graph_ptr&)->void{}; + [](const spot::twa_graph_ptr& game)->void { dispatch_print_hoa(game); } + : [](const spot::twa_graph_ptr&)->void{}; for (; sub_f != sub_form.end(); ++sub_f, ++sub_o) { @@ -425,7 +443,8 @@ namespace { case spot::mealy_like::realizability_code::UNREALIZABLE: { - std::cout << "UNREALIZABLE" << std::endl; + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; safe_tot_time(); return 1; } @@ -448,7 +467,8 @@ namespace continue; if (!spot::solve_game(arena, *gi)) { - std::cout << "UNREALIZABLE" << std::endl; + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; safe_tot_time(); return 1; } @@ -506,7 +526,8 @@ namespace return 0; } - std::cout << "REALIZABLE" << std::endl; + if (show_status) + std::cout << "REALIZABLE" << std::endl; if (opt_real) { safe_tot_time(); @@ -545,7 +566,10 @@ namespace << " latches and " << saig->num_gates() << " gates\n"; } - spot::print_aiger(std::cout, saig) << '\n'; + if (opt_dot) + spot::print_dot(std::cout, saig, opt_dot_arg); + else + spot::print_aiger(std::cout, saig) << '\n'; } else { @@ -784,10 +808,7 @@ namespace } if (opt_print_pg || opt_print_hoa) { - if (opt_print_pg) - spot::print_pg(std::cout, arena); - else - spot::print_hoa(std::cout, arena, opt_print_hoa_args) << '\n'; + dispatch_print_hoa(arena); return 0; } auto safe_tot_time = [&]() { @@ -796,13 +817,15 @@ namespace }; if (!spot::solve_game(arena, *gi)) { - std::cout << "UNREALIZABLE" << std::endl; + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; safe_tot_time(); return 1; } if (gi->bv) gi->bv->realizable = true; - std::cout << "REALIZABLE" << std::endl; + if (show_status) + std::cout << "REALIZABLE" << std::endl; if (opt_real) { safe_tot_time(); @@ -905,9 +928,17 @@ parse_opt(int key, char *arg, struct argp_state *) opt_decompose_ltl = XARGMATCH("--decompose", arg, 
decompose_args, decompose_values); break; + case OPT_DOT: + opt_dot = true; + automaton_format_opt = opt_dot_arg = arg; + automaton_format = Dot; + break; case OPT_FROM_PGAME: jobs.emplace_back(arg, job_type::AUT_FILENAME); break; + case OPT_HIDE: + show_status = false; + break; case OPT_INPUT: { all_input_aps.emplace(std::vector{}); diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index f05d58309..e4fbc66e4 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -7,19 +7,19 @@ * Basic usage -This tool synthesizes controllers from LTL/PSL formulas. +This tool synthesizes reactive controllers from LTL/PSL formulas. Consider a set $I$ of /input/ atomic propositions, a set $O$ of output atomic propositions, and a PSL formula \phi over the propositions in $I \cup O$. A -=controller= realizing \phi is a function $c: (2^{I})^\star \times 2^I \mapsto +*reactive controller* realizing \phi is a function $c: (2^{I})^\star \times 2^I \mapsto 2^O$ such that, for every \omega-word $(u_i)_{i \in N} \in (2^I)^\omega$ over the input propositions, the word $(u_i \cup c(u_0 \dots u_{i-1}, u_i))_{i \in N}$ satisfies \phi. -If a controller exists, then one with finite memory exists. Such controllers -are easily represented as automata (or more specifically as I/O automata or -transducers). In the automaton representing the controller, the acceptance -condition is irrelevant and trivially true. +If a reactive controller exists, then one with finite memory +exists. Such controllers are easily represented as automata (or more +specifically as Mealy machines). In the automaton representing the +controller, the acceptance condition is irrelevant and trivially true. =ltlsynt= has three mandatory options: - =--ins=: a comma-separated list of input atomic propositions; @@ -27,45 +27,52 @@ condition is irrelevant and trivially true. - =--formula= or =--file=: a specification in LTL or PSL. One of =--ins= or =--outs= may be omitted, as any atomic proposition not listed -as input can be assumed to be an output and vice-versa. +as input can be assumed to be output and vice-versa. -The following example illustrates the synthesis of a controller acting as an -=AND= gate. We have two inputs =a= and =b= and one output =c=, and we want =c= -to always be the =AND= of the two inputs: +The following example illustrates the synthesis of a controller +ensuring that input =i1= and =i2= are both true initially if and only +if eventually output =o1= will go from true to false at some point. +Note that this is an equivalence, not an implication. #+NAME: example #+BEGIN_SRC sh :exports both -ltlsynt --ins=a,b -f 'G (a & b <=> c)' +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' #+END_SRC #+RESULTS: example #+begin_example REALIZABLE HOA: v1 -States: 1 +States: 3 Start: 0 -AP: 3 "a" "b" "c" +AP: 3 "i1" "i2" "o1" acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 2 --BODY-- State: 0 -[!0&!2 | !1&!2] 0 -[0&1&2] 0 +[0&1&2] 1 +[!0&2 | !1&2] 2 +State: 1 +[!2] 0 +State: 2 +[2] 2 --END-- #+end_example The output is composed of two parts: -- the first one is a single line =REALIZABLE= or =UNREALIZABLE;= -- the second one, only present in the =REALIZABLE= case is an automaton describing the controller. - In this example, the controller has a single - state, with two loops labeled by =a & b & c= and =(!a | !b) & !c=. 
+- The first one is a single line =REALIZABLE= or =UNREALIZABLE=; the presence of this + line, required by the [[http://http://www.syntcomp.org/][SyntComp competition]], can be disabled with option =--hide-status=. +- The second one, only present in the =REALIZABLE= case, is an automaton describing the controller. + +The controller contains the line =controllable-AP: 2=, which means that this automaton +should be interpreted as a Mealy machine where =o0= is part of the output. +Using the =--dot= option, makes it easier to visualize this machine. #+NAME: exampledot -#+BEGIN_SRC sh :exports none :noweb yes -sed 1d <> -EOF +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntex.svg :var txt=exampledot :exports results @@ -75,9 +82,6 @@ EOF #+RESULTS: [[file:ltlsyntex.svg]] -The label =a & b & c= should be understood as: "if the input is =a&b=, -the output should be =c=". - The following example illustrates the case of an unrealizable specification. As =a= is an input proposition, there is no way to guarantee that it will eventually hold. @@ -90,11 +94,68 @@ ltlsynt --ins=a -f 'F a' : UNREALIZABLE By default, the controller is output in HOA format, but it can be -output as an [[http://fmv.jku.at/aiger/][AIGER]] circuit thanks to the =--aiger= flag. This is the -output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. +output as an And-Inverter-Graph in [[http://fmv.jku.at/aiger/][AIGER format]] using the =--aiger= +flag. This is the output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. -The generation of a controller can be disabled with the flag =--realizability=. -In this case, =ltlsynt= output is limited to =REALIZABLE= or =UNREALIZABLE=. +#+NAME: exampleaig +#+BEGIN_SRC sh :exports both +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --aiger +#+END_SRC + +#+RESULTS: exampleaig +#+begin_example +REALIZABLE +aag 14 2 2 1 10 +2 +4 +6 14 +8 29 +7 +10 7 9 +12 4 10 +14 2 12 +16 7 8 +18 4 16 +20 5 7 +22 21 19 +24 2 23 +26 3 7 +28 27 25 +i0 i1 +i1 i2 +o0 o1 +#+end_example + +The above format is not very human friendly. Again, by passing both +=--aiger= and =--dot=, one can display the And-Inverter-Graph representing +the controller: + +#+NAME: exampleaigdot +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --aiger --dot +#+END_SRC + +#+BEGIN_SRC dot :file ltlsyntexaig.svg :var txt=exampleaigdot :exports results + $txt +#+END_SRC + +#+RESULTS: +[[file:ltlsyntexaig.svg]] + +In the above diagram, round nodes represent AND gates. Small black +circles represent inversions (or negations), colored triangles are +used to represent input signals (at the bottom) and output signals (at +the top), and finally rectangles represent latches. A latch is a one +bit register that delays the signal by one step. Initially, all +latches are assumed to contain =false=, and them emit their value from +the =L0_out= and =L1_out= rectangles at the bottom. Their input value, +to be emitted at the next step, is received via the =L0_in= and =L1_in= +boxes at the top. In =ltlsynt='s encoding, the set of latches is used +to keep track of the current state of the Mealy machine. + +The generation of a controller can be disabled with the flag +=--realizability=. In this case, =ltlsynt='s output is limited to +=REALIZABLE= or =UNREALIZABLE=. * TLSF @@ -177,7 +238,18 @@ be tried by separating them using commas. 
For instance You can also ask =ltlsynt= to print to obtained parity game into [[https://github.com/tcsprojects/pgsolver][PGSolver]] format, with the flag =--print-pg=, or in the HOA format, using =--print-game-hoa=. These flag deactivate the resolution of the -parity game. +parity game. Note that if any of those flag is used with =--dot=, the game +will be printed in the Dot format instead: + +#+NAME: examplegamedot +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot +#+END_SRC +#+BEGIN_SRC dot :file ltlsyntexgame.svg :var txt=examplegamedot :exports results + $txt +#+END_SRC +#+RESULTS: +[[file:ltlsyntexgame.svg]] For benchmarking purpose, the =--csv= option can be used to record intermediate statistics about the resolution. @@ -200,6 +272,11 @@ Further improvements are described in the following paper: /Alexandre Duret-Lutz/, and /Adrien Pommellet/. Presented at the SYNT'21 workshop. ([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.21.synt.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.21.synt][bib]]) +Simplification of Mealy machines is discussed in: + +- *Effective reductions of Mealy machines*, /Florian Renkin/, + /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, and /Adrien Pommellet/. + Presented at FORTE'22. ([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.22.forte.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.22.forte][bib]]) # LocalWords: utf ltlsynt AIGER html args mapsto SRC acc aiger TLSF # LocalWords: UNREALIZABLE unrealizable SYNTCOMP realizability Proc diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 33369dcde..b9dfac204 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -985,3 +985,22 @@ ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ --bypass=yes 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +# Test --dot and --hide-status +ltlsynt -f 'i <-> Fo' --ins=i --aiger --dot | grep arrowhead=dot +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot | grep 'shape="diamond"' +ltlsynt -f 'i <-> Fo' --ins=i --dot --hide-status > res +cat >exp < 0 + 0 [label="0"] + 0 -> 0 [label="i / o"] + 0 -> 1 [label="!i / !o"] + 1 [label="1"] + 1 -> 1 [label="1 / !o"] +} +EOF +diff res exp From c63c1796b992ab9e49b7eb53bd0f3919f7ebb482 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 21 Sep 2022 17:27:58 +0200 Subject: [PATCH 135/606] Improve aiger INF encoding the encoding cna be simplified to produce less gates when high or low is True. 
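For instance (a sketch reusing the specification from the notebook test
below; exact gate counts depend on the encoding options chosen):

    # For this controller one branch of the if-then-else tree is the
    # constant true, so the simplified encoding emits fewer AND gates.
    ltlsynt --outs=x -f '(a | b | c | d) -> x' --aiger=ite --hide-status
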
* spot/twaalgos/aiger.cc: Here * tests/python/_synthesis.ipynb: Test --- spot/twaalgos/aiger.cc | 33 ++- tests/python/_synthesis.ipynb | 446 ++++++++++++++++++++++++++++++++-- 2 files changed, 454 insertions(+), 25 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index e3c3bb6c5..af255a167 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -637,19 +637,44 @@ namespace spot // De-morgan // !(!v&low | v&high) = !(!v&low) & !(v&high) // !v&low | v&high = !(!(!v&low) & !(v&high)) + // note that if low or high are T + // we can simplify the formula + // given that low / high is T + // then !(!v&low) & !(v&high) can be simplified to + // !v&low | v&high = !v | high / low | v + // = !(v & !high) / !(!low & !v) + // The case when low / high is ⊥ is automatically treated auto b_it = bdd2var_.find(b.id()); if (b_it != bdd2var_.end()) return b_it->second; - // todo -// unsigned v_var = bdd2var_.at(bdd_var(b)); unsigned v_var = bdd2var_.at(bdd_ithvar(bdd_var(b)).id()); unsigned b_branch_var[2] = {bdd2INFvar(bdd_low(b)), bdd2INFvar(bdd_high(b))}; - unsigned r = aig_not(aig_and(v_var, b_branch_var[1])); - unsigned l = aig_not(aig_and(aig_not(v_var), b_branch_var[0])); + unsigned l; + unsigned r; + + if (b_branch_var[0] == aig_true()) + { + // low == T + l = v_var; + r = aig_not(b_branch_var[1]); + } + else if (b_branch_var[1] == aig_true()) + { + // high == T + l = aig_not(b_branch_var[0]); + r = aig_not(v_var); + } + else + { + // General case + r = aig_not(aig_and(v_var, b_branch_var[1])); + l = aig_not(aig_and(aig_not(v_var), b_branch_var[0])); + } + return aig_not(aig_and(l, r)); } diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 6952eadd3..4c203a86e 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -738,7 +738,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc883a7720> >" + " *' at 0x7fbccc33a0f0> >" ] }, "execution_count": 8, @@ -821,7 +821,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc883a7720> >" + " *' at 0x7fbccc33a0f0> >" ] }, "execution_count": 9, @@ -945,7 +945,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc8833aa80> >" + " *' at 0x7fbccc345660> >" ] }, "execution_count": 10, @@ -1043,7 +1043,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c2ab0> >" + " *' at 0x7fbccc3486c0> >" ] }, "execution_count": 11, @@ -1211,7 +1211,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc8833ae70> >" + " *' at 0x7fbccc345ae0> >" ] }, "execution_count": 12, @@ -1420,7 +1420,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c2240> >" + " *' at 0x7fbccc345e40> >" ] }, "execution_count": 13, @@ -1578,7 +1578,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880bdc30> >" + " *' at 0x7fbccc364a20> >" ] }, "execution_count": 14, @@ -1722,7 +1722,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880bdc30> >" + " *' at 0x7fbccc364a20> >" ] }, "execution_count": 15, @@ -1869,7 +1869,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5210> >" + " *' at 0x7fbccc35a2d0> >" ] }, "execution_count": 16, @@ -2014,7 +2014,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5210> >" + " *' at 0x7fbccc35a2d0> >" ] }, "execution_count": 17, @@ -2561,7 +2561,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5d50> >" + " *' at 0x7fbccc35af00> >" ] }, "execution_count": 18, @@ -2715,7 +2715,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5d50> >" + " *' at 0x7fbccc35af00> >" ] }, "execution_count": 19, @@ -2873,7 +2873,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880cd090> >" + " *' at 0x7fbccc36d240> >" ] }, "metadata": {}, @@ -4215,7 +4215,7 @@ "\n" ], 
"text/plain": [ - " *' at 0x7fcc880bdc00> >" + " *' at 0x7fbccc3910f0> >" ] }, "execution_count": 20, @@ -4698,7 +4698,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880bdc00> >" + " *' at 0x7fbccc3910f0> >" ] }, "execution_count": 21, @@ -4831,7 +4831,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880cd2a0> >" + " *' at 0x7fbccc364570> >" ] }, "metadata": {}, @@ -5509,7 +5509,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c55a0> >" + " *' at 0x7fbccc35a9c0> >" ] }, "execution_count": 22, @@ -5748,7 +5748,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c55a0> >" + " *' at 0x7fbccc35a9c0> >" ] }, "execution_count": 23, @@ -5838,7 +5838,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5a50> >" + " *' at 0x7fbccc3646c0> >" ] }, "metadata": {}, @@ -5932,7 +5932,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc880c5a50> >" + " *' at 0x7fbccc3646c0> >" ] }, "metadata": {}, @@ -5955,10 +5955,414 @@ "display(aut)" ] }, + { + "cell_type": "markdown", + "id": "7efe7450", + "metadata": {}, + "source": [ + "# Test improved aiger INF encoding" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "31872ccc", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc3911e0> >" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "\n", + "aut = spot.ltl_to_game(\"(a|b|c|d)->x\", [\"x\"], si)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "9064bc60", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n" + ] + } + ], + "source": [ + "print(spot.solve_game(aut))" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "303ada1e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbcd407ca20> >" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ctrl = spot.solved_game_to_split_mealy(aut)\n", + "ctrl" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "9874a530", + "metadata": {}, + 
"outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "10->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "12->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "18->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "8->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "False\n", + "\n", + "\n", + "\n", + "0->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " >" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aig = spot.mealy_machine_to_aig(ctrl, \"ite\")\n", + "aig" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "f3b2d981", + "id": "eb81b7d3", "metadata": {}, "outputs": [], "source": [] From 4a24739c3f38c42551e3b2f322f0882b4b2e3ba2 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 14 Sep 2022 16:44:12 +0200 Subject: [PATCH 136/606] Improving minimize_mealy benchmarking * python/spot/__init__.py: Adding helper function for inline plot of csv *spot/twaalgos/mealy_machine.cc, spot/twaalgos/mealy_machine.hh: Main changes * tests/python/_mealy.ipynb: Update * tests/python/ipnbdoctest.py: Ignore timing table * tests/python/synthesis.ipynb: Update --- python/spot/__init__.py | 30 + spot/twaalgos/mealy_machine.cc | 290 +++++-- spot/twaalgos/mealy_machine.hh | 21 + tests/python/_mealy.ipynb | 1291 +++++++++++++++++++++++++++++++- tests/python/ipnbdoctest.py | 4 + tests/python/synthesis.ipynb | 36 +- 6 files changed, 1609 insertions(+), 63 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 340eba00a..01210c824 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -1298,6 +1298,36 @@ def sat_minimize(aut, acc=None, colored=False, else: return sm(aut, args, state_based) +# Adding the inline csv-display option +def minimize_mealy(mm, opt = -1, display_log = False, return_log = False): + from spot.impl import minimize_mealy as minmealy + + try: + lvl = int(opt) + opt = synthesis_info() + opt.minimize_lvl = lvl + 4 + except (ValueError, TypeError) as _: + pass + + if display_log or return_log: + import pandas as pd + with tempfile.NamedTemporaryFile(dir='.', suffix='.minlog') as t: + opt.opt.set_str("satlogcsv", t.name) + resmm = minmealy(mm, opt) + + dfrm = pd.read_csv(t.name, dtype=object) + if display_log: + from IPython.display import display + del dfrm['instance'] + display(dfrm) + if return_log: + return resmm, dfrm + else: + return resmm + else: + return minmealy(mm, opt) + + def parse_word(word, dic=_bdd_dict): from spot.impl import parse_word as pw diff --git a/spot/twaalgos/mealy_machine.cc 
b/spot/twaalgos/mealy_machine.cc index 6bbb9c4f7..3635e6334 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -88,6 +89,33 @@ namespace #endif } +namespace +{ + static std::unique_ptr sat_csv_file; + struct fwrapper{ + std::string fname; + std::FILE* f; + fwrapper(const std::string& name) + : fname{name} + , f{std::fopen(name.c_str(), "a")} + { + if (!f) + throw std::runtime_error("File could not be oppened for writing."); + } + ~fwrapper() + { + std::fclose(f); + f = nullptr; + } + fwrapper& operator=(const fwrapper&) = delete; + fwrapper& operator=(fwrapper&&) = delete; + fwrapper(const fwrapper&) = delete; + fwrapper(fwrapper&&) = delete; + }; + static std::unique_ptr sat_dimacs_file; + static std::string sat_instance_name = ""; +} + namespace spot { @@ -817,6 +845,119 @@ namespace #else void trace_clause(const std::vector&){} #endif + struct satprob_info + { + stopwatch sw; + + double premin_time, reorg_time, partsol_time, player_incomp_time, + incomp_time, split_all_let_time, split_min_let_time, + split_cstr_time, prob_init_build_time, sat_time, + build_time, refine_time, total_time; + long long n_classes, n_refinement, n_lit, n_clauses, + n_iteration, n_bisim_let, n_min_states, done; + std::string task; + const std::string instance; + + satprob_info(const std::string& instance) + : premin_time{-1} + , reorg_time{-1} + , partsol_time{-1} + , player_incomp_time{-1} + , incomp_time{-1} + , split_all_let_time{-1} + , split_min_let_time{-1} + , split_cstr_time{-1} + , prob_init_build_time{-1} + , sat_time{-1} + , build_time{-1} + , refine_time{-1} + , total_time{-1} + , n_classes{-1} + , n_refinement{-1} + , n_lit{-1} + , n_clauses{-1} + , n_iteration{-1} + , n_bisim_let{-1} + , n_min_states{-1} + , done{-1} + , task{} + , instance{instance+","} + { + } + + void start() + { + sw.start(); + } + double stop() + { + return sw.stop(); + } + double restart() + { + double res = sw.stop(); + sw.start(); + return res; + } + // Writing also "flushes" + void write() + { + auto f = [](std::ostream& o, auto& v, bool sep = true) + { + if (v >= 0) + o << v; + if (sep) + o.put(','); + v = -1; + }; + if (!sat_csv_file) + return; + + auto& out = *sat_csv_file; + if (out.tellp() == 0) + { + out << "instance,task,premin_time,reorg_time,partsol_time," + << "player_incomp_time,incomp_time,split_all_let_time," + << "split_min_let_time,split_cstr_time,prob_init_build_time," + << "sat_time,build_time,refine_time,total_time,n_classes," + << "n_refinement,n_lit,n_clauses,n_iteration,n_bisim_let," + << "n_min_states,done\n"; + } + + assert(!task.empty()); + out << instance; + out << task; + task = ""; + out.put(','); + + std::stringstream ss; + + f(ss, premin_time); + f(ss, reorg_time); + f(ss, partsol_time); + f(ss, player_incomp_time); + f(ss, incomp_time); + f(ss, split_all_let_time); + f(ss, split_min_let_time); + f(ss, split_cstr_time); + f(ss, prob_init_build_time); + f(ss, sat_time); + f(ss, build_time); + f(ss, refine_time); + f(ss, total_time); + f(ss, n_classes); + f(ss, n_refinement); + f(ss, n_lit); + f(ss, n_clauses); + f(ss, n_iteration); + f(ss, n_bisim_let); + f(ss, n_min_states); + f(ss, done, false); + out << ss.str(); + out.put('\n'); + } + }; + template bool all_of(const CONT& c) @@ -1125,7 +1266,7 @@ namespace square_matrix compute_incomp(const_twa_graph_ptr mm, const unsigned n_env, - stopwatch& sw) + satprob_info& si) { const unsigned n_tot = mm->num_states(); @@ -1201,7 +1342,7 @@ 
namespace return inc_player.get(ps2c[s1].second, ps2c[s2].second); }; - dotimeprint << "Done computing player incomp " << sw.stop() << '\n'; + si.player_incomp_time = si.restart(); #ifdef TRACE trace << "player cond id incomp\n"; @@ -1284,7 +1425,7 @@ namespace trace << "Env state incomp\n"; inc_env.print(std::cerr); #endif - + si.incomp_time = si.restart(); return inc_env; } @@ -1912,26 +2053,22 @@ namespace std::pair reduce_and_split(const_twa_graph_ptr mmw, const unsigned n_env, const square_matrix& incompmat, - stopwatch& sw) + satprob_info& si) { reduced_alphabet_t red; + si.start(); + std::tie(red.n_groups, red.which_group) = trans_comp_classes(incompmat); - dotimeprint << "Done trans comp " << red.n_groups - << " - " << sw.stop() << '\n'; compute_all_letters(red, mmw, n_env); - dotimeprint << "Done comp all letters " << " - " << sw.stop() << '\n'; + si.split_all_let_time = si.restart(); compute_minimal_letters(red, mmw, n_env); -#ifdef MINTIMINGS - dotimeprint << "Done comp all min sim letters "; - for (const auto& al : red.bisim_letters) - dotimeprint << al.size() << ' '; - dotimeprint << " - " << sw.stop() << '\n'; -#endif + si.split_min_let_time = si.restart(); + si.n_bisim_let = red.n_red_sigma; twa_graph_ptr split_mmw = split_on_minimal(red, mmw, n_env); - dotimeprint << "Done splitting " << sw.stop() << '\n'; + si.split_cstr_time = si.restart(); trace << std::endl; return std::make_pair(split_mmw, red); @@ -2234,9 +2371,10 @@ namespace struct mm_sat_prob_t { mm_sat_prob_t(unsigned n_classes, unsigned n_env, - unsigned n_sigma_red) + unsigned n_sigma_red, satprob_info& si) : lm(n_classes, n_env, n_sigma_red) , n_classes{lm.n_classes_} + , si{si} { state_cover_clauses.reserve(n_classes); trans_cover_clauses.reserve(n_classes*n_sigma_red); @@ -2288,6 +2426,13 @@ namespace // res[i] == -1 : i not used in lit mapper // res[i] == 0 : i is assigned false // res[i] == 1 : i is assigned true + if (sat_dimacs_file) + { + fprintf(sat_dimacs_file->f, + "c ### Next Instance %lld %lld ###\n", + this->si.n_classes, this->si.n_refinement); + picosat_print(lm.psat_, sat_dimacs_file->f); + } switch (picosat_sat(lm.psat_, -1)) { case PICOSAT_UNSATISFIABLE: @@ -2353,6 +2498,8 @@ namespace std::unordered_map> cube_map; // A map that indicates if two cubes are compatible or not via their id std::unordered_map, bool, pair_hash> cube_incomp_map; + // Piggy-back a struct for performance measure + satprob_info& si; }; template<> @@ -2431,14 +2578,15 @@ namespace const square_matrix& incompmat, const reduced_alphabet_t& red, const part_sol_t& psol, - const unsigned n_env) + const unsigned n_env, + satprob_info& si) { const auto& psolv = psol.psol; const unsigned n_classes = psolv.size(); const unsigned n_red = red.n_red_sigma; const unsigned n_groups = red.n_groups; - mm_sat_prob_t mm_pb(n_classes, n_env, n_red); + mm_sat_prob_t mm_pb(n_classes, n_env, n_red, si); auto& lm = mm_pb.lm; @@ -3372,7 +3520,7 @@ namespace const reduced_alphabet_t& red, const part_sol_t& psol, const unsigned n_env, - stopwatch& sw) + satprob_info& si) { const auto& psolv = psol.psol; const unsigned n_psol = psolv.size(); @@ -3393,15 +3541,16 @@ namespace mm_pb.lm.print(std::cerr); #endif mm_pb.set_variable_clauses(); - dotimeprint << "Done constructing SAT " << sw.stop() << '\n'; - dotimeprint << "n literals " << mm_pb.n_lits() - << " n clauses " << mm_pb.n_clauses() << '\n'; + si.n_lit = mm_pb.n_lits(); + si.n_clauses = mm_pb.n_clauses(); + si.start(); auto sol = mm_pb.get_sol(); - dotimeprint << "Done solving SAT " << 
sw.stop() << '\n'; + si.sat_time = si.restart(); if (sol.empty()) { mm_pb.unset_variable_clauses(); + si.write(); return nullptr; } #ifdef TRACE @@ -3606,20 +3755,25 @@ namespace for (const auto& el : used_ziaj_map) if (el.second == bddfalse) infeasible_classes.emplace_back(el.first.i, el.first.a); + si.build_time = si.restart(); + if (!infeasible_classes.empty()) { // Remove the variable clauses // This is suboptimal but the contexts form a stack so... - dotimeprint << "Refining constraints for " - << infeasible_classes.size() << " classses.\n"; + auto oldrefine = si.n_refinement; + si.write(); + si.task = "refinement"; + si.n_classes = n_classes; + si.n_refinement = oldrefine + infeasible_classes.size(); mm_pb.unset_variable_clauses(); add_bdd_cond_constr(mm_pb, mmw, red, n_env, infeasible_classes, x_in_class); + si.refine_time = si.restart(); continue; //retry } cstr_split_mealy(minmach, red, x_in_class, used_ziaj_map); - // todo: What is the impact of chosing one of the possibilities minmach->set_init_state(init_class_v.front()); return minmach; @@ -3634,8 +3788,10 @@ namespace spot { assert(is_mealy(mm)); - stopwatch sw; - sw.start(); + satprob_info si(sat_instance_name); + si.task = "presat"; + stopwatch sglob; + sglob.start(); if ((premin < -1) || (premin > 1)) throw std::runtime_error("premin has to be -1, 0 or 1"); @@ -3672,7 +3828,9 @@ namespace spot const_twa_graph_ptr mmw = do_premin(); assert(is_split_mealy(mmw)); - dotimeprint << "Done premin " << sw.stop() << '\n'; + si.premin_time = si.restart(); + + // 0 -> "Env" next is input props // 1 -> "Player" next is output prop @@ -3690,24 +3848,24 @@ namespace spot print_hoa(std::cerr, mmw); #endif assert(n_env != -1u); - dotimeprint << "Done reorganise " << n_env << " - " - << sw.stop() << '\n'; + si.reorg_time = si.restart(); // Compute incompatibility based on bdd - auto incompmat = compute_incomp(mmw, n_env, sw); - dotimeprint << "Done incompatibility " << sw.stop() << '\n'; + auto incompmat = compute_incomp(mmw, n_env, si); #ifdef TRACE incompmat.print(std::cerr); #endif // Get a partial solution auto partsol = get_part_sol(incompmat); - dotimeprint << "Done partial solution " << partsol.psol.size() - << " - " << sw.stop() << '\n'; + si.partsol_time = si.restart(); auto early_exit = [&]() { + si.done = 1; + si.total_time = sglob.stop(); + si.write(); // Always keep machines split if (mm->get_named_prop("state-player")) assert(is_split_mealy_specialization(mm, mmw)); @@ -3721,58 +3879,78 @@ namespace spot // states as the original automaton -> we are done if (partsol.psol.size() == n_env) { - dotimeprint << "Done trans comp " << 1 << " - " << sw.stop() << '\n'; - dotimeprint << "Done comp all letters " << " - " - << sw.stop() << '\n'; -#ifdef MINTIMINGS - dotimeprint << "Done comp all min sim letters 0 - " - << sw.stop() << '\n'; -#endif - dotimeprint << "Done splitting " << sw.stop() << '\n'; - dotimeprint << "Done split and reduce " << sw.stop() << '\n'; - dotimeprint << "Done build init prob " << sw.stop() << '\n'; - dotimeprint << "Done minimizing - " << mmw->num_states() - << " - " << sw.stop() << '\n'; return early_exit(); } // Get the reduced alphabet auto [split_mmw, reduced_alphabet] = - reduce_and_split(mmw, n_env, incompmat, sw); - dotimeprint << "Done split and reduce " << sw.stop() << '\n'; + reduce_and_split(mmw, n_env, incompmat, si); auto mm_pb = build_init_prob(split_mmw, incompmat, - reduced_alphabet, partsol, n_env); - dotimeprint << "Done build init prob " << sw.stop() << '\n'; + reduced_alphabet, partsol, 
n_env, si); + si.prob_init_build_time = si.restart(); + si.write(); twa_graph_ptr minmachine = nullptr; for (size_t n_classes = partsol.psol.size(); n_classes < n_env; ++n_classes) { + if (si.task.empty()) + si.task = "sat"; + si.n_iteration = (n_classes-partsol.psol.size()); + si.n_refinement = 0; + si.n_classes = n_classes; + minmachine = try_build_min_machine(mm_pb, mmw, reduced_alphabet, partsol, n_env, - sw); - dotimeprint << "Done try_build " << n_classes - << " - " << sw.stop() << '\n'; + si); if (minmachine) break; increment_classes(split_mmw, incompmat, reduced_alphabet, partsol, mm_pb); - dotimeprint << "Done incrementing " << sw.stop() << '\n'; + } // Is already minimal -> Return a copy // Set state players! if (!minmachine) return early_exit(); set_synthesis_outputs(minmachine, get_synthesis_outputs(mm)); - dotimeprint << "Done minimizing - " << minmachine->num_states() - << " - " << sw.stop() << '\n'; + + si.done=1; + si.n_min_states = minmachine->num_states(); + si.total_time = sglob.stop(); + si.write(); assert(is_split_mealy_specialization(mm, minmachine)); return minmachine; } + + twa_graph_ptr + minimize_mealy(const const_twa_graph_ptr& mm, + synthesis_info& si) + { + if ((si.minimize_lvl < 3) || (5 < si.minimize_lvl)) + throw std::runtime_error("Invalid option"); + + std::string csvfile = si.opt.get_str("satlogcsv"); + std::string dimacsfile = si.opt.get_str("satlogdimacs"); + + if (!csvfile.empty()) + sat_csv_file + = std::make_unique(csvfile, + std::ios_base::ate + | std::ios_base::app); + if (!dimacsfile.empty()) + sat_dimacs_file + = std::make_unique(dimacsfile); + sat_instance_name = si.opt.get_str("satinstancename"); + auto res = minimize_mealy(mm, si.minimize_lvl-4); + sat_csv_file.reset(); + sat_dimacs_file.reset(); + return res; + } } namespace spot diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index d603d8000..3bdb71b73 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -159,6 +159,27 @@ namespace spot SPOT_API twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, int premin = -1); + /// \ingroup mealy + /// \brief Minimizes an (in)completely specified mealy machine + /// + /// The approach is described in \cite renkin.22.forte. + /// + /// \param si synthesis_info structure used to store data for benchmarking + /// and indicates which premin level to use + /// + /// \return A split mealy machines which is a minimal + /// specialization of the original machine. + /// + /// \note Enabling \a premin will remove finite traces. + /// \note If si.opt contains an option "satlogcsv" detailed results will be + /// stored in this file. If it contains "satlogdimacs" all sat problems will + /// stored. 
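+  ///
+  /// A minimal usage sketch (\a mm is any split mealy machine; the
+  /// CSV file name is arbitrary):
+  /// \code
+  ///   spot::synthesis_info si;
+  ///   si.minimize_lvl = 3;   // 3, 4, 5 select premin = -1, 0, 1
+  ///   si.opt.set_str("satlogcsv", "minimization-stats.csv");
+  ///   spot::twa_graph_ptr min = spot::minimize_mealy(mm, si);
+  /// \endcode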
+ /// \see is_split_mealy_specialization + + SPOT_API twa_graph_ptr + minimize_mealy(const const_twa_graph_ptr& mm, + synthesis_info& si); + /// \ingroup mealy /// \brief Test if the split mealy machine \a right is a specialization of diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index c2aeb125c..9d7fe7d96 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -7,7 +7,8 @@ "metadata": {}, "outputs": [], "source": [ - "import spot\n", + "import spot, buddy\n", + "import pandas as pd\n", "spot.setup()" ] }, @@ -128,7 +129,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc1244a3d50> >" + " *' at 0x7fcc35aaa030> >" ] }, "execution_count": 4, @@ -208,7 +209,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc124439570> >" + " *' at 0x7fcc35aaa900> >" ] }, "execution_count": 6, @@ -282,7 +283,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc124439570> >" + " *' at 0x7fcc35aaa900> >" ] }, "execution_count": 8, @@ -296,9 +297,1284 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "923a59d6", "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc35ac20c0> >" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o = buddy.bdd_ithvar(aut.register_ap(\"o\"))\n", + "spot.set_synthesis_outputs(aut, o)\n", + "aut.new_states(3)\n", + "aut.new_edge(0,1,buddy.bdd_not(i)&buddy.bdd_not(o))\n", + "aut.new_edge(0,2,i&o)\n", + "aut.new_edge(1,1,buddy.bdd_not(o))\n", + "aut.new_edge(2,2,buddy.bddtrue)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f06d6df4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('o',)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc35ac2720> >" + ] + }, + "execution_count": 10, + "metadata": {}, + 
"output_type": "execute_result" + } + ], + "source": [ + "aut_s = spot.split_2step(aut)\n", + "print(spot.get_synthesis_output_aps(aut_s))\n", + "aut_s" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "3cc4d320", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...refine_timetotal_timen_classesn_refinementn_litn_clausesn_iterationn_bisim_letn_min_statesdone
0presat25643.31.112e-064.588e-069.888e-064.549e-061.5929e-059.338e-065.901e-066.7276e-05...NaNNaNNaNNaNNaNNaNNaN2NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN0.000282709207120NaN41
\n", + "

2 rows × 22 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time incomp_time \\\n", + "0 presat 25643.3 1.112e-06 4.588e-06 9.888e-06 4.549e-06 \n", + "1 sat NaN NaN NaN NaN NaN \n", + "\n", + " split_all_let_time split_min_let_time split_cstr_time prob_init_build_time \\\n", + "0 1.5929e-05 9.338e-06 5.901e-06 6.7276e-05 \n", + "1 NaN NaN NaN NaN \n", + "\n", + " ... refine_time total_time n_classes n_refinement n_lit n_clauses \\\n", + "0 ... NaN NaN NaN NaN NaN NaN \n", + "1 ... NaN 0.000282709 2 0 7 12 \n", + "\n", + " n_iteration n_bisim_let n_min_states done \n", + "0 NaN 2 NaN NaN \n", + "1 0 NaN 4 1 \n", + "\n", + "[2 rows x 22 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc88735f00> >" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "min_lvl = 0\n", + "aut_ms, table = spot.minimize_mealy(aut_s, min_lvl, display_log=True, return_log=True)\n", + "aut_ms" + ] + }, + { + "cell_type": "markdown", + "id": "bc844797", + "metadata": {}, + "source": [ + "## A more involved example" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "893bc90e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "(!o0 & o1) | (o0 & !o1)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc6157fe40> >" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o0 = buddy.bdd_ithvar(aut.register_ap(\"o0\"))\n", + "no0 = buddy.bdd_not(o0)\n", + "o1 = buddy.bdd_ithvar(aut.register_ap(\"o1\"))\n", + "no1 = buddy.bdd_not(o1)\n", + "spot.set_synthesis_outputs(aut, o0&o1)\n", + "\n", + "vo1 = o0&o1\n", + "vo2 = no0&o1\n", + "vo3 = o0&no1\n", + "\n", + "aut.new_states(3)\n", + "\n", + "aut.new_edge(0,1,vo1|vo2)\n", + "aut.new_edge(1,2,vo1|vo3)\n", + "aut.new_edge(2,2,vo2|vo3)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "23edb107", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", 
+ "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "(!o0 & o1) | (o0 & !o1)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc6157f210> >" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut_s = spot.split_2step(aut)\n", + "aut_s" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "837aab84", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...refine_timetotal_timen_classesn_refinementn_litn_clausesn_iterationn_bisim_letn_min_statesdone
0presat25643.41.683e-065.611e-062.66e-051.2e-073.647e-068.365e-063.747e-062.5538e-05...NaNNaNNaNNaNNaNNaNNaN1NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaNNaN10360NaNNaNNaN
2refinementNaNNaNNaNNaNNaNNaNNaNNaNNaN...4.4884e-05NaN111016NaNNaNNaNNaN
3satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN0.0002003442017291NaN41
\n", + "

4 rows × 22 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time \\\n", + "0 presat 25643.4 1.683e-06 5.611e-06 2.66e-05 \n", + "1 sat NaN NaN NaN NaN \n", + "2 refinement NaN NaN NaN NaN \n", + "3 sat NaN NaN NaN NaN \n", + "\n", + " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", + "0 1.2e-07 3.647e-06 8.365e-06 3.747e-06 \n", + "1 NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN \n", + "\n", + " prob_init_build_time ... refine_time total_time n_classes n_refinement \\\n", + "0 2.5538e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN NaN 1 0 \n", + "2 NaN ... 4.4884e-05 NaN 1 1 \n", + "3 NaN ... NaN 0.000200344 2 0 \n", + "\n", + " n_lit n_clauses n_iteration n_bisim_let n_min_states done \n", + "0 NaN NaN NaN 1 NaN NaN \n", + "1 3 6 0 NaN NaN NaN \n", + "2 10 16 NaN NaN NaN NaN \n", + "3 17 29 1 NaN 4 1 \n", + "\n", + "[4 rows x 22 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 NaN\n", + "1 3\n", + "2 10\n", + "3 17\n", + "Name: n_lit, dtype: object\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0 & o1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcc35ac22a0> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "aut_ms, table = spot.minimize_mealy(aut_s, si, display_log=True, return_log=True)\n", + "print(table[\"n_lit\"])\n", + "aut_ms" + ] + }, + { + "cell_type": "markdown", + "id": "0fea0269", + "metadata": {}, + "source": [ + "## Testing dimacs output" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "d14324e8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...refine_timetotal_timen_classesn_refinementn_litn_clausesn_iterationn_bisim_letn_min_statesdone
0presat25643.51.563e-065.4e-062.0519e-051.3e-073.968e-069.698e-067.624e-063.211e-05...NaNNaNNaNNaNNaNNaNNaN1NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaNNaN10360NaNNaNNaN
2refinementNaNNaNNaNNaNNaNNaNNaNNaNNaN...4.4633e-05NaN111016NaNNaNNaNNaN
3satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN0.0002806752017291NaN41
\n", + "

4 rows × 22 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time \\\n", + "0 presat 25643.5 1.563e-06 5.4e-06 2.0519e-05 \n", + "1 sat NaN NaN NaN NaN \n", + "2 refinement NaN NaN NaN NaN \n", + "3 sat NaN NaN NaN NaN \n", + "\n", + " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", + "0 1.3e-07 3.968e-06 9.698e-06 7.624e-06 \n", + "1 NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN \n", + "\n", + " prob_init_build_time ... refine_time total_time n_classes n_refinement \\\n", + "0 3.211e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN NaN 1 0 \n", + "2 NaN ... 4.4633e-05 NaN 1 1 \n", + "3 NaN ... NaN 0.000280675 2 0 \n", + "\n", + " n_lit n_clauses n_iteration n_bisim_let n_min_states done \n", + "0 NaN NaN NaN 1 NaN NaN \n", + "1 3 6 0 NaN NaN NaN \n", + "2 10 16 NaN NaN NaN NaN \n", + "3 17 29 1 NaN 4 1 \n", + "\n", + "[4 rows x 22 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "c ### Next Instance 1 0 ###\n", + "p cnf 5 5\n", + "-1 2 -3 0\n", + "1 -3 0\n", + "1 -5 0\n", + "2 -5 0\n", + "3 -5 0\n", + "c ### Next Instance 1 1 ###\n", + "p cnf 12 15\n", + "-1 2 -3 0\n", + "4 0\n", + "6 0\n", + "-9 0\n", + "-1 -2 10 0\n", + "-10 0\n", + "1 -3 0\n", + "1 -5 0\n", + "1 -12 0\n", + "2 -5 0\n", + "2 -12 0\n", + "-2 9 0\n", + "3 -5 0\n", + "3 -12 0\n", + "7 8 0\n", + "c ### Next Instance 2 0 ###\n", + "p cnf 19 29\n", + "-3 -1 2 0\n", + "4 0\n", + "6 0\n", + "-9 0\n", + "-1 -2 10 0\n", + "-10 0\n", + "11 -16 -17 0\n", + "1 -15 -17 0\n", + "-1 13 -14 0\n", + "-11 13 -16 0\n", + "-11 -15 2 0\n", + "-13 -15 2 0\n", + "1 11 -19 0\n", + "13 -19 2 0\n", + "15 16 -19 0\n", + "3 14 -19 0\n", + "-2 0\n", + "-12 0\n", + "-5 0\n", + "1 -3 0\n", + "1 -5 0\n", + "1 -12 0\n", + "2 -5 0\n", + "2 -12 0\n", + "-2 9 0\n", + "3 -5 0\n", + "3 -12 0\n", + "7 8 0\n", + "11 -14 0\n", + "\n" + ] + } + ], + "source": [ + "import tempfile\n", + "\n", + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "\n", + "with tempfile.NamedTemporaryFile(dir='.', suffix='.dimacslog') as t:\n", + " si.opt.set_str(\"satlogdimacs\", t.name)\n", + " aut_ms, table = spot.minimize_mealy(aut_s, si, display_log=True, return_log=True)\n", + " with open(t.name, \"r\") as f:\n", + " print(\"\".join(f.readlines()))\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5c9fe115", + "metadata": {}, "outputs": [], "source": [] } @@ -320,6 +1596,11 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" + }, + "vscode": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + } } }, "nbformat": 4, diff --git a/tests/python/ipnbdoctest.py b/tests/python/ipnbdoctest.py index 18da81cf8..c6bfcf134 100755 --- a/tests/python/ipnbdoctest.py +++ b/tests/python/ipnbdoctest.py @@ -143,6 +143,10 @@ def canonicalize(s, type, ignores): # timing result we cannot compare between runs. s = re.sub(r'', '
', s, flags=re.DOTALL) + # Table that contains premin_time are log from the mealy minimization. + # They contain timing result so we cannot compare between runs. + s = re.sub(r'', '
', + s, flags=re.DOTALL) for n, p in enumerate(ignores): s = re.sub(p, 'IGN{}'.format(n), s) diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 654d22873..54da20ef7 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,6 +3,7 @@ { "cell_type": "code", "execution_count": 1, + "id": "4f84fa79", "metadata": {}, "outputs": [], "source": [ @@ -13,6 +14,7 @@ }, { "cell_type": "markdown", + "id": "4ad017a0", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", @@ -37,6 +39,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "e333be09", "metadata": {}, "outputs": [ { @@ -655,6 +658,7 @@ }, { "cell_type": "markdown", + "id": "4d030586", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." @@ -663,6 +667,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "f13ac820", "metadata": {}, "outputs": [ { @@ -1222,6 +1227,7 @@ }, { "cell_type": "markdown", + "id": "98aa1402", "metadata": {}, "source": [ "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." @@ -1230,6 +1236,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "4c93add7", "metadata": {}, "outputs": [ { @@ -2228,6 +2235,7 @@ }, { "cell_type": "markdown", + "id": "9d8d52f6", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2236,6 +2244,7 @@ { "cell_type": "code", "execution_count": 5, + "id": "707f4cf6", "metadata": {}, "outputs": [ { @@ -2517,6 +2526,7 @@ }, { "cell_type": "markdown", + "id": "b9e4412e", "metadata": {}, "source": [ "# Converting the separated Mealy machine to AIG\n", @@ -2529,6 +2539,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "9f344931", "metadata": {}, "outputs": [ { @@ -2604,6 +2615,7 @@ }, { "cell_type": "markdown", + "id": "92bbe8d0", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2612,6 +2624,7 @@ { "cell_type": "code", "execution_count": 7, + "id": "3ae7ce32", "metadata": {}, "outputs": [ { @@ -2687,6 +2700,7 @@ }, { "cell_type": "markdown", + "id": "44fbc0ac", "metadata": {}, "source": [ "To encode the circuit in the AIGER format (ASCII version) use:" @@ -2695,6 +2709,7 @@ { "cell_type": "code", "execution_count": 8, + "id": "566715d5", "metadata": {}, "outputs": [ { @@ -2718,6 +2733,7 @@ }, { "cell_type": "markdown", + "id": "ef304f36", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2725,6 +2741,7 @@ }, { "cell_type": "markdown", + "id": "5c2b0b78", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). 
In that case those \n", @@ -2736,6 +2753,7 @@ { "cell_type": "code", "execution_count": 9, + "id": "874c7df1", "metadata": {}, "outputs": [ { @@ -3260,6 +3278,7 @@ }, { "cell_type": "markdown", + "id": "c564dba3", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3268,6 +3287,7 @@ { "cell_type": "code", "execution_count": 10, + "id": "c31a3b38", "metadata": {}, "outputs": [ { @@ -3378,6 +3398,7 @@ }, { "cell_type": "markdown", + "id": "3323fc84", "metadata": {}, "source": [ "# Combining Mealy machines\n", @@ -3397,6 +3418,7 @@ { "cell_type": "code", "execution_count": 11, + "id": "5d8e4cdb", "metadata": {}, "outputs": [ { @@ -3991,6 +4013,7 @@ }, { "cell_type": "markdown", + "id": "c7a1986f", "metadata": {}, "source": [ "# Reading an AIGER-file\n", @@ -4005,6 +4028,7 @@ { "cell_type": "code", "execution_count": 12, + "id": "a10d7e3b", "metadata": {}, "outputs": [], "source": [ @@ -4025,6 +4049,7 @@ { "cell_type": "code", "execution_count": 13, + "id": "2c40e19b", "metadata": {}, "outputs": [ { @@ -4149,6 +4174,7 @@ { "cell_type": "code", "execution_count": 14, + "id": "0ad6c566", "metadata": {}, "outputs": [ { @@ -4177,6 +4203,7 @@ { "cell_type": "code", "execution_count": 15, + "id": "2e1996c1", "metadata": {}, "outputs": [ { @@ -4193,6 +4220,7 @@ }, { "cell_type": "markdown", + "id": "41a8e042", "metadata": {}, "source": [ "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." @@ -4201,6 +4229,7 @@ { "cell_type": "code", "execution_count": 16, + "id": "7399ea38", "metadata": {}, "outputs": [ { @@ -4268,6 +4297,7 @@ }, { "cell_type": "markdown", + "id": "7ac06afc", "metadata": {}, "source": [ "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", @@ -4277,6 +4307,7 @@ { "cell_type": "code", "execution_count": 17, + "id": "bac68923", "metadata": {}, "outputs": [ { @@ -4424,6 +4455,7 @@ { "cell_type": "code", "execution_count": 18, + "id": "03ceb2a8", "metadata": {}, "outputs": [ { @@ -4626,7 +4658,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -4640,7 +4672,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.8.10" } }, "nbformat": 4, From c45ff0c94ce4a53595d25423590c8d915148e30b Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Wed, 21 Sep 2022 16:02:49 +0200 Subject: [PATCH 137/606] fix: ltlsynt --tlsf does not propagate name to csv * bin/ltlsynt.cc: Here --- bin/ltlsynt.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index e0cf78c47..630ccd629 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -705,7 +705,7 @@ namespace } if (opt_csv) - print_csv(f); + print_csv(f, filename); return res; } From aa7992c65f19204d10d25d313d2c89057d15de72 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Sep 2022 14:04:18 +0200 Subject: [PATCH 138/606] simplify some uses of minato_isop Typically intead of doing minato_isop isop(rel & letter); while (bdd cube = isop.next()) { bdd res = bdd_exists(cube, ap) ... } do minato_isop isop(bdd_relprod(rel, letter, ap); while (bdd res = isop.next()) { ... 
} this way the existential quantification is done once at the same time of the conjunction, and isop has fewer variable to work with. * spot/twaalgos/alternation.cc, spot/twaalgos/dualize.cc, spot/twaalgos/simulation.cc, spot/twaalgos/toweak.cc: Here. --- spot/twaalgos/alternation.cc | 41 ++++++++++++++++++------------------ spot/twaalgos/dualize.cc | 15 ++++++------- spot/twaalgos/simulation.cc | 13 ++++-------- spot/twaalgos/toweak.cc | 15 ++++++------- 4 files changed, 36 insertions(+), 48 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index a3762f9b0..8370f395b 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2016-2019, 2021, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -457,12 +457,10 @@ namespace spot // First loop over all possible valuations atomic properties. for (bdd oneletter: minterms_of(all_letters, ap)) { - minato_isop isop(bs & oneletter); - bdd cube; - while ((cube = isop.next()) != bddfalse) + minato_isop isop(bdd_relprod(bs, oneletter, ap)); + bdd dest; + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_vars_); - bdd dest = bdd_existcomp(cube, all_vars_); v.clear(); acc_cond::mark_t m = bdd_to_state(dest, v); @@ -491,7 +489,7 @@ namespace spot unsigned d = new_state(v, has_mark); if (has_mark) m.set(0); - res->new_edge(s, d, cond, all_marks - m); + res->new_edge(s, d, oneletter, all_marks - m); } } } @@ -576,7 +574,8 @@ namespace spot bdd all_states_; bdd ap_; bdd all_letters_; - bdd transition_; + bdd dest_; + bdd cond_; minato_isop isop_; const std::map& var_to_state_; univ_remover_state* dst_; @@ -587,8 +586,8 @@ namespace spot const std::vector& state_to_var, const std::map& var_to_state, bdd all_states) - : transitions_(bddtrue), all_states_(all_states), transition_(bddfalse), - isop_(bddfalse), var_to_state_(var_to_state) + : transitions_(bddtrue), all_states_(all_states), dest_(bddfalse), + cond_(bddfalse), isop_(bddfalse), var_to_state_(var_to_state) { // Build the bdd transitions_, from which we extract the successors. for (unsigned s : state->states()) @@ -627,20 +626,20 @@ namespace spot void one_transition() { - transition_ = isop_.next(); - if (transition_ != bddfalse || all_letters_ != bddfalse) + dest_ = isop_.next(); + if (dest_ != bddfalse || all_letters_ != bddfalse) { // If it was the last transition, try the next letter. - if (transition_ == bddfalse) + if (dest_ == bddfalse) { bdd oneletter = bdd_satoneset(all_letters_, ap_, bddfalse); + cond_ = oneletter; all_letters_ -= oneletter; // Get a sum of possible transitions matching this letter. 
- isop_ = minato_isop(oneletter & transitions_); - transition_ = isop_.next(); + isop_ = minato_isop(bdd_relprod(transitions_, oneletter, ap_)); + dest_ = isop_.next(); } - bdd dest_bdd = bdd_exist(transition_, ap_); - std::set dest = bdd_to_state(dest_bdd); + std::set dest = bdd_to_state(dest_); dst_ = new univ_remover_state(dest); } } @@ -648,18 +647,18 @@ namespace spot virtual bool first() override { one_transition(); - return transition_ != bddfalse; + return dest_ != bddfalse; } virtual bool next() override { one_transition(); - return transition_ != bddfalse; + return dest_ != bddfalse; } virtual bool done() const override { - return transition_ == bddfalse && all_letters_ == bddfalse; + return dest_ == bddfalse && all_letters_ == bddfalse; } virtual const state* dst() const override @@ -669,7 +668,7 @@ namespace spot virtual bdd cond() const override { - return bdd_exist(transition_, all_states_); + return cond_; } virtual acc_cond::mark_t acc() const override diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index e42822740..91498ce8d 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -155,14 +155,11 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(delta & oneletter); - bdd cube; + minato_isop isop(bdd_relprod(delta, oneletter, ap)); + bdd dest; - while ((cube = isop.next()) != bddfalse) + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_vars_); - bdd dest = bdd_existcomp(cube, all_vars_); - st.clear(); acc_cond::mark_t m = bdd_to_state(dest, st); if (st.empty()) @@ -171,7 +168,7 @@ namespace spot if (aut_->prop_state_acc()) m = aut_->state_acc_sets(i); } - res->new_univ_edge(i, st.begin(), st.end(), cond, m); + res->new_univ_edge(i, st.begin(), st.end(), oneletter, m); } } } diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index e62762489..58ebfd79d 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -590,7 +590,7 @@ namespace spot // C1 then (!C1)C2, instead of C1 then C2. // With minatop_isop, we ensure that the no negative // class variable will be seen (likewise for promises). - minato_isop isop(sig & one); + minato_isop isop(bdd_relprod(sig, one, sup_all_atomic_prop)); ++nb_minterms; @@ -603,17 +603,12 @@ namespace spot // Take the edge, and keep only the variable which // are used to represent the class. - bdd dst = bdd_existcomp(cond_acc_dest, - all_class_var_); + bdd dst = bdd_existcomp(cond_acc_dest, all_class_var_); // Keep only ones who are acceptance condition. auto acc = bdd_to_mark(bdd_existcomp(cond_acc_dest, all_proms_)); - // Keep the other! - bdd cond = bdd_existcomp(cond_acc_dest, - sup_all_atomic_prop); - // Because we have complemented all the Inf // acceptance conditions on the input automaton, // we must revert them to create a new edge. 
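(A note on the primitive used throughout this patch, with f, g and vars
standing for placeholder BDDs: bdd_relprod(f, g, vars) is BuDDy's
relational product, i.e. it computes

    bdd_exist(f & g, vars)

in a single pass, which is why each "conjoin, then quantify every cube"
sequence can be folded into one call as done in these hunks.)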
@@ -630,11 +625,11 @@ namespace spot accst[srcst] = acc; acc = {}; } - gb->new_edge(dst.id(), src.id(), cond, acc); + gb->new_edge(dst.id(), src.id(), one, acc); } else { - gb->new_edge(src.id(), dst.id(), cond, acc); + gb->new_edge(src.id(), dst.id(), one, acc); } } } diff --git a/spot/twaalgos/toweak.cc b/spot/twaalgos/toweak.cc index 8f62477a4..543c7c9a1 100644 --- a/spot/twaalgos/toweak.cc +++ b/spot/twaalgos/toweak.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017, 2018, 2021, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -179,14 +179,11 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(delta & oneletter); - bdd cube; + minato_isop isop(bdd_relprod(delta, oneletter, ap)); + bdd dest; - while ((cube = isop.next()) != bddfalse) + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_states_); - bdd dest = bdd_existcomp(cube, all_states_); - states.clear(); while (dest != bddtrue) { @@ -199,7 +196,7 @@ namespace spot } res_->new_univ_edge(new_state(st.id, st.rank, st.mark), states.begin(), states.end(), - cond, mark); + oneletter, mark); } } todo_.pop(); From 3efab05cf22214b74c8235a1d7dd8c1701c532e5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Sep 2022 15:40:12 +0200 Subject: [PATCH 139/606] introduce delay_branching_here This is motivated by an example sent by Edmond Irani Liu, that will be tested in next patch. * spot/twaalgos/dbranch.cc, spot/twaalgos/dbranch.hh: New files. * python/spot/impl.i, spot/twaalgos/Makefile.am: Add them. * spot/twaalgos/translate.cc: Call delay_branching_here unconditionally. * spot/twa/twagraph.cc (defrag_states): Do not assume that games are alternating. * tests/core/genltl.test: Adjust expected numbers. * tests/python/dbranch.py: New file. * tests/Makefile.am: Add it. --- NEWS | 9 ++ python/spot/impl.i | 16 ++-- spot/twa/twagraph.cc | 12 --- spot/twaalgos/Makefile.am | 4 +- spot/twaalgos/dbranch.cc | 163 +++++++++++++++++++++++++++++++++++++ spot/twaalgos/dbranch.hh | 36 ++++++++ spot/twaalgos/translate.cc | 8 +- tests/Makefile.am | 1 + tests/core/genltl.test | 6 +- tests/python/dbranch.py | 147 +++++++++++++++++++++++++++++++++ 10 files changed, 378 insertions(+), 24 deletions(-) create mode 100644 spot/twaalgos/dbranch.cc create mode 100644 spot/twaalgos/dbranch.hh create mode 100644 tests/python/dbranch.py diff --git a/NEWS b/NEWS index 535dee5fa..f6ec9ad15 100644 --- a/NEWS +++ b/NEWS @@ -142,6 +142,15 @@ New in spot 2.10.6.dev (not yet released) succesors, should be called before running simulation-based reductions. + - A new function delay_branching_here(aut) can be used to simplify + some non-deterministic branching. If two transitions (q₁,ℓ,M,q₂) + and (q₁,ℓ,M,q₃) differ only by their destination state, and are + the only incoming transitions of their destination states, then q₂ + and q₃ can be merged (taking the union of their outgoing + transitions). This is cheap function is automatically called by + spot::translate() after translation of a formula to GBA, before + further simplification. + - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been compiled with --enable-pthread. 
Currently, only diff --git a/python/spot/impl.i b/python/spot/impl.i index 23c07c4e8..88bdcf5c4 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -114,13 +114,14 @@ #include #include #include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include #include #include #include @@ -678,11 +679,14 @@ def state_is_accepting(self, src) -> "bool": %include %include %include -%include -%include %include %include %include +%include +%include +%include +%include +%include %feature("flatnested") spot::twa_run::step; %include %template(list_step) std::list; @@ -694,8 +698,6 @@ def state_is_accepting(self, src) -> "bool": %include %include %include -%include -%include %include %include %include diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 4d0009e93..2a72702f3 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1323,18 +1323,6 @@ namespace spot } init_number_ = newst[init_number_]; g_.defrag_states(newst, used_states); - // Make sure we did not mess up the structure - assert([&]() - { - if (auto sp = get_named_prop>("state-player")) - { - for (const auto& e : edges()) - if (sp->at(e.src) == sp->at(e.dst)) - return false; - return true; - } - return true; - }() && "Game not alternating!"); } void twa_graph::remove_unused_ap() diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index ff71982b5..57ae8ce9f 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2018, 2020-2021 Laboratoire de Recherche et +## Copyright (C) 2008-2018, 2020-2022 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 ## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -42,6 +42,7 @@ twaalgos_HEADERS = \ contains.hh \ copy.hh \ cycles.hh \ + dbranch.hh \ degen.hh \ determinize.hh \ dot.hh \ @@ -115,6 +116,7 @@ libtwaalgos_la_SOURCES = \ compsusp.cc \ contains.cc \ cycles.cc \ + dbranch.cc \ degen.cc \ determinize.cc \ dot.cc \ diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc new file mode 100644 index 000000000..465f8326e --- /dev/null +++ b/spot/twaalgos/dbranch.cc @@ -0,0 +1,163 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche et Développement +// de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
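+
+// Typical use, sketched here for reference (`aut` stands for any
+// twa_graph_ptr built elsewhere, e.g. by spot::translator()):
+//
+//   if (spot::delay_branching_here(aut))
+//     {
+//       aut->purge_unreachable_states();
+//       aut->merge_edges();
+//     }
+//
+// delay_branching_here() returns true iff some states were merged;
+// the two cleanup calls mirror what translate.cc does in this patch.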
+ +#include "config.h" + +#include +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + typedef std::pair bdd_color; + + struct bdd_color_hash + { + size_t + operator()(const bdd_color& bc) const noexcept + { + return bc.first.id() ^ bc.second.hash(); + } + }; + + template + bool delay_branching_aux(const twa_graph_ptr& aut, std::vector* owner) + { + unsigned ns = aut->num_states(); + // number of predecessors of each state + std::vector pred_count(ns, 0); + unsigned init = aut->get_init_state_number(); + pred_count[init] = 2; // pretend the initial state has too many + // predecessors, so it does not get fused. + // for each state, number of successors that have a single predecessors + std::vector succ_cand(ns, 0); + for (auto& e: aut->edges()) + for (unsigned d: aut->univ_dests(e)) + { + // Note that e.dst might be a destination group in + // alternating automata. + unsigned pc = ++pred_count[d]; + succ_cand[e.src] += (pc == 1) - (pc == 2); + } + bool changed = false; + typedef robin_hood::unordered_map hashmap_t; + hashmap_t first_dest[1 + is_game]; + auto& g = aut->get_graph(); + + // setup a DFS + std::vector seen(ns); + std::stack todo; + auto push_state = [&](unsigned state) + { + todo.push(state); + seen[state] = true; + }; + push_state(init); + + while (!todo.empty()) + { + unsigned src = todo.top(); + todo.pop(); + if (succ_cand[src] < 2) // nothing to merge + { + for (auto& e: aut->out(src)) + for (unsigned d: aut->univ_dests(e)) + if (!seen[d]) + push_state(d); + continue; + } + first_dest[0].clear(); + if constexpr (is_game) + first_dest[1].clear(); + auto it = g.out_iteraser(src); + while (it) + { + unsigned canddst = it->dst; + for (unsigned d: aut->univ_dests(canddst)) + if (!seen[d]) + push_state(d); + if (aut->is_univ_dest(canddst) || pred_count[canddst] != 1) + { + ++it; + continue; + } + if (it->cond == bddfalse) + { + it.erase(); + continue; + } + unsigned mapidx = is_game ? (*owner)[canddst] : 0; + auto [it2, inserted] = + first_dest[mapidx].emplace(bdd_color{it->cond, it->acc}, + canddst); + if (inserted) + { + ++it; + continue; + } + unsigned mergedst = it2->second; + // we have to merge canddst into mergedst. This is as + // simple as: + // 1) connecting their list of transitions + unsigned& mergedfirst = g.state_storage(mergedst).succ; + unsigned& mergedlast = g.state_storage(mergedst).succ_tail; + unsigned& candfirst = g.state_storage(canddst).succ; + unsigned& candlast = g.state_storage(canddst).succ_tail; + if (mergedlast) + aut->edge_storage(mergedlast).next_succ = candfirst; + else // mergedst had now successor + mergedfirst = candfirst; + mergedlast = candlast; + // 2) updating the source of the merged transitions + for (unsigned e2 = candfirst; e2 != 0;) + { + auto& edge = aut->edge_storage(e2); + edge.src = mergedst; + e2 = edge.next_succ; + } + // 3) deleting the edge to canddst. 
+ candfirst = candlast = 0; + it.erase(); + // 4) updating succ_cand + succ_cand[mergedst] += succ_cand[canddst]; + succ_cand[canddst] = 0; + changed = true; + } + } + return changed; + } + } + + bool delay_branching_here(const twa_graph_ptr& aut) + { + if (aut->prop_universal()) + return false; + auto owner = aut->get_named_prop>("state-player"); + if (SPOT_UNLIKELY(owner)) + return delay_branching_aux(aut, owner); + else + return delay_branching_aux(aut, nullptr); + } +} diff --git a/spot/twaalgos/dbranch.hh b/spot/twaalgos/dbranch.hh new file mode 100644 index 000000000..9cd0efa5e --- /dev/null +++ b/spot/twaalgos/dbranch.hh @@ -0,0 +1,36 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche et Développement +// de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include + +namespace spot +{ + /// \ingroup twa_algorithms + /// \brief Merge states to delay + /// + /// If a state (x) has two outgoing transitions (x,l,m,y) and + /// (x,l,m,z) going to states (x) and (y) that have no other + /// incoming edges, then (y) and (z) can be merged (keeping the + /// union of their outgoing destinations). + /// + /// \return true iff the automaton was modified. + SPOT_API bool delay_branching_here(const twa_graph_ptr& aut); +} diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 4db8643f9..a5a84a10b 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -29,6 +29,7 @@ #include #include #include +#include namespace spot { @@ -401,6 +402,11 @@ namespace spot aut = ltl_to_tgba_fm(r, simpl_->get_dict(), exprop, true, false, false, nullptr, nullptr, unambiguous); + if (delay_branching_here(aut)) + { + aut->purge_unreachable_states(); + aut->merge_edges(); + } } aut = this->postprocessor::run(aut, r); diff --git a/tests/Makefile.am b/tests/Makefile.am index 91d3f10ea..9570f7dcd 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -405,6 +405,7 @@ TESTS_python = \ python/bddnqueen.py \ python/bugdet.py \ python/complement_semidet.py \ + python/dbranch.py \ python/declenv.py \ python/decompose_scc.py \ python/det.py \ diff --git a/tests/core/genltl.test b/tests/core/genltl.test index d5efb0236..d943c4cae 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2021 Laboratoire de Recherche et Développement +# Copyright (C) 2016-2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -134,8 +134,8 @@ genltl --kr-n2=1..2 --kr-nlogn=1..2 --kr-n=1..2 --gxf-and=0..3 --fxg-or=0..3 \ --pps-arbiter-standard=2..3 --pps-arbiter-strict=2..3 --format=%F=%L,%f | ltl2tgba --low --det -F-/2 --stats='%<,%s' > out cat >exp<. + +# Test that the spot.gen package works, in particular, we want +# to make sure that the objects created from spot.gen methods +# are usable with methods from the spot package. + + +import spot +from unittest import TestCase +tc = TestCase() + +aut5 = spot.automaton("""HOA: v1 States: 28 Start: 0 AP: 4 "alive" "b" +"a" "c" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels +explicit-labels state-acc very-weak --BODY-- State: 0 [0] 1 [0] 2 [0] +3 [0] 4 [0] 5 [0&!1] 6 [0] 7 State: 1 [0] 8 State: 2 [!0] 9 [0] 10 +State: 3 [!0] 9 [0] 11 State: 4 [!0] 9 [0] 12 State: 5 [!0] 9 [0] 13 +State: 6 [!0] 9 [0&!1] 14 State: 7 [!0] 9 [0&!1&!2] 14 State: 8 [0] 15 +State: 9 {0} [!0] 9 State: 10 [!0] 9 [0] 16 State: 11 [!0] 9 [0] 17 +State: 12 [!0] 9 [0] 18 State: 13 [!0] 9 [0&!1&!2] 19 State: 14 [!0] 9 +[0&!1] 19 State: 15 [0] 20 State: 16 [!0] 9 [0] 21 State: 17 [!0] 9 +[0] 22 State: 18 [!0] 9 [0&!1&!2] 23 State: 19 [!0] 9 [0&!1] 23 State: +20 [0] 24 State: 21 [!0] 9 [0] 25 State: 22 [!0] 9 [0&!1&!2] 26 State: +23 [!0] 9 [0&!1] 26 State: 24 [0&3] 27 State: 25 [!0] 9 [0&!1&!2] 27 +State: 26 [!0] 9 [0&!1] 27 State: 27 [!0] 9 [0] 27 --END--""") + +copy = spot.make_twa_graph(aut5, spot.twa_prop_set.all()) + +tc.assertFalse(spot.is_deterministic(aut5)) +if spot.delay_branching_here(aut5): + aut5.purge_unreachable_states() + aut5.merge_edges() +tc.assertEqual(aut5.num_states(), 13) +tc.assertEqual(aut5.num_edges(), 29) +tc.assertTrue(spot.are_equivalent(copy, aut5)) + +a = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "a" "b" "c" +Acceptance: 0 t --BODY-- State: 0 [0] 1 [0] 2 [0] 3 State: 1 [!1] 4&5 +[1] 5&6 State: 2 [0] 4&6 State: 3 [0] 3&6 State: 4 [!0] 7 State: 5 +[!0] 7 State: 6 [!0] 6 State: 7 [0] 7 --END--""") + +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() + a.merge_edges() +tc.assertEqual(a.to_str(), """HOA: v1 +States: 7 +Start: 0 +AP: 3 "b" "a" "c" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc univ-branch +--BODY-- +State: 0 +[1] 1 +[1] 2 +State: 1 +[1] 3&5 +[0] 4&5 +[!0] 3&4 +State: 2 +[1] 2&5 +State: 3 +[!1] 6 +State: 4 +[!1] 6 +State: 5 +[!1] 5 +State: 6 +[1] 6 +--END--""") + +a = spot.automaton("""HOA: v1 +States: 9 +Start: 0 AP: 2 "a" "b" +spot.state-player: 0 1 1 0 0 0 0 1 1 +Acceptance: 0 t +--BODY-- +State: 0 +[0] 1 +[0] 2 +[0] 3 +[0] 4 +State: 1 +[1] 5 +State: 2 +[!1] 6 +State: 3 +[1] 7 +State: 4 +[!1] 8 +State: 5 +[t] 5 +State: 6 +[t] 6 +State: 7 +[t] 7 +State: 8 +[t] 8 +--END--""") +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() +tc.assertTrue(spot.are_equivalent(a, copy)) +tc.assertEqual(a.to_str(), """HOA: v1 +States: 7 +Start: 0 +AP: 2 "b" "a" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc very-weak +spot-state-player: 0 1 0 0 0 1 1 +--BODY-- +State: 0 +[1] 1 +[1] 2 +State: 1 +[0] 3 +[!0] 4 +State: 2 +[0] 5 +[!0] 6 +State: 3 +[t] 3 +State: 4 +[t] 4 +State: 5 +[t] 5 +State: 6 +[t] 6 +--END--""") From 7ed62f7eedd89133cdd052b0b11c912bdd69b823 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Sep 2022 16:43:28 +0200 Subject: [PATCH 140/606] genltl: introduce --eil-gsi Based on a mail from Edmond Irani Liu. 
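As a concrete instance (spelled out for reference; a, b and c are the
atomic propositions hard-coded by the generator): for --eil-gsi=1 the
construction added to formulas.cc below yields, up to the order in
which operands are printed,

    (X[!]b | (X[!]a & b)) -> X[!]c

where X[!] is the strong next operator.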
The test case also serves for the previous patch. * bin/genltl.cc, spot/gen/formulas.cc, spot/gen/formulas.hh: Add it. * NEWS: Mention it. * tests/core/genltl.test: Test it. --- NEWS | 10 +++++++++- bin/genltl.cc | 4 +++- spot/gen/formulas.cc | 20 ++++++++++++++++++++ spot/gen/formulas.hh | 6 ++++-- tests/core/genltl.test | 40 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 76 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index f6ec9ad15..2fbe8d2bf 100644 --- a/NEWS +++ b/NEWS @@ -51,6 +51,13 @@ New in spot 2.10.6.dev (not yet released) include each other, and are used to show a regression (in speed) present in Spot 2.10.x and fixed in 2.11. + - genltl learned --eil-gsi to generate a familly a function whose + translation and simplification used to be very slow. In particular + + genltl --eil-gsi=23 | ltlfilt --from-ltlf | ltl2tgba + + was reported as taking 9 days. This is now instantaneous. + Library: - The new function suffix_operator_normal_form() implements @@ -149,7 +156,8 @@ New in spot 2.10.6.dev (not yet released) and q₃ can be merged (taking the union of their outgoing transitions). This is cheap function is automatically called by spot::translate() after translation of a formula to GBA, before - further simplification. + further simplification. This was introduced to help with automata + produced from formulas output by "genltl --eil-gsi" (see above). - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been diff --git a/bin/genltl.cc b/bin/genltl.cc index 6c632de7a..6393024c2 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015-2019 Laboratoire de Recherche et +// Copyright (C) 2012, 2013, 2015-2019, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -84,6 +84,8 @@ static const argp_option options[] = { "eh-patterns", gen::LTL_EH_PATTERNS, "RANGE", OPTION_ARG_OPTIONAL, "Etessami and Holzmann [Concur'00] patterns " "(range should be included in 1..12)", 0 }, + { "eil-gsi", gen::LTL_EIL_GSI, "RANGE", 0, + "G[0..n]((a S b) -> c) rewritten using future operators", 0 }, { "fxg-or", gen::LTL_FXG_OR, "RANGE", 0, "F(p0 | XG(p1 | XG(p2 | ... XG(pn))))", 0}, { "gf-equiv", gen::LTL_GF_EQUIV, "RANGE", 0, diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc index 3f63b07e7..10841e820 100644 --- a/spot/gen/formulas.cc +++ b/spot/gen/formulas.cc @@ -1281,6 +1281,21 @@ namespace spot } } + // G[0..n]((a S b) -> c) rewritten using future operators, + // from Edmond Irani Liu (EIL). GSI stands for "Globally Since Implies." 
+ static formula eil_gsi(int n, std::string a, std::string b, std::string c) + { + formula fa = formula::ap(a); + formula fb = formula::ap(b); + formula res = fb; + for (int i = 1; i <= n; ++i) + { + formula tmp = formula::And({formula::strong_X(i, fa), res}); + res = formula::Or({formula::strong_X(i, fb), tmp}); + } + return formula::Implies(res, formula::strong_X(n, formula::ap(c))); + } + formula ltl_pattern(ltl_pattern_id pattern, int n, int m) { if (n < 0) @@ -1317,6 +1332,8 @@ namespace spot return dac_pattern(n); case LTL_EH_PATTERNS: return eh_pattern(n); + case LTL_EIL_GSI: + return eil_gsi(n, "a", "b", "c"); case LTL_FXG_OR: return FXG_or_n("p", n); case LTL_GF_EQUIV: @@ -1418,6 +1435,7 @@ namespace spot "ccj-beta-prime", "dac-patterns", "eh-patterns", + "eil-gsi", "fxg-or", "gf-equiv", "gf-equiv-xn", @@ -1485,6 +1503,7 @@ namespace spot return 55; case LTL_EH_PATTERNS: return 12; + case LTL_EIL_GSI: case LTL_FXG_OR: case LTL_GF_EQUIV: case LTL_GF_EQUIV_XN: @@ -1554,6 +1573,7 @@ namespace spot case LTL_CCJ_BETA_PRIME: case LTL_DAC_PATTERNS: case LTL_EH_PATTERNS: + case LTL_EIL_GSI: case LTL_FXG_OR: case LTL_GF_EQUIV: case LTL_GF_EQUIV_XN: diff --git a/spot/gen/formulas.hh b/spot/gen/formulas.hh index ac5974e48..ef5a0d850 100644 --- a/spot/gen/formulas.hh +++ b/spot/gen/formulas.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2019 Laboratoire de Recherche et Developpement de -// l'EPITA (LRDE). +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +// Developpement de l'EPITA (LRDE). // // This file is part of Spot, a model checking library. // @@ -56,6 +56,8 @@ namespace spot /// 12 formulas from Etessami and Holzmann. /// \cite etessami.00.concur LTL_EH_PATTERNS, + /// Familly sent by Edmond Irani Liu + LTL_EIL_GSI, /// `F(p0 | XG(p1 | XG(p2 | ... XG(pn))))` LTL_FXG_OR, /// `(GFa1 & GFa2 & ... & GFan) <-> GFz` diff --git a/tests/core/genltl.test b/tests/core/genltl.test index d943c4cae..622950f65 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -269,3 +269,43 @@ diff range1.ltl range2.ltl genltl --sb-patterns=1..20 | ltlfilt -v --nth 10..20 > range1.ltl genltl --sb-patterns=1..9 > range2.ltl diff range1.ltl range2.ltl + + +# Edmond Irani Liu sent a bug report where formula 23 in this series +# took 9 days to produce, despite the small size of the resulting +# automaton. I (ADL) later found this to be caused by simulation +# applied on a non-deterministic automaton with many non-deterministic +# choices going to state that simulate one another, which in turn lead +# to massive slowdown of the minato_isop algorithm. As a workaround, +# I introduced delay_branching_here(), a cheap function that is called +# before simplification. In this case, this is enough to determinize +# the automaton, simplifying simulation-based reduction greatly. +genltl --eil-gsi=1..25 | ltlfilt --from-ltlf | ltl2tgba --stats=%s,%e >output +cat >expected < Date: Thu, 22 Sep 2022 15:44:18 +0200 Subject: [PATCH 141/606] translate: add a branch-post option * spot/twaalgos/translate.cc, spot/twaalgos/translate.hh: Here. * NEWS, bin/spot-x.cc: Mention it. * tests/core/genltl.test: Test it. --- NEWS | 5 +++++ bin/spot-x.cc | 5 +++++ spot/twaalgos/translate.cc | 13 +++++++++++-- spot/twaalgos/translate.hh | 3 ++- tests/core/genltl.test | 7 ++++++- 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index 2fbe8d2bf..4268ab81a 100644 --- a/NEWS +++ b/NEWS @@ -159,6 +159,11 @@ New in spot 2.10.6.dev (not yet released) further simplification. 
This was introduced to help with automata produced from formulas output by "genltl --eil-gsi" (see above). + - spot::postproc has new configuration variable branch-post that + can be used to control the use of branching-postponement (diabled + by default) or delayed-branching (see above, enabled by default). + See the spot-x(7) man page for details. + - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been compiled with --enable-pthread. Currently, only diff --git a/bin/spot-x.cc b/bin/spot-x.cc index a653fc926..908cbb98a 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -50,6 +50,11 @@ implication checks for formula simplifications. Defaults to 64.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ as product or sum of subformulas.") }, + { DOC("branch-prop", "Set to 0 to disable branching-postponement \ +(done during translation, may create more states) and delayed-branching \ +(almost similar, but done after translation to only remove states). \ +Set to 1 to force branching-postponement, and to 2 \ +to force delayed-branching. By default delayed-branching is used.") }, { DOC("comp-susp", "Set to 1 to enable compositional suspension, \ as described in our SPIN'13 paper (see Bibliography below). Set to 2, \ to build only the skeleton TGBA without composing it. Set to 0 (the \ diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index a5a84a10b..cd1e2aa63 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -65,6 +65,7 @@ namespace spot int tls_max_states = opt->get("tls-max-states", 64); tls_max_states_ = std::max(0, tls_max_states); exprop_ = opt->get("exprop", -1); + branchpost_ = opt->get("branch-post", -1); } void translator::build_simplifier(const bdd_dict_ptr& dict) @@ -399,10 +400,18 @@ namespace spot bool exprop = unambiguous || (level_ == postprocessor::High && exprop_ != 0) || exprop_ > 0; + // branch-post: 1 == force branching postponement + // 0 == disable branching post. and delay_branching + // 2 == force delay_branching + // -1 == auto (delay_branching) + // Some quick experiments suggests that branching postponement + // can produce larger automata on non-obligations formulas, and + // that even on obligation formulas, delay_branching is faster. + bool bpost = branchpost_ == 1; aut = ltl_to_tgba_fm(r, simpl_->get_dict(), exprop, - true, false, false, nullptr, nullptr, + true, bpost, false, nullptr, nullptr, unambiguous); - if (delay_branching_here(aut)) + if (!bpost && branchpost_ != 0 && delay_branching_here(aut)) { aut->purge_unreachable_states(); aut->merge_edges(); diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index 9dc6b12d2..d17c917b2 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2018, 2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -154,6 +154,7 @@ namespace spot bool gf_guarantee_ = true; bool gf_guarantee_set_ = false; bool ltl_split_; + int branchpost_ = -1; unsigned tls_max_states_ = 0; int exprop_; const option_map* opt_; diff --git a/tests/core/genltl.test b/tests/core/genltl.test index 622950f65..71b1ddf77 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -280,7 +280,8 @@ diff range1.ltl range2.ltl # I introduced delay_branching_here(), a cheap function that is called # before simplification. In this case, this is enough to determinize # the automaton, simplifying simulation-based reduction greatly. -genltl --eil-gsi=1..25 | ltlfilt --from-ltlf | ltl2tgba --stats=%s,%e >output +genltl --eil-gsi=1..25 | ltlfilt --from-ltlf > formulas.ltl +ltl2tgba -F formulas.ltl --stats=%s,%e >output cat >expected <expected <output +diff expected output From 383128d9835cc672b2638ceec9dcd6e65fbd2a7d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 22 Sep 2022 16:19:07 +0200 Subject: [PATCH 142/606] * doc/tl/tl.tex: Fix a couple of typos detected by ispell. --- doc/tl/tl.tex | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 2c0599f82..b6268d9cd 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -395,7 +395,7 @@ following Boolean operators: (allowing better compatibility with Wring and VIS) may only used in temporal formulas. Boolean expressions that occur inside SERE (see Section~\ref{sec:sere}) may not use this form because the $\STARALT$ - symbol is used as the Kleen star.} + symbol is used as the Kleene star.} Additionally, an atomic proposition $a$ can be negated using the syntax \samp{$a$=0}, which is equivalent to \samp{$\NOT a$}. Also @@ -600,7 +600,7 @@ the source. It can mean either ``\textit{Sequential Extended Regular ``\textit{Semi-Extended Regular Expression}''~\citep{eisner.08.hvc}. In any case, the intent is the same: regular expressions with traditional operations (union `$\OR$', concatenation `$\CONCAT$', -Kleen star `$\STAR{}$') are extended with operators such as +Kleene star `$\STAR{}$') are extended with operators such as intersection `$\ANDALT$', and fusion `$\FUSION$'. Any Boolean formula (section~\ref{def:boolform}) is a SERE. SERE can @@ -638,7 +638,7 @@ denote arbitrary SERE. \end{tabular} \end{center} -\footnotetext{\emph{Non-Length-Matching} interesction.} +\footnotetext{\emph{Non-Length-Matching} intersection.} The character \samp{\$} or the string \samp{inf} can also be used as value for $\mvar{j}$ in the above operators to denote an unbounded @@ -1069,7 +1069,7 @@ psl2ba, Modella, and NuSMV all have $\U$ and $\R$ as left-associative, while Goal (hence Büchi store), LTL2AUT, and LTL2Büchi (from JavaPathFinder) have $\U$ and $\R$ as right-associative. Vis and LBTT have these two operators as non-associative (parentheses required). -Similarly the tools do not aggree on the associativity of $\IMPLIES$ +Similarly the tools do not agree on the associativity of $\IMPLIES$ and $\EQUIV$: some tools handle both operators as left-associative, or both right-associative, other have only $\IMPLIES$ as right-associative. @@ -1429,7 +1429,7 @@ $\NOT$ operator. \end{align*} Note that the above rules include the ``unabbreviation'' of operators -``$\EQUIV$'', ``$\IMPLIES$'', and ``$\XOR$'', correspondings to the +``$\EQUIV$'', ``$\IMPLIES$'', and ``$\XOR$'', corresponding to the rules \texttt{"ei\^"} of function `\verb=unabbreviate()= as described in Section~\ref{sec:unabbrev}. 
Therefore it is never necessary to apply these abbreviations before or after @@ -2097,3 +2097,14 @@ $f_1\AND f_2$ & \bor{f_1}{g}{f_2}{g} & & & %%% TeX-master: t %%% coding: utf-8 %%% End: + +% LocalWords: tabu Alexandre Duret Lutz toc subsequence Kripke unary +% LocalWords: LTL GFa INISHED ZX FX cccccrl UTF syntaxes disjunction +% LocalWords: VIS Kleene overline overbar ary cccrl EF sep FB LTLf +% LocalWords: rewritings TSLF NLM iter un SVA PSL SEREs DFA ccccc ba +% LocalWords: SystemVerilog clc ltl psl Modella NuSMV Büchi AUT Vis +% LocalWords: JavaPathFinder LBTT AST subtrees boolean nenoform lbt +% LocalWords: eword nn LBT's automata subformulas ottom unabbreviate +% LocalWords: Unabbreviations ei GRW RW WR unabbreviator simplifier +% LocalWords: tl unabbreviation indeterminism dnf cnf SNF rl iff BDD +% LocalWords: subformula From 51caa5588e986ed860df1527be1a8a2c8b6eec8a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 22 Sep 2022 17:48:56 +0200 Subject: [PATCH 143/606] update gitlab references As LRDE is being renamed LRE, gitlab is one of the first URL to migrate. The old URL is still supported, but we want to only use the new one eventually. * .dir-locals.el, .gitlab-ci.yml, HACKING, NEWS, doc/org/concepts.org, doc/org/install.org, doc/org/setup.org, elisp/Makefile.am, elisp/hoa-mode.el, tests/ltsmin/README: Update to the new gitlab URL. --- .dir-locals.el | 2 +- .gitlab-ci.yml | 46 ++++++++++++++++++++++---------------------- HACKING | 4 ++-- NEWS | 2 +- doc/org/concepts.org | 6 +++--- doc/org/install.org | 6 +++--- doc/org/setup.org | 2 +- elisp/Makefile.am | 6 +++--- elisp/hoa-mode.el | 4 ++-- tests/ltsmin/README | 2 +- 10 files changed, 40 insertions(+), 40 deletions(-) diff --git a/.dir-locals.el b/.dir-locals.el index 7bc423371..91c287367 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -5,7 +5,7 @@ (bug-reference-bug-regexp . "\\(?:[Ff]ix\\(es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)") (bug-reference-url-format - . "https://gitlab.lrde.epita.fr/spot/spot/issues/%s") + . "https://gitlab.lre.epita.fr/spot/spot/issues/%s") (mode . bug-reference) (magit-branch-adjust-remote-upstream-alist ("origin/next" . "/")))) (c++-mode . ((c-default-style . 
"gnu") diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c381793b0..4a94ebfce 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,7 +19,7 @@ debian-stable-gcc: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable script: - autoreconf -vfi - ./configure --enable-max-accsets=256 --enable-pthread @@ -41,7 +41,7 @@ make-dist: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - autoreconf -vfi - ./configure --disable-static --enable-doxygen @@ -62,7 +62,7 @@ debian-unstable-gcc-coverage: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - autoreconf -vfi - ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen @@ -96,7 +96,7 @@ debian-unstable-gcc-pypy: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz @@ -116,7 +116,7 @@ debian-gcc-snapshot: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - export PATH="/usr/lib/gcc-snapshot/bin:$PATH" LD_LIBRARY_PATH="/usr/lib/gcc-snapshot/lib:$LD_LIBRARY_PATH" - autoreconf -vfi @@ -139,7 +139,7 @@ alpine-gcc: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/alpine + image: gitlab-registry.lre.epita.fr/spot/buildenv/alpine script: - autoreconf -vfi - ./configure @@ -158,7 +158,7 @@ arch-clang: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch + image: gitlab-registry.lre.epita.fr/spot/buildenv/arch script: - autoreconf -vfi - ./configure --prefix ~/install_dir CC='clang -Qunused-arguments' CXX='clang++ -Qunused-arguments' --enable-devel --enable-c++20 --enable-doxygen @@ -181,7 +181,7 @@ arch-gcc-glibcxxdebug: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch + image: gitlab-registry.lre.epita.fr/spot/buildenv/arch script: - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz @@ -210,7 +210,7 @@ mingw-shared: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz @@ -237,7 +237,7 @@ mingw-static: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz @@ -267,11 +267,11 @@ debpkg-stable: - next - stable script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable - vol=spot-stable-$CI_COMMIT_SHA - docker volume create $vol - exitcode=0 - - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? + - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? - docker cp helper-$vol:/build/result _build_stable || exitcode=$? 
- docker rm helper-$vol || exitcode=$? - docker volume rm $vol || exitcode=$? @@ -295,11 +295,11 @@ debpkg-stable-i386: tags: ["x86"] needs: ["debpkg-stable"] script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable - vol=spot-stable-$CI_COMMIT_SHA - docker volume create $vol - exitcode=0 - - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? + - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? - docker cp _build_stable/. helper-$vol:/build/result || exitcode=$? - rm -rf _build_stable - docker start -a helper-$vol || exitcode=$? @@ -322,11 +322,11 @@ debpkg-unstable: - /-deb$/ - next script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian - vol=spot-unstable-$CI_COMMIT_SHA - docker volume create $vol - exitcode=0 - - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? + - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? - docker cp helper-$vol:/build/result _build_unstable || exitcode=$? - docker rm helper-$vol || exitcode=$? - docker volume rm $vol || exitcode=$? @@ -348,11 +348,11 @@ debpkg-unstable-i386: tags: ["x86"] needs: ["debpkg-unstable"] script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386 + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 - vol=spot-unstable-$CI_COMMIT_SHA - docker volume create $vol - exitcode=0 - - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? + - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? - docker cp _build_unstable/. helper-$vol:/build/result || exitcode=$? - rm -rf _build_unstable - docker start -a helper-$vol || exitcode=$? 
@@ -374,7 +374,7 @@ rpm-pkg: - master - next - stable - image: gitlab-registry.lrde.epita.fr/spot/buildenv/fedora + image: gitlab-registry.lre.epita.fr/spot/buildenv/fedora script: - autoreconf -vfi - ./configure @@ -425,8 +425,8 @@ publish-stable: - ls -l - tgz=`ls spot-*.tar.* | head -n 1` - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac - - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline - - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lrde.epita.fr/spot/spot/" + - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline + - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/" - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" publish-unstable: @@ -443,8 +443,8 @@ publish-unstable: - cd _build_unstable - ls -l - dput lrde *.changes - - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline - - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lrde.epita.fr/api/v4/projects/181/trigger/pipeline + - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline + - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline raspbian: stage: build diff --git a/HACKING b/HACKING index de461376b..f2cf27e8c 100644 --- a/HACKING +++ b/HACKING @@ -5,11 +5,11 @@ Bootstraping from the GIT repository Spot's gitlab page is at - https://gitlab.lrde.epita.fr/spot/spot + https://gitlab.lre.epita.fr/spot/spot The GIT repository can be cloned with - git clone https://gitlab.lrde.epita.fr/spot/spot.git + git clone https://gitlab.lre.epita.fr/spot/spot.git Some files in SPOT's source tree are generated. They are distributed so that users do not need to install tools to rebuild them, but we diff --git a/NEWS b/NEWS index 4268ab81a..262414d67 100644 --- a/NEWS +++ b/NEWS @@ -1602,7 +1602,7 @@ New in spot 2.6.2 (2018-09-28) - We no longer distribute the Python-based CGI script + javascript code for the online translator. Its replacement has its own - repository: https://gitlab.lrde.epita.fr/spot/spot-web-app/ + repository: https://gitlab.lre.epita.fr/spot/spot-web-app/ Library: diff --git a/doc/org/concepts.org b/doc/org/concepts.org index a8fab8b65..64f982eb8 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -1022,7 +1022,7 @@ layers. dynamic libraries that [[http://fmt.cs.utwente.nl/tools/ltsmin/][LTSmin]] uses to represent state-spaces. It currently supports libraries generated from Promela models using SpinS or a patched version of DiVinE, but you have to install - those third-party tools first. See [[https://gitlab.lrde.epita.fr/spot/spot/blob/next/tests/ltsmin/README][=tests/ltsmin/README=]] + those third-party tools first. See [[https://gitlab.lre.epita.fr/spot/spot/blob/next/tests/ltsmin/README][=tests/ltsmin/README=]] for details. - In addition to the C++17 API, we also provide Python bindings for =libspotgen=, =libspotltsmin=, =libbddx=, and most of =libspot=. @@ -1034,8 +1034,8 @@ layers. 
distributed with the rest of Spot, their source-code is publicly available (in case you want to contribute or run a local version). The [[https://spot-sandbox.lrde.epita.fr/][=spot-sandbox=]] website runs from a Docker container whose - configuration can be found in [[https://gitlab.lrde.epita.fr/spot/sandbox/tree/master=][this repository]]. The client and - server parts of the [[https://spot.lrde.epita.fr/app/][online LTL translator]] can be found in [[https://gitlab.lrde.epita.fr/spot/spot-web-app/][this + configuration can be found in [[https://gitlab.lre.epita.fr/spot/sandbox/tree/master=][this repository]]. The client and + server parts of the [[https://spot.lrde.epita.fr/app/][online LTL translator]] can be found in [[https://gitlab.lre.epita.fr/spot/spot-web-app/][this repository]]. * Automaton property flags diff --git a/doc/org/install.org b/doc/org/install.org index a5759da17..a24134e42 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -14,7 +14,7 @@ The latest release of Spot is version {{{LASTRELEASE}}}: - {{{LASTTARBALL}}} (see also the {{{LASTNEWS}}}) Past releases can be found [[https://www.lrde.epita.fr/dload/spot/][in the same directory]]. If you are -interested in /future/ releases, you can always peek at the [[https://gitlab.lrde.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=debian-stable-gcc][last +interested in /future/ releases, you can always peek at the [[https://gitlab.lre.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=make-dist][last successful development build]]. ** Requirements @@ -162,11 +162,11 @@ the (working) code that should be part of the next major release. To clone the git repository, use #+BEGIN_SRC sh -git clone https://gitlab.lrde.epita.fr/spot/spot.git +git clone https://gitlab.lre.epita.fr/spot/spot.git #+END_SRC This should put you on the =next= branch by default. From there, read -the [[https://gitlab.lrde.epita.fr/spot/spot/blob/next/HACKING][HACKING]] file that should be at the top of your cloned repository: +the [[https://gitlab.lre.epita.fr/spot/spot/blob/next/HACKING][HACKING]] file that should be at the top of your cloned repository: it lists all the tools you should install before attempting to compile the source tree. diff --git a/doc/org/setup.org b/doc/org/setup.org index 52aa02639..d68521b7b 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -4,7 +4,7 @@ #+MACRO: SPOTVERSION 2.10.6 #+MACRO: LASTRELEASE 2.10.6 #+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.6.tar.gz][=spot-2.10.6.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-6/NEWS][summary of the changes]] +#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-10-6/NEWS][summary of the changes]] #+MACRO: LASTDATE 2022-05-18 #+ATTR_HTML: :id spotlogo diff --git a/elisp/Makefile.am b/elisp/Makefile.am index efdc604f5..c67a969e8 100644 --- a/elisp/Makefile.am +++ b/elisp/Makefile.am @@ -1,6 +1,6 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2015, 2016, 2017, 2018 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) 2015-2018, 2022 Laboratoire de Recherche et +## Développement de l'Epita (LRDE). ## ## This file is part of Spot, a model checking library. 
## @@ -19,7 +19,7 @@ EXTRA_DIST = hoa-mode.el -GIT = https://gitlab.lrde.epita.fr/spot/emacs-modes/raw/master/ +GIT = https://gitlab.lre.epita.fr/spot/emacs-modes/raw/master/ .PHONY: update-el update-el: diff --git a/elisp/hoa-mode.el b/elisp/hoa-mode.el index 9083b529d..58730b971 100644 --- a/elisp/hoa-mode.el +++ b/elisp/hoa-mode.el @@ -1,10 +1,10 @@ ;;; hoa-mode.el --- Major mode for the HOA format -*- lexical-binding: t -*- -;; Copyright (C) 2015, 2017, 2019 Alexandre Duret-Lutz +;; Copyright (C) 2015, 2017, 2019, 2022 Alexandre Duret-Lutz ;; Author: Alexandre Duret-Lutz ;; Maintainer: Alexandre Duret-Lutz -;; URL: https://gitlab.lrde.epita.fr/spot/emacs-modes +;; URL: https://gitlab.lre.epita.fr/spot/emacs-modes ;; Keywords: major-mode, automata, convenience ;; Created: 2015-11-13 diff --git a/tests/ltsmin/README b/tests/ltsmin/README index 6aaf5bba4..c3f2696d6 100644 --- a/tests/ltsmin/README +++ b/tests/ltsmin/README @@ -31,7 +31,7 @@ Installation of DiVinE Use the following commands to compile and install the patched version of DiVinE. - git clone https://gitlab.lrde.epita.fr/spot/divine-ltsmin-deb + git clone https://gitlab.lre.epita.fr/spot/divine-ltsmin-deb cd divine-ltsmin-deb mkdir _build && cd _build cmake .. -DMURPHI=OFF -DHOARD=OFF -DGUI=OFF -DRX_PATH= -DCMAKE_INSTALL_PREFIX=$HOME/usr From 0521901e9db006ceef1c896cb7abe2506a6a288c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 23 Sep 2022 09:42:15 +0200 Subject: [PATCH 144/606] revert c45ff0c94 and add test case showing why * bin/ltlsynt.cc: Revert c45ff0c94. Also fix documentation of exit status. * tests/core/ltlsynt2.test: New file. * tests/Makefile.am: Add it. --- bin/ltlsynt.cc | 10 ++++-- tests/Makefile.am | 1 + tests/core/ltlsynt2.test | 77 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 3 deletions(-) create mode 100755 tests/core/ltlsynt2.test diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 630ccd629..44c55ef54 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -160,8 +160,8 @@ static const struct argp_child children[] = const char argp_program_doc[] = "\ Synthesize a controller from its LTL specification.\v\ Exit status:\n\ - 0 if the input problem is realizable\n\ - 1 if the input problem is not realizable\n\ + 0 if all input problems were realizable\n\ + 1 if at least one input problem was not realizable\n\ 2 if any error has been reported"; static std::optional> all_output_aps; @@ -279,6 +279,10 @@ namespace spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; } + // If filename is passed, it is printed instead of the formula. We + // use that when processing games since we have no formula to print. + // It would be cleaner to have two columns: one for location (that's + // filename + line number if known), and one for formula (if known). static void print_csv(const spot::formula& f, const char* filename = nullptr) { @@ -705,7 +709,7 @@ namespace } if (opt_csv) - print_csv(f, filename); + print_csv(f); return res; } diff --git a/tests/Makefile.am b/tests/Makefile.am index 9570f7dcd..2384f115e 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -343,6 +343,7 @@ TESTS_twa = \ core/parity2.test \ core/pgsolver.test \ core/ltlsynt.test \ + core/ltlsynt2.test \ core/ltlsynt-pgame.test \ core/syfco.test \ core/rabin2parity.test \ diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test new file mode 100755 index 000000000..dbb754d92 --- /dev/null +++ b/tests/core/ltlsynt2.test @@ -0,0 +1,77 @@ +#! 
/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# More checks for ltlfilt + +. ./defs || exit 1 + +set -e + +cat >formulas.ltl < Xo1) +F(i1 xor i2) <-> F(o1) +i1 <-> F(o1 xor o2) +F(i1) <-> G(o2) +EOF + +ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q --csv=out.csv &&\ + exit 2 +test $? -eq 1 || exit 2 + +cat >test.py <expected < Xo1),lar,1,3 +F(i1 xor i2) <-> Fo1,lar,1,2 +i1 <-> F(o1 xor o2),lar,1,3 +Fi1 <-> Go2,lar,0,0 +o1 & F(i1 <-> o2),lar,1,4 +EOF + +diff filtered.csv expected + +# ltlfilt should be able to read the first columns +mv filtered.csv input.csv +ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q && exit 2 +test $? -eq 1 +$PYTHON test.py +diff filtered.csv expected + +grep -v 0,0 filtered.csv >input.csv +ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q || exit 2 +$PYTHON test.py +diff filtered.csv input.csv From 3cd43f618c6cabab2762bf8893238f249dc88500 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 30 Sep 2022 13:36:41 +0200 Subject: [PATCH 145/606] test: fix running on python test in OpenBSD * tests/run.in: Add LD_LIBRARY_PATH. 
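(DYLD_LIBRARY_PATH is only honored by the macOS dynamic linker; ELF
systems such as OpenBSD locate the uninstalled shared libraries via
LD_LIBRARY_PATH instead, hence the added
"LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH" settings below.)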
--- tests/run.in | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/run.in b/tests/run.in index 3b9470bef..7502b88f8 100755 --- a/tests/run.in +++ b/tests/run.in @@ -46,7 +46,9 @@ PATH="@abs_top_builddir@/bin:$PATH" export PATH test -z "$1" && - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ srcdir="@srcdir@" @@ -109,10 +111,12 @@ case $1 in *.ipynb) PYTHONPATH=$pypath:$PYTHONPATH \ DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ PYTHONIOENCODING=utf-8:surrogateescape \ exec $PREFIXCMD @PYTHON@ @abs_srcdir@/python/ipnbdoctest.py "$@";; *.py) PYTHONPATH=$pypath:$PYTHONPATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ "$@";; *.test) @@ -121,6 +125,7 @@ case $1 in exec $PERL "$@";; *python*|*jupyter*|*pypy*) PYTHONPATH=$pypath:$PYTHONPATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD "$@";; *) From 27816ea4385cc9b3b33c50a8e5402b8a9f40d58e Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Sat, 3 Sep 2022 14:04:41 +0200 Subject: [PATCH 146/606] synthesis: Fix for implication decomposition * spot/twaalgos/synthesis.cc: here --- spot/twaalgos/synthesis.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 6fb126ff8..e1e4e1780 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1737,7 +1737,7 @@ namespace // anonymous for subsformula std::vector children; for (auto fi : f) children.push_back( - extract_and(fi, outs, can_extract_impl, form2props)); + extract_and(fi, outs, false, form2props)); return formula::And(children); } if (f.is(op::Not)) From 4d2c096ec0ce9fee9701ad26af1ed66d1e552341 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Sat, 3 Sep 2022 14:13:23 +0200 Subject: [PATCH 147/606] dot: fix 'g' with a Mealy machine * spot/twaalgos/dot.cc: here * tests/python/mealy.py: add test --- spot/twaalgos/dot.cc | 9 ++++++--- tests/python/mealy.py | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/dot.cc b/spot/twaalgos/dot.cc index 70b707edc..19a638b9e 100644 --- a/spot/twaalgos/dot.cc +++ b/spot/twaalgos/dot.cc @@ -1084,9 +1084,12 @@ namespace spot { if (aut->acc().is_t()) opt_show_acc_ = false; - bdd out = *p; - opt_mealy_output_ = out; - opt_mealy_ = true; + if (opt_showlabel_) + { + bdd out = *p; + opt_mealy_output_ = out; + opt_mealy_ = true; + } } incomplete_ = aut->get_named_prop>("incomplete-states"); diff --git a/tests/python/mealy.py b/tests/python/mealy.py index 71c7739f9..7a884235e 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -611,3 +611,18 @@ State: 1 res = spot.reduce_mealy(aut, True) tc.assertEqual(res.to_str(), exp) + +exp = """digraph "" { + rankdir=LR + node [shape="circle"] + I [label="", style=invis, width=0] + I -> 0 + 0 [label="0"] + 0 -> 1 [label=""] + 0 -> 1 [label=""] + 0 -> 1 [label=""] + 1 [label="1"] + 1 -> 1 [label=""] +} +""" +tc.assertEqual(res.to_str("dot", "g"), exp) From 74b752eb79d4846df37fef618ee2de335f4a4423 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 3 Oct 2022 09:15:55 +0200 Subject: [PATCH 148/606] * .gitlab-ci.yml (debian-gcc-snapshot): Build from tarball. 
--- .gitlab-ci.yml | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4a94ebfce..eeb07acf7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -110,8 +110,14 @@ debian-unstable-gcc-pypy: - spot-*/tests/*/*.log - spot-*/*.log +# With emacs now using gcc for on-the-fly compilation, +# we cannot rebuild the documentation using gcc-snapshot. So we start +# from the tarball instead. debian-gcc-snapshot: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true only: - branches except: @@ -119,7 +125,9 @@ debian-gcc-snapshot: image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - export PATH="/usr/lib/gcc-snapshot/bin:$PATH" LD_LIBRARY_PATH="/usr/lib/gcc-snapshot/lib:$LD_LIBRARY_PATH" - - autoreconf -vfi + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - cd spot-$VERSION - ./configure --with-included-ltdl CXX='g++' - make - make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl' @@ -127,11 +135,8 @@ debian-gcc-snapshot: artifacts: when: always paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log - - doc/spot.html/ - - doc/userdoc/ - - ./*.tar.gz + - spot-*/tests/*/*.log + - spot-*/*.log alpine-gcc: stage: build From fa4500a8d3b6330a129e712fd66ba1ce31c69c9b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 3 Oct 2022 11:17:51 +0200 Subject: [PATCH 149/606] * tests/python/ipnbdoctest.py: Also retry if Kernel does not respond. --- tests/python/ipnbdoctest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/python/ipnbdoctest.py b/tests/python/ipnbdoctest.py index c6bfcf134..f6ce3562e 100755 --- a/tests/python/ipnbdoctest.py +++ b/tests/python/ipnbdoctest.py @@ -364,7 +364,8 @@ if __name__ == '__main__': except RuntimeError as e: # If the Kernel dies, try again. It seems we have spurious # failures when multiple instances of jupyter start in parallel. - if 'Kernel died' in str(e): + stre = str(e) + if 'Kernel died' in stre or "Kernel didn't respond" in stre: tries -= 1 if tries: s = random.randint(1, 5) From 35b4cb89fc9a483944813e5eddaa7a5370638b43 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 3 Oct 2022 16:26:33 +0200 Subject: [PATCH 150/606] add test for previous decomposition patch * tests/core/ltlsynt.test: Here. --- tests/core/ltlsynt.test | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index b9dfac204..4a7595539 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1004,3 +1004,24 @@ digraph "" { } EOF diff res exp + + +# The following formula, generated from SPIReadManag.tlsf exhibited a bug +# in the decomposition. 
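+# The commands after these definitions run ltlsynt twice on the same
+# specification, once with --decomp=yes and once with --decomp=no, and
+# expect both runs to report REALIZABLE.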
+s1="G(!((!o15 & !((!o14 & o16) <-> (o14 & !o16))) <-> (o15 & !(o14 | o16)))" +s2=" & !((!o12 & !((!o11 & o13) <-> (o11 & !o13))) <-> (o12 & !(o11 | o13)))" +s3=" & !((o09 & !o10) <-> (!o09 & o10)) & !((o07 & !o08) <-> (!o07 & o08))" +s4=" & !((!o05 & !((!o04 & o06) <-> (o04 & !o06))) <-> (o05 & !(o04 | o06)))" +s5=" & !((!o02 & !((!o01 & o03) <-> (o01 & !o03))) <-> (o02 & !(o01 | o03))))" +s6=" & ((G!(i2 & i7) & G(o15 -> Fi3)) -> (Go09 & G(o14 <-> (i6 & !i7)) & " +s7="G(o07 <-> (i7 & i8)) & G((i7 & i8) -> (o11 U i3)) & GFo12 & G(o04 <-> " +s8="(i4 & i6)) & G(o05 <-> !(i4 & i6)) & G(o15 <-> (i7 & i8)) & G(i7 -> o02) & " +s9="G((!i7 & !(i1 & i2 & !i5 & i6)) -> o03) & G(o01 <-> (i1 & i2 & !i5 & i6))))" +s=$s1$s2$s3$s4$s5$s6$s7$s8$s9 +ltlsynt --decomp=yes -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >out +ltlsynt --decomp=no -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >>out +cat >expected < Date: Mon, 3 Oct 2022 16:48:03 +0200 Subject: [PATCH 151/606] * .dir-locals.el (bug-reference-bug-regexp): Fix first group. --- .dir-locals.el | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dir-locals.el b/.dir-locals.el index 91c287367..5de24fdfc 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -3,7 +3,7 @@ (require-final-newline . t) (mode . global-whitespace) (bug-reference-bug-regexp - . "\\(?:[Ff]ix\\(es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)") + . "\\(?1:\\(?:[Ff]ix\\(?:es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)\\)") (bug-reference-url-format . "https://gitlab.lre.epita.fr/spot/spot/issues/%s") (mode . bug-reference) From e907f114885faf36282dc90bddfd1a99e0fe4ed3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 3 Oct 2022 17:00:15 +0200 Subject: [PATCH 152/606] emptinesscheck: improve coverage of CVWY90 and SE05 * tests/core/randtgba.cc: Test the ar:form_stack variants. --- tests/core/randtgba.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/core/randtgba.cc b/tests/core/randtgba.cc index 7462f2c80..460bf9cd9 100644 --- a/tests/core/randtgba.cc +++ b/tests/core/randtgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2019 Laboratoire de Recherche et +// Copyright (C) 2008-2012, 2014-2019, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -69,9 +69,11 @@ const char* default_algos[] = { "Cou99abs", "CVWY90", "CVWY90(bsh=4K)", + "CVWY90(ar:from_stack)", "GV04", "SE05", "SE05(bsh=4K)", + "SE05(ar:from_stack)", "Tau03", "Tau03_opt", "Tau03_opt(condstack)", From 4ab51e1c88529e6869d863d9701efdf36992ed3a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 3 Oct 2022 17:40:45 +0200 Subject: [PATCH 153/606] toparity: cover more options * tests/python/toparity.py: Augment test cases. 
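For reference, a minimal usage sketch of the two paritization routes the
test compares (sketch only; it assumes the ACD construction is exposed to
Python as spot.acd_transform, as hinted by the "# acd_transform" entry in
the option list below):

  import spot
  aut = spot.translate('GFa -> GFb', 'det', 'G')
  opt = spot.to_parity_options()               # tune fields as in the test
  p1 = spot.to_parity(aut, search_ex=opt.search_ex)  # LAR/IAR-style paritization
  p2 = spot.acd_transform(aut)                 # ACD-based paritization
  assert spot.are_equivalent(aut, p1)
  assert spot.are_equivalent(aut, p2)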
--- tests/python/toparity.py | 136 ++++++++++++++++++++++++++++----------- 1 file changed, 98 insertions(+), 38 deletions(-) diff --git a/tests/python/toparity.py b/tests/python/toparity.py index ad9bc6e0b..ab5fbf314 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -49,26 +49,26 @@ no_option.propagate_col = False no_option.use_generalized_rabin = False acc_clean_search_opt = spot.to_parity_options() -no_option.search_ex = False -no_option.use_last = False -no_option.use_last_post_process = False -no_option.force_order = False -no_option.partial_degen = False -no_option.acc_clean = True -no_option.parity_equiv = False -no_option.tar = False -no_option.iar = True -no_option.lar_dfs = True -no_option.bscc = True -no_option.parity_prefix = False -no_option.parity_prefix_general = False -no_option.generic_emptiness = False -no_option.rabin_to_buchi = False -no_option.buchi_type_to_buchi = False -no_option.parity_type_to_parity = False -no_option.reduce_col_deg = False -no_option.propagate_col = False -no_option.use_generalized_rabin = False +acc_clean_search_opt.search_ex = False +acc_clean_search_opt.use_last = False +acc_clean_search_opt.use_last_post_process = False +acc_clean_search_opt.force_order = False +acc_clean_search_opt.partial_degen = False +acc_clean_search_opt.acc_clean = True +acc_clean_search_opt.parity_equiv = False +acc_clean_search_opt.tar = False +acc_clean_search_opt.iar = True +acc_clean_search_opt.lar_dfs = True +acc_clean_search_opt.bscc = True +acc_clean_search_opt.parity_prefix = False +acc_clean_search_opt.parity_prefix_general = False +acc_clean_search_opt.generic_emptiness = False +acc_clean_search_opt.rabin_to_buchi = False +acc_clean_search_opt.buchi_type_to_buchi = False +acc_clean_search_opt.parity_type_to_parity = False +acc_clean_search_opt.reduce_col_deg = False +acc_clean_search_opt.propagate_col = False +acc_clean_search_opt.use_generalized_rabin = False partial_degen_opt = spot.to_parity_options() partial_degen_opt.search_ex = False @@ -156,9 +156,29 @@ use_car_opt.reduce_col_deg = False use_car_opt.propagate_col = False use_car_opt.use_generalized_rabin = False -all_opt = spot.to_parity_options() -all_opt.pretty_print = True +default_opt = spot.to_parity_options() +all_opt = spot.to_parity_options() +all_opt.search_ex = True +all_opt.use_last = True +all_opt.use_last_post_process = True +all_opt.partial_degen = True +all_opt.acc_clean = True +all_opt.parity_equiv = True +all_opt.tar = True +all_opt.iar = True +all_opt.lar_dfs = True +all_opt.bscc = True +all_opt.parity_prefix = True +all_opt.parity_prefix_general = True +all_opt.generic_emptiness = True +all_opt.rabin_to_buchi = True +all_opt.buchi_type_to_buchi = True +all_opt.parity_type_to_parity = True +all_opt.reduce_col_deg = True +all_opt.propagate_col = True +all_opt.use_generalized_rabin = True +all_opt.pretty_print = True options = [ no_option, @@ -167,6 +187,7 @@ options = [ parity_equiv_opt, rab_to_buchi_opt, use_car_opt, + default_opt, all_opt, None, # acd_transform ] @@ -174,6 +195,8 @@ options = [ def test(aut, expected_num_states=[], full=True): for (opt, expected_num) in zip_longest(options, expected_num_states): + if type(expected_num) is str and expected_num == 'skip': + continue if opt is not None: p1 = spot.to_parity(aut, search_ex = opt.search_ex, @@ -296,7 +319,7 @@ State: 13 [0&1] 5 [!0&!1] 10 {0 1 3 5} [0&!1] 13 {1 3} ---END--"""), [32, 22, 23, 30, 33, 45, 22, 21]) +--END--"""), [30, 32, 23, 30, 33, 45, 22, 22, 21]) test(spot.automaton(""" HOA: v1 @@ -314,7 
+337,7 @@ State: 1 [0&!1] 1 {4} [!0&1] 1 {0 1 2 3} [!0&!1] 1 {0 3} ---END--"""), [6, 3, 3, 5, 5, 26, 3, 3]) +--END--"""), [5, 6, 3, 5, 5, 26, 3, 3, 3]) test(spot.automaton("""HOA: v1 States: 2 @@ -330,14 +353,7 @@ State: 0 State: 1 [0&1] 1 {2 3 4} [!0&!1] 0 {1 2} ---END--"""), [3, 2, 2, 9, 9, 10, 2, 2]) - -for i,f in enumerate(spot.randltl(10, 200)): - test(spot.translate(f, "det", "G"), full=(i<50)) - -for f in spot.randltl(5, 500): - test(spot.translate(f), full=False) - +--END--"""), [9, 3, 2, 9, 9, 10, 2, 2, 2]) test(spot.automaton(""" HOA: v1 @@ -370,7 +386,7 @@ State: 3 [!0&1] 2 {1 4} [0&1] 3 {0} --END-- -"""), [104, 6, 80, 23, 27, 17, 6, 5]) +"""), [23, 104, 80, 23, 27, 17, "skip", "skip", 5]) test(spot.automaton(""" HOA: v1 @@ -404,7 +420,7 @@ State: 4 [0&!1] 4 [0&1] 4 {1 2 4} --END-- -"""), [6, 6, 7, 9, 9, 10, 6, 6]) +"""), [9, 6, 7, 9, 9, 10, 6, 6, 6]) test(spot.automaton(""" HOA: v1 @@ -426,7 +442,7 @@ State: 1 [0&!1] 1 {2 3} [0&1] 1 {1 2 4} --END-- -"""), [3, 2, 2, 6, 6, 6, 2, 2]) +"""), [6, 3, 2, 6, 6, 6, 2, 2, 2]) # Tests both the old and new version of to_parity @@ -457,7 +473,7 @@ explicit-labels trans-acc --BODY-- State: 0 [0&1] 2 {4 5} [0&1] 4 {0 4} p = spot.to_parity_old(a, True) tc.assertEqual(p.num_states(), 22) tc.assertTrue(spot.are_equivalent(a, p)) -test(a, [6, 6, 7, 8, 6, 7, 6, 6]) +test(a, [8, 6, 7, 8, 6, 7, 6, 6, 6]) # Force a few edges to false, to make sure to_parity() is OK with that. for e in a.out(2): @@ -471,7 +487,7 @@ for e in a.out(3): p = spot.to_parity_old(a, True) tc.assertEqual(p.num_states(), 22) tc.assertTrue(spot.are_equivalent(a, p)) -test(a, [6, 6, 7, 8, 6, 7, 6, 6]) +test(a, [8, 6, 7, 8, 6, 7, 6, 6, 6]) for f in spot.randltl(4, 400): d = spot.translate(f, "det", "G") @@ -483,8 +499,52 @@ for f in spot.randltl(5, 2000): p = spot.to_parity_old(n, True) tc.assertTrue(spot.are_equivalent(n, p)) +for i,f in enumerate(spot.randltl(10, 200)): + test(spot.translate(f, "det", "G"), full=(i<50)) + +for f in spot.randltl(5, 500): + test(spot.translate(f), full=False) + # Issue #390. 
a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det') b = spot.to_parity_old(a, True) tc.assertTrue(a.equivalent_to(b)) -test(a, [7, 3, 3, 8, 8, 7, 3, 3]) +test(a, [8, 7, 3, 8, 8, 7, 3, 3, 3]) + +# owl-21.0 ltl2dra -f '(GFa -> GFb) & GF(c <-> Xc)' | autfilt -Hi | fmt +a = spot.automaton(""" +HOA: v1 name: "Automaton for ((((F(G(!a))) | (G(F(b))))) & (G(F(((c) <-> +(X(c)))))))" States: 11 Start: 0 AP: 3 "a" "b" "c" acc-name: Rabin 3 +Acceptance: 6 (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)) +properties: implicit-labels trans-acc complete deterministic --BODY-- +State: 0 0 {3} 0 {2 4} 3 {3} 3 {2 4} 1 0 {2 4} 2 4 {2 4} State: 1 0 0 {2 +4} 3 3 {2 4} 1 {5} 0 {2 4} 2 {5} 4 {2 4} State: 2 3 3 {2 4} 3 3 {2 4} +6 {1 5} 5 {1 2 4} 2 {1 5} 4 {1 2 4} State: 3 7 {1 3} 7 {1 2 4} 3 {1 3} +3 {1 2 4} 2 4 {2 4} 2 4 {2 4} State: 4 3 {3} 3 {2 4} 3 {3} 3 {2 4} 6 {1} +5 {1 2 4} 2 {1} 4 {1 2 4} State: 5 8 {3} 8 {2 4} 3 {3} 3 {2 4} 6 5 {2 +4} 2 4 {2 4} State: 6 8 8 {2 4} 3 3 {2 4} 6 {5} 5 {2 4} 2 {5} 4 {2 4} +State: 7 7 {3} 7 {2 4} 3 {3} 3 {2 4} 9 10 {2 4} 2 4 {2 4} State: 8 0 {1 +3} 0 {1 2 4} 3 {1 3} 3 {1 2 4} 6 5 {2 4} 2 4 {2 4} State: 9 7 7 {2 4} +3 3 {2 4} 1 {1 5} 0 {1 2 4} 2 {1 5} 4 {1 2 4} State: 10 7 {3} 7 {2 4} +3 {3} 3 {2 4} 1 {1} 0 {1 2 4} 2 {1} 4 {1 2 4} --END-- +""") +b = spot.iar_maybe(a) +tc.assertEqual(b.num_states(), 11) +tc.assertTrue(a.equivalent_to(b)) +test(a, [11, 11, 11, 11, 11, 11, 11, 11, 11]) + +a = spot.automaton(""" +HOA: v1 States: 10 Start: 0 AP: 2 "p0" "p1" acc-name: Rabin 4 Acceptance: +8 (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)) | (Fin(6) +& Inf(7)) properties: implicit-labels trans-acc complete deterministic +--BODY-- State: 0 2 {7} 7 {3} 2 {7} 3 State: 1 5 {0 3} 9 {3 4} 5 {0 3} +9 {3 4} State: 2 9 {1 6} 9 {1 6} 9 {1 6} 9 {1 6} State: 3 3 {4} 9 {0} +1 {4} 4 {5 6} State: 4 7 8 {1 5 7} 9 {3 7} 8 {1 5 7} State: 5 6 {4} 9 +{1 2 6} 6 {4} 9 {1 2 6} State: 6 1 {3 7} 1 {3 7} 1 {3 7} 1 {3 7} State: +7 1 {3 6} 8 {2} 1 {3 6} 8 {2} State: 8 8 {3 4 7} 3 {2} 8 {3 4 7} 3 {2} +State: 9 3 {4} 2 3 {4} 6 --END-- +""") +b = spot.iar_maybe(a) +tc.assertEqual(b.num_states(), 87) +tc.assertTrue(a.equivalent_to(b)) +test(a, [87, 91, 91, 87, 87, 87, 51, 51, 21]) From e867242cf619900f07ccc450389c247438e9d4e6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 4 Oct 2022 11:15:07 +0200 Subject: [PATCH 154/606] Update troubleshouting instruction for Python bindings For issue #512 * README: Update instructions. * configure.ac: Add some code to warn if Python files will be installed in a place that is not searched up by default. Add --with-pythondir support. * NEWS: Mention --with-pythondir. --- NEWS | 5 +++++ README | 40 +++++++++++++++++++++++++++------------- configure.ac | 25 +++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 13 deletions(-) diff --git a/NEWS b/NEWS index 262414d67..88c689bfc 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,11 @@ New in spot 2.10.6.dev (not yet released) Build: + - configure will now diagnose situation where Python bindings will + be installed in a directory that is not part of Python's search + path. A new configure option --with-pythondir can be used to + modify this installation path. + - A new configure option --enable-pthread enable the compilation of Spot with -pthread, and activate the parallel version of some algorithms. 
If Spot is compiled with -pthread enabled, any user diff --git a/README b/README index a0b7c9579..458da2d99 100644 --- a/README +++ b/README @@ -110,16 +110,16 @@ Spot follows the traditional `./configure && make && make check && make install' process. People unfamiliar with the GNU Build System should read the file INSTALL for generic instructions. -If you plan to use the Python binding, we recommend you use one -of the following --prefix options when calling configure: +If you plan to use the Python bindings, we recommend you use the +following --prefix options when calling configure: - --prefix /usr - --prefix /usr/local (the default) - --prefix ~/.local (if you do not have root permissions) + --prefix ~/.local -The reason is that all these locations are usually automatically -searched by Python. If you use a different prefix directory, you may -have to tune the PYTHONPATH environment variable. +The reason is that ~/.local/lib/python3.X/site-packages, where Spot's +Python bindings will be installed, is automatically searched by +Python. If you use a different prefix directory, you may have to tune +the PYTHONPATH environment variable, or use the --with-pythondir +option to specify different installation paths. In addition to its usual options, ./configure will accept some flags specific to Spot: @@ -250,17 +250,31 @@ To test the Python bindings, try running >>> import spot >>> print(spot.version()) -If you installed Spot with a prefix that is not one of those suggested -in the "Building and installing" section, it is likely that the above -import statement will fail to locate the spot package. You can show -the list of directories that are searched by Python using: +If you installed Spot with a prefix that is not searched by Python by +default it is likely that the above import statement will fail to +locate the spot package. You can show the list of directories that +are searched by Python using: % python3 >>> import sys >>> print(sys.path) And you can modify that list of searched directories using the -PYTHONPATH environment variable. +PYTHONPATH environment variable. Alternatively, you can instruct Spot +to install its Python files in one of those directory using the +--with-pythondir configure option. As an example, an issue in +distributions derived from Debian is that if you run + + ./configure && make && make install + +Python files get installed in /usr/local/lib/python3.X/site-packages +while Debian's version of Python only looks for them into +/usr/local/lib/python3.X/dist-packages instead. 
You can fix that by +instructing configure that you want packages installed into the right +directory instead: + + ./configure --with-pythondir=/usr/local/lib/python3.X/dist-packages \ + && make && make install To test if man pages can be found, simply try: diff --git a/configure.ac b/configure.ac index 2d6b4be1f..e2e16d63a 100644 --- a/configure.ac +++ b/configure.ac @@ -189,9 +189,14 @@ if test "x${enable_python:-yes}" = xyes; then AC_MSG_NOTICE([You may configure with --disable-python ]dnl [if you do not need Python bindings.]) adl_CHECK_PYTHON + + AC_ARG_WITH([pythondir], + [AS_HELP_STRING([--with-pythondir], [override the computed pythondir])], + [pythondir=$withval], []) fi + adl_ENABLE_DEBUG ad_GCC_OPTIM adl_NDEBUG @@ -290,3 +295,23 @@ case $VERSION:$enable_devel in echo '===================================================================' ;; esac + +case $enable_python in + yes) + pd=$pythondir + eval pd=$pd + eval pd=$pd + $PYTHON -c " +import sys +if '$pd' in sys.path: + exit() +else: + print('\nWARNING: Python bindings will be installed in $pd') + print(' however this path is not searched by default by $PYTHON.') + print('\n$PYTHON\'s sys.path contains the following paths:\n', + '\n'.join(sys.path)) + print('\nUse --with-pythondir=... if you wish ' + 'to change this installation path.') +" + ;; +esac From 344e01d4e2450bf0e7f1d891544a330d62786f46 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 5 Oct 2022 11:08:19 +0200 Subject: [PATCH 155/606] translate, postproc: improve parity output * spot/twaalgos/translate.cc: When producing Parity output, split LTL as we do in the Generic case. * spot/twaalgos/postproc.hh, spot/twaalgos/postproc.cc: Use acd_transform() and add an "acd" option to disable this. * bin/spot-x.cc, NEWS: Document this. * tests/core/genltl.test, tests/core/minusx.test, tests/core/parity2.test: Adjust test cases for improved outputs. --- NEWS | 11 +- bin/spot-x.cc | 3 + spot/twaalgos/postproc.cc | 31 ++-- spot/twaalgos/postproc.hh | 2 + spot/twaalgos/translate.cc | 20 +-- tests/core/genltl.test | 32 ++-- tests/core/minusx.test | 10 +- tests/core/parity2.test | 321 ++++++++++++++----------------------- 8 files changed, 189 insertions(+), 241 deletions(-) diff --git a/NEWS b/NEWS index 88c689bfc..3a2b8316b 100644 --- a/NEWS +++ b/NEWS @@ -164,11 +164,20 @@ New in spot 2.10.6.dev (not yet released) further simplification. This was introduced to help with automata produced from formulas output by "genltl --eil-gsi" (see above). - - spot::postproc has new configuration variable branch-post that + - spot::postprocessor has new configuration variable branch-post that can be used to control the use of branching-postponement (diabled by default) or delayed-branching (see above, enabled by default). See the spot-x(7) man page for details. + - spot::postprocessor is now using acd_transform() by default when + building parity automata. Setting option "acd=0" will revert + to using "to_parity()" instead. + + - When asked to build parity automata, spot::translator is now more + aggressively using LTL decomposition, as done in the Generic + acceptance case before paritizing the result. This results in + much smaller automata in many cases. + - spot::parallel_policy is an object that can be passed to some algorithm to specify how many threads can be used if Spot has been compiled with --enable-pthread. 
Currently, only diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 908cbb98a..d1a8f96f6 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -80,6 +80,9 @@ only if it is smaller than the original skeleton. This option is only \ used when comp-susp=1 and default to 1 or 2 depending on whether --small \ or --deterministic is specified.") }, { nullptr, 0, nullptr, 0, "Postprocessing options:", 0 }, + { DOC("acd", "Set to 1 (the default) to use paritize automata using \ +the alternatinc cycle decomposition. Set to 0 to use paritization based \ +on latest appearance record variants.") }, { DOC("scc-filter", "Set to 1 (the default) to enable \ SCC-pruning and acceptance simplification at the beginning of \ post-processing. Transitions that are outside of accepting SCC are \ diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 55feeb295..19e3d2b6c 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -40,6 +40,7 @@ #include #include #include +#include namespace spot { @@ -92,6 +93,7 @@ namespace spot merge_states_min_ = opt->get("merge-states-min", 128); wdba_det_max_ = opt->get("wdba-det-max", 4096); simul_trans_pruning_ = opt->get("simul-trans-pruning", 512); + acd_ = opt->get("acd", 1); if (sat_acc_ && sat_minimize_ == 0) sat_minimize_ = 1; // Dicho. @@ -250,7 +252,8 @@ namespace spot tmp = ensure_ba(tmp); if (want_parity) { - reduce_parity_here(tmp, COLORED_); + if (!acd_was_used_) + reduce_parity_here(tmp, COLORED_); parity_kind kind = parity_kind_any; parity_style style = parity_style_any; if ((type_ & ParityMin) == ParityMin) @@ -295,6 +298,8 @@ namespace spot bool via_gba = (type_ == Buchi) || (type_ == GeneralizedBuchi) || (type_ == Monitor); bool want_parity = type_ & Parity; + acd_was_used_ = false; + if (COLORED_ && !want_parity) throw std::runtime_error("postprocessor: the Colored setting only works " "for parity acceptance"); @@ -340,18 +345,26 @@ namespace spot !(type_ == Generic && PREF_ == Any && level_ == Low)) a = remove_alternation(a); + // If we do want a parity automaton, we can use to_parity(). + // However (1) degeneralization is faster if the input is + // GBA, and (2) if we want a deterministic parity automaton and the + // input is not deterministic, that is useless here. We need + // to determinize it first, and our deterministization + // function only deal with TGBA as input. if ((via_gba || (want_parity && !a->acc().is_parity())) && !a->acc().is_generalized_buchi()) { - // If we do want a parity automaton, we can use to_parity(). - // However (1) degeneralization is better if the input is - // GBA, and (2) if we want a deterministic parity automaton and the - // input is not deterministic, that is useless here. We need - // to determinize it first, and our deterministization - // function only deal with TGBA as input. 
if (want_parity && (PREF_ != Deterministic || is_deterministic(a))) { - a = to_parity(a); + if (acd_) + { + a = acd_transform(a, COLORED_); + acd_was_used_ = true; + } + else + { + a = to_parity(a); + } } else { diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 96128c531..f470dcf5b 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -270,6 +270,8 @@ namespace spot int simul_max_ = 4096; int merge_states_min_ = 128; int wdba_det_max_ = 4096; + bool acd_ = false; + bool acd_was_used_; }; /// @} } diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index cd1e2aa63..0f5e86cde 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -137,6 +137,9 @@ namespace spot twa_graph_ptr aut; twa_graph_ptr aut2 = nullptr; + bool split_hard = + type_ == Generic || (type_ & Parity) || type_ == GeneralizedBuchi; + if (ltl_split_ && !r.is_syntactic_obligation()) { formula r2 = r; @@ -146,11 +149,11 @@ namespace spot r2 = r2[0]; ++leading_x; } - if (type_ == Generic || type_ == GeneralizedBuchi) + if (split_hard) { - // F(q|u|f) = q|F(u)|F(f) only for generic acceptance + // F(q|u|f) = q|F(u)|F(f) disabled for GeneralizedBuchi // G(q&e&f) = q&G(e)&G(f) - bool want_u = r2.is({op::F, op::Or}) && (type_ == Generic); + bool want_u = r2.is({op::F, op::Or}) && (type_ != GeneralizedBuchi); if (want_u || r2.is({op::G, op::And})) { std::vector susp; @@ -213,20 +216,19 @@ namespace spot oblg.erase(i, oblg.end()); } + // The only cases where we accept susp and rest to be both + // non-empty is when doing Generic/Parity/TGBA if (!susp.empty()) { - // The only cases where we accept susp and rest to be both - // non-empty is when doing Generic acceptance or TGBA. - if (!rest.empty() - && !(type_ == Generic || type_ == GeneralizedBuchi)) + if (!rest.empty() && !split_hard) { rest.insert(rest.end(), susp.begin(), susp.end()); susp.clear(); } // For Parity, we want to translate all suspendable // formulas at once. - if (rest.empty() && type_ & Parity) - susp = { formula::multop(r2.kind(), susp) }; + //if (rest.empty() && type_ & Parity) + // susp = { formula::multop(r2.kind(), susp) }; } // For TGBA and BA, we only split if there is something to // suspend. diff --git a/tests/core/genltl.test b/tests/core/genltl.test index 71b1ddf77..ce5584a21 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -190,29 +190,29 @@ cat >exp< Date: Wed, 5 Oct 2022 16:29:47 +0200 Subject: [PATCH 156/606] fix previous patch this patch failed on i386 and on Raspberry. * spot/twaalgos/translate.cc: Clear. * spot/twaalgos/postproc.cc: Call reduce_parity_here in more cases. 
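The "acd" option above is easiest to understand with a small usage sketch.  The
following standalone C++ program is not part of the patch series; it merely
illustrates how the option could be passed through spot::translator (which
forwards its option_map to the postprocessor), assuming a Spot tree that
contains these patches.  The formula is an arbitrary example.

    // Translate an LTL formula into a deterministic parity automaton.
    // With these patches, paritization goes through acd_transform()
    // unless the extra option "acd" is set to 0 (then to_parity() is used).
    #include <iostream>
    #include <spot/misc/optionmap.hh>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/hoa.hh>

    int main()
    {
      spot::option_map extra;
      extra.set("acd", 1);           // 1 is the default; try 0 to compare.
      spot::translator trans(&extra);
      trans.set_type(spot::postprocessor::Parity);
      trans.set_pref(spot::postprocessor::Deterministic);
      spot::twa_graph_ptr aut = trans.run(spot::parse_formula("GF(a <-> XXb)"));
      spot::print_hoa(std::cout, aut) << '\n';
      return 0;
    }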
--- spot/twaalgos/postproc.cc | 10 ++++++---- spot/twaalgos/translate.cc | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 19e3d2b6c..1a2915dda 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -239,11 +239,13 @@ namespace spot { if (PREF_ != Any && level_ != Low) tmp->remove_unused_ap(); - if (COMP_) + bool was_complete = tmp->prop_complete().is_true(); + if (COMP_ && !was_complete) tmp = complete(tmp); bool want_parity = type_ & Parity; - if (want_parity && (tmp->acc().is_generalized_buchi() - || tmp->acc().is_generalized_co_buchi())) + if (want_parity && tmp->num_sets() > 1 + && (tmp->acc().is_generalized_buchi() + || tmp->acc().is_generalized_co_buchi())) tmp = choose_degen(tmp); assert(!!SBACC_ == state_based_); if (state_based_) @@ -252,7 +254,7 @@ namespace spot tmp = ensure_ba(tmp); if (want_parity) { - if (!acd_was_used_) + if (!acd_was_used_ || (COMP_ && !was_complete)) reduce_parity_here(tmp, COLORED_); parity_kind kind = parity_kind_any; parity_style style = parity_style_any; diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 0f5e86cde..8a99313a3 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -387,6 +387,7 @@ namespace spot || type_ == GeneralizedBuchi) aut2 = gf_guarantee_to_ba_maybe(r, simpl_->get_dict(), det, state_based_); + acd_was_used_ = false; if (aut2 && (pref_ & Deterministic)) return finalize(aut2); if (!aut2 && (type_ == Generic From 1a4121c6c2826b73c8ec4c65c9d438674918653f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 7 Oct 2022 16:37:02 +0200 Subject: [PATCH 157/606] * tests/Makefile.am (.ipynb.html): Use classic template. --- tests/Makefile.am | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/Makefile.am b/tests/Makefile.am index 2384f115e..71d6a852f 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -476,7 +476,11 @@ endif CLEANFILES = python/test1.dve python/test1.dve2C python/test1.dve.cpp SUFFIXES = .ipynb .html +# Use the classic template when available because it loads +# jquery and we need it in zlktree.html; however the --template +# option does not exist with nbconvert 5.6.1 (in Debian stable). .ipynb.html: + $(JUPYTER) nbconvert $< --to html --template classic --stdout >$@ || \ $(JUPYTER) nbconvert $< --to html --stdout >$@ .PHONY: nb-html From 9fc48daf281c706e9b61d792a8b035edb98b4ec1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 8 Oct 2022 10:34:30 +0200 Subject: [PATCH 158/606] CI: work around GIT_STRATEGY=none not cleaning the build dir * .gitlab-ci.yml (publish-rpm, publish-stable, publish-unstable): Use the latest files and clean things up after publication. --- .gitlab-ci.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index eeb07acf7..11be46f71 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -410,6 +410,7 @@ publish-rpm: - rpm-pkg script: - case $CI_COMMIT_REF_NAME in stable) rput fedora stable *.rpm;; next) rput fedora unstable *.rpm;; esac + - rm -rf ./* publish-stable: only: @@ -425,11 +426,12 @@ publish-stable: script: - cd _build_stable - ls -l - - dput lrde *.changes + - dput lrde `ls -t *.changes | head -1` - cd .. 
- ls -l - tgz=`ls spot-*.tar.* | head -n 1` - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac + - rm -rf ./* - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/" - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" @@ -447,7 +449,9 @@ publish-unstable: script: - cd _build_unstable - ls -l - - dput lrde *.changes + - dput lrde `ls -t *.changes | head -1` + - cd .. + - rm -rf _build_unstable - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline From 1a5b5602db2909d7e61b1bda4dbcae132d231393 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 8 Oct 2022 15:28:15 +0200 Subject: [PATCH 159/606] * .gitlab-ci.yml (publish-unstable): Publish both amd64 and i386. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 11be46f71..b7cd7fd1d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -449,7 +449,7 @@ publish-unstable: script: - cd _build_unstable - ls -l - - dput lrde `ls -t *.changes | head -1` + - dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1` - cd .. - rm -rf _build_unstable - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline From 8131fae1a6fefc3f6c8a768ba35879c5f4d29f7e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 8 Oct 2022 20:59:27 +0200 Subject: [PATCH 160/606] Release Spot 2.11 * NEWS, configure.ac, doc/org/setup.org: Update version. --- NEWS | 47 +++++++++++++++++++++++++---------------------- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/NEWS b/NEWS index 3a2b8316b..c01a17e7e 100644 --- a/NEWS +++ b/NEWS @@ -1,26 +1,26 @@ -New in spot 2.10.6.dev (not yet released) +New in spot 2.11 (2022-10-08) Build: - - configure will now diagnose situation where Python bindings will + - configure will now diagnose situations where Python bindings will be installed in a directory that is not part of Python's search path. A new configure option --with-pythondir can be used to modify this installation path. - - A new configure option --enable-pthread enable the compilation of - Spot with -pthread, and activate the parallel version of some - algorithms. If Spot is compiled with -pthread enabled, any user - linking with Spot should also link with the pthread library. In - order to not break existing build setups using Spot, this option - is currently disabled by default in this release. We plan to turn - it on by default in some future release. Third-party project - using Spot may want to start linking with -pthread in prevision - for this change. + - A new configure option --enable-pthread enables the compilation of + Spot with -pthread, and render available the parallel version of + some algorithms. If Spot is compiled with -pthread enabled, any + user linking with Spot should also link with the pthread library. 
+ In order to not break existing build setups using Spot, this + option is currently disabled by default in this release. We plan + to turn it on by default in some future release. Third-party + project using Spot may want to start linking with -pthread in + prevision for this change. Command-line tools: - autfilt has a new options --aliases=drop|keep to specify - if the output code should attempt to preserve aliases + if the HOA printer should attempt to preserve aliases present in the HOA input. This defaults to "keep". - autfilt has a new --to-finite option, illustrated on @@ -79,11 +79,11 @@ New in spot 2.10.6.dev (not yet released) - tgba_determinize() learned to fill the "original-classes" property. States of the determinized automaton that correspond to the same subset of states of the original automaton belong to the same - class. Filling this property is only done on demand has it inccurs - on small overhead. + class. Filling this property is only done on demand as it inccurs + a small overhead. - sbacc() learned to take the "original-classes" property into - account and preserve it. + account and to preserve it. - The HOA parser and printer learned to map the synthesis-outputs property of Spot to the controllable-AP header for the Extended @@ -148,10 +148,10 @@ New in spot 2.10.6.dev (not yet released) is a co-Büchi automaton. And product_or() learned that the "or"-product of two Büchi automata is a Büchi automaton. - - spot::postprocessor has a new extra option merge-states-min that - indicate above how many states twa_graph::merge_states(), which + - spot::postprocessor has a new extra option "merge-states-min" that + indicates above how many states twa_graph::merge_states() (which perform a very cheap pass to fuse states with identicall - succesors, should be called before running simulation-based + succesors) should be called before running simulation-based reductions. - A new function delay_branching_here(aut) can be used to simplify @@ -164,15 +164,18 @@ New in spot 2.10.6.dev (not yet released) further simplification. This was introduced to help with automata produced from formulas output by "genltl --eil-gsi" (see above). - - spot::postprocessor has new configuration variable branch-post that - can be used to control the use of branching-postponement (diabled - by default) or delayed-branching (see above, enabled by default). - See the spot-x(7) man page for details. + - spot::postprocessor has new configuration variable branch-post + that can be used to control the use of branching-postponement + (disabled by default) or delayed-branching (see above, enabled by + default). See the spot-x(7) man page for details. - spot::postprocessor is now using acd_transform() by default when building parity automata. Setting option "acd=0" will revert to using "to_parity()" instead. + - to_parity() has been almost entirely rewritten and is a bit + faster. + - When asked to build parity automata, spot::translator is now more aggressively using LTL decomposition, as done in the Generic acceptance case before paritizing the result. This results in diff --git a/configure.ac b/configure.ac index e2e16d63a..65ee20594 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.6.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index d68521b7b..1df1e07e2 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.10.6 -#+MACRO: LASTRELEASE 2.10.6 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.6.tar.gz][=spot-2.10.6.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-10-6/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-05-18 +#+MACRO: SPOTVERSION 2.11 +#+MACRO: LASTRELEASE 2.11 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.tar.gz][=spot-2.11.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-10-08 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From db79d5a79e41cdbe81544988e740649269a7781f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 8 Oct 2022 21:05:04 +0200 Subject: [PATCH 161/606] * NEWS, configure.ac: Bump version to 2.11.0.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index c01a17e7e..19c1408bb 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.0.dev (not yet released) + + Nothing yet. + New in spot 2.11 (2022-10-08) Build: diff --git a/configure.ac b/configure.ac index 65ee20594..3d07d9d0e 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.0.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 2c13b299b8c4627a528507c0dc7976a5d71285c9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 10 Oct 2022 10:00:38 +0200 Subject: [PATCH 162/606] hoa: add missing include Fixes #515, reported by Yuri Victorovich. * spot/twaalgos/hoa.hh: Include . --- spot/twaalgos/hoa.hh | 1 + 1 file changed, 1 insertion(+) diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 8c2da4e43..74e97b567 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include From d0c296e1cf599c00814d95cddbe14972d4c9a4ac Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 10 Oct 2022 10:06:25 +0200 Subject: [PATCH 163/606] org: mention "make check" and the new GPG key Fixes #515. * doc/org/install.org: Here. --- doc/org/install.org | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/org/install.org b/doc/org/install.org index a24134e42..dc492af57 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -52,10 +52,13 @@ make make install #+END_SRC +Before running =make install=, you might want to run =make check= to +run our test-suite. + Files =INSTALL= and =README= included in the tarball contains more -explanations about the various options you can use during this -process. Also note that =README= has a section about troubleshooting -installations. +explanations about the various options you can use during the +compilation process. 
Also note that =README= has a section about +troubleshooting installations. * Installing the Debian packages :PROPERTIES: @@ -88,7 +91,9 @@ apt-get install spot libspot-dev spot-doc python3-spot # Or a subset of those Note that our Debian repository is signed since that is the new Debian policy, and both of the above command blocks start with a download of our [[https://www.lrde.epita.fr/repo/debian.gpg][GPG key]]. Its fingerprint is =209B 7362 CFD6 FECF B41D 717F 03D9 -9E74 44F2 A84A=, if you want to verify it. +9E74 44F2 A84A=, if you want to verify it. If you have an old copy of +the GPG key that expired, please download it again: the current +version should be valid until 2032. The package =spot= contains the [[file:tools.org][command-line tools]]. =libspot-dev= contains the header files if you plan to use Spot in a C++17 From 55e4d340fe622755a767ea1d4ebb1f3d8779bdd4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 10 Oct 2022 10:42:40 +0200 Subject: [PATCH 164/606] CI: fix upload of stable Debian packages for amd64 This prevented the Spot website to regenerate. Should fix #516 once we release 2.11.1. * .gitlab-ci.yml (publish-stable): Upload changes for amd64 and i386, not just the later. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b7cd7fd1d..dd6e49c16 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -426,7 +426,7 @@ publish-stable: script: - cd _build_stable - ls -l - - dput lrde `ls -t *.changes | head -1` + - dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1` - cd .. - ls -l - tgz=`ls spot-*.tar.* | head -n 1` From c2bbb3fd00d39c5e4b145220f06ed912f52bacd0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 10 Oct 2022 14:13:42 +0200 Subject: [PATCH 165/606] Release Spot 2.11.1 * NEWS, configure.ac, doc/org/setup.org: Update. --- NEWS | 7 +++++-- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 19c1408bb..ea9fc533d 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,9 @@ -New in spot 2.11.0.dev (not yet released) +New in spot 2.11.1 (2022-10-10) - Nothing yet. + Bugs fixed: + + - Fix a build issue preventing the update of website (issue #516). + - Fix a compilation error with clang-14 on FreeBSD (issue #515). New in spot 2.11 (2022-10-08) diff --git a/configure.ac b/configure.ac index 3d07d9d0e..382bba644 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.0.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.1], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 1df1e07e2..0be5b364b 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11 -#+MACRO: LASTRELEASE 2.11 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.tar.gz][=spot-2.11.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-10-08 +#+MACRO: SPOTVERSION 2.11.1 +#+MACRO: LASTRELEASE 2.11.1 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.1.tar.gz][=spot-2.11.1.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-1/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-10-10 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From 548f3d766342ad9a62655d77aa80bd7fc3a59c79 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 10 Oct 2022 14:15:23 +0200 Subject: [PATCH 166/606] * NEWS, configure.ac: Bump version to 2.11.1.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index ea9fc533d..9aee79188 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.1.dev (not yet released) + + Nothing yet. + New in spot 2.11.1 (2022-10-10) Bugs fixed: diff --git a/configure.ac b/configure.ac index 382bba644..5c160e6cd 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.1], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.1.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 583ca38d91c92fc5201ddaf3b26d46cc41f42736 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Oct 2022 10:43:27 +0200 Subject: [PATCH 167/606] replace bdd_relprod by bdd_restrict * spot/twaalgos/alternation.cc, spot/twaalgos/dualize.cc, spot/twaalgos/simulation.cc, spot/twaalgos/toweak.cc: Here. --- spot/twaalgos/alternation.cc | 4 ++-- spot/twaalgos/dualize.cc | 2 +- spot/twaalgos/simulation.cc | 4 ++-- spot/twaalgos/toweak.cc | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 8370f395b..bdbe07982 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -457,7 +457,7 @@ namespace spot // First loop over all possible valuations atomic properties. for (bdd oneletter: minterms_of(all_letters, ap)) { - minato_isop isop(bdd_relprod(bs, oneletter, ap)); + minato_isop isop(bdd_restrict(bs, oneletter)); bdd dest; while ((dest = isop.next()) != bddfalse) { @@ -636,7 +636,7 @@ namespace spot cond_ = oneletter; all_letters_ -= oneletter; // Get a sum of possible transitions matching this letter. 
- isop_ = minato_isop(bdd_relprod(transitions_, oneletter, ap_)); + isop_ = minato_isop(bdd_restrict(transitions_, oneletter)); dest_ = isop_.next(); } std::set dest = bdd_to_state(dest_); diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index 91498ce8d..1b60a0d17 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -155,7 +155,7 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(bdd_relprod(delta, oneletter, ap)); + minato_isop isop(bdd_restrict(delta, oneletter)); bdd dest; while ((dest = isop.next()) != bddfalse) diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index 58ebfd79d..ca8928888 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -590,7 +590,7 @@ namespace spot // C1 then (!C1)C2, instead of C1 then C2. // With minatop_isop, we ensure that the no negative // class variable will be seen (likewise for promises). - minato_isop isop(bdd_relprod(sig, one, sup_all_atomic_prop)); + minato_isop isop(bdd_restrict(sig, one)); ++nb_minterms; diff --git a/spot/twaalgos/toweak.cc b/spot/twaalgos/toweak.cc index 543c7c9a1..ae7a0f58a 100644 --- a/spot/twaalgos/toweak.cc +++ b/spot/twaalgos/toweak.cc @@ -179,7 +179,7 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(bdd_relprod(delta, oneletter, ap)); + minato_isop isop(bdd_restrict(delta, oneletter)); bdd dest; while ((dest = isop.next()) != bddfalse) From 9de545555235fa07536ac9a85c60d8a46e772202 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Oct 2022 13:28:15 +0200 Subject: [PATCH 168/606] fix some typos * spot/graph/graph.hh, spot/ltsmin/spins_kripke.hxx, spot/mc/bloemen.hh, spot/mc/lpar13.hh, spot/twaalgos/determinize.cc: Here. --- spot/graph/graph.hh | 8 ++++---- spot/ltsmin/spins_kripke.hxx | 12 ++++++------ spot/mc/bloemen.hh | 4 ++-- spot/mc/lpar13.hh | 16 ++++++++-------- spot/twaalgos/determinize.cc | 10 +++++----- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 531426244..04c21fec9 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1295,10 +1295,10 @@ namespace spot static std::vector tv; SPOT_ASSERT(tv.empty()); tv.resize(nthreads); - // FIXME: Due to the way these thread advence into the sate - // vectors, they access very close memory location. It - // would seems more cache friendly to have thread work on - // blocks of continuous states. + // FIXME: Due to the way these thread advance into the state + // vector, they access very close memory location. It would + // seems more cache friendly to have threads work on blocks + // of continuous states. for (unsigned id = 0; id < nthreads; ++id) tv[id] = std::thread( [bne, id, ns, &idx_list, p, nthreads]() diff --git a/spot/ltsmin/spins_kripke.hxx b/spot/ltsmin/spins_kripke.hxx index bafb6f641..bdf47fbb6 100644 --- a/spot/ltsmin/spins_kripke.hxx +++ b/spot/ltsmin/spins_kripke.hxx @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE) // // This file is part of Spot, a model checking library. 
// @@ -400,10 +400,10 @@ namespace spot } } - // FIXME I think we only need visbles aps, i.e. if the system has - // following variables, i.e. P_0.var1 and P_0.var2 but the property - // automaton only mention P_0.var2, we do not need to capture (in - // the resulting cube) any atomic proposition for P_0.var1 + // FIXME: I think we only need visible aps. E.g., if the system has + // variables P_0.var1 and P_0.var2 but the property automaton only + // mentions P_0.var2, we do not need to capture (in the resulting + // cube) any atomic proposition for P_0.var1 void kripkecube::match_aps(std::vector& aps, diff --git a/spot/mc/bloemen.hh b/spot/mc/bloemen.hh index 1a37a71be..432badb76 100644 --- a/spot/mc/bloemen.hh +++ b/spot/mc/bloemen.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -127,7 +127,7 @@ namespace spot bool b = it.isnew(); // Insertion failed, delete element - // FIXME Should we add a local cache to avoid useless allocations? + // FIXME: Should we add a local cache to avoid useless allocations? if (!b) p_.deallocate(v); else diff --git a/spot/mc/lpar13.hh b/spot/mc/lpar13.hh index 28b71aa4b..77396fb9d 100644 --- a/spot/mc/lpar13.hh +++ b/spot/mc/lpar13.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018-2021 Laboratoire de Recherche et +// Copyright (C) 2015-2016, 2018-, 20222022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -32,9 +32,9 @@ namespace spot { /// \brief This class implements the sequential emptiness check as /// presented in "Three SCC-based Emptiness Checks for Generalized - /// B\¨uchi Automata" (Renault et al, LPAR 2013). Among the three - /// emptiness check that has been proposed we opted to implement - /// the Gabow's one. + /// Büchi Automata" (Renault et al, LPAR 2013). Among the three + /// emptiness checks that have been proposed, we opted to implement + /// yGabow's one. template class SPOT_API lpar13 @@ -62,8 +62,8 @@ namespace spot size_t operator()(const product_state that) const noexcept { - // FIXME! wang32_hash(that.st_prop) could have - // been pre-calculated! + // FIXME: wang32_hash(that.st_prop) could have been + // pre-calculated! StateHash hasher; return wang32_hash(that.st_prop) ^ hasher(that.st_kripke); } @@ -135,7 +135,7 @@ namespace spot map[newtop]))) { sys_.recycle(todo.back().it_kripke, tid_); - // FIXME a local storage for twacube iterator? + // FIXME: a local storage for twacube iterator? todo.pop_back(); if (SPOT_UNLIKELY(found_)) { @@ -346,7 +346,7 @@ namespace spot ctrx_element* current = front; while (current != nullptr) { - // FIXME also display acc? + // FIXME: also display acc? res = res + " " + std::to_string(current->prod_st->st_prop) + + "*" + diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index 82305f564..ba4fb3ded 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et +// Copyright (C) 2015-2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -476,7 +476,7 @@ namespace spot std::vector tmp; while (brace >= 0) { - // FIXME is not there a smarter way? + // FIXME: is there a smarter way? 
tmp.insert(tmp.begin(), brace); brace = s.braces_[brace]; } @@ -781,7 +781,7 @@ namespace spot bool safra_state::operator<(const safra_state& other) const { - // FIXME what is the right, if any, comparison to perform? + // FIXME: what is the right, if any, comparison to perform? return braces_ == other.braces_ ? nodes_ < other.nodes_ : braces_ < other.braces_; } @@ -887,7 +887,7 @@ namespace spot // NB spot::simulation() does not remove unreachable states, as it // would invalidate the contents of 'implications'. // so we need to explicitly test for unreachable states - // FIXME based on the scc_info, we could remove the unreachable + // FIXME: based on the scc_info, we could remove the unreachable // states, both in the input automaton and in 'implications' // to reduce the size of 'implies'. if (!scc.reachable_state(i)) @@ -922,7 +922,7 @@ namespace spot std::vector support(aut->num_states()); if (use_stutter && aut->prop_stutter_invariant()) { - // FIXME this could be improved + // FIXME: this could be improved // supports of states should account for possible stuttering if we plan // to use stuttering invariance for (unsigned c = 0; c != scc.scc_count(); ++c) From dae46567e7a78230b5616061b937a75339c73112 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Oct 2022 14:54:24 +0200 Subject: [PATCH 169/606] org: work around newer org-mode not displaying SVG as * doc/org/init.el.in (spot-svg-output-as-object): New function. --- doc/org/init.el.in | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/doc/org/init.el.in b/doc/org/init.el.in index e16d097cc..543e39423 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -184,6 +184,49 @@ up.html points to index.html, then the result is: :publishing-function org-publish-attachment) ("spot-all" :components ("spot-html" "spot-static")))) + + + +;;; Org-mode 9.4.6 is now using to render SVG images. +;;; Unfortunately, this breaks SVG images that use external style +;;; sheets as are expected to be self-contained. +;;; +;;; Since we do use such external style-sheets and never had +;;; any issue with , we revert +;;; to the previous behavior. +;;; +;;; The following function is based on org-html--svg-image from +;;; Org-mode 9.4.5, with the addition of the SVG extension test. +(defun spot-svg-output-as-object (source attributes info) + "If source is an SVG file, return an \"object\" embedding svg file +SOURCE with given ATTRIBUTES. +INFO is a plist used as a communication channel. Otherwise return nil. + +The special attribute \"fallback\" can be used to specify a +fallback image file to use if the object embedding is not +supported. CSS class \"org-svg\" is assigned as the class of the +object unless a different class is specified with an attribute." + (when (string= "svg" (file-name-extension source)) + (let ((fallback (plist-get attributes :fallback)) + (attrs (org-html--make-attribute-string + (org-combine-plists + ;; Remove fallback attribute, which is not meant to + ;; appear directly in the attributes string, and + ;; provide a default class if none is set. + '(:class "org-svg") attributes '(:fallback nil))))) + (format "\n%s" + source + attrs + (if fallback + (org-html-close-tag + "img" (format "src=\"%s\" %s" fallback attrs) info) + "Sorry, your browser does not support SVG."))))) +;;; Hack org-html--format-image to call the above first. +;;; (The org-html--svg-image function was removed when the formater code +;;; switched to for SVG.) 
+(unless (fboundp 'org-html--svg-image) + (advice-add 'org-html--format-image :before-until 'spot-svg-output-as-object)) + (org-publish-all t) ;;; org-babel-remove-temporary-directory does not correctly remove ;;; nested directories and we have some files in tmp/.libs/ because of From bfb8f0a0784cb981c000629a7a566a90162b6dea Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Oct 2022 15:06:54 +0200 Subject: [PATCH 170/606] * .gitlab-ci.yml: Fail if coverage goes below 90.7%. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index dd6e49c16..3bad252ae 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -68,7 +68,7 @@ debian-unstable-gcc-coverage: - ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen - make - make check - - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 + - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 --fail-under-line 90.7 coverage: /^\s*lines:\s*\d+.\d+\%/ artifacts: when: always From da356f114297c6f1f5c40fe112ff0f8cd27ab75e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Oct 2022 15:34:09 +0200 Subject: [PATCH 171/606] substitute @LIBSPOT_PTHREAD@ in spot/libspot.pc Fixes #520, reported by Fangyi Zhou. * spot/Makefile.am (libspot.pc): Substitute @LIBSPOT_PTHREAD@. * THANKS: Add Fangyi Zhou. --- THANKS | 1 + spot/Makefile.am | 1 + 2 files changed, 2 insertions(+) diff --git a/THANKS b/THANKS index c53a0aafb..db74e14b8 100644 --- a/THANKS +++ b/THANKS @@ -15,6 +15,7 @@ Edmond Irani Liu Ernesto Posse Étienne Renault Fabrice Kordon +Fangyi Zhou Felix Klaedtke Florian Perlié-Long František Blahoudek diff --git a/spot/Makefile.am b/spot/Makefile.am index 72cbef22d..806b299ad 100644 --- a/spot/Makefile.am +++ b/spot/Makefile.am @@ -68,6 +68,7 @@ libspot.pc: $(srcdir)/libspot.pc.in Makefile -e 's![@]includedir[@]!$(includedir)!g' \ -e 's![@]libdir[@]!$(libdir)!g' \ -e 's![@]PACKAGE_VERSION[@]!$(PACKAGE_VERSION)!g' \ + -e 's![@]LIBSPOT_PTHREAD[@]!$(LIBSPOT_PTHREAD)!g' \ $(srcdir)/libspot.pc.in > $@.tmp && mv $@.tmp $@ CLEANFILES = libspot.pc From 7f6e3c2bf8e537a496febafcaad14e614918c9e7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 13 Oct 2022 11:21:50 +0200 Subject: [PATCH 172/606] * NEWS: Add news entry for previous fix. --- NEWS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 9aee79188..805325fe6 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,8 @@ New in spot 2.11.1.dev (not yet released) - Nothing yet. + Bugs fixed: + + - Fix pkg-config files containing @LIBSPOT_PTHREAD@ (issue #520) New in spot 2.11.1 (2022-10-10) From 666d78d4999d1e2130cd5cec52649b2b09afd277 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 13 Oct 2022 11:22:14 +0200 Subject: [PATCH 173/606] * doc/org/init.el.in: Typo in comment. 
--- doc/org/init.el.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 543e39423..9f589bb35 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -187,7 +187,7 @@ up.html points to index.html, then the result is: -;;; Org-mode 9.4.6 is now using to render SVG images. +;;; Org-mode 9.5 is now using to render SVG images. ;;; Unfortunately, this breaks SVG images that use external style ;;; sheets as are expected to be self-contained. ;;; From 179672fe3b920134d71287cec9cabb1b2ea0a30d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 13 Oct 2022 11:34:38 +0200 Subject: [PATCH 174/606] relabel: fix handling of concat and fusion * spot/tl/relabel.cc (formula_to_fgraph): Do not assume that n-ary operators are Boolean operators. * tests/python/relabel.py: Add a test case found while discussing some expression with Antoine Martin. * NEWS: Mention it. --- NEWS | 3 +++ spot/tl/relabel.cc | 14 +++++++++++--- tests/python/relabel.py | 4 ++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 805325fe6..3ad57d31d 100644 --- a/NEWS +++ b/NEWS @@ -3,6 +3,9 @@ New in spot 2.11.1.dev (not yet released) Bugs fixed: - Fix pkg-config files containing @LIBSPOT_PTHREAD@ (issue #520) + - spot::relabel_bse() was incorrectly relabeling some dependent + Boolean subexpressions in SERE. (Note that this had no + consequence on automata translated from those SERE.) New in spot 2.11.1 (2022-10-10) diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index 44d6577cb..26c7564c1 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et +// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -363,7 +363,7 @@ namespace spot goto done; } } - if (sz > 2 && !f.is_boolean()) + if (sz > 2 && !f.is_boolean() && f.is(op::And, op::Or)) { /// If we have a formula like (a & b & Xc), consider /// it as ((a & b) & Xc) in the graph to isolate the @@ -384,7 +384,7 @@ namespace spot for (i = 1; i < sz; ++i) { formula next = f[i]; - // Note that we only add an edge in both directions, + // Note that we add an edge in both directions, // as the cut point algorithm really need undirected // graphs. (We used to do only one direction, and // that turned out to be a bug.) @@ -581,6 +581,14 @@ namespace spot conv.visit(f); } + //// Uncomment to print the graph. + // for (auto& [f, sv]: g) + // { + // std::cerr << f << ":\n"; + // for (auto& s: sv) + // std::cerr << " " << s << '\n'; + // } + // Compute its cut-points fset c; cut_points(g, c, f); diff --git a/tests/python/relabel.py b/tests/python/relabel.py index 0de668b12..b32ebd752 100644 --- a/tests/python/relabel.py +++ b/tests/python/relabel.py @@ -55,3 +55,7 @@ try: spot.relabel_here(autg, m) except RuntimeError as e: tc.assertIn("old labels", str(e)) + +f = spot.parse_infix_sere("(p9;p21|p22):(p1&p2;p11&p22;p1&p2)").f +g = spot.relabel_bse(f, spot.Abc) +tc.assertEqual(str(g), "(a;(b | c)):(d;(c & e);d)") From eb2616efaa9a68f27393b0c5904cc11a3e48e039 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 14 Oct 2022 09:44:17 +0200 Subject: [PATCH 175/606] * .gitlab-ci.yml (debian-unstable-gcc-coverage): Disable devel mode. 
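As a complement to the relabel_bse() fix of patch 174 above, here is a small
C++ sketch, not part of the series, that mirrors the Python test added in
tests/python/relabel.py; the expected relabeling shown in the comment is taken
from that test.

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/tl/relabel.hh>
    #include <spot/tl/print.hh>

    int main()
    {
      // p22 and p1&p2 occur in two dependent places; with the fix they are
      // renamed consistently on both sides of the ':'.
      spot::formula f =
        spot::parse_infix_sere("(p9;p21|p22):(p1&p2;p11&p22;p1&p2)").f;
      spot::formula g = spot::relabel_bse(f, spot::Abc);
      spot::print_psl(std::cout, g) << '\n';  // (a;(b | c)):(d;(c & e);d)
      return 0;
    }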
--- .gitlab-ci.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3bad252ae..a2006ee7f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -56,6 +56,9 @@ make-dist: - ./*.tar.gz - VERSION +# We --disable-devel for coverage, because debug mode replaces +# SPOT_UNREACHABLE by an assertion wich is never reachable, lowering +# our coverage. debian-unstable-gcc-coverage: stage: build only: @@ -65,7 +68,7 @@ debian-unstable-gcc-coverage: image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - autoreconf -vfi - - ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen + - ./configure CXX='g++ --coverage' --disable-devel --enable-warnings --disable-static --enable-doxygen - make - make check - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 --fail-under-line 90.7 From b0c299b9e95114421cf2821f4ad9281cebf0984d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 14 Oct 2022 16:41:26 +0200 Subject: [PATCH 176/606] reduce_parity: add layered option * spot/twaalgos/parity.cc: Implement it. * spot/twaalgos/parity.hh, NEWS: Document it. * tests/python/parity.ipynb: Demonstrate it. This is the only test so far, but more uses are coming. --- NEWS | 6 + spot/twaalgos/parity.cc | 39 ++-- spot/twaalgos/parity.hh | 47 ++++- tests/python/parity.ipynb | 430 +++++++++++++++++++++++++------------- 4 files changed, 357 insertions(+), 165 deletions(-) diff --git a/NEWS b/NEWS index 3ad57d31d..d8fd0ab70 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,11 @@ New in spot 2.11.1.dev (not yet released) + Library: + + - spot::reduce_parity() now has a "layered" option to force all + transition in the same parity layer to receive the same color; + like acd_transform() would do. + Bugs fixed: - Fix pkg-config files containing @LIBSPOT_PTHREAD@ (issue #520) diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 94c7bd922..9428d7bb5 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -388,14 +388,14 @@ namespace spot } twa_graph_ptr - reduce_parity(const const_twa_graph_ptr& aut, bool colored) + reduce_parity(const const_twa_graph_ptr& aut, bool colored, bool layered) { return reduce_parity_here(make_twa_graph(aut, twa::prop_set::all()), - colored); + colored, layered); } twa_graph_ptr - reduce_parity_here(twa_graph_ptr aut, bool colored) + reduce_parity_here(twa_graph_ptr aut, bool colored, bool layered) { unsigned num_sets = aut->num_sets(); if (!colored && num_sets == 0) @@ -507,15 +507,30 @@ namespace spot m.first += (piri - m.first) & 1; m.second += (piri - m.second) & 1; } - for (unsigned state: si.states_of(scc)) - for (auto& e: aut->out(state)) - if ((sba || si.scc_of(e.dst) == scc) && - ((piri >= 0 && e.acc.has(color)) || (piri < 0 && !e.acc))) - { - unsigned en = aut->edge_number(e); - piprime1[en] = m.first; - piprime2[en] = m.second; - } + // Recolor edges. Depending on LAYERED we want to + // either recolor all edges for which piprime1 is -2 + // (uncolored), or only the edges that we were removed + // by the previous filter. + auto coloredge = [&](auto& e) { + unsigned en = aut->edge_number(e); + bool recolor = layered + ? 
piprime1[en] == -2 + : (piri >= 0 && e.acc.has(color)) || (piri < 0 && !e.acc); + if (recolor) + { + piprime1[en] = m.first; + piprime2[en] = m.second; + } + }; + if (sba) + // si.edges_of(scc) would be wrong as it can ignore + // outgoing edges removed from a previous level. + for (unsigned s: si.states_of(scc)) + for (auto& e: aut->out(s)) + coloredge(e); + else + for (auto& e: si.inner_edges_of(scc)) + coloredge(e); res.first = std::max(res.first, m.first); res.second = std::max(res.second, m.second); } diff --git a/spot/twaalgos/parity.hh b/spot/twaalgos/parity.hh index 167c6b2d8..44d7cca7e 100644 --- a/spot/twaalgos/parity.hh +++ b/spot/twaalgos/parity.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2016-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -134,7 +134,6 @@ namespace spot colorize_parity_here(twa_graph_ptr aut, bool keep_style = false); /// @} - /// \brief Reduce the parity acceptance condition to use a minimal /// number of colors. /// @@ -149,11 +148,51 @@ namespace spot /// the above paper assumes). Otherwise, the smallest or highest /// colors (depending on the parity kind) is removed to simplify the /// acceptance condition. + /// + /// If the input uses state-based acceptance, the output will use + /// state-based acceptance as well. + /// + /// A parity automaton, sometimes called a chain automaton, can be + /// seen as a stack of layers that are alternatively rejecting and + /// accepting. For instance imagine a parity max automaton that is + /// strongly connected. Removing the transitions with the maximal + /// color might leave a few transitions that were not labeled by + /// this maximal color, but that are part of any cycle anymore: + /// those transition could have been colored with the maximal color, + /// since any cycle going through them would have seen the maximal + /// color. (Once your remove this maximal layer, + /// your can define the next layer similarly.) + /// + /// When \a layered is true all transition that belong to the same + /// layer receive the same color. When layer is `false`, only the + /// transition that where used initially to define the layers (i.e, + /// the transition with the maximal color in the previous exemple), + /// get their color adjusted. The other will receive either no + /// color (if \a colored is false), or a useless color (if \a colored + /// is true). Here "useless color" means the smallest color + /// for parity max, and the largest color for parity min. + /// + /// When \a layered is true, the output of this function is + /// comparable to what acd_transform() would produce. The + /// difference is that this function preserve the kind (min/max) of + /// parity input, while acd_transform() always output a parity min + /// automaton. Additionally, this function needs fewer resources + /// than acd_transform() because it is already known that the input + /// is a parity automaton. In some (historically inaccurate) way, + /// reduce_parity() can be seen as a specialized version of + /// acd_transform(). + /// + /// The reason layered is false by default, is that not introducing + /// colors in place where there where none occasionally help with + /// simulation-based reductions. 
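As a usage sketch only (this is not part of the patch), the call below
exercises both Boolean arguments of reduce_parity() on a parity automaton
obtained from the translator; the formula is an arbitrary example and a build
containing this series is assumed.

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/parity.hh>
    #include <spot/twaalgos/hoa.hh>

    int main()
    {
      spot::translator trans;
      trans.set_type(spot::postprocessor::Parity);
      trans.set_pref(spot::postprocessor::Deterministic);
      spot::twa_graph_ptr aut = trans.run(spot::parse_formula("GFa & FGb"));
      // Minimal number of colors, every edge colored, and each parity layer
      // labeled uniformly with its color (comparable to acd_transform()).
      spot::twa_graph_ptr red =
        spot::reduce_parity(aut, /*colored=*/true, /*layered=*/true);
      spot::print_hoa(std::cout, red) << '\n';
      return 0;
    }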
+ /// /// @{ SPOT_API twa_graph_ptr - reduce_parity(const const_twa_graph_ptr& aut, bool colored = false); + reduce_parity(const const_twa_graph_ptr& aut, + bool colored = false, bool layered = false); SPOT_API twa_graph_ptr - reduce_parity_here(twa_graph_ptr aut, bool colored = false); + reduce_parity_here(twa_graph_ptr aut, + bool colored = false, bool layered = false); /// @} } diff --git a/tests/python/parity.ipynb b/tests/python/parity.ipynb index 7323717da..56d6af350 100644 --- a/tests/python/parity.ipynb +++ b/tests/python/parity.ipynb @@ -72,9 +72,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Of course the case of parity automata with a single color is a bit degenerate, as the same formula correspond to two parity conditions with different kinds. \n", + "Of course the case of parity automata with a single color is a bit degenerate, as the same formula corresponds to two parity conditions of different kinds. \n", "\n", - "In addition the the above, an automaton is said to be **colored** if each of its edges (or states) has exactly one color. Automata that people usually call *parity automata* correspond in Spot to *colored* automata with *parity acceptance*. For this reason try to use the term *automata with parity acceptance* rather than *parity automata* for automata that are not *colored*." + "In addition to the above, an automaton is said to be **colored** if each of its edges (or states) has exactly one color. Automata that people usually call *parity automata* correspond in Spot to *colored* automata with *parity acceptance*. For this reason try to use the term *automata with parity acceptance* rather than *parity automata* for automata that are not *colored*." ] }, { @@ -3009,11 +3009,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3237,11 +3237,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -4223,14 +4223,15 @@ "\n", "# Reduce parity\n", "\n", - "The `reduce_parity()` function is a more elaborate version of `cleanup_parity()`. It implements an algorithm by Carton and Maceiras (*Computing the Rabin index of a parity automaton*, Informatique théorique et applications, 1999), to obtain the minimal parity acceptance condition for a given automaton. Why the original algorithm assume *max odd* parity, this version with work with the four types of parity acceptance. It will only try to preserve the kind (max/min) and may change the style if it allows saving one color. Furthermore, it can colorize (or uncolorize) automata at the same time,\n", + "The `reduce_parity()` function is a more elaborate version of `cleanup_parity()`. It implements an algorithm by Carton and Maceiras (*Computing the Rabin index of a parity automaton*, Informatique théorique et applications, 1999), to obtain the minimal parity acceptance condition for a given automaton. While the original algorithm assumes *max odd* parity, this version works with the four types of parity acceptance. It will only try to preserve the kind (max/min) and may change the style if it allows saving one color. Furthermore, it can colorize (or uncolorize) automata at the same time,\n", "making it a very nice replacement for both `cleanup_parity()` and `colorize_parity()`.\n", "\n", - "It takes two arguments:\n", + "It takes three arguments:\n", "1. the automaton whose parity acceptance condition should be reduced\n", "2. 
a Boolean indicating whether the output should be colored (`True`), or if transition with no color can be used (`False`).\n", + "3. a Boolean indicating whether the output should be layered, i.e., in a max parity automaton, that means the color of a transition should be the maximal color visited by all cycles going through it.\n", "\n", - "By default, the second argument is `False`, because acceptance sets is a scarse ressource in Spot." + "By default, the second argument is `False`, because acceptance sets is a scarse ressource in Spot. The third argument also defaults to `False`, but for empircal reason: adding more colors like this tends to hinder simulation-based reductions." ] }, { @@ -4715,8 +4716,8 @@ "outputs": [ { "data": { - "text/html": [ - "
\n", + "image/svg+xml": [ + "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")))\n", - "[parity max even 4]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", - "\n", - "\n", - "\n", - "0->3\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "2->1\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "3->0\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", - "\n", - "\n", - "\n", - "3->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "\n", - "3->3\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", "
\n", "\n", @@ -5188,17 +5074,263 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[parity max even 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ - "display2(maxeven4, spot.reduce_parity(maxeven4))\n", - "display2(maxeven4, spot.reduce_parity(maxeven4, True))" + "display(maxeven4)\n", + "display2(spot.reduce_parity(maxeven4), spot.reduce_parity(maxeven4, True))\n", + "display2(spot.reduce_parity(maxeven4, False, True), spot.reduce_parity(maxeven4, True, True))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -5212,7 +5344,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.2" + "version": "3.10.7" } }, "nbformat": 4, From 67722db78f64ed332abba8ad7042d4657b2db89d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 14 Oct 2022 17:55:36 +0200 Subject: [PATCH 177/606] reduce_parity: expose the internal vectors of colors * spot/twaalgos/parity.cc, spot/twaalgos/parity.hh: Add a reduce_parity_data class for access to the vectors of colors computed by reduce_parity. * python/spot/impl.i: Add bindings for std::vector. --- python/spot/impl.i | 1 + spot/twaalgos/parity.cc | 75 ++++++++++++++++++++++------------------- spot/twaalgos/parity.hh | 23 +++++++++++++ 3 files changed, 64 insertions(+), 35 deletions(-) diff --git a/python/spot/impl.i b/python/spot/impl.i index 88bdcf5c4..502770fcb 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -520,6 +520,7 @@ namespace std { %template(vectorbdd) vector; %template(aliasvector) vector>; %template(vectorstring) vector; + %template(vectorint) vector; %template(pair_formula_vectorstring) pair>; %template(atomic_prop_set) set; %template(relabeling_map) map; diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 9428d7bb5..c8507ac53 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -387,27 +387,15 @@ namespace spot return aut; } - twa_graph_ptr - reduce_parity(const const_twa_graph_ptr& aut, bool colored, bool layered) + reduce_parity_data::reduce_parity_data(const const_twa_graph_ptr& aut, + bool layered) { - return reduce_parity_here(make_twa_graph(aut, twa::prop_set::all()), - colored, layered); - } - - twa_graph_ptr - reduce_parity_here(twa_graph_ptr aut, bool colored, bool layered) - { - unsigned num_sets = aut->num_sets(); - if (!colored && num_sets == 0) - return aut; - - bool current_max; - bool current_odd; - if (!aut->acc().is_parity(current_max, current_odd, true)) - input_is_not_parity("reduce_parity"); + if (!aut->acc().is_parity(parity_max, parity_odd, true)) + input_is_not_parity("reduce_parity_data"); if (!aut->is_existential()) throw std::runtime_error - ("reduce_parity_here() does not support alternation"); + ("reduce_parity_data() does not support alternation"); + unsigned num_sets = aut->num_sets(); // The algorithm assumes "max odd" or "max even" parity. "min" // parity is handled by converting it to "max" while the algorithm @@ -466,8 +454,8 @@ namespace spot // // -2 means the edge was never assigned a color. 
unsigned evs = aut->edge_vector().size(); - std::vector piprime1(evs, -2); // k=1 - std::vector piprime2(evs, -2); // k=0 + piprime1.resize(evs, -2); // k=1 + piprime2.resize(evs, -2); // k=0 bool sba = aut->prop_state_acc().is_true(); auto rec = @@ -481,7 +469,7 @@ namespace spot { int piri; // π(Rᵢ) int color; // corresponding color, to deal with "min" kind - if (current_max) + if (parity_max) { piri = color = si.acc_sets_of(scc).max_set() - 1; } @@ -538,11 +526,28 @@ namespace spot }; scc_and_mark_filter filter1(aut, {}); rec(filter1, rec); + } + + twa_graph_ptr + reduce_parity(const const_twa_graph_ptr& aut, bool colored, bool layered) + { + return reduce_parity_here(make_twa_graph(aut, twa::prop_set::all()), + colored, layered); + } + + twa_graph_ptr + reduce_parity_here(twa_graph_ptr aut, bool colored, bool layered) + { + unsigned num_sets = aut->num_sets(); + if (!colored && num_sets == 0) + return aut; + + reduce_parity_data pd(aut, layered); // compute the used range for each vector. int min1 = num_sets; int max1 = -2; - for (int m : piprime1) + for (int m : pd.piprime1) { if (m <= -2) continue; @@ -559,7 +564,7 @@ namespace spot } int min2 = num_sets; int max2 = -2; - for (int m : piprime2) + for (int m : pd.piprime2) { if (m <= -2) continue; @@ -575,13 +580,13 @@ namespace spot { std::swap(size1, size2); std::swap(min1, min2); - std::swap(piprime1, piprime2); + std::swap(pd.piprime1, pd.piprime2); } unsigned new_num_sets = size1; - if (current_max) + if (pd.parity_max) { - for (int& m : piprime1) + for (int& m : pd.piprime1) if (m > -2) m -= min1; else @@ -589,7 +594,7 @@ namespace spot } else { - for (int& m : piprime1) + for (int& m : pd.piprime1) if (m > -2) m = new_num_sets - (m - min1) - 1; else @@ -597,8 +602,8 @@ namespace spot } // The parity style changes if we shift colors by an odd number. - bool new_odd = current_odd ^ (min1 & 1); - if (!current_max) + bool new_odd = pd.parity_odd ^ (min1 & 1); + if (!pd.parity_max) // Switching from min<->max changes the parity style every time // the number of colors is even. 
If the input was "min", we // switched once to "max" to apply the reduction and once again @@ -607,7 +612,7 @@ namespace spot new_odd ^= !(num_sets & 1) ^ !(new_num_sets & 1); if (!colored) { - new_odd ^= current_max; + new_odd ^= pd.parity_max; new_num_sets -= 1; // It seems we have nothing to win by changing automata with a @@ -617,18 +622,18 @@ namespace spot } aut->set_acceptance(new_num_sets, - acc_cond::acc_code::parity(current_max, new_odd, + acc_cond::acc_code::parity(pd.parity_max, new_odd, new_num_sets)); if (colored) for (auto& e: aut->edges()) { unsigned n = aut->edge_number(e); - e.acc = acc_cond::mark_t({unsigned(piprime1[n])}); + e.acc = acc_cond::mark_t({unsigned(pd.piprime1[n])}); } - else if (current_max) + else if (pd.parity_max) for (auto& e: aut->edges()) { - unsigned n = piprime1[aut->edge_number(e)]; + unsigned n = pd.piprime1[aut->edge_number(e)]; if (n == 0) e.acc = acc_cond::mark_t({}); else @@ -637,7 +642,7 @@ namespace spot else for (auto& e: aut->edges()) { - unsigned n = piprime1[aut->edge_number(e)]; + unsigned n = pd.piprime1[aut->edge_number(e)]; if (n >= new_num_sets) e.acc = acc_cond::mark_t({}); else diff --git a/spot/twaalgos/parity.hh b/spot/twaalgos/parity.hh index 44d7cca7e..188e92483 100644 --- a/spot/twaalgos/parity.hh +++ b/spot/twaalgos/parity.hh @@ -21,6 +21,7 @@ #include #include +#include namespace spot { @@ -194,5 +195,27 @@ namespace spot SPOT_API twa_graph_ptr reduce_parity_here(twa_graph_ptr aut, bool colored = false, bool layered = false); + + /// @} + + /// \brief Internal data computed by the reduce_parity function + /// + /// `piprime1` and `piprime2` have the size of `aut`'s edge vector, + /// represent two possible colorations of the edges. piprime1 assumes + /// that terminal cases of the recursion are odd, and piprime2 assumes + /// they are even. + /// + /// reduce_parity() actually compare the range of values in these + /// two vectors to limit the number of colors. + struct SPOT_API reduce_parity_data + { + bool parity_max; ///< Whether the input automaton is parity max + bool parity_odd; ///< Whether the input automaton is parity odd + std::vector piprime1; + std::vector piprime2; + + reduce_parity_data(const const_twa_graph_ptr& aut, bool layered = false); + }; + /// @} } From c4a33d34573f7c1881aead7ed2c4756583072be3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 18 Oct 2022 17:34:13 +0200 Subject: [PATCH 178/606] add a .mailmap for git * .mailmap: New file, to fix email inconsistencies. --- .mailmap | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..41bc60980 --- /dev/null +++ b/.mailmap @@ -0,0 +1,20 @@ +Ala-Eddine Ben-Salem +Ala-Eddine Ben-Salem +Ala-Eddine Ben-Salem +Antoine Martin +Arthur Remaud +Arthur Remaud +Damien Lefortier +Felix Abecassis +Felix Abecassis +Felix Abecassis +Guillaume Sadegh +Guillaume Sadegh +Henrich Lauko +Henrich Lauko +Jerome Dubois Jérôme Dubois +Philipp Schlehuber-Caissier +Thibaud Michaud +Thomas Badie +Rachid Rebiha +Thomas Martinez From 52ed3d1e8fa60044baecd1fbfb113596da59dc78 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 19 Oct 2022 14:54:34 +0200 Subject: [PATCH 179/606] * bin/common_aoutput.cc: Missing space in doc string. 
--- bin/common_aoutput.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index f2c8691ec..e8c2e5401 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -221,7 +221,7 @@ static const argp_option io_options[] = "(iw) inherently weak. Use uppercase letters to negate them.", 0 }, { "%R, %[LETTERS]R", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to" + "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to " "(u) user time, (s) system time, (p) parent process, " "or (c) children processes.", 0 }, { "%N, %n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, From de29ba9e4cb21b0e56ede4ecb9cfc2cebe4c6e5e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 19 Oct 2022 16:30:00 +0200 Subject: [PATCH 180/606] stats: add options to count unreachable states and transitions Based on a request from Pierre Ganty. * spot/twaalgos/stats.cc, spot/twaalgos/stats.hh, bin/common_aoutput.cc, bin/common_aoutput.hh: Implement those options. * tests/core/format.test: Add test case. * doc/org/autfilt.org: Update doc. * NEWS: Mention them. --- NEWS | 7 ++++ bin/common_aoutput.cc | 43 ++++++++++++-------- bin/common_aoutput.hh | 6 +-- doc/org/autfilt.org | 10 +++-- spot/twaalgos/stats.cc | 90 ++++++++++++++++++++++++++++++++++++++---- spot/twaalgos/stats.hh | 43 +++++++++++++++++--- tests/core/format.test | 50 +++++++++++++++-------- 7 files changed, 197 insertions(+), 52 deletions(-) diff --git a/NEWS b/NEWS index d8fd0ab70..bf9e1079e 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,12 @@ New in spot 2.11.1.dev (not yet released) + Command-line tools: + + - The --stats specifications %s, %e, %t for printing the number of + (reachable) states, edges, and transitions, learned to support + options [r], [u], [a] to indicate if only reachable, unreachable, + or all elements should be counted. 
+ Library: - spot::reduce_parity() now has a "layered" option to force all diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index e8c2e5401..fcc79fc3c 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -203,12 +203,18 @@ static const argp_option io_options[] = "to specify additional options as in --hoa=opt)", 0 }, { "%M, %m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "name of the automaton", 0 }, - { "%S, %s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable states", 0 }, - { "%E, %e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable edges", 0 }, - { "%T, %t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable transitions", 0 }, + { "%S, %s, %[LETTER]S, %[LETTER]s", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of states (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%E, %e, %[LETTER]E, %[LETTER]e", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of edges (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%T, %t, %[LETTER]E, %[LETTER]e", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of transitions (add one LETTER to select (r) reachable " + "[default], (u) unreachable, (a) all).", 0 }, { "%A, %a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of acceptance sets", 0 }, { "%G, %g, %[LETTERS]G, %[LETTERS]g", 0, nullptr, @@ -268,12 +274,15 @@ static const argp_option o_options[] = "to specify additional options as in --hoa=opt)", 0 }, { "%m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "name of the automaton", 0 }, - { "%s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable states", 0 }, - { "%e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable edges", 0 }, - { "%t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable transitions", 0 }, + { "%s, %[LETTER]s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of states (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%e, %[LETTER]e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of edges (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%t, %[LETTER]t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of transitions (add one LETTER to select (r) reachable " + "[default], (u) unreachable, (a) all).", 0 }, { "%a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of acceptance sets", 0 }, { "%g, %[LETTERS]g", 0, nullptr, @@ -472,15 +481,15 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, if (has('T')) { spot::twa_sub_statistics s = sub_stats_reachable(haut->aut); - haut_states_ = s.states; - haut_edges_ = s.edges; - haut_trans_ = s.transitions; + haut_states_.set(s.states, haut->aut->num_states()); + haut_edges_.set(s.edges, haut->aut->num_edges()); + haut_trans_.set(s.transitions, count_all_transitions(haut->aut)); } else if (has('E') || has('S')) { spot::twa_statistics s = stats_reachable(haut->aut); - haut_states_ = s.states; - haut_edges_ = s.edges; + haut_states_.set(s.states, haut->aut->num_states()); + haut_edges_.set(s.edges, haut->aut->num_edges()); } if (has('M')) { diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index 0fb2e8d7c..d33b687d2 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -166,9 +166,9 @@ private: spot::printable_value aut_word_; spot::printable_value haut_word_; spot::printable_acc_cond haut_gen_acc_; - spot::printable_value haut_states_; - 
spot::printable_value haut_edges_; - spot::printable_value haut_trans_; + spot::printable_size haut_states_; + spot::printable_size haut_edges_; + spot::printable_long_size haut_trans_; spot::printable_value haut_acc_; printable_varset haut_ap_; printable_varset aut_ap_; diff --git a/doc/org/autfilt.org b/doc/org/autfilt.org index 4ccf09f07..5c8a8f1e5 100644 --- a/doc/org/autfilt.org +++ b/doc/org/autfilt.org @@ -145,7 +145,8 @@ ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' (iw) inherently weak. Use uppercase letters to negate them. %d 1 if the output is deterministic, 0 otherwise - %e number of reachable edges + %e, %[LETTER]e number of edges (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). %f the formula, in Spot's syntax %F name of the input file %g, %[LETTERS]g acceptance condition (in HOA syntax); add brackets @@ -170,8 +171,11 @@ ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' LETTERS to restrict to(u) user time, (s) system time, (p) parent process, or (c) children processes. - %s number of reachable states - %t number of reachable transitions + %s, %[LETTER]s number of states (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). + %t, %[LETTER]t number of transitions (add one LETTER to select + (r) reachable [default], (u) unreachable, (a) + all). %u, %[e]u number of states (or [e]dges) with universal branching %u, %[LETTER]u 1 if the automaton contains some universal diff --git a/spot/twaalgos/stats.cc b/spot/twaalgos/stats.cc index ddccba5db..4a905e542 100644 --- a/spot/twaalgos/stats.cc +++ b/spot/twaalgos/stats.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2008, 2011-2018, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. 
@@ -33,6 +33,16 @@ namespace spot { + unsigned long long + count_all_transitions(const const_twa_graph_ptr& g) + { + unsigned long long tr = 0; + bdd v = g->ap_vars(); + for (auto& e: g->edges()) + tr += bdd_satcountset(e.cond, v); + return tr; + } + namespace { class stats_bfs: public twa_reachable_iterator_breadth_first @@ -82,6 +92,7 @@ namespace spot }; + template void dfs(const const_twa_graph_ptr& ge, SU state_update, EU edge_update) { @@ -344,10 +355,73 @@ namespace spot << std::string(beg, end + 2) << ", "; tmp << e.what(); throw std::runtime_error(tmp.str()); - } } + void printable_size::print(std::ostream& os, const char* pos) const + { + char p = 'r'; + if (*pos == '[') + { + p = pos[1]; + if (pos[2] != ']' || !(p == 'r' || p == 'u' || p == 'a')) + { + const char* end = strchr(pos + 1, ']'); + std::ostringstream tmp; + tmp << "while processing %" + << std::string(pos, end + 2) << ", " + << "only [a], [r], or [u] is supported."; + throw std::runtime_error(tmp.str()); + } + } + switch (p) + { + case 'r': + os << reachable_; + return; + case 'a': + os << all_; + return; + case 'u': + os << all_ - reachable_; + return; + } + SPOT_UNREACHABLE(); + return; + } + + void printable_long_size::print(std::ostream& os, const char* pos) const + { + char p = 'r'; + if (*pos == '[') + { + p = pos[1]; + if (pos[2] != ']' || !(p == 'r' || p == 'u' || p == 'a')) + { + const char* end = strchr(pos + 1, ']'); + std::ostringstream tmp; + tmp << "while processing %" + << std::string(pos, end + 2) << ", " + << "only [a], [r], or [u] is supported."; + throw std::runtime_error(tmp.str()); + } + } + switch (p) + { + case 'r': + os << reachable_; + return; + case 'a': + os << all_; + return; + case 'u': + os << all_ - reachable_; + return; + } + SPOT_UNREACHABLE(); + return; + } + stat_printer::stat_printer(std::ostream& os, const char* format) : format_(format) @@ -376,15 +450,15 @@ namespace spot if (has('t')) { twa_sub_statistics s = sub_stats_reachable(aut); - states_ = s.states; - edges_ = s.edges; - trans_ = s.transitions; + states_.set(s.states, aut->num_states()); + edges_.set(s.edges, aut->num_edges()); + trans_.set(s.transitions, count_all_transitions(aut)); } else if (has('s') || has('e')) { twa_statistics s = stats_reachable(aut); - states_ = s.states; - edges_ = s.edges; + states_.set(s.states, aut->num_states()); + edges_.set(s.edges, aut->num_edges()); } if (has('a')) diff --git a/spot/twaalgos/stats.hh b/spot/twaalgos/stats.hh index 1caa8324b..24353fc31 100644 --- a/spot/twaalgos/stats.hh +++ b/spot/twaalgos/stats.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2017, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2008, 2011-2017, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. @@ -55,6 +55,9 @@ namespace spot /// \brief Compute sub statistics for an automaton. SPOT_API twa_sub_statistics sub_stats_reachable(const const_twa_ptr& g); + /// \brief Count all transtitions, even unreachable ones. 
+ SPOT_API unsigned long long + count_all_transitions(const const_twa_graph_ptr& g); class SPOT_API printable_formula: public printable_value { @@ -102,6 +105,36 @@ namespace spot void print(std::ostream& os, const char* pos) const override; }; + class SPOT_API printable_size final: + public spot::printable + { + unsigned reachable_ = 0; + unsigned all_ = 0; + public: + void set(unsigned reachable, unsigned all) + { + reachable_ = reachable; + all_ = all; + } + + void print(std::ostream& os, const char* pos) const override; + }; + + class SPOT_API printable_long_size final: + public spot::printable + { + unsigned long long reachable_ = 0; + unsigned long long all_ = 0; + public: + void set(unsigned long long reachable, unsigned long long all) + { + reachable_ = reachable; + all_ = all; + } + + void print(std::ostream& os, const char* pos) const override; + }; + /// \brief prints various statistics about a TGBA /// /// This object can be configured to display various statistics @@ -123,9 +156,9 @@ namespace spot const char* format_; printable_formula form_; - printable_value states_; - printable_value edges_; - printable_value trans_; + printable_size states_; + printable_size edges_; + printable_long_size trans_; printable_value acc_; printable_scc_info scc_; printable_value nondetstates_; diff --git a/tests/core/format.test b/tests/core/format.test index 4e6f4a4d5..da78e3e7e 100644 --- a/tests/core/format.test +++ b/tests/core/format.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2016, 2017, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -139,18 +139,36 @@ test 3,5 = `ltl2tgba --low --any --stats=%s,%e "$f"` test 3,4 = `ltl2tgba --stats=%s,%e "$f"` cat >foo < stats + +cat >expected <err && exit 1 +grep 'only \[a\], \[r\], or \[u\] is supported' err From 0ba6949f7dd5d0cff778672ec5eb15de2416a5b5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 20 Oct 2022 10:48:01 +0200 Subject: [PATCH 181/606] use bdd_restrict more Doing so reduced the number of GC passes tested in bdd.test, which is good. * spot/twaalgos/ltl2tgba_fm.cc: Simplify minato loops with bdd_restrict. * spot/twaalgos/synthesis.cc (split_2step): Use bdd_restrict instead of bdd_appex. * tests/core/bdd.test, tests/core/ltlf.test: Adjust test cases. 
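For the record (this note and the sketch below are illustrative, not
part of the original change log): whenever the second operand is a
minterm of the quantified variable set, bdd_restrict(f, l) computes
the same function as bdd_appex(f, l, bddop_and, bdd_support(l)), but
without building the intermediate conjunction.  A tiny stand-alone
sketch of that equivalence, with arbitrary variable numbers:

    #include <bddx.h>
    #include <cassert>

    int main()
    {
      bdd_init(1000, 1000);
      bdd_setvarnum(3);
      bdd a = bdd_ithvar(0), b = bdd_ithvar(1), c = bdd_ithvar(2);
      bdd f = (a & c) | (!a & b);
      bdd l = a;            // a minterm over the variable set {a}
      // Both sides fix a to true in f, so both reduce to c.
      assert(bdd_restrict(f, l) == bdd_appex(f, l, bddop_and, bdd_support(l)));
      bdd_done();
      return 0;
    }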
--- spot/twaalgos/ltl2tgba_fm.cc | 35 +++++++++++++---------------------- spot/twaalgos/synthesis.cc | 4 +--- tests/core/bdd.test | 14 +++++++------- tests/core/ltlf.test | 12 ++++++------ 4 files changed, 27 insertions(+), 38 deletions(-) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 3566abc97..a354aaddd 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -900,8 +900,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(res_ndet, label, bddop_and, - dict_.var_set)); + dict_.bdd_to_sere(bdd_restrict(res_ndet, label)); dest = formula::first_match(dest); if (to_concat_) dest = formula::Concat({dest, to_concat_}); @@ -995,9 +994,7 @@ namespace spot bdd all_props = bdd_existcomp(res, dict_.var_set); for (bdd label: minterms_of(all_props, var_set)) { - formula dest = - dict_.bdd_to_sere(bdd_appex(res, label, bddop_and, - dict_.var_set)); + formula dest = dict_.bdd_to_sere(bdd_restrict(res, label)); f2a_t::const_iterator i = f2a_.find(dest); if (i != f2a_.end() && i->second.first == nullptr) continue; @@ -1471,9 +1468,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(f1, label, bddop_and, - dict_.var_set)); - + dict_.bdd_to_sere(bdd_restrict(f1, label)); formula dest2 = formula::binop(o, dest, node[1]); bool unamb = dict_.unambiguous; if (!dest2.is_ff()) @@ -1552,9 +1547,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(f1, label, bddop_and, - dict_.var_set)); - + dict_.bdd_to_sere(bdd_restrict(f1, label)); formula dest2 = formula::binop(o, dest, node[1]); bdd udest = @@ -1787,16 +1780,15 @@ namespace spot var_set = bdd_existcomp(bdd_support(t.symbolic), d_.var_set); all_props = bdd_existcomp(t.symbolic, d_.var_set); } - for (bdd one_prop_set: minterms_of(all_props, var_set)) + for (bdd label: minterms_of(all_props, var_set)) { - minato_isop isop(t.symbolic & one_prop_set); + minato_isop isop(t.symbolic & label); bdd cube; while ((cube = isop.next()) != bddfalse) { bdd label = bdd_exist(cube, d_.next_set); bdd dest_bdd = bdd_existcomp(cube, d_.next_set); - formula dest = - d_.conj_bdd_to_formula(dest_bdd); + formula dest = d_.conj_bdd_to_formula(dest_bdd); // Handle a Miyano-Hayashi style unrolling for // rational operators. Marked nodes correspond to @@ -1818,8 +1810,7 @@ namespace spot dest = d_.mt.mark_concat_ops(dest); } // Note that simplify_mark may have changed dest. - dest_bdd = bdd_ithvar(d_.register_next_variable(dest)); - res |= label & dest_bdd; + res |= label & bdd_ithvar(d_.register_next_variable(dest)); } } t.symbolic = res; @@ -2120,16 +2111,15 @@ namespace spot // // FIXME: minato_isop is quite expensive, and I (=adl) // don't think we really care that much about getting the - // smalled sum of products that minato_isop strives to + // smallest sum of products that minato_isop strives to // compute. Given that Next and Acc variables should // always be positive, maybe there is a faster way to // compute the successors? E.g. using bdd_satone() and // ignoring negated Next and Acc variables. 
- minato_isop isop(res & one_prop_set); + minato_isop isop(bdd_restrict(res, one_prop_set)); bdd cube; while ((cube = isop.next()) != bddfalse) { - bdd label = bdd_exist(cube, d.next_set); bdd dest_bdd = bdd_existcomp(cube, d.next_set); formula dest = d.conj_bdd_to_formula(dest_bdd); @@ -2147,8 +2137,9 @@ namespace spot if (symb_merge) dest = fc.canonicalize(dest); - bdd conds = bdd_existcomp(label, d.var_set); - bdd promises = bdd_existcomp(label, d.a_set); + bdd conds = + exprop ? one_prop_set : bdd_existcomp(cube, d.var_set); + bdd promises = bdd_existcomp(cube, d.a_set); dests.emplace_back(transition(dest, conds, promises)); } } diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index e1e4e1780..88e22ff04 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -574,9 +574,7 @@ namespace spot // implies is faster than and if (bdd_implies(one_letter, e_info.einsup.first)) { - e_info.econdout = - bdd_appex(e_info.econd, one_letter, - bddop_and, input_bdd); + e_info.econdout = bdd_restrict(e_info.econd, one_letter); dests.push_back(&e_info); assert(e_info.econdout != bddfalse); } diff --git a/tests/core/bdd.test b/tests/core/bdd.test index ba2e11232..db03dbad5 100755 --- a/tests/core/bdd.test +++ b/tests/core/bdd.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -23,7 +23,7 @@ set -e # Make sure that setting the SPOT_BDD_TRACE envvar actually does # something. -genltl --kr-nlogn=2 | +genltl --kr-n=3 | SPOT_BDD_TRACE=1 ltl2tgba -x tls-max-states=0 -D >out 2>err cat err grep spot: out && exit 1 @@ -31,14 +31,14 @@ grep 'spot: BDD package initialized' err # This value below, which is the number of time we need to garbage # collect might change if we improve the tool or change the way BuDDy # is initialized. -test 11 = `grep -c 'spot: BDD GC' err` +test 2 = `grep -c 'spot: BDD GC' err` # Minimal size for this automaton. 
# See also https://www.lrde.epita.fr/dload/spot/mochart10-fixes.pdf -test "147,207" = `autfilt --stats=%s,%e out` +test "2240,4214" = `autfilt --stats=%s,%e out` # With the default value of tls-max-states, no GC is needed -genltl --kr-nlogn=2 | SPOT_BDD_TRACE=1 ltl2tgba -D --stats=%s,%e >out 2>err +genltl --kr-n=3 | SPOT_BDD_TRACE=1 ltl2tgba -D --stats=%s,%e >out 2>err cat err grep 'spot: BDD package initialized' err test 0 = `grep -c 'spot: BDD GC' err` -test "147,207" = `cat out` +test "2240,4214" = `cat out` diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index a1979bc8d..11f2132ac 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -57,7 +57,7 @@ State: 3 HOA: v1 name: "a & X(A & a) & (A U G!A)" States: 4 -Start: 2 +Start: 3 AP: 2 "A" "a" acc-name: Buchi Acceptance: 1 Inf(0) @@ -70,9 +70,9 @@ State: 1 [!0] 0 [0] 1 State: 2 -[0&1] 3 -State: 3 [0&1] 1 +State: 3 +[0&1] 2 --END-- HOA: v1 name: "(a U (A & b)) & (A U G!A) & F((A & c) | (A & d & X!A))" @@ -124,7 +124,7 @@ State: 2 {0} --END-- HOA: v1 States: 3 -Start: 1 +Start: 2 AP: 1 "a" acc-name: Buchi Acceptance: 1 Inf(0) @@ -133,9 +133,9 @@ properties: trans-labels explicit-labels state-acc deterministic State: 0 {0} [t] 0 State: 1 -[0] 2 -State: 2 [0] 0 +State: 2 +[0] 1 --END-- HOA: v1 States: 5 From 0ecc870a0eabaf7f845bcab2acbdc1e1c0945072 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 25 Oct 2022 11:52:03 +0200 Subject: [PATCH 182/606] [buddy] Add a default_deleter for bddPair * src/bddx.h (std::default_deleter): Here. --- buddy/src/bddx.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/buddy/src/bddx.h b/buddy/src/bddx.h index 0efd3a0a9..b3cb377a1 100644 --- a/buddy/src/bddx.h +++ b/buddy/src/bddx.h @@ -501,6 +501,7 @@ BUDDY_API_VAR const BDD bddtrue; *************************************************************************/ #ifdef CPLUSPLUS #include +#include /*=== User BDD class ===================================================*/ @@ -1092,6 +1093,11 @@ inline bddxfalse bdd_false(void) { return bddxfalse(); } +template<> +struct std::default_delete { + void operator()(bddPair *p) const { bdd_freepair(p); }; +}; + /*=== Iostream printing ================================================*/ class BUDDY_API bdd_ioformat From 65bc67f300ecaa07fa027dfada96c026b34d9945 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 25 Oct 2022 11:53:05 +0200 Subject: [PATCH 183/606] relabel_here: make sure free_bddpair is called * spot/twaalgos/relabel.cc (relabel_here): This function has multiple exit paths, and none of them were calling bdd_freepair. Use a unique_ptr to ensure that. --- spot/twaalgos/relabel.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index 22eddd893..ac1556aec 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -26,7 +26,7 @@ namespace spot void relabel_here(twa_graph_ptr& aut, relabeling_map* relmap) { - bddPair* pairs = bdd_newpair(); + std::unique_ptr pairs(bdd_newpair()); auto d = aut->get_dict(); std::vector vars; std::set newvars; @@ -53,7 +53,7 @@ namespace spot { int newv = aut->register_ap(p.second); newvars.insert(newv); - bdd_setpair(pairs, oldv, newv); + bdd_setpair(pairs.get(), oldv, newv); } else { @@ -64,7 +64,7 @@ namespace spot return false; }); bdd newb = formula_to_bdd(p.second, d, aut); - bdd_setbddpair(pairs, oldv, newb); + bdd_setbddpair(pairs.get(), oldv, newb); bool_subst = true; } } @@ -75,7 +75,7 @@ namespace spot static_cast(bdd_veccompose) : static_cast(bdd_replace); for (auto& t: aut->edges()) { - bdd c = (*op)(t.cond, pairs); + bdd c = (*op)(t.cond, pairs.get()); t.cond = c; if (c == bddfalse) need_cleanup = true; From 0a710eb99594983bfc559a607fe702804c3e17ce Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 25 Oct 2022 16:31:35 +0200 Subject: [PATCH 184/606] declare all argp_program_doc as static * bench/stutter/stutter_invariance_formulas.cc, bin/autcross.cc, bin/autfilt.cc, bin/dstar2tgba.cc, bin/genaut.cc, bin/genltl.cc, bin/ltl2tgba.cc, bin/ltl2tgta.cc, bin/ltlcross.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlsynt.cc, bin/randaut.cc, bin/randltl.cc, bin/spot-x.cc, bin/spot.cc, tests/ltsmin/modelcheck.cc: Here. --- bench/stutter/stutter_invariance_formulas.cc | 6 +++--- bin/autcross.cc | 6 +++--- bin/autfilt.cc | 2 +- bin/dstar2tgba.cc | 2 +- bin/genaut.cc | 7 ++++--- bin/genltl.cc | 2 +- bin/ltl2tgba.cc | 2 +- bin/ltl2tgta.cc | 2 +- bin/ltlcross.cc | 6 +++--- bin/ltldo.cc | 2 +- bin/ltlfilt.cc | 2 +- bin/ltlsynt.cc | 2 +- bin/randaut.cc | 6 +++--- bin/randltl.cc | 4 ++-- bin/spot-x.cc | 2 +- bin/spot.cc | 7 ++++--- tests/ltsmin/modelcheck.cc | 6 +++--- 17 files changed, 34 insertions(+), 32 deletions(-) diff --git a/bench/stutter/stutter_invariance_formulas.cc b/bench/stutter/stutter_invariance_formulas.cc index 2007891af..32bc45083 100644 --- a/bench/stutter/stutter_invariance_formulas.cc +++ b/bench/stutter/stutter_invariance_formulas.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2014, 2015, 2016, 2017, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -28,7 +28,7 @@ #include #include -const char argp_program_doc[] =""; +static const char argp_program_doc[] = ""; const struct argp_child children[] = { diff --git a/bin/autcross.cc b/bin/autcross.cc index 21d21f2c7..24cd9bcd4 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020, 2022 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2017-2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -51,7 +51,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Call several tools that process automata and cross-compare their output \ to detect bugs, or to gather statistics. 
The list of automata to use \ should be supplied on standard input, or using the -F option.\v\ diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 49543e596..e16ef770a 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -75,7 +75,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Convert, transform, and filter omega-automata.\v\ Exit status:\n\ 0 if some automata were output\n\ diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 1d5cf8762..5b60a0ecc 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -48,7 +48,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Convert automata with any acceptance condition into variants of \ Büchi automata.\n\nThis reads automata into any supported format \ (HOA, LBTT, ltl2dstar, never claim) and outputs a \ diff --git a/bin/genaut.cc b/bin/genaut.cc index d7db04d98..26678c588 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -43,7 +43,8 @@ using namespace spot; -const char argp_program_doc[] ="Generate ω-automata from predefined patterns."; +static const char argp_program_doc[] = + "Generate ω-automata from predefined patterns."; static const argp_option options[] = { diff --git a/bin/genltl.cc b/bin/genltl.cc index 6393024c2..96d8bd7d3 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -45,7 +45,7 @@ using namespace spot; -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Generate temporal logic formulas from predefined patterns."; // We reuse the values from gen::ltl_pattern_id as option keys. diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index ee3d9f777..d4fb2fc17 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -39,7 +39,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Translate linear-time formulas (LTL/PSL) into various types of automata.\n\n\ By default it will apply all available optimizations to output \ the smallest Transition-based Generalized Büchi Automata, \ diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index e3f241385..ab925c7ac 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -46,7 +46,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Translate linear-time formulas (LTL/PSL) into Testing Automata.\n\n\ By default it outputs a transition-based generalized Testing Automaton \ the smallest Transition-based Generalized Büchi Automata, \ diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index 8e1005db6..0dfa09985 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -69,7 +69,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Call several LTL/PSL translators and cross-compare their output to detect \ bugs, or to gather statistics. 
The list of formulas to use should be \ supplied on standard input, or using the -f or -F options.\v\ diff --git a/bin/ltldo.cc b/bin/ltldo.cc index 705e71105..ffbd4873e 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -47,7 +47,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Run LTL/PSL formulas through another program, performing conversion\n\ of input and output as required."; diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index b74f7bc0c..c9064368d 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -59,7 +59,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Read a list of formulas and output them back after some optional processing.\v\ Exit status:\n\ 0 if some formulas were output (skipped syntax errors do not count)\n\ diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 44c55ef54..aaea855a4 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -157,7 +157,7 @@ static const struct argp_child children[] = { nullptr, 0, nullptr, 0 } }; -const char argp_program_doc[] = "\ +static const char argp_program_doc[] = "\ Synthesize a controller from its LTL specification.\v\ Exit status:\n\ 0 if all input problems were realizable\n\ diff --git a/bin/randaut.cc b/bin/randaut.cc index 27512c9ce..1ceb82ee0 100644 --- a/bin/randaut.cc +++ b/bin/randaut.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -42,7 +42,7 @@ #include -const char argp_program_doc[] = "\ +static const char argp_program_doc[] = "\ Generate random connected automata.\n\n\ The automata are built over the atomic propositions named by PROPS...\n\ or, if N is a nonnegative number, using N arbitrary names.\n\ diff --git a/bin/randltl.cc b/bin/randltl.cc index cded77171..986c437c1 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2019 Laboratoire de Recherche +// Copyright (C) 2012-2016, 2018-2019, 2022 Laboratoire de Recherche // et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -40,7 +40,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Generate random temporal logic formulas.\n\n\ The formulas are built over the atomic propositions named by PROPS...\n\ or, if N is a nonnegative number, using N arbitrary names.\v\ diff --git a/bin/spot-x.cc b/bin/spot-x.cc index d1a8f96f6..35f971fd6 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -24,7 +24,7 @@ #include #include "common_setup.hh" -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Common fine-tuning options for programs installed with Spot.\n\ \n\ The argument of -x or --extra-options is a comma-separated list of KEY=INT \ diff --git a/bin/spot.cc b/bin/spot.cc index 95ce7063a..c6bad3c70 100644 --- a/bin/spot.cc +++ b/bin/spot.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -24,7 +24,8 @@ #include #include "common_setup.hh" -const char argp_program_doc[] ="Command-line tools installed by Spot."; +static const char argp_program_doc[] = + "Command-line tools installed by Spot."; #define DOC(NAME, TXT) NAME, 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, TXT, 0 diff --git a/tests/ltsmin/modelcheck.cc b/tests/ltsmin/modelcheck.cc index 6e17e8ba2..9c529f0e8 100644 --- a/tests/ltsmin/modelcheck.cc +++ b/tests/ltsmin/modelcheck.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2020 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE) +// Copyright (C) 2011-2020, 2022 Laboratoire de Recherche et +// Developpement de l'Epita (LRDE) // // This file is part of Spot, a model checking library. // @@ -45,7 +45,7 @@ #include #include -const char argp_program_doc[] = +static const char argp_program_doc[] = "Process model and formula to check wether a " "model meets a specification.\v\ Exit status:\n\ From c312a05bbd5b034b23d860fbc7e9b12fabb663d1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Oct 2022 10:03:29 +0200 Subject: [PATCH 185/606] do not use id for animating the logo because we remove ids using svgo... * doc/org/spot2.svg, doc/org/spot.css: Animate the verison using a class. --- doc/org/spot.css | 2 +- doc/org/spot2.svg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/org/spot.css b/doc/org/spot.css index 7bbd0ef39..ca8b12395 100644 --- a/doc/org/spot.css +++ b/doc/org/spot.css @@ -83,7 +83,7 @@ thead tr{background:#ffe35e} .caveat::before{background:#d70079;content:"Caveat";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} .spotlogo{transform-origin:50% 50%;animation-duration:2s;animation-name:animspotlogo} g.spotlogobg{transform-origin:50% 50%;animation-duration:2s;animation-name:animspotlogobg} -g#version{transform-origin:50% 50%;animation-duration:3s;animation-name:animspotlogover} +g.spotlogover{transform-origin:50% 50%;animation-duration:3s;animation-name:animspotlogover} @keyframes animspotlogo{ 0%{transform:rotateY(90deg)} 80%{transform:rotateY(0deg)} diff --git a/doc/org/spot2.svg b/doc/org/spot2.svg index 76b76525f..8d68ba9d3 100644 --- a/doc/org/spot2.svg +++ b/doc/org/spot2.svg @@ -14,7 +14,7 @@ - + From 66aaa115801f668fbcccdc8ba22ab395c563c48b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Oct 2022 11:15:39 +0200 Subject: [PATCH 186/606] Release Spot 2.11.2 * NEWS, configure.ac, doc/org/setup.org: Bump version to 2.11.2. --- NEWS | 2 +- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index bf9e1079e..a6e370af4 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.11.1.dev (not yet released) +New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/configure.ac b/configure.ac index 5c160e6cd..8ec99065c 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.1.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.2], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 0be5b364b..bb568e5f1 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11.1 -#+MACRO: LASTRELEASE 2.11.1 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.1.tar.gz][=spot-2.11.1.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-1/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-10-10 +#+MACRO: SPOTVERSION 2.11.2 +#+MACRO: LASTRELEASE 2.11.2 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.2.tar.gz][=spot-2.11.2.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-2/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-10-26 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From 17a959aa291e33ee62af754c38524c365a9d0e11 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Oct 2022 11:24:20 +0200 Subject: [PATCH 187/606] Bump version to 2.11.2.dev * NEWS, configure.ac: Here. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index a6e370af4..f6cba13cc 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.2.dev (not yet released) + + Nothing yet. + New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/configure.ac b/configure.ac index 8ec99065c..236063093 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.2], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.2.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From fafe40c530d26aa10cfd5c5a768a7f2fb0a560ee Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 4 Nov 2022 17:11:51 +0100 Subject: [PATCH 188/606] fix namespace for exception errors * spot/priv/satcommon.cc, spot/twaalgos/dtbasat.cc, spot/twaalgos/dtwasat.cc: When setting exception on std::ofstream, use ofstream::failbit and ofstream::badbit instead of ifstream::failbit and ifstream::badbit. --- spot/priv/satcommon.cc | 4 ++-- spot/twaalgos/dtbasat.cc | 4 ++-- spot/twaalgos/dtwasat.cc | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/spot/priv/satcommon.cc b/spot/priv/satcommon.cc index 21d75eee1..aec73d104 100644 --- a/spot/priv/satcommon.cc +++ b/spot/priv/satcommon.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -167,7 +167,7 @@ namespace spot return; std::ofstream out(log, std::ios_base::ate | std::ios_base::app); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); if (out.tellp() == 0) out << ("input.states,target.states,reachable.states,edges,transitions," diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index eb39a69b6..b2147ebb4 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2021 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2021, 2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -585,7 +585,7 @@ namespace spot #if TRACE std::fstream out("dtba-sat.dbg", std::ios_base::trunc | std::ios_base::out); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); #endif std::set acc_states; std::set seen_trans; diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 670a9ffc8..25a299154 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche +// Copyright (C) 2013-2022 Laboratoire de Recherche // et Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -864,7 +864,7 @@ namespace spot #if TRACE std::fstream out("dtwa-sat.dbg", std::ios_base::trunc | std::ios_base::out); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); #endif std::map state_acc; std::set seen_trans; From 5c5133348eaf774b7be88f01327406d68406ecba Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 4 Nov 2022 18:20:29 +0100 Subject: [PATCH 189/606] mealy: improve error reporting * spot/twaalgos/mealy_machine.cc: Add more exceptions. * tests/python/except.py: Test them. 
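The checks formerly done with assert() are now performed by a common
ensure_mealy() helper that throws a std::runtime_error naming the
offending entry point.  A small illustrative sketch (not part of this
patch; the automaton built here is arbitrary) of what a caller can now
expect when the "synthesis-outputs" property is missing:

    #include <spot/twa/twagraph.hh>
    #include <spot/twaalgos/mealy_machine.hh>
    #include <iostream>
    #include <stdexcept>

    int main()
    {
      auto dict = spot::make_bdd_dict();
      auto aut = spot::make_twa_graph(dict); // "synthesis-outputs" never set
      aut->new_states(1);
      aut->set_init_state(0);
      try
        {
          spot::split_separated_mealy_here(aut);
        }
      catch (const std::runtime_error& e)
        {
          // Expected to print something like:
          // split_separated_mealy_here(): "synthesis-outputs" not defined
          std::cerr << e.what() << '\n';
        }
    }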
--- spot/twaalgos/mealy_machine.cc | 76 +++++++++++++++++++++------------- tests/python/except.py | 38 +++++++++++++++++ 2 files changed, 86 insertions(+), 28 deletions(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 3635e6334..9cdae279e 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -100,7 +100,8 @@ namespace , f{std::fopen(name.c_str(), "a")} { if (!f) - throw std::runtime_error("File could not be oppened for writing."); + throw std::runtime_error("`" + name + + "' could not be oppened for writing."); } ~fwrapper() { @@ -120,21 +121,36 @@ namespace namespace spot { + static bdd + ensure_mealy(const char* function_name, + const const_twa_graph_ptr& m) + { + if (SPOT_UNLIKELY(!m->acc().is_t())) + throw std::runtime_error(std::string(function_name) + + "(): Mealy machines must have " + "true acceptance condition"); + bdd* out = m->get_named_prop("synthesis-outputs"); + if (SPOT_UNLIKELY(!out)) + throw std::runtime_error(std::string(function_name) + + "(): \"synthesis-outputs\" not defined"); + return *out; + } + bool is_mealy(const const_twa_graph_ptr& m) { if (!m->acc().is_t()) - { - trace << "is_mealy(): Mealy machines must have " - "true acceptance condition.\n"; - return false; - } + { + trace << "is_mealy(): Mealy machines must have " + "true acceptance condition.\n"; + return false; + } if (!m->get_named_prop("synthesis-outputs")) - { - trace << "is_mealy(): \"synthesis-outputs\" not found!\n"; - return false; - } + { + trace << "is_mealy(): \"synthesis-outputs\" not found!\n"; + return false; + } return true; } @@ -252,9 +268,7 @@ namespace spot void split_separated_mealy_here(const twa_graph_ptr& m) { - assert(is_mealy(m)); - - auto output_bdd = get_synthesis_outputs(m); + bdd output_bdd = ensure_mealy("split_separated_mealy_here", m); struct dst_cond_color_t { @@ -323,10 +337,10 @@ namespace spot twa_graph_ptr split_separated_mealy(const const_twa_graph_ptr& m) { - assert(is_mealy((m))); + bdd outputs = ensure_mealy("split_separated_mealy", m); auto m2 = make_twa_graph(m, twa::prop_set::all()); m2->copy_acceptance_of(m); - set_synthesis_outputs(m2, get_synthesis_outputs(m)); + set_synthesis_outputs(m2, outputs); split_separated_mealy_here(m2); return m2; } @@ -767,7 +781,7 @@ namespace spot twa_graph_ptr reduce_mealy(const const_twa_graph_ptr& mm, bool output_assignment) { - assert(is_mealy(mm)); + bdd outputs = ensure_mealy("reduce_mealy", mm); if (mm->get_named_prop>("state-player")) throw std::runtime_error("reduce_mealy(): " "Only works on unsplit machines.\n"); @@ -775,7 +789,7 @@ namespace spot auto mmc = make_twa_graph(mm, twa::prop_set::all()); mmc->copy_ap_of(mm); mmc->copy_acceptance_of(mm); - set_synthesis_outputs(mmc, get_synthesis_outputs(mm)); + set_synthesis_outputs(mmc, outputs); reduce_mealy_here(mmc, output_assignment); @@ -785,7 +799,7 @@ namespace spot void reduce_mealy_here(twa_graph_ptr& mm, bool output_assignment) { - assert(is_mealy(mm)); + ensure_mealy("reduce_mealy_here", mm); // Only consider infinite runs mm->purge_dead_states(); @@ -902,6 +916,8 @@ namespace // Writing also "flushes" void write() { + if (!sat_csv_file) + return; auto f = [](std::ostream& o, auto& v, bool sep = true) { if (v >= 0) @@ -910,8 +926,6 @@ namespace o.put(','); v = -1; }; - if (!sat_csv_file) - return; auto& out = *sat_csv_file; if (out.tellp() == 0) @@ -3786,7 +3800,7 @@ namespace spot twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, int premin) { - assert(is_mealy(mm)); + bdd outputs = 
ensure_mealy("minimize_mealy", mm); satprob_info si(sat_instance_name); si.task = "presat"; @@ -3916,7 +3930,7 @@ namespace spot // Set state players! if (!minmachine) return early_exit(); - set_synthesis_outputs(minmachine, get_synthesis_outputs(mm)); + set_synthesis_outputs(minmachine, outputs); si.done=1; si.n_min_states = minmachine->num_states(); @@ -3931,17 +3945,23 @@ namespace spot minimize_mealy(const const_twa_graph_ptr& mm, synthesis_info& si) { - if ((si.minimize_lvl < 3) || (5 < si.minimize_lvl)) - throw std::runtime_error("Invalid option"); + if ((si.minimize_lvl < 3) || (si.minimize_lvl > 5)) + throw std::runtime_error("minimize_mealy(): " + "minimize_lvl should be between 3 and 5."); std::string csvfile = si.opt.get_str("satlogcsv"); std::string dimacsfile = si.opt.get_str("satlogdimacs"); if (!csvfile.empty()) - sat_csv_file - = std::make_unique(csvfile, - std::ios_base::ate - | std::ios_base::app); + { + sat_csv_file = std::make_unique + (csvfile, std::ios_base::ate | std::ios_base::app); + if (!*sat_csv_file) + throw std::runtime_error("could not open `" + csvfile + + "' for writing"); + sat_csv_file->exceptions(std::ofstream::failbit + | std::ofstream::badbit); + } if (!dimacsfile.empty()) sat_dimacs_file = std::make_unique(dimacsfile); diff --git a/tests/python/except.py b/tests/python/except.py index 508ffd7f9..34aa61ad2 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -321,3 +321,41 @@ except RuntimeError as e: tc.assertIn("already registered", se) else: report_missing_exception() + + +try: + spot.minimize_mealy(a, 100) +except RuntimeError as e: + se = str(e) + tc.assertIn("minimize_mealy", se) + tc.assertIn("minimize_lvl", se) +else: + report_missing_exception() + +opt = spot.synthesis_info() +opt.minimize_lvl = 3 +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + se = str(e) + tc.assertIn("minimize_mealy", se) + tc.assertIn("synthesis-output", se) + +spot.set_synthesis_outputs(a, buddy.bdd_ithvar(a.register_ap("b"))) +filename = "/THIS-FILE/SHOULD/NOT/EXIST" +opt.opt.set_str("satlogdimacs", filename) +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + tc.assertIn(filename, str(e)) +else: + report_missing_exception() + +opt.opt.set_str("satlogdimacs", "") +opt.opt.set_str("satlogcsv", filename) +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + tc.assertIn(filename, str(e)) +else: + report_missing_exception() From 6dc740184c154bde0f4065c6c9a0d1322931ebf3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 7 Nov 2022 09:37:26 +0100 Subject: [PATCH 190/606] * tests/sanity/style.test: Fix recent grep warnings. --- tests/sanity/style.test | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 85ef359b0..325ebe78d 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -40,7 +40,7 @@ GREP=grep # Get some help from GNU grep. if (grep --color=auto -n --version)>/dev/null 2>&1; then GREP="$GREP --color=auto -n" - GREP_COLOR='1;31' + GREP_COLOR='mt=1;31' export GREP_COLOR fi @@ -295,7 +295,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do fi # we want catch (const reftype&) or catch (...) - $GREP 'catch *([^.]' $tmp | $GREP -v 'const.*\&' && + $GREP 'catch *([^.]' $tmp | $GREP -v 'const.*&' && diag 'Always capture exceptions by const reference.' 
case $file in From b36cee06a1bb8630aa6d9b2070d2e18514e1ef83 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 7 Nov 2022 16:24:33 +0100 Subject: [PATCH 191/606] adjust to Swig 4.1.0 * python/spot/__init__.py: Add flatnested versions of some static methods. * spot/twa/acc.hh: Hide && version of & and |, causing trouble to swig. * tests/python/_synthesis.ipynb, tests/python/synthesis.ipynb: Upgrade expected type names. * tests/python/ipnbdoctest.py: Adjust for difference between 4.0 and 4.1. --- python/spot/__init__.py | 19 +++++++++++-------- spot/twa/acc.hh | 4 ++++ tests/python/_synthesis.ipynb | 2 +- tests/python/ipnbdoctest.py | 5 +++++ tests/python/synthesis.ipynb | 12 ++++++------ 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 01210c824..edbf4a4e6 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -104,19 +104,22 @@ def setup(**kwargs): os.environ['SPOT_DOTDEFAULT'] = d -# In version 3.0.2, Swig puts strongly typed enum in the main -# namespace without prefixing them. Latter versions fix this. So we -# can remove for following hack once 3.0.2 is no longer used in our -# build farm. -if 'op_ff' not in globals(): +# Swig versions prior to 4.1.0 export formula.X as formula_X as well, +# for all operators. Swig 4.1.0 stops doing this, breaking some +# existing code. +if 'formula_ff' not in globals(): for i in ('ff', 'tt', 'eword', 'ap', 'Not', 'X', 'F', 'G', 'Closure', 'NegClosure', 'NegClosureMarked', 'Xor', 'Implies', 'Equiv', 'U', 'R', 'W', 'M', 'EConcat', 'EConcatMarked', 'UConcat', 'Or', 'OrRat', 'And', 'AndRat', 'AndNLM', 'Concat', - 'Fusion', 'Star', 'FStar'): - globals()['op_' + i] = globals()[i] - del globals()[i] + 'Fusion', 'Star', 'FStar', 'nested_unop_range', + 'sugar_goto', 'sugar_equal', 'sugar_delay', 'unop', + 'binop', 'bunop', 'multop', 'first_match', 'unbounded'): + globals()['formula_' + i] = formula.__dict__[i].__func__ +if 'trival_maybe' not in globals(): + for i in ('maybe',): + globals()['trival_' + i] = trival.__dict__[i].__func__ # Global BDD dict so that we do not have to create one in user code. diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 905f5c40a..1c460cfc4 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1011,6 +1011,7 @@ namespace spot return res; } +#ifndef SWIG /// \brief Conjunct the current condition with \a r. acc_code operator&(acc_code&& r) const { @@ -1018,6 +1019,7 @@ namespace spot res &= r; return res; } +#endif // SWIG /// \brief Disjunct the current condition in place with \a r. acc_code& operator|=(const acc_code& r) @@ -1106,6 +1108,7 @@ namespace spot return *this; } +#ifndef SWIG /// \brief Disjunct the current condition with \a r. acc_code operator|(acc_code&& r) const { @@ -1113,6 +1116,7 @@ namespace spot res |= r; return res; } +#endif // SWIG /// \brief Disjunct the current condition with \a r. acc_code operator|(const acc_code& r) const diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 4c203a86e..2d92236b7 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -6346,7 +6346,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7fbccc33adb0> >" ] }, "execution_count": 28, diff --git a/tests/python/ipnbdoctest.py b/tests/python/ipnbdoctest.py index f6ce3562e..47b73f901 100755 --- a/tests/python/ipnbdoctest.py +++ b/tests/python/ipnbdoctest.py @@ -101,6 +101,11 @@ def canonicalize(s, type, ignores): # %%file writes `Writing`, or `Overwriting` if the file exists. 
s = re.sub(r'^Overwriting ', 'Writing ', s) + # Swig 4.1.0 fixed an ordering issue with how types are printed. + # aig_ptr is expected to be printed as shared_ptr, but prior + # Swig version did not do that. + s = re.sub(r'spot::aig_ptr ', 'std::shared_ptr< spot::aig > ', s) + # SVG generated by graphviz may put note at different positions # depending on the graphviz build. Let's just strip anything that # look like a position. diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index 54da20ef7..ba1b562cc 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -2601,7 +2601,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584deae0> >" ] }, "metadata": {}, @@ -3256,7 +3256,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e5855c9f0> >" ] }, "metadata": {}, @@ -3385,7 +3385,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e5855c900> >" ] }, "metadata": {}, @@ -3801,7 +3801,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584def00> >" ] }, "metadata": {}, @@ -3979,7 +3979,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e5855c930> >" ] }, "metadata": {}, @@ -4159,7 +4159,7 @@ "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584def90> >" ] }, "metadata": {}, From 0f4f7ec287dc5d0e7b93bf1171aa5c7214bc51b1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 8 Nov 2022 15:51:27 +0100 Subject: [PATCH 192/606] * debian/copyright: Fix download URL. --- debian/copyright | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/copyright b/debian/copyright index 9c4653c28..792afcec1 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,6 +1,6 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: spot -Source: http://spot.lrde.epita.fr/dload/spot/ +Source: http://www.lrde.epita.fr/dload/spot/ Files: * Copyright: 2003-2007 Laboratoire d'Informatique de Paris 6 (LIP6) From a6c65dff8d026bdacf373fb738b9adb9a1b71fd5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 8 Nov 2022 15:52:02 +0100 Subject: [PATCH 193/606] misc Doxygen fixes * spot/misc/satsolver.hh, spot/tl/formula.hh, spot/twaalgos/hoa.hh, spot/twaalgos/synthesis.hh, spot/twaalgos/zlktree.hh, spot/twacube_algos/convert.hh: Typos in Doxygen comments. --- spot/misc/satsolver.hh | 6 +++--- spot/tl/formula.hh | 16 ++++++++-------- spot/twaalgos/hoa.hh | 4 ++-- spot/twaalgos/synthesis.hh | 6 +++--- spot/twaalgos/zlktree.hh | 2 +- spot/twacube_algos/convert.hh | 6 +++--- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/spot/misc/satsolver.hh b/spot/misc/satsolver.hh index 03a75fa02..3b5bedccd 100644 --- a/spot/misc/satsolver.hh +++ b/spot/misc/satsolver.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2017-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) 2013, 2017-2018, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -88,7 +88,7 @@ namespace spot /// \brief Add a single lit. to the current clause. void add(int v); - /// \breif Get the current number of clauses. + /// \brief Get the current number of clauses. int get_nb_clauses() const; /// \brief Get the current number of variables. 
diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index c52ed3e39..d01b8379c 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -1227,12 +1227,12 @@ namespace spot return bunop(op::Name, std::move(f), min, max); \ } #endif - /// \brief Create SERE for f[*min..max] + /// \brief Create SERE for `f[*min..max]` /// @{ SPOT_DEF_BUNOP(Star); /// @} - /// \brief Create SERE for f[:*min..max] + /// \brief Create SERE for `f[:*min..max]` /// /// This operator is a generalization of the (+) operator /// defined by Dax et al. \cite dax.09.atva @@ -1259,24 +1259,24 @@ namespace spot f.ptr_->clone())); } - /// \brief Create a SERE equivalent to b[->min..max] + /// \brief Create a SERE equivalent to `b[->min..max]` /// /// The operator does not exist: it is handled as syntactic sugar /// by the parser and the printer. This function is used by the /// parser to create the equivalent SERE. static formula sugar_goto(const formula& b, unsigned min, unsigned max); - /// Create the SERE b[=min..max] + /// \brief Create the SERE `b[=min..max]` /// /// The operator does not exist: it is handled as syntactic sugar /// by the parser and the printer. This function is used by the /// parser to create the equivalent SERE. static formula sugar_equal(const formula& b, unsigned min, unsigned max); - /// Create the SERE a ##[n:m] b + /// \brief Create the SERE `a ##[n:m] b` /// - /// This ##[n:m] operator comes from SVA. When n=m, it is simply - /// written ##n. + /// This `##[n:m]` operator comes from SVA. When n=m, it is simply + /// written `##n`. /// /// The operator does not exist in Spot it is handled as syntactic /// sugar by the parser. This function is used by the parser to diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 74e97b567..441b9ed16 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -95,7 +95,7 @@ namespace spot /// registered in the automaton is not only ignored, but also /// removed from the alias list stored in the automaton. /// - /// The \a or_str, \a and_str, and \ap_printer arguments are + /// The \a or_str, \a and_str, and \a ap_printer arguments are /// used to print operators OR, AND, and to print atomic propositions /// that are not aliases. \a lpar_str and \a rpar_str are used /// to group conjuncts that appear in a disjunction. @@ -119,7 +119,7 @@ namespace spot /// /// - If an alias A exists for \a label, `"@A"` is returned. /// - /// - If an alias A exists for the negation of \a label, `"!@A` + /// - If an alias A exists for the negation of \a label, `"!@A"` /// is returned. /// /// - If \a label is true or false, `true_str` or `false_str` diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 115b8097c..b1b7fdf1d 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
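The bracket operators whose documentation is fixed in spot/tl/formula.hh above can all be exercised through the regular formula parser; a small illustrative sketch (the exact printed expansions are not asserted here):

  import spot

  # [*i..j] is plain bounded repetition; [->i..j], [=i..j] and ##[i:j]
  # are the pieces of syntactic sugar documented above, rewritten by the
  # parser into the basic SERE operators.
  for s in ('{a[*2..4]}', '{b[->2]}', '{b[=1..3]}', '{a ##[1:2] b}'):
      print(s, ' parses to ', spot.formula(s))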
@@ -36,8 +36,8 @@ namespace spot /// p -- cond --> q cond in 2^2^AP /// into a set of transitions of the form /// p -- {a} --> (p,a) -- o --> q - /// for each a in cond \cap 2^2^I - /// and where o = (cond & a) \cap 2^2^(O) + /// for each a in cond ∪ 2^2^I + /// and where o = (cond & a) ∪ 2^2^O. /// /// By definition, the states p are deterministic, /// only the states of the form diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index d210033e3..6d8b3270c 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -481,7 +481,7 @@ namespace spot /// /// If \a colored is set, each output transition will have exactly /// one color, and the output automaton will use at most n+1 colors - /// if the input has n colors. If \colored is unsed (the default), + /// if the input has n colors. If \a colored is unsed (the default), /// output transitions will use at most one color, and output /// automaton will use at most n colors. /// diff --git a/spot/twacube_algos/convert.hh b/spot/twacube_algos/convert.hh index f21aaec3f..ba739f470 100644 --- a/spot/twacube_algos/convert.hh +++ b/spot/twacube_algos/convert.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2020 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2016, 2020, 2022 Laboratoire de Recherche et +// Developpement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -51,7 +51,7 @@ namespace spot twa_to_twacube(spot::const_twa_graph_ptr aut); /// \brief Convert a twacube into a twa. - /// When \d is specified, the BDD_dict in parameter is used rather than + /// When \a d is specified, the BDD_dict in parameter is used rather than /// creating a new one. SPOT_API spot::twa_graph_ptr twacube_to_twa(spot::twacube_ptr twacube, From f2c65ea5579c456e508110274550794e3caf0390 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 15 Nov 2022 16:59:21 +0100 Subject: [PATCH 194/606] simplify: set exprop=false during containment checks For issue #521, reported by Jacopo Binchi. * spot/tl/simplify.cc: Here. * tests/core/521.test: New test case. * tests/Makefile.am: Add it. * NEWS: Mention it. * THANKS: Add Jacopo Binchi. --- NEWS | 6 ++++- THANKS | 1 + spot/tl/simplify.cc | 6 ++--- tests/Makefile.am | 1 + tests/core/521.test | 64 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 74 insertions(+), 4 deletions(-) create mode 100755 tests/core/521.test diff --git a/NEWS b/NEWS index f6cba13cc..1af76aed4 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,10 @@ New in spot 2.11.2.dev (not yet released) - Nothing yet. + Bug fixes: + + - Automata-based implication checks, used to simplify formulas where + slower than necessary because the translator was configured to + favor determinism unnecessarily. (Issue #521.) New in spot 2.11.2 (2022-10-26) diff --git a/THANKS b/THANKS index db74e14b8..2b054666c 100644 --- a/THANKS +++ b/THANKS @@ -23,6 +23,7 @@ Gerard J. Holzmann Hashim Ali Heikki Tauriainen Henrich Lauko +Jacopo Binchi Jan Strejček Jean-Michel Couvreur Jean-Michel Ilié diff --git a/spot/tl/simplify.cc b/spot/tl/simplify.cc index 3a2433197..cbc5857c5 100644 --- a/spot/tl/simplify.cc +++ b/spot/tl/simplify.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2021 Laboratoire de Recherche et Developpement +// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
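The implication checks tuned by this patch go through language_containment_checker, which is also reachable from Python; a rough sketch of the kind of query involved, assuming contained(f, g) asks whether every word satisfying f also satisfies g and that the default constructor is usable as below:

  import spot

  # The simplifier's automata-based implication checks boil down to
  # queries like this one.
  lcc = spot.language_containment_checker()
  f = spot.formula('GFa & GFb')
  g = spot.formula('GFa')
  print(lcc.contained(f, g))   # True expected: L(f) is a subset of L(g)
  print(lcc.contained(g, f))   # False expected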
@@ -64,14 +64,14 @@ namespace spot } tl_simplifier_cache(const bdd_dict_ptr& d) - : dict(d), lcc(d, true, true, false, false) + : dict(d), lcc(d, false, true, false, false) { } tl_simplifier_cache(const bdd_dict_ptr& d, const tl_simplifier_options& opt) : dict(d), options(opt), - lcc(d, true, true, false, false, opt.containment_max_states) + lcc(d, false, true, false, false, opt.containment_max_states) { options.containment_checks |= options.containment_checks_stronger; options.event_univ |= options.favor_event_univ; diff --git a/tests/Makefile.am b/tests/Makefile.am index 71d6a852f..4c2fe830c 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -222,6 +222,7 @@ TESTS_misc = \ TESTS_twa = \ core/385.test \ + core/521.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ diff --git a/tests/core/521.test b/tests/core/521.test new file mode 100755 index 000000000..002ab1ca2 --- /dev/null +++ b/tests/core/521.test @@ -0,0 +1,64 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #521. + +# The following formula used to take hours or days to translate with +# default settings (nobody was patient enough to wait) because +# automata-based containment checks were run to exprop=1. 
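+# ("exprop" is the translator option that computes successors for every
+# full assignment of the involved atomic propositions; it favors
+# determinism but is needlessly expensive for a mere containment test.)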
+ +cat >formula.ltl <<'EOF' +!a & !b & !c & !d & e & f & G(g & h & i & j & ((!c & !d) | (!c & d) | +(c & !d) | (c & d)) & ((!a & !b) | (!a & b) | (a & !b) | (a & b)) & +(k -> !l) & (f -> k) & (l -> !k) & (f -> !l) & (l -> !f) & (m -> !n) & +(m -> o) & (p -> !q) & (m -> !r) & (p -> !m) & (s -> !e) & (r -> !s) & +(e -> n) & (m -> !t) & (t -> !s) & (q -> u) & (o -> !t) & (m -> !p) & +(u -> o) & (p -> !v) & (q -> v) & (n -> w) & (x -> !s) & (u -> !t) & +(p -> w) & (u -> !p) & (t -> n) & (m -> !x) & (q -> !e) & (p -> !u) & +(s -> !n) & (s -> o) & (s -> m) & (v -> !e) & (x -> n) & (s -> !r) & +(e -> x) & (e -> !q) & (n -> r) & (w -> !s) & (m -> q) & (s -> !t) & +(u -> !x) & (e -> p) & (e -> !m) & (s -> !p) & (p -> r) & (e -> !o) & +(e -> !v) & (t -> x) & (q -> o) & (q -> !n) & (t -> !q) & (r -> !m) & +(t -> p) & (t -> !m) & (s -> !x) & (v -> o) & (e -> w) & (n -> !s) & +(q -> !t) & (t -> !o) & (x -> !q) & (e -> !u) & (q -> !p) & (t -> !v) & +(p -> !s) & (m -> u) & (x -> !m) & (v -> !t) & (s -> q) & (v -> !p) & +(m -> v) & (r -> w) & (t -> w) & (e -> t) & (e -> r) & (q -> !x) & +(t -> !u) & (p -> n) & (m -> !e) & (u -> v) & (x -> w) & (o -> !e) & +(x -> !u) & (s -> !w) & (u -> !e) & (t -> r) & (s -> u) & (e -> !s) & +(s -> v) & (n -> !q) & (x -> r) & (n -> !m) & (p -> x) & ((!a & !b & +!c & !d) | (!a & b & !c & d) | (a & !b & c & !d) | (a & b & c & d)) & +((!c & !d & k & o) -> X(!c & d)) & ((!c & !d & l & v & !(k & o)) -> +X(!c & d)) & ((!c & !d) -> ((!(k & o) & !(l & v)) -> X(!c & !d))) & +((!c & d & k & t) -> X(!c & !d)) & ((!c & d & l & p & !(k & t)) -> X(!c +& !d)) & ((!c & d & k & u & !(l & p & !(k & t))) -> X(c & !d)) & ((!c & +d & l & q & !(k & u & !(l & p & !(k & t)))) -> X(c & !d)) & ((!c & d) -> +((!(k & t) & !(l & p) & !(k & u) & !(l & q)) -> X(!c & d))) & ((c & !d +& k & x) -> X(!c & d)) & ((c & !d & l & n & !(k & x)) -> X(!c & d)) & +((c & !d & k & m & !(l & n & !(k & x))) -> X(c & d)) & ((c & !d & l & +s & !(k & m & !(l & n & !(k & x)))) -> X(c & d)) & ((c & !d) -> ((!(k & +x) & !(l & n) & !(k & m) & !(l & s)) -> X(c & !d))) & ((c & d & k & r) +-> X(c & !d)) & ((c & d & l & w & !(k & r)) -> X(c & !d)) & ((c & d) -> +((!(k & r) & !(l & w)) -> X(c & d)))) +EOF +test 5 = `tr -d "\r\n" < formula.ltl | ltl2tgba --stats=%s` From 843c4cdb91685e108d0d7e945512e4ccf9a8aea2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 15 Nov 2022 17:27:10 +0100 Subject: [PATCH 195/606] translate, simplify: limit containment checks of n-ary operators Fixes #521. * spot/tl/simplify.cc, spot/tl/simplify.hh, spot/twaalgos/translate.cc, spot/twaalgos/translate.hh: Add an option to limit automata-based implication checks of n-ary operators when too many operands are used. Defaults to 16. * bin/spot-x.cc, NEWS, doc/tl/tl.tex: Document it. * tests/core/bdd.test: Disable the limit for this test. --- NEWS | 5 +++++ bin/spot-x.cc | 3 +++ doc/tl/tl.tex | 6 ++++++ spot/tl/simplify.cc | 10 ++++++---- spot/tl/simplify.hh | 5 ++++- spot/twaalgos/translate.cc | 5 +++-- spot/twaalgos/translate.hh | 3 ++- tests/core/bdd.test | 7 ++++--- 8 files changed, 33 insertions(+), 11 deletions(-) diff --git a/NEWS b/NEWS index 1af76aed4..f19987c2c 100644 --- a/NEWS +++ b/NEWS @@ -6,6 +6,11 @@ New in spot 2.11.2.dev (not yet released) slower than necessary because the translator was configured to favor determinism unnecessarily. (Issue #521.) + - Automata-based implication checks for f&g and f|g could be + very slow when those n-ary operator had two many arguments. 
+ They have been limited to 16 operands, but this value can be changed + with option -x tls-max-ops=N. (Issue #521 too.) + New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 35f971fd6..1edb3f54e 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -47,6 +47,9 @@ depends on the --low, --medium, or --high settings.") }, { DOC("tls-max-states", "Maximum number of states of automata involved in automata-based \ implication checks for formula simplifications. Defaults to 64.") }, + { DOC("tls-max-ops", + "Maximum number of operands in n-ary opertors (or, and) on which \ +implication-based simplifications are attempted. Defaults to 16.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ as product or sum of subformulas.") }, diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index b6268d9cd..62a35635f 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -1926,6 +1926,12 @@ Many of the above rules were collected from the literature~\cite{somenzi.00.cav,tauriainen.03.tr,babiak.12.tacas} and sometimes generalized to support operators such as $\M$ and $\W$. +The first six rules, about n-ary operators $\AND$ and $\OR$, are +implemented for $n$ operands by testing each operand against all +other. To prevent the complexity to escalate, this is only performed +with up to 16 operands. That value can be changed in +``\verb|tl_simplifier_options::containment_max_ops|''. + The following rules mix implication-based checks with formulas that are pure eventualities ($e$) or that are purely universal ($u$). diff --git a/spot/tl/simplify.cc b/spot/tl/simplify.cc index cbc5857c5..4eac97282 100644 --- a/spot/tl/simplify.cc +++ b/spot/tl/simplify.cc @@ -2507,8 +2507,11 @@ namespace spot unsigned mos = mo.size(); if ((opt_.synt_impl | opt_.containment_checks) - && mo.is(op::Or, op::And)) + && mo.is(op::Or, op::And) + && (opt_.containment_max_ops == 0 + || opt_.containment_max_ops >= mos)) { + bool is_and = mo.is(op::And); // Do not merge these two loops, as rewritings from the // second loop could prevent rewritings from the first one // to trigger. @@ -2520,7 +2523,6 @@ namespace spot // if fo => !fi, then fi & fo = false // if !fi => fo, then fi | fo = true // if !fo => fi, then fi | fo = true - bool is_and = mo.is(op::And); if (c_->implication_neg(fi, fo, is_and) || c_->implication_neg(fo, fi, is_and)) return recurse(is_and ? formula::ff() : formula::tt()); @@ -2531,8 +2533,8 @@ namespace spot formula fo = mo.all_but(i); // if fi => fo, then fi | fo = fo // if fo => fi, then fi & fo = fo - if ((mo.is(op::Or) && c_->implication(fi, fo)) - || (mo.is(op::And) && c_->implication(fo, fi))) + if (((!is_and) && c_->implication(fi, fo)) + || (is_and && c_->implication(fo, fi))) { // We are about to pick fo, but hold on! // Maybe we actually have fi <=> fo, in diff --git a/spot/tl/simplify.hh b/spot/tl/simplify.hh index e5838544d..ec102a205 100644 --- a/spot/tl/simplify.hh +++ b/spot/tl/simplify.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017, 2019, 2020 Laboratoire de Recherche et Developpement +// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -96,6 +96,9 @@ namespace spot // If greater than 0, bound the number of states used by automata // in containment checks. 
unsigned containment_max_states = 0; + // If greater than 0, maximal number of terms in a multop to perform + // containment checks on this multop. + unsigned containment_max_ops = 16; }; // fwd declaration to hide technical details. diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 8a99313a3..d5b1aacd0 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -62,8 +62,8 @@ namespace spot gf_guarantee_set_ = true; } ltl_split_ = opt->get("ltl-split", 1); - int tls_max_states = opt->get("tls-max-states", 64); - tls_max_states_ = std::max(0, tls_max_states); + tls_max_states_ = std::max(0, opt->get("tls-max-states", 64)); + tls_max_ops_ = std::max(0, opt->get("tls-max-ops", 16)); exprop_ = opt->get("exprop", -1); branchpost_ = opt->get("branch-post", -1); } @@ -72,6 +72,7 @@ namespace spot { tl_simplifier_options options(false, false, false); options.containment_max_states = tls_max_states_; + options.containment_max_ops = tls_max_ops_; switch (level_) { case High: diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index d17c917b2..8428a2f22 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -155,7 +155,8 @@ namespace spot bool gf_guarantee_set_ = false; bool ltl_split_; int branchpost_ = -1; - unsigned tls_max_states_ = 0; + unsigned tls_max_states_ = 64; + unsigned tls_max_ops_ = 16; int exprop_; const option_map* opt_; }; diff --git a/tests/core/bdd.test b/tests/core/bdd.test index db03dbad5..85d410f8d 100755 --- a/tests/core/bdd.test +++ b/tests/core/bdd.test @@ -24,20 +24,21 @@ set -e # Make sure that setting the SPOT_BDD_TRACE envvar actually does # something. genltl --kr-n=3 | - SPOT_BDD_TRACE=1 ltl2tgba -x tls-max-states=0 -D >out 2>err + SPOT_BDD_TRACE=1 ltl2tgba -x tls-max-states=0,tls-max-ops=0 -D >out 2>err cat err grep spot: out && exit 1 grep 'spot: BDD package initialized' err # This value below, which is the number of time we need to garbage # collect might change if we improve the tool or change the way BuDDy # is initialized. -test 2 = `grep -c 'spot: BDD GC' err` +test 15 = `grep -c 'spot: BDD GC' err` # Minimal size for this automaton. # See also https://www.lrde.epita.fr/dload/spot/mochart10-fixes.pdf test "2240,4214" = `autfilt --stats=%s,%e out` # With the default value of tls-max-states, no GC is needed -genltl --kr-n=3 | SPOT_BDD_TRACE=1 ltl2tgba -D --stats=%s,%e >out 2>err +genltl --kr-n=3 | + SPOT_BDD_TRACE=1 ltl2tgba -D -x tls-max-ops=0 --stats=%s,%e >out 2>err cat err grep 'spot: BDD package initialized' err test 0 = `grep -c 'spot: BDD GC' err` From c2a3f2941d3c14b0ea1ce6794bbaa3cc5b709901 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 15 Nov 2022 17:50:45 +0100 Subject: [PATCH 196/606] ltl_to_tgba_fm: fix a memory leak on abort This issue surfaced in twacube.test after the previous patches. * spot/twaalgos/ltl2tgba_fm.cc: Release the formula namer on abort. * NEWS: Mention the bug. --- NEWS | 4 ++++ spot/twaalgos/ltl2tgba_fm.cc | 1 + 2 files changed, 5 insertions(+) diff --git a/NEWS b/NEWS index f19987c2c..a1f6267bb 100644 --- a/NEWS +++ b/NEWS @@ -11,6 +11,10 @@ New in spot 2.11.2.dev (not yet released) They have been limited to 16 operands, but this value can be changed with option -x tls-max-ops=N. (Issue #521 too.) + - Running ltl_to_tgba_fm() with an output_aborter (which is done + during automata-based implication checks) would leak memory on + abort. 
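To make the limit introduced in the previous patch concrete, here is a rough Python sketch; it assumes the tl_simplifier_options fields, including the new containment_max_ops, are exposed by the bindings like any other public member:

  import spot

  # Sketch of the new knob.  0 disables the limit; the default after
  # that patch is 16 operands.
  opt = spot.tl_simplifier_options()
  opt.containment_checks = True
  opt.containment_max_ops = 0
  simp = spot.tl_simplifier(opt)
  # F(a & b) implies F(a), so the first disjunct is redundant and the
  # containment-based rule may drop it.
  print(simp.simplify(spot.formula('F(a & b) | F(a) | F(c)')))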
+ New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index a354aaddd..42571f00f 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -2031,6 +2031,7 @@ namespace spot { if (aborter && aborter->too_large(a)) { + a->release_formula_namer(namer, false); if (!simplifier) delete s; return nullptr; From cfe1b0b70d54cfd56ecf6f26c9f190a88b3550d6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 17 Nov 2022 11:14:32 +0100 Subject: [PATCH 197/606] configure: --with-pythondir should also override pyexecdir Fixes #512. * configure.ac: Here. * NEWS: Mention the bug. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index a1f6267bb..feef2c0d8 100644 --- a/NEWS +++ b/NEWS @@ -15,6 +15,10 @@ New in spot 2.11.2.dev (not yet released) during automata-based implication checks) would leak memory on abort. + - configure --with-pythondir should also redefine pyexecdir, + otherwise, libraries get installed in the wrong place on Debian. + (Issue #512.) + New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/configure.ac b/configure.ac index 236063093..469ff0df4 100644 --- a/configure.ac +++ b/configure.ac @@ -192,7 +192,7 @@ if test "x${enable_python:-yes}" = xyes; then AC_ARG_WITH([pythondir], [AS_HELP_STRING([--with-pythondir], [override the computed pythondir])], - [pythondir=$withval], []) + [pythondir=$withval pyexecdir=$withval], []) fi From a032abf0c54482bc6003ea84c7d0911d1a507862 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 2 Dec 2022 14:44:03 +0100 Subject: [PATCH 198/606] parseaut: diagnose states that are unused and undefined Reported by Pierre Ganty. * spot/parseaut/parseaut.yy: Add diagnostics. * tests/core/parseaut.test: Adjust expected output, and add a test case. * NEWS: Mention the bug. --- NEWS | 4 ++++ spot/parseaut/parseaut.yy | 43 +++++++++++++++++++++++++++++++-------- tests/core/parseaut.test | 16 +++++++++++++-- 3 files changed, 53 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index feef2c0d8..96e3d3431 100644 --- a/NEWS +++ b/NEWS @@ -19,6 +19,10 @@ New in spot 2.11.2.dev (not yet released) otherwise, libraries get installed in the wrong place on Debian. (Issue #512.) + - The HOA parser used to silently declare unused and undefined states + (e.g., when the State: header declare many more states than the body + of the file). It now warns about those. + New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 52d448c16..4d96b8c1c 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1231,6 +1231,7 @@ body: states // diagnostic, so let not add another one. if (res.states >= 0) n = res.states; + std::vector unused_undeclared; for (unsigned i = 0; i < n; ++i) { auto& p = res.info_states[i]; @@ -1239,17 +1240,43 @@ body: states if (p.used) error(p.used_loc, "state " + std::to_string(i) + " has no definition"); - if (!p.used && res.complete) - if (auto p = res.prop_is_true("complete")) - { - error(res.states_loc, - "state " + std::to_string(i) + - " has no definition..."); - error(p.loc, "... despite 'properties: complete'"); - } + if (!p.used) + unused_undeclared.push_back(i); res.complete = false; } } + if (!unused_undeclared.empty()) + { + std::ostringstream out; + unsigned uus = unused_undeclared.size(); + int rangestart = -2; + int rangecur = -2; + const char* sep = uus > 1 ? 
"states " : "state "; + auto print_range = [&]() { + if (rangecur < 0) + return; + out << sep << rangestart; + if (rangecur != rangestart) + out << '-' << rangecur; + sep = ","; + }; + for (unsigned s: unused_undeclared) + { + if ((int)s != rangecur + 1) + { + print_range(); + rangestart = s; + } + rangecur = s; + } + print_range(); + out << (uus > 1 ? " are" : " is") << " unused and undefined"; + error(res.states_loc, out.str()); + + if (auto p = res.prop_is_true("complete")) + error(p.loc, "automaton is incomplete because it has " + "undefined states"); + } if (res.complete) if (auto p = res.prop_is_false("complete")) { diff --git a/tests/core/parseaut.test b/tests/core/parseaut.test index 56f2d54eb..7dabd563d 100755 --- a/tests/core/parseaut.test +++ b/tests/core/parseaut.test @@ -230,6 +230,7 @@ input:3.1-8: initial state number is larger than state count... input:4.1-9: ... declared here. input:1.1-4.9: missing 'Acceptance:' header input:3.1-8: initial state 1 has no definition +input:4.1-9: state 0 is unused and undefined EOF cat >input < Date: Fri, 2 Dec 2022 15:22:32 +0100 Subject: [PATCH 199/606] autfilt: print match count even on parse errors * bin/autfilt.cc: If -c is used, print the match_count even in present of parse errors. * tests/core/readsave.test: Adjust. * NEWS: Mention the bug. --- NEWS | 3 +++ bin/autfilt.cc | 12 +++++++----- tests/core/readsave.test | 13 +++++++------ 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/NEWS b/NEWS index 96e3d3431..ec6cff1bf 100644 --- a/NEWS +++ b/NEWS @@ -23,6 +23,9 @@ New in spot 2.11.2.dev (not yet released) (e.g., when the State: header declare many more states than the body of the file). It now warns about those. + - 'autfilt -c ...' should display a match count even in present of + parse errors. + New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/bin/autfilt.cc b/bin/autfilt.cc index e16ef770a..7cff60e8b 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1763,15 +1763,17 @@ main(int argc, char** argv) post.set_level(level); autfilt_processor processor(post, o.dict); - if (processor.run()) - return 2; - - // Diagnose unused -x options - extra_options.report_unused_options(); + int err = processor.run(); if (automaton_format == Count) std::cout << match_count << std::endl; + // Diagnose unused -x options + if (!err) + extra_options.report_unused_options(); + else + return 2; + check_cout(); return match_count ? 0 : 1; }); diff --git a/tests/core/readsave.test b/tests/core/readsave.test index dd4e2efaf..3780b4766 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -963,7 +963,8 @@ EOF test `autfilt -c --is-inherently-weak input7` = 1 test `autfilt -c --is-weak input7` = 0 test `autfilt -c --is-stutter-invariant input7` = 1 -autfilt --check input7 -H >output7 +autfilt --check input7 -H >output7 && exit 0 +test $? 
-eq 2 cat >expected7 <oneline.hoa -autfilt input8 --stats='%h' >oneline2.hoa -autfilt input8 --stats='%H' >oneline3.hoa -autfilt input8 --randomize --stats='%h' >oneline4.hoa -autfilt input8 --randomize --stats='%H' >oneline5.hoa +autfilt input8 -Hl >oneline.hoa && exit 1 +autfilt input8 --stats='%h' >oneline2.hoa && exit 1 +autfilt input8 --stats='%H' >oneline3.hoa && exit 1 +autfilt input8 --randomize --stats='%h' >oneline4.hoa && exit 1 +autfilt input8 --randomize --stats='%H' >oneline5.hoa && exit 1 diff oneline.hoa oneline2.hoa diff oneline.hoa oneline3.hoa diff oneline.hoa oneline4.hoa && exit 1 From 6b70edabf0029b7c4349b178f7b85c3ece1ba1d1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 2 Dec 2022 17:30:29 +0100 Subject: [PATCH 200/606] getopt: do not include sys/cdefs.h to please Alpine Linux * m4/getopt.m4: Pretend sys/cdefs.h is missing, so that Alpine linux does not output a warning which we would turn into an error. --- m4/getopt.m4 | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/m4/getopt.m4 b/m4/getopt.m4 index 595483d58..e291e0c66 100644 --- a/m4/getopt.m4 +++ b/m4/getopt.m4 @@ -1,5 +1,5 @@ # getopt.m4 serial 47 -dnl Copyright (C) 2002-2006, 2008-2020 Free Software Foundation, Inc. +dnl Copyright (C) 2002-2006, 2008-2020, 2022 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. @@ -363,13 +363,9 @@ dnl is ambiguous with environment values that contain newlines. AC_DEFUN([gl_GETOPT_SUBSTITUTE_HEADER], [ - AC_CHECK_HEADERS_ONCE([sys/cdefs.h]) - if test $ac_cv_header_sys_cdefs_h = yes; then - HAVE_SYS_CDEFS_H=1 - else - HAVE_SYS_CDEFS_H=0 - fi - AC_SUBST([HAVE_SYS_CDEFS_H]) + # pretend HAVE_SYS_CDEFS_H is always 0 including isn't + # really necessary and causes warning on Alpine Linux. + AC_SUBST([HAVE_SYS_CDEFS_H], [0]) AC_DEFINE([__GETOPT_PREFIX], [[rpl_]], [Define to rpl_ if the getopt replacement functions and variables From 86c433cf808cf77dee0a23b2eda76a32a393c98d Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Thu, 1 Dec 2022 13:26:53 +0100 Subject: [PATCH 201/606] mealy: fix incorrect assertion * spot/twaalgos/mealy_machine.cc (minimize_mealy): Do not compare result to the original unsplit machine without splitting it first. * tests/python/mealy.py: Add a test case. --- spot/twaalgos/mealy_machine.cc | 6 +++- tests/python/mealy.py | 52 ++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 9cdae279e..1126ad8e0 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -185,6 +185,7 @@ namespace spot { trace << "is_split_mealy(): Split mealy machine must define the named " "property \"state-player\"!\n"; + return false; } auto sp = get_state_players(m); @@ -3937,7 +3938,10 @@ namespace spot si.total_time = sglob.stop(); si.write(); - assert(is_split_mealy_specialization(mm, minmachine)); + assert(is_split_mealy_specialization( + mm->get_named_prop("state-player") ? 
mm + :split_2step(mm, false), + minmachine)); return minmachine; } diff --git a/tests/python/mealy.py b/tests/python/mealy.py index 7a884235e..7f6070146 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -626,3 +626,55 @@ exp = """digraph "" { } """ tc.assertEqual(res.to_str("dot", "g"), exp) + +# assertion bug: original machine is not always +# correctly split before testing inside minimize_mealy +aut = spot.automaton("""HOA: v1 +States: 2 +Start: 0 +AP: 11 "u0accel0accel" "u0accel0f1dcon23p81b" "u0accel0f1dcon231b" + "u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b" + "u0gear0gear" "u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b" + "u0steer0f1dsteering0angle0trackpos1b" "u0steer0steer" + "p0p0gt0rpm0f1dcon5523231b" "p0p0lt0rpm0f1dcon32323231b" + "p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 0 1 2 3 4 5 6 7 +--BODY-- +State: 0 +[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0 +[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0 +[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0 +[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0 +[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0 +[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0 +[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 + | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 + | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 + | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 + | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 + | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 + | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 + | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 + | 0&!1&!2&3&!4&!5&6&!7&8&9] 1 +State: 1 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 + | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 + | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 + | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 + | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 + | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 + | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 + | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 + | 0&!1&!2&3&!4&!5&6&!7] 1 +--END--""") + +spot.minimize_mealy(aut, -1) +spot.minimize_mealy(aut, 0) +spot.minimize_mealy(aut, 1) +auts = spot.split_2step(aut) +spot.minimize_mealy(auts, -1) +spot.minimize_mealy(auts, 0) +spot.minimize_mealy(auts, 1) \ No newline at end of file From 37d4e513d95776f0144e4d8f8581f62d4e6a83f3 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Sun, 27 Nov 2022 01:16:46 +0100 Subject: [PATCH 202/606] game: fix appending strategies bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When calling solve_parity_game() multiple times on the same automaton the strategies are appended one after the other. Reported by Dávid Smolka. * NEWS: Mention the bug. * spot/twaalgos/game.cc: Fix it. * tests/python/game.py: Test it. * THANKS: Add Dávid. --- NEWS | 4 ++++ THANKS | 1 + spot/twaalgos/game.cc | 1 + tests/python/game.py | 14 ++++++++++++++ 4 files changed, 20 insertions(+) diff --git a/NEWS b/NEWS index ec6cff1bf..b3f9b2c63 100644 --- a/NEWS +++ b/NEWS @@ -26,6 +26,10 @@ New in spot 2.11.2.dev (not yet released) - 'autfilt -c ...' should display a match count even in present of parse errors. + - Calling solve_parity_game() multiple times on the same automaton + used to append the new strategy to the existing one instead of + overwriting it. 
+ New in spot 2.11.2 (2022-10-26) Command-line tools: diff --git a/THANKS b/THANKS index 2b054666c..356d187a1 100644 --- a/THANKS +++ b/THANKS @@ -11,6 +11,7 @@ Christian Dax Christopher Ziegler Clément Tamines David Müller +Dávid Smolka Edmond Irani Liu Ernesto Posse Étienne Renault diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index ccb3b818e..f5699bf49 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -227,6 +227,7 @@ namespace spot region_t &w = *arena->get_or_set_named_prop("state-winner"); strategy_t &s = *arena->get_or_set_named_prop("strategy"); w.swap(w_.winner_); + s.clear(); s.reserve(s_.size()); for (auto as : s_) s.push_back(as == no_strat_mark ? 0 : (unsigned) as); diff --git a/tests/python/game.py b/tests/python/game.py index 647c8d347..a7080b696 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -371,3 +371,17 @@ for kind in [spot.parity_kind_min, spot.parity_kind_max]: tc.assertTrue(spot.solve_parity_game(g_test_split1)) c_strat1 = spot.get_strategy(g_test_split1) tc.assertTrue(c_strat == c_strat1) + +# Test that strategies are not appended +# if solve is called multiple times +aut = spot.make_twa_graph() +aut.set_buchi() +aut.new_states(2) +aut.new_edge(0,1,buddy.bddtrue, [0]) +aut.new_edge(1,0,buddy.bddtrue, []) +spot.set_state_players(aut, [False, True]) +spot.solve_game(aut) +S1 = list(spot.get_strategy(aut)) +spot.solve_game(aut) +S2 = list(spot.get_strategy(aut)) +tc.assertEqual(S1, S2) \ No newline at end of file From 5dbf601afb9c28db8002f6b60593f0acbad011e0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Dec 2022 16:07:21 +0100 Subject: [PATCH 203/606] * NEWS: Typos. --- NEWS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index b3f9b2c63..068be3a4d 100644 --- a/NEWS +++ b/NEWS @@ -2,7 +2,7 @@ New in spot 2.11.2.dev (not yet released) Bug fixes: - - Automata-based implication checks, used to simplify formulas where + - Automata-based implication checks, used to simplify formulas were slower than necessary because the translator was configured to favor determinism unnecessarily. (Issue #521.) @@ -23,7 +23,7 @@ New in spot 2.11.2.dev (not yet released) (e.g., when the State: header declare many more states than the body of the file). It now warns about those. - - 'autfilt -c ...' should display a match count even in present of + - 'autfilt -c ...' should display a match count even in presence of parse errors. - Calling solve_parity_game() multiple times on the same automaton From 4629d074ab0b1374c9c055b8b1285a8e371fd843 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 7 Dec 2022 11:26:51 +0100 Subject: [PATCH 204/606] Fix semantics of [*i..j] and [:*i..j] * doc/tl/tl.tex: After a discussion with Antoin, it appears that the semantics previously given for f[*0..j] was not considering that f[*0] should accept any sequence of one letter. --- doc/tl/tl.tex | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 62a35635f..f9205cced 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -668,20 +668,17 @@ $a$ is an atomic proposition. 
\sigma\VDash f\FUSION g&\iff \exists k\in\N,\,(\sigma^{0..k} \VDash f)\land(\sigma^{k..} \VDash g)\\ \sigma\VDash f\STAR{\mvar{i}..\mvar{j}}& \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma=\varepsilon \\ - \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land (\exists k\in\N,\, - (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} - \VDash f\STAR{\mvar{0}..\mvar{j-1}}))\\ + \text{either} & \mvar{i}=0 \land\mvar{j}=0\land \sigma=\varepsilon \\ + \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land \bigl((\sigma = \varepsilon) \lor (\sigma + \VDash f\STAR{\mvar{1}..\mvar{j}})\bigr)\\ \text{or} & \mvar{i}>0 \land \mvar{j}>0 \land (\exists k\in\N,\, (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} \VDash f\STAR{\mvar{i-1}..\mvar{j-1}}))\\ \end{cases}\\ \sigma\VDash f\STAR{\mvar{i}..} & \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma=\varepsilon \\ - \text{or} & \mvar{i}=0 \land (\exists k\in\N,\, - (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} - \VDash f\STAR{\mvar{0}..}))\\ + \text{either} & \mvar{i}=0 \land \bigl((\sigma=\varepsilon)\lor(\sigma + \VDash f\STAR{\mvar{1}..})\bigr)\\ \text{or} & \mvar{i}>0 \land (\exists k\in\N,\, (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} \VDash f\STAR{\mvar{i-1}..}))\\ @@ -689,19 +686,16 @@ $a$ is an atomic proposition. \sigma\VDash f\FSTAR{\mvar{i}..\mvar{j}}& \iff \begin{cases} \text{either} & \mvar{i}=0 \land \mvar{j}=0 \land \sigma\VDash\1 \\ - \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land (\exists k\in\N,\, - (\sigma^{0..k}\VDash f) \land (\sigma^{k..} - \VDash f\FSTAR{\mvar{0}..\mvar{j-1}}))\\ + \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land \bigl((\sigma\VDash\1)\lor(\sigma + \VDash f\FSTAR{\mvar{1}..\mvar{j}})\bigr)\\ \text{or} & \mvar{i}>0 \land \mvar{j}>0 \land (\exists k\in\N,\, (\sigma^{0..k}\VDash f) \land (\sigma^{k..} \VDash f\FSTAR{\mvar{i-1}..\mvar{j-1}}))\\ \end{cases}\\ \sigma\VDash f\FSTAR{\mvar{i}..} & \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma\VDash\1 \\ - \text{or} & \mvar{i}=0 \land (\exists k\in\N,\, - (\sigma^{0..k}\VDash f) \land (\sigma^{k..} - \VDash f\FSTAR{\mvar{0}..}))\\ + \text{either} & \mvar{i}=0 \land \bigl((\sigma\VDash\1) + \lor(\sigma \VDash f\FSTAR{\mvar{1}..})\bigr)\\ \text{or} & \mvar{i}>0 \land (\exists k\in\N,\, (\sigma^{0..k}\VDash f) \land (\sigma^{k..} \VDash f\FSTAR{\mvar{i-1}..}))\\ From 8ed9e3381f9720bc9580ea3ce5e7cc911a1affa5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 8 Dec 2022 11:51:07 +0100 Subject: [PATCH 205/606] formula: introduce one_plus(), and saturate predefined formulas * spot/tl/formula.hh, spot/tl/formula.cc (one_plus): New. (fnode): Add a saturated argument. (tt_, ff_, eword_, one_plus, one_star): Create saturated node. (destroy): Do not check for id() < 3. --- spot/tl/formula.cc | 21 ++++++++++++++++----- spot/tl/formula.hh | 37 ++++++++++++++++++++++++++++--------- 2 files changed, 44 insertions(+), 14 deletions(-) diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index fb4ab0d49..a3145884d 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -660,6 +660,16 @@ namespace spot switch (o) { case op::Star: + if (max == unbounded() && child == tt_) + { + // bypass normal construction: 1[*] and 1[+] are + // frequently used, so they are not reference counted. 
+ if (min == 0) + return one_star(); + if (min == 1) + return one_plus(); + } + neutral = eword(); break; case op::FStar: @@ -810,7 +820,7 @@ namespace spot return tt(); // ![*0] = 1[+] if (f->is_eword()) - return bunop(op::Star, tt(), 1); + return one_plus(); auto fop = f->kind(); // "Not" is an involution. @@ -1138,10 +1148,11 @@ namespace spot return id; } - const fnode* fnode::ff_ = new fnode(op::ff, {}); - const fnode* fnode::tt_ = new fnode(op::tt, {}); - const fnode* fnode::ew_ = new fnode(op::eword, {}); + const fnode* fnode::ff_ = new fnode(op::ff, {}, true); + const fnode* fnode::tt_ = new fnode(op::tt, {}, true); + const fnode* fnode::ew_ = new fnode(op::eword, {}, true); const fnode* fnode::one_star_ = nullptr; // Only built when necessary. + const fnode* fnode::one_plus_ = nullptr; // Only built when necessary. void fnode::setup_props(op o) { @@ -1817,7 +1828,7 @@ namespace spot { unsigned cnt = 0; for (auto i: m.uniq) - if (i->id() > 3 && i != one_star_) + if (!i->saturated_) { if (!cnt++) std::cerr << "*** m.uniq is not empty ***\n"; diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index d01b8379c..9239e1a68 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -149,7 +149,7 @@ namespace spot { if (SPOT_LIKELY(refs_)) --refs_; - else if (SPOT_LIKELY(id_ > 2) && SPOT_LIKELY(!saturated_)) + else if (SPOT_LIKELY(!saturated_)) // last reference to a node that is not a constant destroy_aux(); } @@ -351,10 +351,18 @@ namespace spot static const fnode* one_star() { if (!one_star_) - one_star_ = bunop(op::Star, tt(), 0); + one_star_ = new fnode(op::Star, tt_, 0, unbounded(), true); return one_star_; } + /// \see formula::one_plus + static const fnode* one_plus() + { + if (!one_plus_) + one_plus_ = new fnode(op::Star, tt_, 1, unbounded(), true); + return one_plus_; + } + /// \see formula::ap_name const std::string& ap_name() const; @@ -536,7 +544,7 @@ namespace spot template - fnode(op o, iter begin, iter end) + fnode(op o, iter begin, iter end, bool saturated = false) // Clang has some optimization where is it able to combine the // 4 movb initializing op_,min_,max_,saturated_ into a single // movl. Also it can optimize the three byte-comparisons of @@ -551,7 +559,7 @@ namespace spot #if __llvm__ min_(0), max_(0), #endif - saturated_(0) + saturated_(saturated) { size_t s = std::distance(begin, end); if (SPOT_UNLIKELY(s > (size_t) UINT16_MAX)) @@ -563,13 +571,15 @@ namespace spot setup_props(o); } - fnode(op o, std::initializer_list l) - : fnode(o, l.begin(), l.end()) + fnode(op o, std::initializer_list l, + bool saturated = false) + : fnode(o, l.begin(), l.end(), saturated) { } - fnode(op o, const fnode* f, uint8_t min, uint8_t max) - : op_(o), min_(min), max_(max), saturated_(0), size_(1) + fnode(op o, const fnode* f, uint8_t min, uint8_t max, + bool saturated = false) + : op_(o), min_(min), max_(max), saturated_(saturated), size_(1) { children[0] = f; setup_props(o); @@ -579,6 +589,7 @@ namespace spot static const fnode* tt_; static const fnode* ew_; static const fnode* one_star_; + static const fnode* one_plus_; op op_; // operator uint8_t min_; // range minimum (for star-like operators) @@ -1552,7 +1563,15 @@ namespace spot /// \brief Return a copy of the formula 1[*]. static formula one_star() { - return formula(fnode::one_star()->clone()); + // no need to clone, 1[*] is not reference counted + return formula(fnode::one_star()); + } + + /// \brief Return a copy of the formula 1[+]. 
+ static formula one_plus() + { + // no need to clone, 1[+] is not reference counted + return formula(fnode::one_plus()); } /// \brief Whether the formula is an atomic proposition or its From 720c380412e4c70d134d227b9bd7e78cf6e4d13e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 8 Dec 2022 13:54:19 +0100 Subject: [PATCH 206/606] formula: new trivial simplifications Add the following rules: - f|[+] = [+] if f rejects [*0] - f|[*] = [*] if f accepts [*0] - f&&[+] = f if f rejects [*0] - b:b[*i..j] = b[*max(i,1)..j] - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1,1), j+l-1] * spot/tl/formula.cc: Implement the new rules. * doc/tl/tl.tex: Document them. * tests/core/equals.test: Test them. * NEWS: Add them --- NEWS | 9 +++ doc/tl/tl.tex | 18 ++++-- spot/tl/formula.cc | 126 ++++++++++++++++++++++++++++++++++++++--- tests/core/equals.test | 8 ++- 4 files changed, 147 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index 068be3a4d..775bb710c 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,14 @@ New in spot 2.11.2.dev (not yet released) + Library: + + - The following new trivial simplifications have been implemented for SEREs: + - f|[+] = [+] if f rejects [*0] + - f|[*] = [*] if f accepts [*0] + - f&&[+] = f if f rejects [*0] + - b:b[*i..j] = b[*max(i,1)..j] + - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1,1), j+l-1] + Bug fixes: - Automata-based implication checks, used to simplify formulas were diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index f9205cced..288a5da0c 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -853,10 +853,18 @@ The following rules are all valid with the two arguments swapped. \1\OR b &\equiv \1 & \1 \FUSION f & \equiv f\mathrlap{\text{~if~}\varepsilon\nVDash f}\\ && - \STAR{} \AND f &\equiv f & - \STAR{} \OR f &\equiv \1\mathrlap{\STAR{}} & + \STAR{} \ANDALT f &\equiv f & + \STAR{} \OR f &\equiv \mathrlap{\STAR{}} & && - \STAR{} \CONCAT f &\equiv \STAR{}\mathrlap{\text{~if~}\varepsilon\VDash f}& \\ + \STAR{} \CONCAT f &\equiv \STAR{}\text{~if~}\varepsilon\VDash f& \\ + && + \PLUS{} \ANDALT f &\equiv f \text{~if~}\varepsilon\nVDash f& + \PLUS{} \OR f &\equiv \begin{cases} + \mathrlap{\STAR{}\text{~if~} \varepsilon\VDash f} \\ + \mathrlap{\PLUS{}\text{~if~} \varepsilon\nVDash f} \\ + \end{cases} & + && + && \\ \eword\AND f &\equiv f & \eword\ANDALT f &\equiv \begin{cases} @@ -880,7 +888,9 @@ The following rules are all valid with the two arguments swapped. 
f\STAR{\mvar{i}..\mvar{j}}\CONCAT f&\equiv f\STAR{\mvar{i+1}..\mvar{j+1}} & f\STAR{\mvar{i}..\mvar{j}}\CONCAT f\STAR{\mvar{k}..\mvar{l}}&\equiv f\STAR{\mvar{i+k}..\mvar{j+l}}\\ f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f&\equiv f\FSTAR{\mvar{i+1}..\mvar{j+1}} & -f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f\FSTAR{\mvar{k}..\mvar{l}}&\equiv f\FSTAR{\mvar{i+k}..\mvar{j+l}} +f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f\FSTAR{\mvar{k}..\mvar{l}}&\equiv f\FSTAR{\mvar{i+k}..\mvar{j+l}}\\ +b\STAR{\mvar{i}..\mvar{j}}\FUSION b &\equiv b\STAR{\mvar{\max(i,1)}..\mvar{j}} & +b\STAR{\mvar{i}..\mvar{j}}\FUSION b\STAR{\mvar{k}..\mvar{l}} &\equiv b\mathrlap{\STAR{\mvar{\max(i,1)+\max(k,1)-1}..\mvar{j+l-1}}} \end{align*} \section{SERE-LTL Binding Operators} diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index a3145884d..370a50e8f 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -307,11 +307,14 @@ namespace spot unsigned orig_size = v.size(); - const fnode* neutral; - const fnode* neutral2; - const fnode* abs; - const fnode* abs2; - const fnode* weak_abs; + const fnode* neutral; // neutral element + const fnode* neutral2; // second neutral element (if any) + const fnode* abs; // absorbent element + const fnode* abs2; // second absorbent element (if any) + const fnode* weak_abs; // almost absorbent element (if any) + // The notion of "almost absorbent" captures situation where the + // present of the element can be simplified in itself or another + // element depending on a condition on the rest of the formula. switch (o) { case op::And: @@ -323,7 +326,17 @@ namespace spot break; case op::AndRat: neutral = one_star(); - neutral2 = nullptr; + { + // If this AndRat contains an operand that does not accept + // the empty word, and that is not [+], then any [+] can be + // removed. + bool one_non_eword_non_plus = + std::find_if(v.begin(), v.end(), + [o = one_plus()](const fnode* f) { + return !f->accepts_eword() && f != o; + }) != v.end(); + neutral2 = one_non_eword_non_plus ? one_plus() : nullptr; + } abs = ff(); abs2 = nullptr; weak_abs = eword(); @@ -349,7 +362,7 @@ namespace spot neutral2 = nullptr; abs = one_star(); abs2 = nullptr; - weak_abs = nullptr; + weak_abs = one_plus(); gather_bool(v, op::Or); break; case op::Concat: @@ -506,11 +519,10 @@ namespace spot else return abs; } - else + else if (o == op::AndNLM) { // Similarly, a* & 1 & (c;d) = c;d // a* & 1 & c* = 1 - assert(o == op::AndNLM); vec tmp; for (auto i: v) { @@ -527,6 +539,27 @@ namespace spot tmp.emplace_back(weak_abs); v.swap(tmp); } + else if (o == op::OrRat) + { + // We have a[*] | [+] | c = [*] + // and a | [+] | c = [+] + // So if [+] has been seen, check if some term + // recognize the empty word. 
+ bool acc_eword = false; + for (i = v.begin(); i != v.end(); ++i) + { + acc_eword |= (*i)->accepts_eword(); + (*i)->destroy(); + } + if (acc_eword) + return abs; + else + return weak_abs; + } + else + { + SPOT_UNREACHABLE(); + } } else if (o == op::Concat || o == op::Fusion) { @@ -613,6 +646,81 @@ namespace spot *fpos = newfs; } } + // also + // b[*i..j]:b -> b[*max(1,i),j] + // b:b[*i..j] -> b[*max(1,i),j] + // b[*i..j]:b[*k..l] -> b[*max(i,1)+max(j,1)-1,j+l-1] + if (o == op::Fusion && v.size() > 1) + { + i = v.begin(); + while (i != v.end()) + { + if (!(((*i)->is(op::Star) && (*i)->nth(0)->is_boolean()) + || (*i)->is_boolean())) + { + ++i; + continue; + } + const fnode *b; + unsigned min; + unsigned max; + if ((*i)->is_boolean()) + { + min = max = 1; + b = *i; + } + else + { + b = (*i)->nth(0); + min = (*i)->min(); + max = (*i)->max(); + } + vec::iterator prev = i; + ++i; + bool changed = false; + while (i != v.end()) + { + unsigned min2; + unsigned max2; + if ((*i)->is_boolean()) + { + if (*i != b) + break; + min2 = max2 = 1; + } + else if ((*i)->is(op::Star) && (*i)->nth(0)->is_boolean()) + { + if ((*i)->nth(0) != b) + break; + min2 = (*i)->min(); + max2 = (*i)->max(); + } + else + { + break; + } + // Now we can merge prev and i. + min = min + (min == 0) + min2 + (min2 == 0) - 1; + assert(max != 0 && max2 != 0); + if (max2 == unbounded() || max == unbounded()) + max = unbounded(); + else if (max + max2 < unbounded()) + max = max + max2 - 1; + else + break; + changed = true; + (*i)->destroy(); + i = v.erase(i); + } + if (changed) + { + const fnode* newf = + fnode::bunop(op::Star, b->clone(), min, max); + (*prev)->destroy(); + *prev = newf; + } + } + } } } diff --git a/tests/core/equals.test b/tests/core/equals.test index f00216347..a67c4b1ef 100755 --- a/tests/core/equals.test +++ b/tests/core/equals.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2015, 2021 Laboratoire de Recherche et +# Copyright (C) 2009-2012, 2014-2015, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -196,6 +196,12 @@ G({1}<>->1), 1 {(a*;b|c)[:*0]}, 1 {(a*;b|c)[:*1]}, {(a*;b|c)} {(a;b):(a;b):(a;b)[:*2]:(a;b):b*:b*:(c;d)[:*1]}, {(a;b)[:*5]:b*[:*2]:(c;d)} +{((a;b)|[+]|(c;d[*]));a}, {[+];a} +{((a;b)|[+]|(d[*]));a}, {[*];a} +{((a;b)&&[+]&&(d[*]));a}, {((a;b)&&(d[*]));a} +{((a;b|[*0])&&[+]&&(d[*]));a}, {((a;b|[*0])&&[+]&&(d[*]));a} +{(a;c):b[*3..5]:b[*10]:(a;c)}, {(a;c):b[*12..14]:(a;c)} +{(a;c):b:b[*3..5]:b:b[*0..4]:(a;c)}, {(a;c):b[*3..8]:(a;c)} EOF From 1248d326aaa03debb539db2ae34693f01553233b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 8 Dec 2022 17:27:32 +0100 Subject: [PATCH 207/606] Work around spurious g++-12 warnings * spot/twaalgos/ltl2tgba_fm.cc, spot/tl/formula.hh, spot/twaalgos/translate.cc: Add SPOT_ASSUME in various places to help g++. 
--- spot/tl/formula.hh | 4 +++- spot/twaalgos/ltl2tgba_fm.cc | 15 ++++++++++----- spot/twaalgos/translate.cc | 9 ++++++--- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 9239e1a68..074ec8b02 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -294,7 +294,9 @@ namespace spot { if (SPOT_UNLIKELY(i >= size())) report_non_existing_child(); - return children[i]; + const fnode* c = children[i]; + SPOT_ASSUME(c != nullptr); + return c; } /// \see formula::ff diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 42571f00f..9768dfbfd 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -1026,11 +1026,16 @@ namespace spot bool coacc = false; auto& st = sm->states_of(n); for (auto l: st) - if (namer->get_name(l).accepts_eword()) - { - coacc = true; - break; - } + { + formula lf = namer->get_name(l); + // Somehow gcc 12.2.0 thinks lf can be nullptr. + SPOT_ASSUME(lf != nullptr); + if (lf.accepts_eword()) + { + coacc = true; + break; + } + } if (!coacc) { // ... or if any of its successors is coaccessible. diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index d5b1aacd0..339463426 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -209,9 +209,12 @@ namespace spot if (!rest.empty() && !oblg.empty()) { auto safety = [](formula f) - { - return f.is_syntactic_safety(); - }; + { + // Prevent gcc 12.2.0 from warning us that f could be a + // nullptr formula. + SPOT_ASSUME(f != nullptr); + return f.is_syntactic_safety(); + }; auto i = std::remove_if(oblg.begin(), oblg.end(), safety); rest.insert(rest.end(), i, oblg.end()); oblg.erase(i, oblg.end()); From d7feeca13e2ede1948d90dfcdfe39b125fdd8338 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 09:40:27 +0100 Subject: [PATCH 208/606] Release Spot 2.11.3 * NEWS, configure.ac, doc/org/setup.org: Bump version to 2.11.3. --- NEWS | 2 +- configure.ac | 2 +- doc/org/setup.org | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 068be3a4d..37555a50e 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.11.2.dev (not yet released) +New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/configure.ac b/configure.ac index 469ff0df4..3478f41b9 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.2.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.3], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index bb568e5f1..78091ea45 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11.2 -#+MACRO: LASTRELEASE 2.11.2 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.2.tar.gz][=spot-2.11.2.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-2/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-10-26 +#+MACRO: SPOTVERSION 2.11.3 +#+MACRO: LASTRELEASE 2.11.3 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.3.tar.gz][=spot-2.11.3.tar.gz=]] +#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-3/NEWS][summary of the changes]] +#+MACRO: LASTDATE 2022-12-09 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From 09e147ee4b4949e0ebc7ec172e4ba7de144d448f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 09:43:18 +0100 Subject: [PATCH 209/606] * NEWS, configure.ac: Bump version to 2.11.3.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 37555a50e..b1f7c2d79 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.3.dev (not yet released) + + Nothing yet. + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/configure.ac b/configure.ac index 3478f41b9..68fe4cab7 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.3], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.3.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From fb63dfc309683d1ac443158605e22c94a11775e6 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Tue, 29 Nov 2022 14:01:45 +0100 Subject: [PATCH 210/606] introduce partitioned_relabel_here Function taking an automaton and trying to relabel it by partitioning the old conditions and encode the different subsets of the partition with new variables * spot/priv/Makefile.am: Add * spot/priv/partitioned_relabel.hh , spot/priv/partitioned_relabel.cc: try_partition_me, computes the partition of a given vector of bdds * spot/twaalgos/relabel.hh , spot/twaalgos/relabel.cc: Here. 
Adapt also relabel() to cope with the different type of relabeling_maps * tests/python/_partitioned_relabel.ipynb , tests/python/except.py: Test and Usage * tests/Makefile.am: Add test --- spot/priv/Makefile.am | 2 + spot/priv/partitioned_relabel.cc | 147 +++ spot/priv/partitioned_relabel.hh | 81 ++ spot/twaalgos/relabel.cc | 461 ++++++++- spot/twaalgos/relabel.hh | 33 + tests/Makefile.am | 1 + tests/python/_partitioned_relabel.ipynb | 1224 +++++++++++++++++++++++ tests/python/except.py | 15 + 8 files changed, 1920 insertions(+), 44 deletions(-) create mode 100644 spot/priv/partitioned_relabel.cc create mode 100644 spot/priv/partitioned_relabel.hh create mode 100644 tests/python/_partitioned_relabel.ipynb diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index d4e9cc77c..317292bd3 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -29,6 +29,8 @@ libpriv_la_SOURCES = \ bddalloc.hh \ freelist.cc \ freelist.hh \ + partitioned_relabel.cc \ + partitioned_relabel.hh \ robin_hood.hh \ satcommon.hh\ satcommon.cc\ diff --git a/spot/priv/partitioned_relabel.cc b/spot/priv/partitioned_relabel.cc new file mode 100644 index 000000000..f28ea5554 --- /dev/null +++ b/spot/priv/partitioned_relabel.cc @@ -0,0 +1,147 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche +// de l'Epita (LRE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" + +#include + + +relabeling_map +bdd_partition::to_relabeling_map(twa_graph& for_me) const +{ + relabeling_map res; + // Change to unordered_map? + bdd_dict_ptr bdddict = for_me.get_dict(); + + bool use_inner = ig->state_storage(0).new_label != bddfalse; + std::vector doskip + = use_inner ? std::vector(ig->num_states(), false) + : std::vector(); + + auto bdd2form = [&bdddict](const bdd& cond) + { + return bdd_to_formula(cond, bdddict); + }; + + for (const auto& [old_letter, s] : treated) + { + formula new_letter_form = bdd2form(ig->state_storage(s).new_label); + assert(res.count(new_letter_form) == 0); + if (use_inner) + doskip[s] = true; + res[new_letter_form] = bdd2form(old_letter); + } + + if (use_inner) + { + // This implies that the split option was false, + // so we can store further info + auto& all_cond = *all_cond_ptr; + const unsigned Norig = all_cond.size(); + + for (unsigned i = 0; i < Norig; ++i) + { + // Internal node -> new ? 
+ if (doskip[i]) + continue; + // Leave node -> already exists + if (ig->state_storage(i).succ == 0) + continue; + doskip[i] = true; + formula new_letter_form + = bdd2form(ig->state_storage(i).new_label); +#ifdef NDEBUG + res[new_letter_form] = bdd2form(all_cond[i]); +#else + // Check if they are the same + formula old_form = bdd2form(all_cond[i]); + if (res.count(new_letter_form) == 0) + res[new_letter_form] = old_form; + else + assert(res[new_letter_form] == old_form); +#endif + } + } + return res; +} + +/// \brief Tries to partition the given condition vector \a all_cond +/// abandons at \a max_letter. +/// \return The corresponding bdd_partition +/// \note A pointer to all_cond is captured internally, it needs +/// to outlive the returned bdd_partition +bdd_partition +try_partition_me(const std::vector& all_cond, unsigned max_letter) +{ + // We create vector that will be succesively filled. + // Each entry corresponds to a "letter", of the partition + const size_t Norig = all_cond.size(); + + bdd_partition result(all_cond); + + auto& treated = result.treated; + auto& ig = *result.ig; + + for (unsigned io = 0; io < Norig; ++io) + { + bdd cond = all_cond[io]; + const auto Nt = treated.size(); + for (size_t in = 0; in < Nt; ++in) + { + if (cond == bddfalse) + break; + if (treated[in].first == cond) + { + // Found this very condition -> make transition + ig.new_edge(io, treated[in].second); + cond = bddfalse; + break; + } + if (bdd_have_common_assignment(treated[in].first, cond)) + { + bdd inter = treated[in].first & cond; + // Create two new states + unsigned ssplit = ig.new_states(2); + // ssplit becomes the state without the intersection + // ssplit + 1 becomes the intersection + // Both of them are implied by the original node, + // Only inter is implied by the current letter + ig.new_edge(treated[in].second, ssplit); + ig.new_edge(treated[in].second, ssplit+1); + ig.new_edge(io, ssplit+1); + treated.emplace_back(inter, ssplit+1); + // Update + cond -= inter; + treated[in].first -= inter; + treated[in].second = ssplit; + if (treated.size() > max_letter) + return bdd_partition{}; + } + } + if (cond != bddfalse) + { + unsigned sc = ig.new_state(); + treated.emplace_back(cond, sc); + ig.new_edge(io, sc); + } + } + + result.relabel_succ = true; + return result; +} \ No newline at end of file diff --git a/spot/priv/partitioned_relabel.hh b/spot/priv/partitioned_relabel.hh new file mode 100644 index 000000000..cd19ffaea --- /dev/null +++ b/spot/priv/partitioned_relabel.hh @@ -0,0 +1,81 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche +// de l'Epita (LRE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
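+
+// Illustration (comment added for exposition, under the small example
+// conditions {a, a&b}): try_partition_me() builds the two disjoint
+// letters a&!b and a&b.  In the implication graph the root of "a"
+// reaches both corresponding leaves, while the root of "a&b" reaches
+// only the a&b leaf, so every original condition is exactly the
+// disjunction of the leaves below its root.  The relabeling code in
+// twaalgos/relabel.cc then needs just one fresh proposition to number
+// these two letters.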
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + + +using namespace spot; + +struct bdd_partition +{ + struct S + { + bdd new_label = bddfalse; + }; + struct T + { + }; + using implication_graph = digraph; + + // A pointer to the conditions to be partitioned + const std::vector* all_cond_ptr; + // Graph with the invariant that + // children imply parents + // Leaves from the partition + // original conditions are "root" nodes + std::unique_ptr ig; + // todo: technically there are at most two successors, so a graph + // is "too" generic + // All conditions currently part of the partition + // unsigned corresponds to the associated node + std::vector> treated; + std::vector new_aps; + bool relabel_succ = false; + + bdd_partition() = default; + bdd_partition(const std::vector& all_cond) + : all_cond_ptr(&all_cond) + , ig{std::make_unique(2*all_cond.size(), + 2*all_cond.size())} + { + // Create the roots of all old conditions + // Each condition is associated to the state with + // the same index + const unsigned Norig = all_cond.size(); + ig->new_states(Norig); + } + + // Facilitate conversion + // This can only be called when letters have already + // been computed + relabeling_map + to_relabeling_map(twa_graph& for_me) const; +}; // bdd_partition + + +bdd_partition +try_partition_me(const std::vector& all_cond, unsigned max_letter); \ No newline at end of file diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index ac1556aec..ba5e4ed14 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -20,60 +20,275 @@ #include "config.h" #include #include +#include + +#include + +#include +#include +#include + namespace spot { - void - relabel_here(twa_graph_ptr& aut, relabeling_map* relmap) + namespace { - std::unique_ptr pairs(bdd_newpair()); - auto d = aut->get_dict(); - std::vector vars; - std::set newvars; - vars.reserve(relmap->size()); - bool bool_subst = false; - auto aplist = aut->ap(); - for (auto& p: *relmap) - { - if (!p.first.is(op::ap)) - throw std::runtime_error - ("relabel_here: old labels should be atomic propositions"); - if (!p.second.is_boolean()) - throw std::runtime_error - ("relabel_here: new labels should be Boolean formulas"); + void + comp_new_letters(bdd_partition& part, + twa_graph& aut, + const std::string& var_prefix, + bool split) + { + auto& ig = *part.ig; + const auto& treated = part.treated; + auto& new_aps = part.new_aps; + // Get the new variables and their negations + const unsigned Nnl = treated.size(); + const unsigned Nnv = std::ceil(std::log2(Nnl)); + std::vector> Nv_vec(Nnv); - // Don't attempt to rename APs that are not used. - if (std::find(aplist.begin(), aplist.end(), p.first) == aplist.end()) - continue; + new_aps.reserve(Nnv); + for (unsigned i = 0; i < Nnv; ++i) + { + // todo check if it does not exist / use anonymous? 
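+            // Each fresh proposition acts as one bit of a letter index:
+            // leaveidx2label() below encodes leaf number idx as the
+            // conjunction that picks, for every bit position c, the
+            // positive or negative literal of variable c.
+            // E.g. (illustration) with 3 letters, Nnv == 2 and letter 2
+            // (binary 10) becomes !v0 & v1, where v0/v1 stand for the
+            // propositions named var_prefix + "0"/"1".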
+ new_aps.push_back(formula::ap(var_prefix+std::to_string(i))); + int v = aut.register_ap(new_aps.back()); + Nv_vec[i] = {bdd_nithvar(v), bdd_ithvar(v)}; + } - int oldv = aut->register_ap(p.first); - vars.emplace_back(oldv); - if (p.second.is(op::ap)) - { - int newv = aut->register_ap(p.second); - newvars.insert(newv); - bdd_setpair(pairs.get(), oldv, newv); - } - else - { - p.second.traverse([&](const formula& f) - { - if (f.is(op::ap)) - newvars.insert(aut->register_ap(f)); - return false; - }); - bdd newb = formula_to_bdd(p.second, d, aut); - bdd_setbddpair(pairs.get(), oldv, newb); - bool_subst = true; - } + auto leaveidx2label = [&](unsigned idx) + { + unsigned c = 0; + unsigned rem = idx; + bdd thisbdd = bddtrue; + while (rem) + { + thisbdd &= Nv_vec[c][rem & 1]; + ++c; + rem >>= 1; + } + for (; c < Nnv; ++c) + thisbdd &= Nv_vec[c][0]; + return thisbdd; + }; + + // Compute only labels of leaves + for (unsigned idx = 0; idx < Nnl; ++idx) + ig.state_storage(treated[idx].second).new_label = leaveidx2label(idx); + + // We will label the implication graph with the new letters + auto relabel_impl = [&](unsigned s, auto&& relabel_impl_rec) + { + auto& ss = ig.state_storage(s); + if (ss.new_label != bddfalse) + return ss.new_label; + else + { + assert((ss.succ != 0) && "Should not be a leave"); + bdd thisbdd = bddfalse; + for (const auto& e : ig.out(s)) + thisbdd |= relabel_impl_rec(e.dst, relabel_impl_rec); + ss.new_label = thisbdd; + return thisbdd; + } + }; + + if (!split) + { + // For split only leaves is ok, + // disjunction is done via transitions + // This will compute the new_label for all states in the ig + const unsigned Norig = part.all_cond_ptr->size(); + for (unsigned i = 0; i < Norig; ++i) + relabel_impl(i, relabel_impl); + } + } // comp_new_letters + + // Recursive traversal of implication graph + void replace_label_(unsigned si, + unsigned esrc, unsigned edst, + bdd& econd, + const bdd_partition::implication_graph& ig, + twa_graph& aut) + { + auto& sstore = ig.state_storage(si); + if (sstore.succ == 0) + { + if (econd == bddfalse) + econd = sstore.new_label; + else + aut.new_edge(esrc, edst, sstore.new_label); + } + else + { + for (const auto& e_ig : ig.out(si)) + replace_label_(e_ig.dst, esrc, edst, econd, ig, aut); + } + } + + relabeling_map + partitioned_relabel_here_(twa_graph& aut, bool split, + unsigned max_letter, + unsigned max_letter_mult, + const bdd& concerned_ap, + bool treat_all, + const std::string& var_prefix) + { + auto abandon = []() + { + return relabeling_map{}; + }; + + + // When split we need to distiguish effectively new and old edges + if (split) + { + aut.get_graph().remove_dead_edges_(); + aut.get_graph().sort_edges_(); + aut.get_graph().chain_edges_(); + } + + // Get all conditions present in the automaton + std::vector all_cond; + bdd ignoredcon = bddtrue; + std::unordered_map all_cond_id2idx; + + all_cond.reserve(0.1*aut.num_edges()); + all_cond_id2idx.reserve(0.1*aut.num_edges()); + + // Map for all supports + // and whether or not they are to be relabeled + std::unordered_map, bdd_hash> all_supports; + + for (const auto& e : aut.edges()) + { + auto it = all_supports.find(e.cond); + if (it != all_supports.end()) + continue; // Already treated + bdd se = bddtrue; + bool is_concerned = true; + if (!treat_all) + { + se = bdd_support(e.cond); + is_concerned = bdd_implies(concerned_ap, se); + } + + all_supports.emplace(e.cond, + std::make_pair(is_concerned, se)); + + if (!is_concerned) + { + assert(bdd_existcomp(se, concerned_ap) == bddtrue + && "APs 
are not partitioned"); + continue; + } + + auto [_, ins] = + all_cond_id2idx.try_emplace(e.cond.id(), all_cond.size()); + if (ins) + { + all_cond.push_back(e.cond); + if (all_cond.size() > max_letter) + return abandon(); + } + } + + unsigned stop = max_letter; + if (max_letter_mult != -1u) + { + // Make sure it does not overflow + if (max_letter_mult <= (-1u / ((unsigned) all_cond.size()))) + stop = std::min(stop, + (unsigned) (max_letter_mult*all_cond.size())); + } + + auto this_partition = try_partition_me(all_cond, stop); + + if (!this_partition.relabel_succ) + return abandon(); + + comp_new_letters(this_partition, aut, var_prefix, split); + + // An original condition is represented by all leaves that imply it + auto& ig = *this_partition.ig; + const unsigned Ns = aut.num_states(); + const unsigned Nt = aut.num_edges(); + for (unsigned s = 0; s < Ns; ++s) + { + for (auto& e : aut.out(s)) + { + if (aut.edge_number(e) > Nt) + continue; + if (!all_supports.at(e.cond).first) + continue; // Edge not concerned + unsigned idx = all_cond_id2idx[e.cond.id()]; + + if (split) + { + // initial call + // We can not hold a ref to the edge + // as the edgevector might get reallocated + bdd econd = bddfalse; + unsigned eidx = aut.edge_number(e); + replace_label_(idx, e.src, e.dst, + econd, ig, aut); + aut.edge_storage(eidx).cond = econd; + } + else + e.cond = ig.state_storage(idx).new_label; + } // for edge + } // for state + return this_partition.to_relabeling_map(aut); + } + + void + relabel_here_ap_(twa_graph_ptr& aut_ptr, relabeling_map relmap) + { + assert(aut_ptr); + twa_graph& aut = *aut_ptr; + + std::unique_ptr pairs(bdd_newpair()); + auto d = aut.get_dict(); + std::vector vars; + std::set newvars; + vars.reserve(relmap.size()); + bool bool_subst = false; + auto aplist = aut.ap(); + + for (auto& p: relmap) + { + // Don't attempt to rename APs that are not used. + if (std::find(aplist.begin(), aplist.end(), p.first) == aplist.end()) + continue; + + int oldv = aut.register_ap(p.first); + vars.emplace_back(oldv); + if (p.second.is(op::ap)) + { + int newv = aut.register_ap(p.second); + newvars.insert(newv); + bdd_setpair(pairs.get(), oldv, newv); + } + else + { + p.second.traverse([&](const formula& f) + { + if (f.is(op::ap)) + newvars.insert(aut.register_ap(f)); + return false; + }); + bdd newb = formula_to_bdd(p.second, d, aut_ptr); + bdd_setbddpair(pairs.get(), oldv, newb); + bool_subst = true; + } } bool need_cleanup = false; typedef bdd (*op_t)(const bdd&, bddPair*); op_t op = bool_subst ? static_cast(bdd_veccompose) : static_cast(bdd_replace); - for (auto& t: aut->edges()) + for (auto& t: aut.edges()) { bdd c = (*op)(t.cond, pairs.get()); t.cond = c; @@ -86,14 +301,172 @@ namespace spot // p0) for (auto v: vars) if (newvars.find(v) == newvars.end()) - aut->unregister_ap(v); + aut.unregister_ap(v); // If some of the edges were relabeled false, we need to clean the // automaton. 
if (need_cleanup) { - aut->merge_edges(); // remove any edge labeled by 0 - aut->purge_dead_states(); // remove useless states + aut.merge_edges(); // remove any edge labeled by 0 + aut.purge_dead_states(); // remove useless states } + } + + void + relabel_here_gen_(twa_graph_ptr& aut_ptr, relabeling_map relmap) + { + assert(aut_ptr); + twa_graph& aut = *aut_ptr; + + auto form2bdd = [this_dict = aut.get_dict()](const formula& f) + { + return formula_to_bdd(f, this_dict, this_dict); + }; + + auto bdd2form = [bdddict = aut.get_dict()](const bdd& cond) + { + return bdd_to_formula(cond, bdddict); + }; + + + // translate formula -> bdd + std::unordered_map base_letters; + base_letters.reserve(relmap.size()); + + std::unordered_map comp_letters; + std::unordered_set ignored_letters; + + // Necessary to detect unused + bdd new_var_supp = bddtrue; + auto translate = [&](bdd& cond) + { + // Check if known + for (const auto& map : {base_letters, comp_letters}) + { + auto it = map.find(cond); + if (it != map.end()) + { + cond = it->second; + return; + } + } + + // Check if known to be ignored + if (auto it = ignored_letters.find(cond); + it != ignored_letters.end()) + return; + + // Check if ignored + bdd cond_supp = bdd_support(cond); + if (!bdd_implies(new_var_supp, cond_supp)) + { + ignored_letters.insert(cond); + assert(bdd_existcomp(cond_supp, new_var_supp) == bddtrue + && "APs are not partitioned"); + return; + } + + // Compute + // compose the given cond from a disjunction of base_letters + bdd old_cond = bddfalse; + for (const auto& [k, v] : base_letters) + { + if (bdd_implies(k, cond)) + old_cond |= v; + } + comp_letters[cond] = old_cond; + cond = old_cond; + return; + }; + + for (const auto& [new_f, old_f] : relmap) + { + bdd new_cond = form2bdd(new_f); + new_var_supp &= bdd_support(new_cond); + base_letters[new_cond] = form2bdd(old_f); + } + + + // Save the composed letters? With a special seperator like T/F? 
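+    // translate() replaces a condition written entirely over the new
+    // variables by the disjunction of the original conditions v of
+    // every entry (k, v) of the relabeling map whose key satisfies
+    // k => cond; conditions using none of the new variables are left
+    // untouched.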
+ // Is swapping between formula <-> bdd expensive + for (auto& e : aut.edges()) + translate(e.cond); + + // Remove the new auxilliary variables from the aut + bdd c_supp = new_var_supp; + while (c_supp != bddtrue) + { + aut.unregister_ap(bdd_var(c_supp)); + c_supp = bdd_high(c_supp); + } + + return; + } + + } // Namespace + + void + relabel_here(twa_graph_ptr& aut, relabeling_map* relmap) + { + if (!relmap || relmap->empty()) + return; + + // There are two different types of relabeling maps: + // 1) The "traditional": + // New atomic propositions (keys) correspond to general formulas over + // the original propositions (values) + // 2) The one resulting from partitioned_relabel_here + // Here general (boolean) formulas over new propositions (keys) + // are associated to general formulas over + // the original propositions (values) + + if (!std::all_of(relmap->begin(), relmap->end(), + [](const auto& it){return it.first.is_boolean() + && it.second.is_boolean(); })) + throw std::runtime_error + ("relabel_here: old labels and new labels " + "should be Boolean formulas"); + + bool only_ap = std::all_of(relmap->cbegin(), relmap->cend(), + [](const auto& p) + { + return p.first.is(op::ap); + }); + + if (only_ap) + relabel_here_ap_(aut, *relmap); + else + relabel_here_gen_(aut, *relmap); + } + + relabeling_map + partitioned_relabel_here(twa_graph_ptr& aut, + bool split, + unsigned max_letter, + unsigned max_letter_mult, + const bdd& concerned_ap, + std::string var_prefix) + { + if (!aut) + throw std::runtime_error("aut is null!"); + + if (std::find_if(aut->ap().cbegin(), aut->ap().cend(), + [var_prefix](const auto& ap) + { + return ap.ap_name().find(var_prefix) == 0; + }) != aut->ap().cend()) + throw std::runtime_error("partitioned_relabel_here(): " + "The given prefix for new variables may not appear as " + "a prefix of existing variables."); + + // If concerned_ap == bddtrue -> all aps are concerned + bool treat_all = concerned_ap == bddtrue; + bdd concerned_ap_ + = treat_all ? aut->ap_vars() : concerned_ap; + return partitioned_relabel_here_(*aut, split, + max_letter, max_letter_mult, + concerned_ap_, + treat_all, + var_prefix); } } diff --git a/spot/twaalgos/relabel.hh b/spot/twaalgos/relabel.hh index e10fe8903..34f7a0a41 100644 --- a/spot/twaalgos/relabel.hh +++ b/spot/twaalgos/relabel.hh @@ -21,6 +21,10 @@ #include #include +#include + +#include +#include namespace spot { @@ -33,4 +37,33 @@ namespace spot /// or relabel_bse(). SPOT_API void relabel_here(twa_graph_ptr& aut, relabeling_map* relmap); + + + /// \brief Replace conditions in \a aut with non-overlapping conditions + /// over fresh variables. + /// + /// Partitions the conditions in the automaton, then (binary) encodes + /// them using fresh propositions. + /// This can lead to an exponential explosion in the number of + /// conditions. The operations is aborted if either + /// the number of new letters (subsets of the partition) exceeds + /// \a max_letter OR \a max_letter_mult times the number of conditions + /// in the original automaton. + /// The argument \a concerned_ap can be used to filter out transitions. + /// If given, only the transitions whose support intersects the + /// concerned_ap (or whose condition is T) are taken into account. + /// The fresh aps will be enumerated and prefixed by \a var_prefix. + /// These variables need to be fresh, i.e. may not exist yet (not checked) + /// + /// \note If concerned_ap is given, then there may not be an edge + /// whose condition uses ap inside AND outside of concerned_ap. 
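+  ///
+  /// A possible use, given a twa_graph_ptr \a aut (sketch only; i0 and
+  /// i1 stand for whatever input propositions the automaton at hand
+  /// actually uses):
+  /// \code
+  ///   bdd ins = bdd_ithvar(aut->register_ap("i0"))
+  ///             & bdd_ithvar(aut->register_ap("i1"));
+  ///   relabeling_map m = partitioned_relabel_here(aut, false, -1u, -1u, ins);
+  ///   // ... work on the relabeled automaton ...
+  ///   relabel_here(aut, &m);      // undo the relabeling
+  /// \endcode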
+ /// Mostly used in a game setting to distinguish between + /// env and player transitions. + SPOT_API relabeling_map + partitioned_relabel_here(twa_graph_ptr& aut, bool split = false, + unsigned max_letter = -1u, + unsigned max_letter_mult = -1u, + const bdd& concerned_ap = bddtrue, + std::string var_prefix = "__nv"); + } diff --git a/tests/Makefile.am b/tests/Makefile.am index 4c2fe830c..0810df809 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -436,6 +436,7 @@ TESTS_python = \ python/origstate.py \ python/otfcrash.py \ python/parsetgba.py \ + python/_partitioned_relabel.ipynb \ python/parity.py \ python/pdegen.py \ python/prodexpt.py \ diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb new file mode 100644 index 000000000..a9c1c7af7 --- /dev/null +++ b/tests/python/_partitioned_relabel.ipynb @@ -0,0 +1,1224 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "4d896402", + "metadata": {}, + "outputs": [], + "source": [ + "import spot, buddy" + ] + }, + { + "cell_type": "markdown", + "id": "94e87f9c", + "metadata": {}, + "source": [ + "# Partitioned relabeling\n", + "\n", + "Partitioned relabeling will:\n", + "First compute a partition over all conditions appearing in the automaton.\n", + "That is, the set of new conditions is such that (1) they do not overlap (2) all valuations that verify some condition in the original automaton also verify (exactly one) of the new conditions.\n", + "These new conditions can be thought of as letters in a \"classical\" sense.\n", + "Then we create new aps and encode the \"number\" of these letters using the fresh aps, resulting in new letters which are a single valuation over the fresh aps.\n", + "\n", + "This can be helpful if there are many aps, but few different conditions over them\n", + "\n", + "The algorithm comes in two flavours:\n", + "\n", + "We maintain the original number of edges. Therefore the new label correspond to a disjunction over new letters (split=False).\n", + "We split each edge into its letters, creating more edges (split=True)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62123fa9", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b0311a80> >" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#Relabeling a graph\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b0 = buddy.bdd_ithvar(aut.register_ap(\"b0\"))\n", + "nb0 = buddy.bdd_nithvar(aut.register_ap(\"b0\"))\n", + "b1 = buddy.bdd_ithvar(aut.register_ap(\"b1\"))\n", + "nb1 = buddy.bdd_nithvar(aut.register_ap(\"b1\"))\n", + "b2 = buddy.bdd_ithvar(aut.register_ap(\"b2\"))\n", + "nb2 = buddy.bdd_nithvar(aut.register_ap(\"b2\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b0&b1&b2)\n", + "aut.new_edge(0,4,a&nb0&nb1&nb2)\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d4c8e977", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 | __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b0311a80> >" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "relabel_dict = spot.partitioned_relabel_here(aut)\n", + "\n", + "print(relabel_dict.size())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6f90a095", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + 
"\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b0311a80> >" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Undo the relabeling\n", + "spot.relabel_here(aut, relabel_dict)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "513067ab", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b02c0d50> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 5\n", + "Start: 0\n", + "AP: 6 \"a\" \"b0\" \"b1\" \"b2\" \"__nv0\" \"__nv1\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[!4&!5] 1\n", + "[4&!5] 2\n", + "[!4&5] 3\n", + "[4&5] 4\n", + "[4&!5] 1\n", + "[4&5] 1\n", + "[!4&5] 1\n", + "[4&5] 2\n", + "[!4&5] 2\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b02c0d50> >" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Relabeling the same graph using the split option\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b0 = buddy.bdd_ithvar(aut.register_ap(\"b0\"))\n", + "nb0 = buddy.bdd_nithvar(aut.register_ap(\"b0\"))\n", + "b1 = buddy.bdd_ithvar(aut.register_ap(\"b1\"))\n", + "nb1 = 
buddy.bdd_nithvar(aut.register_ap(\"b1\"))\n", + "b2 = buddy.bdd_ithvar(aut.register_ap(\"b2\"))\n", + "nb2 = buddy.bdd_nithvar(aut.register_ap(\"b2\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b0&b1&b2)\n", + "aut.new_edge(0,4,a&nb0&nb1&nb2)\n", + "\n", + "display(aut)\n", + "xx = spot.partitioned_relabel_here(aut, True)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "50c6a08b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b02c0d50> >" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Undo the relabeling -> disjoint conditions over the original ap\n", + "spot.relabel_here(aut, relabel_dict)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d2efd313", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b02c90f0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 6\n", + "Start: 0\n", + "AP: 5 \"a\" \"__nv0\" \"__nv1\" \"b\" \"c\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[!1 | !2] 1\n", + "[!1&2 | 1&!2] 2\n", + "[!1&2] 3\n", + "[1&!2] 4\n", + "[4] 5\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "State: 5\n", + "--END--\n" 
+ ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 | !__nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(__nv0 & !__nv1) | (!__nv0 & __nv1)\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f65b02c90f0> >" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Working only on a subset of the aps\n", + "# Note that True is always relabeled\n", + "\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(6)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "nb = buddy.bdd_nithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "nc = buddy.bdd_nithvar(aut.register_ap(\"c\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b)\n", + "aut.new_edge(0,4,a&nb)\n", + "aut.new_edge(0,5,c)\n", + "\n", + "display(aut)\n", + "\n", + "concerned_aps = a & b # concerned aps are given as a conjunction of positive aps\n", + "# As partitioning can be exponentially costly,\n", + "# one can limit the number of new letters generated before abadoning\n", + "# This can be done either as a hard limit and/or as the number of current condition\n", + "# times a factor\n", + "relabel_dict = spot.partitioned_relabel_here(aut, False, 1000, 1000, concerned_aps)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1fbc8813", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 6\n", + "Start: 0\n", + "AP: 3 \"a\" \"b\" \"c\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[t] 1\n", + "[0] 2\n", + "[0&1] 3\n", + "[0&!1] 4\n", + "[2] 5\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "State: 5\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], 
+ "text/plain": [ + " *' at 0x7f65b02c90f0> >" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#undo partial relabeling\n", + "spot.relabel_here(aut, relabel_dict)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/python/except.py b/tests/python/except.py index 34aa61ad2..e531882dd 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -359,3 +359,18 @@ except RuntimeError as e: tc.assertIn(filename, str(e)) else: report_missing_exception() + + +# Relabeling must use new variables +aut = spot.make_twa_graph() +aut.new_states(2) +ap = buddy.bdd_ithvar(aut.register_ap("__nv0")) +aut.new_edge(0,1,ap) + +try: + spot.partitioned_relabel_here(aut) +except RuntimeError as e: + tc.assertIn("The given prefix for new variables", + str(e)) +else: + report_missing_exception() \ No newline at end of file From 6e2e7c942e8fce377c5feb031b260120651c0288 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Tue, 29 Nov 2022 14:58:26 +0100 Subject: [PATCH 211/606] Using partitioned_relabel_here Put the new function to use in order to speed up mealy machine minimization * spot/twaalgos/mealy_machine.cc: Here * spot/twaalgos/synthesis.cc , spot/twaalgos/synthesis.hh: Helper function to relabel games * tests/python/_mealy.ipynb , tests/python/except.py , tests/python/_partitioned_relabel.ipynb: Adapt/expand tests --- spot/twaalgos/mealy_machine.cc | 314 +++++-- spot/twaalgos/synthesis.cc | 95 ++ spot/twaalgos/synthesis.hh | 36 + tests/python/_mealy.ipynb | 292 ++++-- tests/python/_partitioned_relabel.ipynb | 1147 ++++++++++++++++++++++- tests/python/except.py | 20 +- 6 files changed, 1722 insertions(+), 182 deletions(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 1126ad8e0..df9ad6017 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -36,7 +36,9 @@ #include #include #include +#include #include +#include #include @@ -869,7 +871,7 @@ namespace split_cstr_time, prob_init_build_time, sat_time, build_time, refine_time, total_time; long long n_classes, n_refinement, n_lit, n_clauses, - n_iteration, n_bisim_let, n_min_states, done; + n_iteration, n_letters_part, n_bisim_let, n_min_states, done; std::string task; const std::string instance; @@ -892,6 +894,7 @@ namespace , n_lit{-1} , n_clauses{-1} , n_iteration{-1} + , n_letters_part{-1} , n_bisim_let{-1} , n_min_states{-1} , done{-1} @@ -935,8 +938,8 @@ namespace << "player_incomp_time,incomp_time,split_all_let_time," << "split_min_let_time,split_cstr_time,prob_init_build_time," << "sat_time,build_time,refine_time,total_time,n_classes," - << "n_refinement,n_lit,n_clauses,n_iteration,n_bisim_let," - << "n_min_states,done\n"; + << "n_refinement,n_lit,n_clauses,n_iteration,n_letters_part," + << "n_bisim_let,n_min_states,done\n"; } assert(!task.empty()); @@ -965,6 +968,7 @@ namespace f(ss, n_lit); f(ss, n_clauses); f(ss, n_iteration); + f(ss, n_letters_part); f(ss, n_bisim_let); f(ss, n_min_states); f(ss, done, false); @@ -1280,8 +1284,8 @@ namespace } square_matrix - 
compute_incomp(const_twa_graph_ptr mm, const unsigned n_env, - satprob_info& si) + compute_incomp_impl_(const_twa_graph_ptr mm, const unsigned n_env, + satprob_info& si, bool is_partitioned) { const unsigned n_tot = mm->num_states(); @@ -1292,20 +1296,6 @@ namespace // Have two states already been checked for common pred square_matrix checked_pred(n_env, false); - // We also need a transposed_graph - auto mm_t = make_twa_graph(mm->get_dict()); - mm_t->copy_ap_of(mm); - mm_t->new_states(n_env); - - for (unsigned s = 0; s < n_env; ++s) - { - for (const auto& e_env : mm->out(s)) - { - unsigned dst_env = mm->out(e_env.dst).begin()->dst; - mm_t->new_edge(dst_env, s, e_env.cond); - } - } - // Utility function auto get_cond = [&mm](unsigned s)->const bdd& {return mm->out(s).begin()->cond; }; @@ -1367,15 +1357,28 @@ namespace #endif // direct incomp: Two env states can reach incompatible player states // under the same input + // The original graph mm is not sorted, and most of the + // sorting is not rentable + // However, bdd_have_common_assignment simply becomes equality auto direct_incomp = [&](unsigned s1, unsigned s2) { for (const auto& e1 : mm->out(s1)) for (const auto& e2 : mm->out(s2)) { + if (is_partitioned && (e1.cond != e2.cond)) + continue; if (!is_p_incomp(e1.dst - n_env, e2.dst - n_env)) continue; //Compatible -> no prob // Reachable under same letter? - if (bdd_have_common_assignment(e1.cond, e2.cond)) + if (is_partitioned) // -> Yes + { + trace << s1 << " and " << s2 << " directly incomp " + "due to successors " << e1.dst << " and " << e2.dst + << '\n'; + return true; + } + else if (!is_partitioned + && bdd_have_common_assignment(e1.cond, e2.cond)) { trace << s1 << " and " << s2 << " directly incomp " "due to successors " << e1.dst << " and " << e2.dst @@ -1388,7 +1391,27 @@ namespace // If two states can reach an incompatible state // under the same input, then they are incompatible as well - auto tag_predec = [&](unsigned s1, unsigned s2) + + // Version if the input is not partitioned + // We also need a transposed_graph + twa_graph_ptr mm_t = nullptr; + if (!is_partitioned) + { + mm_t = make_twa_graph(mm->get_dict()); + mm_t->copy_ap_of(mm); + mm_t->new_states(n_env); + + for (unsigned s = 0; s < n_env; ++s) + { + for (const auto& e_env : mm->out(s)) + { + unsigned dst_env = mm->out(e_env.dst).begin()->dst; + mm_t->new_edge(dst_env, s, e_env.cond); + } + } + } + + auto tag_predec_unpart = [&](unsigned s1, unsigned s2) { static std::vector> todo_; assert(todo_.empty()); @@ -1422,17 +1445,98 @@ namespace // Done tagging all pred }; + // Version of taging taking advantaged of partitioned conditions + struct S + { + }; + struct T + { + int id; + }; + std::unique_ptr> mm_t_part; + if (is_partitioned) + { + mm_t_part = std::make_unique>(n_env, mm->num_edges()); + mm_t_part->new_states(n_env); + + for (unsigned s = 0; s < n_env; ++s) + { + for (const auto& e_env : mm->out(s)) + { + unsigned dst_env = mm->out(e_env.dst).begin()->dst; + mm_t_part->new_edge(dst_env, s, e_env.cond.id()); + } + } + + // Now we need to sort the edge to ensure that + // the next algo works correctly + mm_t_part->sort_edges_srcfirst_([](const auto& e1, const auto& e2) + {return e1.id < e2.id; }); + mm_t_part->chain_edges_(); + } + + auto tag_predec_part = [&](unsigned s1, unsigned s2) + { + static std::vector> todo_; + assert(todo_.empty()); + + todo_.emplace_back(s1, s2); + + while (!todo_.empty()) + { + auto [i, j] = todo_.back(); + todo_.pop_back(); + if (checked_pred.get(i, j)) + continue; + // If predecs 
are already marked incomp + auto e_it_i = mm_t_part->out(i); + auto e_it_j = mm_t_part->out(j); + + auto e_it_i_e = e_it_i.end(); + auto e_it_j_e = e_it_j.end(); + + auto e_i = e_it_i.begin(); + auto e_j = e_it_j.begin(); + + // Joint iteration over both edge groups + while ((e_i != e_it_i_e) && (e_j != e_it_j_e)) + { + if (e_i->id < e_j->id) + ++e_i; + else if (e_j->id < e_i->id) + ++e_j; + else + { + assert(e_j->id == e_i->id); + trace << e_i->dst << " and " << e_j->dst << " tagged incomp" + " due to " << e_i->id << '\n'; + inc_env.set(e_i->dst, e_j->dst, true); + todo_.emplace_back(e_i->dst, e_j->dst); + ++e_i; + ++e_j; + } + } + checked_pred.set(i, j, true); + } + // Done tagging all pred + }; + for (unsigned s1 = 0; s1 < n_env; ++s1) for (unsigned s2 = s1 + 1; s2 < n_env; ++s2) { if (inc_env.get(s1, s2)) continue; // Already done + // Check if they are incompatible for some letter // We have to check all pairs of edges if (direct_incomp(s1, s2)) { inc_env.set(s1, s2, true); - tag_predec(s1, s2); + if (is_partitioned) + tag_predec_part(s1, s2); + else + tag_predec_unpart(s1, s2); + } } @@ -1442,9 +1546,38 @@ namespace #endif si.incomp_time = si.restart(); return inc_env; + } // incomp no partition + + square_matrix + compute_incomp(const_twa_graph_ptr mm, const unsigned n_env, + satprob_info& si, int max_letter_mult) + { + // Try to generate a graph with partitioned env transitions + auto mm2 = make_twa_graph(mm, twa::prop_set::all()); + set_state_players(mm2, get_state_players(mm)); + set_synthesis_outputs(mm2, get_synthesis_outputs(mm)); + + // todo get a good value for cutoff + auto relabel_maps + = partitioned_game_relabel_here(mm2, true, false, true, + false, -1u, max_letter_mult); + bool succ = !relabel_maps.env_map.empty(); + + si.n_letters_part = relabel_maps.env_map.size(); + +#ifdef TRACE + if (succ) + std::cout << "Relabeling succesfull with " << relabel_maps.env_map.size() + << " letters\n"; + else + std::cout << "Relabeling aborted\n"; +#endif + + return compute_incomp_impl_(succ ? 
const_twa_graph_ptr(mm2) : mm, + n_env, si, succ); } - struct part_sol_t + struct part_sol_t { std::vector psol; std::vector is_psol; @@ -1602,6 +1735,11 @@ namespace return std::make_pair(n_group, which_group); } + // Helper function + // Computes the set of all original letters implied by the leaves + // This avoids transposing the graph + + // Computes the letters of each group // Letters here means bdds such that for all valid // assignments of the bdd we go to the same dst from the same source @@ -1611,7 +1749,9 @@ namespace { //To avoid recalc std::set all_bdd; - std::set treated_bdd; + std::vector all_bdd_v; + std::unordered_map node2idx; + std::unordered_multimap>> sigma_map; @@ -1649,6 +1789,11 @@ namespace continue; else { + // Store bdds as vector for compatibility + all_bdd_v.clear(); // Note: sorted automatically by id + std::transform(all_bdd.begin(), all_bdd.end(), + std::back_inserter(all_bdd_v), + [](int i){return bdd_from_int(i); }); // Insert it already into the sigma_map trace << "Group " << groupidx << " generates a new alphabet\n"; sigma_map.emplace(std::piecewise_construct, @@ -1658,62 +1803,60 @@ namespace } } + // Result red.share_sigma_with.push_back(groupidx); red.all_letters.emplace_back(); auto& group_letters = red.all_letters.back(); - treated_bdd.clear(); + // Compute it + auto this_part = try_partition_me(all_bdd_v, -1u); + assert(this_part.relabel_succ); - for (unsigned s = 0; s < n_env; ++s) + // Transform it + // group_letters is pair + // There are as many new_letters as treated bdds in the partition + group_letters.clear(); + group_letters.reserve(this_part.treated.size()); + node2idx.clear(); + node2idx.reserve(this_part.treated.size()); + + for (const auto& [label, node] : this_part.treated) { - if (red.which_group[s] != groupidx) - continue; - for (const auto& e : mmw->out(s)) - { - bdd rcond = e.cond; - const int econd_id = rcond.id(); - trace << rcond << " - " << econd_id << std::endl; - if (treated_bdd.count(econd_id)) - { - trace << "Already treated" << std::endl; - continue; - } - treated_bdd.insert(econd_id); - - assert(rcond != bddfalse && "Deactivated edges are forbiden"); - // Check against all currently used "letters" - const size_t osize = group_letters.size(); - for (size_t i = 0; i < osize; ++i) - { - if (group_letters[i].first == rcond) - { - rcond = bddfalse; - group_letters[i].second.insert(econd_id); - break; - } - bdd inter = group_letters[i].first & rcond; - if (inter == bddfalse) - continue; // No intersection - if (group_letters[i].first == inter) - group_letters[i].second.insert(econd_id); - else - { - group_letters[i].first -= inter; - group_letters.emplace_back(inter, - group_letters[i].second); - group_letters.back().second.insert(econd_id); - } - - rcond -= inter; - // Early exit? - if (rcond == bddfalse) - break; - } - // Leftovers? 
- if (rcond != bddfalse) - group_letters.emplace_back(rcond, std::set{econd_id}); - } + node2idx[node] = group_letters.size(); + group_letters.emplace_back(std::piecewise_construct, + std::forward_as_tuple(label), + std::forward_as_tuple()); } + + // Go through the graph for each original letter + auto search_leaves + = [&ig = *this_part.ig, &group_letters, &node2idx] + (int orig_letter_id, unsigned s, auto&& search_leaves_) -> void + { + if (ig.state_storage(s).succ == 0) + { + // Leaf + unsigned idx = node2idx[s]; + auto& setidx = group_letters[idx].second; + setidx.emplace_hint(setidx.end(), orig_letter_id); + } + else + { + // Traverse + for (const auto& e : ig.out(s)) + search_leaves_(orig_letter_id, e.dst, search_leaves_); + } + }; + + const unsigned Norig = all_bdd_v.size(); + for (unsigned s = 0; s < Norig; ++s) + search_leaves(all_bdd_v[s].id(), s, search_leaves); + + // Verify that all letters imply at least one original letter + assert(std::all_of(group_letters.begin(), group_letters.end(), + [](const auto& l){return !l.second.empty(); })); + + #ifdef TRACE trace << "this group letters" << std::endl; auto sp = [&](const auto& c) @@ -3467,6 +3610,7 @@ namespace for (unsigned letter_idx = 0; letter_idx < n_ml; ++letter_idx) { const auto& ml_list = group_map[letter_idx]; + assert(ml_list.begin() != ml_list.end()); // Incompatibility is commutative // new / new constraints const auto it_end = ml_list.end(); @@ -3794,12 +3938,9 @@ namespace return minmach; } // while loop } // try_build_machine -} // namespace -namespace spot -{ - twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, - int premin) + twa_graph_ptr minimize_mealy_(const const_twa_graph_ptr& mm, + int premin, int max_letter_mult) { bdd outputs = ensure_mealy("minimize_mealy", mm); @@ -3866,8 +4007,9 @@ namespace spot si.reorg_time = si.restart(); // Compute incompatibility based on bdd - auto incompmat = compute_incomp(mmw, n_env, si); + auto incompmat = compute_incomp(mmw, n_env, si, max_letter_mult); #ifdef TRACE + std::cerr << "Final incomp mat\n"; incompmat.print(std::cerr); #endif @@ -3944,6 +4086,15 @@ namespace spot minmachine)); return minmachine; } +} // namespace + +namespace spot +{ + twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, + int premin) + { + return minimize_mealy_(mm, premin, 10); + } twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, @@ -3970,7 +4121,8 @@ namespace spot sat_dimacs_file = std::make_unique(dimacsfile); sat_instance_name = si.opt.get_str("satinstancename"); - auto res = minimize_mealy(mm, si.minimize_lvl-4); + auto res = minimize_mealy_(mm, si.minimize_lvl-4, + si.opt.get("max_letter_mult", 10)); sat_csv_file.reset(); sat_dimacs_file.reset(); return res; @@ -4161,7 +4313,7 @@ namespace spot reduce_mealy_here(m, minimize_lvl == 2); } else if (3 <= minimize_lvl) - m = minimize_mealy(m, minimize_lvl - 4); + m = minimize_mealy(m, si); // Convert to demanded output format bool is_split = m->get_named_prop("state-player"); diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 88e22ff04..4e38efd5b 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -37,6 +37,7 @@ #include +#include // Helper function/structures for split_2step namespace{ @@ -1935,4 +1936,98 @@ namespace spot return res; } + namespace + { + const std::string in_mark_s("__AP_IN__"); + const std::string out_mark_s("__AP_OUT__"); + } + + game_relabeling_map + partitioned_game_relabel_here(twa_graph_ptr& arena, + bool relabel_env, + bool relabel_play, + bool 
split_env, + bool split_play, + unsigned max_letter, + unsigned max_letter_mult) + { + if (!arena) + throw std::runtime_error("arena is null."); + auto& arena_r = *arena; + + const auto& sp = get_state_players(arena); + bdd all_ap = arena->ap_vars(); + + if (std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { + return ap.ap_name() == out_mark_s + || ap.ap_name() == in_mark_s; + }) != arena->ap().cend()) + throw std::runtime_error("partitioned_game_relabel_here(): " + "You can not use " + + in_mark_s + " or " + out_mark_s + + " as propositions if relabeling."); + + bdd out_mark = bdd_ithvar(arena_r.register_ap(out_mark_s)); + bdd in_mark = bdd_ithvar(arena_r.register_ap(in_mark_s)); + + bdd outs = get_synthesis_outputs(arena) & out_mark; + bdd ins = bdd_exist(all_ap, outs) & in_mark; + + for (auto& e : arena_r.edges()) + e.cond = e.cond & (sp[e.src] ? out_mark : in_mark); + + game_relabeling_map res; + + if (relabel_env) + res.env_map + = partitioned_relabel_here(arena, split_env, max_letter, + max_letter_mult, ins, "__nv_in"); + if (relabel_play) + res.player_map + = partitioned_relabel_here(arena, split_play, max_letter, + max_letter_mult, outs, "__nv_out"); + return res; + } + + void + relabel_game_here(twa_graph_ptr& arena, + game_relabeling_map& rel_maps) + { + if (!arena) + throw std::runtime_error("arena is null."); + auto& arena_r = *arena; + + // Check that it was partitioned_game_relabel_here + if (!((std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { return ap.ap_name() == out_mark_s; }) + != arena->ap().cend()) + && (std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { return ap.ap_name() == in_mark_s; })) + != arena->ap().cend())) + throw std::runtime_error("relabel_game_here(): " + + in_mark_s + " or " + out_mark_s + + " not registered with arena. " + "Not relabeled?"); + + if (!rel_maps.env_map.empty()) + relabel_here(arena, &rel_maps.env_map); + if (!rel_maps.player_map.empty()) + relabel_here(arena, &rel_maps.player_map); + + bdd dummy_ap = bdd_ithvar(arena_r.register_ap(in_mark_s)) + & bdd_ithvar(arena_r.register_ap(out_mark_s)); + + for (auto& e : arena_r.edges()) + e.cond = bdd_exist(e.cond, dummy_ap); + + arena_r.unregister_ap(arena_r.register_ap(in_mark_s)); + arena_r.unregister_ap(arena_r.register_ap(out_mark_s)); + + return; + } + } // spot diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index b1b7fdf1d..2d9c0600a 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -21,6 +21,7 @@ #include #include +#include #include namespace spot @@ -256,4 +257,39 @@ namespace spot SPOT_API bool solve_game(twa_graph_ptr arena, synthesis_info& gi); + struct SPOT_API game_relabeling_map + { + relabeling_map env_map; + relabeling_map player_map; + }; + + /// \ingroup synthesis + /// \brief Tries to relabel a SPLIT game \a arena using fresh propositions. + /// Can be applied to env or player depending on \a relabel_env + /// and \a relabel_play. The arguments \a split_env and \a split_play + /// determine whether or not env and player edges are to + /// be split into several transitions labelled by letters not conditions. + /// + /// \return pair of relabeling_map, first is for env, second is for player. + /// The maps are empty if no relabeling was performed + /// \note Can also be applied to split mealy machine. 
+ /// \note partitioned_relabel_here can not be used directly if there are + /// T (true conditions) + SPOT_API game_relabeling_map + partitioned_game_relabel_here(twa_graph_ptr& arena, + bool relabel_env, + bool relabel_play, + bool split_env = false, + bool split_play = false, + unsigned max_letter = -1u, + unsigned max_letter_mult = -1u); + + /// \ingroup synthesis + /// \brief Undoes a relabeling done by partitioned_game_relabel_here. + /// A dedicated function is necessary in order to remove the + /// variables tagging env and player conditions + SPOT_API void + relabel_game_here(twa_graph_ptr& arena, + game_relabeling_map& rel_maps); + } diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index 9d7fe7d96..ebeeaacb7 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -129,7 +129,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35aaa030> >" + " *' at 0x7f86481a2690> >" ] }, "execution_count": 4, @@ -209,7 +209,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35aaa900> >" + " *' at 0x7f85f45cbb70> >" ] }, "execution_count": 6, @@ -283,7 +283,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35aaa900> >" + " *' at 0x7f85f45cbb70> >" ] }, "execution_count": 8, @@ -387,7 +387,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35ac20c0> >" + " *' at 0x7f861bfc8ae0> >" ] }, "execution_count": 9, @@ -532,7 +532,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35ac2720> >" + " *' at 0x7f85f45efde0> >" ] }, "execution_count": 10, @@ -584,13 +584,13 @@ " split_cstr_time\n", " prob_init_build_time\n", " ...\n", - " refine_time\n", " total_time\n", " n_classes\n", " n_refinement\n", " n_lit\n", " n_clauses\n", " n_iteration\n", + " n_letters_part\n", " n_bisim_let\n", " n_min_states\n", " done\n", @@ -600,15 +600,15 @@ " \n", " 0\n", " presat\n", - " 25643.3\n", - " 1.112e-06\n", - " 4.588e-06\n", - " 9.888e-06\n", - " 4.549e-06\n", - " 1.5929e-05\n", - " 9.338e-06\n", - " 5.901e-06\n", - " 6.7276e-05\n", + " 3868.95\n", + " 3.282e-06\n", + " 1.4388e-05\n", + " 0.000129765\n", + " 1.3759e-05\n", + " 9.499e-06\n", + " 8.73e-06\n", + " 9.01e-06\n", + " 6.6209e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -616,7 +616,7 @@ " NaN\n", " NaN\n", " NaN\n", - " NaN\n", + " 3\n", " 2\n", " NaN\n", " NaN\n", @@ -634,40 +634,40 @@ " NaN\n", " NaN\n", " ...\n", - " NaN\n", - " 0.000282709\n", + " 0.000743251\n", " 2\n", " 0\n", " 7\n", " 12\n", " 0\n", " NaN\n", + " NaN\n", " 4\n", " 1\n", " \n", " \n", "\n", - "

2 rows × 22 columns

\n", + "

2 rows × 23 columns

\n", "
" ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time incomp_time \\\n", - "0 presat 25643.3 1.112e-06 4.588e-06 9.888e-06 4.549e-06 \n", + "0 presat 3868.95 3.282e-06 1.4388e-05 0.000129765 1.3759e-05 \n", "1 sat NaN NaN NaN NaN NaN \n", "\n", " split_all_let_time split_min_let_time split_cstr_time prob_init_build_time \\\n", - "0 1.5929e-05 9.338e-06 5.901e-06 6.7276e-05 \n", + "0 9.499e-06 8.73e-06 9.01e-06 6.6209e-05 \n", "1 NaN NaN NaN NaN \n", "\n", - " ... refine_time total_time n_classes n_refinement n_lit n_clauses \\\n", - "0 ... NaN NaN NaN NaN NaN NaN \n", - "1 ... NaN 0.000282709 2 0 7 12 \n", + " ... total_time n_classes n_refinement n_lit n_clauses n_iteration \\\n", + "0 ... NaN NaN NaN NaN NaN NaN \n", + "1 ... 0.000743251 2 0 7 12 0 \n", "\n", - " n_iteration n_bisim_let n_min_states done \n", - "0 NaN 2 NaN NaN \n", - "1 0 NaN 4 1 \n", + " n_letters_part n_bisim_let n_min_states done \n", + "0 3 2 NaN NaN \n", + "1 NaN NaN 4 1 \n", "\n", - "[2 rows x 22 columns]" + "[2 rows x 23 columns]" ] }, "metadata": {}, @@ -758,7 +758,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc88735f00> >" + " *' at 0x7f861bfc8630> >" ] }, "execution_count": 11, @@ -861,7 +861,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc6157fe40> >" + " *' at 0x7f861bf9fb40> >" ] }, "execution_count": 12, @@ -1000,7 +1000,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc6157f210> >" + " *' at 0x7f861bf9f210> >" ] }, "execution_count": 13, @@ -1051,13 +1051,13 @@ " split_cstr_time\n", " prob_init_build_time\n", " ...\n", - " refine_time\n", " total_time\n", " n_classes\n", " n_refinement\n", " n_lit\n", " n_clauses\n", " n_iteration\n", + " n_letters_part\n", " n_bisim_let\n", " n_min_states\n", " done\n", @@ -1067,15 +1067,15 @@ " \n", " 0\n", " presat\n", - " 25643.4\n", - " 1.683e-06\n", - " 5.611e-06\n", - " 2.66e-05\n", - " 1.2e-07\n", - " 3.647e-06\n", - " 8.365e-06\n", - " 3.747e-06\n", - " 2.5538e-05\n", + " 3869.08\n", + " 3.213e-06\n", + " 9.079e-06\n", + " 9.5752e-05\n", + " 5.168e-06\n", + " 5.727e-06\n", + " 7.543e-06\n", + " 1.5784e-05\n", + " 4.0507e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1083,7 +1083,7 @@ " NaN\n", " NaN\n", " NaN\n", - " NaN\n", + " 1\n", " 1\n", " NaN\n", " NaN\n", @@ -1102,7 +1102,6 @@ " NaN\n", " ...\n", " NaN\n", - " NaN\n", " 1\n", " 0\n", " 3\n", @@ -1111,6 +1110,7 @@ " NaN\n", " NaN\n", " NaN\n", + " NaN\n", " \n", " \n", " 2\n", @@ -1125,7 +1125,6 @@ " NaN\n", " NaN\n", " ...\n", - " 4.4884e-05\n", " NaN\n", " 1\n", " 1\n", @@ -1135,6 +1134,7 @@ " NaN\n", " NaN\n", " NaN\n", + " NaN\n", " \n", " \n", " 3\n", @@ -1149,48 +1149,48 @@ " NaN\n", " NaN\n", " ...\n", - " NaN\n", - " 0.000200344\n", + " 0.000399073\n", " 2\n", " 0\n", " 17\n", " 29\n", " 1\n", " NaN\n", + " NaN\n", " 4\n", " 1\n", " \n", " \n", "\n", - "

4 rows × 22 columns

\n", + "

4 rows × 23 columns

\n", "" ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 25643.4 1.683e-06 5.611e-06 2.66e-05 \n", + "0 presat 3869.08 3.213e-06 9.079e-06 9.5752e-05 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 1.2e-07 3.647e-06 8.365e-06 3.747e-06 \n", + "0 5.168e-06 5.727e-06 7.543e-06 1.5784e-05 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", - " prob_init_build_time ... refine_time total_time n_classes n_refinement \\\n", - "0 2.5538e-05 ... NaN NaN NaN NaN \n", - "1 NaN ... NaN NaN 1 0 \n", - "2 NaN ... 4.4884e-05 NaN 1 1 \n", - "3 NaN ... NaN 0.000200344 2 0 \n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 4.0507e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 0.000399073 2 0 17 \n", "\n", - " n_lit n_clauses n_iteration n_bisim_let n_min_states done \n", - "0 NaN NaN NaN 1 NaN NaN \n", - "1 3 6 0 NaN NaN NaN \n", - "2 10 16 NaN NaN NaN NaN \n", - "3 17 29 1 NaN 4 1 \n", + " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", + "0 NaN NaN 1 1 NaN NaN \n", + "1 6 0 NaN NaN NaN NaN \n", + "2 16 NaN NaN NaN NaN NaN \n", + "3 29 1 NaN NaN 4 1 \n", "\n", - "[4 rows x 22 columns]" + "[4 rows x 23 columns]" ] }, "metadata": {}, @@ -1200,6 +1200,7 @@ "name": "stdout", "output_type": "stream", "text": [ + "Number of variables\n", "0 NaN\n", "1 3\n", "2 10\n", @@ -1285,7 +1286,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fcc35ac22a0> >" + " *' at 0x7f861bfcdc00> >" ] }, "execution_count": 14, @@ -1297,6 +1298,7 @@ "si = spot.synthesis_info()\n", "si.minimize_lvl = 3\n", "aut_ms, table = spot.minimize_mealy(aut_s, si, display_log=True, return_log=True)\n", + "print(\"Number of variables\")\n", "print(table[\"n_lit\"])\n", "aut_ms" ] @@ -1347,13 +1349,13 @@ " split_cstr_time\n", " prob_init_build_time\n", " ...\n", - " refine_time\n", " total_time\n", " n_classes\n", " n_refinement\n", " n_lit\n", " n_clauses\n", " n_iteration\n", + " n_letters_part\n", " n_bisim_let\n", " n_min_states\n", " done\n", @@ -1363,15 +1365,15 @@ " \n", " 0\n", " presat\n", - " 25643.5\n", - " 1.563e-06\n", - " 5.4e-06\n", - " 2.0519e-05\n", - " 1.3e-07\n", - " 3.968e-06\n", - " 9.698e-06\n", - " 7.624e-06\n", - " 3.211e-05\n", + " 3869.14\n", + " 2.863e-06\n", + " 9.08e-06\n", + " 6.0622e-05\n", + " 4.679e-06\n", + " 5.308e-06\n", + " 8.59e-06\n", + " 7.962e-06\n", + " 4.0159e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1379,7 +1381,7 @@ " NaN\n", " NaN\n", " NaN\n", - " NaN\n", + " 1\n", " 1\n", " NaN\n", " NaN\n", @@ -1398,7 +1400,6 @@ " NaN\n", " ...\n", " NaN\n", - " NaN\n", " 1\n", " 0\n", " 3\n", @@ -1407,6 +1408,7 @@ " NaN\n", " NaN\n", " NaN\n", + " NaN\n", " \n", " \n", " 2\n", @@ -1421,7 +1423,6 @@ " NaN\n", " NaN\n", " ...\n", - " 4.4633e-05\n", " NaN\n", " 1\n", " 1\n", @@ -1431,6 +1432,7 @@ " NaN\n", " NaN\n", " NaN\n", + " NaN\n", " \n", " \n", " 3\n", @@ -1445,48 +1447,48 @@ " NaN\n", " NaN\n", " ...\n", - " NaN\n", - " 0.000280675\n", + " 0.000416464\n", " 2\n", " 0\n", " 17\n", " 29\n", " 1\n", " NaN\n", + " NaN\n", " 4\n", " 1\n", " \n", " \n", "\n", - "

4 rows × 22 columns

\n", + "

4 rows × 23 columns

\n", "" ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 25643.5 1.563e-06 5.4e-06 2.0519e-05 \n", + "0 presat 3869.14 2.863e-06 9.08e-06 6.0622e-05 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 1.3e-07 3.968e-06 9.698e-06 7.624e-06 \n", + "0 4.679e-06 5.308e-06 8.59e-06 7.962e-06 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", - " prob_init_build_time ... refine_time total_time n_classes n_refinement \\\n", - "0 3.211e-05 ... NaN NaN NaN NaN \n", - "1 NaN ... NaN NaN 1 0 \n", - "2 NaN ... 4.4633e-05 NaN 1 1 \n", - "3 NaN ... NaN 0.000280675 2 0 \n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 4.0159e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 0.000416464 2 0 17 \n", "\n", - " n_lit n_clauses n_iteration n_bisim_let n_min_states done \n", - "0 NaN NaN NaN 1 NaN NaN \n", - "1 3 6 0 NaN NaN NaN \n", - "2 10 16 NaN NaN NaN NaN \n", - "3 17 29 1 NaN 4 1 \n", + " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", + "0 NaN NaN 1 1 NaN NaN \n", + "1 6 0 NaN NaN NaN NaN \n", + "2 16 NaN NaN NaN NaN NaN \n", + "3 29 1 NaN NaN 4 1 \n", "\n", - "[4 rows x 22 columns]" + "[4 rows x 23 columns]" ] }, "metadata": {}, @@ -1570,10 +1572,118 @@ " " ] }, + { + "cell_type": "markdown", + "id": "b10213b8", + "metadata": {}, + "source": [ + "# Testing partitioned relabeling" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "fd5ca506", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Conditions in orig machine: 8\n", + "Conditions in relabeled machine: 13\n" + ] + } + ], + "source": [ + "def get_mealy():\n", + " return spot.split_2step(spot.automaton(\"\"\"HOA: v1\n", + "States: 2\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0\n", + "[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0\n", + "[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0\n", + "[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0\n", + "[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 | 0&!1&!2&3&!4&!5&6&!7&8&9] 1\n", + "State: 1\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | 
!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\"\"\"))\n", + "\n", + "def env_conditions(m):\n", + " sp = spot.get_state_players(m)\n", + " conds = []\n", + " for e in m.edges():\n", + " if sp[e.src]:\n", + " continue\n", + " if not e.cond in conds:\n", + " conds.append(e.cond)\n", + " return conds\n", + "print(\"Conditions in orig machine: \", len(env_conditions(get_mealy())))\n", + "ms = get_mealy()\n", + "# Relabel only env\n", + "spot.partitioned_game_relabel_here(ms, True, False, True, False)\n", + "print(\"Conditions in relabeled machine: \", len(env_conditions(ms)))" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "ee29da67", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Partitioned env letters: 13\n" + ] + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "# Turn on relabeling\n", + "si.opt.set(\"max_letter_mult\", 100000)\n", + "\n", + "mm, log = spot.minimize_mealy(get_mealy(), si, return_log=True)\n", + "print(\"Partitioned env letters:\", log[\"n_letters_part\"][0])" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "0aec8019", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Partitioned env letters: 0\n" + ] + } + ], + "source": [ + "# Turn off relabeling\n", + "si.opt.set(\"max_letter_mult\", 0)\n", + "\n", + "mm, log = spot.minimize_mealy(get_mealy(), si, return_log=True)\n", + "print(\"Partitioned env letters:\", log[\"n_letters_part\"][0])" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "5c9fe115", + "id": "a92f4f43", "metadata": {}, "outputs": [], "source": [] diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb index a9c1c7af7..b7f1c4380 100644 --- a/tests/python/_partitioned_relabel.ipynb +++ b/tests/python/_partitioned_relabel.ipynb @@ -121,7 +121,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b0311a80> >" + " *' at 0x7f936415fbd0> >" ] }, "execution_count": 2, @@ -248,7 +248,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b0311a80> >" + " *' at 0x7f936415fbd0> >" ] }, "execution_count": 3, @@ -353,7 +353,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b0311a80> >" + " *' at 0x7f936415fbd0> >" ] }, "execution_count": 4, @@ -457,7 +457,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c0d50> >" + " *' at 0x7f936415bf30> >" ] }, "metadata": {}, @@ -611,7 +611,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c0d50> >" + " *' at 0x7f936415bf30> >" ] }, "execution_count": 5, @@ -769,7 +769,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c0d50> >" + " *' at 0x7f936415bf30> >" ] }, "execution_count": 6, @@ -886,7 +886,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c90f0> >" + " *' at 0x7f936c3c6090> >" ] }, "metadata": {}, @@ -1015,7 +1015,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c90f0> >" + " *' at 0x7f936c3c6090> >" ] }, "execution_count": 7, @@ -1184,7 +1184,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f65b02c90f0> >" + " *' at 0x7f936c3c6090> >" ] }, "execution_count": 8, @@ -1198,6 +1198,1135 @@ "print(aut.to_str(\"hoa\"))\n", "aut" ] + }, + { + "cell_type": "markdown", + "id": "ef77c2ee", + "metadata": {}, + "source": [ + "# Concerning games and Mealy machines\n", + "\n", + "Games and split mealy machines have both: defined outputs and states that 
either belong to player or env.\n", + "Relabeling is done separately for env and player transitions (over inputs and outputs respectively).\n", + "\n", + "The problem is that T (bddtrue) is ambiguous, as it may be over the inputs or outputs.\n", + "\n", + "We therefore introduce a dedicated function for this matter." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "296a93d3", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b / (label too long)\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1 / (label too long)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 
0x7f936415f510> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!8&!9&!10] 2\n", + "[!8&!9&10] 3\n", + "[!8&9&!10] 4\n", + "[!8&9&10] 5\n", + "[8&!9&!10] 6\n", + "[8&!9&10] 7\n", + "[8&9] 8\n", + "State: 1\n", + "[t] 8\n", + "State: 2\n", + "[!0&!1&2&!3&4&!5&6&!7] 0\n", + "State: 3\n", + "[!0&1&!2&!3&4&!5&6&!7] 0\n", + "State: 4\n", + "[!0&!1&2&!3&!4&5&6&!7] 0\n", + "State: 5\n", + "[!0&1&!2&!3&!4&5&6&!7] 0\n", + "State: 6\n", + "[!0&!1&2&3&!4&!5&6&!7] 0\n", + "State: 7\n", + "[!0&1&!2&3&!4&!5&6&!7] 0\n", + "State: 8\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & 
p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 21 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\" \"__AP_OUT__\" \"__AP_IN__\" \"__nv_in0\" \"__nv_in1\" \"__nv_in2\" \"__nv_in3\" \"__nv_out0\" \"__nv_out1\" \"__nv_out2\" \"__nv_out3\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!13&!14&!15&!16] 2\n", + "[13&!14&!15&!16] 3\n", + "[!13&14&!15&!16] 4\n", + "[13&14&!15&!16] 5\n", + "[!13&!14&15&!16] 6\n", + "[13&!14&15&!16] 7\n", + "[!13&14&15&!16] 8\n", + "[13&14&15&!16] 2\n", + "[!13&!14&!15&16] 3\n", + "[13&!14&!15&16] 4\n", + "[!13&14&!15&16] 5\n", + "[13&14&!15&16] 6\n", + "[!13&!14&15&16] 7\n", + "State: 1\n", + "[13&14&15&!16] 8\n", + "[!13&!14&!15&16] 8\n", + "[13&!14&!15&16] 8\n", + "[!13&14&!15&16] 8\n", + "[13&14&!15&16] 8\n", + "[!13&!14&15&16] 8\n", + "[!13&14&15&!16] 8\n", + 
"State: 2\n", + "[!17&!18&!19&!20 | !17&18&19&!20] 0\n", + "State: 3\n", + "[17&!18&!19&!20 | 17&18&19&!20] 0\n", + "State: 4\n", + "[!17&!18&!19&20 | !17&18&!19&!20] 0\n", + "State: 5\n", + "[17&!18&!19&20 | 17&18&!19&!20] 0\n", + "State: 6\n", + "[!17&!18&19&!20 | !17&18&!19&20] 0\n", + "State: 7\n", + "[17&!18&19&!20 | 17&18&!19&20] 0\n", + "State: 8\n", + "[!17&!18&20 | 18&19&!20 | !19&20] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "(!__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "(__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out3) | (__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1->8\n", 
+ "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Strategy torquesimple_acd as mealy machine\n", + "\n", + "aut = spot.automaton(\"\"\"HOA: v1\n", + "States: 2\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0\n", + "[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0\n", + "[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0\n", + "[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0\n", + "[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 | 0&!1&!2&3&!4&!5&6&!7&8&9] 1\n", + "State: 1\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\"\"\")\n", + "\n", + "display(aut)\n", + "\n", + "# Convert to split mealy machine\n", + "auts = spot.split_2step(aut)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)\n", + "\n", + "# Relabel both, inputs and outputs\n", + "# You can choose the split option and stopping criteria as before\n", + "rel_dicts = spot.partitioned_game_relabel_here(auts, True, True, True, False, 10000, 10000)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "7ec02ff5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" 
\"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[f] 2\n", + "[f] 3\n", + "[f] 4\n", + "[f] 5\n", + "[f] 6\n", + "[f] 7\n", + "[8&9] 8\n", + "[!8&!9&!10] 2\n", + "[!8&!9&10] 3\n", + "[!8&9&!10] 4\n", + "[!8&9&10] 5\n", + "[8&!9&!10] 6\n", + "[8&!9&10] 7\n", + "State: 1\n", + "[!8&!9&!10] 8\n", + "[!8&!9&10] 8\n", + "[!8&9&!10] 8\n", + "[!8&9&10] 8\n", + "[8&!9&!10] 8\n", + "[8&!9&10] 8\n", + "[8&9] 8\n", + "State: 2\n", + "[!0&!1&2&!3&4&!5&6&!7] 0\n", + "State: 3\n", + "[!0&1&!2&!3&4&!5&6&!7] 0\n", + "State: 4\n", + "[!0&!1&2&!3&!4&5&6&!7] 0\n", + "State: 5\n", + "[!0&1&!2&!3&!4&5&6&!7] 0\n", + "State: 6\n", + "[!0&!1&2&3&!4&!5&6&!7] 0\n", + "State: 7\n", + "[!0&1&!2&3&!4&!5&6&!7] 0\n", + "State: 8\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + 
"0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n" + ], + 
"text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Undo relabel\n", + "spot.relabel_game_here(auts, rel_dicts)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "48c2283b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n" + ] + } + ], + "source": [ + "# Check if we do actually obtain the same automaton\n", + "\n", + "print(spot.are_equivalent(aut, spot.unsplit_2step(auts)))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2b8d907e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n" + ] + } + ], + "source": [ + "# Test all options for equivalence\n", + "for relabel_env in [True, False]:\n", + " for relabel_player in [True, False]:\n", + " for split_env in [True, False]:\n", + " for split_player in [True, False]:\n", + " auts = spot.split_2step(aut)\n", + " rel_dicts = spot.partitioned_game_relabel_here(auts, relabel_env, relabel_player, split_env, split_player, 10000, 10000)\n", + " spot.relabel_game_here(auts, rel_dicts)\n", + " print(spot.are_equivalent(aut, spot.unsplit_2step(auts)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17a32a72", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/tests/python/except.py b/tests/python/except.py index e531882dd..03076c01b 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -373,4 +373,22 @@ except RuntimeError as e: tc.assertIn("The given prefix for new variables", str(e)) else: - report_missing_exception() \ No newline at end of file + report_missing_exception() + +# Relabeling games must not use the +# globally reserved aps +aut = spot.make_twa_graph() +aut.new_states(2) +apin = buddy.bdd_ithvar(aut.register_ap("__AP_IN__")) +apout = buddy.bdd_ithvar(aut.register_ap("__AP_OUT__")) +aut.new_edge(0,1,apin & apout) +aut.new_edge(1,0,buddy.bdd_not(apin & apout)) +spot.set_state_players(aut, [False, True]) + +try: + spot.partitioned_game_relabel_here(aut, True, True) +except RuntimeError as e: + tc.assertIn("You can not use __AP_IN__ or __AP_OUT__", + str(e)) +else: + report_missing_exception() From 427f667f9f0db1131d0adf3b72da6710a4eabc9c Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Thu, 6 Oct 2022 22:03:28 +0200 Subject: [PATCH 212/606] lazy eval for sat mealy minimization Evaluate incomp of player conditions only if necessary * spot/twaalgos/mealy_machine.cc: Here --- spot/twaalgos/mealy_machine.cc | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index df9ad6017..e2b1523de 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1312,7 +1312,9 @@ namespace // Associated condition and id of each player state std::vector> ps2c; ps2c.reserve(n_tot - n_env); + // bdd id -> internal index std::unordered_map all_out_cond; + for (unsigned s1 = n_env; s1 < n_tot; ++s1) { const bdd &c1 = get_cond(s1); @@ -1327,24 +1329,26 @@ namespace #endif } // Are two player condition ids states incompatible + // Matrix for incompatibility square_matrix 
inc_player(all_out_cond.size(), false); + // Matrix whether computed or not + square_matrix inc_player_comp(all_out_cond.size(), false); // Compute. First is id of bdd - for (const auto& p1 : all_out_cond) - for (const auto& p2 : all_out_cond) - { - if (p1.second > p2.second) - continue; - inc_player.set(p1.second, p2.second, - !bdd_have_common_assignment( - bdd_from_int((int) p1.first), - bdd_from_int((int) p2.first))); - assert(inc_player.get(p1.second, p2.second) - == ((bdd_from_int((int) p1.first) - & bdd_from_int((int) p2.first)) == bddfalse)); - } + // Lazy eval: Compute incompatibility between out conditions + // only if demanded + auto is_p_incomp = [&](unsigned s1, unsigned s2) { - return inc_player.get(ps2c[s1].second, ps2c[s2].second); + const auto& [s1bdd, s1idx] = ps2c[s1]; + const auto& [s2bdd, s2idx] = ps2c[s2]; + + if (!inc_player_comp.get(s1idx, s2idx)) + { + inc_player_comp.set(s1idx, s2idx, true); + inc_player.set(s1idx, s2idx, + !bdd_have_common_assignment(s1bdd, s2bdd)); + } + return inc_player.get(s1idx, s2idx); }; si.player_incomp_time = si.restart(); @@ -3948,6 +3952,7 @@ namespace si.task = "presat"; stopwatch sglob; sglob.start(); + si.start(); if ((premin < -1) || (premin > 1)) throw std::runtime_error("premin has to be -1, 0 or 1"); From d0b15088318fc6a87999b28b31c90f3bbe4dd36b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 12:04:15 +0100 Subject: [PATCH 213/606] acd: rewrite Python wrapper without jQuery * python/spot/__init__.py (acd): Rewrite javascript so that it does not use jQuery, to make it easier to use in jupyterlab, or with nbconvert. * tests/python/zlktree.ipynb: Adjust. * NEWS: Mention this. --- NEWS | 4 + python/spot/__init__.py | 58 ++-- tests/python/zlktree.ipynb | 683 ++++++++++++++++++++++++++++--------- 3 files changed, 564 insertions(+), 181 deletions(-) diff --git a/NEWS b/NEWS index c45876408..384ddc8bc 100644 --- a/NEWS +++ b/NEWS @@ -9,6 +9,10 @@ New in spot 2.11.3.dev (not yet released) - b:b[*i..j] = b[*max(i,1)..j] - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1, j+l-1] + Python: + + - spot.acd() no longer depends on jQuery for interactivity. 
+ New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/python/spot/__init__.py b/python/spot/__init__.py index edbf4a4e6..ef4cd772e 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -502,51 +502,57 @@ class acd: .acdacc polygon{fill:green;} ''' js = ''' -function acd{num}_clear(){{ - $("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge") - .removeClass("acdhigh acdbold acdacc acdrej"); +function acdremclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.remove(...classes)}});}} +function acdaddclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.add(...classes)}});}} +function acdonclick(sel, fn) {{ + document.querySelectorAll(sel).forEach(n=> + {{n.addEventListener("click", fn)}}); +}} +function acd{num}_clear() {{ + acdremclasses("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge", + ["acdhigh", "acdbold", "acdacc", "acdrej"]); }}; function acd{num}_state(state){{ - acd{num}_clear(); - $("#acd{num} .acdS" + state).addClass("acdhigh acdbold"); - $("#acdaut{num} #S" + state).addClass("acdbold"); + acd{num}_clear(); + acdaddclasses("#acd{num} .acdS" + state, ["acdhigh", "acdbold"]); + acdaddclasses("#acdaut{num} #S" + state, ["acdbold"]); }}; function acd{num}_edge(edge){{ - acd{num}_clear(); - var theedge = $('#acdaut{num} #E' + edge) - var classList = theedge.attr('class').split(/\s+/); - $.each(classList, function(index, item) {{ - if (item.startsWith('acdN')) {{ - $("#acd{num} #" + item.substring(3)).addClass("acdhigh acdbold"); - }} - }}); - theedge.addClass("acdbold"); + acd{num}_clear(); + var theedge = document.querySelector('#acdaut{num} #E' + edge); + theedge.classList.forEach(function(item, index) {{ + if (item.startsWith('acdN')) {{ + acdaddclasses("#acd{num} #" + item.substring(3), ["acdhigh", "acdbold"]); + }} + }}); + theedge.classList.add("acdbold"); }}; function acd{num}_node(node, acc){{ acd{num}_clear(); - $("#acdaut{num} .acdN" + node).addClass(acc - ? "acdacc acdbold" - : "acdrej acdbold"); - $("#acd{num} #N" + node).addClass("acdbold acdhigh"); + acdaddclasses("#acdaut{num} .acdN" + node, + [acc ? "acdacc" : "acdrej", "acdbold"]); + acdaddclasses("#acd{num} #N" + node, ["acdbold", "acdhigh"]); }};'''.format(num=num) me = 0 for n in range(self.node_count()): for e in self.edges_of_node(n): me = max(e, me) - js += '$("#acdaut{num} #E{e}").addClass("acdN{n}");'\ + js += 'acdaddclasses("#acdaut{num} #E{e}", ["acdN{n}"]);\n'\ .format(num=num, e=e, n=n) for e in range(1, me + 1): - js += '$("#acdaut{num} #E{e}")'\ - '.click(function(){{acd{num}_edge({e});}});'\ + js += 'acdonclick("#acdaut{num} #E{e}",'\ + 'function(){{acd{num}_edge({e});}});\n'\ .format(num=num, e=e) for s in range(self.get_aut().num_states()): - js += '$("#acdaut{num} #S{s}")'\ - '.click(function(){{acd{num}_state({s});}});'\ + js += 'acdonclick("#acdaut{num} #S{s}",'\ + 'function(){{acd{num}_state({s});}});\n'\ .format(num=num, s=s) for n in range(self.node_count()): v = int(self.node_acceptance(n)) - js += '$("#acd{num} #N{n}")'\ - '.click(function(){{acd{num}_node({n}, {v});}});'\ + js += 'acdonclick("#acd{num} #N{n}",'\ + 'function(){{acd{num}_node({n}, {v});}});\n'\ .format(num=num, n=n, v=v) html = '
{}
{}
'\ .format(style, diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index ae44ad37d..c9eb3503d 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -216,7 +216,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -640,7 +640,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701b7510> >" + " *' at 0x7f82c009d7a0> >" ] }, "execution_count": 10, @@ -1063,7 +1063,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470220960> >" + " *' at 0x7f82c009c630> >" ] }, "execution_count": 11, @@ -1256,7 +1256,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701b75d0> >" + " *' at 0x7f82c009c6c0> >" ] }, "execution_count": 13, @@ -1701,7 +1701,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470142240> >" + " *' at 0x7f82c009c480> >" ] }, "execution_count": 14, @@ -2096,7 +2096,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2427,7 +2427,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2513,7 +2513,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2624,7 +2624,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2662,7 +2662,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2700,7 +2700,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2928,7 +2928,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 18, @@ -4064,36 +4064,159 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut0 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd0 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut0 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN5\"]);\n", + 
"acdaddclasses(\"#acdaut0 #E34\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut0 #E1\",function(){acd0_edge(1);});\n", + "acdonclick(\"#acdaut0 #E2\",function(){acd0_edge(2);});\n", + "acdonclick(\"#acdaut0 #E3\",function(){acd0_edge(3);});\n", + "acdonclick(\"#acdaut0 #E4\",function(){acd0_edge(4);});\n", + "acdonclick(\"#acdaut0 #E5\",function(){acd0_edge(5);});\n", + "acdonclick(\"#acdaut0 #E6\",function(){acd0_edge(6);});\n", + "acdonclick(\"#acdaut0 #E7\",function(){acd0_edge(7);});\n", + "acdonclick(\"#acdaut0 #E8\",function(){acd0_edge(8);});\n", + "acdonclick(\"#acdaut0 #E9\",function(){acd0_edge(9);});\n", + "acdonclick(\"#acdaut0 #E10\",function(){acd0_edge(10);});\n", + "acdonclick(\"#acdaut0 #E11\",function(){acd0_edge(11);});\n", + "acdonclick(\"#acdaut0 #E12\",function(){acd0_edge(12);});\n", + "acdonclick(\"#acdaut0 #E13\",function(){acd0_edge(13);});\n", + "acdonclick(\"#acdaut0 #E14\",function(){acd0_edge(14);});\n", + "acdonclick(\"#acdaut0 #E15\",function(){acd0_edge(15);});\n", + "acdonclick(\"#acdaut0 #E16\",function(){acd0_edge(16);});\n", + "acdonclick(\"#acdaut0 #E17\",function(){acd0_edge(17);});\n", + "acdonclick(\"#acdaut0 #E18\",function(){acd0_edge(18);});\n", + "acdonclick(\"#acdaut0 #E19\",function(){acd0_edge(19);});\n", + "acdonclick(\"#acdaut0 #E20\",function(){acd0_edge(20);});\n", + "acdonclick(\"#acdaut0 #E21\",function(){acd0_edge(21);});\n", + "acdonclick(\"#acdaut0 #E22\",function(){acd0_edge(22);});\n", + "acdonclick(\"#acdaut0 #E23\",function(){acd0_edge(23);});\n", + "acdonclick(\"#acdaut0 #E24\",function(){acd0_edge(24);});\n", + "acdonclick(\"#acdaut0 #E25\",function(){acd0_edge(25);});\n", + "acdonclick(\"#acdaut0 #E26\",function(){acd0_edge(26);});\n", + "acdonclick(\"#acdaut0 #E27\",function(){acd0_edge(27);});\n", + "acdonclick(\"#acdaut0 #E28\",function(){acd0_edge(28);});\n", + "acdonclick(\"#acdaut0 #E29\",function(){acd0_edge(29);});\n", + "acdonclick(\"#acdaut0 #E30\",function(){acd0_edge(30);});\n", + "acdonclick(\"#acdaut0 #E31\",function(){acd0_edge(31);});\n", + "acdonclick(\"#acdaut0 #E32\",function(){acd0_edge(32);});\n", + "acdonclick(\"#acdaut0 #E33\",function(){acd0_edge(33);});\n", + "acdonclick(\"#acdaut0 #E34\",function(){acd0_edge(34);});\n", + "acdonclick(\"#acdaut0 #E35\",function(){acd0_edge(35);});\n", + "acdonclick(\"#acdaut0 #E36\",function(){acd0_edge(36);});\n", + "acdonclick(\"#acdaut0 #E37\",function(){acd0_edge(37);});\n", + "acdonclick(\"#acdaut0 #E38\",function(){acd0_edge(38);});\n", + "acdonclick(\"#acdaut0 #E39\",function(){acd0_edge(39);});\n", + "acdonclick(\"#acdaut0 #E40\",function(){acd0_edge(40);});\n", + "acdonclick(\"#acdaut0 #S0\",function(){acd0_state(0);});\n", + "acdonclick(\"#acdaut0 
#S1\",function(){acd0_state(1);});\n", + "acdonclick(\"#acdaut0 #S2\",function(){acd0_state(2);});\n", + "acdonclick(\"#acdaut0 #S3\",function(){acd0_state(3);});\n", + "acdonclick(\"#acdaut0 #S4\",function(){acd0_state(4);});\n", + "acdonclick(\"#acdaut0 #S5\",function(){acd0_state(5);});\n", + "acdonclick(\"#acdaut0 #S6\",function(){acd0_state(6);});\n", + "acdonclick(\"#acdaut0 #S7\",function(){acd0_state(7);});\n", + "acdonclick(\"#acdaut0 #S8\",function(){acd0_state(8);});\n", + "acdonclick(\"#acdaut0 #S9\",function(){acd0_state(9);});\n", + "acdonclick(\"#acd0 #N0\",function(){acd0_node(0, 0);});\n", + "acdonclick(\"#acd0 #N1\",function(){acd0_node(1, 1);});\n", + "acdonclick(\"#acd0 #N2\",function(){acd0_node(2, 1);});\n", + "acdonclick(\"#acd0 #N3\",function(){acd0_node(3, 1);});\n", + "acdonclick(\"#acd0 #N4\",function(){acd0_node(4, 1);});\n", + "acdonclick(\"#acd0 #N5\",function(){acd0_node(5, 1);});\n", + "acdonclick(\"#acd0 #N6\",function(){acd0_node(6, 1);});\n", + "acdonclick(\"#acd0 #N7\",function(){acd0_node(7, 1);});\n", + "acdonclick(\"#acd0 #N8\",function(){acd0_node(8, 1);});\n", + "acdonclick(\"#acd0 #N9\",function(){acd0_node(9, 0);});\n", + "acdonclick(\"#acd0 #N10\",function(){acd0_node(10, 0);});\n", + "acdonclick(\"#acd0 #N11\",function(){acd0_node(11, 0);});\n", + "acdonclick(\"#acd0 #N12\",function(){acd0_node(12, 0);});\n", + "acdonclick(\"#acd0 #N13\",function(){acd0_node(13, 0);});\n", + "acdonclick(\"#acd0 #N14\",function(){acd0_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 20, @@ -4968,7 +5091,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701670f0> >" + " *' at 0x7f82c00bc870> >" ] }, "execution_count": 29, @@ -5607,7 +5730,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470167210> >" + " *' at 0x7f82c00bc060> >" ] }, "execution_count": 31, @@ -5807,7 +5930,9 @@ "cell_type": "code", "execution_count": 40, "id": "813d15ed", - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -6875,36 +7000,159 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut1 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd1 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut1 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut1 #E1\",function(){acd1_edge(1);});\n", + "acdonclick(\"#acdaut1 #E2\",function(){acd1_edge(2);});\n", + "acdonclick(\"#acdaut1 #E3\",function(){acd1_edge(3);});\n", + "acdonclick(\"#acdaut1 #E4\",function(){acd1_edge(4);});\n", + "acdonclick(\"#acdaut1 #E5\",function(){acd1_edge(5);});\n", + "acdonclick(\"#acdaut1 #E6\",function(){acd1_edge(6);});\n", + "acdonclick(\"#acdaut1 #E7\",function(){acd1_edge(7);});\n", + "acdonclick(\"#acdaut1 #E8\",function(){acd1_edge(8);});\n", + "acdonclick(\"#acdaut1 #E9\",function(){acd1_edge(9);});\n", + "acdonclick(\"#acdaut1 
#E10\",function(){acd1_edge(10);});\n", + "acdonclick(\"#acdaut1 #E11\",function(){acd1_edge(11);});\n", + "acdonclick(\"#acdaut1 #E12\",function(){acd1_edge(12);});\n", + "acdonclick(\"#acdaut1 #E13\",function(){acd1_edge(13);});\n", + "acdonclick(\"#acdaut1 #E14\",function(){acd1_edge(14);});\n", + "acdonclick(\"#acdaut1 #E15\",function(){acd1_edge(15);});\n", + "acdonclick(\"#acdaut1 #E16\",function(){acd1_edge(16);});\n", + "acdonclick(\"#acdaut1 #E17\",function(){acd1_edge(17);});\n", + "acdonclick(\"#acdaut1 #E18\",function(){acd1_edge(18);});\n", + "acdonclick(\"#acdaut1 #E19\",function(){acd1_edge(19);});\n", + "acdonclick(\"#acdaut1 #E20\",function(){acd1_edge(20);});\n", + "acdonclick(\"#acdaut1 #E21\",function(){acd1_edge(21);});\n", + "acdonclick(\"#acdaut1 #E22\",function(){acd1_edge(22);});\n", + "acdonclick(\"#acdaut1 #E23\",function(){acd1_edge(23);});\n", + "acdonclick(\"#acdaut1 #E24\",function(){acd1_edge(24);});\n", + "acdonclick(\"#acdaut1 #E25\",function(){acd1_edge(25);});\n", + "acdonclick(\"#acdaut1 #E26\",function(){acd1_edge(26);});\n", + "acdonclick(\"#acdaut1 #E27\",function(){acd1_edge(27);});\n", + "acdonclick(\"#acdaut1 #E28\",function(){acd1_edge(28);});\n", + "acdonclick(\"#acdaut1 #E29\",function(){acd1_edge(29);});\n", + "acdonclick(\"#acdaut1 #E30\",function(){acd1_edge(30);});\n", + "acdonclick(\"#acdaut1 #E31\",function(){acd1_edge(31);});\n", + "acdonclick(\"#acdaut1 #E32\",function(){acd1_edge(32);});\n", + "acdonclick(\"#acdaut1 #E33\",function(){acd1_edge(33);});\n", + "acdonclick(\"#acdaut1 #E34\",function(){acd1_edge(34);});\n", + "acdonclick(\"#acdaut1 #E35\",function(){acd1_edge(35);});\n", + "acdonclick(\"#acdaut1 #E36\",function(){acd1_edge(36);});\n", + "acdonclick(\"#acdaut1 #E37\",function(){acd1_edge(37);});\n", + "acdonclick(\"#acdaut1 #E38\",function(){acd1_edge(38);});\n", + "acdonclick(\"#acdaut1 #E39\",function(){acd1_edge(39);});\n", + "acdonclick(\"#acdaut1 #E40\",function(){acd1_edge(40);});\n", + "acdonclick(\"#acdaut1 #S0\",function(){acd1_state(0);});\n", + "acdonclick(\"#acdaut1 #S1\",function(){acd1_state(1);});\n", + "acdonclick(\"#acdaut1 #S2\",function(){acd1_state(2);});\n", + "acdonclick(\"#acdaut1 #S3\",function(){acd1_state(3);});\n", + "acdonclick(\"#acdaut1 #S4\",function(){acd1_state(4);});\n", + "acdonclick(\"#acdaut1 #S5\",function(){acd1_state(5);});\n", + "acdonclick(\"#acdaut1 #S6\",function(){acd1_state(6);});\n", + "acdonclick(\"#acdaut1 #S7\",function(){acd1_state(7);});\n", + "acdonclick(\"#acdaut1 #S8\",function(){acd1_state(8);});\n", + "acdonclick(\"#acdaut1 #S9\",function(){acd1_state(9);});\n", + "acdonclick(\"#acd1 #N0\",function(){acd1_node(0, 0);});\n", + "acdonclick(\"#acd1 #N1\",function(){acd1_node(1, 1);});\n", + "acdonclick(\"#acd1 #N2\",function(){acd1_node(2, 1);});\n", + "acdonclick(\"#acd1 #N3\",function(){acd1_node(3, 1);});\n", + "acdonclick(\"#acd1 #N4\",function(){acd1_node(4, 1);});\n", + "acdonclick(\"#acd1 #N5\",function(){acd1_node(5, 1);});\n", + "acdonclick(\"#acd1 #N6\",function(){acd1_node(6, 1);});\n", + "acdonclick(\"#acd1 #N7\",function(){acd1_node(7, 1);});\n", + "acdonclick(\"#acd1 #N8\",function(){acd1_node(8, 1);});\n", + "acdonclick(\"#acd1 #N9\",function(){acd1_node(9, 0);});\n", + "acdonclick(\"#acd1 #N10\",function(){acd1_node(10, 0);});\n", + "acdonclick(\"#acd1 #N11\",function(){acd1_node(11, 0);});\n", + "acdonclick(\"#acd1 #N12\",function(){acd1_node(12, 0);});\n", + "acdonclick(\"#acd1 #N13\",function(){acd1_node(13, 0);});\n", + "acdonclick(\"#acd1 
#N14\",function(){acd1_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 40, @@ -7817,7 +8065,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700fe1e0> >" + " *' at 0x7f82c00be460> >" ] }, "execution_count": 45, @@ -8114,36 +8362,69 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut2 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd2 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut2 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E1\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut2 #E1\",function(){acd2_edge(1);});\n", + "acdonclick(\"#acdaut2 #E2\",function(){acd2_edge(2);});\n", + "acdonclick(\"#acdaut2 #E3\",function(){acd2_edge(3);});\n", + "acdonclick(\"#acdaut2 #E4\",function(){acd2_edge(4);});\n", + "acdonclick(\"#acdaut2 #E5\",function(){acd2_edge(5);});\n", + "acdonclick(\"#acdaut2 #E6\",function(){acd2_edge(6);});\n", + "acdonclick(\"#acdaut2 #S0\",function(){acd2_state(0);});\n", + "acdonclick(\"#acdaut2 #S1\",function(){acd2_state(1);});\n", + "acdonclick(\"#acdaut2 #S2\",function(){acd2_state(2);});\n", + "acdonclick(\"#acd2 #N0\",function(){acd2_node(0, 1);});\n", + "acdonclick(\"#acd2 #N1\",function(){acd2_node(1, 0);});\n", + "acdonclick(\"#acd2 #N2\",function(){acd2_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 47, @@ -8353,7 +8634,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700feb40> >" + " *' at 0x7f82c00bdd40> >" ] }, "execution_count": 48, @@ -8628,36 +8909,69 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut3 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd3 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut3 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut3 #E1\",function(){acd3_edge(1);});\n", + "acdonclick(\"#acdaut3 #E2\",function(){acd3_edge(2);});\n", + "acdonclick(\"#acdaut3 #E3\",function(){acd3_edge(3);});\n", + "acdonclick(\"#acdaut3 #E4\",function(){acd3_edge(4);});\n", + "acdonclick(\"#acdaut3 #E5\",function(){acd3_edge(5);});\n", + "acdonclick(\"#acdaut3 #E6\",function(){acd3_edge(6);});\n", + "acdonclick(\"#acdaut3 #S0\",function(){acd3_state(0);});\n", + "acdonclick(\"#acdaut3 #S1\",function(){acd3_state(1);});\n", + "acdonclick(\"#acdaut3 #S2\",function(){acd3_state(2);});\n", + "acdonclick(\"#acd3 #N0\",function(){acd3_node(0, 1);});\n", + "acdonclick(\"#acd3 #N1\",function(){acd3_node(1, 0);});\n", + "acdonclick(\"#acd3 #N2\",function(){acd3_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 49, @@ -8841,7 +9155,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700fea80> >" + " *' at 0x7f82c00bf300> >" ] }, "execution_count": 50, @@ -8993,7 +9307,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107240> >" + " *' at 0x7f82c00be5b0> >" ] }, "execution_count": 51, @@ -9105,7 +9419,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 52, @@ -9271,7 +9585,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107030> >" + " *' at 0x7f82c00bf5d0> >" ] }, "execution_count": 53, @@ -9535,36 +9849,63 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut4 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd4 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut4 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E8\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN1\"]);\n", + "acdonclick(\"#acdaut4 #E1\",function(){acd4_edge(1);});\n", + "acdonclick(\"#acdaut4 #E2\",function(){acd4_edge(2);});\n", + "acdonclick(\"#acdaut4 #E3\",function(){acd4_edge(3);});\n", + "acdonclick(\"#acdaut4 #E4\",function(){acd4_edge(4);});\n", + "acdonclick(\"#acdaut4 #E5\",function(){acd4_edge(5);});\n", + "acdonclick(\"#acdaut4 #E6\",function(){acd4_edge(6);});\n", + "acdonclick(\"#acdaut4 #E7\",function(){acd4_edge(7);});\n", + "acdonclick(\"#acdaut4 #E8\",function(){acd4_edge(8);});\n", + "acdonclick(\"#acdaut4 #S0\",function(){acd4_state(0);});\n", + "acdonclick(\"#acdaut4 #S1\",function(){acd4_state(1);});\n", + "acdonclick(\"#acd4 #N0\",function(){acd4_node(0, 1);});\n", + "acdonclick(\"#acd4 #N1\",function(){acd4_node(1, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 55, @@ -9708,7 +10049,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107b70> >" + " *' at 0x7f82c00f4240> >" ] }, "execution_count": 57, @@ -9855,7 +10196,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d240> >" + " *' at 0x7f82c00f4090> >" ] }, "execution_count": 58, @@ -10165,36 +10506,68 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut5 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd5 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut5 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut5 #E1\",function(){acd5_edge(1);});\n", + "acdonclick(\"#acdaut5 #E2\",function(){acd5_edge(2);});\n", + "acdonclick(\"#acdaut5 #E3\",function(){acd5_edge(3);});\n", + "acdonclick(\"#acdaut5 #E4\",function(){acd5_edge(4);});\n", + "acdonclick(\"#acdaut5 #E5\",function(){acd5_edge(5);});\n", + "acdonclick(\"#acdaut5 #E6\",function(){acd5_edge(6);});\n", + "acdonclick(\"#acdaut5 #E7\",function(){acd5_edge(7);});\n", + "acdonclick(\"#acdaut5 #S0\",function(){acd5_state(0);});\n", + "acdonclick(\"#acdaut5 #S1\",function(){acd5_state(1);});\n", + "acdonclick(\"#acdaut5 #S2\",function(){acd5_state(2);});\n", + "acdonclick(\"#acdaut5 #S3\",function(){acd5_state(3);});\n", + "acdonclick(\"#acd5 #N0\",function(){acd5_node(0, 1);});\n", + "acdonclick(\"#acd5 #N1\",function(){acd5_node(1, 0);});\n", + "acdonclick(\"#acd5 #N2\",function(){acd5_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 60, @@ -10322,7 +10695,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d5a0> >" + " *' at 0x7f82c00f50b0> >" ] }, "execution_count": 61, @@ -10452,7 +10825,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d6f0> >" + " *' at 0x7f82c00f52c0> >" ] }, "execution_count": 62, @@ -10732,7 +11105,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116270> >" + " *' at 0x7f82c00f4960> >" ] }, "execution_count": 63, @@ -10826,7 +11199,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116630> >" + " *' at 0x7f82c00f5a10> >" ] }, "execution_count": 64, @@ -10937,7 +11310,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116450> >" + " *' at 0x7f82c00f5ce0> >" ] }, "execution_count": 66, @@ -10995,7 +11368,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.10.7" } }, "nbformat": 4, From ba695194cd9161bc3fe4a5075b348684134989b6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 16:22:06 +0100 Subject: [PATCH 214/606] priv: remove unused allocator.hh * spot/priv/allocator.hh: Delete. * spot/priv/Makefile.am, tests/core/mempool.cc: Adjust. --- spot/priv/Makefile.am | 3 +- spot/priv/allocator.hh | 104 ----------------------------------------- tests/core/mempool.cc | 29 ------------ 3 files changed, 1 insertion(+), 135 deletions(-) delete mode 100644 spot/priv/allocator.hh diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index 317292bd3..b2c75ab7d 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013-2019, 2021 Laboratoire de Recherche et +## Copyright (C) 2013-2019, 2021-2022 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## ## This file is part of Spot, a model checking library. 
@@ -24,7 +24,6 @@ AM_CXXFLAGS = $(WARNING_CXXFLAGS) noinst_LTLIBRARIES = libpriv.la libpriv_la_SOURCES = \ accmap.hh \ - allocator.hh \ bddalloc.cc \ bddalloc.hh \ freelist.cc \ diff --git a/spot/priv/allocator.hh b/spot/priv/allocator.hh deleted file mode 100644 index 9c3d50268..000000000 --- a/spot/priv/allocator.hh +++ /dev/null @@ -1,104 +0,0 @@ -// -*- coding: utf-8 -*- -// Copyright (C) 2011, 2015-2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) -// -// This file is part of Spot, a model checking library. -// -// Spot is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation; either version 3 of the License, or -// (at your option) any later version. -// -// Spot is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -// License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -#pragma once - -#include - -namespace spot -{ - /// An allocator to be used with STL containers. - /// It uses a spot::fixed_size_pool to handle memory. - /// It is intended to improve performance and locality of node-based - /// containers (std::{unordered}{multi}{set,map}). - /// It is geared towards efficiently allocating memory for one object at a - /// time (the nodes of the node-based containers). Larger allocations are - /// served by calling the global memory allocation mechanism (::operator new). - /// Using it for contiguous containers (such as std::vector or std::deque) - /// will be less efficient than using the default std::allocator. - /// - /// Short reminder on STL concept of Allocator: - /// allocate() may throw - /// deallocate() must not throw - /// equality testing (i.e. == and !=) must not throw - /// copying allocator (constructor and assignment) must not throw - /// moving allocator (constructor and assignment) must not throw - /// - /// WARNING this class is NOT thread-safe: the allocator relies on a static - /// fixed_size_pool (which is not thread-safe either). 
- template - class pool_allocator - { - static - fixed_size_pool& - pool() - { - static fixed_size_pool p = - fixed_size_pool(sizeof(T)); - return p; - } - - public: - using value_type = T; - using pointer = value_type*; - using const_pointer = const value_type*; - using size_type = size_t; - - constexpr pool_allocator() noexcept - {} - template - constexpr pool_allocator(const pool_allocator&) noexcept - {} - - template - struct rebind - { - using other = pool_allocator; - }; - - pointer - allocate(size_type n) - { - if (SPOT_LIKELY(n == 1)) - return static_cast(pool().allocate()); - else - return static_cast(::operator new(n*sizeof(T))); - } - - void - deallocate(pointer ptr, size_type n) noexcept - { - if (SPOT_LIKELY(n == 1)) - pool().deallocate(static_cast(ptr)); - else - ::operator delete(ptr); - } - - bool - operator==(const pool_allocator&) const noexcept - { - return true; - } - bool - operator!=(const pool_allocator& o) const noexcept - { - return !(this->operator==(o)); - } - }; -} diff --git a/tests/core/mempool.cc b/tests/core/mempool.cc index 9d3610df7..1431a24b2 100644 --- a/tests/core/mempool.cc +++ b/tests/core/mempool.cc @@ -23,9 +23,6 @@ #include #include -#include - -#include namespace { @@ -157,32 +154,6 @@ int main() c->incr(); // no delete: valgrind should find a leak } - { - std::set, spot::pool_allocator> s; - s.insert(1); - s.insert(2); - s.insert(1); - s.erase(1); - s.insert(3); - s.insert(4); - - s.clear(); - - auto t = s; - t.insert(5); - t.insert(6); - - std::swap(s, t); - - s.erase(5); - s.erase(6); - - if (s != t) - return 1; - else - return 0; - } - return 0; } From c9ba998200d6f57037360c67628bf24f6bf79e1b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 16:35:05 +0100 Subject: [PATCH 215/606] avoid a g++-12 warning about potential null pointer dereference * spot/twaalgos/determinize.cc (sorted_nodes): Rewrite to avoid reallocation of temporary vector. --- spot/twaalgos/determinize.cc | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index ba4fb3ded..d2d35a824 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -472,15 +472,23 @@ namespace spot std::vector res; for (const auto& n: s.nodes_) { - int brace = n.second; - std::vector tmp; - while (brace >= 0) + // First, count the number of braces. + unsigned nbraces = 0; + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + ++nbraces; + // Then list them in reverse order. Since we know the + // number of braces, we can allocate exactly what we need. + if (nbraces > 0) { - // FIXME: is there a smarter way? - tmp.insert(tmp.begin(), brace); - brace = s.braces_[brace]; + std::vector tmp(nbraces, 0); + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + tmp[--nbraces] = brace; + res.emplace_back(n.first, std::move(tmp)); + } + else + { + res.emplace_back(n.first, std::vector{}); } - res.emplace_back(n.first, std::move(tmp)); } std::sort(res.begin(), res.end(), compare()); return res; From daf797b9d48936b6a6ebc119bad0210dcc5e232f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 Jan 2023 15:11:21 +0100 Subject: [PATCH 216/606] fix merging of initial states in state-based automata Fixes #522 reported by Raven Beutner. * spot/parseaut/parseaut.yy: Make sure all edges leaving the initial state have the same color. * THANKS: Add Raven. * NEWS: Mention the bug. * tests/core/522.test: New file. * tests/Makefile.am: Add it. 
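For illustration only (not part of this change): once the 552.hoa input created
by the new test exists, the repaired invariant can also be observed from the
Python bindings, assuming spot.automaton() and the usual edge iteration are
available:

    import spot
    aut = spot.automaton('552.hoa')        # the parser merges the two initial states
    init = aut.get_init_state_number()
    marks = [e.acc for e in aut.out(init)]
    # with state-based acceptance, every edge leaving the merged initial
    # state should now carry the same acceptance mark
    print(all(m == marks[0] for m in marks))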
--- NEWS | 6 ++++++ THANKS | 1 + spot/parseaut/parseaut.yy | 33 ++++++++++++++++++++++++------ tests/Makefile.am | 3 ++- tests/core/522.test | 43 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 79 insertions(+), 7 deletions(-) create mode 100755 tests/core/522.test diff --git a/NEWS b/NEWS index 384ddc8bc..3a950996a 100644 --- a/NEWS +++ b/NEWS @@ -13,6 +13,12 @@ New in spot 2.11.3.dev (not yet released) - spot.acd() no longer depends on jQuery for interactivity. + Bug fixes: + + - When merging initial states from state-based automata with + multiple initial states (because Spot supports only one), the HOA + parser could break state-based acceptance. (Issue #522.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/THANKS b/THANKS index 356d187a1..93155f9d1 100644 --- a/THANKS +++ b/THANKS @@ -48,6 +48,7 @@ Nikos Gorogiannis Ondřej Lengál Paul Guénézan Pierre Ganty +Raven Beutner Reuben Rowe Roei Nahum Rüdiger Ehlers diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 4d96b8c1c..7d5fac361 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1,5 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +** Copyright (C) 2014-2023 Laboratoire de Recherche et Développement ** de l'Epita (LRDE). ** ** This file is part of Spot, a model checking library. @@ -2610,7 +2610,7 @@ static void fix_initial_state(result_& r) start.resize(std::distance(start.begin(), res)); assert(start.size() >= 1); - if (start.size() == 1) + if (start.size() == 1) { if (r.opts.want_kripke) r.h->ks->set_init_state(start.front().front()); @@ -2627,13 +2627,13 @@ static void fix_initial_state(result_& r) "a single initial state"); return; } + auto& aut = r.h->aut; // Fiddling with initial state may turn an incomplete automaton // into a complete one. - if (r.complete.is_false()) - r.complete = spot::trival::maybe(); + if (aut->prop_complete().is_false()) + aut->prop_complete(spot::trival::maybe()); // Multiple initial states. We might need to add a fake one, // unless one of the actual initial state has no incoming edge. - auto& aut = r.h->aut; std::vector has_incoming(aut->num_states(), 0); for (auto& t: aut->edges()) for (unsigned ud: aut->univ_dests(t)) @@ -2672,6 +2672,9 @@ static void fix_initial_state(result_& r) { unsigned p = pp.front(); if (p != init) + // FIXME: If p has no incoming we should be able to + // change the source of the edges of p instead of + // adding new edges. for (auto& t: aut->out(p)) aut->new_edge(init, t.dst, t.cond); } @@ -2694,6 +2697,24 @@ static void fix_initial_state(result_& r) } combiner.new_dests(init, comb_or); } + + // Merging two states may break state-based acceptance + // make sure all outgoing edges have the same color. 
+ if (aut->prop_state_acc().is_true()) + { + bool first = true; + spot::acc_cond::mark_t prev; + for (auto& e: aut->out(init)) + if (first) + { + first = false; + prev = e.acc; + } + else if (e.acc != prev) + { + e.acc = prev; + } + } } } @@ -2871,8 +2892,8 @@ namespace spot r.aut_or_ks->set_named_prop("aliases", p); } fix_acceptance(r); + fix_properties(r); // before fix_initial_state fix_initial_state(r); - fix_properties(r); if (r.h->aut && !r.h->aut->is_existential()) r.h->aut->merge_univ_dests(); return r.h; diff --git a/tests/Makefile.am b/tests/Makefile.am index 0810df809..3bd43d5f4 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009-2022 Laboratoire de Recherche et Développement +## Copyright (C) 2009-2023 Laboratoire de Recherche et Développement ## de l'Epita (LRDE). ## Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 ## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -223,6 +223,7 @@ TESTS_misc = \ TESTS_twa = \ core/385.test \ core/521.test \ + core/522.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ diff --git a/tests/core/522.test b/tests/core/522.test new file mode 100755 index 000000000..5fe6ba945 --- /dev/null +++ b/tests/core/522.test @@ -0,0 +1,43 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #522. + +cat >552.hoa < out.hoa +grep 'States: 7' out.hoa From 396009c014beb3437508dd1ec1cc2a625c68081f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 15:09:26 +0100 Subject: [PATCH 217/606] parseaut: better merge of multiple initial states If an initial states without incoming transition has to be merged into another one, its outgoing edges can be reused by just changing their source. * spot/parseaut/parseaut.yy (fix_initial_state): Implement this here. * tests/core/522.test: Add more tests. * tests/core/readsave.test: Adjust one expected output. * doc/org/hoa.org: Mention the completeness change. * NEWS: Mention the new feature. --- NEWS | 5 +++++ doc/org/hoa.org | 5 +++-- spot/parseaut/parseaut.yy | 30 +++++++++++++++++++++++------ tests/core/522.test | 40 ++++++++++++++++++++++++++++++++++++--- tests/core/readsave.test | 7 +++---- 5 files changed, 72 insertions(+), 15 deletions(-) diff --git a/NEWS b/NEWS index 3a950996a..d6f6a702b 100644 --- a/NEWS +++ b/NEWS @@ -9,6 +9,11 @@ New in spot 2.11.3.dev (not yet released) - b:b[*i..j] = b[*max(i,1)..j] - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1, j+l-1] + - The HOA parser is a bit smarter when merging multiple initial + states into a single initial state (Spot's automaton class + supports only one): it now reuse the edges leaving initial states + without incoming transitions. 
+ Python: - spot.acd() no longer depends on jQuery for interactivity. diff --git a/doc/org/hoa.org b/doc/org/hoa.org index 26969e4ed..6994abdc5 100644 --- a/doc/org/hoa.org +++ b/doc/org/hoa.org @@ -66,7 +66,7 @@ the HOA format, the output may not be exactly the same as the input. sets. This hard-coded limit can be augmented at configure time - using option `--enable-max-accsets=N`, but doing so will consume + using option =--enable-max-accsets=N=, but doing so will consume more memory and time. - Multiple (or missing) initial states are emulated. @@ -76,7 +76,8 @@ the HOA format, the output may not be exactly the same as the input. is transformed into an equivalent TωA by merging the initial states into a single one. The merged state can either be one of the original initial states (if one of those has no incoming edge) or a - new state introduced for that purpose. + new state introduced for that purpose. This "conversion" may change + the completeness property of the automaton. Similarly, when an automaton with no initial state is loaded (this includes the case where the automaton has no state), a disconnected diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 7d5fac361..5b8792e96 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -2671,12 +2671,30 @@ static void fix_initial_state(result_& r) for (auto& pp: start) { unsigned p = pp.front(); - if (p != init) - // FIXME: If p has no incoming we should be able to - // change the source of the edges of p instead of - // adding new edges. - for (auto& t: aut->out(p)) - aut->new_edge(init, t.dst, t.cond); + if (p == init) + continue; + if (!has_incoming[p]) + { + // If p has no incoming edge, we can simply take + // out its outgoing edges and "re-source" them on init. + // This will avoid creating new edges. + for (auto& t: aut->out(p)) + t.src = init; + auto& gr = aut->get_graph(); + auto& ps = gr.state_storage(p); + auto& is = gr.state_storage(init); + gr.edge_storage(is.succ_tail).next_succ = ps.succ; + is.succ_tail = ps.succ_tail; + ps.succ = ps.succ_tail = 0; + // we just created a state without successors + aut->prop_complete(false); + } + else + { + // duplicate all edges + for (auto& t: aut->out(p)) + aut->new_edge(init, t.dst, t.cond); + } } } else diff --git a/tests/core/522.test b/tests/core/522.test index 5fe6ba945..3f1596514 100755 --- a/tests/core/522.test +++ b/tests/core/522.test @@ -38,6 +38,40 @@ State: 0 {0} [t] 2 [t] 3 State: 1 {0} [t] 4 [t] 5 State: 2 State: 5 {1} [t] 6 [t] 7 State: 6 [t] 6 [t] 7 State: 7 [t] 6 [t] 7 --END-- EOF -# This command, even without --remove-dead, used to break during print_hoa() -autfilt --remove-dead 552.hoa > out.hoa -grep 'States: 7' out.hoa +# This command used to break during print_hoa() +autfilt 552.hoa > out.hoa +test "8 1 16 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" + +cat >552loop1.hoa < out.hoa +test "8 0 20 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" + +cat >552loop2.hoa < out.hoa +test "9 0 24 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" diff --git a/tests/core/readsave.test b/tests/core/readsave.test index 3780b4766..cf6f43b89 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2022 Laboratoire de +# Copyright (C) 2009, 2010, 2012, 2014-2023 Laboratoire de # Recherche et Développement de l'Epita (LRDE). 
# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -346,9 +346,8 @@ digraph "" { 0 -> 0 [label="b", id="E1", tooltip="\\\\E\n#1"] 1 -> 1 [label="a", id="E2", tooltip="\\\\E\n#2"] 2 [label="s2"] - 2 -> 0 [label="b", id="E3", tooltip="\\\\E\n#3"] - 3 -> 1 [label="a", id="E4", tooltip="\\\\E\n#4"] - 3 -> 0 [label="b", id="E5", tooltip="\\\\E\n#5"] + 3 -> 1 [label="a", id="E3", tooltip="\\\\E\n#3"] + 3 -> 0 [label="b", id="E4", tooltip="\\\\E\n#4"] } EOF From 16ad7bdf77713e9a4b96bd2d9a37990b5612f26b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:47:46 +0100 Subject: [PATCH 218/606] * doc/org/spot.css: Do not define background twice. --- doc/org/spot.css | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/org/spot.css b/doc/org/spot.css index ca8b12395..569ca37a9 100644 --- a/doc/org/spot.css +++ b/doc/org/spot.css @@ -16,9 +16,9 @@ h1::before{content:"";position:absolute;z-index:-1;background-color:#ffe35e;left #table-of-contents #text-table-of-contents{text-align:left} #org-div-home-and-up{text-align:center;font-size:100%} .outline-2 h2{display:block;width:100%;position:relative} -.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} +.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} .outline-3 h3{display:block;width:auto;position:relative} -.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} +.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} .outline-2 h2:hover::before,.outline-3 h3:hover::before{background-color:#ffe35e} pre{margin:1.2ex} pre.src{padding-top:8px;border-left-style:solid;border-color:#00adad;overflow:auto;margin-top:0;margin-bottom:0} From 2ba6fba29f14c591cc63ebb3b47d185704b3b647 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:48:14 +0100 Subject: [PATCH 219/606] simplify several comparison operators * spot/twaalgos/dtbasat.cc, spot/twaalgos/dtwasat.cc, spot/twaalgos/simulation.cc: Simplify, as reported by sonarcloud. --- spot/twaalgos/dtbasat.cc | 8 ++------ spot/twaalgos/dtwasat.cc | 8 ++------ spot/twaalgos/simulation.cc | 10 ++-------- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index b2147ebb4..c4bf3d1bc 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2021, 2022 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2021-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. 
@@ -77,11 +77,7 @@ namespace spot return true; if (this->src_ref > other.src_ref) return false; - if (this->dst_ref < other.dst_ref) - return true; - if (this->dst_ref > other.dst_ref) - return false; - return false; + return this->dst_ref < other.dst_ref; } }; diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 25a299154..2ecf38fd1 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche +// Copyright (C) 2013-2023 Laboratoire de Recherche // et Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -98,11 +98,7 @@ namespace spot return true; if (this->acc_ref > other.acc_ref) return false; - if (this->acc_cand < other.acc_cand) - return true; - if (this->acc_cand > other.acc_cand) - return false; - return false; + return this->acc_cand < other.acc_cand; } }; diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index ca8928888..ed53929b3 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -93,13 +93,7 @@ namespace spot return true; if (states > r.states) return false; - - if (edges < r.edges) - return true; - if (edges > r.edges) - return false; - - return false; + return edges < r.edges; } inline bool operator>(const automaton_size& r) From 716bb781eb57a4884eee0ea4d3a2637653a0063d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:49:00 +0100 Subject: [PATCH 220/606] * spot/twaalgos/game.cc: Fix incorrect std::forward. --- spot/twaalgos/game.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index f5699bf49..df259b84a 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2017-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -1034,7 +1034,7 @@ namespace spot ("set_state_players(): There must be as many owners as states"); arena->set_named_prop("state-player", - new region_t(std::forward(owners))); + new region_t(std::move(owners))); } void set_state_player(twa_graph_ptr arena, unsigned state, bool owner) @@ -1101,7 +1101,7 @@ namespace spot throw std::runtime_error("set_strategy(): strategies need to have " "the same size as the automaton."); arena->set_named_prop("strategy", - new strategy_t(std::forward(strat))); + new strategy_t(std::move(strat))); } void set_synthesis_outputs(const twa_graph_ptr& arena, const bdd& outs) @@ -1152,7 +1152,7 @@ namespace spot ("set_state_winners(): There must be as many winners as states"); arena->set_named_prop("state-winner", - new region_t(std::forward(winners))); + new region_t(std::move(winners))); } void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner) From 05edab3f5ae20b0d2122d0e5a551ce62e22f0acc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 23:34:10 +0100 Subject: [PATCH 221/606] fix some code smells reported by sonarcloud * bench/dtgbasat/gen.py, bin/autcross.cc, bin/autfilt.cc, bin/common_aoutput.cc, bin/common_aoutput.hh: Various cleanups. --- bench/dtgbasat/gen.py | 6 ++-- bin/autcross.cc | 12 +++---- bin/autfilt.cc | 79 ++++++++++++++++++------------------------- bin/common_aoutput.cc | 12 +++---- bin/common_aoutput.hh | 8 ++--- 5 files changed, 51 insertions(+), 66 deletions(-) diff --git a/bench/dtgbasat/gen.py b/bench/dtgbasat/gen.py index e96bf2825..dabf77971 100755 --- a/bench/dtgbasat/gen.py +++ b/bench/dtgbasat/gen.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (C) 2016-2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2016-2018, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -55,12 +55,12 @@ class BenchConfig(object): if line[0] == '#' or line.isspace(): continue elif line[0:2] == "sh": - sh = re.search('sh (.+?)$', line).group(1) + sh = re.search('sh (.+)$', line).group(1) continue else: name = re.search('(.+?):', line).group(1) code = re.search(':(.+?)>', line).group(1) - xoptions = re.search('>(.+?)$', line).group(1) + xoptions = re.search('>(.+)$', line).group(1) b = Bench(name=name, code=code, xoptions=xoptions) self.l.append(b) self.sh.append(sh) diff --git a/bin/autcross.cc b/bin/autcross.cc index 24cd9bcd4..b3e504bb3 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2017-2020, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -549,7 +549,7 @@ namespace { if (!quiet) std::cerr << "info: building " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) + << '*' << autname(j, !is_really_comp(j)) << " requires more acceptance sets than supported\n"; return false; } @@ -557,14 +557,14 @@ namespace if (verbose) std::cerr << "info: check_empty " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) << '\n'; + << '*' << autname(j, !is_really_comp(j)) << '\n'; auto w = aut_i->intersecting_word(aut_j); if (w) { std::ostream& err = global_error(); err << "error: " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) + << '*' << autname(j, !is_really_comp(j)) << (" is nonempty; both automata accept the infinite word:\n" " "); example() << *w << '\n'; @@ -613,7 +613,7 @@ namespace return src.str(); }(); - input_statistics.push_back(in_statistics()); + input_statistics.emplace_back(in_statistics()); input_statistics[round_num].input_source = std::move(source); if (auto name = input->get_named_prop("automaton-name")) @@ -658,7 +658,7 @@ namespace problems += prob; } spot::cleanup_tmpfiles(); - output_statistics.push_back(std::move(stats)); + output_statistics.emplace_back(std::move(stats)); if (verbose) { diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 7cff60e8b..b55d1bc9f 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -448,7 +448,7 @@ struct canon_aut std::vector edges; std::string acc; - canon_aut(const spot::const_twa_graph_ptr& aut) + explicit canon_aut(const spot::const_twa_graph_ptr& aut) : num_states(aut->num_states()) , edges(aut->edge_vector().begin() + 1, aut->edge_vector().end()) @@ -755,6 +755,22 @@ product_or(spot::twa_graph_ptr left, spot::twa_graph_ptr right) return spot::product_or(left, right); } +static spot::twa_graph_ptr +word_to_aut(const char* word, const char *argname) +{ + try + { + return spot::parse_word(word, opt->dict)->as_automaton(); + } + catch (const spot::parse_error& e) + { + error(2, 0, "failed to parse the argument of --%s:\n%s", + argname, e.what()); + } + SPOT_UNREACHABLE(); + return nullptr; +} + static int parse_opt(int key, char* arg, struct argp_state*) { @@ -776,17 +792,14 @@ parse_opt(int key, char* arg, struct argp_state*) opt_nth = parse_range(arg, 0, std::numeric_limits::max()); break; case 'u': - opt->uniq = std::unique_ptr(new std::set()); + opt->uniq = std::make_unique(); break; case 'v': opt_invert = true; break; case 'x': - { - const char* opt = extra_options.parse_options(arg); - if (opt) - error(2, 0, "failed to parse --options near '%s'", opt); - } + if (const char* opt = extra_options.parse_options(arg)) + error(2, 0, "failed to parse --options near '%s'", opt); break; case OPT_ALIASES: opt_aliases = XARGMATCH("--aliases", arg, aliases_args, aliases_types); @@ -802,16 +815,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_ACCEPT_WORD: - try - { - opt->acc_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --accept-word:\n%s", - e.what()); - } + opt->acc_words.emplace_back(word_to_aut(arg, "accept-word")); break; case OPT_ACCEPTANCE_IS: { @@ -964,16 +968,7 @@ 
parse_opt(int key, char* arg, struct argp_state*) "%d should be followed by a comma and WORD", res); arg = endptr + 1; } - try - { - opt->hl_words.emplace_back(spot::parse_word(arg, opt->dict) - ->as_automaton(), res); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --highlight-word:\n%s", - e.what()); - } + opt->hl_words.emplace_back(word_to_aut(arg, "highlight-word"), res); } break; case OPT_HIGHLIGHT_LANGUAGES: @@ -1157,16 +1152,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_REJECT_WORD: - try - { - opt->rej_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --reject-word:\n%s", - e.what()); - } + opt->rej_words.emplace_back(word_to_aut(arg, "reject-word")); break; case OPT_REM_AP: opt->rem_ap.add_ap(arg); @@ -1291,7 +1277,7 @@ namespace static bool match_acceptance(spot::twa_graph_ptr aut) { - auto& acc = aut->acc(); + const spot::acc_cond& acc = aut->acc(); switch (opt_acceptance_is) { case ACC_Any: @@ -1346,8 +1332,7 @@ namespace { bool max; bool odd; - bool is_p = acc.is_parity(max, odd, true); - if (!is_p) + if (!acc.is_parity(max, odd, true)) return false; switch (opt_acceptance_is) { @@ -1460,7 +1445,7 @@ namespace if (matched && opt_acceptance_is) matched = match_acceptance(aut); - if (matched && (opt_sccs_set | opt_art_sccs_set)) + if (matched && (opt_sccs_set || opt_art_sccs_set)) { spot::scc_info si(aut); unsigned n = si.scc_count(); @@ -1540,14 +1525,14 @@ namespace && spot::contains(aut, opt->equivalent_pos); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; break; } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -1681,13 +1666,13 @@ namespace aut->accepting_run()->highlight(opt_highlight_accepting_run); if (!opt->hl_words.empty()) - for (auto& word_aut: opt->hl_words) + for (auto& [word_aut, color]: opt->hl_words) { if (aut->acc().uses_fin_acceptance()) error(2, 0, "--highlight-word does not yet work with Fin acceptance"); - if (auto run = spot::product(aut, word_aut.first)->accepting_run()) - run->project(aut)->highlight(word_aut.second); + if (auto run = spot::product(aut, word_aut)->accepting_run()) + run->project(aut)->highlight(color); } timer.stop(); diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index fcc79fc3c..60f83289e 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -453,7 +453,7 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, const char* filename, int loc, - spot::process_timer& ptimer, + const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix) { timer_ = ptimer; @@ -633,10 +633,10 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, outputnamer.print(haut, aut, f, filename, loc, ptimer, csv_prefix, csv_suffix); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } // Output it. diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index d33b687d2..f57beae84 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -155,7 +155,7 @@ public: print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, - const char* filename, int loc, spot::process_timer& ptimer, + const char* filename, int loc, const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix); private: @@ -196,7 +196,7 @@ class automaton_printer std::map> outputfiles; public: - automaton_printer(stat_style input = no_input); + explicit automaton_printer(stat_style input = no_input); ~automaton_printer(); void From 96c3972c5c59d7ef7766919f92e872afcdf26772 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 23:43:31 +0100 Subject: [PATCH 222/606] bin: detect overflows in conversion functions * bin/common_conv.cc (to_int, to_unsigned): Here. * bin/common_range.cc (parse_range): And there. * tests/core/ltlgrind.test, tests/core/genaut.test, tests/core/randaut.test: Add test cases. --- bin/common_conv.cc | 15 ++++++++++++--- bin/common_range.cc | 22 ++++++++++++++-------- tests/core/genaut.test | 9 ++++++--- tests/core/ltlgrind.test | 5 ++++- tests/core/randaut.test | 7 +++++-- 5 files changed, 41 insertions(+), 17 deletions(-) diff --git a/bin/common_conv.cc b/bin/common_conv.cc index e63969b16..02b1815fd 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2015, 2018, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -25,10 +25,14 @@ int to_int(const char* s, const char* where) { char* endptr; - int res = strtol(s, &endptr, 10); + long int lres = strtol(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an integer (in argument of %s).", s, where); + int res = lres; + if (res != lres) + error(2, 0, "value '%s' is too large for an int (in argument of %s).", + s, where); return res; } @@ -45,11 +49,16 @@ unsigned to_unsigned (const char *s, const char* where) { char* endptr; - unsigned res = strtoul(s, &endptr, 10); + unsigned long lres = strtoul(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an unsigned integer (in argument of %s).", s, where); + unsigned res = lres; + if (res != lres) + error(2, 0, + "value '%s' is too large for a unsigned int (in argument of %s).", + s, where); return res; } diff --git a/bin/common_range.cc b/bin/common_range.cc index 8909a26c0..9419cc389 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2016 Laboratoire de Recherche et +// Copyright (C) 2012, 2014, 2016, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -36,13 +36,16 @@ parse_range(const char* str, int missing_left, int missing_right) { range res; char* end; - res.min = strtol(str, &end, 10); + long lres = strtol(str, &end, 10); + res.min = lres; + if (res.min != lres) + error(2, 0, "start of range '%s' is too large for an int.", str); if (end == str) { // No leading number. It's OK as long as the string is not // empty. if (!*end) - error(1, 0, "invalid empty range"); + error(2, 0, "invalid empty range"); res.min = missing_left; } if (!*end) @@ -66,19 +69,22 @@ parse_range(const char* str, int missing_left, int missing_right) { // Parse the next integer. char* end2; - res.max = strtol(end, &end2, 10); + lres = strtol(end, &end2, 10); + res.max = lres; + if (res.max != lres) + error(2, 0, "end of range '%s' is too large for an int.", str); if (str == end2) - error(1, 0, "invalid range '%s' " + error(2, 0, "invalid range '%s' " "(should start with digits, dots, or colon)", str); if (end == end2) - error(1, 0, "invalid range '%s' (missing end?)", str); + error(2, 0, "invalid range '%s' (missing end?)", str); if (*end2) - error(1, 0, "invalid range '%s' (trailing garbage?)", str); + error(2, 0, "invalid range '%s' (trailing garbage?)", str); } } if (res.min < 0 || res.max < 0) - error(1, 0, "invalid range '%s': values must be positive", str); + error(2, 0, "invalid range '%s': values must be positive", str); return res; } diff --git a/tests/core/genaut.test b/tests/core/genaut.test index 5da9509ed..f364569e1 100644 --- a/tests/core/genaut.test +++ b/tests/core/genaut.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2019, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2017-2020, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -60,7 +60,10 @@ genaut --l-nba='1..3?' 2>err && exit 1 grep 'invalid range.*trailing garbage' err genaut --l-nba='1..' 2>err && exit 1 grep 'invalid range.*missing end' err - +genaut --l-nba='9999999999999999999999999..' 
2>err && exit 1 +grep 'start.*too large' err +genaut --l-nba='1..9999999999999999999999999' 2>err && exit 1 +grep 'end.*too large' err # Tests for autfilt -N/--nth genaut --ks-nca=1..5 | autfilt -N 2..4 > range1.hoa diff --git a/tests/core/ltlgrind.test b/tests/core/ltlgrind.test index 292756bc6..09e75ee4e 100755 --- a/tests/core/ltlgrind.test +++ b/tests/core/ltlgrind.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2019 Laboratoire de Recherche et Développement +# Copyright (C) 2014, 2015, 2019, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -198,3 +198,6 @@ checkopt_noparse -F input/2 --format '%<,%f,%>,%F,%L' <err && exit 1 +grep 'too large' err diff --git a/tests/core/randaut.test b/tests/core/randaut.test index 7ff851646..50558e790 100755 --- a/tests/core/randaut.test +++ b/tests/core/randaut.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -29,6 +29,9 @@ grep "randaut: 3.1.*is not between 0 and 1 (in argument of -e" err randaut -n1a 3 2>err && exit 1 grep "randaut: failed to parse '1a' as an integer.* -n/--automata)" err +randaut -n99999999999999999999999999 3 2>err && exit 1 +grep "randaut:.*too large" err + randaut --spin -Q4 a b | ../ikwiad -H -XN - >out grep 'States: 4' out grep 'AP: 2' out From 09bbaa1e418f94d318d2593a984d3d5f3766442d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 6 Jan 2023 11:55:34 +0100 Subject: [PATCH 223/606] more code smells * bin/common_file.cc, bin/common_file.hh, bin/common_finput.cc, bin/common_finput.hh, bin/common_output.cc, bin/common_setup.cc, bin/common_setup.hh, bin/common_trans.cc, bin/common_trans.hh, bin/dstar2tgba.cc, bin/genaut.cc, bin/genltl.cc, bin/ltl2tgba.cc, bin/ltl2tgta.cc, bin/ltlcross.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlsynt.cc, bin/randltl.cc: Fix minor code issues reported by sonarcloud. --- bin/common_file.cc | 7 ++-- bin/common_file.hh | 13 +++---- bin/common_finput.cc | 10 ++---- bin/common_finput.hh | 16 +++++---- bin/common_output.cc | 20 +++++------ bin/common_setup.cc | 7 ++-- bin/common_setup.hh | 6 ++-- bin/common_trans.cc | 46 ++++++++++++------------ bin/common_trans.hh | 19 +++++----- bin/dstar2tgba.cc | 4 +-- bin/genaut.cc | 4 +-- bin/genltl.cc | 6 ++-- bin/ltl2tgba.cc | 8 ++--- bin/ltl2tgta.cc | 4 +-- bin/ltlcross.cc | 86 ++++++++++++++++---------------------------- bin/ltldo.cc | 14 +++----- bin/ltlfilt.cc | 20 +++++------ bin/ltlsynt.cc | 15 ++++---- bin/randltl.cc | 6 ++-- 19 files changed, 133 insertions(+), 178 deletions(-) diff --git a/bin/common_file.cc b/bin/common_file.cc index 005bb5479..4e56c6d54 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et +// Copyright (C) 2015, 2016, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -21,7 +21,6 @@ #include #include - output_file::output_file(const char* name, bool force_append) { std::ios_base::openmode mode = std::ios_base::trunc; @@ -39,10 +38,10 @@ output_file::output_file(const char* name, bool force_append) os_ = &std::cout; return; } - of_ = new std::ofstream(name, mode); + of_ = std::make_unique(name, mode); if (!*of_) error(2, errno, "cannot open '%s'", name); - os_ = of_; + os_ = of_.get(); } diff --git a/bin/common_file.hh b/bin/common_file.hh index b8f9842b8..b6aa0bec3 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015-2016, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -21,13 +21,13 @@ #include "common_sys.hh" #include +#include #include -#include class output_file { std::ostream* os_; - std::ofstream* of_ = nullptr; + std::unique_ptr of_; bool append_ = false; public: // Open a file for output. "-" is interpreted as stdout. @@ -37,11 +37,6 @@ public: void close(const std::string& name); - ~output_file() - { - delete of_; - } - bool append() const { return append_; diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 80aca5df7..dbcdb3849 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2019, 2021, 2022 Laboratoire de Recherche +// Copyright (C) 2012-2017, 2019, 2021-2023 Laboratoire de Recherche // et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -96,12 +96,6 @@ parse_formula(const std::string& s) (s, spot::default_environment::instance(), false, lenient); } -job_processor::job_processor() - : abort_run(false), real_filename(nullptr), - col_to_read(0), prefix(nullptr), suffix(nullptr) -{ -} - job_processor::~job_processor() { if (real_filename) @@ -370,7 +364,7 @@ int job_processor::run() { int error = 0; - for (auto& j: jobs) + for (const auto& j: jobs) { switch (j.type) { diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 2a5815fc3..9ecb5b025 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2017, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -56,9 +56,11 @@ spot::parsed_formula parse_formula(const std::string& s); class job_processor { protected: - bool abort_run; // Set to true in process_formula() to abort run(). + bool abort_run = false; // Set to true in process_formula() to abort run(). 
public: - job_processor(); + job_processor() = default; + job_processor(const job_processor&) = delete; + job_processor& operator=(const job_processor&) = delete; virtual ~job_processor(); @@ -84,10 +86,10 @@ public: virtual int run(); - char* real_filename; - long int col_to_read; - char* prefix; - char* suffix; + char* real_filename = nullptr; + long int col_to_read = 0; + char* prefix = nullptr; + char* suffix = nullptr; }; // Report and error message or add a default job depending on whether diff --git a/bin/common_output.cc b/bin/common_output.cc index e9c61a513..93cb2dfaf 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -23,6 +23,7 @@ #include "common_setup.hh" #include #include +#include #include #include #include @@ -297,9 +298,9 @@ namespace }; } -static formula_printer* format = nullptr; +static std::unique_ptr format; static std::ostringstream outputname; -static formula_printer* outputnamer = nullptr; +static std::unique_ptr outputnamer; static std::map> outputfiles; int @@ -320,7 +321,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = lbt_output; break; case 'o': - outputnamer = new formula_printer(outputname, arg); + outputnamer = std::make_unique(outputname, arg); break; case 'p': full_parenth = true; @@ -341,8 +342,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = wring_output; break; case OPT_FORMAT: - delete format; - format = new formula_printer(std::cout, arg); + format = std::make_unique(std::cout, arg); break; default: return ARGP_ERR_UNKNOWN; @@ -417,10 +417,10 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, formula_with_location fl = { f, filename, linenum, prefix, suffix }; outputnamer->print(fl, ptimer); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } output_formula(*out, f, ptimer, filename, linenum, prefix, suffix); *out << output_terminator; diff --git a/bin/common_setup.cc b/bin/common_setup.cc index 24cacae85..af033a47f 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -20,13 +20,14 @@ #include "common_setup.hh" #include "common_aoutput.hh" -#include "argp.h" -#include "closeout.h" +#include +#include #include #include #include #include #include +#include #include static void diff --git a/bin/common_setup.hh b/bin/common_setup.hh index e2fce84e0..94cd16f4f 100644 --- a/bin/common_setup.hh +++ b/bin/common_setup.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2013, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -34,5 +34,5 @@ int protected_main(char** progname, std::function mainfun); // Diagnose exceptions. [[noreturn]] void handle_any_exception(); -#define BEGIN_EXCEPTION_PROTECT try { (void)0; +#define BEGIN_EXCEPTION_PROTECT try { (void)0 #define END_EXCEPTION_PROTECT } catch (...) { handle_any_exception(); } diff --git a/bin/common_trans.cc b/bin/common_trans.cc index e34f3d77d..b93535173 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -53,7 +53,7 @@ struct shorthands_t }; #define SHORTHAND(PRE, POST) { PRE, std::regex("^" PRE), POST } -static shorthands_t shorthands_ltl[] = { +static const shorthands_t shorthands_ltl[] = { SHORTHAND("delag", " %f>%O"), SHORTHAND("lbt", " <%L>%O"), SHORTHAND("ltl2ba", " -f %s>%O"), @@ -73,7 +73,7 @@ static shorthands_t shorthands_ltl[] = { SHORTHAND("owl.* ltl-utilities\\b", " -f %f"), }; -static shorthands_t shorthands_autproc[] = { +static const shorthands_t shorthands_autproc[] = { SHORTHAND("autfilt", " %H>%O"), SHORTHAND("dra2dpa", " <%H>%O"), SHORTHAND("dstar2tgba", " %H>%O"), @@ -85,7 +85,7 @@ static shorthands_t shorthands_autproc[] = { " <%H>%O"), }; -static void show_shorthands(shorthands_t* begin, shorthands_t* end) +static void show_shorthands(const shorthands_t* begin, const shorthands_t* end) { std::cout << ("If a COMMANDFMT does not use any %-sequence, and starts with one of\n" @@ -100,7 +100,8 @@ static void show_shorthands(shorthands_t* begin, shorthands_t* end) } -tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, +tool_spec::tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept : spec(spec), cmd(spec), name(spec), reference(is_ref) { @@ -113,15 +114,15 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, { if (*pos == '{') ++count; - else if (*pos == '}') - if (!--count) - { - name = strndup(cmd + 1, pos - cmd - 1); - cmd = pos + 1; - while (*cmd == ' ' || *cmd == '\t') - ++cmd; - break; - } + else if (*pos == '}' && --count == 0) + { + name = strndup(cmd + 1, pos - cmd - 1); + cmd = pos + 1; + // skip leading whitespace + while (*cmd == ' ' || *cmd == '\t') + ++cmd; + break; + } } } // If there is no % in the string, look for a known @@ -147,11 +148,11 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, auto& p = *begin++; if (std::regex_search(basename, p.rprefix)) { - int m = strlen(p.suffix); - int q = strlen(cmd); + size_t m = strlen(p.suffix); + size_t q = strlen(cmd); char* tmp = static_cast(malloc(q + m + 1)); - strcpy(tmp, cmd); - strcpy(tmp + q, p.suffix); + memcpy(tmp, cmd, q); + memcpy(tmp + q, p.suffix, m + 1); cmd = tmp; allocated = true; break; @@ -490,9 +491,8 @@ read_stdout_of_command(char* const* args) if (close(cout_pipe[1]) < 0) error(2, errno, "closing write-side of pipe failed"); - std::string buffer(32, 0); std::string results; - int bytes_read; + ssize_t bytes_read; for (;;) { static char buffer[512]; @@ -612,7 +612,7 @@ get_arg(const char*& cmd) { const char* start = cmd; std::string arg; - while (int c = *cmd) + while (char c = *cmd) { switch (c) { @@ -642,14 +642,14 @@ get_arg(const char*& cmd) goto end_loop; case '\'': { - int d = 0; + char d = '\0'; while ((d = *++cmd)) { if (d == '\'') break; 
arg.push_back(d); } - if (d == 0) + if (d == '\0') return nullptr; } break; diff --git a/bin/common_trans.hh b/bin/common_trans.hh index 31c88c80c..0ebe59e8c 100644 --- a/bin/common_trans.hh +++ b/bin/common_trans.hh @@ -51,7 +51,8 @@ struct tool_spec // Whether the tool is a reference. bool reference; - tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, + tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept; tool_spec(const tool_spec& other) noexcept; tool_spec& operator=(const tool_spec& other); @@ -71,7 +72,7 @@ struct quoted_formula final: public spot::printable_value struct filed_formula final: public spot::printable { - filed_formula(const quoted_formula& ltl) : f_(ltl) + explicit filed_formula(const quoted_formula& ltl) : f_(ltl) { } @@ -89,9 +90,7 @@ struct filed_formula final: public spot::printable struct filed_automaton final: public spot::printable { - filed_automaton() - { - } + filed_automaton() = default; void print(std::ostream& os, const char* pos) const override; @@ -112,7 +111,7 @@ struct printable_result_filename final: unsigned translator_num; printable_result_filename(); - ~printable_result_filename(); + ~printable_result_filename() override; void reset(unsigned n); void cleanup(); @@ -126,7 +125,7 @@ protected: spot::bdd_dict_ptr dict; // Round-specific variables quoted_formula ltl_formula; - filed_formula filename_formula = ltl_formula; + filed_formula filename_formula{ltl_formula}; // Run-specific variables printable_result_filename output; public: @@ -151,9 +150,9 @@ protected: public: using spot::formater::has; - autproc_runner(// whether we accept the absence of output - // specifier - bool no_output_allowed = false); + explicit autproc_runner(// whether we accept the absence of output + // specifier + bool no_output_allowed = false); void round_automaton(spot::const_twa_graph_ptr aut, unsigned serial); }; diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 5b60a0ecc..4b2ec9662 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -117,7 +117,7 @@ namespace spot::postprocessor& post; automaton_printer printer; - dstar_processor(spot::postprocessor& post) + explicit dstar_processor(spot::postprocessor& post) : hoa_processor(spot::make_bdd_dict()), post(post), printer(aut_input) { } diff --git a/bin/genaut.cc b/bin/genaut.cc index 26678c588..f8d6b93ff 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2017-2019, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -128,7 +128,7 @@ output_pattern(gen::aut_pattern_id pattern, int n) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/genltl.cc b/bin/genltl.cc index 96d8bd7d3..ef8049171 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015-2019, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). 
+// Copyright (C) 2012, 2013, 2015-2019, 2022-2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -317,7 +317,7 @@ output_pattern(gen::ltl_pattern_id pattern, int n, int n2) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index d4fb2fc17..73a9a23c6 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2019, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -124,10 +124,10 @@ namespace { public: spot::translator& trans; - automaton_printer printer; + automaton_printer printer{ltl_input}; - trans_processor(spot::translator& trans) - : trans(trans), printer(ltl_input) + explicit trans_processor(spot::translator& trans) + : trans(trans) { } diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index ab925c7ac..60afcf9e8 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2020, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -168,7 +168,7 @@ namespace public: spot::translator& trans; - trans_processor(spot::translator& trans) + explicit trans_processor(spot::translator& trans) : trans(trans) { } diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index 0dfa09985..3219beb75 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2020, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -264,55 +264,32 @@ end_error() struct statistics { - statistics() noexcept - : ok(false), - alternating(false), - status_str(nullptr), - status_code(0), - time(0), - states(0), - edges(0), - transitions(0), - acc(0), - scc(0), - nonacc_scc(0), - terminal_scc(0), - weak_scc(0), - strong_scc(0), - nondetstates(0), - nondeterministic(false), - terminal_aut(false), - weak_aut(false), - strong_aut(false) - { - } - // If OK is false, only the status_str, status_code, and time fields // should be valid. 
- bool ok; - bool alternating; - const char* status_str; - int status_code; - double time; - unsigned states; - unsigned edges; - unsigned long long transitions; - unsigned acc; - unsigned scc; - unsigned nonacc_scc; - unsigned terminal_scc; - unsigned weak_scc; - unsigned strong_scc; - unsigned nondetstates; - bool nondeterministic; - bool terminal_aut; - bool weak_aut; - bool strong_aut; + bool ok = false; + bool alternating = false; + const char* status_str = nullptr; + int status_code = 0; + double time = 0.0; + unsigned states = 0; + unsigned edges = 0; + unsigned long long transitions = 0; + unsigned acc = 0; + unsigned scc = 0; + unsigned nonacc_scc = 0; + unsigned terminal_scc = 0; + unsigned weak_scc = 0; + unsigned strong_scc = 0; + unsigned nondetstates = 0; + bool nondeterministic = false; + bool terminal_aut = false; + bool weak_aut = false; + bool strong_aut = false; std::vector product_states; std::vector product_transitions; std::vector product_scc; - bool ambiguous; - bool complete; + bool ambiguous = false; + bool complete = false; std::string hoa_str; static void @@ -581,7 +558,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict) { } @@ -1095,17 +1072,14 @@ namespace } // Make sure we do not translate the same formula twice. - if (!allow_dups) + if (!allow_dups && !unique_set.insert(f).second) { - if (!unique_set.insert(f).second) - { - if (!quiet) - std::cerr - << ("warning: This formula or its negation has already" - " been checked.\n Use --allow-dups if it " - "should not be ignored.\n\n"); - return 0; - } + if (!quiet) + std::cerr + << ("warning: This formula or its negation has already" + " been checked.\n Use --allow-dups if it " + "should not be ignored.\n\n"); + return 0; } int problems = 0; diff --git a/bin/ltldo.cc b/bin/ltldo.cc index ffbd4873e..6e7bf5ec7 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -209,7 +209,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict, true) { } @@ -224,8 +224,6 @@ namespace format(command, tools[translator_num].cmd); std::string cmd = command.str(); - //std::cerr << "Running [" << l << translator_num << "]: " - // << cmd << std::endl; timer.start(); int es = exec_with_timeout(cmd.c_str()); timer.stop(); @@ -312,7 +310,7 @@ namespace spot::printable_value inputf; public: - processor(spot::postprocessor& post) + explicit processor(spot::postprocessor& post) : runner(dict), best_printer(best_stream, best_format), post(post) { printer.add_stat('T', &cmdname); @@ -323,9 +321,7 @@ namespace best_printer.declare('f', &inputf); } - ~processor() - { - } + ~processor() override = default; int process_string(const std::string& input, diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index c9064368d..81e895d42 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -586,7 +586,7 @@ namespace fset_t unique_set; spot::relabeling_map relmap; - ltl_processor(spot::tl_simplifier& simpl) + explicit ltl_processor(spot::tl_simplifier& simpl) : simpl(simpl) { } @@ -722,7 +722,7 @@ namespace matched &= !syntactic_si || f.is_syntactic_stutter_invariant(); if (matched && (ap_n.min > 0 || ap_n.max >= 0)) { - auto s = atomic_prop_collect(f); + spot::atomic_prop_set* s = atomic_prop_collect(f); int n = s->size(); delete s; matched &= (ap_n.min <= 0) || (n >= ap_n.min); @@ -761,7 +761,7 @@ namespace aut = ltl_to_tgba_fm(f, simpl.get_dict(), true); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -769,7 +769,7 @@ namespace } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -843,12 +843,12 @@ namespace { // Sort the formulas alphabetically. std::map m; - for (auto& p: relmap) - m.emplace(str_psl(p.first), p.second); - for (auto& p: m) + for (const auto& [newformula, oldname]: relmap) + m.emplace(str_psl(newformula), oldname); + for (const auto& [newname, oldname]: m) stream_formula(opt->output_define->ostream() - << "#define " << p.first << " (", - p.second, filename, + << "#define " << newname << " (", + oldname, filename, std::to_string(linenum).c_str()) << ")\n"; } one_match = true; diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index aaea855a4..a2ec32cd1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -152,7 +152,6 @@ static const struct argp_child children[] = { { &finput_argp_headless, 0, nullptr, 0 }, { &aoutput_argp, 0, nullptr, 0 }, - //{ &aoutput_o_format_argp, 0, nullptr, 0 }, { &misc_argp, 0, nullptr, 0 }, { nullptr, 0, nullptr, 0 } }; @@ -425,10 +424,6 @@ namespace auto sub_o = sub_outs_str.begin(); std::vector mealy_machines; - auto print_game = want_game ? - [](const spot::twa_graph_ptr& game)->void { dispatch_print_hoa(game); } - : [](const spot::twa_graph_ptr&)->void{}; - for (; sub_f != sub_form.end(); ++sub_f, ++sub_o) { spot::mealy_like m_like @@ -466,9 +461,11 @@ namespace assert((spptr->at(arena->get_init_state_number()) == false) && "Env needs first turn"); } - print_game(arena); if (want_game) - continue; + { + dispatch_print_hoa(arena); + continue; + } if (!spot::solve_game(arena, *gi)) { if (show_status) @@ -625,7 +622,7 @@ namespace } static void - split_aps(std::string arg, std::vector& where) + split_aps(const std::string& arg, std::vector& where) { std::istringstream aps(arg); std::string ap; diff --git a/bin/randltl.cc b/bin/randltl.cc index 986c437c1..749fcf373 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2019, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) 2012-2016, 2018-2019, 2022, 2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -65,7 +65,6 @@ enum { OPT_DUMP_PRIORITIES, OPT_DUPS, OPT_LTL_PRIORITIES, - OPT_PSL_PRIORITIES, OPT_SEED, OPT_SERE_PRIORITIES, OPT_TREE_SIZE, @@ -194,7 +193,6 @@ parse_opt(int key, char* arg, struct argp_state* as) case OPT_DUMP_PRIORITIES: opt_dump_priorities = true; break; - // case OPT_PSL_PRIORITIES: break; case OPT_SERE_PRIORITIES: opt_pS = arg; break; From a9c457f93fb38569849d8d1318c74dc16009cfcd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 11:59:49 +0100 Subject: [PATCH 224/606] dbranch: fix handling of states without successors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #524, reported by Rüdiger Ehlers. * spot/twaalgos/dbranch.cc: When merging an edge going to state without successors simply delete it. * bin/spot-x.cc: Typo in documentation. * tests/core/ltlcross.test: Add a test case. * NEWS: Mention the bug. --- NEWS | 4 ++++ bin/spot-x.cc | 4 ++-- spot/twaalgos/dbranch.cc | 39 +++++++++++++++++++++------------------ tests/core/ltlcross.test | 5 ++++- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/NEWS b/NEWS index d6f6a702b..d25bf4ff4 100644 --- a/NEWS +++ b/NEWS @@ -24,6 +24,10 @@ New in spot 2.11.3.dev (not yet released) multiple initial states (because Spot supports only one), the HOA parser could break state-based acceptance. (Issue #522.) + - delay_branching_here(), a new optimization of Spot 2.11 had an + incorrectly handling of states without successors, causing some + segfaults. (Issue #524.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 1edb3f54e..964710dc1 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -53,7 +53,7 @@ implication-based simplifications are attempted. 
Defaults to 16.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ as product or sum of subformulas.") }, - { DOC("branch-prop", "Set to 0 to disable branching-postponement \ + { DOC("branch-post", "Set to 0 to disable branching-postponement \ (done during translation, may create more states) and delayed-branching \ (almost similar, but done after translation to only remove states). \ Set to 1 to force branching-postponement, and to 2 \ diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index 465f8326e..19a0d9474 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2022-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -118,27 +118,30 @@ namespace spot continue; } unsigned mergedst = it2->second; - // we have to merge canddst into mergedst. This is as - // simple as: + // we have to merge canddst into mergedst. + // This is as simple as: // 1) connecting their list of transitions - unsigned& mergedfirst = g.state_storage(mergedst).succ; - unsigned& mergedlast = g.state_storage(mergedst).succ_tail; - unsigned& candfirst = g.state_storage(canddst).succ; unsigned& candlast = g.state_storage(canddst).succ_tail; - if (mergedlast) - aut->edge_storage(mergedlast).next_succ = candfirst; - else // mergedst had now successor - mergedfirst = candfirst; - mergedlast = candlast; - // 2) updating the source of the merged transitions - for (unsigned e2 = candfirst; e2 != 0;) + if (candlast) { - auto& edge = aut->edge_storage(e2); - edge.src = mergedst; - e2 = edge.next_succ; + unsigned& mergedfirst = g.state_storage(mergedst).succ; + unsigned& mergedlast = g.state_storage(mergedst).succ_tail; + unsigned& candfirst = g.state_storage(canddst).succ; + if (mergedlast) + aut->edge_storage(mergedlast).next_succ = candfirst; + else // mergedst had no successor + mergedfirst = candfirst; + mergedlast = candlast; + // 2) updating the source of the merged transitions + for (unsigned e2 = candfirst; e2 != 0;) + { + auto& edge = aut->edge_storage(e2); + edge.src = mergedst; + e2 = edge.next_succ; + } + // 3) deleting the edge to canddst. + candfirst = candlast = 0; } - // 3) deleting the edge to canddst. - candfirst = candlast = 0; it.erase(); // 4) updating succ_cand succ_cand[mergedst] += succ_cand[canddst]; diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index 1a5806ba8..ebe20fb26 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2016, 2019 Laboratoire de Recherche et +# Copyright (C) 2012-2014, 2016, 2019, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -65,3 +65,6 @@ ltlcross -D \ # Spot 2.8. We use ltl2tgba twice so ltlcross build cross-products. ltlcross --verbose ltl2tgba ltl2tgba \ -f '(G(F((a1)&(X(X(b1))))))&(G(F((a2)&(X(X(b2))))))&(G(F((a3)&(X(X(b3))))))' + +# Issue #524. +ltlcross ltl2tgba -f '!(X(v3 | G!v5) | ((Xv5 & !(v5 & !X!v3)) U !v5))' From 3aba452b5b7fc8b8399c288257f5f96872409d61 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 15:25:06 +0100 Subject: [PATCH 225/606] robin_hood: update to version version 3.11.5 * spot/priv/robin_hood.hh: Update. 
* spot/priv/Makefile.am: Patch ROBIN_HOOD_IS_TRIVIALLY_COPYABLE to work around an issue with clang on Arch linux. --- spot/priv/Makefile.am | 10 ++++++++-- spot/priv/robin_hood.hh | 43 +++++++++++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index b2c75ab7d..9a23caaa3 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013-2019, 2021-2022 Laboratoire de Recherche et +## Copyright (C) 2013-2019, 2021-2023 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## ## This file is part of Spot, a model checking library. @@ -43,5 +43,11 @@ RH = $(GH)/robin-hood-hashing/master/src/include/robin_hood.h .PHONY: update update: wget $(RH) -O robin_hood.tmp || curl $(RH) -o robin_hood.tmp - sed 's/std::malloc/malloc/' robin_hood.tmp > $(srcdir)/robin_hood.hh +## Do not use std::malloc but malloc, because gnulib may replace it by +## rpl_malloc instead. Also disable to tests of __GNUC__ about +## ROBIN_HOOD_IS_TRIVIALLY_COPYABLE because (1) all versions of G++ we +## support have std::is_trivially_copyable, and (2) clang define +## __GNUC__ to some value that fail this test, and then warn that +## __has_trivial_copy is obsoleted. + sed 's/std::malloc/malloc/;/https:\/\/stackoverflow.com\/a\/31798726/{n;s/defined.*/false/}' robin_hood.tmp > $(srcdir)/robin_hood.hh rm -f robin_hood.tmp diff --git a/spot/priv/robin_hood.hh b/spot/priv/robin_hood.hh index 8c151d517..a4bc8beae 100644 --- a/spot/priv/robin_hood.hh +++ b/spot/priv/robin_hood.hh @@ -36,7 +36,7 @@ // see https://semver.org/ #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes #define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner -#define ROBIN_HOOD_VERSION_PATCH 3 // for backwards-compatible bug fixes +#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes #include #include @@ -206,7 +206,7 @@ static Counts& counts() { // workaround missing "is_trivially_copyable" in g++ < 5.0 // See https://stackoverflow.com/a/31798726/48181 -#if defined(__GNUC__) && __GNUC__ < 5 +#if false # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) #else # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value @@ -1820,6 +1820,12 @@ public: InsertionState::key_found != idxAndState.second); } + template + iterator emplace_hint(const_iterator position, Args&&... args) { + (void)position; + return emplace(std::forward(args)...).first; + } + template std::pair try_emplace(const key_type& key, Args&&... args) { return try_emplace_impl(key, std::forward(args)...); @@ -1831,16 +1837,15 @@ public: } template - std::pair try_emplace(const_iterator hint, const key_type& key, - Args&&... args) { + iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) { (void)hint; - return try_emplace_impl(key, std::forward(args)...); + return try_emplace_impl(key, std::forward(args)...).first; } template - std::pair try_emplace(const_iterator hint, key_type&& key, Args&&... args) { + iterator try_emplace(const_iterator hint, key_type&& key, Args&&... 
args) { (void)hint; - return try_emplace_impl(std::move(key), std::forward(args)...); + return try_emplace_impl(std::move(key), std::forward(args)...).first; } template @@ -1854,16 +1859,15 @@ public: } template - std::pair insert_or_assign(const_iterator hint, const key_type& key, - Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(key, std::forward(obj)); + return insertOrAssignImpl(key, std::forward(obj)).first; } template - std::pair insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(std::move(key), std::forward(obj)); + return insertOrAssignImpl(std::move(key), std::forward(obj)).first; } std::pair insert(const value_type& keyval) { @@ -1871,10 +1875,20 @@ public: return emplace(keyval); } + iterator insert(const_iterator hint, const value_type& keyval) { + (void)hint; + return emplace(keyval).first; + } + std::pair insert(value_type&& keyval) { return emplace(std::move(keyval)); } + iterator insert(const_iterator hint, value_type&& keyval) { + (void)hint; + return emplace(std::move(keyval)).first; + } + // Returns 1 if key is found, 0 otherwise. size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) ROBIN_HOOD_TRACE(this) @@ -2308,13 +2322,14 @@ private: auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); - // calloc also zeroes everything + // malloc & zero mInfo. Faster than calloc everything. auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" << numElementsWithBuffer << ")") mKeyVals = reinterpret_cast( - detail::assertNotNull(std::calloc(1, numBytesTotal))); + detail::assertNotNull(malloc(numBytesTotal))); mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node)); // set sentinel mInfo[numElementsWithBuffer] = 1; From 60abfeb31fee32092205b5ae0b60c2877291d233 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 16:07:49 +0100 Subject: [PATCH 226/606] bin: update copyright year and laboratory name * bin/common_setup.cc: Here. 
---
 bin/common_setup.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/common_setup.cc b/bin/common_setup.cc
index af033a47f..c59ec0695 100644
--- a/bin/common_setup.cc
+++ b/bin/common_setup.cc
@@ -36,7 +36,7 @@ display_version(FILE *stream, struct argp_state*)
   fputs(program_name, stream);
   fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\
 \n\
-Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita.\n\
+Copyright (C) 2023 Laboratoire de Recherche de l'Epita (LRE)\n\
 License GPLv3+: \
 GNU GPL version 3 or later .\n\
 This is free software: you are free to change and redistribute it.\n\

From 5b0143eba61d000a683f825142d4b30c24e1717e Mon Sep 17 00:00:00 2001
From: Florian Renkin
Date: Fri, 20 Jan 2023 15:57:46 +0100
Subject: [PATCH 227/606] ltlsynt: typo in doc

* bin/ltlsynt.cc: here
---
 bin/ltlsynt.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc
index a2ec32cd1..35ac4194b 100644
--- a/bin/ltlsynt.cc
+++ b/bin/ltlsynt.cc
@@ -102,8 +102,8 @@ static const argp_option options[] =
     "whether to decompose the specification as multiple output-disjoint "
     "problems to solve independently (enabled by default)", 0 },
   { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0,
-    "simplification to apply to the controler (no) nothing, "
-    "(bisim) bisimulation-based reduction, (bwoa) bissimulation-based "
+    "simplification to apply to the controller (no) nothing, "
+    "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based "
     "reduction with output assignment, (sat) SAT-based minimization, "
     "(bisim-sat) SAT after bisim, (bwoa-sat) SAT after bwoa. Defaults "
     "to 'bwoa'.", 0 },

From e5150d03140de87ced36b3a2328b62f9d17477e0 Mon Sep 17 00:00:00 2001
From: Alexandre Duret-Lutz
Date: Tue, 24 Jan 2023 11:35:14 +0100
Subject: [PATCH 228/606] autfilt: allow --highlight-word to work on Fin acceptance

Fixes #523.

* bin/autfilt.cc: Remove the restriction.
* tests/core/acc_word.test: Add test case.
* NEWS: Mention the fix.
---
 NEWS                     |  4 ++++
 bin/autfilt.cc           |  9 ++-------
 tests/core/acc_word.test | 20 +++++++++++++-------
 3 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/NEWS b/NEWS
index d25bf4ff4..4d9d1def2 100644
--- a/NEWS
+++ b/NEWS
@@ -24,6 +24,10 @@ New in spot 2.11.3.dev (not yet released)
     multiple initial states (because Spot supports only one), the
     HOA parser could break state-based acceptance. (Issue #522.)

+  - autfilt --highlight-word refused to work on automata with Fin
+    acceptance for historical reasons, but the code is perfectly able
+    to handle this now.  (Issue #523.)
+
   - delay_branching_here(), a new optimization of Spot 2.11 had an
     incorrectly handling of states without successors, causing some
     segfaults. (Issue #524.)
diff --git a/bin/autfilt.cc b/bin/autfilt.cc index b55d1bc9f..4487fad8b 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1667,13 +1667,8 @@ namespace if (!opt->hl_words.empty()) for (auto& [word_aut, color]: opt->hl_words) - { - if (aut->acc().uses_fin_acceptance()) - error(2, 0, - "--highlight-word does not yet work with Fin acceptance"); - if (auto run = spot::product(aut, word_aut)->accepting_run()) - run->project(aut)->highlight(color); - } + if (auto run = spot::product(aut, word_aut)->accepting_run()) + run->project(aut)->highlight(color); timer.stop(); if (opt->uniq) diff --git a/tests/core/acc_word.test b/tests/core/acc_word.test index 53ce4b98e..5f3b6880b 100644 --- a/tests/core/acc_word.test +++ b/tests/core/acc_word.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2018, 2019 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -91,6 +91,15 @@ State: 1 EOF diff expected out +ltl2tgba -G '(GF(a & X!a) -> GF(b & XXb)) & GFc' > aut.hoa +word='!a&!c;cycle{!a&b&!c;!a&c;!a&b&c}' +autfilt -H1.1 aut.hoa --highlight-word="$word" > out.hoa +grep spot.highlight.edges out.hoa >out.edges +cat >expected <stderr && exit 1 -test $? -eq 2 -grep 'highlight-word.*Fin' stderr - +# highlight-word used not to work with Fin acceptance, but it's ok now +ltl2tgba -G -D 'FGa' | autfilt --highlight-word='cycle{a}' ltlfilt -f 'GFa' --accept-word 'cycle{!a}' && exit 1 ltlfilt -f 'GF!a' --accept-word 'cycle{!a}' From 121d5e5524d9fb0fe7d3555172613c7f1750265f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 24 Jan 2023 15:48:06 +0100 Subject: [PATCH 229/606] bin: fix number conversion routines on 32bit On 32bit archetectures, long int = int the current check for detecting values that overflow int will fail. Conversion routings should check errno. * bin/common_conv.cc, bin/common_range.cc: Here. 
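[Editorial aside, not part of the patch.]  The pitfall this commit addresses: strtol() returns a long, and the old code detected overflow by copying the result into an int and comparing the two values.  On 32-bit targets long and int have the same width, so that comparison can never fail; strtol() instead reports out-of-range input by setting errno to ERANGE.  The standalone sketch below illustrates that errno-based pattern; the helper name and the use of exceptions instead of the binaries' error() reporting are invented for the example and do not come from the patch.

  #include <cerrno>
  #include <climits>
  #include <cstdlib>
  #include <stdexcept>
  #include <string>

  // Illustrative only: convert a decimal string to int, detecting both
  // syntax errors and overflow.  strtol() sets errno to ERANGE when the
  // value does not fit in a long; the explicit INT_MIN/INT_MAX test
  // covers platforms where long is wider than int.
  static int to_int_checked(const std::string& s)
  {
    char* end = nullptr;
    errno = 0;
    long v = std::strtol(s.c_str(), &end, 10);
    if (end == s.c_str() || *end != '\0')
      throw std::invalid_argument("not an integer: " + s);
    if (errno == ERANGE || v < INT_MIN || v > INT_MAX)
      throw std::out_of_range("value out of int range: " + s);
    return static_cast<int>(v);
  }

The patch itself keeps the existing error() diagnostics; the essential change is the errno = 0 reset before, and the errno == ERANGE check after, each strtol()/strtoul()/strtof() call.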
--- bin/common_conv.cc | 12 ++++++++---- bin/common_range.cc | 6 ++++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/bin/common_conv.cc b/bin/common_conv.cc index 02b1815fd..b23a67c51 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -25,12 +25,13 @@ int to_int(const char* s, const char* where) { char* endptr; + errno = 0; long int lres = strtol(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an integer (in argument of %s).", s, where); int res = lres; - if (res != lres) + if (res != lres || errno == ERANGE) error(2, 0, "value '%s' is too large for an int (in argument of %s).", s, where); return res; @@ -49,13 +50,14 @@ unsigned to_unsigned (const char *s, const char* where) { char* endptr; + errno = 0; unsigned long lres = strtoul(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an unsigned integer (in argument of %s).", s, where); unsigned res = lres; - if (res != lres) + if (res != lres || errno == ERANGE) error(2, 0, "value '%s' is too large for a unsigned int (in argument of %s).", s, where); @@ -66,8 +68,9 @@ float to_float(const char* s, const char* where) { char* endptr; + errno = 0; float res = strtof(s, &endptr); - if (*endptr) + if (*endptr || errno == ERANGE) error(2, 0, "failed to parse '%s' as a float (in argument of %s)", s, where); return res; @@ -89,8 +92,9 @@ to_longs(const char* arg) while (*arg) { char* endptr; + errno = 0; long value = strtol(arg, &endptr, 10); - if (endptr == arg) + if (endptr == arg || errno) error(2, 0, "failed to parse '%s' as an integer.", arg); res.push_back(value); while (*endptr == ' ' || *endptr == ',') diff --git a/bin/common_range.cc b/bin/common_range.cc index 9419cc389..98e568b41 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -36,9 +36,10 @@ parse_range(const char* str, int missing_left, int missing_right) { range res; char* end; + errno = 0; long lres = strtol(str, &end, 10); res.min = lres; - if (res.min != lres) + if (res.min != lres || errno == ERANGE) error(2, 0, "start of range '%s' is too large for an int.", str); if (end == str) { @@ -69,9 +70,10 @@ parse_range(const char* str, int missing_left, int missing_right) { // Parse the next integer. char* end2; + errno = 0; lres = strtol(end, &end2, 10); res.max = lres; - if (res.max != lres) + if (res.max != lres || errno == ERANGE) error(2, 0, "end of range '%s' is too large for an int.", str); if (str == end2) error(2, 0, "invalid range '%s' " From d16183c05345fc33e5b196635666af09489a0bab Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 24 Jan 2023 15:54:39 +0100 Subject: [PATCH 230/606] * .gitlab-ci.yml: Use pipeline id to name volumes. --- .gitlab-ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a2006ee7f..348bacba1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -276,7 +276,7 @@ debpkg-stable: - stable script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable - - vol=spot-stable-$CI_COMMIT_SHA + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? 
@@ -304,7 +304,7 @@ debpkg-stable-i386: needs: ["debpkg-stable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable - - vol=spot-stable-$CI_COMMIT_SHA + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? @@ -331,7 +331,7 @@ debpkg-unstable: - next script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian - - vol=spot-unstable-$CI_COMMIT_SHA + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? @@ -357,7 +357,7 @@ debpkg-unstable-i386: needs: ["debpkg-unstable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 - - vol=spot-unstable-$CI_COMMIT_SHA + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? From bdaa31ef2158d914d5bd21f22c2f1ef9b8665382 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 30 Jan 2023 17:51:48 +0100 Subject: [PATCH 231/606] work around gcc-snapshot warnings about dangling references * spot/twaalgos/game.hh, spot/twaalgos/game.cc (get_state_players, get_strategy, get_state_winners): Take argument by reference, not copy. * spot/twaalgos/synthesis.cc, spot/twaalgos/mealy_machine.cc: Replace auto by actual type for readability. --- spot/twaalgos/game.cc | 17 ++++++++++++++--- spot/twaalgos/game.hh | 12 ++++++++---- spot/twaalgos/mealy_machine.cc | 8 ++++---- spot/twaalgos/synthesis.cc | 12 ++++++------ 4 files changed, 32 insertions(+), 17 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index df259b84a..17f94a7e4 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1056,7 +1056,18 @@ namespace spot (*owners)[state] = owner; } - const region_t& get_state_players(const_twa_graph_ptr arena) + const region_t& get_state_players(const const_twa_graph_ptr& arena) + { + region_t *owners = arena->get_named_prop + ("state-player"); + if (!owners) + throw std::runtime_error + ("get_state_players(): state-player property not defined, not a game?"); + + return *owners; + } + + const region_t& get_state_players(twa_graph_ptr& arena) { region_t *owners = arena->get_named_prop ("state-player"); @@ -1081,7 +1092,7 @@ namespace spot } - const strategy_t& get_strategy(const_twa_graph_ptr arena) + const strategy_t& get_strategy(const const_twa_graph_ptr& arena) { auto strat_ptr = arena->get_named_prop("strategy"); if (!strat_ptr) @@ -1174,7 +1185,7 @@ namespace spot (*winners)[state] = winner; } - const region_t& get_state_winners(const_twa_graph_ptr arena) + const region_t& get_state_winners(const const_twa_graph_ptr& arena) { region_t *winners = arena->get_named_prop("state-winner"); if (!winners) diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index df5d27439..dbaccce75 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). 
// // This file is part of Spot, a model checking library. @@ -163,14 +163,18 @@ namespace spot /// \ingroup games /// \brief Get the owner of all states + ///@{ SPOT_API - const region_t& get_state_players(const_twa_graph_ptr arena); + const region_t& get_state_players(const const_twa_graph_ptr& arena); + SPOT_API + const region_t& get_state_players(twa_graph_ptr& arena); + ///@} /// \ingroup games /// \brief Get or set the strategy /// @{ SPOT_API - const strategy_t& get_strategy(const_twa_graph_ptr arena); + const strategy_t& get_strategy(const const_twa_graph_ptr& arena); SPOT_API void set_strategy(twa_graph_ptr arena, const strategy_t& strat); SPOT_API @@ -214,5 +218,5 @@ namespace spot /// \ingroup games /// \brief Get the winner of all states SPOT_API - const region_t& get_state_winners(const_twa_graph_ptr arena); + const region_t& get_state_winners(const const_twa_graph_ptr& arena); } diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index e2b1523de..25bab05a9 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -3995,7 +3995,7 @@ namespace // 0 -> "Env" next is input props // 1 -> "Player" next is output prop - const auto& spref = get_state_players(mmw); + const region_t& spref = get_state_players(mmw); assert((spref.size() == mmw->num_states()) && "Inconsistent state players"); @@ -4146,9 +4146,9 @@ namespace spot const unsigned initl = left->get_init_state_number(); const unsigned initr = right->get_init_state_number(); - auto& spr = get_state_players(right); + const region_t& spr = get_state_players(right); #ifndef NDEBUG - auto& spl = get_state_players(left); + const region_t& spl = get_state_players(left); // todo auto check_out = [](const const_twa_graph_ptr& aut, const auto& sp) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 4e38efd5b..aef11d27b 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -138,12 +138,12 @@ namespace{ // Note, this only deals with deterministic strategies // Note, assumes that env starts playing twa_graph_ptr - apply_strategy(const twa_graph_ptr& arena, + apply_strategy(const const_twa_graph_ptr& arena, bool unsplit, bool keep_acc) { - const auto& win = get_state_winners(arena); - const auto& strat = get_strategy(arena); - const auto& sp = get_state_players(arena); + const region_t& win = get_state_winners(arena); + const strategy_t& strat = get_strategy(arena); + const region_t& sp = get_state_players(arena); auto outs = get_synthesis_outputs(arena); if (!win[arena->get_init_state_number()]) @@ -1955,7 +1955,7 @@ namespace spot throw std::runtime_error("arena is null."); auto& arena_r = *arena; - const auto& sp = get_state_players(arena); + const region_t& sp = get_state_players(arena); bdd all_ap = arena->ap_vars(); if (std::find_if(arena->ap().cbegin(), arena->ap().cend(), From cab3ea7faf394d2bfaa1dc619b9dc48f026e9b6d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 12:04:15 +0100 Subject: [PATCH 232/606] acd: rewrite Python wrapper without jQuery * python/spot/__init__.py (acd): Rewrite javascript so that it does not use jQuery, to make it easier to use in jupyterlab, or with nbconvert. * tests/python/zlktree.ipynb: Adjust. * NEWS: Mention this. --- NEWS | 4 +- python/spot/__init__.py | 58 ++-- tests/python/zlktree.ipynb | 683 ++++++++++++++++++++++++++++--------- 3 files changed, 563 insertions(+), 182 deletions(-) diff --git a/NEWS b/NEWS index b1f7c2d79..0ac838737 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,8 @@ New in spot 2.11.3.dev (not yet released) - Nothing yet. + Python: + + - spot.acd() no longer depends on jQuery for interactivity. New in spot 2.11.3 (2022-12-09) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index edbf4a4e6..ef4cd772e 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -502,51 +502,57 @@ class acd: .acdacc polygon{fill:green;} ''' js = ''' -function acd{num}_clear(){{ - $("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge") - .removeClass("acdhigh acdbold acdacc acdrej"); +function acdremclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.remove(...classes)}});}} +function acdaddclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.add(...classes)}});}} +function acdonclick(sel, fn) {{ + document.querySelectorAll(sel).forEach(n=> + {{n.addEventListener("click", fn)}}); +}} +function acd{num}_clear() {{ + acdremclasses("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge", + ["acdhigh", "acdbold", "acdacc", "acdrej"]); }}; function acd{num}_state(state){{ - acd{num}_clear(); - $("#acd{num} .acdS" + state).addClass("acdhigh acdbold"); - $("#acdaut{num} #S" + state).addClass("acdbold"); + acd{num}_clear(); + acdaddclasses("#acd{num} .acdS" + state, ["acdhigh", "acdbold"]); + acdaddclasses("#acdaut{num} #S" + state, ["acdbold"]); }}; function acd{num}_edge(edge){{ - acd{num}_clear(); - var theedge = $('#acdaut{num} #E' + edge) - var classList = theedge.attr('class').split(/\s+/); - $.each(classList, function(index, item) {{ - if (item.startsWith('acdN')) {{ - $("#acd{num} #" + item.substring(3)).addClass("acdhigh acdbold"); - }} - }}); - theedge.addClass("acdbold"); + acd{num}_clear(); + var theedge = document.querySelector('#acdaut{num} #E' + edge); + theedge.classList.forEach(function(item, index) {{ + if (item.startsWith('acdN')) {{ + acdaddclasses("#acd{num} #" + item.substring(3), ["acdhigh", 
"acdbold"]); + }} + }}); + theedge.classList.add("acdbold"); }}; function acd{num}_node(node, acc){{ acd{num}_clear(); - $("#acdaut{num} .acdN" + node).addClass(acc - ? "acdacc acdbold" - : "acdrej acdbold"); - $("#acd{num} #N" + node).addClass("acdbold acdhigh"); + acdaddclasses("#acdaut{num} .acdN" + node, + [acc ? "acdacc" : "acdrej", "acdbold"]); + acdaddclasses("#acd{num} #N" + node, ["acdbold", "acdhigh"]); }};'''.format(num=num) me = 0 for n in range(self.node_count()): for e in self.edges_of_node(n): me = max(e, me) - js += '$("#acdaut{num} #E{e}").addClass("acdN{n}");'\ + js += 'acdaddclasses("#acdaut{num} #E{e}", ["acdN{n}"]);\n'\ .format(num=num, e=e, n=n) for e in range(1, me + 1): - js += '$("#acdaut{num} #E{e}")'\ - '.click(function(){{acd{num}_edge({e});}});'\ + js += 'acdonclick("#acdaut{num} #E{e}",'\ + 'function(){{acd{num}_edge({e});}});\n'\ .format(num=num, e=e) for s in range(self.get_aut().num_states()): - js += '$("#acdaut{num} #S{s}")'\ - '.click(function(){{acd{num}_state({s});}});'\ + js += 'acdonclick("#acdaut{num} #S{s}",'\ + 'function(){{acd{num}_state({s});}});\n'\ .format(num=num, s=s) for n in range(self.node_count()): v = int(self.node_acceptance(n)) - js += '$("#acd{num} #N{n}")'\ - '.click(function(){{acd{num}_node({n}, {v});}});'\ + js += 'acdonclick("#acd{num} #N{n}",'\ + 'function(){{acd{num}_node({n}, {v});}});\n'\ .format(num=num, n=n, v=v) html = '
{}
{}
'\ .format(style, diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index ae44ad37d..c9eb3503d 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -216,7 +216,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -640,7 +640,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701b7510> >" + " *' at 0x7f82c009d7a0> >" ] }, "execution_count": 10, @@ -1063,7 +1063,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470220960> >" + " *' at 0x7f82c009c630> >" ] }, "execution_count": 11, @@ -1256,7 +1256,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701b75d0> >" + " *' at 0x7f82c009c6c0> >" ] }, "execution_count": 13, @@ -1701,7 +1701,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470142240> >" + " *' at 0x7f82c009c480> >" ] }, "execution_count": 14, @@ -2096,7 +2096,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2427,7 +2427,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2513,7 +2513,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2624,7 +2624,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2662,7 +2662,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2700,7 +2700,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2928,7 +2928,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 18, @@ -4064,36 +4064,159 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut0 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd0 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut0 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN5\"]);\n", + 
"acdaddclasses(\"#acdaut0 #E34\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut0 #E1\",function(){acd0_edge(1);});\n", + "acdonclick(\"#acdaut0 #E2\",function(){acd0_edge(2);});\n", + "acdonclick(\"#acdaut0 #E3\",function(){acd0_edge(3);});\n", + "acdonclick(\"#acdaut0 #E4\",function(){acd0_edge(4);});\n", + "acdonclick(\"#acdaut0 #E5\",function(){acd0_edge(5);});\n", + "acdonclick(\"#acdaut0 #E6\",function(){acd0_edge(6);});\n", + "acdonclick(\"#acdaut0 #E7\",function(){acd0_edge(7);});\n", + "acdonclick(\"#acdaut0 #E8\",function(){acd0_edge(8);});\n", + "acdonclick(\"#acdaut0 #E9\",function(){acd0_edge(9);});\n", + "acdonclick(\"#acdaut0 #E10\",function(){acd0_edge(10);});\n", + "acdonclick(\"#acdaut0 #E11\",function(){acd0_edge(11);});\n", + "acdonclick(\"#acdaut0 #E12\",function(){acd0_edge(12);});\n", + "acdonclick(\"#acdaut0 #E13\",function(){acd0_edge(13);});\n", + "acdonclick(\"#acdaut0 #E14\",function(){acd0_edge(14);});\n", + "acdonclick(\"#acdaut0 #E15\",function(){acd0_edge(15);});\n", + "acdonclick(\"#acdaut0 #E16\",function(){acd0_edge(16);});\n", + "acdonclick(\"#acdaut0 #E17\",function(){acd0_edge(17);});\n", + "acdonclick(\"#acdaut0 #E18\",function(){acd0_edge(18);});\n", + "acdonclick(\"#acdaut0 #E19\",function(){acd0_edge(19);});\n", + "acdonclick(\"#acdaut0 #E20\",function(){acd0_edge(20);});\n", + "acdonclick(\"#acdaut0 #E21\",function(){acd0_edge(21);});\n", + "acdonclick(\"#acdaut0 #E22\",function(){acd0_edge(22);});\n", + "acdonclick(\"#acdaut0 #E23\",function(){acd0_edge(23);});\n", + "acdonclick(\"#acdaut0 #E24\",function(){acd0_edge(24);});\n", + "acdonclick(\"#acdaut0 #E25\",function(){acd0_edge(25);});\n", + "acdonclick(\"#acdaut0 #E26\",function(){acd0_edge(26);});\n", + "acdonclick(\"#acdaut0 #E27\",function(){acd0_edge(27);});\n", + "acdonclick(\"#acdaut0 #E28\",function(){acd0_edge(28);});\n", + "acdonclick(\"#acdaut0 #E29\",function(){acd0_edge(29);});\n", + "acdonclick(\"#acdaut0 #E30\",function(){acd0_edge(30);});\n", + "acdonclick(\"#acdaut0 #E31\",function(){acd0_edge(31);});\n", + "acdonclick(\"#acdaut0 #E32\",function(){acd0_edge(32);});\n", + "acdonclick(\"#acdaut0 #E33\",function(){acd0_edge(33);});\n", + "acdonclick(\"#acdaut0 #E34\",function(){acd0_edge(34);});\n", + "acdonclick(\"#acdaut0 #E35\",function(){acd0_edge(35);});\n", + "acdonclick(\"#acdaut0 #E36\",function(){acd0_edge(36);});\n", + "acdonclick(\"#acdaut0 #E37\",function(){acd0_edge(37);});\n", + "acdonclick(\"#acdaut0 #E38\",function(){acd0_edge(38);});\n", + "acdonclick(\"#acdaut0 #E39\",function(){acd0_edge(39);});\n", + "acdonclick(\"#acdaut0 #E40\",function(){acd0_edge(40);});\n", + "acdonclick(\"#acdaut0 #S0\",function(){acd0_state(0);});\n", + "acdonclick(\"#acdaut0 
#S1\",function(){acd0_state(1);});\n", + "acdonclick(\"#acdaut0 #S2\",function(){acd0_state(2);});\n", + "acdonclick(\"#acdaut0 #S3\",function(){acd0_state(3);});\n", + "acdonclick(\"#acdaut0 #S4\",function(){acd0_state(4);});\n", + "acdonclick(\"#acdaut0 #S5\",function(){acd0_state(5);});\n", + "acdonclick(\"#acdaut0 #S6\",function(){acd0_state(6);});\n", + "acdonclick(\"#acdaut0 #S7\",function(){acd0_state(7);});\n", + "acdonclick(\"#acdaut0 #S8\",function(){acd0_state(8);});\n", + "acdonclick(\"#acdaut0 #S9\",function(){acd0_state(9);});\n", + "acdonclick(\"#acd0 #N0\",function(){acd0_node(0, 0);});\n", + "acdonclick(\"#acd0 #N1\",function(){acd0_node(1, 1);});\n", + "acdonclick(\"#acd0 #N2\",function(){acd0_node(2, 1);});\n", + "acdonclick(\"#acd0 #N3\",function(){acd0_node(3, 1);});\n", + "acdonclick(\"#acd0 #N4\",function(){acd0_node(4, 1);});\n", + "acdonclick(\"#acd0 #N5\",function(){acd0_node(5, 1);});\n", + "acdonclick(\"#acd0 #N6\",function(){acd0_node(6, 1);});\n", + "acdonclick(\"#acd0 #N7\",function(){acd0_node(7, 1);});\n", + "acdonclick(\"#acd0 #N8\",function(){acd0_node(8, 1);});\n", + "acdonclick(\"#acd0 #N9\",function(){acd0_node(9, 0);});\n", + "acdonclick(\"#acd0 #N10\",function(){acd0_node(10, 0);});\n", + "acdonclick(\"#acd0 #N11\",function(){acd0_node(11, 0);});\n", + "acdonclick(\"#acd0 #N12\",function(){acd0_node(12, 0);});\n", + "acdonclick(\"#acd0 #N13\",function(){acd0_node(13, 0);});\n", + "acdonclick(\"#acd0 #N14\",function(){acd0_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 20, @@ -4968,7 +5091,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14701670f0> >" + " *' at 0x7f82c00bc870> >" ] }, "execution_count": 29, @@ -5607,7 +5730,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470167210> >" + " *' at 0x7f82c00bc060> >" ] }, "execution_count": 31, @@ -5807,7 +5930,9 @@ "cell_type": "code", "execution_count": 40, "id": "813d15ed", - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -6875,36 +7000,159 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut1 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd1 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut1 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut1 #E1\",function(){acd1_edge(1);});\n", + "acdonclick(\"#acdaut1 #E2\",function(){acd1_edge(2);});\n", + "acdonclick(\"#acdaut1 #E3\",function(){acd1_edge(3);});\n", + "acdonclick(\"#acdaut1 #E4\",function(){acd1_edge(4);});\n", + "acdonclick(\"#acdaut1 #E5\",function(){acd1_edge(5);});\n", + "acdonclick(\"#acdaut1 #E6\",function(){acd1_edge(6);});\n", + "acdonclick(\"#acdaut1 #E7\",function(){acd1_edge(7);});\n", + "acdonclick(\"#acdaut1 #E8\",function(){acd1_edge(8);});\n", + "acdonclick(\"#acdaut1 #E9\",function(){acd1_edge(9);});\n", + "acdonclick(\"#acdaut1 
#E10\",function(){acd1_edge(10);});\n", + "acdonclick(\"#acdaut1 #E11\",function(){acd1_edge(11);});\n", + "acdonclick(\"#acdaut1 #E12\",function(){acd1_edge(12);});\n", + "acdonclick(\"#acdaut1 #E13\",function(){acd1_edge(13);});\n", + "acdonclick(\"#acdaut1 #E14\",function(){acd1_edge(14);});\n", + "acdonclick(\"#acdaut1 #E15\",function(){acd1_edge(15);});\n", + "acdonclick(\"#acdaut1 #E16\",function(){acd1_edge(16);});\n", + "acdonclick(\"#acdaut1 #E17\",function(){acd1_edge(17);});\n", + "acdonclick(\"#acdaut1 #E18\",function(){acd1_edge(18);});\n", + "acdonclick(\"#acdaut1 #E19\",function(){acd1_edge(19);});\n", + "acdonclick(\"#acdaut1 #E20\",function(){acd1_edge(20);});\n", + "acdonclick(\"#acdaut1 #E21\",function(){acd1_edge(21);});\n", + "acdonclick(\"#acdaut1 #E22\",function(){acd1_edge(22);});\n", + "acdonclick(\"#acdaut1 #E23\",function(){acd1_edge(23);});\n", + "acdonclick(\"#acdaut1 #E24\",function(){acd1_edge(24);});\n", + "acdonclick(\"#acdaut1 #E25\",function(){acd1_edge(25);});\n", + "acdonclick(\"#acdaut1 #E26\",function(){acd1_edge(26);});\n", + "acdonclick(\"#acdaut1 #E27\",function(){acd1_edge(27);});\n", + "acdonclick(\"#acdaut1 #E28\",function(){acd1_edge(28);});\n", + "acdonclick(\"#acdaut1 #E29\",function(){acd1_edge(29);});\n", + "acdonclick(\"#acdaut1 #E30\",function(){acd1_edge(30);});\n", + "acdonclick(\"#acdaut1 #E31\",function(){acd1_edge(31);});\n", + "acdonclick(\"#acdaut1 #E32\",function(){acd1_edge(32);});\n", + "acdonclick(\"#acdaut1 #E33\",function(){acd1_edge(33);});\n", + "acdonclick(\"#acdaut1 #E34\",function(){acd1_edge(34);});\n", + "acdonclick(\"#acdaut1 #E35\",function(){acd1_edge(35);});\n", + "acdonclick(\"#acdaut1 #E36\",function(){acd1_edge(36);});\n", + "acdonclick(\"#acdaut1 #E37\",function(){acd1_edge(37);});\n", + "acdonclick(\"#acdaut1 #E38\",function(){acd1_edge(38);});\n", + "acdonclick(\"#acdaut1 #E39\",function(){acd1_edge(39);});\n", + "acdonclick(\"#acdaut1 #E40\",function(){acd1_edge(40);});\n", + "acdonclick(\"#acdaut1 #S0\",function(){acd1_state(0);});\n", + "acdonclick(\"#acdaut1 #S1\",function(){acd1_state(1);});\n", + "acdonclick(\"#acdaut1 #S2\",function(){acd1_state(2);});\n", + "acdonclick(\"#acdaut1 #S3\",function(){acd1_state(3);});\n", + "acdonclick(\"#acdaut1 #S4\",function(){acd1_state(4);});\n", + "acdonclick(\"#acdaut1 #S5\",function(){acd1_state(5);});\n", + "acdonclick(\"#acdaut1 #S6\",function(){acd1_state(6);});\n", + "acdonclick(\"#acdaut1 #S7\",function(){acd1_state(7);});\n", + "acdonclick(\"#acdaut1 #S8\",function(){acd1_state(8);});\n", + "acdonclick(\"#acdaut1 #S9\",function(){acd1_state(9);});\n", + "acdonclick(\"#acd1 #N0\",function(){acd1_node(0, 0);});\n", + "acdonclick(\"#acd1 #N1\",function(){acd1_node(1, 1);});\n", + "acdonclick(\"#acd1 #N2\",function(){acd1_node(2, 1);});\n", + "acdonclick(\"#acd1 #N3\",function(){acd1_node(3, 1);});\n", + "acdonclick(\"#acd1 #N4\",function(){acd1_node(4, 1);});\n", + "acdonclick(\"#acd1 #N5\",function(){acd1_node(5, 1);});\n", + "acdonclick(\"#acd1 #N6\",function(){acd1_node(6, 1);});\n", + "acdonclick(\"#acd1 #N7\",function(){acd1_node(7, 1);});\n", + "acdonclick(\"#acd1 #N8\",function(){acd1_node(8, 1);});\n", + "acdonclick(\"#acd1 #N9\",function(){acd1_node(9, 0);});\n", + "acdonclick(\"#acd1 #N10\",function(){acd1_node(10, 0);});\n", + "acdonclick(\"#acd1 #N11\",function(){acd1_node(11, 0);});\n", + "acdonclick(\"#acd1 #N12\",function(){acd1_node(12, 0);});\n", + "acdonclick(\"#acd1 #N13\",function(){acd1_node(13, 0);});\n", + "acdonclick(\"#acd1 
#N14\",function(){acd1_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 40, @@ -7817,7 +8065,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700fe1e0> >" + " *' at 0x7f82c00be460> >" ] }, "execution_count": 45, @@ -8114,36 +8362,69 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut2 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd2 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut2 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E1\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut2 #E1\",function(){acd2_edge(1);});\n", + "acdonclick(\"#acdaut2 #E2\",function(){acd2_edge(2);});\n", + "acdonclick(\"#acdaut2 #E3\",function(){acd2_edge(3);});\n", + "acdonclick(\"#acdaut2 #E4\",function(){acd2_edge(4);});\n", + "acdonclick(\"#acdaut2 #E5\",function(){acd2_edge(5);});\n", + "acdonclick(\"#acdaut2 #E6\",function(){acd2_edge(6);});\n", + "acdonclick(\"#acdaut2 #S0\",function(){acd2_state(0);});\n", + "acdonclick(\"#acdaut2 #S1\",function(){acd2_state(1);});\n", + "acdonclick(\"#acdaut2 #S2\",function(){acd2_state(2);});\n", + "acdonclick(\"#acd2 #N0\",function(){acd2_node(0, 1);});\n", + "acdonclick(\"#acd2 #N1\",function(){acd2_node(1, 0);});\n", + "acdonclick(\"#acd2 #N2\",function(){acd2_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 47, @@ -8353,7 +8634,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700feb40> >" + " *' at 0x7f82c00bdd40> >" ] }, "execution_count": 48, @@ -8628,36 +8909,69 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut3 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd3 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut3 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut3 #E1\",function(){acd3_edge(1);});\n", + "acdonclick(\"#acdaut3 #E2\",function(){acd3_edge(2);});\n", + "acdonclick(\"#acdaut3 #E3\",function(){acd3_edge(3);});\n", + "acdonclick(\"#acdaut3 #E4\",function(){acd3_edge(4);});\n", + "acdonclick(\"#acdaut3 #E5\",function(){acd3_edge(5);});\n", + "acdonclick(\"#acdaut3 #E6\",function(){acd3_edge(6);});\n", + "acdonclick(\"#acdaut3 #S0\",function(){acd3_state(0);});\n", + "acdonclick(\"#acdaut3 #S1\",function(){acd3_state(1);});\n", + "acdonclick(\"#acdaut3 #S2\",function(){acd3_state(2);});\n", + "acdonclick(\"#acd3 #N0\",function(){acd3_node(0, 1);});\n", + "acdonclick(\"#acd3 #N1\",function(){acd3_node(1, 0);});\n", + "acdonclick(\"#acd3 #N2\",function(){acd3_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 49, @@ -8841,7 +9155,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f14700fea80> >" + " *' at 0x7f82c00bf300> >" ] }, "execution_count": 50, @@ -8993,7 +9307,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107240> >" + " *' at 0x7f82c00be5b0> >" ] }, "execution_count": 51, @@ -9105,7 +9419,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 52, @@ -9271,7 +9585,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107030> >" + " *' at 0x7f82c00bf5d0> >" ] }, "execution_count": 53, @@ -9535,36 +9849,63 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut4 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd4 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut4 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E8\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN1\"]);\n", + "acdonclick(\"#acdaut4 #E1\",function(){acd4_edge(1);});\n", + "acdonclick(\"#acdaut4 #E2\",function(){acd4_edge(2);});\n", + "acdonclick(\"#acdaut4 #E3\",function(){acd4_edge(3);});\n", + "acdonclick(\"#acdaut4 #E4\",function(){acd4_edge(4);});\n", + "acdonclick(\"#acdaut4 #E5\",function(){acd4_edge(5);});\n", + "acdonclick(\"#acdaut4 #E6\",function(){acd4_edge(6);});\n", + "acdonclick(\"#acdaut4 #E7\",function(){acd4_edge(7);});\n", + "acdonclick(\"#acdaut4 #E8\",function(){acd4_edge(8);});\n", + "acdonclick(\"#acdaut4 #S0\",function(){acd4_state(0);});\n", + "acdonclick(\"#acdaut4 #S1\",function(){acd4_state(1);});\n", + "acdonclick(\"#acd4 #N0\",function(){acd4_node(0, 1);});\n", + "acdonclick(\"#acd4 #N1\",function(){acd4_node(1, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 55, @@ -9708,7 +10049,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470107b70> >" + " *' at 0x7f82c00f4240> >" ] }, "execution_count": 57, @@ -9855,7 +10196,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d240> >" + " *' at 0x7f82c00f4090> >" ] }, "execution_count": 58, @@ -10165,36 +10506,68 @@ "\n", "\n", "" + " acdaddclasses(\"#acdaut5 .acdN\" + node,\n", + " [acc ? 
\"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd5 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut5 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut5 #E1\",function(){acd5_edge(1);});\n", + "acdonclick(\"#acdaut5 #E2\",function(){acd5_edge(2);});\n", + "acdonclick(\"#acdaut5 #E3\",function(){acd5_edge(3);});\n", + "acdonclick(\"#acdaut5 #E4\",function(){acd5_edge(4);});\n", + "acdonclick(\"#acdaut5 #E5\",function(){acd5_edge(5);});\n", + "acdonclick(\"#acdaut5 #E6\",function(){acd5_edge(6);});\n", + "acdonclick(\"#acdaut5 #E7\",function(){acd5_edge(7);});\n", + "acdonclick(\"#acdaut5 #S0\",function(){acd5_state(0);});\n", + "acdonclick(\"#acdaut5 #S1\",function(){acd5_state(1);});\n", + "acdonclick(\"#acdaut5 #S2\",function(){acd5_state(2);});\n", + "acdonclick(\"#acdaut5 #S3\",function(){acd5_state(3);});\n", + "acdonclick(\"#acd5 #N0\",function(){acd5_node(0, 1);});\n", + "acdonclick(\"#acd5 #N1\",function(){acd5_node(1, 0);});\n", + "acdonclick(\"#acd5 #N2\",function(){acd5_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 60, @@ -10322,7 +10695,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d5a0> >" + " *' at 0x7f82c00f50b0> >" ] }, "execution_count": 61, @@ -10452,7 +10825,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f147010d6f0> >" + " *' at 0x7f82c00f52c0> >" ] }, "execution_count": 62, @@ -10732,7 +11105,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116270> >" + " *' at 0x7f82c00f4960> >" ] }, "execution_count": 63, @@ -10826,7 +11199,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116630> >" + " *' at 0x7f82c00f5a10> >" ] }, "execution_count": 64, @@ -10937,7 +11310,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f1470116450> >" + " *' at 0x7f82c00f5ce0> >" ] }, "execution_count": 66, @@ -10995,7 +11368,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.10.7" } }, "nbformat": 4, From 104e98aca61d4c526eaade2c6a4a703666a11528 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 Jan 2023 15:11:21 +0100 Subject: [PATCH 233/606] fix merging of initial states in state-based automata Fixes #522 reported by Raven Beutner. * spot/parseaut/parseaut.yy: Make sure all edges leaving the initial state have the same color. * THANKS: Add Raven. * NEWS: Mention the bug. * tests/core/522.test: New file. * tests/Makefile.am: Add it. --- NEWS | 6 ++++++ THANKS | 1 + spot/parseaut/parseaut.yy | 33 ++++++++++++++++++++++++------ tests/Makefile.am | 3 ++- tests/core/522.test | 43 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 79 insertions(+), 7 deletions(-) create mode 100755 tests/core/522.test diff --git a/NEWS b/NEWS index 0ac838737..9775339fe 100644 --- a/NEWS +++ b/NEWS @@ -4,6 +4,12 @@ New in spot 2.11.3.dev (not yet released) - spot.acd() no longer depends on jQuery for interactivity. 
+ Bug fixes: + + - When merging initial states from state-based automata with + multiple initial states (because Spot supports only one), the HOA + parser could break state-based acceptance. (Issue #522.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/THANKS b/THANKS index 356d187a1..93155f9d1 100644 --- a/THANKS +++ b/THANKS @@ -48,6 +48,7 @@ Nikos Gorogiannis Ondřej Lengál Paul Guénézan Pierre Ganty +Raven Beutner Reuben Rowe Roei Nahum Rüdiger Ehlers diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 4d96b8c1c..7d5fac361 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1,5 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +** Copyright (C) 2014-2023 Laboratoire de Recherche et Développement ** de l'Epita (LRDE). ** ** This file is part of Spot, a model checking library. @@ -2610,7 +2610,7 @@ static void fix_initial_state(result_& r) start.resize(std::distance(start.begin(), res)); assert(start.size() >= 1); - if (start.size() == 1) + if (start.size() == 1) { if (r.opts.want_kripke) r.h->ks->set_init_state(start.front().front()); @@ -2627,13 +2627,13 @@ static void fix_initial_state(result_& r) "a single initial state"); return; } + auto& aut = r.h->aut; // Fiddling with initial state may turn an incomplete automaton // into a complete one. - if (r.complete.is_false()) - r.complete = spot::trival::maybe(); + if (aut->prop_complete().is_false()) + aut->prop_complete(spot::trival::maybe()); // Multiple initial states. We might need to add a fake one, // unless one of the actual initial state has no incoming edge. - auto& aut = r.h->aut; std::vector has_incoming(aut->num_states(), 0); for (auto& t: aut->edges()) for (unsigned ud: aut->univ_dests(t)) @@ -2672,6 +2672,9 @@ static void fix_initial_state(result_& r) { unsigned p = pp.front(); if (p != init) + // FIXME: If p has no incoming we should be able to + // change the source of the edges of p instead of + // adding new edges. for (auto& t: aut->out(p)) aut->new_edge(init, t.dst, t.cond); } @@ -2694,6 +2697,24 @@ static void fix_initial_state(result_& r) } combiner.new_dests(init, comb_or); } + + // Merging two states may break state-based acceptance + // make sure all outgoing edges have the same color. + if (aut->prop_state_acc().is_true()) + { + bool first = true; + spot::acc_cond::mark_t prev; + for (auto& e: aut->out(init)) + if (first) + { + first = false; + prev = e.acc; + } + else if (e.acc != prev) + { + e.acc = prev; + } + } } } @@ -2871,8 +2892,8 @@ namespace spot r.aut_or_ks->set_named_prop("aliases", p); } fix_acceptance(r); + fix_properties(r); // before fix_initial_state fix_initial_state(r); - fix_properties(r); if (r.h->aut && !r.h->aut->is_existential()) r.h->aut->merge_univ_dests(); return r.h; diff --git a/tests/Makefile.am b/tests/Makefile.am index 4c2fe830c..8a180ddda 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009-2022 Laboratoire de Recherche et Développement +## Copyright (C) 2009-2023 Laboratoire de Recherche et Développement ## de l'Epita (LRDE). 
## Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 ## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -223,6 +223,7 @@ TESTS_misc = \ TESTS_twa = \ core/385.test \ core/521.test \ + core/522.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ diff --git a/tests/core/522.test b/tests/core/522.test new file mode 100755 index 000000000..5fe6ba945 --- /dev/null +++ b/tests/core/522.test @@ -0,0 +1,43 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #522. + +cat >552.hoa < out.hoa +grep 'States: 7' out.hoa From 403e55d555df75e5b910669aa3301d98a04f04c5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:47:46 +0100 Subject: [PATCH 234/606] * doc/org/spot.css: Do not define background twice. --- doc/org/spot.css | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/org/spot.css b/doc/org/spot.css index ca8b12395..569ca37a9 100644 --- a/doc/org/spot.css +++ b/doc/org/spot.css @@ -16,9 +16,9 @@ h1::before{content:"";position:absolute;z-index:-1;background-color:#ffe35e;left #table-of-contents #text-table-of-contents{text-align:left} #org-div-home-and-up{text-align:center;font-size:100%} .outline-2 h2{display:block;width:100%;position:relative} -.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} +.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} .outline-3 h3{display:block;width:auto;position:relative} -.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} +.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} .outline-2 h2:hover::before,.outline-3 h3:hover::before{background-color:#ffe35e} pre{margin:1.2ex} pre.src{padding-top:8px;border-left-style:solid;border-color:#00adad;overflow:auto;margin-top:0;margin-bottom:0} From 344d82f2b49837e7e98bcfb0fb10a566e5c97560 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:48:14 +0100 Subject: [PATCH 235/606] simplify several comparison operators * spot/twaalgos/dtbasat.cc, spot/twaalgos/dtwasat.cc, spot/twaalgos/simulation.cc: Simplify, as reported by sonarcloud. 
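(Context for the diff below: the comparison operators being simplified are lexicographic operator< implementations over two members; once every earlier member has compared equal, the trailing three-branch tail `if (a < b) return true; if (a > b) return false; return false;` is equivalent to the single statement `return a < b;`. A minimal sketch of the pattern, using illustrative member names rather than the actual Spot structs:

    // Hypothetical key type; 'src' and 'dst' stand in for whatever
    // members the real structs compare (e.g. src_ref/dst_ref in the
    // dtbasat.cc hunk below).
    struct edge_key
    {
      unsigned src;
      unsigned dst;

      bool operator<(const edge_key& other) const
      {
        if (src < other.src)
          return true;
        if (src > other.src)
          return false;
        // Last member: the redundant true/false/false tail collapses
        // into a single comparison.
        return dst < other.dst;
      }
    };

An equivalent formulation using the standard library would be `return std::tie(src, dst) < std::tie(other.src, other.dst);` with <tuple> included, though the patch keeps the explicit comparisons.)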
--- spot/twaalgos/dtbasat.cc | 8 ++------ spot/twaalgos/dtwasat.cc | 8 ++------ spot/twaalgos/simulation.cc | 10 ++-------- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index b2147ebb4..c4bf3d1bc 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2021, 2022 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2021-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -77,11 +77,7 @@ namespace spot return true; if (this->src_ref > other.src_ref) return false; - if (this->dst_ref < other.dst_ref) - return true; - if (this->dst_ref > other.dst_ref) - return false; - return false; + return this->dst_ref < other.dst_ref; } }; diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 25a299154..2ecf38fd1 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche +// Copyright (C) 2013-2023 Laboratoire de Recherche // et Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -98,11 +98,7 @@ namespace spot return true; if (this->acc_ref > other.acc_ref) return false; - if (this->acc_cand < other.acc_cand) - return true; - if (this->acc_cand > other.acc_cand) - return false; - return false; + return this->acc_cand < other.acc_cand; } }; diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index ca8928888..ed53929b3 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -93,13 +93,7 @@ namespace spot return true; if (states > r.states) return false; - - if (edges < r.edges) - return true; - if (edges > r.edges) - return false; - - return false; + return edges < r.edges; } inline bool operator>(const automaton_size& r) From 36e79ecca6314a220982131567e6302ac1ea4035 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 17:49:00 +0100 Subject: [PATCH 236/606] * spot/twaalgos/game.cc: Fix incorrect std::forward. --- spot/twaalgos/game.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index f5699bf49..df259b84a 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2017-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -1034,7 +1034,7 @@ namespace spot ("set_state_players(): There must be as many owners as states"); arena->set_named_prop("state-player", - new region_t(std::forward(owners))); + new region_t(std::move(owners))); } void set_state_player(twa_graph_ptr arena, unsigned state, bool owner) @@ -1101,7 +1101,7 @@ namespace spot throw std::runtime_error("set_strategy(): strategies need to have " "the same size as the automaton."); arena->set_named_prop("strategy", - new strategy_t(std::forward(strat))); + new strategy_t(std::move(strat))); } void set_synthesis_outputs(const twa_graph_ptr& arena, const bdd& outs) @@ -1152,7 +1152,7 @@ namespace spot ("set_state_winners(): There must be as many winners as states"); arena->set_named_prop("state-winner", - new region_t(std::forward(winners))); + new region_t(std::move(winners))); } void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner) From 4a78d1bff4fb113bdbd09b7989c527cfbd6339b3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 23:34:10 +0100 Subject: [PATCH 237/606] fix some code smells reported by sonarcloud * bench/dtgbasat/gen.py, bin/autcross.cc, bin/autfilt.cc, bin/common_aoutput.cc, bin/common_aoutput.hh: Various cleanups. --- bench/dtgbasat/gen.py | 6 ++-- bin/autcross.cc | 12 +++---- bin/autfilt.cc | 79 ++++++++++++++++++------------------------- bin/common_aoutput.cc | 12 +++---- bin/common_aoutput.hh | 8 ++--- 5 files changed, 51 insertions(+), 66 deletions(-) diff --git a/bench/dtgbasat/gen.py b/bench/dtgbasat/gen.py index e96bf2825..dabf77971 100755 --- a/bench/dtgbasat/gen.py +++ b/bench/dtgbasat/gen.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (C) 2016-2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2016-2018, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -55,12 +55,12 @@ class BenchConfig(object): if line[0] == '#' or line.isspace(): continue elif line[0:2] == "sh": - sh = re.search('sh (.+?)$', line).group(1) + sh = re.search('sh (.+)$', line).group(1) continue else: name = re.search('(.+?):', line).group(1) code = re.search(':(.+?)>', line).group(1) - xoptions = re.search('>(.+?)$', line).group(1) + xoptions = re.search('>(.+)$', line).group(1) b = Bench(name=name, code=code, xoptions=xoptions) self.l.append(b) self.sh.append(sh) diff --git a/bin/autcross.cc b/bin/autcross.cc index 24cd9bcd4..b3e504bb3 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2017-2020, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -549,7 +549,7 @@ namespace { if (!quiet) std::cerr << "info: building " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) + << '*' << autname(j, !is_really_comp(j)) << " requires more acceptance sets than supported\n"; return false; } @@ -557,14 +557,14 @@ namespace if (verbose) std::cerr << "info: check_empty " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) << '\n'; + << '*' << autname(j, !is_really_comp(j)) << '\n'; auto w = aut_i->intersecting_word(aut_j); if (w) { std::ostream& err = global_error(); err << "error: " << autname(i, is_really_comp(i)) - << '*' << autname(j, true ^ is_really_comp(j)) + << '*' << autname(j, !is_really_comp(j)) << (" is nonempty; both automata accept the infinite word:\n" " "); example() << *w << '\n'; @@ -613,7 +613,7 @@ namespace return src.str(); }(); - input_statistics.push_back(in_statistics()); + input_statistics.emplace_back(in_statistics()); input_statistics[round_num].input_source = std::move(source); if (auto name = input->get_named_prop("automaton-name")) @@ -658,7 +658,7 @@ namespace problems += prob; } spot::cleanup_tmpfiles(); - output_statistics.push_back(std::move(stats)); + output_statistics.emplace_back(std::move(stats)); if (verbose) { diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 7cff60e8b..b55d1bc9f 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -448,7 +448,7 @@ struct canon_aut std::vector edges; std::string acc; - canon_aut(const spot::const_twa_graph_ptr& aut) + explicit canon_aut(const spot::const_twa_graph_ptr& aut) : num_states(aut->num_states()) , edges(aut->edge_vector().begin() + 1, aut->edge_vector().end()) @@ -755,6 +755,22 @@ product_or(spot::twa_graph_ptr left, spot::twa_graph_ptr right) return spot::product_or(left, right); } +static spot::twa_graph_ptr +word_to_aut(const char* word, const char *argname) +{ + try + { + return spot::parse_word(word, opt->dict)->as_automaton(); + } + catch (const spot::parse_error& e) + { + error(2, 0, "failed to parse the argument of --%s:\n%s", + argname, e.what()); + } + SPOT_UNREACHABLE(); + return nullptr; +} + static int parse_opt(int key, char* arg, struct argp_state*) { @@ -776,17 +792,14 @@ parse_opt(int key, char* arg, struct argp_state*) opt_nth = parse_range(arg, 0, std::numeric_limits::max()); break; case 'u': - opt->uniq = std::unique_ptr(new std::set()); + opt->uniq = std::make_unique(); break; case 'v': opt_invert = true; break; case 'x': - { - const char* opt = extra_options.parse_options(arg); - if (opt) - error(2, 0, "failed to parse --options near '%s'", opt); - } + if (const char* opt = extra_options.parse_options(arg)) + error(2, 0, "failed to parse --options near '%s'", opt); break; case OPT_ALIASES: opt_aliases = XARGMATCH("--aliases", arg, aliases_args, aliases_types); @@ -802,16 +815,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_ACCEPT_WORD: - try - { - opt->acc_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --accept-word:\n%s", - e.what()); - } + opt->acc_words.emplace_back(word_to_aut(arg, "accept-word")); break; case OPT_ACCEPTANCE_IS: { @@ -964,16 +968,7 @@ 
parse_opt(int key, char* arg, struct argp_state*) "%d should be followed by a comma and WORD", res); arg = endptr + 1; } - try - { - opt->hl_words.emplace_back(spot::parse_word(arg, opt->dict) - ->as_automaton(), res); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --highlight-word:\n%s", - e.what()); - } + opt->hl_words.emplace_back(word_to_aut(arg, "highlight-word"), res); } break; case OPT_HIGHLIGHT_LANGUAGES: @@ -1157,16 +1152,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_REJECT_WORD: - try - { - opt->rej_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --reject-word:\n%s", - e.what()); - } + opt->rej_words.emplace_back(word_to_aut(arg, "reject-word")); break; case OPT_REM_AP: opt->rem_ap.add_ap(arg); @@ -1291,7 +1277,7 @@ namespace static bool match_acceptance(spot::twa_graph_ptr aut) { - auto& acc = aut->acc(); + const spot::acc_cond& acc = aut->acc(); switch (opt_acceptance_is) { case ACC_Any: @@ -1346,8 +1332,7 @@ namespace { bool max; bool odd; - bool is_p = acc.is_parity(max, odd, true); - if (!is_p) + if (!acc.is_parity(max, odd, true)) return false; switch (opt_acceptance_is) { @@ -1460,7 +1445,7 @@ namespace if (matched && opt_acceptance_is) matched = match_acceptance(aut); - if (matched && (opt_sccs_set | opt_art_sccs_set)) + if (matched && (opt_sccs_set || opt_art_sccs_set)) { spot::scc_info si(aut); unsigned n = si.scc_count(); @@ -1540,14 +1525,14 @@ namespace && spot::contains(aut, opt->equivalent_pos); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; break; } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -1681,13 +1666,13 @@ namespace aut->accepting_run()->highlight(opt_highlight_accepting_run); if (!opt->hl_words.empty()) - for (auto& word_aut: opt->hl_words) + for (auto& [word_aut, color]: opt->hl_words) { if (aut->acc().uses_fin_acceptance()) error(2, 0, "--highlight-word does not yet work with Fin acceptance"); - if (auto run = spot::product(aut, word_aut.first)->accepting_run()) - run->project(aut)->highlight(word_aut.second); + if (auto run = spot::product(aut, word_aut)->accepting_run()) + run->project(aut)->highlight(color); } timer.stop(); diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index fcc79fc3c..60f83289e 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -453,7 +453,7 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, const char* filename, int loc, - spot::process_timer& ptimer, + const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix) { timer_ = ptimer; @@ -633,10 +633,10 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, outputnamer.print(haut, aut, f, filename, loc, ptimer, csv_prefix, csv_suffix); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } // Output it. diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index d33b687d2..f57beae84 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -155,7 +155,7 @@ public: print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, - const char* filename, int loc, spot::process_timer& ptimer, + const char* filename, int loc, const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix); private: @@ -196,7 +196,7 @@ class automaton_printer std::map> outputfiles; public: - automaton_printer(stat_style input = no_input); + explicit automaton_printer(stat_style input = no_input); ~automaton_printer(); void From 7b0507a950a2734cdf2a7031d24286cd2ce88cb3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 5 Jan 2023 23:43:31 +0100 Subject: [PATCH 238/606] bin: detect overflows in conversion functions * bin/common_conv.cc (to_int, to_unsigned): Here. * bin/common_range.cc (parse_range): And there. * tests/core/ltlgrind.test, tests/core/genaut.test, tests/core/randaut.test: Add test cases. --- bin/common_conv.cc | 15 ++++++++++++--- bin/common_range.cc | 22 ++++++++++++++-------- tests/core/genaut.test | 9 ++++++--- tests/core/ltlgrind.test | 5 ++++- tests/core/randaut.test | 7 +++++-- 5 files changed, 41 insertions(+), 17 deletions(-) diff --git a/bin/common_conv.cc b/bin/common_conv.cc index e63969b16..02b1815fd 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2015, 2018, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -25,10 +25,14 @@ int to_int(const char* s, const char* where) { char* endptr; - int res = strtol(s, &endptr, 10); + long int lres = strtol(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an integer (in argument of %s).", s, where); + int res = lres; + if (res != lres) + error(2, 0, "value '%s' is too large for an int (in argument of %s).", + s, where); return res; } @@ -45,11 +49,16 @@ unsigned to_unsigned (const char *s, const char* where) { char* endptr; - unsigned res = strtoul(s, &endptr, 10); + unsigned long lres = strtoul(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an unsigned integer (in argument of %s).", s, where); + unsigned res = lres; + if (res != lres) + error(2, 0, + "value '%s' is too large for a unsigned int (in argument of %s).", + s, where); return res; } diff --git a/bin/common_range.cc b/bin/common_range.cc index 8909a26c0..9419cc389 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2016 Laboratoire de Recherche et +// Copyright (C) 2012, 2014, 2016, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -36,13 +36,16 @@ parse_range(const char* str, int missing_left, int missing_right) { range res; char* end; - res.min = strtol(str, &end, 10); + long lres = strtol(str, &end, 10); + res.min = lres; + if (res.min != lres) + error(2, 0, "start of range '%s' is too large for an int.", str); if (end == str) { // No leading number. It's OK as long as the string is not // empty. if (!*end) - error(1, 0, "invalid empty range"); + error(2, 0, "invalid empty range"); res.min = missing_left; } if (!*end) @@ -66,19 +69,22 @@ parse_range(const char* str, int missing_left, int missing_right) { // Parse the next integer. char* end2; - res.max = strtol(end, &end2, 10); + lres = strtol(end, &end2, 10); + res.max = lres; + if (res.max != lres) + error(2, 0, "end of range '%s' is too large for an int.", str); if (str == end2) - error(1, 0, "invalid range '%s' " + error(2, 0, "invalid range '%s' " "(should start with digits, dots, or colon)", str); if (end == end2) - error(1, 0, "invalid range '%s' (missing end?)", str); + error(2, 0, "invalid range '%s' (missing end?)", str); if (*end2) - error(1, 0, "invalid range '%s' (trailing garbage?)", str); + error(2, 0, "invalid range '%s' (trailing garbage?)", str); } } if (res.min < 0 || res.max < 0) - error(1, 0, "invalid range '%s': values must be positive", str); + error(2, 0, "invalid range '%s': values must be positive", str); return res; } diff --git a/tests/core/genaut.test b/tests/core/genaut.test index 5da9509ed..f364569e1 100644 --- a/tests/core/genaut.test +++ b/tests/core/genaut.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2019, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2017-2020, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -60,7 +60,10 @@ genaut --l-nba='1..3?' 2>err && exit 1 grep 'invalid range.*trailing garbage' err genaut --l-nba='1..' 2>err && exit 1 grep 'invalid range.*missing end' err - +genaut --l-nba='9999999999999999999999999..' 
2>err && exit 1 +grep 'start.*too large' err +genaut --l-nba='1..9999999999999999999999999' 2>err && exit 1 +grep 'end.*too large' err # Tests for autfilt -N/--nth genaut --ks-nca=1..5 | autfilt -N 2..4 > range1.hoa diff --git a/tests/core/ltlgrind.test b/tests/core/ltlgrind.test index 292756bc6..09e75ee4e 100755 --- a/tests/core/ltlgrind.test +++ b/tests/core/ltlgrind.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2019 Laboratoire de Recherche et Développement +# Copyright (C) 2014, 2015, 2019, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -198,3 +198,6 @@ checkopt_noparse -F input/2 --format '%<,%f,%>,%F,%L' <err && exit 1 +grep 'too large' err diff --git a/tests/core/randaut.test b/tests/core/randaut.test index 7ff851646..50558e790 100755 --- a/tests/core/randaut.test +++ b/tests/core/randaut.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -29,6 +29,9 @@ grep "randaut: 3.1.*is not between 0 and 1 (in argument of -e" err randaut -n1a 3 2>err && exit 1 grep "randaut: failed to parse '1a' as an integer.* -n/--automata)" err +randaut -n99999999999999999999999999 3 2>err && exit 1 +grep "randaut:.*too large" err + randaut --spin -Q4 a b | ../ikwiad -H -XN - >out grep 'States: 4' out grep 'AP: 2' out From 39212bbcd27daa650f1851f57d0722424fb97ff0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 6 Jan 2023 11:55:34 +0100 Subject: [PATCH 239/606] more code smells * bin/common_file.cc, bin/common_file.hh, bin/common_finput.cc, bin/common_finput.hh, bin/common_output.cc, bin/common_setup.cc, bin/common_setup.hh, bin/common_trans.cc, bin/common_trans.hh, bin/dstar2tgba.cc, bin/genaut.cc, bin/genltl.cc, bin/ltl2tgba.cc, bin/ltl2tgta.cc, bin/ltlcross.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlsynt.cc, bin/randltl.cc: Fix minor code issues reported by sonarcloud. --- bin/common_file.cc | 7 ++-- bin/common_file.hh | 13 +++---- bin/common_finput.cc | 10 ++---- bin/common_finput.hh | 16 +++++---- bin/common_output.cc | 20 +++++------ bin/common_setup.cc | 7 ++-- bin/common_setup.hh | 6 ++-- bin/common_trans.cc | 46 ++++++++++++------------ bin/common_trans.hh | 19 +++++----- bin/dstar2tgba.cc | 4 +-- bin/genaut.cc | 4 +-- bin/genltl.cc | 6 ++-- bin/ltl2tgba.cc | 8 ++--- bin/ltl2tgta.cc | 4 +-- bin/ltlcross.cc | 86 ++++++++++++++++---------------------------- bin/ltldo.cc | 14 +++----- bin/ltlfilt.cc | 20 +++++------ bin/ltlsynt.cc | 15 ++++---- bin/randltl.cc | 6 ++-- 19 files changed, 133 insertions(+), 178 deletions(-) diff --git a/bin/common_file.cc b/bin/common_file.cc index 005bb5479..4e56c6d54 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et +// Copyright (C) 2015, 2016, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -21,7 +21,6 @@ #include #include - output_file::output_file(const char* name, bool force_append) { std::ios_base::openmode mode = std::ios_base::trunc; @@ -39,10 +38,10 @@ output_file::output_file(const char* name, bool force_append) os_ = &std::cout; return; } - of_ = new std::ofstream(name, mode); + of_ = std::make_unique(name, mode); if (!*of_) error(2, errno, "cannot open '%s'", name); - os_ = of_; + os_ = of_.get(); } diff --git a/bin/common_file.hh b/bin/common_file.hh index b8f9842b8..b6aa0bec3 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2022 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015-2016, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -21,13 +21,13 @@ #include "common_sys.hh" #include +#include #include -#include class output_file { std::ostream* os_; - std::ofstream* of_ = nullptr; + std::unique_ptr of_; bool append_ = false; public: // Open a file for output. "-" is interpreted as stdout. @@ -37,11 +37,6 @@ public: void close(const std::string& name); - ~output_file() - { - delete of_; - } - bool append() const { return append_; diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 80aca5df7..dbcdb3849 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2019, 2021, 2022 Laboratoire de Recherche +// Copyright (C) 2012-2017, 2019, 2021-2023 Laboratoire de Recherche // et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -96,12 +96,6 @@ parse_formula(const std::string& s) (s, spot::default_environment::instance(), false, lenient); } -job_processor::job_processor() - : abort_run(false), real_filename(nullptr), - col_to_read(0), prefix(nullptr), suffix(nullptr) -{ -} - job_processor::~job_processor() { if (real_filename) @@ -370,7 +364,7 @@ int job_processor::run() { int error = 0; - for (auto& j: jobs) + for (const auto& j: jobs) { switch (j.type) { diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 2a5815fc3..9ecb5b025 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2017, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -56,9 +56,11 @@ spot::parsed_formula parse_formula(const std::string& s); class job_processor { protected: - bool abort_run; // Set to true in process_formula() to abort run(). + bool abort_run = false; // Set to true in process_formula() to abort run(). 
public: - job_processor(); + job_processor() = default; + job_processor(const job_processor&) = delete; + job_processor& operator=(const job_processor&) = delete; virtual ~job_processor(); @@ -84,10 +86,10 @@ public: virtual int run(); - char* real_filename; - long int col_to_read; - char* prefix; - char* suffix; + char* real_filename = nullptr; + long int col_to_read = 0; + char* prefix = nullptr; + char* suffix = nullptr; }; // Report and error message or add a default job depending on whether diff --git a/bin/common_output.cc b/bin/common_output.cc index e9c61a513..93cb2dfaf 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -23,6 +23,7 @@ #include "common_setup.hh" #include #include +#include #include #include #include @@ -297,9 +298,9 @@ namespace }; } -static formula_printer* format = nullptr; +static std::unique_ptr format; static std::ostringstream outputname; -static formula_printer* outputnamer = nullptr; +static std::unique_ptr outputnamer; static std::map> outputfiles; int @@ -320,7 +321,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = lbt_output; break; case 'o': - outputnamer = new formula_printer(outputname, arg); + outputnamer = std::make_unique(outputname, arg); break; case 'p': full_parenth = true; @@ -341,8 +342,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = wring_output; break; case OPT_FORMAT: - delete format; - format = new formula_printer(std::cout, arg); + format = std::make_unique(std::cout, arg); break; default: return ARGP_ERR_UNKNOWN; @@ -417,10 +417,10 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, formula_with_location fl = { f, filename, linenum, prefix, suffix }; outputnamer->print(fl, ptimer); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } output_formula(*out, f, ptimer, filename, linenum, prefix, suffix); *out << output_terminator; diff --git a/bin/common_setup.cc b/bin/common_setup.cc index 24cacae85..af033a47f 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -20,13 +20,14 @@ #include "common_setup.hh" #include "common_aoutput.hh" -#include "argp.h" -#include "closeout.h" +#include +#include #include #include #include #include #include +#include #include static void diff --git a/bin/common_setup.hh b/bin/common_setup.hh index e2fce84e0..94cd16f4f 100644 --- a/bin/common_setup.hh +++ b/bin/common_setup.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2013, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -34,5 +34,5 @@ int protected_main(char** progname, std::function mainfun); // Diagnose exceptions. [[noreturn]] void handle_any_exception(); -#define BEGIN_EXCEPTION_PROTECT try { (void)0; +#define BEGIN_EXCEPTION_PROTECT try { (void)0 #define END_EXCEPTION_PROTECT } catch (...) { handle_any_exception(); } diff --git a/bin/common_trans.cc b/bin/common_trans.cc index e34f3d77d..b93535173 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -53,7 +53,7 @@ struct shorthands_t }; #define SHORTHAND(PRE, POST) { PRE, std::regex("^" PRE), POST } -static shorthands_t shorthands_ltl[] = { +static const shorthands_t shorthands_ltl[] = { SHORTHAND("delag", " %f>%O"), SHORTHAND("lbt", " <%L>%O"), SHORTHAND("ltl2ba", " -f %s>%O"), @@ -73,7 +73,7 @@ static shorthands_t shorthands_ltl[] = { SHORTHAND("owl.* ltl-utilities\\b", " -f %f"), }; -static shorthands_t shorthands_autproc[] = { +static const shorthands_t shorthands_autproc[] = { SHORTHAND("autfilt", " %H>%O"), SHORTHAND("dra2dpa", " <%H>%O"), SHORTHAND("dstar2tgba", " %H>%O"), @@ -85,7 +85,7 @@ static shorthands_t shorthands_autproc[] = { " <%H>%O"), }; -static void show_shorthands(shorthands_t* begin, shorthands_t* end) +static void show_shorthands(const shorthands_t* begin, const shorthands_t* end) { std::cout << ("If a COMMANDFMT does not use any %-sequence, and starts with one of\n" @@ -100,7 +100,8 @@ static void show_shorthands(shorthands_t* begin, shorthands_t* end) } -tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, +tool_spec::tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept : spec(spec), cmd(spec), name(spec), reference(is_ref) { @@ -113,15 +114,15 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, { if (*pos == '{') ++count; - else if (*pos == '}') - if (!--count) - { - name = strndup(cmd + 1, pos - cmd - 1); - cmd = pos + 1; - while (*cmd == ' ' || *cmd == '\t') - ++cmd; - break; - } + else if (*pos == '}' && --count == 0) + { + name = strndup(cmd + 1, pos - cmd - 1); + cmd = pos + 1; + // skip leading whitespace + while (*cmd == ' ' || *cmd == '\t') + ++cmd; + break; + } } } // If there is no % in the string, look for a known @@ -147,11 +148,11 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, auto& p = *begin++; if (std::regex_search(basename, p.rprefix)) { - int m = strlen(p.suffix); - int q = strlen(cmd); + size_t m = strlen(p.suffix); + size_t q = strlen(cmd); char* tmp = static_cast(malloc(q + m + 1)); - strcpy(tmp, cmd); - strcpy(tmp + q, p.suffix); + memcpy(tmp, cmd, q); + memcpy(tmp + q, p.suffix, m + 1); cmd = tmp; allocated = true; break; @@ -490,9 +491,8 @@ read_stdout_of_command(char* const* args) if (close(cout_pipe[1]) < 0) error(2, errno, "closing write-side of pipe failed"); - std::string buffer(32, 0); std::string results; - int bytes_read; + ssize_t bytes_read; for (;;) { static char buffer[512]; @@ -612,7 +612,7 @@ get_arg(const char*& cmd) { const char* start = cmd; std::string arg; - while (int c = *cmd) + while (char c = *cmd) { switch (c) { @@ -642,14 +642,14 @@ get_arg(const char*& cmd) goto end_loop; case '\'': { - int d = 0; + char d = '\0'; while ((d = *++cmd)) { if (d == '\'') break; 
arg.push_back(d); } - if (d == 0) + if (d == '\0') return nullptr; } break; diff --git a/bin/common_trans.hh b/bin/common_trans.hh index 31c88c80c..0ebe59e8c 100644 --- a/bin/common_trans.hh +++ b/bin/common_trans.hh @@ -51,7 +51,8 @@ struct tool_spec // Whether the tool is a reference. bool reference; - tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, + tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept; tool_spec(const tool_spec& other) noexcept; tool_spec& operator=(const tool_spec& other); @@ -71,7 +72,7 @@ struct quoted_formula final: public spot::printable_value struct filed_formula final: public spot::printable { - filed_formula(const quoted_formula& ltl) : f_(ltl) + explicit filed_formula(const quoted_formula& ltl) : f_(ltl) { } @@ -89,9 +90,7 @@ struct filed_formula final: public spot::printable struct filed_automaton final: public spot::printable { - filed_automaton() - { - } + filed_automaton() = default; void print(std::ostream& os, const char* pos) const override; @@ -112,7 +111,7 @@ struct printable_result_filename final: unsigned translator_num; printable_result_filename(); - ~printable_result_filename(); + ~printable_result_filename() override; void reset(unsigned n); void cleanup(); @@ -126,7 +125,7 @@ protected: spot::bdd_dict_ptr dict; // Round-specific variables quoted_formula ltl_formula; - filed_formula filename_formula = ltl_formula; + filed_formula filename_formula{ltl_formula}; // Run-specific variables printable_result_filename output; public: @@ -151,9 +150,9 @@ protected: public: using spot::formater::has; - autproc_runner(// whether we accept the absence of output - // specifier - bool no_output_allowed = false); + explicit autproc_runner(// whether we accept the absence of output + // specifier + bool no_output_allowed = false); void round_automaton(spot::const_twa_graph_ptr aut, unsigned serial); }; diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 5b60a0ecc..4b2ec9662 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -117,7 +117,7 @@ namespace spot::postprocessor& post; automaton_printer printer; - dstar_processor(spot::postprocessor& post) + explicit dstar_processor(spot::postprocessor& post) : hoa_processor(spot::make_bdd_dict()), post(post), printer(aut_input) { } diff --git a/bin/genaut.cc b/bin/genaut.cc index 26678c588..f8d6b93ff 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2017-2019, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -128,7 +128,7 @@ output_pattern(gen::aut_pattern_id pattern, int n) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/genltl.cc b/bin/genltl.cc index 96d8bd7d3..ef8049171 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015-2019, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). 
+// Copyright (C) 2012, 2013, 2015-2019, 2022-2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -317,7 +317,7 @@ output_pattern(gen::ltl_pattern_id pattern, int n, int n2) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index d4fb2fc17..73a9a23c6 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2019, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -124,10 +124,10 @@ namespace { public: spot::translator& trans; - automaton_printer printer; + automaton_printer printer{ltl_input}; - trans_processor(spot::translator& trans) - : trans(trans), printer(ltl_input) + explicit trans_processor(spot::translator& trans) + : trans(trans) { } diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index ab925c7ac..60afcf9e8 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2020, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -168,7 +168,7 @@ namespace public: spot::translator& trans; - trans_processor(spot::translator& trans) + explicit trans_processor(spot::translator& trans) : trans(trans) { } diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index 0dfa09985..3219beb75 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2020, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -264,55 +264,32 @@ end_error() struct statistics { - statistics() noexcept - : ok(false), - alternating(false), - status_str(nullptr), - status_code(0), - time(0), - states(0), - edges(0), - transitions(0), - acc(0), - scc(0), - nonacc_scc(0), - terminal_scc(0), - weak_scc(0), - strong_scc(0), - nondetstates(0), - nondeterministic(false), - terminal_aut(false), - weak_aut(false), - strong_aut(false) - { - } - // If OK is false, only the status_str, status_code, and time fields // should be valid. 
- bool ok; - bool alternating; - const char* status_str; - int status_code; - double time; - unsigned states; - unsigned edges; - unsigned long long transitions; - unsigned acc; - unsigned scc; - unsigned nonacc_scc; - unsigned terminal_scc; - unsigned weak_scc; - unsigned strong_scc; - unsigned nondetstates; - bool nondeterministic; - bool terminal_aut; - bool weak_aut; - bool strong_aut; + bool ok = false; + bool alternating = false; + const char* status_str = nullptr; + int status_code = 0; + double time = 0.0; + unsigned states = 0; + unsigned edges = 0; + unsigned long long transitions = 0; + unsigned acc = 0; + unsigned scc = 0; + unsigned nonacc_scc = 0; + unsigned terminal_scc = 0; + unsigned weak_scc = 0; + unsigned strong_scc = 0; + unsigned nondetstates = 0; + bool nondeterministic = false; + bool terminal_aut = false; + bool weak_aut = false; + bool strong_aut = false; std::vector product_states; std::vector product_transitions; std::vector product_scc; - bool ambiguous; - bool complete; + bool ambiguous = false; + bool complete = false; std::string hoa_str; static void @@ -581,7 +558,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict) { } @@ -1095,17 +1072,14 @@ namespace } // Make sure we do not translate the same formula twice. - if (!allow_dups) + if (!allow_dups && !unique_set.insert(f).second) { - if (!unique_set.insert(f).second) - { - if (!quiet) - std::cerr - << ("warning: This formula or its negation has already" - " been checked.\n Use --allow-dups if it " - "should not be ignored.\n\n"); - return 0; - } + if (!quiet) + std::cerr + << ("warning: This formula or its negation has already" + " been checked.\n Use --allow-dups if it " + "should not be ignored.\n\n"); + return 0; } int problems = 0; diff --git a/bin/ltldo.cc b/bin/ltldo.cc index ffbd4873e..6e7bf5ec7 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -209,7 +209,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict, true) { } @@ -224,8 +224,6 @@ namespace format(command, tools[translator_num].cmd); std::string cmd = command.str(); - //std::cerr << "Running [" << l << translator_num << "]: " - // << cmd << std::endl; timer.start(); int es = exec_with_timeout(cmd.c_str()); timer.stop(); @@ -312,7 +310,7 @@ namespace spot::printable_value inputf; public: - processor(spot::postprocessor& post) + explicit processor(spot::postprocessor& post) : runner(dict), best_printer(best_stream, best_format), post(post) { printer.add_stat('T', &cmdname); @@ -323,9 +321,7 @@ namespace best_printer.declare('f', &inputf); } - ~processor() - { - } + ~processor() override = default; int process_string(const std::string& input, diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index c9064368d..81e895d42 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -586,7 +586,7 @@ namespace fset_t unique_set; spot::relabeling_map relmap; - ltl_processor(spot::tl_simplifier& simpl) + explicit ltl_processor(spot::tl_simplifier& simpl) : simpl(simpl) { } @@ -722,7 +722,7 @@ namespace matched &= !syntactic_si || f.is_syntactic_stutter_invariant(); if (matched && (ap_n.min > 0 || ap_n.max >= 0)) { - auto s = atomic_prop_collect(f); + spot::atomic_prop_set* s = atomic_prop_collect(f); int n = s->size(); delete s; matched &= (ap_n.min <= 0) || (n >= ap_n.min); @@ -761,7 +761,7 @@ namespace aut = ltl_to_tgba_fm(f, simpl.get_dict(), true); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -769,7 +769,7 @@ namespace } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -843,12 +843,12 @@ namespace { // Sort the formulas alphabetically. std::map m; - for (auto& p: relmap) - m.emplace(str_psl(p.first), p.second); - for (auto& p: m) + for (const auto& [newformula, oldname]: relmap) + m.emplace(str_psl(newformula), oldname); + for (const auto& [newname, oldname]: m) stream_formula(opt->output_define->ostream() - << "#define " << p.first << " (", - p.second, filename, + << "#define " << newname << " (", + oldname, filename, std::to_string(linenum).c_str()) << ")\n"; } one_match = true; diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index aaea855a4..a2ec32cd1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -152,7 +152,6 @@ static const struct argp_child children[] = { { &finput_argp_headless, 0, nullptr, 0 }, { &aoutput_argp, 0, nullptr, 0 }, - //{ &aoutput_o_format_argp, 0, nullptr, 0 }, { &misc_argp, 0, nullptr, 0 }, { nullptr, 0, nullptr, 0 } }; @@ -425,10 +424,6 @@ namespace auto sub_o = sub_outs_str.begin(); std::vector mealy_machines; - auto print_game = want_game ? - [](const spot::twa_graph_ptr& game)->void { dispatch_print_hoa(game); } - : [](const spot::twa_graph_ptr&)->void{}; - for (; sub_f != sub_form.end(); ++sub_f, ++sub_o) { spot::mealy_like m_like @@ -466,9 +461,11 @@ namespace assert((spptr->at(arena->get_init_state_number()) == false) && "Env needs first turn"); } - print_game(arena); if (want_game) - continue; + { + dispatch_print_hoa(arena); + continue; + } if (!spot::solve_game(arena, *gi)) { if (show_status) @@ -625,7 +622,7 @@ namespace } static void - split_aps(std::string arg, std::vector& where) + split_aps(const std::string& arg, std::vector& where) { std::istringstream aps(arg); std::string ap; diff --git a/bin/randltl.cc b/bin/randltl.cc index 986c437c1..749fcf373 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2019, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) 2012-2016, 2018-2019, 2022, 2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -65,7 +65,6 @@ enum { OPT_DUMP_PRIORITIES, OPT_DUPS, OPT_LTL_PRIORITIES, - OPT_PSL_PRIORITIES, OPT_SEED, OPT_SERE_PRIORITIES, OPT_TREE_SIZE, @@ -194,7 +193,6 @@ parse_opt(int key, char* arg, struct argp_state* as) case OPT_DUMP_PRIORITIES: opt_dump_priorities = true; break; - // case OPT_PSL_PRIORITIES: break; case OPT_SERE_PRIORITIES: opt_pS = arg; break; From 7e1d68479762a9ce5d66ec226d42ab23d0b38cbd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 11:59:49 +0100 Subject: [PATCH 240/606] dbranch: fix handling of states without successors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #524, reported by Rüdiger Ehlers. * spot/twaalgos/dbranch.cc: When merging an edge going to state without successors simply delete it. * bin/spot-x.cc: Typo in documentation. * tests/core/ltlcross.test: Add a test case. * NEWS: Mention the bug. --- NEWS | 4 ++++ bin/spot-x.cc | 4 ++-- spot/twaalgos/dbranch.cc | 39 +++++++++++++++++++++------------------ tests/core/ltlcross.test | 5 ++++- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/NEWS b/NEWS index 9775339fe..e6d484f12 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,10 @@ New in spot 2.11.3.dev (not yet released) multiple initial states (because Spot supports only one), the HOA parser could break state-based acceptance. (Issue #522.) + - delay_branching_here(), a new optimization of Spot 2.11 had an + incorrect handling of states without successors, causing some + segfaults. (Issue #524.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 1edb3f54e..964710dc1 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -53,7 +53,7 @@ implication-based simplifications are attempted. 
Defaults to 16.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ as product or sum of subformulas.") }, - { DOC("branch-prop", "Set to 0 to disable branching-postponement \ + { DOC("branch-post", "Set to 0 to disable branching-postponement \ (done during translation, may create more states) and delayed-branching \ (almost similar, but done after translation to only remove states). \ Set to 1 to force branching-postponement, and to 2 \ diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index 465f8326e..19a0d9474 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2022-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -118,27 +118,30 @@ namespace spot continue; } unsigned mergedst = it2->second; - // we have to merge canddst into mergedst. This is as - // simple as: + // we have to merge canddst into mergedst. + // This is as simple as: // 1) connecting their list of transitions - unsigned& mergedfirst = g.state_storage(mergedst).succ; - unsigned& mergedlast = g.state_storage(mergedst).succ_tail; - unsigned& candfirst = g.state_storage(canddst).succ; unsigned& candlast = g.state_storage(canddst).succ_tail; - if (mergedlast) - aut->edge_storage(mergedlast).next_succ = candfirst; - else // mergedst had now successor - mergedfirst = candfirst; - mergedlast = candlast; - // 2) updating the source of the merged transitions - for (unsigned e2 = candfirst; e2 != 0;) + if (candlast) { - auto& edge = aut->edge_storage(e2); - edge.src = mergedst; - e2 = edge.next_succ; + unsigned& mergedfirst = g.state_storage(mergedst).succ; + unsigned& mergedlast = g.state_storage(mergedst).succ_tail; + unsigned& candfirst = g.state_storage(canddst).succ; + if (mergedlast) + aut->edge_storage(mergedlast).next_succ = candfirst; + else // mergedst had no successor + mergedfirst = candfirst; + mergedlast = candlast; + // 2) updating the source of the merged transitions + for (unsigned e2 = candfirst; e2 != 0;) + { + auto& edge = aut->edge_storage(e2); + edge.src = mergedst; + e2 = edge.next_succ; + } + // 3) deleting the edge to canddst. + candfirst = candlast = 0; } - // 3) deleting the edge to canddst. - candfirst = candlast = 0; it.erase(); // 4) updating succ_cand succ_cand[mergedst] += succ_cand[canddst]; diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index 1a5806ba8..ebe20fb26 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2016, 2019 Laboratoire de Recherche et +# Copyright (C) 2012-2014, 2016, 2019, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -65,3 +65,6 @@ ltlcross -D \ # Spot 2.8. We use ltl2tgba twice so ltlcross build cross-products. ltlcross --verbose ltl2tgba ltl2tgba \ -f '(G(F((a1)&(X(X(b1))))))&(G(F((a2)&(X(X(b2))))))&(G(F((a3)&(X(X(b3))))))' + +# Issue #524. +ltlcross ltl2tgba -f '!(X(v3 | G!v5) | ((Xv5 & !(v5 & !X!v3)) U !v5))' From eae91e97cd1b450ac75f121ac314c61691af3ff0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 15:25:06 +0100 Subject: [PATCH 241/606] robin_hood: update to version version 3.11.5 * spot/priv/robin_hood.hh: Update. 
* spot/priv/Makefile.am: Patch ROBIN_HOOD_IS_TRIVIALLY_COPYABLE to work around an issue with clang on Arch linux. --- spot/priv/Makefile.am | 10 ++++++++-- spot/priv/robin_hood.hh | 43 +++++++++++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index d4e9cc77c..7ec7e6148 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013-2019, 2021 Laboratoire de Recherche et +## Copyright (C) 2013-2019, 2021-2023 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## ## This file is part of Spot, a model checking library. @@ -42,5 +42,11 @@ RH = $(GH)/robin-hood-hashing/master/src/include/robin_hood.h .PHONY: update update: wget $(RH) -O robin_hood.tmp || curl $(RH) -o robin_hood.tmp - sed 's/std::malloc/malloc/' robin_hood.tmp > $(srcdir)/robin_hood.hh +## Do not use std::malloc but malloc, because gnulib may replace it by +## rpl_malloc instead. Also disable to tests of __GNUC__ about +## ROBIN_HOOD_IS_TRIVIALLY_COPYABLE because (1) all versions of G++ we +## support have std::is_trivially_copyable, and (2) clang define +## __GNUC__ to some value that fail this test, and then warn that +## __has_trivial_copy is obsoleted. + sed 's/std::malloc/malloc/;/https:\/\/stackoverflow.com\/a\/31798726/{n;s/defined.*/false/}' robin_hood.tmp > $(srcdir)/robin_hood.hh rm -f robin_hood.tmp diff --git a/spot/priv/robin_hood.hh b/spot/priv/robin_hood.hh index 8c151d517..a4bc8beae 100644 --- a/spot/priv/robin_hood.hh +++ b/spot/priv/robin_hood.hh @@ -36,7 +36,7 @@ // see https://semver.org/ #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes #define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner -#define ROBIN_HOOD_VERSION_PATCH 3 // for backwards-compatible bug fixes +#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes #include #include @@ -206,7 +206,7 @@ static Counts& counts() { // workaround missing "is_trivially_copyable" in g++ < 5.0 // See https://stackoverflow.com/a/31798726/48181 -#if defined(__GNUC__) && __GNUC__ < 5 +#if false # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) #else # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value @@ -1820,6 +1820,12 @@ public: InsertionState::key_found != idxAndState.second); } + template + iterator emplace_hint(const_iterator position, Args&&... args) { + (void)position; + return emplace(std::forward(args)...).first; + } + template std::pair try_emplace(const key_type& key, Args&&... args) { return try_emplace_impl(key, std::forward(args)...); @@ -1831,16 +1837,15 @@ public: } template - std::pair try_emplace(const_iterator hint, const key_type& key, - Args&&... args) { + iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) { (void)hint; - return try_emplace_impl(key, std::forward(args)...); + return try_emplace_impl(key, std::forward(args)...).first; } template - std::pair try_emplace(const_iterator hint, key_type&& key, Args&&... args) { + iterator try_emplace(const_iterator hint, key_type&& key, Args&&... 
args) { (void)hint; - return try_emplace_impl(std::move(key), std::forward(args)...); + return try_emplace_impl(std::move(key), std::forward(args)...).first; } template @@ -1854,16 +1859,15 @@ public: } template - std::pair insert_or_assign(const_iterator hint, const key_type& key, - Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(key, std::forward(obj)); + return insertOrAssignImpl(key, std::forward(obj)).first; } template - std::pair insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(std::move(key), std::forward(obj)); + return insertOrAssignImpl(std::move(key), std::forward(obj)).first; } std::pair insert(const value_type& keyval) { @@ -1871,10 +1875,20 @@ public: return emplace(keyval); } + iterator insert(const_iterator hint, const value_type& keyval) { + (void)hint; + return emplace(keyval).first; + } + std::pair insert(value_type&& keyval) { return emplace(std::move(keyval)); } + iterator insert(const_iterator hint, value_type&& keyval) { + (void)hint; + return emplace(std::move(keyval)).first; + } + // Returns 1 if key is found, 0 otherwise. size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) ROBIN_HOOD_TRACE(this) @@ -2308,13 +2322,14 @@ private: auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); - // calloc also zeroes everything + // malloc & zero mInfo. Faster than calloc everything. auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" << numElementsWithBuffer << ")") mKeyVals = reinterpret_cast( - detail::assertNotNull(std::calloc(1, numBytesTotal))); + detail::assertNotNull(malloc(numBytesTotal))); mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node)); // set sentinel mInfo[numElementsWithBuffer] = 1; From 9ca2927291f6338f69373683d45db686a0b5907f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Jan 2023 16:07:49 +0100 Subject: [PATCH 242/606] bin: update copyright year and laboratory name * bin/common_setup.cc: Here. 
--- bin/common_setup.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/common_setup.cc b/bin/common_setup.cc index af033a47f..c59ec0695 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -36,7 +36,7 @@ display_version(FILE *stream, struct argp_state*) fputs(program_name, stream); fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\ \n\ -Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita.\n\ +Copyright (C) 2023 Laboratoire de Recherche de l'Epita (LRE)\n\ License GPLv3+: \ GNU GPL version 3 or later .\n\ This is free software: you are free to change and redistribute it.\n\ From 315872a54b037a6a1206bfeb119618f8bf64b5ae Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Fri, 20 Jan 2023 15:57:46 +0100 Subject: [PATCH 243/606] ltlsynt: typo in doc * bin/ltlsynt.cc: here --- bin/ltlsynt.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index a2ec32cd1..35ac4194b 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -102,8 +102,8 @@ static const argp_option options[] = "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0, - "simplification to apply to the controler (no) nothing, " - "(bisim) bisimulation-based reduction, (bwoa) bissimulation-based " + "simplification to apply to the controller (no) nothing, " + "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based " "reduction with output assignment, (sat) SAT-based minimization, " "(bisim-sat) SAT after bisim, (bwoa-sat) SAT after bwoa. Defaults " "to 'bwoa'.", 0 }, From a1c02856acfd5421f16c8fa22c182f3766b75e76 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 24 Jan 2023 11:35:14 +0100 Subject: [PATCH 244/606] autfilt: allow --highlight-word to work on Fin acceptance Fixes #523. * bin/autfilt.cc: Remove the restriction. * tests/core/acc_word.test: Add test case. * NEWS: Mention the fix. --- NEWS | 4 ++++ bin/autfilt.cc | 9 ++------- tests/core/acc_word.test | 20 +++++++++++++------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index e6d484f12..10ecce97c 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,10 @@ New in spot 2.11.3.dev (not yet released) multiple initial states (because Spot supports only one), the HOA parser could break state-based acceptance. (Issue #522.) + - autfilt --highlight-word refused to work on automata with Fin + acceptance for historical reasons, however the code has been + perfectly able to handle this for a while. (Issue #523.) + - delay_branching_here(), a new optimization of Spot 2.11 had an incorrect handling of states without successors, causing some segfaults. (Issue #524.) 
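As a sketch of what the --highlight-word change (issue #523 above) allows, the loop that bin/autfilt.cc now runs unconditionally (see the diff just below) can be reproduced against the public C++ API: build an automaton for the word, intersect it with the input, and project the accepting run back for highlighting. The formula, word and color are invented for illustration; the example mirrors the new acc_word.test case rather than autfilt itself.

    // Highlight the part of a Fin-acceptance automaton that accepts cycle{a}.
    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/twa/word.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/product.hh>
    #include <spot/twaalgos/emptiness.hh>
    #include <spot/twaalgos/hoa.hh>

    int main()
    {
      spot::translator trans;
      trans.set_type(spot::postprocessor::Generic);        // allow Fin acceptance
      trans.set_pref(spot::postprocessor::Deterministic);  // like ltl2tgba -G -D
      spot::twa_graph_ptr aut = trans.run(spot::parse_formula("FGa"));
      spot::twa_word_ptr w = spot::parse_word("cycle{a}", aut->get_dict());
      if (auto run = spot::product(aut, w->as_automaton())->accepting_run())
        run->project(aut)->highlight(0);                   // color 0
      spot::print_hoa(std::cout, aut, "1.1") << '\n';      // HOA 1.1 keeps highlights
      return 0;
    }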
diff --git a/bin/autfilt.cc b/bin/autfilt.cc index b55d1bc9f..4487fad8b 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1667,13 +1667,8 @@ namespace if (!opt->hl_words.empty()) for (auto& [word_aut, color]: opt->hl_words) - { - if (aut->acc().uses_fin_acceptance()) - error(2, 0, - "--highlight-word does not yet work with Fin acceptance"); - if (auto run = spot::product(aut, word_aut)->accepting_run()) - run->project(aut)->highlight(color); - } + if (auto run = spot::product(aut, word_aut)->accepting_run()) + run->project(aut)->highlight(color); timer.stop(); if (opt->uniq) diff --git a/tests/core/acc_word.test b/tests/core/acc_word.test index 53ce4b98e..5f3b6880b 100644 --- a/tests/core/acc_word.test +++ b/tests/core/acc_word.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2018, 2019 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -91,6 +91,15 @@ State: 1 EOF diff expected out +ltl2tgba -G '(GF(a & X!a) -> GF(b & XXb)) & GFc' > aut.hoa +word='!a&!c;cycle{!a&b&!c;!a&c;!a&b&c}' +autfilt -H1.1 aut.hoa --highlight-word="$word" > out.hoa +grep spot.highlight.edges out.hoa >out.edges +cat >expected <stderr && exit 1 -test $? -eq 2 -grep 'highlight-word.*Fin' stderr - +# highlight-word used not to work with Fin acceptance, but it's ok now +ltl2tgba -G -D 'FGa' | autfilt --highlight-word='cycle{a}' ltlfilt -f 'GFa' --accept-word 'cycle{!a}' && exit 1 ltlfilt -f 'GF!a' --accept-word 'cycle{!a}' From 126d9bc103b238a910b26cc54523883b42cba607 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 24 Jan 2023 15:48:06 +0100 Subject: [PATCH 245/606] bin: fix number conversion routines on 32bit On 32bit archetectures, long int = int the current check for detecting values that overflow int will fail. Conversion routings should check errno. * bin/common_conv.cc, bin/common_range.cc: Here. 
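A self-contained sketch of the conversion pattern this patch introduces in bin/common_conv.cc (the diff follows below): when long and int have the same width, as on 32-bit targets, comparing the strtol() result with its int truncation can no longer detect overflow, so errno must also be checked for ERANGE. The helper name and diagnostics are invented.

    #include <cerrno>
    #include <cstdlib>
    #include <iostream>

    // Convert s to int, diagnosing both syntax errors and overflows.
    static int to_int_checked(const char* s)
    {
      char* end;
      errno = 0;
      long lres = std::strtol(s, &end, 10);
      int res = static_cast<int>(lres);
      if (*end)
        std::cerr << "not an integer: " << s << '\n';
      else if (res != lres || errno == ERANGE)  // ERANGE matters when long == int
        std::cerr << "out of range for int: " << s << '\n';
      return res;
    }

    int main()
    {
      std::cout << to_int_checked("42") << '\n';
      to_int_checked("99999999999999999999");   // reported only through ERANGE
      return 0;
    }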
--- bin/common_conv.cc | 12 ++++++++---- bin/common_range.cc | 6 ++++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/bin/common_conv.cc b/bin/common_conv.cc index 02b1815fd..b23a67c51 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -25,12 +25,13 @@ int to_int(const char* s, const char* where) { char* endptr; + errno = 0; long int lres = strtol(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an integer (in argument of %s).", s, where); int res = lres; - if (res != lres) + if (res != lres || errno == ERANGE) error(2, 0, "value '%s' is too large for an int (in argument of %s).", s, where); return res; @@ -49,13 +50,14 @@ unsigned to_unsigned (const char *s, const char* where) { char* endptr; + errno = 0; unsigned long lres = strtoul(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an unsigned integer (in argument of %s).", s, where); unsigned res = lres; - if (res != lres) + if (res != lres || errno == ERANGE) error(2, 0, "value '%s' is too large for a unsigned int (in argument of %s).", s, where); @@ -66,8 +68,9 @@ float to_float(const char* s, const char* where) { char* endptr; + errno = 0; float res = strtof(s, &endptr); - if (*endptr) + if (*endptr || errno == ERANGE) error(2, 0, "failed to parse '%s' as a float (in argument of %s)", s, where); return res; @@ -89,8 +92,9 @@ to_longs(const char* arg) while (*arg) { char* endptr; + errno = 0; long value = strtol(arg, &endptr, 10); - if (endptr == arg) + if (endptr == arg || errno) error(2, 0, "failed to parse '%s' as an integer.", arg); res.push_back(value); while (*endptr == ' ' || *endptr == ',') diff --git a/bin/common_range.cc b/bin/common_range.cc index 9419cc389..98e568b41 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -36,9 +36,10 @@ parse_range(const char* str, int missing_left, int missing_right) { range res; char* end; + errno = 0; long lres = strtol(str, &end, 10); res.min = lres; - if (res.min != lres) + if (res.min != lres || errno == ERANGE) error(2, 0, "start of range '%s' is too large for an int.", str); if (end == str) { @@ -69,9 +70,10 @@ parse_range(const char* str, int missing_left, int missing_right) { // Parse the next integer. char* end2; + errno = 0; lres = strtol(end, &end2, 10); res.max = lres; - if (res.max != lres) + if (res.max != lres || errno == ERANGE) error(2, 0, "end of range '%s' is too large for an int.", str); if (str == end2) error(2, 0, "invalid range '%s' " From 26660728674567f0a0ff97e1eb5a34aa38939955 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 24 Jan 2023 15:54:39 +0100 Subject: [PATCH 246/606] * .gitlab-ci.yml: Use pipeline id to name volumes. --- .gitlab-ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a2006ee7f..348bacba1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -276,7 +276,7 @@ debpkg-stable: - stable script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable - - vol=spot-stable-$CI_COMMIT_SHA + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? 
@@ -304,7 +304,7 @@ debpkg-stable-i386: needs: ["debpkg-stable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable - - vol=spot-stable-$CI_COMMIT_SHA + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? @@ -331,7 +331,7 @@ debpkg-unstable: - next script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian - - vol=spot-unstable-$CI_COMMIT_SHA + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? @@ -357,7 +357,7 @@ debpkg-unstable-i386: needs: ["debpkg-unstable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 - - vol=spot-unstable-$CI_COMMIT_SHA + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? From 5969aa4925e52fbfd07e0edc6933fed3faf7ae20 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 30 Jan 2023 17:51:48 +0100 Subject: [PATCH 247/606] work around gcc-snapshot warnings about dangling references * spot/twaalgos/game.hh, spot/twaalgos/game.cc (get_state_players, get_strategy, get_state_winners): Take argument by reference, not copy. * spot/twaalgos/synthesis.cc, spot/twaalgos/mealy_machine.cc: Replace auto by actual type for readability. --- spot/twaalgos/game.cc | 17 ++++++++++++++--- spot/twaalgos/game.hh | 12 ++++++++---- spot/twaalgos/mealy_machine.cc | 8 ++++---- spot/twaalgos/synthesis.cc | 10 +++++----- 4 files changed, 31 insertions(+), 16 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index df259b84a..17f94a7e4 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1056,7 +1056,18 @@ namespace spot (*owners)[state] = owner; } - const region_t& get_state_players(const_twa_graph_ptr arena) + const region_t& get_state_players(const const_twa_graph_ptr& arena) + { + region_t *owners = arena->get_named_prop + ("state-player"); + if (!owners) + throw std::runtime_error + ("get_state_players(): state-player property not defined, not a game?"); + + return *owners; + } + + const region_t& get_state_players(twa_graph_ptr& arena) { region_t *owners = arena->get_named_prop ("state-player"); @@ -1081,7 +1092,7 @@ namespace spot } - const strategy_t& get_strategy(const_twa_graph_ptr arena) + const strategy_t& get_strategy(const const_twa_graph_ptr& arena) { auto strat_ptr = arena->get_named_prop("strategy"); if (!strat_ptr) @@ -1174,7 +1185,7 @@ namespace spot (*winners)[state] = winner; } - const region_t& get_state_winners(const_twa_graph_ptr arena) + const region_t& get_state_winners(const const_twa_graph_ptr& arena) { region_t *winners = arena->get_named_prop("state-winner"); if (!winners) diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index df5d27439..dbaccce75 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). 
// // This file is part of Spot, a model checking library. @@ -163,14 +163,18 @@ namespace spot /// \ingroup games /// \brief Get the owner of all states + ///@{ SPOT_API - const region_t& get_state_players(const_twa_graph_ptr arena); + const region_t& get_state_players(const const_twa_graph_ptr& arena); + SPOT_API + const region_t& get_state_players(twa_graph_ptr& arena); + ///@} /// \ingroup games /// \brief Get or set the strategy /// @{ SPOT_API - const strategy_t& get_strategy(const_twa_graph_ptr arena); + const strategy_t& get_strategy(const const_twa_graph_ptr& arena); SPOT_API void set_strategy(twa_graph_ptr arena, const strategy_t& strat); SPOT_API @@ -214,5 +218,5 @@ namespace spot /// \ingroup games /// \brief Get the winner of all states SPOT_API - const region_t& get_state_winners(const_twa_graph_ptr arena); + const region_t& get_state_winners(const const_twa_graph_ptr& arena); } diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 1126ad8e0..386e44126 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -3849,7 +3849,7 @@ namespace spot // 0 -> "Env" next is input props // 1 -> "Player" next is output prop - const auto& spref = get_state_players(mmw); + const region_t& spref = get_state_players(mmw); assert((spref.size() == mmw->num_states()) && "Inconsistent state players"); @@ -3989,9 +3989,9 @@ namespace spot const unsigned initl = left->get_init_state_number(); const unsigned initr = right->get_init_state_number(); - auto& spr = get_state_players(right); + const region_t& spr = get_state_players(right); #ifndef NDEBUG - auto& spl = get_state_players(left); + const region_t& spl = get_state_players(left); // todo auto check_out = [](const const_twa_graph_ptr& aut, const auto& sp) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 88e22ff04..494cc0f1f 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -137,12 +137,12 @@ namespace{ // Note, this only deals with deterministic strategies // Note, assumes that env starts playing twa_graph_ptr - apply_strategy(const twa_graph_ptr& arena, + apply_strategy(const const_twa_graph_ptr& arena, bool unsplit, bool keep_acc) { - const auto& win = get_state_winners(arena); - const auto& strat = get_strategy(arena); - const auto& sp = get_state_players(arena); + const region_t& win = get_state_winners(arena); + const strategy_t& strat = get_strategy(arena); + const region_t& sp = get_state_players(arena); auto outs = get_synthesis_outputs(arena); if (!win[arena->get_init_state_number()]) From 43b4d80da14dccbd2740a1595f418ad6a3f84ae8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 Feb 2023 09:35:46 +0100 Subject: [PATCH 248/606] dbranch: fix handling of state-based acceptance Fixes issue #525. * spot/twaalgos/dbranch.hh, NEWS: Document. * spot/twaalgos/dbranch.cc: Detect cases where the acceptance should be changed from state-based to transition-based. * tests/python/dbranch.py: Add a test case. 
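A minimal sketch of the scenario this patch addresses, using only public API (the formula is an arbitrary choice): degeneralize to obtain a state-based automaton, apply delay_branching_here(), and observe whether the state-based-acceptance property had to be dropped.

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/degen.hh>
    #include <spot/twaalgos/dbranch.hh>

    int main()
    {
      spot::translator trans;
      spot::twa_graph_ptr aut =
        spot::degeneralize(trans.run(spot::parse_formula("GF(a & Xb)")));
      std::cout << "state-based before: "
                << aut->prop_state_acc().is_true() << '\n';
      if (spot::delay_branching_here(aut))
        aut->purge_unreachable_states();   // as in the Python test case below
      std::cout << "state-based after: "
                << aut->prop_state_acc().is_true() << '\n';
      return 0;
    }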
--- NEWS | 4 ++++ spot/twaalgos/dbranch.cc | 19 +++++++++++++++++-- spot/twaalgos/dbranch.hh | 15 ++++++++++----- tests/python/dbranch.py | 29 ++++++++++++++++++++++++++++- 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 4d9d1def2..e8774df45 100644 --- a/NEWS +++ b/NEWS @@ -32,6 +32,10 @@ New in spot 2.11.3.dev (not yet released) incorrectly handling of states without successors, causing some segfaults. (Issue #524.) + - Running delay_branching_here() on state-based automata (this was not + done in Spot so far) may require the output to use transition-based + acceptance. (Issue #525.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index 19a0d9474..7cf1b262e 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -66,6 +66,10 @@ namespace spot hashmap_t first_dest[1 + is_game]; auto& g = aut->get_graph(); + // Merging outgoing transitions may cause the automaton to need + // transition-based acceptance. + bool need_trans = !aut->prop_state_acc().is_true(); + // setup a DFS std::vector seen(ns); std::stack todo; @@ -128,9 +132,18 @@ namespace spot unsigned& mergedlast = g.state_storage(mergedst).succ_tail; unsigned& candfirst = g.state_storage(canddst).succ; if (mergedlast) - aut->edge_storage(mergedlast).next_succ = candfirst; + { + aut->edge_storage(mergedlast).next_succ = candfirst; + // Do we need to require transition-based acceptance? + if (!need_trans) + need_trans = + (aut->edge_storage(candfirst).acc + != aut->edge_storage(mergedfirst).acc); + } else // mergedst had no successor - mergedfirst = candfirst; + { + mergedfirst = candfirst; + } mergedlast = candlast; // 2) updating the source of the merged transitions for (unsigned e2 = candfirst; e2 != 0;) @@ -149,6 +162,8 @@ namespace spot changed = true; } } + if (need_trans) + aut->prop_state_acc(false); return changed; } } diff --git a/spot/twaalgos/dbranch.hh b/spot/twaalgos/dbranch.hh index 9cd0efa5e..022c1a75b 100644 --- a/spot/twaalgos/dbranch.hh +++ b/spot/twaalgos/dbranch.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -26,10 +26,15 @@ namespace spot /// \ingroup twa_algorithms /// \brief Merge states to delay /// - /// If a state (x) has two outgoing transitions (x,l,m,y) and - /// (x,l,m,z) going to states (x) and (y) that have no other - /// incoming edges, then (y) and (z) can be merged (keeping the - /// union of their outgoing destinations). + /// In an automaton with transition-based acceptance, if a state (x) + /// has two outgoing transitions (x,l,m,y) and (x,l,m,z) going to + /// states (x) and (y) that have no other incoming edges, then (y) + /// and (z) can be merged (keeping the union of their outgoing + /// destinations). + /// + /// If the input automaton uses state-based acceptance, running this + /// function might make the acceptance transition-based, but only if + /// two states with different acceptance are merged at some point. /// /// \return true iff the automaton was modified. 
SPOT_API bool delay_branching_here(const twa_graph_ptr& aut); diff --git a/tests/python/dbranch.py b/tests/python/dbranch.py index ecf17d7d0..268c4a3c6 100644 --- a/tests/python/dbranch.py +++ b/tests/python/dbranch.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et +# Copyright (C) 2022, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -145,3 +145,30 @@ State: 5 State: 6 [t] 6 --END--""") + +# Running delay_branching_here on state-based acceptance may require +# the output to use transition-based acceptance. (Issue #525.) +a = spot.automaton(""" +HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" Acceptance: 1 Inf(0) --BODY-- +State: 0 [0] 1 [0] 2 State: 1 [1] 3 State: 2 {0} [!1] 3 State: 3 [t] 0 +--END--""") +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() +tc.assertTrue(spot.are_equivalent(a, copy)) +tc.assertEqual(a.to_str(), """HOA: v1 +States: 3 +Start: 0 +AP: 2 "b" "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[1] 1 +State: 1 +[0] 2 +[!0] 2 {0} +State: 2 +[t] 0 +--END--""") From 058975c167bfe9af29475e593b133e4f249f4760 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 Feb 2023 09:35:46 +0100 Subject: [PATCH 249/606] dbranch: fix handling of state-based acceptance Fixes issue #525. * spot/twaalgos/dbranch.hh, NEWS: Document. * spot/twaalgos/dbranch.cc: Detect cases where the acceptance should be changed from state-based to transition-based. * tests/python/dbranch.py: Add a test case. --- NEWS | 4 ++++ spot/twaalgos/dbranch.cc | 19 +++++++++++++++++-- spot/twaalgos/dbranch.hh | 15 ++++++++++----- tests/python/dbranch.py | 29 ++++++++++++++++++++++++++++- 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 10ecce97c..c734fb995 100644 --- a/NEWS +++ b/NEWS @@ -18,6 +18,10 @@ New in spot 2.11.3.dev (not yet released) incorrect handling of states without successors, causing some segfaults. (Issue #524.) + - Running delay_branching_here() on state-based automata (this was not + done in Spot so far) may require the output to use transition-based + acceptance. (Issue #525.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index 19a0d9474..7cf1b262e 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -66,6 +66,10 @@ namespace spot hashmap_t first_dest[1 + is_game]; auto& g = aut->get_graph(); + // Merging outgoing transitions may cause the automaton to need + // transition-based acceptance. + bool need_trans = !aut->prop_state_acc().is_true(); + // setup a DFS std::vector seen(ns); std::stack todo; @@ -128,9 +132,18 @@ namespace spot unsigned& mergedlast = g.state_storage(mergedst).succ_tail; unsigned& candfirst = g.state_storage(canddst).succ; if (mergedlast) - aut->edge_storage(mergedlast).next_succ = candfirst; + { + aut->edge_storage(mergedlast).next_succ = candfirst; + // Do we need to require transition-based acceptance? 
+ if (!need_trans) + need_trans = + (aut->edge_storage(candfirst).acc + != aut->edge_storage(mergedfirst).acc); + } else // mergedst had no successor - mergedfirst = candfirst; + { + mergedfirst = candfirst; + } mergedlast = candlast; // 2) updating the source of the merged transitions for (unsigned e2 = candfirst; e2 != 0;) @@ -149,6 +162,8 @@ namespace spot changed = true; } } + if (need_trans) + aut->prop_state_acc(false); return changed; } } diff --git a/spot/twaalgos/dbranch.hh b/spot/twaalgos/dbranch.hh index 9cd0efa5e..022c1a75b 100644 --- a/spot/twaalgos/dbranch.hh +++ b/spot/twaalgos/dbranch.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -26,10 +26,15 @@ namespace spot /// \ingroup twa_algorithms /// \brief Merge states to delay /// - /// If a state (x) has two outgoing transitions (x,l,m,y) and - /// (x,l,m,z) going to states (x) and (y) that have no other - /// incoming edges, then (y) and (z) can be merged (keeping the - /// union of their outgoing destinations). + /// In an automaton with transition-based acceptance, if a state (x) + /// has two outgoing transitions (x,l,m,y) and (x,l,m,z) going to + /// states (x) and (y) that have no other incoming edges, then (y) + /// and (z) can be merged (keeping the union of their outgoing + /// destinations). + /// + /// If the input automaton uses state-based acceptance, running this + /// function might make the acceptance transition-based, but only if + /// two states with different acceptance are merged at some point. /// /// \return true iff the automaton was modified. SPOT_API bool delay_branching_here(const twa_graph_ptr& aut); diff --git a/tests/python/dbranch.py b/tests/python/dbranch.py index ecf17d7d0..268c4a3c6 100644 --- a/tests/python/dbranch.py +++ b/tests/python/dbranch.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et +# Copyright (C) 2022, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -145,3 +145,30 @@ State: 5 State: 6 [t] 6 --END--""") + +# Running delay_branching_here on state-based acceptance may require +# the output to use transition-based acceptance. (Issue #525.) +a = spot.automaton(""" +HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" Acceptance: 1 Inf(0) --BODY-- +State: 0 [0] 1 [0] 2 State: 1 [1] 3 State: 2 {0} [!1] 3 State: 3 [t] 0 +--END--""") +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() +tc.assertTrue(spot.are_equivalent(a, copy)) +tc.assertEqual(a.to_str(), """HOA: v1 +States: 3 +Start: 0 +AP: 2 "b" "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[1] 1 +State: 1 +[0] 2 +[!0] 2 {0} +State: 2 +[t] 0 +--END--""") From a117fe1a22d1735995a9f032c6372611b96e5abc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 7 Feb 2023 14:40:20 +0100 Subject: [PATCH 250/606] to_finit: fix issue #526 * spot/twaalgos/remprop.cc: Use bdd_restrict instead of bdd_exists. * tests/core/ltlf.test: Add test case. * NEWS: Mention the bug. 
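The BDD-level difference behind this fix, sketched on invented values (this does not reproduce to_finite() itself, and it assumes BuDDy installed as Spot ships it, i.e. as <bddx.h>): existential quantification can enlarge an edge label until it overlaps or swallows others, while restriction substitutes a concrete value for the removed proposition and keeps the remaining constraint intact.

    #include <iostream>
    #include <bddx.h>

    int main()
    {
      bdd_init(1000, 1000);
      bdd_setvarnum(2);
      {
        bdd a = bdd_ithvar(0);      // an ordinary atomic proposition
        bdd alive = bdd_ithvar(1);  // a proposition being removed
        bdd cond = a | alive;       // some edge label mentioning both
        // bdd_exist() forgets the variable: the label collapses to true.
        std::cout << (bdd_exist(cond, alive) == bddtrue) << '\n';   // prints 1
        // bdd_restrict() fixes alive := false and keeps the rest.
        std::cout << (bdd_restrict(cond, !alive) == a) << '\n';     // prints 1
      }
      bdd_done();
      return 0;
    }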
--- NEWS | 4 ++++ spot/twaalgos/remprop.cc | 6 ++--- tests/core/ltlf.test | 51 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 57 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index e8774df45..2ffc85d38 100644 --- a/NEWS +++ b/NEWS @@ -36,6 +36,10 @@ New in spot 2.11.3.dev (not yet released) done in Spot so far) may require the output to use transition-based acceptance. (Issue #525.) + - to_finite(), introduce in 2.11, had a bug that could break the + completeness of automata and trigger an exception from the HOA + printer. (Issue #526.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 942a1b4b5..8d4be8fbc 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2019, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -205,7 +205,7 @@ namespace spot } else { - e.cond = bdd_exist(e.cond, rem); + e.cond = bdd_restrict(e.cond, rem); } } diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index 11f2132ac..74a2da79e 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -173,3 +173,52 @@ grep -v '\[f\]' out4 > out3 cmp out3 out4 && exit 1 # make sure we did remove something autfilt out3 > out4 diff out4 expected3 + +# Issue #526 +ltlfilt -f '(i->XXo)|G(i<->Xo2)' --from-ltlf | ltl2tgba -D |\ + autfilt -C --to-finite > out +cat >exp < Date: Tue, 7 Feb 2023 14:40:20 +0100 Subject: [PATCH 251/606] to_finit: fix issue #526 * spot/twaalgos/remprop.cc: Use bdd_restrict instead of bdd_exists. * tests/core/ltlf.test: Add test case. * NEWS: Mention the bug. --- NEWS | 4 ++++ spot/twaalgos/remprop.cc | 6 ++--- tests/core/ltlf.test | 51 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 57 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index c734fb995..0886eb41d 100644 --- a/NEWS +++ b/NEWS @@ -22,6 +22,10 @@ New in spot 2.11.3.dev (not yet released) done in Spot so far) may require the output to use transition-based acceptance. (Issue #525.) + - to_finite(), introduce in 2.11, had a bug that could break the + completeness of automata and trigger an exception from the HOA + printer. (Issue #526.) + New in spot 2.11.3 (2022-12-09) Bug fixes: diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 942a1b4b5..8d4be8fbc 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2019, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -205,7 +205,7 @@ namespace spot } else { - e.cond = bdd_exist(e.cond, rem); + e.cond = bdd_restrict(e.cond, rem); } } diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index 11f2132ac..74a2da79e 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -1,6 +1,6 @@ #! 
/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -173,3 +173,52 @@ grep -v '\[f\]' out4 > out3 cmp out3 out4 && exit 1 # make sure we did remove something autfilt out3 > out4 diff out4 expected3 + +# Issue #526 +ltlfilt -f '(i->XXo)|G(i<->Xo2)' --from-ltlf | ltl2tgba -D |\ + autfilt -C --to-finite > out +cat >exp < Date: Fri, 10 Feb 2023 08:49:26 +0100 Subject: [PATCH 252/606] Release spot 2.11.4 * NEWS, configure.ac, doc/org/setup.org: Update version. --- NEWS | 2 +- configure.ac | 4 ++-- doc/org/setup.org | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 0886eb41d..7a8ab4e46 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.11.3.dev (not yet released) +New in spot 2.11.4 (2023-02-10) Python: diff --git a/configure.ac b/configure.ac index 68fe4cab7..4643c0b66 100644 --- a/configure.ac +++ b/configure.ac @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2008-2022, Laboratoire de Recherche et Développement +# Copyright (C) 2008-2023, Laboratoire de Recherche et Développement # de l'Epita (LRDE). # Copyright (C) 2003-2007 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.3.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.4], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 78091ea45..7b6a4fa70 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11.3 -#+MACRO: LASTRELEASE 2.11.3 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.3.tar.gz][=spot-2.11.3.tar.gz=]] +#+MACRO: SPOTVERSION 2.11.4 +#+MACRO: LASTRELEASE 2.11.4 +#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.3.tar.gz][=spot-2.11.4.tar.gz=]] #+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-3/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-12-09 +#+MACRO: LASTDATE 2023-02-10 #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] From e44cb5152aeebada10578f7bbd2788b701edf0da Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 10 Feb 2023 08:51:29 +0100 Subject: [PATCH 253/606] Bump version to 2.11.4.dev * NEWS, configure.ac: Here. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 7a8ab4e46..7f3be814d 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.4.dev (not yet released) + + Nothing yet. + New in spot 2.11.4 (2023-02-10) Python: diff --git a/configure.ac b/configure.ac index 4643c0b66..e47e2eb29 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.4], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.4.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 4bd023e51510d9b64cb516c9b7120e0b8f741b7e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 Feb 2023 17:46:51 +0100 Subject: [PATCH 254/606] org: do not require org-install org-install has been obsolete for a long time, and has been removed from Org 9.6. * doc/org/init.el.in: Remove org-install. --- doc/org/init.el.in | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 9f589bb35..5e51c7250 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -51,7 +51,6 @@ (package-install ess))))) (require 'ox-publish) -(require 'org-install) (require 'hoa-mode) ; See https://github.com/emacs-ess/ESS/issues/1052 From 8a5b86521cd80c3542ea8d787ed2f9089dec028c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 Feb 2023 17:48:49 +0100 Subject: [PATCH 255/606] * NEWS: Remove duplicate entries. --- NEWS | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/NEWS b/NEWS index c3d646fe1..3cfa6edc2 100644 --- a/NEWS +++ b/NEWS @@ -18,28 +18,6 @@ New in spot 2.11.4.dev (not yet released) - spot.acd() no longer depends on jQuery for interactivity. - Bug fixes: - - - When merging initial states from state-based automata with - multiple initial states (because Spot supports only one), the HOA - parser could break state-based acceptance. (Issue #522.) - - - autfilt --highlight-word refused to work on automata with Fin - acceptance for historical reason, but the cose is perfectly able - to handle this now. (Issue #523.) - - - delay_branching_here(), a new optimization of Spot 2.11 had an - incorrectly handling of states without successors, causing some - segfaults. (Issue #524.) - - - Running delay_branching_here() on state-based automata (this was not - done in Spot so far) may require the output to use transition-based - acceptance. (Issue #525.) - - - to_finite(), introduce in 2.11, had a bug that could break the - completeness of automata and trigger an exception from the HOA - printer. (Issue #526.) - New in spot 2.11.4 (2023-02-10) Python: From 66839b1a2920ef6f219e248beb5660ca24ef9491 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Feb 2023 11:53:07 +0100 Subject: [PATCH 256/606] bdd_to_formula: add CNF variant * spot/twa/formula2bdd.hh, spot/twa/formula2bdd.cc (bdd_to_cnf_formula): New function. * python/spot/__init__.py: Add a default dictionary for convenience. * tests/python/bdditer.py: Add test cases. * NEWS: Mention it. --- NEWS | 3 ++ python/spot/__init__.py | 8 ++++-- spot/twa/formula2bdd.cc | 32 +++++++++++++++++---- spot/twa/formula2bdd.hh | 23 +++++++++++----- tests/python/bdditer.py | 61 +++++++++++++++++++++++++++++++++++++++-- 5 files changed, 111 insertions(+), 16 deletions(-) diff --git a/NEWS b/NEWS index 3cfa6edc2..86fd461e1 100644 --- a/NEWS +++ b/NEWS @@ -14,6 +14,9 @@ New in spot 2.11.4.dev (not yet released) supports only one): it now reuse the edges leaving initial states without incoming transitions. + - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() + that converts a BDD into a CNF instead of a DNF. + Python: - spot.acd() no longer depends on jQuery for interactivity. 
diff --git a/python/spot/__init__.py b/python/spot/__init__.py index ef4cd772e..02bdcb1f6 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2014-2022 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) 2014-2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -1347,6 +1347,10 @@ def bdd_to_formula(b, dic=_bdd_dict): from spot.impl import bdd_to_formula as bf return bf(b, dic) +def bdd_to_cnf_formula(b, dic=_bdd_dict): + from spot.impl import bdd_to_cnf_formula as bf + return bf(b, dic) + def language_containment_checker(dic=_bdd_dict): from spot.impl import language_containment_checker as c diff --git a/spot/twa/formula2bdd.cc b/spot/twa/formula2bdd.cc index 7596c0759..15434395f 100644 --- a/spot/twa/formula2bdd.cc +++ b/spot/twa/formula2bdd.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2009-2019, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -30,11 +30,14 @@ namespace spot namespace { // Convert a BDD which is known to be a conjonction into a formula. + // If dual is true, dualize the result, i.e., negate literals, and + // exchange ∧ and ∨. + template static formula conj_to_formula(bdd b, const bdd_dict_ptr d) { if (b == bddfalse) - return formula::ff(); + return dual ? formula::tt() : formula::ff(); std::vector v; while (b != bddtrue) { @@ -49,11 +52,14 @@ namespace spot bdd high = bdd_high(b); if (high == bddfalse) { - res = formula::Not(res); + if (!dual) + res = formula::Not(res); b = bdd_low(b); } else { + if (dual) + res = formula::Not(res); // If bdd_low is not false, then b was not a conjunction. assert(bdd_low(b) == bddfalse); b = high; @@ -61,7 +67,7 @@ namespace spot assert(b != bddfalse); v.emplace_back(res); } - return formula::And(v); + return dual ? formula::Or(v) : formula::And(v); } } // anonymous @@ -143,7 +149,23 @@ namespace spot minato_isop isop(f); bdd cube; while ((cube = isop.next()) != bddfalse) - v.emplace_back(conj_to_formula(cube, d)); + v.emplace_back(conj_to_formula(cube, d)); return formula::Or(std::move(v)); } + + formula + bdd_to_cnf_formula(bdd f, const bdd_dict_ptr d) + { + if (f == bddtrue) + return formula::tt(); + + std::vector v; + + minato_isop isop(!f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_to_formula(cube, d)); + return formula::And(std::move(v)); + } + } diff --git a/spot/twa/formula2bdd.hh b/spot/twa/formula2bdd.hh index 4d5c81a60..a84d27996 100644 --- a/spot/twa/formula2bdd.hh +++ b/spot/twa/formula2bdd.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et +// Copyright (C) 2012-2015, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -52,12 +52,21 @@ namespace spot /// \brief Convert a BDD into a formula. /// - /// Format the BDD as an irredundant sum of product (see the - /// minato_isop class for details) and map the BDD variables back - /// into their atomic propositions. 
This works only for Boolean - /// formulas, and all the BDD variables used in \a f should have - /// been registered in \a d. Although the result has type - /// formula, it obviously does not use any temporal operator. + /// Format the BDD as a Boolean spot::formula object. This works only + /// for Boolean formulas, and all the BDD variables used in \a f + /// should have been registered in \a d. Although the result has + /// type formula, it obviously does not use any temporal operator. + /// + /// The bdd_to_formula() version produces an irredundant sum of + /// product (see the minato_isop class for details) and map the BDD + /// variables back into their atomic propositions. + /// + /// The bdd_to_cnf_formula() version produces an irredundant product of + /// sum, using the dual construction. + /// @{ SPOT_API formula bdd_to_formula(bdd f, const bdd_dict_ptr d); + SPOT_API + formula bdd_to_cnf_formula(bdd f, const bdd_dict_ptr d); + /// @} } diff --git a/tests/python/bdditer.py b/tests/python/bdditer.py index 95cc441b3..4a2afeea1 100644 --- a/tests/python/bdditer.py +++ b/tests/python/bdditer.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021, 2022 Laboratoire de Recherche et +# Copyright (C) 2017, 2018, 2021, 2022, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -27,6 +27,19 @@ import sys from unittest import TestCase tc = TestCase() +# CPython use reference counting, so that automata are destructed +# when we expect them to be. However other implementations like +# PyPy may call destructors latter, causing different output. +from platform import python_implementation +if python_implementation() == 'CPython': + def gcollect(): + pass +else: + import gc + def gcollect(): + gc.collect() + + run = spot.translate('a & !b').accepting_run() b = run.prefix[0].label c = buddy.bdd_satone(b) @@ -43,12 +56,15 @@ while c != buddy.bddtrue: c = h tc.assertEqual(res, [0, -1]) +del res res2 = [] for i in run.aut.ap(): res2.append((str(i), run.aut.register_ap(i))) tc.assertEqual(str(res2), "[('a', 0), ('b', 1)]") - +del res2 +del c +gcollect() f = spot.bdd_to_formula(b) tc.assertTrue(f._is(spot.op_And)) @@ -56,9 +72,50 @@ tc.assertTrue(f[0]._is(spot.op_ap)) tc.assertTrue(f[1]._is(spot.op_Not)) tc.assertTrue(f[1][0]._is(spot.op_ap)) tc.assertEqual(str(f), 'a & !b') +del f +gcollect() try: f = spot.bdd_to_formula(b, spot.make_bdd_dict()) sys.exit(2) except RuntimeError as e: tc.assertIn("not in the dictionary", str(e)) + +f = spot.bdd_to_cnf_formula(b) +tc.assertEqual(str(f), 'a & !b') + +del run +del f + +gcollect() + +f = spot.bdd_to_cnf_formula(buddy.bddtrue) +tc.assertEqual(str(f), '1') +del f +gcollect() + +f = spot.bdd_to_cnf_formula(buddy.bddfalse) +tc.assertEqual(str(f), '0') +del f +gcollect() + +aut = spot.translate('(a & b) <-> c') +# With pypy, running GC here will destroy the translator object used +# by translate(). That object has temporary automata that reference +# the BDDs variables and those affect the order in which the +# bdd_to_formula() result is object is presented. The different order +# is not wrong, but it makes it diffuclt to write tests. 
+gcollect() + +for e in aut.out(aut.get_init_state_number()): + b = e.cond + break + +f1 = spot.bdd_to_formula(b) +tc.assertEqual(str(f1), '(!a & !c) | (a & b & c) | (!b & !c)') +f2 = spot.bdd_to_cnf_formula(b) +tc.assertEqual(str(f2), '(a | !c) & (!a | !b | c) & (b | !c)') + +b1 = spot.formula_to_bdd(f1, spot._bdd_dict, aut) +b2 = spot.formula_to_bdd(f2, spot._bdd_dict, aut) +tc.assertEqual(b1, b2) From f117159ec416d93a63efcc4e64edd151d44bba8b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Feb 2023 12:02:06 +0100 Subject: [PATCH 257/606] * doc/org/tut03.org: Typos. --- doc/org/tut03.org | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/org/tut03.org b/doc/org/tut03.org index b48366a82..c70a3dab3 100644 --- a/doc/org/tut03.org +++ b/doc/org/tut03.org @@ -81,7 +81,7 @@ simplifications called /trivial identities/. For instance =formula::F(formula::X(formula::tt()))= will return the same formula as =formula::tt()=. These simplifications are those that involve the true and false constants, impotence (=F(F(e))=F(e)=), involutions -(=Not(Not(e)=e=), associativity +(=Not(Not(e))=e=), associativity (=And({And({e1,e2},e3})=And({e1,e2,e3})=). See [[https://spot.lrde.epita.fr/tl.pdf][tl.pdf]] for a list of these /trivial identities/. @@ -113,7 +113,7 @@ detail of the top-level operator in the formula. std::cout << f << '\n'; - // kindstar() prints the name of the operator + // kindstr() prints the name of the operator // size() return the number of operands of the operators std::cout << f.kindstr() << ", " << f.size() << " children\n"; // operator[] accesses each operand @@ -157,7 +157,7 @@ The Python equivalent is similar: print(f) - # kindstar() prints the name of the operator + # kindstr() prints the name of the operator # size() return the number of operands of the operators print("{}, {} children".format(f.kindstr(), f.size())) # [] accesses each operand From 146942953aecba4deaabd6091960b162a96459a9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 Mar 2023 00:14:18 +0100 Subject: [PATCH 258/606] org: fix rendering of R examples for recent ESS/Org * doc/org/.dir-locals.el.in, doc/org/init.el.in: Newer ESS version need to be taught to use default-directory instead of the project directory. * doc/org/ltlcross.org: Use "result file" to render the output. --- doc/org/.dir-locals.el.in | 4 +++- doc/org/init.el.in | 4 +++- doc/org/ltlcross.org | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/org/.dir-locals.el.in b/doc/org/.dir-locals.el.in index cba9892fb..80c0a1385 100644 --- a/doc/org/.dir-locals.el.in +++ b/doc/org/.dir-locals.el.in @@ -27,6 +27,9 @@ (setenv "SPOT_DOTEXTRA" "node[fontsize=12] fontsize=12 stylesheet=\"spot.css\" edge[arrowhead=vee, arrowsize=.7, fontsize=12]") (setq org-babel-temporary-directory "@abs_top_builddir@/doc/org/tmp") (make-directory org-babel-temporary-directory t) + ; has to be set globally, not buffer-local + (setq ess-ask-for-ess-directory nil) + (setq ess-startup-directory 'default-directory) (org-babel-do-load-languages 'org-babel-load-languages `((,(if (version< org-version "8.3") 'sh 'shell) . t) (python . t) @@ -39,7 +42,6 @@ (org-babel-python-command . "@PYTHON@") (org-babel-C++-compiler . "./g++wrap") (shell-file-name . "@SHELL@") - (ess-ask-for-ess-directory . nil) (org-export-html-postamble . nil) (org-html-table-header-tags "
" . "
") diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 5e51c7250..4258a95f7 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -88,7 +88,9 @@ (setq org-babel-C++-compiler "./g++wrap") (setq shell-file-name "@SHELL@") (setq ess-ask-for-ess-directory nil) - +; setting ess-startup-directory to 'default-directory is enough with +; newer ESS version (after Fev 2022) but does not work with older ones. +(setq ess-startup-directory "@abs_top_builddir@/doc/org") (setq org-babel-default-header-args:plantuml '((:results . "file") (:exports . "results") diff --git a/doc/org/ltlcross.org b/doc/org/ltlcross.org index 0fdebae1f..36cce5cbb 100644 --- a/doc/org/ltlcross.org +++ b/doc/org/ltlcross.org @@ -924,7 +924,7 @@ compare the number of states produced by the two configurations of =ltl2tgba= for each formula, we just need to plot column =dt2$state.small= against =dt2$state.deter=. -#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r.svg library(ggplot2) ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() @@ -937,7 +937,7 @@ ggplot(dt2, aes(x=states.small, y=states.deter)) + We should probably print the formulas for the cases where the two sizes differ. -#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r2.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r2.svg ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() + geom_text(data=subset(dt2, states.small != states.deter), From e7e23d5ffcafe3ab362faa58b8864b283c5c3681 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber Date: Thu, 9 Mar 2023 22:41:44 +0100 Subject: [PATCH 259/606] Adding option to solve parity games globally Parity games have been solved semi-locally so far. We deduced a strategy for the reachable part of the arena This lead to some inconsistencies when not all state were rachable. Now you can chose to solve parity games truely globally. * spot/twaalgos/game.cc, spot/twaalgos/game.hh: Here * tests/python/games.ipynb: Test --- spot/twaalgos/game.cc | 164 ++++-- spot/twaalgos/game.hh | 8 +- tests/python/games.ipynb | 1146 +++++++++++++++++++++++++++++++++++++- 3 files changed, 1258 insertions(+), 60 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 17f94a7e4..faf29b2ba 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -149,73 +149,133 @@ namespace spot { public: - bool solve(const twa_graph_ptr& arena) + bool solve(const twa_graph_ptr& arena, bool solve_globally) { // todo check if reordering states according to scc is worth it set_up(arena); // Start recursive zielonka in a bottom-up fashion on each scc subgame_info_t subgame_info; - for (c_scc_idx_ = 0; c_scc_idx_ < info_->scc_count(); ++c_scc_idx_) + while (true) { - // Testing - // Make sure that every state that has a winner also - // belongs to a subgame - assert([&]() - { - for (unsigned i = 0; i < arena_->num_states(); ++i) - if (w_.has_winner_[i] - && (subgame_[i] == unseen_mark)) - return false; - return true; - }()); - // Useless SCCs are winning for player 0. 
- if (!info_->is_useful_scc(c_scc_idx_)) + // If we solve globally, + auto maybe_useful = [&](unsigned scc_idx){ + if (info_->is_useful_scc(scc_idx)) + return true; + if (!solve_globally) + return false; + // Check if we have an out-edge to a winning state + // in another scc + return std::any_of( + info_->states_of(scc_idx).begin(), + info_->states_of(scc_idx).end(), + [&](unsigned s){ + return std::any_of( + arena->out(s).begin(), + arena->out(s).end(), + [&](const auto& e){ + assert ((subgame_[e.dst] == unseen_mark) + || (info_->scc_of(e.dst) != scc_idx)); + return (info_->scc_of(e.dst) != scc_idx) + && w_.winner(e.dst); + }); + }); + }; + + for (c_scc_idx_ = 0; c_scc_idx_ < info_->scc_count(); ++c_scc_idx_) { - // This scc also gets its own subgame - ++rd_; - for (unsigned v: c_states()) - { - subgame_[v] = rd_; - w_.set(v, false); - // The strategy for player 0 is to take the first - // available edge. - if ((*owner_ptr_)[v] == false) - for (const auto &e : arena_->out(v)) + // Testing + // Make sure that every state that has a winner also + // belongs to a subgame + assert([&]() { - s_[v] = arena_->edge_number(e); - break; - } - } - continue; - } - // Convert transitions leaving edges to self-loops - // and check if trivially solvable - subgame_info = fix_scc(); - // If empty, the scc was trivially solved - if (!subgame_info.is_empty) - { - // Check for special cases - if (subgame_info.is_one_parity) - one_par_subgame_solver(subgame_info, max_abs_par_); - else + for (unsigned i = 0; i < arena_->num_states(); ++i) + if (w_.has_winner_[i] + && (subgame_[i] == unseen_mark)) + return false; + return true; + }()); + // Useless SCCs are winning for player 0. + if (!maybe_useful(c_scc_idx_)) { - // "Regular" solver - max_abs_par_ = *subgame_info.all_parities.begin(); - w_stack_.emplace_back(0, 0, - min_par_graph_, max_abs_par_); - zielonka(); + // This scc also gets its own subgame + ++rd_; + for (unsigned v: c_states()) + { + subgame_[v] = rd_; + w_.set(v, false); + // The strategy for player 0 is to take the first + // available edge. + if ((*owner_ptr_)[v] == false) + for (const auto &e : arena_->out(v)) + { + s_[v] = arena_->edge_number(e); + break; + } + } + continue; + } + // Convert transitions leaving edges to self-loops + // and check if trivially solvable + subgame_info = fix_scc(); + // If empty, the scc was trivially solved + if (!subgame_info.is_empty) + { + // Check for special cases + if (subgame_info.is_one_parity) + one_par_subgame_solver(subgame_info, max_abs_par_); + else + { + // "Regular" solver + max_abs_par_ = *subgame_info.all_parities.begin(); + w_stack_.emplace_back(0, 0, + min_par_graph_, max_abs_par_); + zielonka(); + } } } + if (!solve_globally) + break; + + // Update the scc_info and continue + unsigned new_init + = std::distance(subgame_.begin(), + std::find(subgame_.begin(), subgame_.end(), + unseen_mark)); + if (new_init == arena->num_states()) + break; // All states have been solved + // Compute new sccs + scc_info::edge_filter ef + = [](const twa_graph::edge_storage_t&, + unsigned dst, void* subgame){ + const auto& sg = *static_cast*>(subgame); + return sg[dst] == unseen_mark ? 
+ scc_info::edge_filter_choice::keep : + scc_info::edge_filter_choice::ignore; + }; + info_ = std::make_unique(arena, new_init, ef, &subgame_); } - // Every state needs a winner - assert(std::all_of(w_.has_winner_.cbegin(), w_.has_winner_.cend(), - [](bool b) - { return b; })); + // Every state needs a winner (solve_globally) + // Or only those reachable + assert((solve_globally + && std::all_of(w_.has_winner_.cbegin(), w_.has_winner_.cend(), + [](bool b) { return b; })) + || (!solve_globally + && [&](){ + for (unsigned s = 0; s < arena->num_states(); ++s) + { + if ((info_->scc_of(s) != -1u) + && !w_.has_winner_.at(s)) + return false; + } + return true; + }())); // Only the states owned by the winner need a strategy assert([&]() { for (unsigned v = 0; v < arena_->num_states(); ++v) { + if (!solve_globally && (info_->scc_of(v) == -1u)) + continue; if (((*owner_ptr_)[v] == w_.winner(v)) && ((s_[v] <= 0) || (s_[v] > arena_->num_edges()))) return false; @@ -817,10 +877,10 @@ namespace spot } // anonymous - bool solve_parity_game(const twa_graph_ptr& arena) + bool solve_parity_game(const twa_graph_ptr& arena, bool solve_globally) { parity_game pg; - return pg.solve(arena); + return pg.solve(arena, solve_globally); } bool solve_game(const twa_graph_ptr& arena) diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index dbaccce75..d4937e46c 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -70,13 +70,19 @@ namespace spot /// This computes the winning strategy and winning region using /// Zielonka's recursive algorithm. \cite zielonka.98.tcs /// + /// By default only a 'local' strategy is computed: + /// Only the part of the arena reachable from the init state is considered. + /// If you want to compute a strategy for ALL states, set + /// \a solve_globally to true + /// /// Also includes some inspiration from Oink. /// \cite vandijk.18.tacas /// /// Returns the player winning in the initial state, and sets /// the state-winner and strategy named properties. SPOT_API - bool solve_parity_game(const twa_graph_ptr& arena); + bool solve_parity_game(const twa_graph_ptr& arena, + bool solve_globally = false); /// \ingroup games /// \brief Solve a safety game. diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index a6168b07e..9ec8bb76e 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -897,7 +897,7 @@ "\n" ], "text/plain": [ - " *' at 0x7feee9b0ebb0> >" + " *' at 0x7fcbe436f840> >" ] }, "execution_count": 8, @@ -1224,7 +1224,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fef001c87b0> >" + " *' at 0x7fcbe436e9a0> >" ] }, "execution_count": 11, @@ -1663,11 +1663,1143 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], - "source": [] + "source": [ + "# Global vs local solver\n", + "\n", + "The parity game solver now supports \"local\" and global solutions.\n", + "\n", + "- \"Local\" solutions are the ones computed so far. A strategy is only computed for the part of the automaton that is rachable from the initial state\n", + "- Global solutions can now be obtained by setting the argument \"solve_globally\" to true. 
In this case a strategy will be computed even for states not reachable in the original automaton.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "arena = spot.make_twa_graph()\n", + "\n", + "arena.new_states(3*7)\n", + "arena.set_buchi()\n", + "\n", + "edges = [(0,1), (0,2), (1,3), (2,3), (3,4), (4,0), (5,0), (5,6), (6,5)]\n", + "\n", + "for src, dst in 
edges:\n", + " arena.new_edge(src, dst, bddtrue, [0] if src == 4 else [])\n", + " arena.new_edge(src + 7, dst + 7, bddtrue, [0] if src == 4 else [])\n", + " arena.new_edge(src + 14, dst + 14, bddtrue, [0] if src == 6 else [])\n", + "\n", + "arena.set_state_players(3*[False, True, True, False, True, True, False])\n", + "arena" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(0, 7, 10, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], 
+ "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 1) Solving the game locally\n", + "# Unreachable parts are ignored, all of them are \"won\" by the env,\n", + "# the associated strategy is the 0 edges indicating no strategy\n", + "spot.solve_parity_game(arena)\n", + "spot.highlight_strategy(arena)\n", + "print(arena.get_strategy())\n", + "arena" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(0, 7, 10, 0, 16, 19, 0, 0, 8, 11, 0, 17, 20, 0, 3, 0, 0, 15, 0, 24, 0)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", 
+ "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 1) Solving the game globally\n", + "# The whole automaton is considered in this case\n", + "spot.solve_parity_game(arena, True)\n", + "spot.highlight_strategy(arena)\n", + "print(arena.get_strategy())\n", + "arena" + ] } ], "metadata": { @@ -1686,7 +2818,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.10.7" } }, "nbformat": 4, From 7a91cf78ec4eea3dda0857dfd0de7f677c06599a Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber Date: Wed, 22 Mar 2023 11:00:48 +0100 Subject: [PATCH 260/606] Ignore ltargz.m4 * .gitignore: Ignore it * m4/ltargz.m4: Remove it --- .gitignore | 1 + m4/ltargz.m4 | 74 ---------------------------------------------------- 2 files changed, 1 insertion(+), 74 deletions(-) delete mode 100644 m4/ltargz.m4 diff --git a/.gitignore b/.gitignore index 7392a79db..155a9b5e7 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ configure config.log config.status aclocal.m4 +ltargz.m4 autom4te.cache libtool auto diff --git a/m4/ltargz.m4 b/m4/ltargz.m4 deleted file mode 100644 index 0908d90b9..000000000 --- a/m4/ltargz.m4 +++ /dev/null @@ -1,74 +0,0 @@ -# Portability macros for glibc argz. -*- Autoconf -*- -# -# Copyright (C) 2004-2007, 2011-2015 Free Software Foundation, Inc. -# Written by Gary V. Vaughan -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 1 ltargz.m4 - -AC_DEFUN([LT_FUNC_ARGZ], [ -AC_CHECK_HEADERS([argz.h], [], [], [AC_INCLUDES_DEFAULT]) - -AC_CHECK_TYPES([error_t], - [], - [AC_DEFINE([error_t], [int], - [Define to a type to use for 'error_t' if it is not otherwise available.]) - AC_DEFINE([__error_t_defined], [1], [Define so that glibc/gnulib argp.h - does not typedef error_t.])], - [#if defined(HAVE_ARGZ_H) -# include -#endif]) - -LT_ARGZ_H= -AC_CHECK_FUNCS([argz_add argz_append argz_count argz_create_sep argz_insert \ - argz_next argz_stringify], [], [LT_ARGZ_H=lt__argz.h; AC_LIBOBJ([lt__argz])]) - -dnl if have system argz functions, allow forced use of -dnl libltdl-supplied implementation (and default to do so -dnl on "known bad" systems). Could use a runtime check, but -dnl (a) detecting malloc issues is notoriously unreliable -dnl (b) only known system that declares argz functions, -dnl provides them, yet they are broken, is cygwin -dnl releases prior to 16-Mar-2007 (1.5.24 and earlier) -dnl So, it's more straightforward simply to special case -dnl this for known bad systems. -AS_IF([test -z "$LT_ARGZ_H"], - [AC_CACHE_CHECK( - [if argz actually works], - [lt_cv_sys_argz_works], - [[case $host_os in #( - *cygwin*) - lt_cv_sys_argz_works=no - if test no != "$cross_compiling"; then - lt_cv_sys_argz_works="guessing no" - else - lt_sed_extract_leading_digits='s/^\([0-9\.]*\).*/\1/' - save_IFS=$IFS - IFS=-. 
- set x `uname -r | sed -e "$lt_sed_extract_leading_digits"` - IFS=$save_IFS - lt_os_major=${2-0} - lt_os_minor=${3-0} - lt_os_micro=${4-0} - if test 1 -lt "$lt_os_major" \ - || { test 1 -eq "$lt_os_major" \ - && { test 5 -lt "$lt_os_minor" \ - || { test 5 -eq "$lt_os_minor" \ - && test 24 -lt "$lt_os_micro"; }; }; }; then - lt_cv_sys_argz_works=yes - fi - fi - ;; #( - *) lt_cv_sys_argz_works=yes ;; - esac]]) - AS_IF([test yes = "$lt_cv_sys_argz_works"], - [AC_DEFINE([HAVE_WORKING_ARGZ], 1, - [This value is set to 1 to indicate that the system argz facility works])], - [LT_ARGZ_H=lt__argz.h - AC_LIBOBJ([lt__argz])])]) - -AC_SUBST([LT_ARGZ_H]) -]) From 7a97a6080ce0f92f9892cec8c2e72fceda40dbf6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 24 Mar 2023 13:52:37 +0100 Subject: [PATCH 261/606] * doc/tl/tl.tex: Typo in firstmatch semantics. --- doc/tl/tl.tex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 288a5da0c..e7c283bc6 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -701,7 +701,7 @@ $a$ is an atomic proposition. \VDash f\FSTAR{\mvar{i-1}..}))\\ \end{cases}\\ \sigma\VDash \FIRSTMATCH\code(f\code) & \iff - (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k}\nVDash f) + (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k-1}\nVDash f) \end{align*}} Notes: From 039cd756d5b18fbdb6f510395222cd648f3f2bc8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 29 Mar 2023 16:20:51 +0200 Subject: [PATCH 262/606] fix spurious test-case failure when Python is not installed Fixes #530. * tests/core/ltlsynt2.test: Skip when PYTHON is empty. * NEWS: Mention the fix. --- NEWS | 5 +++-- tests/core/ltlsynt2.test | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 86fd461e1..e3de94172 100644 --- a/NEWS +++ b/NEWS @@ -17,9 +17,10 @@ New in spot 2.11.4.dev (not yet released) - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() that converts a BDD into a CNF instead of a DNF. - Python: + Bug fixes: - - spot.acd() no longer depends on jQuery for interactivity. + - Fix spurious failure of ltlsynt2.test when Python is not installed + (issue #530). New in spot 2.11.4 (2023-02-10) diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index dbb754d92..546cb0d27 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -36,6 +36,8 @@ ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q --csv=out.csv &&\ exit 2 test $? -eq 1 || exit 2 +test -z "$PYTHON" && exit 77 + cat >test.py < Date: Wed, 29 Mar 2023 17:01:13 +0200 Subject: [PATCH 263/606] correctly fails if emacs needed and missing Fixes #528. * configure.ac: Define EMACS using tools/missing. * NEWS: Mention the bug. --- NEWS | 3 +++ configure.ac | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index e3de94172..37a8ebf88 100644 --- a/NEWS +++ b/NEWS @@ -22,6 +22,9 @@ New in spot 2.11.4.dev (not yet released) - Fix spurious failure of ltlsynt2.test when Python is not installed (issue #530). + - Building from the git repository would fail to report a missing + emacs (issue #528). 
+ New in spot 2.11.4 (2023-02-10) Python: diff --git a/configure.ac b/configure.ac index e47e2eb29..772b4c24a 100644 --- a/configure.ac +++ b/configure.ac @@ -217,7 +217,7 @@ AC_CHECK_PROG([LTL3BA], [ltl3ba], [ltl3ba]) AC_CHECK_PROG([PERL], [perl], [perl]) AC_CHECK_PROG([SPIN], [spin], [spin]) AC_CHECK_PROG([LBTT], [lbtt], [lbtt]) -AC_CHECK_PROG([EMACS], [emacs], [emacs]) +AM_MISSING_PROG([EMACS], [emacs]) AC_CHECK_PROGS([IPYTHON], [ipython3 ipython], [ipython]) AC_CHECK_PROGS([JUPYTER], [jupyter], [jupyter]) AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate]) From d152b3a316171e02c256d258b4eb1c73b63db9ec Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Thu, 30 Mar 2023 14:32:26 +0200 Subject: [PATCH 264/606] Fix parity solver if edgevector is not contiguous Validity of strategies was tested relying on num_edges() which might be smaller than the edge_number * spot/twaalgos/game.cc: Fix here * tests/python/game.py: Test here --- spot/twaalgos/game.cc | 6 +- tests/python/game.py | 139 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index faf29b2ba..add0926fe 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -272,12 +272,16 @@ namespace spot // Only the states owned by the winner need a strategy assert([&]() { + std::unordered_set valid_strat; + for (const auto& e : arena_->edges()) + valid_strat.insert(arena_->edge_number(e)); + for (unsigned v = 0; v < arena_->num_states(); ++v) { if (!solve_globally && (info_->scc_of(v) == -1u)) continue; if (((*owner_ptr_)[v] == w_.winner(v)) - && ((s_[v] <= 0) || (s_[v] > arena_->num_edges()))) + && (valid_strat.count(s_.at(v)) == 0)) return false; } return true; diff --git a/tests/python/game.py b/tests/python/game.py index a7080b696..857390335 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -384,4 +384,141 @@ spot.solve_game(aut) S1 = list(spot.get_strategy(aut)) spot.solve_game(aut) S2 = list(spot.get_strategy(aut)) -tc.assertEqual(S1, S2) \ No newline at end of file +tc.assertEqual(S1, S2) + + +# Finite games +alive = "__alive__" +def finite_existential(auts): + # 1 Accepting state -> selfloop + # 2 Prune + acc_state = set() + sp = list(spot.get_state_players(auts)) + for e in auts.edges(): + if e.acc: + acc_state.add(e.src) + for s in acc_state: + e_kill = auts.out_iteraser(s) + while (e_kill): + e_kill.erase() + for s in acc_state: + sprime = auts.new_state() + sp.append(not sp[s]) + auts.new_edge(s, sprime, buddy.bddtrue, [0]) + auts.new_edge(sprime, s, buddy.bddtrue, [0]) + spot.set_state_players(auts, sp) + auts.purge_dead_states() + spot.alternate_players(auts, False, False) + return auts + +def is_input_complete(auts): + sp = spot.get_state_players(auts) + for s in range(auts.num_states()): + if sp[s]: + continue # Player + cumul = buddy.bddfalse + for e in auts.out(s): + cumul |= e.cond + if cumul != buddy.bddtrue: + return False + + return True + +def synt_from_ltlf(f:str, outs): + ff = spot.from_ltlf(f, alive) + aut = ff.translate("buchi", "sbacc") + outbdd = buddy.bddtrue + for out in outs: + outbdd &= buddy.bdd_ithvar(aut.register_ap(out)) + alive_bdd = buddy.bdd_ithvar(aut.register_ap(alive)) + auts = spot.split_2step(aut, outbdd & alive_bdd, False) + auts = spot.to_finite(auts, alive) + spot.alternate_players(auts, False, False) + spot.set_synthesis_outputs(auts, outbdd) + if not is_input_complete(auts): + print("Not synthesizable") + return None + auts = 
finite_existential(auts) + + return auts + +def synt_ltlf(f:str, outs, res:str = "aut"): + auts = synt_from_ltlf(f, outs) + + succ = spot.solve_parity_game(auts) + if not succ: + if res == "aut": + return False, auts + else: + return False, None + + mealy_cc = spot.solved_game_to_split_mealy(auts) + + if res == "aut": + return True, mealy_cc + elif res == "aig": + return True, spot.mealy_machine_to_aig(mealy_cc, "isop") + else: + raise RuntimeError("Unknown option") + + +sink_player = None + +def negate_ltlf(f:str, outs, opt = "buchi"): + + global sink_player + sink_player = None + + aut = synt_from_ltlf(f, outs) + # Implies input completeness + # We need output completeness + acc = [] + + sp = list(spot.get_state_players(aut)) + + def get_sink(): + global sink_player + if sink_player is None: + sink_player = aut.new_states(2) + aut.new_edge(sink_player, sink_player + 1, buddy.bddtrue, acc) + aut.new_edge(sink_player + 1, sink_player, buddy.bddtrue, acc) + sp.append(False) + sp.append(True) + spot.set_state_players(aut, sp) + return sink_player + + for s in range(aut.num_states()): + if not sp[s]: + continue + rem = buddy.bddtrue + for e in aut.out(s): + rem -= e.cond + if rem != buddy.bddfalse: + aut.new_edge(s, get_sink(), rem) + + # Better to invert colors or condition? + if opt == "buchi": + for e in aut.edges(): + if e.acc: + e.acc = spot.mark_t() + else: + e.acc = spot.mark_t([0]) + elif opt == "cobuchi": + aut.set_co_buchi() + else: + raise RuntimeError("Unknown opt") + return aut + +# Game where the edge_vector is larger +# than the number of transitions +f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ + && (! (grant_1))))) -> (X (idle))))) && (G ((X (! (grant_0))) \ + || (X (((! (request_0)) && (! (idle))) U ((! (request_0)) \ + && (idle))))))) -> (((G (((((X (((! (grant_0)) && (true)) \ + || ((true) && (! (grant_1))))) && ((X (grant_0)) -> (request_0))) \ + && ((X (grant_1)) -> (request_1))) && ((request_0) -> (grant_1))) \ + && ((! (idle)) -> (X ((! (grant_0)) && (! (grant_1))))))) \ + && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ + && (! (F (G ((request_1) && (X (! (grant_1)))))))))" +outs = ["grant_0", "grant1"] +tc.assertEqual(synt_ltlf(f1, outs)[0], False) \ No newline at end of file From ae10361bddb9a3a858272a272be7ccd34800c677 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 18 Apr 2023 14:48:10 +0200 Subject: [PATCH 265/606] twa_run: let as_twa work on the result of intersecting_run Reported by Philipp Schlehuber-Caissier. * spot/twaalgos/emptiness.cc (as_twa): Simplify considerably. Don't try to replay the run, and don't merge identical states. * spot/twaalgos/word.hh, spot/twaalgos/emptiness.hh: Improve documentation. * tests/python/intrun.py: Add a test case. * NEWS: Mention the bug. --- NEWS | 5 +++ spot/twaalgos/emptiness.cc | 75 +++++++++++--------------------------- spot/twaalgos/emptiness.hh | 6 +-- spot/twaalgos/word.hh | 7 +++- tests/python/intrun.py | 40 +++++++++++++++++++- 5 files changed, 73 insertions(+), 60 deletions(-) diff --git a/NEWS b/NEWS index 37a8ebf88..7fbb4f66a 100644 --- a/NEWS +++ b/NEWS @@ -25,6 +25,11 @@ New in spot 2.11.4.dev (not yet released) - Building from the git repository would fail to report a missing emacs (issue #528). + - Fix exception raised by aut1.intersecting_run(aut2).as_twa() + because the run did not match transitions present in aut1 + verbatim. We also changed the behavior of as_twa() to not merge + identical states. 
+ New in spot 2.11.4 (2023-02-10) Python: diff --git a/spot/twaalgos/emptiness.cc b/spot/twaalgos/emptiness.cc index fd3319141..ef8890f95 100644 --- a/spot/twaalgos/emptiness.cc +++ b/spot/twaalgos/emptiness.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -570,7 +570,7 @@ namespace spot if (debug) os << "ERROR: First state of run (in " << in << "): " << aut->format_state(i->s) - << "\ndoes not match initial state of automata: " + << "\ndoes not match initial state of automaton: " << aut->format_state(s) << '\n'; s->destroy(); return false; @@ -802,38 +802,38 @@ namespace spot res->set_named_prop("state-names", names); } - const state* s = aut->get_init_state(); unsigned src; unsigned dst; const twa_run::steps* l; - acc_cond::mark_t seen_acc = {}; - - state_map seen; + unsigned cycle_entry = 0; if (prefix.empty()) - l = &cycle; + l = &cycle; else - l = &prefix; + l = &prefix; twa_run::steps::const_iterator i = l->begin(); - assert(s->compare(i->s) == 0); +#if NDEBUG + const state* init = aut->get_init_state(); + assert(init->compare(i->s) == 0); + init->destroy(); +#endif + src = res->new_state(); - seen.emplace(i->s, src); if (names) - names->push_back(aut->format_state(s)); + names->push_back(aut->format_state(i->s)); for (; i != l->end();) { - // expected outgoing transition bdd label = i->label; acc_cond::mark_t acc = i->acc; - // compute the next expected state const state* next; ++i; if (i != l->end()) { + dst = res->new_state(); next = i->s; } else @@ -842,57 +842,24 @@ namespace spot { l = &cycle; i = l->begin(); + cycle_entry = dst = res->new_state(); + } + else + { + dst = cycle_entry; } next = l->begin()->s; } - // browse the actual outgoing transitions and - // look for next; - const state* the_next = nullptr; - for (auto j: aut->succ(s)) + if (names && i != l->end()) { - if (j->cond() != label - || j->acc() != acc) - continue; - - const state* s2 = j->dst(); - if (s2->compare(next) == 0) - { - the_next = s2; - break; - } - s2->destroy(); + assert(dst == names->size()); + names->push_back(aut->format_state(next)); } - s->destroy(); - if (!the_next) - throw std::runtime_error("twa_run::as_twa() unable to replay run"); - s = the_next; - - - auto p = seen.emplace(next, 0); - if (p.second) - { - unsigned ns = res->new_state(); - p.first->second = ns; - if (names) - { - assert(ns == names->size()); - names->push_back(aut->format_state(next)); - } - } - dst = p.first->second; - res->new_edge(src, dst, label, acc); src = dst; - - // Sum acceptance conditions. - if (l == &cycle && i != l->begin()) - seen_acc |= acc; } - s->destroy(); - - assert(aut->acc().accepting(seen_acc)); return res; } diff --git a/spot/twaalgos/emptiness.hh b/spot/twaalgos/emptiness.hh index 47896a1d7..66bf8ca56 100644 --- a/spot/twaalgos/emptiness.hh +++ b/spot/twaalgos/emptiness.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2018, 2020-2021 Laboratoire de +// Copyright (C) 2011, 2013-2018, 2020-2021, 2023 Laboratoire de // Recherche et Developpement de l'Epita (LRDE). 
// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -451,9 +451,9 @@ namespace spot /// Note that this works only if the automaton is a twa_graph_ptr. void highlight(unsigned color); - /// \brief Return a twa_graph_ptr corresponding to \a run + /// \brief Convert the run into a lasso-shaped automaton /// - /// Identical states are merged. + /// This preserves the original acceptance condition. /// /// If \a preserve_names is set, the created states are named /// using the format_state() result from the original state. diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index f6f70fc14..979a4070b 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2013-2016, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -80,6 +80,9 @@ namespace spot /// \brief Convert the twa_word as an automaton. /// + /// Convert the twa_word into a lasso-shapred automaton + /// with "true" acceptance condition. + /// /// This is useful to evaluate a word on an automaton. twa_graph_ptr as_automaton() const; diff --git a/tests/python/intrun.py b/tests/python/intrun.py index e3b708a95..02a7aedd6 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# Copyright (C) 2020, 2022, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -38,3 +38,41 @@ r = b.intersecting_run(spot.complement(a)); c = spot.twa_word(r).as_automaton() tc.assertTrue(c.intersects(b)) tc.assertFalse(c.intersects(a)) + +# The next test came from Philipp Schlehuber-Caissier: running +# as_twa() on a run built from a A.intersecting_run(B) failed to build +# the automaton because it tried to rebuild the run on A and did not +# find transitions matching exactly. Additionally the idea of merging +# states in as_twa() seems to be a way to create some disasters, so we +# removed that too. +a = spot.translate("a"); +b = spot.translate("{a;1;a}"); +r = a.intersecting_run(b) +tc.assertEqual(str(r), """Prefix: + 1 + | a + 0 + | 1 {0} + 0 + | a {0} +Cycle: + 0 + | 1 {0} +""") +tc.assertEqual(r.as_twa().to_str(), """HOA: v1 +States: 4 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0] 1 +State: 1 {0} +[t] 2 +State: 2 {0} +[0] 3 +State: 3 {0} +[t] 3 +--END--""") From 0e54a853104f1124fd7dd60546cbbc14e9985d41 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 18 Apr 2023 15:04:58 +0200 Subject: [PATCH 266/606] powerset: fix segfault when the initial state is a sink Reported by Raven Beutner. * spot/twaalgos/minimize.cc: Improve comment. * spot/twaalgos/powerset.cc: Fix handling of an initial state that is also a sink. * tests/core/wdba2.test: Add test case. * NEWS: Mention the bug. 
--- NEWS | 3 +++ spot/twaalgos/minimize.cc | 6 +++--- spot/twaalgos/powerset.cc | 26 ++++++++++++++------------ tests/core/wdba2.test | 37 +++++++++++++++++++++++++++++++++++-- 4 files changed, 55 insertions(+), 17 deletions(-) diff --git a/NEWS b/NEWS index 7fbb4f66a..32ef6b9ed 100644 --- a/NEWS +++ b/NEWS @@ -30,6 +30,9 @@ New in spot 2.11.4.dev (not yet released) verbatim. We also changed the behavior of as_twa() to not merge identical states. + - Fix segfaults occuring in determinization of 1-state terminal + automata. + New in spot 2.11.4 (2023-02-10) Python: diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 4fd6847b3..1ac961d46 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2010-2020, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -394,8 +394,8 @@ namespace spot else { // Find any accepting sink state, to speed up the - // determinization by merging all states containing a sink - // state. + // determinization by merging all macro-states containing a + // sink state. std::vector acc_sinks; unsigned ns = a->num_states(); if (!a->prop_terminal().is_true()) diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index c5fc07f94..326de7c76 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2011, 2013-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009-2011, 2013-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -217,17 +217,19 @@ namespace spot pm.map_.emplace_back(std::move(ps)); } - { - unsigned init_num = aut->get_init_state_number(); - auto bvi = make_bitvect(ns); - bvi->set(init_num); - power_state ps{init_num}; - unsigned num = res->new_state(); - res->set_init_state(num); - seen[bvi] = num; - assert(pm.map_.size() == num); - pm.map_.emplace_back(std::move(ps)); - toclean.emplace_back(bvi); + // Add the initial state unless it's a sink. + if (unsigned init_num = aut->get_init_state_number(); + !acc_sinks || !acc_sinks->get(init_num)) + { + auto bvi = make_bitvect(ns); + bvi->set(init_num); + power_state ps{init_num}; + unsigned num = res->new_state(); + res->set_init_state(num); + seen[bvi] = num; + assert(pm.map_.size() == num); + pm.map_.emplace_back(std::move(ps)); + toclean.emplace_back(bvi); } // outgoing map diff --git a/tests/core/wdba2.test b/tests/core/wdba2.test index ca49bad94..3850a447a 100755 --- a/tests/core/wdba2.test +++ b/tests/core/wdba2.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2018, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2012, 2014-2015, 2018-2019, 2023 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -82,3 +82,36 @@ EOF autfilt --small --high -C -Hi input > output diff output expected + +# This test comes from a report from Raven Beutner and used to cause a +# segfault. 
+cat >input <output +cat >expected < Date: Tue, 18 Apr 2023 17:35:05 +0200 Subject: [PATCH 267/606] org: replace version references with org-babel blocks This way we have fewer lines to edit multiple when making releases. * doc/org/index.org, doc/org/init.el.in, doc/org/install.org, doc/org/setup.org, doc/org/tools.org: Use org-babel instead of macros for release version and links. --- doc/org/index.org | 2 +- doc/org/init.el.in | 2 +- doc/org/install.org | 4 ++-- doc/org/setup.org | 20 ++++++++++++++++---- doc/org/tools.org | 6 +++--- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/doc/org/index.org b/doc/org/index.org index 9af23dba4..08fa16a3d 100644 --- a/doc/org/index.org +++ b/doc/org/index.org @@ -37,7 +37,7 @@ checking. It has the following notable features: * Latest version -The latest version is *{{{LASTRELEASE}}}* and was released on +The latest version is *call_SPOT_VERSION()* and was released on *{{{LASTDATE}}}*. Please see the [[file:install.org][download and installation instructions]]. * Documentation diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 4258a95f7..c46363096 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -160,7 +160,7 @@ up.html points to index.html, then the result is: (setq body res) (not cmp))) (concat "#+TITLE: " title - "\n#+SETUPFILE: setup.org\n#+HTML_LINK_UP: index.html\n\n" + "\n#+INCLUDE: setup.org\n#+HTML_LINK_UP: index.html\n\n" body))) (setq org-publish-project-alist diff --git a/doc/org/install.org b/doc/org/install.org index dc492af57..b65c02074 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -9,9 +9,9 @@ :CUSTOM_ID: tar :END: -The latest release of Spot is version {{{LASTRELEASE}}}: +The latest release of Spot is version call_SPOT_VERSION() and was released on {{{LASTDATE}}}: -- {{{LASTTARBALL}}} (see also the {{{LASTNEWS}}}) +- call_TARBALL_LINK() (see also the call_NEWS_LINK()) Past releases can be found [[https://www.lrde.epita.fr/dload/spot/][in the same directory]]. 
If you are interested in /future/ releases, you can always peek at the [[https://gitlab.lre.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=make-dist][last diff --git a/doc/org/setup.org b/doc/org/setup.org index 7b6a4fa70..974272774 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,23 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11.4 -#+MACRO: LASTRELEASE 2.11.4 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.3.tar.gz][=spot-2.11.4.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-3/NEWS][summary of the changes]] #+MACRO: LASTDATE 2023-02-10 +#+NAME: SPOT_VERSION +#+BEGIN_SRC python :exports none :results value :wrap org +return "2.11.4" +#+END_SRC + +#+NAME: TARBALL_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + print(f"[[http://www.lrde.epita.fr/dload/spot/spot-{version}.tar.gz][=spot-{version}.tar.gz=]]") +#+END_SRC + +#+NAME: NEWS_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + version = version.replace('.', '-') + print(f"[[https://gitlab.lre.epita.fr/spot/spot/blob/spot-{version}/NEWS][summary of the changes]]") +#+END_SRC + #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] diff --git a/doc/org/tools.org b/doc/org/tools.org index 5227f1b4e..46ca38ccd 100644 --- a/doc/org/tools.org +++ b/doc/org/tools.org @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -#+TITLE: Command-line tools installed by Spot {{{SPOTVERSION}}} -#+DESCRIPTION: List of all the command-line tools installed by Spot {{{SPOTVERSION}}} #+INCLUDE: setup.org +#+TITLE: Command-line tools installed by Spot +#+DESCRIPTION: List of all the command-line tools installed by Spot #+HTML_LINK_UP: index.html #+PROPERTY: header-args:sh :results verbatim :exports both This document introduces command-line tools that are installed with -the Spot library. We give some examples to highlight possible +Spot call_SPOT_VERSION(). We give some examples to highlight possible use-cases but shall not attempt to cover all features exhaustively (please check the man pages for further inspiration). From d3013b072d7c5bac44c9d33e5d43fa43d1a89212 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 Feb 2023 17:46:51 +0100 Subject: [PATCH 268/606] org: do not require org-install org-install has been obsolete for a long time, and has been removed from Org 9.6. * doc/org/init.el.in: Remove org-install. --- doc/org/init.el.in | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 9f589bb35..5e51c7250 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -51,7 +51,6 @@ (package-install ess))))) (require 'ox-publish) -(require 'org-install) (require 'hoa-mode) ; See https://github.com/emacs-ess/ESS/issues/1052 From a146457ea16e99dc47527f3fb05cd08b5d7448a6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Feb 2023 12:02:06 +0100 Subject: [PATCH 269/606] * doc/org/tut03.org: Typos. --- doc/org/tut03.org | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/org/tut03.org b/doc/org/tut03.org index b48366a82..c70a3dab3 100644 --- a/doc/org/tut03.org +++ b/doc/org/tut03.org @@ -81,7 +81,7 @@ simplifications called /trivial identities/. For instance =formula::F(formula::X(formula::tt()))= will return the same formula as =formula::tt()=. 
These simplifications are those that involve the true and false constants, impotence (=F(F(e))=F(e)=), involutions -(=Not(Not(e)=e=), associativity +(=Not(Not(e))=e=), associativity (=And({And({e1,e2},e3})=And({e1,e2,e3})=). See [[https://spot.lrde.epita.fr/tl.pdf][tl.pdf]] for a list of these /trivial identities/. @@ -113,7 +113,7 @@ detail of the top-level operator in the formula. std::cout << f << '\n'; - // kindstar() prints the name of the operator + // kindstr() prints the name of the operator // size() return the number of operands of the operators std::cout << f.kindstr() << ", " << f.size() << " children\n"; // operator[] accesses each operand @@ -157,7 +157,7 @@ The Python equivalent is similar: print(f) - # kindstar() prints the name of the operator + # kindstr() prints the name of the operator # size() return the number of operands of the operators print("{}, {} children".format(f.kindstr(), f.size())) # [] accesses each operand From dcd4759896cb2940ca6c0f6af9c4f50a38e94eca Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 Mar 2023 00:14:18 +0100 Subject: [PATCH 270/606] org: fix rendering of R examples for recent ESS/Org * doc/org/.dir-locals.el.in, doc/org/init.el.in: Newer ESS version need to be taught to use default-directory instead of the project directory. * doc/org/ltlcross.org: Use "result file" to render the output. --- doc/org/.dir-locals.el.in | 4 +++- doc/org/init.el.in | 4 +++- doc/org/ltlcross.org | 4 ++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/doc/org/.dir-locals.el.in b/doc/org/.dir-locals.el.in index cba9892fb..80c0a1385 100644 --- a/doc/org/.dir-locals.el.in +++ b/doc/org/.dir-locals.el.in @@ -27,6 +27,9 @@ (setenv "SPOT_DOTEXTRA" "node[fontsize=12] fontsize=12 stylesheet=\"spot.css\" edge[arrowhead=vee, arrowsize=.7, fontsize=12]") (setq org-babel-temporary-directory "@abs_top_builddir@/doc/org/tmp") (make-directory org-babel-temporary-directory t) + ; has to be set globally, not buffer-local + (setq ess-ask-for-ess-directory nil) + (setq ess-startup-directory 'default-directory) (org-babel-do-load-languages 'org-babel-load-languages `((,(if (version< org-version "8.3") 'sh 'shell) . t) (python . t) @@ -39,7 +42,6 @@ (org-babel-python-command . "@PYTHON@") (org-babel-C++-compiler . "./g++wrap") (shell-file-name . "@SHELL@") - (ess-ask-for-ess-directory . nil) (org-export-html-postamble . nil) (org-html-table-header-tags "
" . "
") diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 5e51c7250..4258a95f7 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -88,7 +88,9 @@ (setq org-babel-C++-compiler "./g++wrap") (setq shell-file-name "@SHELL@") (setq ess-ask-for-ess-directory nil) - +; setting ess-startup-directory to 'default-directory is enough with +; newer ESS version (after Fev 2022) but does not work with older ones. +(setq ess-startup-directory "@abs_top_builddir@/doc/org") (setq org-babel-default-header-args:plantuml '((:results . "file") (:exports . "results") diff --git a/doc/org/ltlcross.org b/doc/org/ltlcross.org index 0fdebae1f..36cce5cbb 100644 --- a/doc/org/ltlcross.org +++ b/doc/org/ltlcross.org @@ -924,7 +924,7 @@ compare the number of states produced by the two configurations of =ltl2tgba= for each formula, we just need to plot column =dt2$state.small= against =dt2$state.deter=. -#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r.svg library(ggplot2) ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() @@ -937,7 +937,7 @@ ggplot(dt2, aes(x=states.small, y=states.deter)) + We should probably print the formulas for the cases where the two sizes differ. -#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r2.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r2.svg ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() + geom_text(data=subset(dt2, states.small != states.deter), From 5714ecce3291ed94937dfd14f85c411430d53ab9 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber Date: Wed, 22 Mar 2023 11:00:48 +0100 Subject: [PATCH 271/606] Ignore ltargz.m4 * .gitignore: Ignore it * m4/ltargz.m4: Remove it --- .gitignore | 1 + m4/ltargz.m4 | 74 ---------------------------------------------------- 2 files changed, 1 insertion(+), 74 deletions(-) delete mode 100644 m4/ltargz.m4 diff --git a/.gitignore b/.gitignore index 7392a79db..155a9b5e7 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ configure config.log config.status aclocal.m4 +ltargz.m4 autom4te.cache libtool auto diff --git a/m4/ltargz.m4 b/m4/ltargz.m4 deleted file mode 100644 index 0908d90b9..000000000 --- a/m4/ltargz.m4 +++ /dev/null @@ -1,74 +0,0 @@ -# Portability macros for glibc argz. -*- Autoconf -*- -# -# Copyright (C) 2004-2007, 2011-2015 Free Software Foundation, Inc. -# Written by Gary V. Vaughan -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 1 ltargz.m4 - -AC_DEFUN([LT_FUNC_ARGZ], [ -AC_CHECK_HEADERS([argz.h], [], [], [AC_INCLUDES_DEFAULT]) - -AC_CHECK_TYPES([error_t], - [], - [AC_DEFINE([error_t], [int], - [Define to a type to use for 'error_t' if it is not otherwise available.]) - AC_DEFINE([__error_t_defined], [1], [Define so that glibc/gnulib argp.h - does not typedef error_t.])], - [#if defined(HAVE_ARGZ_H) -# include -#endif]) - -LT_ARGZ_H= -AC_CHECK_FUNCS([argz_add argz_append argz_count argz_create_sep argz_insert \ - argz_next argz_stringify], [], [LT_ARGZ_H=lt__argz.h; AC_LIBOBJ([lt__argz])]) - -dnl if have system argz functions, allow forced use of -dnl libltdl-supplied implementation (and default to do so -dnl on "known bad" systems). 
Could use a runtime check, but -dnl (a) detecting malloc issues is notoriously unreliable -dnl (b) only known system that declares argz functions, -dnl provides them, yet they are broken, is cygwin -dnl releases prior to 16-Mar-2007 (1.5.24 and earlier) -dnl So, it's more straightforward simply to special case -dnl this for known bad systems. -AS_IF([test -z "$LT_ARGZ_H"], - [AC_CACHE_CHECK( - [if argz actually works], - [lt_cv_sys_argz_works], - [[case $host_os in #( - *cygwin*) - lt_cv_sys_argz_works=no - if test no != "$cross_compiling"; then - lt_cv_sys_argz_works="guessing no" - else - lt_sed_extract_leading_digits='s/^\([0-9\.]*\).*/\1/' - save_IFS=$IFS - IFS=-. - set x `uname -r | sed -e "$lt_sed_extract_leading_digits"` - IFS=$save_IFS - lt_os_major=${2-0} - lt_os_minor=${3-0} - lt_os_micro=${4-0} - if test 1 -lt "$lt_os_major" \ - || { test 1 -eq "$lt_os_major" \ - && { test 5 -lt "$lt_os_minor" \ - || { test 5 -eq "$lt_os_minor" \ - && test 24 -lt "$lt_os_micro"; }; }; }; then - lt_cv_sys_argz_works=yes - fi - fi - ;; #( - *) lt_cv_sys_argz_works=yes ;; - esac]]) - AS_IF([test yes = "$lt_cv_sys_argz_works"], - [AC_DEFINE([HAVE_WORKING_ARGZ], 1, - [This value is set to 1 to indicate that the system argz facility works])], - [LT_ARGZ_H=lt__argz.h - AC_LIBOBJ([lt__argz])])]) - -AC_SUBST([LT_ARGZ_H]) -]) From 1a0b1f235d16439cac4f20a13ab247f0fad9dc18 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 24 Mar 2023 13:52:37 +0100 Subject: [PATCH 272/606] * doc/tl/tl.tex: Typo in firstmatch semantics. --- doc/tl/tl.tex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index f9205cced..371886711 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -701,7 +701,7 @@ $a$ is an atomic proposition. \VDash f\FSTAR{\mvar{i-1}..}))\\ \end{cases}\\ \sigma\VDash \FIRSTMATCH\code(f\code) & \iff - (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k}\nVDash f) + (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k-1}\nVDash f) \end{align*}} Notes: From 646b6e546f0549eb7e30100815d8a0a1e1505382 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 29 Mar 2023 16:20:51 +0200 Subject: [PATCH 273/606] fix spurious test-case failure when Python is not installed Fixes #530. * tests/core/ltlsynt2.test: Skip when PYTHON is empty. * NEWS: Mention the fix. --- NEWS | 5 ++++- tests/core/ltlsynt2.test | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 7f3be814d..7db7e111c 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,9 @@ New in spot 2.11.4.dev (not yet released) - Nothing yet. + Bug fixes: + + - Fix spurious failure of ltlsynt2.test when Python is not installed + (issue #530). New in spot 2.11.4 (2023-02-10) diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index dbb754d92..546cb0d27 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -36,6 +36,8 @@ ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q --csv=out.csv &&\ exit 2 test $? -eq 1 || exit 2 +test -z "$PYTHON" && exit 77 + cat >test.py < Date: Wed, 29 Mar 2023 17:01:13 +0200 Subject: [PATCH 274/606] correctly fails if emacs needed and missing Fixes #528. * configure.ac: Define EMACS using tools/missing. * NEWS: Mention the bug. 
--- NEWS | 3 +++ configure.ac | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 7db7e111c..2c8bb7d7d 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,9 @@ New in spot 2.11.4.dev (not yet released) - Fix spurious failure of ltlsynt2.test when Python is not installed (issue #530). + - Building from the git repository would fail to report a missing + emacs (issue #528). + New in spot 2.11.4 (2023-02-10) Python: diff --git a/configure.ac b/configure.ac index e47e2eb29..772b4c24a 100644 --- a/configure.ac +++ b/configure.ac @@ -217,7 +217,7 @@ AC_CHECK_PROG([LTL3BA], [ltl3ba], [ltl3ba]) AC_CHECK_PROG([PERL], [perl], [perl]) AC_CHECK_PROG([SPIN], [spin], [spin]) AC_CHECK_PROG([LBTT], [lbtt], [lbtt]) -AC_CHECK_PROG([EMACS], [emacs], [emacs]) +AM_MISSING_PROG([EMACS], [emacs]) AC_CHECK_PROGS([IPYTHON], [ipython3 ipython], [ipython]) AC_CHECK_PROGS([JUPYTER], [jupyter], [jupyter]) AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate]) From 993695a2c4c7f86948259ae49b58067bc95049a7 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Thu, 30 Mar 2023 14:32:26 +0200 Subject: [PATCH 275/606] Fix parity solver if edgevector is not contiguous Validity of strategies was tested relying on num_edges() which might be smaller than the edge_number * spot/twaalgos/game.cc: Fix here * tests/python/game.py: Test here --- spot/twaalgos/game.cc | 6 +- tests/python/game.py | 139 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 17f94a7e4..ac0e46520 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -214,10 +214,14 @@ namespace spot // Only the states owned by the winner need a strategy assert([&]() { + std::unordered_set valid_strat; + for (const auto& e : arena_->edges()) + valid_strat.insert(arena_->edge_number(e)); + for (unsigned v = 0; v < arena_->num_states(); ++v) { if (((*owner_ptr_)[v] == w_.winner(v)) - && ((s_[v] <= 0) || (s_[v] > arena_->num_edges()))) + && (valid_strat.count(s_.at(v)) == 0)) return false; } return true; diff --git a/tests/python/game.py b/tests/python/game.py index a7080b696..857390335 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -384,4 +384,141 @@ spot.solve_game(aut) S1 = list(spot.get_strategy(aut)) spot.solve_game(aut) S2 = list(spot.get_strategy(aut)) -tc.assertEqual(S1, S2) \ No newline at end of file +tc.assertEqual(S1, S2) + + +# Finite games +alive = "__alive__" +def finite_existential(auts): + # 1 Accepting state -> selfloop + # 2 Prune + acc_state = set() + sp = list(spot.get_state_players(auts)) + for e in auts.edges(): + if e.acc: + acc_state.add(e.src) + for s in acc_state: + e_kill = auts.out_iteraser(s) + while (e_kill): + e_kill.erase() + for s in acc_state: + sprime = auts.new_state() + sp.append(not sp[s]) + auts.new_edge(s, sprime, buddy.bddtrue, [0]) + auts.new_edge(sprime, s, buddy.bddtrue, [0]) + spot.set_state_players(auts, sp) + auts.purge_dead_states() + spot.alternate_players(auts, False, False) + return auts + +def is_input_complete(auts): + sp = spot.get_state_players(auts) + for s in range(auts.num_states()): + if sp[s]: + continue # Player + cumul = buddy.bddfalse + for e in auts.out(s): + cumul |= e.cond + if cumul != buddy.bddtrue: + return False + + return True + +def synt_from_ltlf(f:str, outs): + ff = spot.from_ltlf(f, alive) + aut = ff.translate("buchi", "sbacc") + outbdd = buddy.bddtrue + for out in outs: + outbdd &= buddy.bdd_ithvar(aut.register_ap(out)) + 
alive_bdd = buddy.bdd_ithvar(aut.register_ap(alive)) + auts = spot.split_2step(aut, outbdd & alive_bdd, False) + auts = spot.to_finite(auts, alive) + spot.alternate_players(auts, False, False) + spot.set_synthesis_outputs(auts, outbdd) + if not is_input_complete(auts): + print("Not synthesizable") + return None + auts = finite_existential(auts) + + return auts + +def synt_ltlf(f:str, outs, res:str = "aut"): + auts = synt_from_ltlf(f, outs) + + succ = spot.solve_parity_game(auts) + if not succ: + if res == "aut": + return False, auts + else: + return False, None + + mealy_cc = spot.solved_game_to_split_mealy(auts) + + if res == "aut": + return True, mealy_cc + elif res == "aig": + return True, spot.mealy_machine_to_aig(mealy_cc, "isop") + else: + raise RuntimeError("Unknown option") + + +sink_player = None + +def negate_ltlf(f:str, outs, opt = "buchi"): + + global sink_player + sink_player = None + + aut = synt_from_ltlf(f, outs) + # Implies input completeness + # We need output completeness + acc = [] + + sp = list(spot.get_state_players(aut)) + + def get_sink(): + global sink_player + if sink_player is None: + sink_player = aut.new_states(2) + aut.new_edge(sink_player, sink_player + 1, buddy.bddtrue, acc) + aut.new_edge(sink_player + 1, sink_player, buddy.bddtrue, acc) + sp.append(False) + sp.append(True) + spot.set_state_players(aut, sp) + return sink_player + + for s in range(aut.num_states()): + if not sp[s]: + continue + rem = buddy.bddtrue + for e in aut.out(s): + rem -= e.cond + if rem != buddy.bddfalse: + aut.new_edge(s, get_sink(), rem) + + # Better to invert colors or condition? + if opt == "buchi": + for e in aut.edges(): + if e.acc: + e.acc = spot.mark_t() + else: + e.acc = spot.mark_t([0]) + elif opt == "cobuchi": + aut.set_co_buchi() + else: + raise RuntimeError("Unknown opt") + return aut + +# Game where the edge_vector is larger +# than the number of transitions +f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ + && (! (grant_1))))) -> (X (idle))))) && (G ((X (! (grant_0))) \ + || (X (((! (request_0)) && (! (idle))) U ((! (request_0)) \ + && (idle))))))) -> (((G (((((X (((! (grant_0)) && (true)) \ + || ((true) && (! (grant_1))))) && ((X (grant_0)) -> (request_0))) \ + && ((X (grant_1)) -> (request_1))) && ((request_0) -> (grant_1))) \ + && ((! (idle)) -> (X ((! (grant_0)) && (! (grant_1))))))) \ + && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ + && (! (F (G ((request_1) && (X (! (grant_1)))))))))" +outs = ["grant_0", "grant1"] +tc.assertEqual(synt_ltlf(f1, outs)[0], False) \ No newline at end of file From eb0f40b9d650a264b0c2d3f21f7c8910e70c59fa Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 18 Apr 2023 14:48:10 +0200 Subject: [PATCH 276/606] twa_run: let as_twa work on the result of intersecting_run Reported by Philipp Schlehuber-Caissier. * spot/twaalgos/emptiness.cc (as_twa): Simplify considerably. Don't try to replay the run, and don't merge identical states. * spot/twaalgos/word.hh, spot/twaalgos/emptiness.hh: Improve documentation. * tests/python/intrun.py: Add a test case. * NEWS: Mention the bug. 
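For illustration, here is a minimal Python sketch (not part of this patch) of the
scenario being fixed; it mirrors the new test in tests/python/intrun.py and assumes
the spot Python bindings are installed:

    import spot
    a = spot.translate('a')
    b = spot.translate('{a;1;a}')
    # intersecting_run() builds a run over `a`; as_twa() now converts it
    # into a lasso-shaped automaton without replaying it on `a`, and
    # without merging identical states.
    r = a.intersecting_run(b)
    print(r.as_twa().to_str())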
--- NEWS | 5 +++ spot/twaalgos/emptiness.cc | 75 +++++++++++--------------------------- spot/twaalgos/emptiness.hh | 6 +-- spot/twaalgos/word.hh | 7 +++- tests/python/intrun.py | 40 +++++++++++++++++++- 5 files changed, 73 insertions(+), 60 deletions(-) diff --git a/NEWS b/NEWS index 2c8bb7d7d..d670ba105 100644 --- a/NEWS +++ b/NEWS @@ -8,6 +8,11 @@ New in spot 2.11.4.dev (not yet released) - Building from the git repository would fail to report a missing emacs (issue #528). + - Fix exception raised by aut1.intersecting_run(aut2).as_twa() + because the run did not match transitions present in aut1 + verbatim. We also changed the behavior of as_twa() to not merge + identical states. + New in spot 2.11.4 (2023-02-10) Python: diff --git a/spot/twaalgos/emptiness.cc b/spot/twaalgos/emptiness.cc index fd3319141..ef8890f95 100644 --- a/spot/twaalgos/emptiness.cc +++ b/spot/twaalgos/emptiness.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -570,7 +570,7 @@ namespace spot if (debug) os << "ERROR: First state of run (in " << in << "): " << aut->format_state(i->s) - << "\ndoes not match initial state of automata: " + << "\ndoes not match initial state of automaton: " << aut->format_state(s) << '\n'; s->destroy(); return false; @@ -802,38 +802,38 @@ namespace spot res->set_named_prop("state-names", names); } - const state* s = aut->get_init_state(); unsigned src; unsigned dst; const twa_run::steps* l; - acc_cond::mark_t seen_acc = {}; - - state_map seen; + unsigned cycle_entry = 0; if (prefix.empty()) - l = &cycle; + l = &cycle; else - l = &prefix; + l = &prefix; twa_run::steps::const_iterator i = l->begin(); - assert(s->compare(i->s) == 0); +#if NDEBUG + const state* init = aut->get_init_state(); + assert(init->compare(i->s) == 0); + init->destroy(); +#endif + src = res->new_state(); - seen.emplace(i->s, src); if (names) - names->push_back(aut->format_state(s)); + names->push_back(aut->format_state(i->s)); for (; i != l->end();) { - // expected outgoing transition bdd label = i->label; acc_cond::mark_t acc = i->acc; - // compute the next expected state const state* next; ++i; if (i != l->end()) { + dst = res->new_state(); next = i->s; } else @@ -842,57 +842,24 @@ namespace spot { l = &cycle; i = l->begin(); + cycle_entry = dst = res->new_state(); + } + else + { + dst = cycle_entry; } next = l->begin()->s; } - // browse the actual outgoing transitions and - // look for next; - const state* the_next = nullptr; - for (auto j: aut->succ(s)) + if (names && i != l->end()) { - if (j->cond() != label - || j->acc() != acc) - continue; - - const state* s2 = j->dst(); - if (s2->compare(next) == 0) - { - the_next = s2; - break; - } - s2->destroy(); + assert(dst == names->size()); + names->push_back(aut->format_state(next)); } - s->destroy(); - if (!the_next) - throw std::runtime_error("twa_run::as_twa() unable to replay run"); - s = the_next; - - - auto p = seen.emplace(next, 0); - if (p.second) - { - unsigned ns = res->new_state(); - p.first->second = ns; - if (names) - { - assert(ns == names->size()); - names->push_back(aut->format_state(next)); - } - } - dst = p.first->second; - res->new_edge(src, dst, label, acc); src = dst; - - // Sum acceptance conditions. 
- if (l == &cycle && i != l->begin()) - seen_acc |= acc; } - s->destroy(); - - assert(aut->acc().accepting(seen_acc)); return res; } diff --git a/spot/twaalgos/emptiness.hh b/spot/twaalgos/emptiness.hh index 47896a1d7..66bf8ca56 100644 --- a/spot/twaalgos/emptiness.hh +++ b/spot/twaalgos/emptiness.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2018, 2020-2021 Laboratoire de +// Copyright (C) 2011, 2013-2018, 2020-2021, 2023 Laboratoire de // Recherche et Developpement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -451,9 +451,9 @@ namespace spot /// Note that this works only if the automaton is a twa_graph_ptr. void highlight(unsigned color); - /// \brief Return a twa_graph_ptr corresponding to \a run + /// \brief Convert the run into a lasso-shaped automaton /// - /// Identical states are merged. + /// This preserves the original acceptance condition. /// /// If \a preserve_names is set, the created states are named /// using the format_state() result from the original state. diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index f6f70fc14..979a4070b 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2013-2016, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -80,6 +80,9 @@ namespace spot /// \brief Convert the twa_word as an automaton. /// + /// Convert the twa_word into a lasso-shapred automaton + /// with "true" acceptance condition. + /// /// This is useful to evaluate a word on an automaton. twa_graph_ptr as_automaton() const; diff --git a/tests/python/intrun.py b/tests/python/intrun.py index e3b708a95..02a7aedd6 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# Copyright (C) 2020, 2022, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -38,3 +38,41 @@ r = b.intersecting_run(spot.complement(a)); c = spot.twa_word(r).as_automaton() tc.assertTrue(c.intersects(b)) tc.assertFalse(c.intersects(a)) + +# The next test came from Philipp Schlehuber-Caissier: running +# as_twa() on a run built from a A.intersecting_run(B) failed to build +# the automaton because it tried to rebuild the run on A and did not +# find transitions matching exactly. Additionally the idea of merging +# states in as_twa() seems to be a way to create some disasters, so we +# removed that too. 
+a = spot.translate("a"); +b = spot.translate("{a;1;a}"); +r = a.intersecting_run(b) +tc.assertEqual(str(r), """Prefix: + 1 + | a + 0 + | 1 {0} + 0 + | a {0} +Cycle: + 0 + | 1 {0} +""") +tc.assertEqual(r.as_twa().to_str(), """HOA: v1 +States: 4 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0] 1 +State: 1 {0} +[t] 2 +State: 2 {0} +[0] 3 +State: 3 {0} +[t] 3 +--END--""") From eb80f5d5af51a7ce1d94bf5c712880b4e44ee09f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 18 Apr 2023 15:04:58 +0200 Subject: [PATCH 277/606] powerset: fix segfault when the initial state is a sink Reported by Raven Beutner. * spot/twaalgos/minimize.cc: Improve comment. * spot/twaalgos/powerset.cc: Fix handling of an initial state that is also a sink. * tests/core/wdba2.test: Add test case. * NEWS: Mention the bug. --- NEWS | 3 +++ spot/twaalgos/minimize.cc | 6 +++--- spot/twaalgos/powerset.cc | 26 ++++++++++++++------------ tests/core/wdba2.test | 37 +++++++++++++++++++++++++++++++++++-- 4 files changed, 55 insertions(+), 17 deletions(-) diff --git a/NEWS b/NEWS index d670ba105..9d14735e7 100644 --- a/NEWS +++ b/NEWS @@ -13,6 +13,9 @@ New in spot 2.11.4.dev (not yet released) verbatim. We also changed the behavior of as_twa() to not merge identical states. + - Fix segfaults occuring in determinization of 1-state terminal + automata. + New in spot 2.11.4 (2023-02-10) Python: diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 4fd6847b3..1ac961d46 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2010-2020, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -394,8 +394,8 @@ namespace spot else { // Find any accepting sink state, to speed up the - // determinization by merging all states containing a sink - // state. + // determinization by merging all macro-states containing a + // sink state. std::vector acc_sinks; unsigned ns = a->num_states(); if (!a->prop_terminal().is_true()) diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index c5fc07f94..326de7c76 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2011, 2013-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009-2011, 2013-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -217,17 +217,19 @@ namespace spot pm.map_.emplace_back(std::move(ps)); } - { - unsigned init_num = aut->get_init_state_number(); - auto bvi = make_bitvect(ns); - bvi->set(init_num); - power_state ps{init_num}; - unsigned num = res->new_state(); - res->set_init_state(num); - seen[bvi] = num; - assert(pm.map_.size() == num); - pm.map_.emplace_back(std::move(ps)); - toclean.emplace_back(bvi); + // Add the initial state unless it's a sink. 
+ if (unsigned init_num = aut->get_init_state_number(); + !acc_sinks || !acc_sinks->get(init_num)) + { + auto bvi = make_bitvect(ns); + bvi->set(init_num); + power_state ps{init_num}; + unsigned num = res->new_state(); + res->set_init_state(num); + seen[bvi] = num; + assert(pm.map_.size() == num); + pm.map_.emplace_back(std::move(ps)); + toclean.emplace_back(bvi); } // outgoing map diff --git a/tests/core/wdba2.test b/tests/core/wdba2.test index ca49bad94..3850a447a 100755 --- a/tests/core/wdba2.test +++ b/tests/core/wdba2.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2018, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2012, 2014-2015, 2018-2019, 2023 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -82,3 +82,36 @@ EOF autfilt --small --high -C -Hi input > output diff output expected + +# This test comes from a report from Raven Beutner and used to cause a +# segfault. +cat >input <output +cat >expected < Date: Tue, 18 Apr 2023 17:35:05 +0200 Subject: [PATCH 278/606] org: replace version references with org-babel blocks This way we have fewer lines to edit multiple when making releases. * doc/org/index.org, doc/org/init.el.in, doc/org/install.org, doc/org/setup.org, doc/org/tools.org: Use org-babel instead of macros for release version and links. --- doc/org/index.org | 2 +- doc/org/init.el.in | 2 +- doc/org/install.org | 4 ++-- doc/org/setup.org | 20 ++++++++++++++++---- doc/org/tools.org | 6 +++--- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/doc/org/index.org b/doc/org/index.org index 9af23dba4..08fa16a3d 100644 --- a/doc/org/index.org +++ b/doc/org/index.org @@ -37,7 +37,7 @@ checking. It has the following notable features: * Latest version -The latest version is *{{{LASTRELEASE}}}* and was released on +The latest version is *call_SPOT_VERSION()* and was released on *{{{LASTDATE}}}*. Please see the [[file:install.org][download and installation instructions]]. * Documentation diff --git a/doc/org/init.el.in b/doc/org/init.el.in index 4258a95f7..c46363096 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -160,7 +160,7 @@ up.html points to index.html, then the result is: (setq body res) (not cmp))) (concat "#+TITLE: " title - "\n#+SETUPFILE: setup.org\n#+HTML_LINK_UP: index.html\n\n" + "\n#+INCLUDE: setup.org\n#+HTML_LINK_UP: index.html\n\n" body))) (setq org-publish-project-alist diff --git a/doc/org/install.org b/doc/org/install.org index dc492af57..b65c02074 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -9,9 +9,9 @@ :CUSTOM_ID: tar :END: -The latest release of Spot is version {{{LASTRELEASE}}}: +The latest release of Spot is version call_SPOT_VERSION() and was released on {{{LASTDATE}}}: -- {{{LASTTARBALL}}} (see also the {{{LASTNEWS}}}) +- call_TARBALL_LINK() (see also the call_NEWS_LINK()) Past releases can be found [[https://www.lrde.epita.fr/dload/spot/][in the same directory]]. 
If you are interested in /future/ releases, you can always peek at the [[https://gitlab.lre.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=make-dist][last diff --git a/doc/org/setup.org b/doc/org/setup.org index 7b6a4fa70..974272774 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,23 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.11.4 -#+MACRO: LASTRELEASE 2.11.4 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.11.3.tar.gz][=spot-2.11.4.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lre.epita.fr/spot/spot/blob/spot-2-11-3/NEWS][summary of the changes]] #+MACRO: LASTDATE 2023-02-10 +#+NAME: SPOT_VERSION +#+BEGIN_SRC python :exports none :results value :wrap org +return "2.11.4" +#+END_SRC + +#+NAME: TARBALL_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + print(f"[[http://www.lrde.epita.fr/dload/spot/spot-{version}.tar.gz][=spot-{version}.tar.gz=]]") +#+END_SRC + +#+NAME: NEWS_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + version = version.replace('.', '-') + print(f"[[https://gitlab.lre.epita.fr/spot/spot/blob/spot-{version}/NEWS][summary of the changes]]") +#+END_SRC + #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] diff --git a/doc/org/tools.org b/doc/org/tools.org index 5227f1b4e..46ca38ccd 100644 --- a/doc/org/tools.org +++ b/doc/org/tools.org @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -#+TITLE: Command-line tools installed by Spot {{{SPOTVERSION}}} -#+DESCRIPTION: List of all the command-line tools installed by Spot {{{SPOTVERSION}}} #+INCLUDE: setup.org +#+TITLE: Command-line tools installed by Spot +#+DESCRIPTION: List of all the command-line tools installed by Spot #+HTML_LINK_UP: index.html #+PROPERTY: header-args:sh :results verbatim :exports both This document introduces command-line tools that are installed with -the Spot library. We give some examples to highlight possible +Spot call_SPOT_VERSION(). We give some examples to highlight possible use-cases but shall not attempt to cover all features exhaustively (please check the man pages for further inspiration). From b6c076ce1946a083ab2cd14146160edbce8ce72c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 20 Apr 2023 09:43:33 +0200 Subject: [PATCH 279/606] release Spot 2.11.5 * NEWS, configure.ac, doc/org/setup.org: Update version. --- NEWS | 5 ++++- configure.ac | 2 +- doc/org/setup.org | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index 9d14735e7..bc43ce155 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.11.4.dev (not yet released) +New in spot 2.11.5 (2023-04-20) Bug fixes: @@ -16,6 +16,9 @@ New in spot 2.11.4.dev (not yet released) - Fix segfaults occuring in determinization of 1-state terminal automata. + - Fix incorrect assertion in game solver when the edge vector + contains deleted transitions. + New in spot 2.11.4 (2023-02-10) Python: diff --git a/configure.ac b/configure.ac index 772b4c24a..ef5ce7391 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.4.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.5], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 974272774..255a01c3d 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: LASTDATE 2023-02-10 +#+MACRO: LASTDATE 2023-04-20 #+NAME: SPOT_VERSION #+BEGIN_SRC python :exports none :results value :wrap org -return "2.11.4" +return "2.11.5" #+END_SRC #+NAME: TARBALL_LINK From d0ae0dfc388df2c744c25af722de19e863a38232 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 20 Apr 2023 09:48:22 +0200 Subject: [PATCH 280/606] * NEWS, configure.ac: Bump version to 2.11.5.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index bc43ce155..0b425272f 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.5.dev (not yet released) + + Nothing yet. + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/configure.ac b/configure.ac index ef5ce7391..09fe45364 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.5], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.5.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 747ec8b1c56e0b286086762b73e34fafaf2f1c18 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 11 May 2023 21:25:59 +0200 Subject: [PATCH 281/606] debian: add missing build dependencies * debian/control: Add Build-Depends on graphviz, jupyter-nbconvert, doxygen. --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index d1f9c652c..e29454c54 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: spot Section: science Priority: optional Maintainer: Alexandre Duret-Lutz -Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python +Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen Standards-Version: 4.5.1 Homepage: http://spot.lrde.epita.fr/ From 134da9209c50be09fbe0fc3511898787acdb6fcd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 11 May 2023 21:40:14 +0200 Subject: [PATCH 282/606] genem: Add yet another version of the algorithm * spot/twa/acc.hh, spot/twa/acc.cc (fin_unit_one_split_improved): New function. * python/spot/impl.i: Add bindings for fin_unit_one_split_improved. * spot/twaalgos/genem.cc: Add the spot212 version. * tests/python/genem.py: Test it. 
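As a usage sketch (not part of this patch), the new variant can be selected from
Python exactly like the existing ones, mirroring tests/python/genem.py; the input
formula below is only an assumption, any automaton with a generic acceptance
condition would do:

    import spot
    aut = spot.translate('GFa & GFb')
    # select the new "spot212" version, then run the emptiness check
    spot.generic_emptiness_check_select_version('spot212')
    print(spot.generic_emptiness_check(aut))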
--- python/spot/impl.i | 12 +++++++++- spot/twa/acc.cc | 51 ++++++++++++++++++++++++++++++++++++++---- spot/twa/acc.hh | 36 ++++++++++++++++++++--------- spot/twaalgos/genem.cc | 26 +++++++++++++++++---- tests/python/genem.py | 11 +++++---- 5 files changed, 112 insertions(+), 24 deletions(-) diff --git a/python/spot/impl.i b/python/spot/impl.i index 502770fcb..f95270e21 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2009-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -564,6 +564,16 @@ namespace std { swig::from(std::get<2>(v))); } %} +// Must occur before the twa declaration +%typemap(out) SWIGTYPE spot::acc_cond::fin_unit_one_split_improved %{ + { + auto& v = static_cast>($1); + $result = PyTuple_Pack(3, + swig::from(std::get<0>(v)), + swig::from(std::get<1>(v)), + swig::from(std::get<2>(v))); + } +%} %include %template(pair_bool_mark) std::pair; diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 07aac36f9..d73af33b0 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -2707,8 +2707,9 @@ namespace spot return false; } - // Check wheter pos looks like Fin(f) or Fin(f)&rest - bool is_conj_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + // Check if pos contains Fin(f) in a substree + template + bool has_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { auto sub = pos - pos->sub.size; do @@ -2719,7 +2720,10 @@ namespace spot --pos; break; case acc_cond::acc_op::Or: - pos -= pos->sub.size + 1; + if constexpr (top_conjunct_only) + pos -= pos->sub.size + 1; + else + --pos; break; case acc_cond::acc_op::Fin: if (pos[-1].mark & f) @@ -2736,6 +2740,12 @@ namespace spot return false; } + // Check whether pos looks like Fin(f) or Fin(f)&rest + bool is_conj_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + return has_fin(pos, f); + } + acc_cond::acc_code extract_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { @@ -2772,6 +2782,7 @@ namespace spot return {}; } + template std::pair split_top_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { @@ -2798,6 +2809,17 @@ namespace spot tmp |= std::move(left); std::swap(tmp, left); } + else if (deeper_check + && has_top_fin(pos) == -1 + && has_fin(pos, f)) + { + auto tmp = strip_rec(pos, f, true, false); + tmp |= std::move(left); + std::swap(tmp, left); + tmp = force_inf_rec(pos, f); + tmp |= std::move(right); + std::swap(tmp, right); + } else { acc_cond::acc_code tmp(pos); @@ -2851,6 +2873,27 @@ namespace spot return {selected_fin, extract_fin(pos, fo_m), force_inf(fo_m)}; } + std::tuple + acc_cond::acc_code::fin_unit_one_split_improved() const + { + if (SPOT_UNLIKELY(is_t() || is_f())) + err: + throw std::runtime_error("fin_unit_one_split_improved(): no Fin"); + const acc_cond::acc_word* pos = &back(); + int selected_fin = has_top_fin(pos); + if (selected_fin >= 0) + { + auto [left, right] = + split_top_fin(pos, {(unsigned) selected_fin}); + return {selected_fin, std::move(left), std::move(right)}; + } + selected_fin = fin_one(); + if (selected_fin < 0) + goto err; + 
acc_cond::mark_t fo_m = {(unsigned) selected_fin}; + return {selected_fin, extract_fin(pos, fo_m), force_inf(fo_m)}; + } + namespace { bool diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 1c460cfc4..1b46e4024 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1300,21 +1300,26 @@ namespace spot /// \brief Split an acceptance condition, trying to select one /// unit-Fin. /// - /// If the condition is a disjunction and one of the disjunct as - /// has the shape `...&Fin(i)&...`, then this will return - /// (i, left, right), where left is all disjunct of this form, and - /// right are all the others. + /// If the condition is a disjunction and one of the disjunct has + /// the shape `...&Fin(i)&...`, then this will return (i, left, + /// right), where left is all disjunct of this form (with Fin(i) + /// replaced by true), and right are all the others. /// /// If the input formula has the shape `...&Fin(i)&...` then left - /// is set to the entire formula, and right is empty. + /// is set to the entire formula (with Fin(i) replaced by true), + /// and right is empty. /// /// If no disjunct has the right shape, then a random Fin(i) is /// searched in the formula, and the output (i, left, right). /// is such that left contains all disjuncts containing Fin(i) /// (at any depth), and right contains the original formlula /// where Fin(i) has been replaced by false. + /// @{ std::tuple fin_unit_one_split() const; + std::tuple + fin_unit_one_split_improved() const; + /// @} /// \brief Help closing accepting or rejecting cycle. /// @@ -2258,25 +2263,34 @@ namespace spot /// \brief Split an acceptance condition, trying to select one /// unit-Fin. /// - /// If the condition is a disjunction and one of the disjunct as - /// has the shape `...&Fin(i)&...`, then this will return - /// (i, left, right), where left is all disjunct of this form, and - /// right are all the others. + /// If the condition is a disjunction and one of the disjunct has + /// the shape `...&Fin(i)&...`, then this will return (i, left, + /// right), where left is all disjunct of this form (with Fin(i) + /// replaced by true), and right are all the others. /// /// If the input formula has the shape `...&Fin(i)&...` then left - /// is set to the entire formula, and right is empty. + /// is set to the entire formula (with Fin(i) replaced by true), + /// and right is empty. /// /// If no disjunct has the right shape, then a random Fin(i) is /// searched in the formula, and the output (i, left, right). /// is such that left contains all disjuncts containing Fin(i) /// (at any depth), and right contains the original formlula /// where Fin(i) has been replaced by false. + /// @{ std::tuple fin_unit_one_split() const { auto [f, l, r] = code_.fin_unit_one_split(); return {f, {num_sets(), std::move(l)}, {num_sets(), std::move(r)}}; } + std::tuple + fin_unit_one_split_improved() const + { + auto [f, l, r] = code_.fin_unit_one_split_improved(); + return {f, {num_sets(), std::move(l)}, {num_sets(), std::move(r)}}; + } + /// @} /// \brief Return the top-level disjuncts. 
/// diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index 237b10118..0b0d1fd5f 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Developpement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -25,7 +25,7 @@ namespace spot { namespace { - enum genem_version_t { spot28, atva19, spot29, spot210, spot211 }; + enum genem_version_t { spot28, atva19, spot29, spot210, spot211, spot212 }; static genem_version_t genem_version = spot29; } @@ -33,6 +33,8 @@ namespace spot { if (emversion == nullptr || !strcasecmp(emversion, "spot29")) genem_version = spot29; + else if (!strcasecmp(emversion, "spot212")) + genem_version = spot212; else if (!strcasecmp(emversion, "spot211")) genem_version = spot211; else if (!strcasecmp(emversion, "spot210")) @@ -44,7 +46,7 @@ namespace spot else throw std::invalid_argument("generic_emptiness_check version should be " "one of {spot28, atva19, spot29, spot210, " - "spot211}"); + "spot211, spot212}"); } namespace @@ -87,7 +89,9 @@ namespace spot scc_split_check(const scc_info& si, unsigned scc, const acc_cond& acc, Extra extra, acc_cond::mark_t tocut) { - if (genem_version == spot211 || genem_version == spot210) + if (genem_version == spot211 + || genem_version == spot212 + || genem_version == spot210) tocut |= acc.fin_unit(); scc_and_mark_filter filt(si, scc, tocut); filt.override_acceptance(acc); @@ -144,6 +148,20 @@ namespace spot } while (!acc.is_f()); } + else if (genem_version == spot212) + { + do + { + auto [fo, fpart, rest] = acc.fin_unit_one_split_improved(); + acc_cond::mark_t fo_m = {(unsigned) fo}; + if (!scc_split_check + (si, scc, fpart, extra, fo_m)) + if constexpr (EarlyStop) + return false; + acc = rest; + } + while (!acc.is_f()); + } else if (genem_version == spot29) do { diff --git a/tests/python/genem.py b/tests/python/genem.py index 962112ac0..970fe705b 100644 --- a/tests/python/genem.py +++ b/tests/python/genem.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2018-2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -307,15 +307,18 @@ def run_bench(automata): res3d = spot.generic_emptiness_check(aut) spot.generic_emptiness_check_select_version("spot211") res3e = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot212") + res3f = spot.generic_emptiness_check(aut) spot.generic_emptiness_check_select_version("spot29") res2 = spot.remove_fin(aut).is_empty() res1 = generic_emptiness2(aut) res = (str(res1)[0] + str(res2)[0] + str(res3a)[0] + str(res3b)[0] + str(res3c)[0] + str(res3d)[0] - + str(res3e)[0] + str(res4)[0] + str(res5)[0]) + + str(res3e)[0] + str(res3f)[0] + str(res4)[0] + + str(res5)[0]) print(res) - tc.assertIn(res, ('TTTTTTTTT', 'FFFFFFFFF')) - if res == 'FFFFFFFFF': + tc.assertIn(res, ('TTTTTTTTTT', 'FFFFFFFFFF')) + if res == 'FFFFFFFFFF': run3 = spot.generic_accepting_run(aut) tc.assertTrue(run3.replay(spot.get_cout())) From abe722297306c75ee1b2521e5dbc7a32a83d5df0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 12 May 2023 11:32:46 +0200 Subject: [PATCH 283/606] bitvect: work around incorrect warning from gcc * spot/misc/bitvect.hh: Don't free the old ptr if realloc() returns NULL, as this confuse GCC who warns that we are freeing something that has already been freed. Instead, let the ~bitvect() destructor handle this. --- spot/misc/bitvect.hh | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/spot/misc/bitvect.hh b/spot/misc/bitvect.hh index 3588b406e..74ab2bf3f 100644 --- a/spot/misc/bitvect.hh +++ b/spot/misc/bitvect.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -111,22 +111,22 @@ namespace spot return; if (storage_ == &local_storage_) { - block_t* new_storage_ = static_cast + block_t* new_storage = static_cast (malloc(new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + throw std::bad_alloc(); for (size_t i = 0; i < block_count_; ++i) - new_storage_[i] = storage_[i]; - storage_ = new_storage_; + new_storage[i] = storage_[i]; + storage_ = new_storage; } else { - auto old = storage_; - storage_ = static_cast - (realloc(old, new_block_count * sizeof(block_t))); - if (!storage_) - { - free(old); - throw std::bad_alloc(); - } + block_t* new_storage = static_cast + (realloc(storage_, new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + // storage_, untouched, will be freed by the destructor. + throw std::bad_alloc(); + storage_ = new_storage; } block_count_ = new_block_count; } @@ -134,8 +134,8 @@ namespace spot private: void grow() { - size_t new_block_count_ = (block_count_ + 1) * 7 / 5; - reserve_blocks(new_block_count_); + size_t new_block_count = (block_count_ + 1) * 7 / 5; + reserve_blocks(new_block_count); } public: From 7868115a8bcf98e6b89b9de1d2c06953c2ee21f2 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Thu, 4 May 2023 15:28:48 +0200 Subject: [PATCH 284/606] parity_type_to_parity: Add missing cases * spot/twaalgos/toparity.cc: Correct some cases where the solution was not detected. * tests/python/toparity.py: Update tests. 
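A small Python sketch (not part of this patch) of how parity_type_to_parity() is
exercised, as in tests/python/toparity.py; the input automaton here is just an
assumption, the test uses a hand-written HOA automaton instead:

    import spot
    a = spot.translate('GFa', 'Buchi')   # some candidate automaton
    b = spot.parity_type_to_parity(a)
    # b is None when `a` is not detected as parity-type; otherwise the
    # result must stay equivalent to the input.
    if b is not None:
        print(spot.are_equivalent(a, b))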
--- spot/twaalgos/toparity.cc | 53 ++++++++++++++++++++++++--------------- tests/python/toparity.py | 24 +++++++++++++++++- 2 files changed, 56 insertions(+), 21 deletions(-) diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index c936ef57b..a82c7d57a 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -95,7 +95,8 @@ namespace spot const bool need_equivalent, std::vector &status, std::vector &res_colors, - acc_cond &new_cond, bool &was_able_to_color) + acc_cond &new_cond, bool &was_able_to_color, + unsigned max_col) { auto& ev = aut->edge_vector(); const auto ev_size = ev.size(); @@ -134,7 +135,7 @@ namespace spot kind == cond_kind::INF_PARITY; unsigned max_iter = want_parity ? -1U : 1; - unsigned color = want_parity ? SPOT_MAX_ACCSETS - 1 : 0; + unsigned color = max_col; // Do we want always accepting transitions? // Don't consider CO_BUCHI as it is done by Büchi bool search_inf = kind != cond_kind::FIN_PARITY; @@ -167,14 +168,15 @@ namespace spot auto filter_data = filter_data_t{aut, status}; scc_info si(aut, aut_init, filter, &filter_data, scc_info_options::TRACK_STATES); + if (search_inf) + si.determine_unknown_acceptance(); bool worked = false; unsigned ssc_size = si.scc_count(); for (unsigned scc = 0; scc < ssc_size; ++scc) { // scc_info can detect that we will not be able to find an - // accepting/rejecting cycle. - if (!((search_inf && !si.is_accepting_scc(scc)) || - (!search_inf && !si.is_rejecting_scc(scc)))) + // accepting cycle. + if ((search_inf && si.is_accepting_scc(scc)) || !search_inf) { accepting_transitions_scc(si, scc, cond, {}, not_decidable_transitions, *keep); @@ -224,6 +226,8 @@ namespace spot break; } + new_cond = acc_cond(new_code); + // We check parity if (need_equivalent) { @@ -269,19 +273,19 @@ namespace spot aut->set_acceptance(acc_cond(aut_acc_comp)); } } - new_cond = acc_cond(new_code); + return true; } static twa_graph_ptr cond_type_main(const twa_graph_ptr &aut, const cond_kind kind, - bool &was_able_to_color) + bool &was_able_to_color, unsigned max_color) { std::vector res_colors; std::vector status; acc_cond new_cond; if (cond_type_main_aux(aut, kind, true, status, res_colors, new_cond, - was_able_to_color)) + was_able_to_color, max_color)) { auto res = make_twa_graph(aut, twa::prop_set::all()); auto &res_vector = res->edge_vector(); @@ -311,14 +315,19 @@ namespace spot bool was_able_to_color; // If the automaton is parity-type with a condition that has Inf as // outermost term - auto res = cond_type_main(aut, cond_kind::INF_PARITY, was_able_to_color); + auto res = cond_type_main(aut, cond_kind::INF_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); // If it was impossible to find an accepting edge, it is perhaps possible // to find a rejecting transition if (res == nullptr && !was_able_to_color) - res = cond_type_main(aut, cond_kind::FIN_PARITY, was_able_to_color); + res = cond_type_main(aut, cond_kind::FIN_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); if (res) + { + res->prop_state_acc(false); reduce_parity_here(res); + } return res; } @@ -326,14 +335,14 @@ namespace spot buchi_type_to_buchi(const twa_graph_ptr &aut) { bool useless; - return cond_type_main(aut, cond_kind::BUCHI, useless); + return cond_type_main(aut, cond_kind::BUCHI, useless, 0); } twa_graph_ptr co_buchi_type_to_co_buchi(const twa_graph_ptr &aut) { bool useless; - return cond_type_main(aut, cond_kind::CO_BUCHI, useless); + return cond_type_main(aut, cond_kind::CO_BUCHI, useless, 0); } // New version for paritizing @@ -1943,12 +1952,14 @@ 
namespace spot // Is the maximal color accepting? bool start_inf = true; cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, false, status, - res_colors, new_cond, was_able_to_color); + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); // Otherwise we can try to find a rejecting transition as first step if (!was_able_to_color) { cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, false, status, - res_colors, new_cond, was_able_to_color); + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); if (!was_able_to_color) return false; start_inf = false; @@ -2127,11 +2138,11 @@ namespace spot bool is_co_bu = false; bool was_able_to_color; if (!cond_type_main_aux(sub_aut, cond_kind::BUCHI, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, 0)) { is_co_bu = true; if (!cond_type_main_aux(sub_aut, cond_kind::CO_BUCHI, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, 0)) return false; change_to_odd(); } @@ -2172,16 +2183,18 @@ namespace spot acc_cond new_cond; bool was_able_to_color; if (!cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) { if (!cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) return false; } bool is_max, is_odd; new_cond.is_parity(is_max, is_odd); - auto [min, max] = - std::minmax_element(res_colors.begin() + 1, res_colors.end()); + auto min = + std::min_element(res_colors.begin() + 1, res_colors.end()); // cond_type_main_aux returns a parity max condition assert(is_max); auto col_fun = diff --git a/tests/python/toparity.py b/tests/python/toparity.py index ab5fbf314..80c2c19ef 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -547,4 +547,26 @@ State: 9 3 {4} 2 3 {4} 6 --END-- b = spot.iar_maybe(a) tc.assertEqual(b.num_states(), 87) tc.assertTrue(a.equivalent_to(b)) -test(a, [87, 91, 91, 87, 87, 87, 51, 51, 21]) +test(a, [87, 91, 91, 87, 87, 87, 51, 35, 21]) + +a = spot.automaton("""HOA: v1 +States: 4 +Start: 0 +AP: 2 "p0" "p1" +Acceptance: 2 Fin(1) & Fin(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[!0&!1] 2 +[!0&!1] 1 +State: 1 +[!0&1] 0 +[0&1] 3 +State: 2 +[0&!1] 1 +State: 3 {0} +[!0&1] 3 +[!0&!1] 1 +--END--""") +b = spot.parity_type_to_parity(a) +tc.assertTrue(spot.are_equivalent(a, b)) From 83696633800d3a7f341790f9fda4e2da8c699d43 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 12 Jun 2023 11:01:30 +0200 Subject: [PATCH 285/606] fix spurious failure of ltlcross4.test Reported by Yuri Victorovich. * tests/core/ltlcross4.test: Drop the 'formula' column before computing aggregates. It causes warnings in some Pandas versions, and errors in others. --- tests/core/ltlcross4.test | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/core/ltlcross4.test b/tests/core/ltlcross4.test index b7c85979a..7d124d689 100755 --- a/tests/core/ltlcross4.test +++ b/tests/core/ltlcross4.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2017, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2012-2014, 2017, 2020, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -54,8 +54,7 @@ x = pandas.read_csv("output.csv") # We used to call describe() instead of agg(), # but the output of this function was changed # in pandas 0.20. -print(x.filter(('formula', 'tool', - 'states', 'transitions')).\ +print(x.filter(('tool', 'states', 'transitions')).\ groupby('tool').\ agg([np.mean, np.std, np.min, np.max])) EOF From 61b457a37ed8c991bfc75b9231475fe4f1f70d40 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 21 Jul 2023 17:06:01 +0200 Subject: [PATCH 286/606] bin: allow %l to be used to print serial numbers * NEWS: Mention it. * bin/autfilt.cc, bin/common_aoutput.cc, bin/common_aoutput.hh, bin/common_output.cc, bin/common_output.hh, bin/dstar2tgba.cc, bin/genaut.cc, bin/genltl.cc, bin/ltl2tgba.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlgrind.cc, bin/randaut.cc, bin/randltl.cc: Implement it. * doc/org/oaut.org: Add a short example. * tests/core/serial.test: New file. * tests/Makefile.am: Add it. --- NEWS | 12 +++++++ bin/autfilt.cc | 6 ++-- bin/common_aoutput.cc | 20 ++++++++---- bin/common_aoutput.hh | 17 ++++++---- bin/common_output.cc | 19 ++++++++--- bin/common_output.hh | 4 ++- bin/dstar2tgba.cc | 4 ++- bin/genaut.cc | 3 +- bin/genltl.cc | 8 +++-- bin/ltl2tgba.cc | 5 +-- bin/ltldo.cc | 7 ++--- bin/ltlfilt.cc | 5 ++- bin/ltlgrind.cc | 9 ++++-- bin/randaut.cc | 5 +-- bin/randltl.cc | 9 ++++-- doc/org/oaut.org | 19 +++++++++-- tests/Makefile.am | 1 + tests/core/serial.test | 71 ++++++++++++++++++++++++++++++++++++++++++ 18 files changed, 183 insertions(+), 41 deletions(-) create mode 100755 tests/core/serial.test diff --git a/NEWS b/NEWS index 9a2b7ccbd..c840cea18 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,17 @@ New in spot 2.11.5.dev (not yet released) + Command-line tools: + + - In places that accept format strings with '%' sequences, like + options --stats, --name, or --output, the new '%l' can now be used + to produce the 0-based serial number of the produced object. This + differs from the existing '%L' that is usually related to the line + number of the input (when that makes sense). 
For instance to + split a file that contains many automaton into several files, one + per automata, do + + autfilt input.hoa -o output-%l.hoa + Library: - The following new trivial simplifications have been implemented for SEREs: diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 4487fad8b..eec2246b3 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1680,8 +1680,6 @@ namespace return 0; } - ++match_count; - if (aliases) { if (opt_aliases) @@ -1690,7 +1688,9 @@ namespace set_aliases(aut, {}); } printer.print(aut, timer, nullptr, haut->filename.c_str(), -1, - haut, prefix, suffix); + match_count, haut, prefix, suffix); + + ++match_count; if (opt_max_count >= 0 && match_count >= opt_max_count) abort_run = true; diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 60f83289e..b3a26c410 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -198,6 +198,8 @@ static const argp_option io_options[] = " minuscules for output):", 4 }, { "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, F_doc, 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, L_doc, 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "serial number of the output automaton (0-based)", 0 }, { "%H, %h", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the automaton in HOA format on a single line (use %[opt]H or %[opt]h " "to specify additional options as in --hoa=opt)", 0 }, @@ -269,6 +271,8 @@ static const argp_option o_options[] = "the following interpreted sequences:", 4 }, { "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, F_doc, 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, L_doc, 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "serial number of the output automaton (0-based)", 0 }, { "%h", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the automaton in HOA format on a single line (use %[opt]h " "to specify additional options as in --hoa=opt)", 0 }, @@ -442,6 +446,7 @@ hoa_stat_printer::hoa_stat_printer(std::ostream& os, const char* format, if (input != ltl_input) declare('f', &filename_); // Override the formula printer. declare('h', &output_aut_); + declare('l', &index_); declare('m', &aut_name_); declare('u', &aut_univbranch_); declare('w', &aut_word_); @@ -453,11 +458,12 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, const char* filename, int loc, + unsigned index, const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix) { timer_ = ptimer; - + index_ = index; filename_ = filename ? filename : ""; csv_prefix_ = csv_prefix ? csv_prefix : ""; csv_suffix_ = csv_suffix ? csv_suffix : ""; @@ -599,6 +605,7 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, // Input location for errors and statistics. 
const char* filename, int loc, + unsigned index, // input automaton for statistics const spot::const_parsed_aut_ptr& haut, const char* csv_prefix, @@ -622,7 +629,8 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, if (opt_name) { name.str(""); - namer.print(haut, aut, f, filename, loc, ptimer, csv_prefix, csv_suffix); + namer.print(haut, aut, f, filename, loc, index, + ptimer, csv_prefix, csv_suffix); aut->set_named_prop("automaton-name", new std::string(name.str())); } @@ -630,8 +638,8 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, if (opt_output) { outputname.str(""); - outputnamer.print(haut, aut, f, filename, loc, ptimer, - csv_prefix, csv_suffix); + outputnamer.print(haut, aut, f, filename, loc, index, + ptimer, csv_prefix, csv_suffix); std::string fname = outputname.str(); auto [it, b] = outputfiles.try_emplace(fname, nullptr); if (b) @@ -660,8 +668,8 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, break; case Stats: statistics.set_output(*out); - statistics.print(haut, aut, f, filename, loc, ptimer, - csv_prefix, csv_suffix) << '\n'; + statistics.print(haut, aut, f, filename, loc, index, + ptimer, csv_prefix, csv_suffix) << '\n'; break; } flush_cout(); diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index f57beae84..051212b3d 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -133,10 +133,10 @@ public: void print(std::ostream& os, const char* pos) const override; }; -/// \brief prints various statistics about a TGBA +/// \brief prints various statistics about a TwA /// /// This object can be configured to display various statistics -/// about a TGBA. Some %-sequence of characters are interpreted in +/// about a TwA. Some %-sequence of characters are interpreted in /// the format string, and replaced by the corresponding statistics. class hoa_stat_printer: protected spot::stat_printer { @@ -153,10 +153,12 @@ public: /// to be output. std::ostream& print(const spot::const_parsed_aut_ptr& haut, - const spot::const_twa_graph_ptr& aut, - spot::formula f, - const char* filename, int loc, const spot::process_timer& ptimer, - const char* csv_prefix, const char* csv_suffix); + const spot::const_twa_graph_ptr& aut, + spot::formula f, + const char* filename, int loc, + unsigned index, + const spot::process_timer& ptimer, + const char* csv_prefix, const char* csv_suffix); private: spot::printable_value filename_; @@ -165,6 +167,7 @@ private: spot::printable_value aut_name_; spot::printable_value aut_word_; spot::printable_value haut_word_; + spot::printable_value index_; spot::printable_acc_cond haut_gen_acc_; spot::printable_size haut_states_; spot::printable_size haut_edges_; @@ -206,6 +209,8 @@ public: // Input location for errors and statistics. 
const char* filename = nullptr, int loc = -1, + // serial numbner + unsigned index = 0, // Time and input automaton for statistics const spot::const_parsed_aut_ptr& haut = nullptr, const char* csv_prefix = nullptr, diff --git a/bin/common_output.cc b/bin/common_output.cc index 93cb2dfaf..6d414583a 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -160,6 +160,7 @@ namespace spot::formula f; const char* filename; const char* line; + unsigned index; const char* prefix; const char* suffix; }; @@ -237,6 +238,7 @@ namespace declare('R', &timer_); declare('r', &timer_); declare('L', &line_); + declare('l', &index_); declare('s', &size_); declare('h', &class_); declare('n', &nesting_); @@ -248,7 +250,8 @@ namespace } std::ostream& - print(const formula_with_location& fl, spot::process_timer* ptimer) + print(const formula_with_location& fl, + spot::process_timer* ptimer) { if (has('R') || has('r')) timer_ = *ptimer; @@ -256,6 +259,7 @@ namespace fl_ = &fl; filename_ = fl.filename ? fl.filename : ""; line_ = fl.line; + index_ = fl.index; prefix_ = fl.prefix ? fl.prefix : ""; suffix_ = fl.suffix ? fl.suffix : ""; auto f = fl_.val()->f; @@ -288,6 +292,7 @@ namespace printable_timer timer_; spot::printable_value filename_; spot::printable_value line_; + spot::printable_value index_; spot::printable_value prefix_; spot::printable_value suffix_; spot::printable_value size_; @@ -356,6 +361,7 @@ static void output_formula(std::ostream& out, spot::formula f, spot::process_timer* ptimer, const char* filename, const char* linenum, + unsigned index, const char* prefix, const char* suffix) { if (!format) @@ -391,7 +397,8 @@ output_formula(std::ostream& out, } else { - formula_with_location fl = { f, filename, linenum, prefix, suffix }; + formula_with_location fl = { f, filename, linenum, + index, prefix, suffix }; format->print(fl, ptimer); } } @@ -399,6 +406,7 @@ output_formula(std::ostream& out, void output_formula_checked(spot::formula f, spot::process_timer* ptimer, const char* filename, const char* linenum, + unsigned index, const char* prefix, const char* suffix) { if (output_format == count_output) @@ -414,7 +422,8 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, if (outputnamer) { outputname.str(""); - formula_with_location fl = { f, filename, linenum, prefix, suffix }; + formula_with_location fl = { f, filename, linenum, + index, prefix, suffix }; outputnamer->print(fl, ptimer); std::string fname = outputname.str(); auto [it, b] = outputfiles.try_emplace(fname, nullptr); @@ -422,7 +431,7 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, it->second.reset(new output_file(fname.c_str())); out = &it->second->ostream(); } - output_formula(*out, f, ptimer, filename, linenum, prefix, suffix); + output_formula(*out, f, ptimer, filename, linenum, index, prefix, suffix); *out << output_terminator; // Make sure we abort if we can't write to std::cout anymore // (like disk full or broken pipe with SIGPIPE ignored). 
@@ -432,10 +441,12 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, void output_formula_checked(spot::formula f, spot::process_timer* ptimer, const char* filename, int linenum, + unsigned index, const char* prefix, const char* suffix) { output_formula_checked(f, ptimer, filename, std::to_string(linenum).c_str(), + index, prefix, suffix); } diff --git a/bin/common_output.hh b/bin/common_output.hh index 1cff67229..3b08db27f 100644 --- a/bin/common_output.hh +++ b/bin/common_output.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2018, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -81,11 +81,13 @@ void output_formula_checked(spot::formula f, spot::process_timer* ptimer = nullptr, const char* filename = nullptr, const char* linenum = nullptr, + unsigned output_index = 0U, const char* prefix = nullptr, const char* suffix = nullptr); void output_formula_checked(spot::formula f, spot::process_timer* ptimer, const char* filename, int linenum, + unsigned output_index, const char* prefix = nullptr, const char* suffix = nullptr); diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 4b2ec9662..6d7414587 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -129,7 +129,9 @@ namespace timer.start(); auto aut = post.run(haut->aut, nullptr); timer.stop(); - printer.print(aut, timer, nullptr, haut->filename.c_str(), -1, haut); + static unsigned index = 0; + printer.print(aut, timer, nullptr, haut->filename.c_str(), -1, + index++, haut); flush_cout(); return 0; } diff --git a/bin/genaut.cc b/bin/genaut.cc index f8d6b93ff..fbcffb48d 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -122,7 +122,8 @@ output_pattern(gen::aut_pattern_id pattern, int n) twa_graph_ptr aut = spot::gen::aut_pattern(pattern, n); timer.stop(); automaton_printer printer; - printer.print(aut, timer, nullptr, aut_pattern_name(pattern), n); + static unsigned serial = 0; + printer.print(aut, timer, nullptr, aut_pattern_name(pattern), n, serial++); } static void diff --git a/bin/genltl.cc b/bin/genltl.cc index ef8049171..79b71b699 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -192,6 +192,8 @@ static const argp_option options[] = "the formula (in the selected syntax)", 0 }, { "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the name of the pattern", 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "serial number of the output formula (0-based)", 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the argument of the pattern", 0 }, { "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, @@ -287,6 +289,8 @@ parse_opt(int key, char* arg, struct argp_state*) } +static unsigned output_count = 0U; + static void output_pattern(gen::ltl_pattern_id pattern, int n, int n2) { @@ -303,14 +307,14 @@ output_pattern(gen::ltl_pattern_id pattern, int n, int n2) if (opt_positive || !opt_negative) { output_formula_checked(f, nullptr, gen::ltl_pattern_name(pattern), - args.c_str()); + args.c_str(), output_count++); } if (opt_negative) { std::string tmp = "!"; tmp += gen::ltl_pattern_name(pattern); output_formula_checked(formula::Not(f), nullptr, tmp.c_str(), - args.c_str()); + args.c_str(), output_count++); } } diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index 73a9a23c6..1229cd422 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -155,8 +155,9 @@ namespace auto aut = trans.run(&f); timer.stop(); - printer.print(aut, timer, f, filename, 
linenum, nullptr, - prefix, suffix); + static unsigned index = 0; + printer.print(aut, timer, f, filename, linenum, index++, + nullptr, prefix, suffix); // If we keep simplification caches around, atomic propositions // will still be defined, and one translation may influence the // variable order of the next one. diff --git a/bin/ltldo.cc b/bin/ltldo.cc index 6e7bf5ec7..c695631df 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -360,8 +360,7 @@ namespace const char* csv_prefix, const char* csv_suffix) { static long int output_count = 0; - ++output_count; - printer.print(aut, ptimer, f, filename, loc, nullptr, + printer.print(aut, ptimer, f, filename, loc, output_count++, nullptr, csv_prefix, csv_suffix); if (opt_max_count >= 0 && output_count >= opt_max_count) abort_run = true; @@ -420,8 +419,8 @@ namespace aut = post.run(aut, f); if (best_type) { - best_printer.print(nullptr, aut, f, filename, linenum, timer, - prefix, suffix); + best_printer.print(nullptr, aut, f, filename, linenum, 0, + timer, prefix, suffix); std::string aut_stats = best_stream.str(); if (!best_aut || (strverscmp(best_stats.c_str(), aut_stats.c_str()) diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 81e895d42..ed2f2d08d 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -254,6 +254,8 @@ static const argp_option options[] = "the formula (in the selected syntax)", 0 }, { "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the name of the input file", 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the serial number of the output formula", 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the original line number in the input file", 0 }, { "%r", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, @@ -852,7 +854,8 @@ namespace std::to_string(linenum).c_str()) << ")\n"; } one_match = true; - output_formula_checked(f, &timer, filename, linenum, prefix, suffix); + output_formula_checked(f, &timer, filename, linenum, + match_count, prefix, suffix); ++match_count; } return 0; diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index b59569a59..5e56f7d2c 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2014-2019, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -87,6 +87,8 @@ static const argp_option options[] = { "the formula (in the selected syntax)", 0 }, { "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the name of the input file", 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the serial number of the output formula (0-based)", 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the original line number in the input file", 0 }, { "%<", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, @@ -112,6 +114,8 @@ static const argp_child children[] = { namespace { + static unsigned output_count = 0; + class mutate_processor final: public job_processor { public: @@ -122,7 +126,8 @@ namespace auto mutations = spot::mutate(f, mut_opts, max_output, mutation_nb, opt_sort); for (auto g: mutations) - output_formula_checked(g, nullptr, filename, linenum, prefix, suffix); + output_formula_checked(g, nullptr, filename, linenum, + output_count++, prefix, suffix); return 0; } }; diff --git a/bin/randaut.cc b/bin/randaut.cc index 1ceb82ee0..4e2065c2a 100644 --- a/bin/randaut.cc +++ b/bin/randaut.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche +// Copyright (C) 2012-2016, 2018-2020, 2022, 2023 Laboratoire de Recherche // et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -406,7 +406,8 @@ main(int argc, char** argv) timer.stop(); printer.print(aut, timer, nullptr, - opt_seed_str, automaton_num, nullptr); + opt_seed_str, automaton_num, + automaton_num, nullptr); ++automaton_num; if (opt_automata > 0 && automaton_num >= opt_automata) diff --git a/bin/randltl.cc b/bin/randltl.cc index 749fcf373..4dacf5264 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -115,8 +115,10 @@ static const argp_option options[] = "the following interpreted sequences:", -19 }, { "%f", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "the formula (in the selected syntax)", 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the (serial) number of the formula (0-based)", 0 }, { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "the (serial) number of the formula", 0 }, + "the (serial) number of the formula (1-based)", 0 }, { "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "a single %", 0 }, COMMON_LTL_OUTPUT_SPECS, @@ -305,9 +307,9 @@ main(int argc, char** argv) exit(0); } + int count = 0; while (opt_formulas < 0 || opt_formulas--) { - static int count = 0; spot::formula f = rg.next(); if (!f) { @@ -316,7 +318,8 @@ main(int argc, char** argv) } else { - output_formula_checked(f, nullptr, nullptr, ++count); + output_formula_checked(f, nullptr, nullptr, count + 1, count); + ++count; } }; flush_cout(); diff --git a/doc/org/oaut.org b/doc/org/oaut.org index 65687e493..1d5d8a7cb 100644 --- a/doc/org/oaut.org +++ b/doc/org/oaut.org @@ -725,7 +725,8 @@ randaut --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' (iw) inherently weak. Use uppercase letters to negate them. %d 1 if the output is deterministic, 0 otherwise - %e number of reachable edges + %e, %[LETTER]e number of edges (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). 
%F seed number %g, %[LETTERS]g acceptance condition (in HOA syntax); add brackets to print an acceptance name instead and LETTERS to @@ -740,6 +741,7 @@ randaut --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' %[opt]h to specify additional options as in --hoa=opt) %L automaton number + %l serial number of the output automaton (0-based) %m name of the automaton %n number of nondeterministic states in output %p 1 if the output is complete, 0 otherwise @@ -749,8 +751,11 @@ randaut --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' LETTERS to restrict to(u) user time, (s) system time, (p) parent process, or (c) children processes. - %s number of reachable states - %t number of reachable transitions + %s, %[LETTER]s number of states (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). + %t, %[LETTER]t number of transitions (add one LETTER to select + (r) reachable [default], (u) unreachable, (a) + all). %u, %[e]u number of states (or [e]dges) with universal branching %u, %[LETTER]u 1 if the automaton contains some universal @@ -996,6 +1001,14 @@ randaut -D -n 20 -Q2 1 -o '>>out-det%d.hoa' (You need the quotes so that the shell does not interpret =>>=.) +The =%l= sequence is a number that is incremented for each output +automaton. For instance if =input.hoa= contains multiple automata, +you can separate them into separate files with: + +#+BEGIN_SRC sh :exports code +autilt input.hoa -o output-%l.hoa +#+END_SRC + #+BEGIN_SRC sh :results silent :exports results rm -f out-det0.hoa out-det1.hoa #+END_SRC diff --git a/tests/Makefile.am b/tests/Makefile.am index 3bd43d5f4..7e8a42347 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -230,6 +230,7 @@ TESTS_twa = \ core/cube.test \ core/alternating.test \ core/gamehoa.test \ + core/serial.test \ core/ltlcross3.test \ core/ltlcross5.test \ core/taatgba.test \ diff --git a/tests/core/serial.test b/tests/core/serial.test new file mode 100755 index 000000000..285c04807 --- /dev/null +++ b/tests/core/serial.test @@ -0,0 +1,71 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
./defs +set -e + +# all tools should be able to use %l as serial number for their output + +# Make sure serial numbers count the output automata +randaut -n10 --name='aut %l' 2 | + autfilt -N3..5 --name='%M/out %l' | + autfilt --stats=%M >out +cat >exp < aut.hoa +rm aut-?.hoa +autfilt aut.hoa -o aut-%l.hoa + +# check serial output in various tools +genaut --m-nba=2..3 --name='%F=%L/%l' | autfilt --stats=%M >out +genltl --and-f=2..3 --stats=%F=%L/%l >> out +ltl2tgba a b --name=%f/%l | autfilt --stats=%M >> out +ltldo -f a -f b ltl2tgba --name=%f/%l | autfilt --stats=%M >> out +genltl --or-g=2..5 --stats=%L,%l,%f | + ltlfilt -F -/3 -N 2..3 --stats='%<,%l' >>out +randltl -n10 3 --stats=%l,%f | + ltlfilt -F -/2 -N 2..3 --stats='%<,%l' >> out +cat >exp< Date: Mon, 24 Jul 2023 12:16:30 +0200 Subject: [PATCH 287/606] * tests/core/ltlcross4.test: Work around recent Pandas change. --- tests/core/ltlcross4.test | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/core/ltlcross4.test b/tests/core/ltlcross4.test index 7d124d689..f171876a9 100755 --- a/tests/core/ltlcross4.test +++ b/tests/core/ltlcross4.test @@ -65,12 +65,14 @@ $PYTHON test.py >out.1 # remove trailing whitespace from pandas' output, and limit to 6 # lines, because Pandas 0.13 adds the size of the dataframe # afterwards. Alse the spacing between columns differs from version -# to version. -sed 's/[ \t]*$//g;6q' py.out +# to version. The name of the output columns changed from "amin amax" +# to "min max" in some Pandas version (maybe around 2.0). +sed 's/[ \t]*$//g;s/amin/min/g;s/amax/max/g;6q' py.out cat >expected < Date: Mon, 24 Jul 2023 16:56:24 +0200 Subject: [PATCH 288/606] bin: handle thousands of output files Fixes #534. * bin/common_file.hh, bin/common_file.cc: Make it possible to reopen a closed file. * bin/common_output.cc, bin/common_aoutput.cc: Add a heuristic to decide when to close files. * tests/core/serial.test: Add a test case. * NEWS: Mention the issue. --- NEWS | 7 +++++++ bin/common_aoutput.cc | 20 ++++++++++++++++++++ bin/common_file.cc | 19 ++++++++++++++++++- bin/common_file.hh | 2 ++ bin/common_output.cc | 20 ++++++++++++++++++++ tests/core/serial.test | 9 +++++++++ 6 files changed, 76 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index c840cea18..feb630320 100644 --- a/NEWS +++ b/NEWS @@ -29,6 +29,13 @@ New in spot 2.11.5.dev (not yet released) - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() that converts a BDD into a CNF instead of a DNF. + Bug fixes: + + - Running command lines such as "autfilt input.hoa -o output-%L.hoa" + where thousands of different filenames can be created failed with + "Too many open files". (Issue #534) + + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index b3a26c410..1311c7c28 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -644,7 +644,27 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, auto [it, b] = outputfiles.try_emplace(fname, nullptr); if (b) it->second.reset(new output_file(fname.c_str())); + else + // reopen if the file has been closed; see below + it->second->reopen_for_append(fname); out = &it->second->ostream(); + + // If we have opened fewer than 10 files, we keep them all open + // to avoid wasting time on open/close calls. + // + // However we cannot keep all files open, especially in + // scenarios were we use thousands of files only once. 
To keep + // things simple, we only close the previous file if it is not + // the current output. This way we still save the close/open + // cost when consecutive automata are sent to the same file. + static output_file* previous = nullptr; + static const std::string* previous_name = nullptr; + if (previous + && outputfiles.size() > 10 + && &previous->ostream() != out) + previous->close(*previous_name); + previous = it->second.get(); + previous_name = &it->first; } // Output it. diff --git a/bin/common_file.cc b/bin/common_file.cc index 4e56c6d54..ebad8b878 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -44,13 +44,30 @@ output_file::output_file(const char* name, bool force_append) os_ = of_.get(); } +void +output_file::reopen_for_append(const std::string& name) +{ + if (of_ && of_->is_open()) // nothing to do + return; + const char* cname = name.c_str(); + if (cname[0] == '>' && cname[1] == '>') + cname += 2; + if (name[0] == '-' && name[1] == 0) + { + os_ = &std::cout; + return; + } + of_->open(cname, std::ios_base::app); + if (!*of_) + error(2, errno, "cannot reopen '%s'", cname); +} void output_file::close(const std::string& name) { // We close of_, not os_, so that we never close std::cout. if (os_) os_->flush(); - if (of_) + if (of_ && of_->is_open()) of_->close(); if (os_ && !*os_) error(2, 0, "error writing to %s", diff --git a/bin/common_file.hh b/bin/common_file.hh index b6aa0bec3..51000d18c 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -37,6 +37,8 @@ public: void close(const std::string& name); + void reopen_for_append(const std::string& name); + bool append() const { return append_; diff --git a/bin/common_output.cc b/bin/common_output.cc index 6d414583a..2f851f109 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -429,7 +429,27 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, auto [it, b] = outputfiles.try_emplace(fname, nullptr); if (b) it->second.reset(new output_file(fname.c_str())); + else + // reopen if the file has been closed; see below + it->second->reopen_for_append(fname); out = &it->second->ostream(); + + // If we have opened fewer than 10 files, we keep them all open + // to avoid wasting time on open/close calls. + // + // However we cannot keep all files open, especially in + // scenarios were we use thousands of files only once. To keep + // things simple, we only close the previous file if it is not + // the current output. This way we still save the close/open + // cost when consecutive formulas are sent to the same file. + static output_file* previous = nullptr; + static const std::string* previous_name = nullptr; + if (previous + && outputfiles.size() > 10 + && &previous->ostream() != out) + previous->close(*previous_name); + previous = it->second.get(); + previous_name = &it->first; } output_formula(*out, f, ptimer, filename, linenum, index, prefix, suffix); *out << output_terminator; diff --git a/tests/core/serial.test b/tests/core/serial.test index 285c04807..6f4c48c6a 100755 --- a/tests/core/serial.test +++ b/tests/core/serial.test @@ -69,3 +69,12 @@ b/1 2,1 EOF diff -u out exp + + +# Split on more than 1024 files. In Spot < 2.12 this was likely +# to run out of file descriptors, because they weren't closed. 
+randaut -Q3 2 -n 2000 -o randaut-%l.hoa +test 2000 = `ls -l randaut-*.hoa | wc -l` +# likewise for LTL formulas +randltl 2 -n 2000 -o randltl-%l.ltl +test 2000 = `ls -l randltl-*.ltl | wc -l` From 3b59240133b6c2d858ae341f9018da205b6ee017 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 25 Jul 2023 16:44:21 +0200 Subject: [PATCH 289/606] gen: generalize fin_unit to mafins() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on work with Jan Strejček. * spot/twa/acc.cc, spot/twa/acc.hh (acc_cond::mafins, acc_cond::acc_code::mafins): New methods. (fin_unit_one_split, fin_unit_one_split_improved): Use mafins() instead on fin_unit(). * spot/twaalgos/genem.cc: Use mafins() instead on fin_unit(). --- spot/twa/acc.cc | 111 +++++++++++++++++++++++++++-------------- spot/twa/acc.hh | 35 +++++++++++++ spot/twaalgos/genem.cc | 10 ++-- 3 files changed, 114 insertions(+), 42 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index d73af33b0..8fec855af 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -2646,34 +2646,65 @@ namespace spot namespace { - int has_top_fin(const acc_cond::acc_word* pos, bool top = true) + static acc_cond::mark_t mafins_rec(const acc_cond::acc_word* pos) + { + acc_cond::mark_t res{}; + auto sub = pos - pos->sub.size; + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + --pos; + do + { + res |= mafins_rec(pos); + pos -= pos->sub.size + 1; + } + while (sub < pos); + return res; + case acc_cond::acc_op::Or: + --pos; + res = mafins_rec(pos); + pos -= pos->sub.size + 1; + while (sub < pos) + { + if (!res) + return res; + res &= mafins_rec(pos); + pos -= pos->sub.size + 1; + } + return res; + case acc_cond::acc_op::Inf: + case acc_cond::acc_op::InfNeg: + case acc_cond::acc_op::FinNeg: + return res; + case acc_cond::acc_op::Fin: + auto m = pos[-1].mark; + if (m.is_singleton()) + return m; + return res; + } + SPOT_UNREACHABLE(); + return res; + } + + + int has_top_fin(const acc_cond::acc_word* pos) { if (pos->sub.op == acc_cond::acc_op::Fin) { acc_cond::mark_t m = pos[-1].mark; - if (top || m.is_singleton()) - return m.min_set() - 1; + return m.min_set() - 1; } - else if (pos->sub.op == acc_cond::acc_op::And) + else if (pos->sub.op == acc_cond::acc_op::And + || pos->sub.op == acc_cond::acc_op::Or) { auto sub = pos - pos->sub.size; do { --pos; - if (int f = has_top_fin(pos, false); f >= 0) - return f; - pos -= pos->sub.size; - } - while (sub < pos); - } - else if (top && pos->sub.op == acc_cond::acc_op::Or) - { - auto sub = pos - pos->sub.size; - do - { - --pos; - if (int f = has_top_fin(pos); f >= 0) - return f; + acc_cond::mark_t m = mafins_rec(pos); + if (m) + return m.min_set() - 1; pos -= pos->sub.size; } while (sub < pos); @@ -2740,12 +2771,6 @@ namespace spot return false; } - // Check whether pos looks like Fin(f) or Fin(f)&rest - bool is_conj_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) - { - return has_fin(pos, f); - } - acc_cond::acc_code extract_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { @@ -2784,16 +2809,21 @@ namespace spot template std::pair - split_top_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + split_top_fin(const acc_cond::acc_word* pos, unsigned f) { auto start = pos - pos->sub.size; switch (pos->sub.op) { - case acc_cond::acc_op::And: case acc_cond::acc_op::Fin: - if (is_conj_fin(pos, f)) + { + auto m = pos[-1].mark; + return {acc_cond::acc_code::fin({f}), + acc_cond::acc_code::fin(m - acc_cond::mark_t{f})}; + } + case acc_cond::acc_op::And: + if 
(mafins_rec(pos).has(f)) return {pos, acc_cond::acc_code::f()}; - SPOT_FALLTHROUGH; + return {acc_cond::acc_code::f(), pos}; case acc_cond::acc_op::Inf: return {acc_cond::acc_code::f(), pos}; case acc_cond::acc_op::Or: @@ -2803,20 +2833,19 @@ namespace spot auto right = acc_cond::acc_code::f(); do { - if (is_conj_fin(pos, f)) + acc_cond::mark_t mf = mafins_rec(pos); + if (mf.has(f)) { - auto tmp = strip_rec(pos, f, true, false); + auto tmp = strip_rec(pos, {f}, true, false); tmp |= std::move(left); std::swap(tmp, left); } - else if (deeper_check - && has_top_fin(pos) == -1 - && has_fin(pos, f)) + else if (deeper_check && !mf && has_fin(pos, {f})) { - auto tmp = strip_rec(pos, f, true, false); + auto tmp = strip_rec(pos, {f}, true, false); tmp |= std::move(left); std::swap(tmp, left); - tmp = force_inf_rec(pos, f); + tmp = force_inf_rec(pos, {f}); tmp |= std::move(right); std::swap(tmp, right); } @@ -2863,7 +2892,7 @@ namespace spot int selected_fin = has_top_fin(pos); if (selected_fin >= 0) { - auto [left, right] = split_top_fin(pos, {(unsigned) selected_fin}); + auto [left, right] = split_top_fin(pos, (unsigned) selected_fin); return {selected_fin, std::move(left), std::move(right)}; } selected_fin = fin_one(); @@ -2884,7 +2913,7 @@ namespace spot if (selected_fin >= 0) { auto [left, right] = - split_top_fin(pos, {(unsigned) selected_fin}); + split_top_fin(pos, (unsigned) selected_fin); return {selected_fin, std::move(left), std::move(right)}; } selected_fin = fin_one(); @@ -3075,6 +3104,14 @@ namespace spot return res; } + acc_cond::mark_t + acc_cond::acc_code::mafins() const + { + if (empty() || is_f()) + return {}; + return mafins_rec(&back()); + } + acc_cond::mark_t acc_cond::acc_code::inf_unit() const { mark_t res = {}; diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 1b46e4024..6d673bb8c 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1254,8 +1254,24 @@ namespace spot /// If multiple unit-Fin appear as unit-clauses, the set of /// those will be returned. For instance applied to /// `Fin(0)&Fin(1)&(Inf(2)|Fin(3))`, this will return `{0,1}`. + /// + /// \see acc_cond::acc_code::mafins mark_t fin_unit() const; + /// \brief Find a `Fin(i)` that is mandatory. + /// + /// This return a mark_t `{i}` such that `Fin(i)` appears in + /// all branches of the AST of the acceptance conditions. This + /// is therefore a bit stronger than fin_unit(). + /// For instance on `Inf(1)&Fin(2) | Fin(2)&Fin(3)` this will + /// return `{2}`. + /// + /// If multiple mandatory Fins exist, the set of those will be + /// returned. + /// + /// \see acc_cond::acc_code::fin_unit + mark_t mafins() const; + /// \brief Find a `Inf(i)` that is a unit clause. /// /// This return a mark_t `{i}` such that `Inf(i)` appears as a @@ -2204,11 +2220,30 @@ namespace spot /// If multiple unit-Fin appear as unit-clauses, the set of /// those will be returned. For instance applied to /// `Fin(0)&Fin(1)&(Inf(2)|Fin(3))``, this will return `{0,1}`. + /// + /// \see acc_cond::mafins mark_t fin_unit() const { return code_.fin_unit(); } + /// \brief Find a `Fin(i)` that is mandatory. + /// + /// This return a mark_t `{i}` such that `Fin(i)` appears in + /// all branches of the AST of the acceptance conditions. This + /// is therefore a bit stronger than fin_unit(). + /// For instance on `Inf(1)&Fin(2) | Fin(2)&Fin(3)` this will + /// return `{2}`. + /// + /// If multiple mandatory Fins exist, the set of those will be + /// returned. 
+ /// + /// \see acc_cond::fin_unit + mark_t mafins() const + { + return code_.mafins(); + } + /// \brief Find a `Inf(i)` that is a unit clause. /// /// This return a mark_t `{i}` such that `Inf(i)` appears as a diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index 0b0d1fd5f..77597db9f 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -92,7 +92,7 @@ namespace spot if (genem_version == spot211 || genem_version == spot212 || genem_version == spot210) - tocut |= acc.fin_unit(); + tocut |= acc.mafins(); scc_and_mark_filter filt(si, scc, tocut); filt.override_acceptance(acc); scc_info upper_si(filt, EarlyStop @@ -167,7 +167,7 @@ namespace spot { acc_cond::acc_code rest = acc_cond::acc_code::f(); for (const acc_cond& disjunct: acc.top_disjuncts()) - if (acc_cond::mark_t fu = disjunct.fin_unit()) + if (acc_cond::mark_t fu = disjunct.mafins()) { if (!scc_split_check (si, scc, disjunct.remove(fu, true), extra, fu)) @@ -244,10 +244,10 @@ namespace spot } return true; } - // Filter with fin_unit() right away if possible. - // scc_and_mark_filter will have no effect if fin_unit() is + // Filter with mafins() right away if possible. + // scc_and_mark_filter will have no effect if mafins() is // empty. - scc_and_mark_filter filt(aut, aut_acc.fin_unit()); + scc_and_mark_filter filt(aut, aut_acc.mafins()); scc_info si(filt, scc_info_options::STOP_ON_ACC); const int accepting_scc = si.one_accepting_scc(); From 06b1ecb50ba85cdf48f70f0bf407234beb5a2e3a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 16:17:31 +0200 Subject: [PATCH 290/606] work around spurious GCC 13 warnings * spot/graph/graph.hh (new_univ_dests): Add an overload taking a temporary vector. * spot/twa/twagraph.cc (defrag_states): Use it. * tests/core/parity.cc: Remove some temporary variables. --- spot/graph/graph.hh | 11 ++++++++++- spot/twa/twagraph.cc | 5 ++--- tests/core/parity.cc | 16 +++++++--------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 04c21fec9..d2a97d1c5 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -576,6 +576,15 @@ namespace spot return p.first->second; } + unsigned new_univ_dests(std::vector&& tmp) + { + std::sort(tmp.begin(), tmp.end()); + tmp.erase(std::unique(tmp.begin(), tmp.end()), tmp.end()); + auto p = uniq_.emplace(tmp, 0); + if (p.second) + p.first->second = g_.new_univ_dests(tmp.begin(), tmp.end()); + return p.first->second; + } }; } // namespace internal diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 2a72702f3..055d6ca11 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1243,8 +1243,7 @@ namespace spot // the state so that graph::degrag_states() will // eventually update it to the correct value. 
nd = newst.size(); - newst.emplace_back(uniq.new_univ_dests(tmp.begin(), - tmp.end())); + newst.emplace_back(uniq.new_univ_dests(std::move(tmp))); } } in_dst = nd; diff --git a/tests/core/parity.cc b/tests/core/parity.cc index 4cb8256ef..7ff391745 100644 --- a/tests/core/parity.cc +++ b/tests/core/parity.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018-2019 Laboratoire de Recherche et +// Copyright (C) 2016, 2018-2019, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -320,16 +320,14 @@ int main() for (auto acc_tuple: acceptance_sets) for (auto& aut_tuple: automata_tuples) { - auto& aut = aut_tuple.first; - auto aut_num_sets = aut_tuple.second; - - auto acc = std::get<0>(acc_tuple); - auto is_max = std::get<1>(acc_tuple); - auto is_odd = std::get<2>(acc_tuple); - auto acc_num_sets = std::get<3>(acc_tuple); + spot::twa_graph_ptr& aut = aut_tuple.first; + unsigned aut_num_sets = aut_tuple.second; + unsigned acc_num_sets = std::get<3>(acc_tuple); if (aut_num_sets <= acc_num_sets) { - aut->set_acceptance(acc_num_sets, acc); + bool is_max = std::get<1>(acc_tuple); + bool is_odd = std::get<2>(acc_tuple); + aut->set_acceptance(acc_num_sets, std::get<0>(acc_tuple)); // Check change_parity for (auto kind: parity_kinds) for (auto style: parity_styles) From 7f1a33cc61b0998f6d89544eb160930fde807c11 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 16:19:35 +0200 Subject: [PATCH 291/606] * HACKING: Mention the svgo version we use. --- HACKING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HACKING b/HACKING index f2cf27e8c..8029fe6db 100644 --- a/HACKING +++ b/HACKING @@ -55,7 +55,7 @@ only for certain operations (like releases): pandoc used during Debian packaging for the conversion of IPython notebooks to html svgo for reducing SVG images before generating the tarball - (install with: npm install -g svgo) + (install with: npm install -g svgo@1.3.2) ltl2ba used in the generated documentation and the test suite ltl2dstar likewise ltl3dra likewise From 0923f8efe2606e8389c4bf55de0c081bbce9b6f4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 21:33:36 +0200 Subject: [PATCH 292/606] bin: fix handling for --output & --format with LTL outputs * bin/common_output.cc: Set the output stream for LTL formats. * tests/core/serial.test: Add a test case about this issue that also improve the covering of the previous patch about saving file descriptors. --- NEWS | 3 +++ bin/common_output.cc | 3 ++- tests/core/serial.test | 13 +++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index feb630320..220947d3a 100644 --- a/NEWS +++ b/NEWS @@ -35,6 +35,9 @@ New in spot 2.11.5.dev (not yet released) where thousands of different filenames can be created failed with "Too many open files". (Issue #534) + - Using --format=... on a tool that output formulas would force + the output on standard output, even when --output was given. 
+ New in spot 2.11.5 (2023-04-20) diff --git a/bin/common_output.cc b/bin/common_output.cc index 2f851f109..16beca34d 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -225,7 +225,7 @@ namespace } }; - class formula_printer final: protected spot::formater + class formula_printer final: public spot::formater { public: formula_printer(std::ostream& os, const char* format) @@ -399,6 +399,7 @@ output_formula(std::ostream& out, { formula_with_location fl = { f, filename, linenum, index, prefix, suffix }; + format->set_output(out); format->print(fl, ptimer); } } diff --git a/tests/core/serial.test b/tests/core/serial.test index 6f4c48c6a..7914316c8 100755 --- a/tests/core/serial.test +++ b/tests/core/serial.test @@ -78,3 +78,16 @@ test 2000 = `ls -l randaut-*.hoa | wc -l` # likewise for LTL formulas randltl 2 -n 2000 -o randltl-%l.ltl test 2000 = `ls -l randltl-*.ltl | wc -l` + + +# Test the code path that works that has to reopen files +randltl -n100 --tree-size 1 26 --allow-dups -o '%f'.ltl --format=pass1 +randltl -n100 --tree-size 1 26 --allow-dups -o '>>%f'.ltl --format=pass2 +(uniq -c p1.ltl; uniq -c p20.ltl) | sed 's/^ *\([0-9][0-9]*\) */\1 /g' >out +cat >expected < Date: Wed, 26 Jul 2023 21:50:16 +0200 Subject: [PATCH 293/606] bin: fix handling of -o '>>-' in the close/reopen path * tests/core/serial.test: Add test case. * bin/common_file.cc: Fix it. --- bin/common_file.cc | 9 +++------ tests/core/serial.test | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/bin/common_file.cc b/bin/common_file.cc index ebad8b878..68a69a2af 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -47,16 +47,13 @@ output_file::output_file(const char* name, bool force_append) void output_file::reopen_for_append(const std::string& name) { - if (of_ && of_->is_open()) // nothing to do + if (os_ == &std::cout || of_->is_open()) // nothing to do return; const char* cname = name.c_str(); if (cname[0] == '>' && cname[1] == '>') cname += 2; - if (name[0] == '-' && name[1] == 0) - { - os_ = &std::cout; - return; - } + // the name cannot be "-" at this point, otherwise os_ would be + // equal to std::cout. of_->open(cname, std::ios_base::app); if (!*of_) error(2, errno, "cannot reopen '%s'", cname); diff --git a/tests/core/serial.test b/tests/core/serial.test index 7914316c8..389335eff 100755 --- a/tests/core/serial.test +++ b/tests/core/serial.test @@ -91,3 +91,25 @@ cat >expected < naut-.hoa +for i in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do + ltl2tgba 1 --name "file$i" > naut$i.hoa +done +autfilt naut*.hoa naut*.hoa --output='>>%M' --format=%M > stdout +cat >expected <expected7 <expected15 < Date: Wed, 26 Jul 2023 23:00:24 +0200 Subject: [PATCH 294/606] acc: remove some dead functions * spot/twa/acc.hh, spot/twa/acc.cc (has_parity_prefix, is_parity_max_equiv): Remove. * spot/twa/acc.hh, spot/twa/twagraph.cc, spot/twa/twagraph.hh (apply_permutation): Remove. 
--- spot/twa/acc.cc | 180 ------------------------------------------- spot/twa/acc.hh | 81 ------------------- spot/twa/twagraph.cc | 9 --- spot/twa/twagraph.hh | 4 +- 4 files changed, 1 insertion(+), 273 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 8fec855af..4c4013ce7 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1067,81 +1067,6 @@ namespace spot return res; } - namespace - { - bool - has_parity_prefix_aux(acc_cond original, - acc_cond &new_cond, - std::vector &colors, - std::vector elements, - acc_cond::acc_op op) - { - if (elements.size() > 2) - { - new_cond = original; - return false; - } - if (elements.size() == 2) - { - unsigned pos = (elements[1].back().sub.op == op - && elements[1][0].mark.is_singleton()); - if (!(elements[0].back().sub.op == op || pos)) - { - new_cond = original; - return false; - } - if ((elements[1 - pos].used_sets() & elements[pos][0].mark)) - { - new_cond = original; - return false; - } - if (!elements[pos][0].mark.is_singleton()) - { - return false; - } - colors.push_back(elements[pos][0].mark.min_set() - 1); - elements[1 - pos].has_parity_prefix(new_cond, colors); - return true; - } - return false; - } - } - - bool - acc_cond::acc_code::has_parity_prefix(acc_cond &new_cond, - std::vector &colors) const - { - auto disj = top_disjuncts(); - if (!(has_parity_prefix_aux((*this), new_cond, colors, - disj, acc_cond::acc_op::Inf) || - has_parity_prefix_aux((*this), new_cond, colors, - top_conjuncts(), acc_cond::acc_op::Fin))) - new_cond = acc_cond((*this)); - return disj.size() == 2; - } - - bool - acc_cond::has_parity_prefix(acc_cond& new_cond, - std::vector& colors) const - { - return code_.has_parity_prefix(new_cond, colors); - } - - bool - acc_cond::is_parity_max_equiv(std::vector&permut, bool even) const - { - if (code_.used_once_sets() != code_.used_sets()) - return false; - bool result = code_.is_parity_max_equiv(permut, 0, even); - int max_value = *std::max_element(std::begin(permut), std::end(permut)); - for (unsigned i = 0; i < permut.size(); ++i) - if (permut[i] != -1) - permut[i] = max_value - permut[i]; - else - permut[i] = i; - return result; - } - bool acc_cond::is_parity(bool& max, bool& odd, bool equiv) const { unsigned sets = num_; @@ -1408,111 +1333,6 @@ namespace spot return patterns; } - bool - acc_cond::acc_code::is_parity_max_equiv(std::vector& permut, - unsigned new_color, - bool even) const - { - auto conj = top_conjuncts(); - auto disj = top_disjuncts(); - if (conj.size() == 1) - { - if (disj.size() == 1) - { - acc_cond::acc_code elem = conj[0]; - if ((even && elem.back().sub.op == acc_cond::acc_op::Inf) - || (!even && elem.back().sub.op == acc_cond::acc_op::Fin)) - { - for (auto color : disj[0][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - return true; - } - return false; - } - else - { - std::sort(disj.begin(), disj.end(), - [](acc_code c1, acc_code c2) - { - return (c1 != c2) && - c1.back().sub.op == acc_cond::acc_op::Inf; - }); - unsigned i = 0; - for (; i < disj.size() - 1; ++i) - { - if (disj[i].back().sub.op != acc_cond::acc_op::Inf - || !disj[i][0].mark.is_singleton()) - return false; - for (auto color : disj[i][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - } - if (disj[i].back().sub.op == acc_cond::acc_op::Inf) - { - if (!even || !disj[i][0].mark.is_singleton()) - return false; - for (auto color : 
disj[i][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - return true; - } - return disj[i].is_parity_max_equiv(permut, new_color + 1, even); - } - } - else - { - std::sort(conj.begin(), conj.end(), - [](acc_code c1, acc_code c2) - { - return (c1 != c2) - && c1.back().sub.op == acc_cond::acc_op::Fin; - }); - unsigned i = 0; - for (; i < conj.size() - 1; i++) - { - if (conj[i].back().sub.op != acc_cond::acc_op::Fin - || !conj[i][0].mark.is_singleton()) - return false; - for (auto color : conj[i][0].mark.sets()) - { - if (permut[color] != -1 && permut[color != new_color]) - return false; - permut[color] = new_color; - } - } - if (conj[i].back().sub.op == acc_cond::acc_op::Fin) - { - if (even) - return 0; - if (!conj[i][0].mark.is_singleton()) - return false; - for (auto color : conj[i][0].mark.sets()) - { - if (permut[color] != -1 && permut[color != new_color]) - return false; - permut[color] = new_color; - } - return true; - } - - return conj[i].is_parity_max_equiv(permut, new_color + 1, even); - } - } - - namespace { template diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 6d673bb8c..766dd5224 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -60,11 +60,6 @@ namespace spot /// could be removed.) class SPOT_API acc_cond { - - public: - bool - has_parity_prefix(acc_cond& new_acc, std::vector& colors) const; - #ifndef SWIG private: [[noreturn]] static void report_too_many_sets(); @@ -102,10 +97,6 @@ namespace spot /// Initialize an empty mark_t. mark_t() = default; - mark_t - apply_permutation(std::vector permut); - - #ifndef SWIG /// Create a mark_t from a range of set numbers. template @@ -489,15 +480,6 @@ namespace spot acc_code unit_propagation(); - bool - has_parity_prefix(acc_cond& new_cond, - std::vector& colors) const; - - bool - is_parity_max_equiv(std::vector& permut, - unsigned new_color, - bool even) const; - bool operator==(const acc_code& other) const { unsigned pos = size(); @@ -1814,8 +1796,6 @@ namespace spot bool is_parity(bool& max, bool& odd, bool equiv = false) const; - bool is_parity_max_equiv(std::vector& permut, bool even) const; - /// \brief check is the acceptance condition matches one of the /// four type of parity acceptance defined in the HOA format. 
bool is_parity() const @@ -1997,57 +1977,6 @@ namespace spot return all_; } - acc_cond - apply_permutation(std::vectorpermut) - { - return acc_cond(apply_permutation_aux(permut)); - } - - acc_code - apply_permutation_aux(std::vectorpermut) - { - auto conj = top_conjuncts(); - auto disj = top_disjuncts(); - - if (conj.size() > 1) - { - auto transformed = std::vector(); - for (auto elem : conj) - transformed.push_back(elem.apply_permutation_aux(permut)); - std::sort(transformed.begin(), transformed.end()); - auto uniq = std::unique(transformed.begin(), transformed.end()); - auto result = std::accumulate(transformed.begin(), uniq, acc_code::t(), - [](acc_code c1, acc_code c2) - { - return c1 & c2; - }); - return result; - } - else if (disj.size() > 1) - { - auto transformed = std::vector(); - for (auto elem : disj) - transformed.push_back(elem.apply_permutation_aux(permut)); - std::sort(transformed.begin(), transformed.end()); - auto uniq = std::unique(transformed.begin(), transformed.end()); - auto result = std::accumulate(transformed.begin(), uniq, acc_code::f(), - [](acc_code c1, acc_code c2) - { - return c1 | c2; - }); - return result; - } - else - { - if (code_.back().sub.op == acc_cond::acc_op::Fin) - return fin(code_[0].mark.apply_permutation(permut)); - if (code_.back().sub.op == acc_cond::acc_op::Inf) - return inf(code_[0].mark.apply_permutation(permut)); - } - SPOT_ASSERT(false); - return {}; - } - /// \brief Check whether visiting *exactly* all sets \a inf /// infinitely often satisfies the acceptance condition. bool accepting(mark_t inf) const @@ -2522,16 +2451,6 @@ namespace spot { return {*this}; } - - inline acc_cond::mark_t - acc_cond::mark_t::apply_permutation(std::vector permut) - { - mark_t result { }; - for (auto color : sets()) - if (color < permut.size()) - result.set(permut[color]); - return result; - } } namespace std diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 055d6ca11..3f74d4d99 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -108,15 +108,6 @@ namespace namespace spot { - void - twa_graph::apply_permutation(std::vector permut) - { - for (auto& e : edges()) - { - e.acc.apply_permutation(permut); - } - } - std::string twa_graph::format_state(unsigned n) const { if (is_univ_dest(n)) diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index 742a4d69a..1540692c6 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -220,8 +220,6 @@ namespace spot public: - void apply_permutation(std::vector permut); - twa_graph(const bdd_dict_ptr& dict) : twa(dict), init_number_(0) From 95379c18cc52284575a1fad34b982fcfde0575bc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 23:57:12 +0200 Subject: [PATCH 295/606] aiger: order the inputs of binary AND gates * spot/twaalgos/aiger.cc: Here. * tests/core/ltlsynt.test: Adjust, and add test case for aiger=optim. 
--- spot/twaalgos/aiger.cc | 30 +++++++++++++++--------------- tests/core/ltlsynt.test | 8 ++++++-- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index af255a167..6b608dd59 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -552,20 +552,20 @@ namespace spot assert(var2bdd_.count(v1)); assert(var2bdd_.count(v2)); - if (v1 != v2) - { - bdd b = var2bdd_[v1] & var2bdd_[v2]; - auto [it, inserted] = bdd2var_.try_emplace(b.id(), 0); - if (!inserted) - return it->second; - max_var_ += 2; - it->second = max_var_; - and_gates_.emplace_back(v1, v2); - register_new_lit_(max_var_, b); - return max_var_; - } - else - return v1; + if (SPOT_UNLIKELY(v1 > v2)) + std::swap(v1, v2); + if (SPOT_UNLIKELY(v1 == v2)) + return v1; + + bdd b = var2bdd_[v1] & var2bdd_[v2]; + auto [it, inserted] = bdd2var_.try_emplace(b.id(), 0); + if (!inserted) + return it->second; + max_var_ += 2; + it->second = max_var_; + and_gates_.emplace_back(v1, v2); + register_new_lit_(max_var_, b); + return max_var_; } unsigned aig::aig_and(std::vector& vs) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 4a7595539..02d248754 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019-2022 Laboratoire de Recherche et +# Copyright (C) 2017, 2019-2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -160,6 +160,10 @@ ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ --algo=ds --simplify=no --aiger=isop >out diff out exp +ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ + --algo=ds --simplify=no --aiger=optim >out +diff out exp + cat >exp < Date: Thu, 27 Jul 2023 09:48:00 +0200 Subject: [PATCH 296/606] tests: add some test to cover autcross' univ-edges removal * tests/core/ltl3ba.test: Here. --- tests/core/ltl3ba.test | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/core/ltl3ba.test b/tests/core/ltl3ba.test index fdcebc926..acc68a2c5 100755 --- a/tests/core/ltl3ba.test +++ b/tests/core/ltl3ba.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2016-2018, 2021, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -47,8 +47,10 @@ GF(((a & Xb) | XXc) & Xd) GF((b | Fa) & (b R Xb)) EOF randltl -n 30 2 -) | ltlcross -D 'ltl3ba -H1' 'ltl3ba -H2' 'ltl3ba -H3' ltl2tgba \ - --ambiguous --strength --csv=output.csv +) > file.ltl + +ltlcross -F file.ltl -D 'ltl3ba -H1' 'ltl3ba -H2' 'ltl3ba -H3' ltl2tgba \ + --ambiguous --strength --verbose --csv=output.csv grep _x output.csv && exit 1 @@ -59,6 +61,9 @@ while read l; do test "x$first" = "x$l" || exit 1 done) +ltldo 'ltl3ba -H1' -F file.ltl | + autcross --language-complemented 'autfilt --dualize' --verbose + # The name of the HOA is preserved case `ltldo 'ltl3ba -H' -f xxx --stats=%m` in *xxx*);; From 95e3bb815c79b0296097c56e34b27905f98f8472 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 27 Jul 2023 10:23:33 +0200 Subject: [PATCH 297/606] bin: cover more tmpfile failure when running as root * tests/core/ltlcross5.test: reorganize to test missing directory before permission issues, as the latter cannot be run as root. --- tests/core/ltlcross5.test | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/tests/core/ltlcross5.test b/tests/core/ltlcross5.test index 82e9fdc89..c89a7bd0b 100644 --- a/tests/core/ltlcross5.test +++ b/tests/core/ltlcross5.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement de +# Copyright (C) 2019, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -24,10 +24,22 @@ set -e unset TMPDIR unset SPOT_TMPDIR +err=0 + +SPOT_TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=1 +cat err +grep 'failed to create temporary file' err +grep 'Note that the directory.*SPOT_TMPDIR ' err + +TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=2 +cat err +grep 'failed to create temporary file' err +grep 'Note that the directory.* TMPDIR ' err + + mkdir foo chmod a-w foo cd foo -err=0 if touch bar; then # We are likely running as root, so we cannot detect permission # issues. @@ -39,27 +51,17 @@ fi ltlcross ltl2tgba -f GFa 2>../err && err=1 cd .. cat err -grep 'failed to create temporary file' err || err=1 -grep 'executing this from a writable' err || err=1 +grep 'failed to create temporary file' err || err=3 +grep 'executing this from a writable' err || err=3 grep 'SPOT_TMPDIR' err || err=1 -SPOT_TMPDIR=foo ltlcross ltl2tgba -f GFa 2>err && err=2 +SPOT_TMPDIR=foo ltlcross ltl2tgba -f GFa 2>err && err=4 cat err -grep 'failed to create temporary file' err || err=2 -grep 'executing this from a writable' err && err=2 +grep 'failed to create temporary file' err || err=4 +grep 'executing this from a writable' err && err=4 grep 'SPOT_TMPDIR' err chmod a+w foo rmdir foo -SPOT_TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=3 -cat err -grep 'failed to create temporary file' err -grep 'Note that the directory.*SPOT_TMPDIR ' err - -TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=4 -cat err -grep 'failed to create temporary file' err -grep 'Note that the directory.* TMPDIR ' err - exit $err From 7358a264923065a33b2b0a561936c9e088a8df84 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 27 Jul 2023 14:28:15 +0200 Subject: [PATCH 298/606] improve coverage of LaTeX/utf8 printers for SERE * bin/common_output.cc, bin/common_output.hh, bin/randltl.cc: Adjust so that running "randltl -S" use the SERE flavor of the spot/latex/utf8 formula printers. * tests/core/latex.test, tests/core/utf8.test, tests/python/ltlparse.py: Add more test cases. 
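For illustration, a minimal C++ sketch of the SERE-specific printers that
"randltl -S" now goes through, assuming only the public <spot/tl/parse.hh>
and <spot/tl/print.hh> interfaces; the expected outputs simply mirror the
assertions added to tests/python/ltlparse.py below.

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/tl/print.hh>

    int main()
    {
      // Parse a SERE and print it with the SERE printers rather than
      // the generic PSL ones.
      spot::formula f = spot::parse_infix_sere("b[=1]").f;
      spot::print_sere(std::cout, f) << '\n';          // b[=1]
      spot::print_latex_sere(std::cout, f) << '\n';    // b\SereEqual{1}
      spot::print_sclatex_sere(std::cout, f) << '\n';  // b^{=1}
      return 0;
    }
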
--- NEWS | 2 ++ bin/common_output.cc | 16 +++++++++++++--- bin/common_output.hh | 1 + bin/randltl.cc | 1 + tests/core/latex.test | 7 ++++--- tests/core/utf8.test | 17 ++++++++++++++++- tests/python/ltlparse.py | 18 +++++++++++++++++- 7 files changed, 54 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 220947d3a..58cbcb28c 100644 --- a/NEWS +++ b/NEWS @@ -38,6 +38,8 @@ New in spot 2.11.5.dev (not yet released) - Using --format=... on a tool that output formulas would force the output on standard output, even when --output was given. + - Using "randltl -S" did not correctly go through the SERE printer + functions. New in spot 2.11.5 (2023-04-20) diff --git a/bin/common_output.cc b/bin/common_output.cc index 16beca34d..13988688a 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -45,6 +45,7 @@ output_format_t output_format = spot_output; bool full_parenth = false; bool escape_csv = false; char output_terminator = '\n'; +bool output_ratexp = false; static const argp_option options[] = { @@ -105,7 +106,10 @@ stream_formula(std::ostream& out, report_not_ltl(f, filename, linenum, "LBT"); break; case spot_output: - spot::print_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_sere(out, f, full_parenth); + else + spot::print_psl(out, f, full_parenth); break; case spin_output: if (f.is_ltl_formula()) @@ -120,10 +124,16 @@ stream_formula(std::ostream& out, report_not_ltl(f, filename, linenum, "Wring"); break; case utf8_output: - spot::print_utf8_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_utf8_sere(out, f, full_parenth); + else + spot::print_utf8_psl(out, f, full_parenth); break; case latex_output: - spot::print_latex_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_latex_sere(out, f, full_parenth); + else + spot::print_latex_psl(out, f, full_parenth); break; case count_output: case quiet_output: diff --git a/bin/common_output.hh b/bin/common_output.hh index 3b08db27f..30fe9e7d1 100644 --- a/bin/common_output.hh +++ b/bin/common_output.hh @@ -36,6 +36,7 @@ enum output_format_t { spot_output, spin_output, utf8_output, extern output_format_t output_format; extern bool full_parenth; extern bool escape_csv; +extern bool output_ratexp; #define COMMON_X_OUTPUT_SPECS(where) \ "number of atomic propositions " #where "; " \ diff --git a/bin/randltl.cc b/bin/randltl.cc index 4dacf5264..2a95def20 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -182,6 +182,7 @@ parse_opt(int key, char* arg, struct argp_state* as) break; case 'S': output = spot::randltlgenerator::SERE; + output_ratexp = true; break; case OPT_BOOLEAN_PRIORITIES: opt_pB = arg; diff --git a/tests/core/latex.test b/tests/core/latex.test index 6e94e14d6..bd27964c5 100755 --- a/tests/core/latex.test +++ b/tests/core/latex.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2013, 2015, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -50,7 +50,8 @@ cat <<\EOF EOF ( ltlfilt --latex input --format='\texttt{%F:%L} & $%f$ \\'; genltl --go-theta=1..3 --latex \ - --format='\texttt{--%F:%L} & $%f$ \\') + --format='\texttt{--%F:%L} & $%f$ \\'; + randltl -S -n10 --latex 2 --format='\texttt{random %l} & $%f$ \\') cat <<\EOF \end{tabular} \end{document} diff --git a/tests/core/utf8.test b/tests/core/utf8.test index 45ba950f5..b0bfef043 100755 --- a/tests/core/utf8.test +++ b/tests/core/utf8.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2015, 2016, 2019 Laboratoire de Recherche et +# Copyright (C) 2012, 2013, 2015, 2016, 2019, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -70,3 +70,18 @@ ltlfilt -8 -f 'X[!]a' >out diff in out ltlfilt -8 -F in >out diff in out + +randltl --sere -8 --seed 0 --tree-size 8 a b c -n 10 > formulae +cat >expected <3]') +tc.assertEqual(spot.str_sere(pf.f), 'a[->3]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'a\\SereGoto{3}') +pf = spot.parse_infix_sere('(!b)[*];b;(!b)[*]') +tc.assertEqual(spot.str_sere(pf.f), 'b[=1]') +pf = spot.parse_infix_sere('b[=1]') +tc.assertEqual(spot.str_sere(pf.f), 'b[=1]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'b\\SereEqual{1}') +tc.assertEqual(spot.str_sclatex_sere(pf.f), 'b^{=1}') +pf = spot.parse_infix_sere('(!b)[*];b') +tc.assertEqual(spot.str_sere(pf.f), 'b[->]') +pf = spot.parse_infix_sere('b[->1]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'b\\SereGoto{}') +tc.assertEqual(spot.str_sclatex_sere(pf.f), 'b^{\\to}') From e3e50672b5271ec84b59a16696dcff2716162ce6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 28 Jul 2023 16:20:18 +0200 Subject: [PATCH 299/606] * .gitlab-ci.yml: temporary disable raspbian. --- .gitlab-ci.yml | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 348bacba1..424f7bc21 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -458,22 +458,26 @@ publish-unstable: - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline -raspbian: - stage: build - only: - - branches - except: - - /wip/ - tags: - - armv7 - script: - - autoreconf -vfi - - ./configure - - make - - make distcheck || { chmod -R u+w ./spot-*; false; } - artifacts: - when: always - paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log - - ./*.tar.gz + +# The SD card of our Raspberry failed. Disable this job until we +# can make it work again. +# +# raspbian: +# stage: build +# only: +# - branches +# except: +# - /wip/ +# tags: +# - armv7 +# script: +# - autoreconf -vfi +# - ./configure +# - make +# - make distcheck || { chmod -R u+w ./spot-*; false; } +# artifacts: +# when: always +# paths: +# - ./spot-*/_build/sub/tests/*/*.log +# - ./*.log +# - ./*.tar.gz From d58b7da5626ecd81f41c99ffb879ead5704436f4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 31 Jul 2023 21:07:07 +0200 Subject: [PATCH 300/606] [buddy] fix cache index of bdd_forall Fix a 20 year old typo that caused a bug reported by Guillermo Perez. * src/bddop.c (bdd_forall, bdd_forallcomp): Fix the cache index. 
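To make the effect of the typo concrete, here is a hedged C++ sketch of a
call pattern that could be affected before this fix, using BuDDy's
interface as bundled with Spot (the header name <bddx.h> is an assumption;
plain BuDDy ships it as <bdd.h>). Whether the stale cache entry is actually
hit depends on BuDDy's internal cache state, so this is only an
illustration, not a reliable reproducer.

    #include <bddx.h>
    #include <iostream>

    int main()
    {
      bdd_init(100000, 10000);
      bdd_setvarnum(2);
      bdd a = bdd_ithvar(0);
      bdd b = bdd_ithvar(1);
      bdd r = a | b;
      // Both calls quantify the same variable out of the same BDD.
      // Before the fix, bdd_forall() stored and looked up its results
      // under the bdd_exist() cache identifier, so the second call
      // could pick up the cached existential result instead of
      // computing the universal quantification.
      bdd e = bdd_exist(r, a);    // expected: bddtrue
      bdd f = bdd_forall(r, a);   // expected: b
      std::cout << (e == bddtrue) << ' ' << (f == b) << '\n';
      bdd_done();
      return 0;
    }
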
--- buddy/src/bddop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/buddy/src/bddop.c b/buddy/src/bddop.c index f54a39442..fd62ed9b8 100644 --- a/buddy/src/bddop.c +++ b/buddy/src/bddop.c @@ -2300,7 +2300,7 @@ RETURN {* The quantified BDD. *} */ BDD bdd_forall(BDD r, BDD var) { - return quantify(r, var, bddop_and, 0, CACHEID_EXIST); + return quantify(r, var, bddop_and, 0, CACHEID_FORALL); } /* @@ -2315,7 +2315,7 @@ RETURN {* The quantified BDD. *} */ BDD bdd_forallcomp(BDD r, BDD var) { - return quantify(r, var, bddop_and, 1, CACHEID_EXISTC); + return quantify(r, var, bddop_and, 1, CACHEID_FORALLC); } From 69d9b78a554637f447987162462fa20354dd5c2f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 31 Jul 2023 21:11:39 +0200 Subject: [PATCH 301/606] mention the bug fixed in BuDDy * NEWS: Explain the bug fixed in previous patch and reported in issue #535. * THANKS: Add Guillermo. --- NEWS | 5 +++++ THANKS | 1 + 2 files changed, 6 insertions(+) diff --git a/NEWS b/NEWS index 58cbcb28c..dd4431071 100644 --- a/NEWS +++ b/NEWS @@ -41,6 +41,11 @@ New in spot 2.11.5.dev (not yet released) - Using "randltl -S" did not correctly go through the SERE printer functions. + - Our copy of BuDDy's bdd_forall() had a 20 year old typo that + caused cache entries from bdd_exist() and bdd_forall() to be + mixed. Spot was safe from this bug because it was only using + bdd_exist(). (Issue #535) + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/THANKS b/THANKS index 93155f9d1..7986c3875 100644 --- a/THANKS +++ b/THANKS @@ -21,6 +21,7 @@ Felix Klaedtke Florian Perlié-Long František Blahoudek Gerard J. Holzmann +Guillermo A. Perez Hashim Ali Heikki Tauriainen Henrich Lauko From 16373cfb107e89cfd2074a2900c8d5bcab30a2e4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 8 Dec 2022 17:27:32 +0100 Subject: [PATCH 302/606] Work around spurious g++-12 warnings * spot/twaalgos/ltl2tgba_fm.cc, spot/tl/formula.hh, spot/twaalgos/translate.cc: Add SPOT_ASSUME in various places to help g++. --- spot/tl/formula.hh | 4 +++- spot/twaalgos/ltl2tgba_fm.cc | 15 ++++++++++----- spot/twaalgos/translate.cc | 9 ++++++--- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index d01b8379c..b60000479 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -294,7 +294,9 @@ namespace spot { if (SPOT_UNLIKELY(i >= size())) report_non_existing_child(); - return children[i]; + const fnode* c = children[i]; + SPOT_ASSUME(c != nullptr); + return c; } /// \see formula::ff diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 42571f00f..9768dfbfd 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -1026,11 +1026,16 @@ namespace spot bool coacc = false; auto& st = sm->states_of(n); for (auto l: st) - if (namer->get_name(l).accepts_eword()) - { - coacc = true; - break; - } + { + formula lf = namer->get_name(l); + // Somehow gcc 12.2.0 thinks lf can be nullptr. + SPOT_ASSUME(lf != nullptr); + if (lf.accepts_eword()) + { + coacc = true; + break; + } + } if (!coacc) { // ... or if any of its successors is coaccessible. 
diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index d5b1aacd0..339463426 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -209,9 +209,12 @@ namespace spot if (!rest.empty() && !oblg.empty()) { auto safety = [](formula f) - { - return f.is_syntactic_safety(); - }; + { + // Prevent gcc 12.2.0 from warning us that f could be a + // nullptr formula. + SPOT_ASSUME(f != nullptr); + return f.is_syntactic_safety(); + }; auto i = std::remove_if(oblg.begin(), oblg.end(), safety); rest.insert(rest.end(), i, oblg.end()); oblg.erase(i, oblg.end()); From 2495004afd98b25a2e7b3c95fa2391d51db18a83 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Dec 2022 16:35:05 +0100 Subject: [PATCH 303/606] avoid a g++-12 warning about potential null pointer dereference * spot/twaalgos/determinize.cc (sorted_nodes): Rewrite to avoid reallocation of temporary vector. --- spot/twaalgos/determinize.cc | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index ba4fb3ded..d2d35a824 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -472,15 +472,23 @@ namespace spot std::vector res; for (const auto& n: s.nodes_) { - int brace = n.second; - std::vector tmp; - while (brace >= 0) + // First, count the number of braces. + unsigned nbraces = 0; + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + ++nbraces; + // Then list them in reverse order. Since we know the + // number of braces, we can allocate exactly what we need. + if (nbraces > 0) { - // FIXME: is there a smarter way? - tmp.insert(tmp.begin(), brace); - brace = s.braces_[brace]; + std::vector tmp(nbraces, 0); + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + tmp[--nbraces] = brace; + res.emplace_back(n.first, std::move(tmp)); + } + else + { + res.emplace_back(n.first, std::vector{}); } - res.emplace_back(n.first, std::move(tmp)); } std::sort(res.begin(), res.end(), compare()); return res; From 9c6fa4921bf3bf7cce496d9b953a612ae4f8a804 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 11 May 2023 21:25:59 +0200 Subject: [PATCH 304/606] debian: add missing build dependencies * debian/control: Add Build-Depends on graphviz, jupyter-nbconvert, doxygen. --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index d1f9c652c..e29454c54 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: spot Section: science Priority: optional Maintainer: Alexandre Duret-Lutz -Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python +Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen Standards-Version: 4.5.1 Homepage: http://spot.lrde.epita.fr/ From 75bd595d2877e7e41e331397d7da0d25de772b4f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 12 May 2023 11:32:46 +0200 Subject: [PATCH 305/606] bitvect: work around incorrect warning from gcc * spot/misc/bitvect.hh: Don't free the old ptr if realloc() returns NULL, as this confuse GCC who warns that we are freeing something that has already been freed. Instead, let the ~bitvect() destructor handle this. 
--- spot/misc/bitvect.hh | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/spot/misc/bitvect.hh b/spot/misc/bitvect.hh index 3588b406e..74ab2bf3f 100644 --- a/spot/misc/bitvect.hh +++ b/spot/misc/bitvect.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -111,22 +111,22 @@ namespace spot return; if (storage_ == &local_storage_) { - block_t* new_storage_ = static_cast + block_t* new_storage = static_cast (malloc(new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + throw std::bad_alloc(); for (size_t i = 0; i < block_count_; ++i) - new_storage_[i] = storage_[i]; - storage_ = new_storage_; + new_storage[i] = storage_[i]; + storage_ = new_storage; } else { - auto old = storage_; - storage_ = static_cast - (realloc(old, new_block_count * sizeof(block_t))); - if (!storage_) - { - free(old); - throw std::bad_alloc(); - } + block_t* new_storage = static_cast + (realloc(storage_, new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + // storage_, untouched, will be freed by the destructor. + throw std::bad_alloc(); + storage_ = new_storage; } block_count_ = new_block_count; } @@ -134,8 +134,8 @@ namespace spot private: void grow() { - size_t new_block_count_ = (block_count_ + 1) * 7 / 5; - reserve_blocks(new_block_count_); + size_t new_block_count = (block_count_ + 1) * 7 / 5; + reserve_blocks(new_block_count); } public: From 330b34e84d4e8b5ad1e93722259b1e0f932e4aac Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Thu, 4 May 2023 15:28:48 +0200 Subject: [PATCH 306/606] parity_type_to_parity: Add missing cases * spot/twaalgos/toparity.cc: Correct some cases where the solution was not detected. * tests/python/toparity.py: Update tests. --- spot/twaalgos/toparity.cc | 53 ++++++++++++++++++++++++--------------- tests/python/toparity.py | 24 +++++++++++++++++- 2 files changed, 56 insertions(+), 21 deletions(-) diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index c936ef57b..a82c7d57a 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -95,7 +95,8 @@ namespace spot const bool need_equivalent, std::vector &status, std::vector &res_colors, - acc_cond &new_cond, bool &was_able_to_color) + acc_cond &new_cond, bool &was_able_to_color, + unsigned max_col) { auto& ev = aut->edge_vector(); const auto ev_size = ev.size(); @@ -134,7 +135,7 @@ namespace spot kind == cond_kind::INF_PARITY; unsigned max_iter = want_parity ? -1U : 1; - unsigned color = want_parity ? SPOT_MAX_ACCSETS - 1 : 0; + unsigned color = max_col; // Do we want always accepting transitions? // Don't consider CO_BUCHI as it is done by Büchi bool search_inf = kind != cond_kind::FIN_PARITY; @@ -167,14 +168,15 @@ namespace spot auto filter_data = filter_data_t{aut, status}; scc_info si(aut, aut_init, filter, &filter_data, scc_info_options::TRACK_STATES); + if (search_inf) + si.determine_unknown_acceptance(); bool worked = false; unsigned ssc_size = si.scc_count(); for (unsigned scc = 0; scc < ssc_size; ++scc) { // scc_info can detect that we will not be able to find an - // accepting/rejecting cycle. - if (!((search_inf && !si.is_accepting_scc(scc)) || - (!search_inf && !si.is_rejecting_scc(scc)))) + // accepting cycle. 
+ if ((search_inf && si.is_accepting_scc(scc)) || !search_inf) { accepting_transitions_scc(si, scc, cond, {}, not_decidable_transitions, *keep); @@ -224,6 +226,8 @@ namespace spot break; } + new_cond = acc_cond(new_code); + // We check parity if (need_equivalent) { @@ -269,19 +273,19 @@ namespace spot aut->set_acceptance(acc_cond(aut_acc_comp)); } } - new_cond = acc_cond(new_code); + return true; } static twa_graph_ptr cond_type_main(const twa_graph_ptr &aut, const cond_kind kind, - bool &was_able_to_color) + bool &was_able_to_color, unsigned max_color) { std::vector res_colors; std::vector status; acc_cond new_cond; if (cond_type_main_aux(aut, kind, true, status, res_colors, new_cond, - was_able_to_color)) + was_able_to_color, max_color)) { auto res = make_twa_graph(aut, twa::prop_set::all()); auto &res_vector = res->edge_vector(); @@ -311,14 +315,19 @@ namespace spot bool was_able_to_color; // If the automaton is parity-type with a condition that has Inf as // outermost term - auto res = cond_type_main(aut, cond_kind::INF_PARITY, was_able_to_color); + auto res = cond_type_main(aut, cond_kind::INF_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); // If it was impossible to find an accepting edge, it is perhaps possible // to find a rejecting transition if (res == nullptr && !was_able_to_color) - res = cond_type_main(aut, cond_kind::FIN_PARITY, was_able_to_color); + res = cond_type_main(aut, cond_kind::FIN_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); if (res) + { + res->prop_state_acc(false); reduce_parity_here(res); + } return res; } @@ -326,14 +335,14 @@ namespace spot buchi_type_to_buchi(const twa_graph_ptr &aut) { bool useless; - return cond_type_main(aut, cond_kind::BUCHI, useless); + return cond_type_main(aut, cond_kind::BUCHI, useless, 0); } twa_graph_ptr co_buchi_type_to_co_buchi(const twa_graph_ptr &aut) { bool useless; - return cond_type_main(aut, cond_kind::CO_BUCHI, useless); + return cond_type_main(aut, cond_kind::CO_BUCHI, useless, 0); } // New version for paritizing @@ -1943,12 +1952,14 @@ namespace spot // Is the maximal color accepting? 
bool start_inf = true; cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, false, status, - res_colors, new_cond, was_able_to_color); + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); // Otherwise we can try to find a rejecting transition as first step if (!was_able_to_color) { cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, false, status, - res_colors, new_cond, was_able_to_color); + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); if (!was_able_to_color) return false; start_inf = false; @@ -2127,11 +2138,11 @@ namespace spot bool is_co_bu = false; bool was_able_to_color; if (!cond_type_main_aux(sub_aut, cond_kind::BUCHI, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, 0)) { is_co_bu = true; if (!cond_type_main_aux(sub_aut, cond_kind::CO_BUCHI, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, 0)) return false; change_to_odd(); } @@ -2172,16 +2183,18 @@ namespace spot acc_cond new_cond; bool was_able_to_color; if (!cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) { if (!cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, true, status, - res_colors, new_cond, was_able_to_color)) + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) return false; } bool is_max, is_odd; new_cond.is_parity(is_max, is_odd); - auto [min, max] = - std::minmax_element(res_colors.begin() + 1, res_colors.end()); + auto min = + std::min_element(res_colors.begin() + 1, res_colors.end()); // cond_type_main_aux returns a parity max condition assert(is_max); auto col_fun = diff --git a/tests/python/toparity.py b/tests/python/toparity.py index ab5fbf314..80c2c19ef 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -547,4 +547,26 @@ State: 9 3 {4} 2 3 {4} 6 --END-- b = spot.iar_maybe(a) tc.assertEqual(b.num_states(), 87) tc.assertTrue(a.equivalent_to(b)) -test(a, [87, 91, 91, 87, 87, 87, 51, 51, 21]) +test(a, [87, 91, 91, 87, 87, 87, 51, 35, 21]) + +a = spot.automaton("""HOA: v1 +States: 4 +Start: 0 +AP: 2 "p0" "p1" +Acceptance: 2 Fin(1) & Fin(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[!0&!1] 2 +[!0&!1] 1 +State: 1 +[!0&1] 0 +[0&1] 3 +State: 2 +[0&!1] 1 +State: 3 {0} +[!0&1] 3 +[!0&!1] 1 +--END--""") +b = spot.parity_type_to_parity(a) +tc.assertTrue(spot.are_equivalent(a, b)) From 47674c0d2f0bd1aa2d0ee98aec79eee08691a727 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 12 Jun 2023 11:01:30 +0200 Subject: [PATCH 307/606] fix spurious failure of ltlcross4.test Reported by Yuri Victorovich. * tests/core/ltlcross4.test: Drop the 'formula' column before computing aggregates. It causes warnings in some Pandas versions, and errors in others. --- tests/core/ltlcross4.test | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/core/ltlcross4.test b/tests/core/ltlcross4.test index b7c85979a..7d124d689 100755 --- a/tests/core/ltlcross4.test +++ b/tests/core/ltlcross4.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2017, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2012-2014, 2017, 2020, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -54,8 +54,7 @@ x = pandas.read_csv("output.csv") # We used to call describe() instead of agg(), # but the output of this function was changed # in pandas 0.20. -print(x.filter(('formula', 'tool', - 'states', 'transitions')).\ +print(x.filter(('tool', 'states', 'transitions')).\ groupby('tool').\ agg([np.mean, np.std, np.min, np.max])) EOF From adca03a30afd0cd495b5b4b496435bbb67630a48 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 24 Jul 2023 12:16:30 +0200 Subject: [PATCH 308/606] * tests/core/ltlcross4.test: Work around recent Pandas change. --- tests/core/ltlcross4.test | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/core/ltlcross4.test b/tests/core/ltlcross4.test index 7d124d689..f171876a9 100755 --- a/tests/core/ltlcross4.test +++ b/tests/core/ltlcross4.test @@ -65,12 +65,14 @@ $PYTHON test.py >out.1 # remove trailing whitespace from pandas' output, and limit to 6 # lines, because Pandas 0.13 adds the size of the dataframe # afterwards. Alse the spacing between columns differs from version -# to version. -sed 's/[ \t]*$//g;6q' py.out +# to version. The name of the output columns changed from "amin amax" +# to "min max" in some Pandas version (maybe around 2.0). +sed 's/[ \t]*$//g;s/amin/min/g;s/amax/max/g;6q' py.out cat >expected < Date: Mon, 24 Jul 2023 16:56:24 +0200 Subject: [PATCH 309/606] bin: handle thousands of output files Fixes #534. Test case is only on next branch. * bin/common_file.hh, bin/common_file.cc: Make it possible to reopen a closed file. * bin/common_output.cc, bin/common_aoutput.cc: Add a heuristic to decide when to close files. * NEWS: Mention the issue. --- NEWS | 7 +++++++ bin/common_aoutput.cc | 20 ++++++++++++++++++++ bin/common_file.cc | 19 ++++++++++++++++++- bin/common_file.hh | 2 ++ bin/common_output.cc | 20 ++++++++++++++++++++ 5 files changed, 67 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 0b425272f..29cfa7cb1 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,13 @@ New in spot 2.11.5.dev (not yet released) Nothing yet. + Bug fixes: + + - Running command lines such as "autfilt input.hoa -o output-%L.hoa" + where thousands of different filenames can be created failed with + "Too many open files". (Issue #534) + + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 60f83289e..ea44d6c22 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -636,7 +636,27 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, auto [it, b] = outputfiles.try_emplace(fname, nullptr); if (b) it->second.reset(new output_file(fname.c_str())); + else + // reopen if the file has been closed; see below + it->second->reopen_for_append(fname); out = &it->second->ostream(); + + // If we have opened fewer than 10 files, we keep them all open + // to avoid wasting time on open/close calls. + // + // However we cannot keep all files open, especially in + // scenarios were we use thousands of files only once. To keep + // things simple, we only close the previous file if it is not + // the current output. This way we still save the close/open + // cost when consecutive automata are sent to the same file. + static output_file* previous = nullptr; + static const std::string* previous_name = nullptr; + if (previous + && outputfiles.size() > 10 + && &previous->ostream() != out) + previous->close(*previous_name); + previous = it->second.get(); + previous_name = &it->first; } // Output it. 
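As a rough illustration of the scenario this patch addresses (the randltl/ltl2tgba pipeline and the input size are made up; the autfilt command line is the one quoted in the NEWS entry above): every distinct expansion of %L names a separate output file, so an input holding thousands of automata used to hit the per-process limit on open file descriptors. With this change, once more than 10 files have been created, the previously used file is closed before switching to a different one.

    randltl -n 2000 a b | ltl2tgba -F - > input.hoa   # thousands of automata
    autfilt input.hoa -o output-%L.hoa                # one output file per input location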
diff --git a/bin/common_file.cc b/bin/common_file.cc index 4e56c6d54..ebad8b878 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -44,13 +44,30 @@ output_file::output_file(const char* name, bool force_append) os_ = of_.get(); } +void +output_file::reopen_for_append(const std::string& name) +{ + if (of_ && of_->is_open()) // nothing to do + return; + const char* cname = name.c_str(); + if (cname[0] == '>' && cname[1] == '>') + cname += 2; + if (name[0] == '-' && name[1] == 0) + { + os_ = &std::cout; + return; + } + of_->open(cname, std::ios_base::app); + if (!*of_) + error(2, errno, "cannot reopen '%s'", cname); +} void output_file::close(const std::string& name) { // We close of_, not os_, so that we never close std::cout. if (os_) os_->flush(); - if (of_) + if (of_ && of_->is_open()) of_->close(); if (os_ && !*os_) error(2, 0, "error writing to %s", diff --git a/bin/common_file.hh b/bin/common_file.hh index b6aa0bec3..51000d18c 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -37,6 +37,8 @@ public: void close(const std::string& name); + void reopen_for_append(const std::string& name); + bool append() const { return append_; diff --git a/bin/common_output.cc b/bin/common_output.cc index 93cb2dfaf..4ab62a9aa 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -420,7 +420,27 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, auto [it, b] = outputfiles.try_emplace(fname, nullptr); if (b) it->second.reset(new output_file(fname.c_str())); + else + // reopen if the file has been closed; see below + it->second->reopen_for_append(fname); out = &it->second->ostream(); + + // If we have opened fewer than 10 files, we keep them all open + // to avoid wasting time on open/close calls. + // + // However we cannot keep all files open, especially in + // scenarios were we use thousands of files only once. To keep + // things simple, we only close the previous file if it is not + // the current output. This way we still save the close/open + // cost when consecutive formulas are sent to the same file. + static output_file* previous = nullptr; + static const std::string* previous_name = nullptr; + if (previous + && outputfiles.size() > 10 + && &previous->ostream() != out) + previous->close(*previous_name); + previous = it->second.get(); + previous_name = &it->first; } output_formula(*out, f, ptimer, filename, linenum, prefix, suffix); *out << output_terminator; From 090dcf17eb7c45ce42cef6d34dcb9d77dbd204b2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 16:17:31 +0200 Subject: [PATCH 310/606] work around spurious GCC 13 warnings * spot/graph/graph.hh (new_univ_dests): Add an overload taking a temporary vector. * spot/twa/twagraph.cc (defrag_states): Use it. * tests/core/parity.cc: Remove some temporary variables. --- spot/graph/graph.hh | 11 ++++++++++- spot/twa/twagraph.cc | 5 ++--- tests/core/parity.cc | 16 +++++++--------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 04c21fec9..d2a97d1c5 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. 
@@ -576,6 +576,15 @@ namespace spot return p.first->second; } + unsigned new_univ_dests(std::vector&& tmp) + { + std::sort(tmp.begin(), tmp.end()); + tmp.erase(std::unique(tmp.begin(), tmp.end()), tmp.end()); + auto p = uniq_.emplace(tmp, 0); + if (p.second) + p.first->second = g_.new_univ_dests(tmp.begin(), tmp.end()); + return p.first->second; + } }; } // namespace internal diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 2a72702f3..055d6ca11 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1243,8 +1243,7 @@ namespace spot // the state so that graph::degrag_states() will // eventually update it to the correct value. nd = newst.size(); - newst.emplace_back(uniq.new_univ_dests(tmp.begin(), - tmp.end())); + newst.emplace_back(uniq.new_univ_dests(std::move(tmp))); } } in_dst = nd; diff --git a/tests/core/parity.cc b/tests/core/parity.cc index 4cb8256ef..7ff391745 100644 --- a/tests/core/parity.cc +++ b/tests/core/parity.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018-2019 Laboratoire de Recherche et +// Copyright (C) 2016, 2018-2019, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -320,16 +320,14 @@ int main() for (auto acc_tuple: acceptance_sets) for (auto& aut_tuple: automata_tuples) { - auto& aut = aut_tuple.first; - auto aut_num_sets = aut_tuple.second; - - auto acc = std::get<0>(acc_tuple); - auto is_max = std::get<1>(acc_tuple); - auto is_odd = std::get<2>(acc_tuple); - auto acc_num_sets = std::get<3>(acc_tuple); + spot::twa_graph_ptr& aut = aut_tuple.first; + unsigned aut_num_sets = aut_tuple.second; + unsigned acc_num_sets = std::get<3>(acc_tuple); if (aut_num_sets <= acc_num_sets) { - aut->set_acceptance(acc_num_sets, acc); + bool is_max = std::get<1>(acc_tuple); + bool is_odd = std::get<2>(acc_tuple); + aut->set_acceptance(acc_num_sets, std::get<0>(acc_tuple)); // Check change_parity for (auto kind: parity_kinds) for (auto style: parity_styles) From 8065759fbd3ef9164094bdced28bd4e7623583e4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 16:19:35 +0200 Subject: [PATCH 311/606] * HACKING: Mention the svgo version we use. --- HACKING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HACKING b/HACKING index f2cf27e8c..8029fe6db 100644 --- a/HACKING +++ b/HACKING @@ -55,7 +55,7 @@ only for certain operations (like releases): pandoc used during Debian packaging for the conversion of IPython notebooks to html svgo for reducing SVG images before generating the tarball - (install with: npm install -g svgo) + (install with: npm install -g svgo@1.3.2) ltl2ba used in the generated documentation and the test suite ltl2dstar likewise ltl3dra likewise From 69b9ffef9a086533bb2386f144eb41c69e78f5aa Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 21:33:36 +0200 Subject: [PATCH 312/606] bin: fix handling for --output & --format with LTL outputs * bin/common_output.cc: Set the output stream for LTL formats. 
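A minimal sketch of the behaviour being fixed (the formula and file name are arbitrary): when both options are given, the formatted formula used to be written to standard output even though --output requested a file; with this change the --format output goes to the requested file.

    ltlfilt -f 'GFa' --format='%f' --output=out.ltl
    cat out.ltl    # now contains the formatted formula, here GFa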
--- NEWS | 3 +++ bin/common_output.cc | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 29cfa7cb1..5691c96ea 100644 --- a/NEWS +++ b/NEWS @@ -8,6 +8,9 @@ New in spot 2.11.5.dev (not yet released) where thousands of different filenames can be created failed with "Too many open files". (Issue #534) + - Using --format=... on a tool that output formulas would force + the output on standard output, even when --output was given. + New in spot 2.11.5 (2023-04-20) diff --git a/bin/common_output.cc b/bin/common_output.cc index 4ab62a9aa..13b4daf0f 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -224,7 +224,7 @@ namespace } }; - class formula_printer final: protected spot::formater + class formula_printer final: public spot::formater { public: formula_printer(std::ostream& os, const char* format) @@ -392,6 +392,7 @@ output_formula(std::ostream& out, else { formula_with_location fl = { f, filename, linenum, prefix, suffix }; + format->set_output(out); format->print(fl, ptimer); } } From e548bf0a8e0a2a9967ff58f56a6043c7b28bc4d7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 23:00:24 +0200 Subject: [PATCH 313/606] acc: remove some dead functions * spot/twa/acc.hh, spot/twa/acc.cc (has_parity_prefix, is_parity_max_equiv): Remove. * spot/twa/acc.hh, spot/twa/twagraph.cc, spot/twa/twagraph.hh (apply_permutation): Remove. --- spot/twa/acc.cc | 180 ------------------------------------------- spot/twa/acc.hh | 81 ------------------- spot/twa/twagraph.cc | 9 --- spot/twa/twagraph.hh | 4 +- 4 files changed, 1 insertion(+), 273 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 07aac36f9..732ea124c 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1067,81 +1067,6 @@ namespace spot return res; } - namespace - { - bool - has_parity_prefix_aux(acc_cond original, - acc_cond &new_cond, - std::vector &colors, - std::vector elements, - acc_cond::acc_op op) - { - if (elements.size() > 2) - { - new_cond = original; - return false; - } - if (elements.size() == 2) - { - unsigned pos = (elements[1].back().sub.op == op - && elements[1][0].mark.is_singleton()); - if (!(elements[0].back().sub.op == op || pos)) - { - new_cond = original; - return false; - } - if ((elements[1 - pos].used_sets() & elements[pos][0].mark)) - { - new_cond = original; - return false; - } - if (!elements[pos][0].mark.is_singleton()) - { - return false; - } - colors.push_back(elements[pos][0].mark.min_set() - 1); - elements[1 - pos].has_parity_prefix(new_cond, colors); - return true; - } - return false; - } - } - - bool - acc_cond::acc_code::has_parity_prefix(acc_cond &new_cond, - std::vector &colors) const - { - auto disj = top_disjuncts(); - if (!(has_parity_prefix_aux((*this), new_cond, colors, - disj, acc_cond::acc_op::Inf) || - has_parity_prefix_aux((*this), new_cond, colors, - top_conjuncts(), acc_cond::acc_op::Fin))) - new_cond = acc_cond((*this)); - return disj.size() == 2; - } - - bool - acc_cond::has_parity_prefix(acc_cond& new_cond, - std::vector& colors) const - { - return code_.has_parity_prefix(new_cond, colors); - } - - bool - acc_cond::is_parity_max_equiv(std::vector&permut, bool even) const - { - if (code_.used_once_sets() != code_.used_sets()) - return false; - bool result = code_.is_parity_max_equiv(permut, 0, even); - int max_value = *std::max_element(std::begin(permut), std::end(permut)); - for (unsigned i = 0; i < permut.size(); ++i) - if (permut[i] != -1) - permut[i] = max_value - permut[i]; - else - permut[i] = i; - return 
result; - } - bool acc_cond::is_parity(bool& max, bool& odd, bool equiv) const { unsigned sets = num_; @@ -1408,111 +1333,6 @@ namespace spot return patterns; } - bool - acc_cond::acc_code::is_parity_max_equiv(std::vector& permut, - unsigned new_color, - bool even) const - { - auto conj = top_conjuncts(); - auto disj = top_disjuncts(); - if (conj.size() == 1) - { - if (disj.size() == 1) - { - acc_cond::acc_code elem = conj[0]; - if ((even && elem.back().sub.op == acc_cond::acc_op::Inf) - || (!even && elem.back().sub.op == acc_cond::acc_op::Fin)) - { - for (auto color : disj[0][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - return true; - } - return false; - } - else - { - std::sort(disj.begin(), disj.end(), - [](acc_code c1, acc_code c2) - { - return (c1 != c2) && - c1.back().sub.op == acc_cond::acc_op::Inf; - }); - unsigned i = 0; - for (; i < disj.size() - 1; ++i) - { - if (disj[i].back().sub.op != acc_cond::acc_op::Inf - || !disj[i][0].mark.is_singleton()) - return false; - for (auto color : disj[i][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - } - if (disj[i].back().sub.op == acc_cond::acc_op::Inf) - { - if (!even || !disj[i][0].mark.is_singleton()) - return false; - for (auto color : disj[i][0].mark.sets()) - { - if (permut[color] != -1 - && ((unsigned) permut[color]) != new_color) - return false; - permut[color] = new_color; - } - return true; - } - return disj[i].is_parity_max_equiv(permut, new_color + 1, even); - } - } - else - { - std::sort(conj.begin(), conj.end(), - [](acc_code c1, acc_code c2) - { - return (c1 != c2) - && c1.back().sub.op == acc_cond::acc_op::Fin; - }); - unsigned i = 0; - for (; i < conj.size() - 1; i++) - { - if (conj[i].back().sub.op != acc_cond::acc_op::Fin - || !conj[i][0].mark.is_singleton()) - return false; - for (auto color : conj[i][0].mark.sets()) - { - if (permut[color] != -1 && permut[color != new_color]) - return false; - permut[color] = new_color; - } - } - if (conj[i].back().sub.op == acc_cond::acc_op::Fin) - { - if (even) - return 0; - if (!conj[i][0].mark.is_singleton()) - return false; - for (auto color : conj[i][0].mark.sets()) - { - if (permut[color] != -1 && permut[color != new_color]) - return false; - permut[color] = new_color; - } - return true; - } - - return conj[i].is_parity_max_equiv(permut, new_color + 1, even); - } - } - - namespace { template diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 1c460cfc4..aed1b3a2a 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -60,11 +60,6 @@ namespace spot /// could be removed.) class SPOT_API acc_cond { - - public: - bool - has_parity_prefix(acc_cond& new_acc, std::vector& colors) const; - #ifndef SWIG private: [[noreturn]] static void report_too_many_sets(); @@ -102,10 +97,6 @@ namespace spot /// Initialize an empty mark_t. mark_t() = default; - mark_t - apply_permutation(std::vector permut); - - #ifndef SWIG /// Create a mark_t from a range of set numbers. 
template @@ -489,15 +480,6 @@ namespace spot acc_code unit_propagation(); - bool - has_parity_prefix(acc_cond& new_cond, - std::vector& colors) const; - - bool - is_parity_max_equiv(std::vector& permut, - unsigned new_color, - bool even) const; - bool operator==(const acc_code& other) const { unsigned pos = size(); @@ -1793,8 +1775,6 @@ namespace spot bool is_parity(bool& max, bool& odd, bool equiv = false) const; - bool is_parity_max_equiv(std::vector& permut, bool even) const; - /// \brief check is the acceptance condition matches one of the /// four type of parity acceptance defined in the HOA format. bool is_parity() const @@ -1976,57 +1956,6 @@ namespace spot return all_; } - acc_cond - apply_permutation(std::vectorpermut) - { - return acc_cond(apply_permutation_aux(permut)); - } - - acc_code - apply_permutation_aux(std::vectorpermut) - { - auto conj = top_conjuncts(); - auto disj = top_disjuncts(); - - if (conj.size() > 1) - { - auto transformed = std::vector(); - for (auto elem : conj) - transformed.push_back(elem.apply_permutation_aux(permut)); - std::sort(transformed.begin(), transformed.end()); - auto uniq = std::unique(transformed.begin(), transformed.end()); - auto result = std::accumulate(transformed.begin(), uniq, acc_code::t(), - [](acc_code c1, acc_code c2) - { - return c1 & c2; - }); - return result; - } - else if (disj.size() > 1) - { - auto transformed = std::vector(); - for (auto elem : disj) - transformed.push_back(elem.apply_permutation_aux(permut)); - std::sort(transformed.begin(), transformed.end()); - auto uniq = std::unique(transformed.begin(), transformed.end()); - auto result = std::accumulate(transformed.begin(), uniq, acc_code::f(), - [](acc_code c1, acc_code c2) - { - return c1 | c2; - }); - return result; - } - else - { - if (code_.back().sub.op == acc_cond::acc_op::Fin) - return fin(code_[0].mark.apply_permutation(permut)); - if (code_.back().sub.op == acc_cond::acc_op::Inf) - return inf(code_[0].mark.apply_permutation(permut)); - } - SPOT_ASSERT(false); - return {}; - } - /// \brief Check whether visiting *exactly* all sets \a inf /// infinitely often satisfies the acceptance condition. bool accepting(mark_t inf) const @@ -2473,16 +2402,6 @@ namespace spot { return {*this}; } - - inline acc_cond::mark_t - acc_cond::mark_t::apply_permutation(std::vector permut) - { - mark_t result { }; - for (auto color : sets()) - if (color < permut.size()) - result.set(permut[color]); - return result; - } } namespace std diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 055d6ca11..3f74d4d99 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -108,15 +108,6 @@ namespace namespace spot { - void - twa_graph::apply_permutation(std::vector permut) - { - for (auto& e : edges()) - { - e.acc.apply_permutation(permut); - } - } - std::string twa_graph::format_state(unsigned n) const { if (is_univ_dest(n)) diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index 742a4d69a..1540692c6 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. 
@@ -220,8 +220,6 @@ namespace spot public: - void apply_permutation(std::vector permut); - twa_graph(const bdd_dict_ptr& dict) : twa(dict), init_number_(0) From 531252119ccc88fb1088ae4cad085fd4a187cf5f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 26 Jul 2023 23:57:12 +0200 Subject: [PATCH 314/606] aiger: order the inputs of binary AND gates * spot/twaalgos/aiger.cc: Here. * tests/core/ltlsynt.test: Adjust, and add test case for aiger=optim. --- spot/twaalgos/aiger.cc | 30 +++++++++++++++--------------- tests/core/ltlsynt.test | 8 ++++++-- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index af255a167..6b608dd59 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -552,20 +552,20 @@ namespace spot assert(var2bdd_.count(v1)); assert(var2bdd_.count(v2)); - if (v1 != v2) - { - bdd b = var2bdd_[v1] & var2bdd_[v2]; - auto [it, inserted] = bdd2var_.try_emplace(b.id(), 0); - if (!inserted) - return it->second; - max_var_ += 2; - it->second = max_var_; - and_gates_.emplace_back(v1, v2); - register_new_lit_(max_var_, b); - return max_var_; - } - else - return v1; + if (SPOT_UNLIKELY(v1 > v2)) + std::swap(v1, v2); + if (SPOT_UNLIKELY(v1 == v2)) + return v1; + + bdd b = var2bdd_[v1] & var2bdd_[v2]; + auto [it, inserted] = bdd2var_.try_emplace(b.id(), 0); + if (!inserted) + return it->second; + max_var_ += 2; + it->second = max_var_; + and_gates_.emplace_back(v1, v2); + register_new_lit_(max_var_, b); + return max_var_; } unsigned aig::aig_and(std::vector& vs) diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 4a7595539..02d248754 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019-2022 Laboratoire de Recherche et +# Copyright (C) 2017, 2019-2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -160,6 +160,10 @@ ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ --algo=ds --simplify=no --aiger=isop >out diff out exp +ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ + --algo=ds --simplify=no --aiger=optim >out +diff out exp + cat >exp < Date: Thu, 27 Jul 2023 09:48:00 +0200 Subject: [PATCH 315/606] tests: add some test to cover autcross' univ-edges removal * tests/core/ltl3ba.test: Here. --- tests/core/ltl3ba.test | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/core/ltl3ba.test b/tests/core/ltl3ba.test index fdcebc926..acc68a2c5 100755 --- a/tests/core/ltl3ba.test +++ b/tests/core/ltl3ba.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2016-2018, 2021, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -47,8 +47,10 @@ GF(((a & Xb) | XXc) & Xd) GF((b | Fa) & (b R Xb)) EOF randltl -n 30 2 -) | ltlcross -D 'ltl3ba -H1' 'ltl3ba -H2' 'ltl3ba -H3' ltl2tgba \ - --ambiguous --strength --csv=output.csv +) > file.ltl + +ltlcross -F file.ltl -D 'ltl3ba -H1' 'ltl3ba -H2' 'ltl3ba -H3' ltl2tgba \ + --ambiguous --strength --verbose --csv=output.csv grep _x output.csv && exit 1 @@ -59,6 +61,9 @@ while read l; do test "x$first" = "x$l" || exit 1 done) +ltldo 'ltl3ba -H1' -F file.ltl | + autcross --language-complemented 'autfilt --dualize' --verbose + # The name of the HOA is preserved case `ltldo 'ltl3ba -H' -f xxx --stats=%m` in *xxx*);; From 15857385a5a53d0d03a9a5c4a2891cbf4836adf5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 27 Jul 2023 10:23:33 +0200 Subject: [PATCH 316/606] bin: cover more tmpfile failure when running as root * tests/core/ltlcross5.test: reorganize to test missing directory before permission issues, as the latter cannot be run as root. --- tests/core/ltlcross5.test | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/tests/core/ltlcross5.test b/tests/core/ltlcross5.test index 82e9fdc89..c89a7bd0b 100644 --- a/tests/core/ltlcross5.test +++ b/tests/core/ltlcross5.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement de +# Copyright (C) 2019, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -24,10 +24,22 @@ set -e unset TMPDIR unset SPOT_TMPDIR +err=0 + +SPOT_TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=1 +cat err +grep 'failed to create temporary file' err +grep 'Note that the directory.*SPOT_TMPDIR ' err + +TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=2 +cat err +grep 'failed to create temporary file' err +grep 'Note that the directory.* TMPDIR ' err + + mkdir foo chmod a-w foo cd foo -err=0 if touch bar; then # We are likely running as root, so we cannot detect permission # issues. @@ -39,27 +51,17 @@ fi ltlcross ltl2tgba -f GFa 2>../err && err=1 cd .. cat err -grep 'failed to create temporary file' err || err=1 -grep 'executing this from a writable' err || err=1 +grep 'failed to create temporary file' err || err=3 +grep 'executing this from a writable' err || err=3 grep 'SPOT_TMPDIR' err || err=1 -SPOT_TMPDIR=foo ltlcross ltl2tgba -f GFa 2>err && err=2 +SPOT_TMPDIR=foo ltlcross ltl2tgba -f GFa 2>err && err=4 cat err -grep 'failed to create temporary file' err || err=2 -grep 'executing this from a writable' err && err=2 +grep 'failed to create temporary file' err || err=4 +grep 'executing this from a writable' err && err=4 grep 'SPOT_TMPDIR' err chmod a+w foo rmdir foo -SPOT_TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=3 -cat err -grep 'failed to create temporary file' err -grep 'Note that the directory.*SPOT_TMPDIR ' err - -TMPDIR=bar ltlcross ltl2tgba -f GFa 2>err && err=4 -cat err -grep 'failed to create temporary file' err -grep 'Note that the directory.* TMPDIR ' err - exit $err From 44d9e34e324629a8483d9b08209113763bc3ac5c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 27 Jul 2023 14:28:15 +0200 Subject: [PATCH 317/606] improve coverage of LaTeX/utf8 printers for SERE * bin/common_output.cc, bin/common_output.hh, bin/randltl.cc: Adjust so that running "randltl -S" use the SERE flavor of the spot/latex/utf8 formula printers. * tests/core/latex.test, tests/core/utf8.test, tests/python/ltlparse.py: Add more test cases. 
--- NEWS | 2 ++ bin/common_output.cc | 16 +++++++++++++--- bin/common_output.hh | 1 + bin/randltl.cc | 1 + tests/core/latex.test | 7 ++++--- tests/core/utf8.test | 17 ++++++++++++++++- tests/python/ltlparse.py | 18 +++++++++++++++++- 7 files changed, 54 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index 5691c96ea..609805130 100644 --- a/NEWS +++ b/NEWS @@ -11,6 +11,8 @@ New in spot 2.11.5.dev (not yet released) - Using --format=... on a tool that output formulas would force the output on standard output, even when --output was given. + - Using "randltl -S" did not correctly go through the SERE printer + functions. New in spot 2.11.5 (2023-04-20) diff --git a/bin/common_output.cc b/bin/common_output.cc index 13b4daf0f..11d0da80e 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -45,6 +45,7 @@ output_format_t output_format = spot_output; bool full_parenth = false; bool escape_csv = false; char output_terminator = '\n'; +bool output_ratexp = false; static const argp_option options[] = { @@ -105,7 +106,10 @@ stream_formula(std::ostream& out, report_not_ltl(f, filename, linenum, "LBT"); break; case spot_output: - spot::print_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_sere(out, f, full_parenth); + else + spot::print_psl(out, f, full_parenth); break; case spin_output: if (f.is_ltl_formula()) @@ -120,10 +124,16 @@ stream_formula(std::ostream& out, report_not_ltl(f, filename, linenum, "Wring"); break; case utf8_output: - spot::print_utf8_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_utf8_sere(out, f, full_parenth); + else + spot::print_utf8_psl(out, f, full_parenth); break; case latex_output: - spot::print_latex_psl(out, f, full_parenth); + if (output_ratexp) + spot::print_latex_sere(out, f, full_parenth); + else + spot::print_latex_psl(out, f, full_parenth); break; case count_output: case quiet_output: diff --git a/bin/common_output.hh b/bin/common_output.hh index 1cff67229..1661929b9 100644 --- a/bin/common_output.hh +++ b/bin/common_output.hh @@ -36,6 +36,7 @@ enum output_format_t { spot_output, spin_output, utf8_output, extern output_format_t output_format; extern bool full_parenth; extern bool escape_csv; +extern bool output_ratexp; #define COMMON_X_OUTPUT_SPECS(where) \ "number of atomic propositions " #where "; " \ diff --git a/bin/randltl.cc b/bin/randltl.cc index 749fcf373..6f672f092 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -180,6 +180,7 @@ parse_opt(int key, char* arg, struct argp_state* as) break; case 'S': output = spot::randltlgenerator::SERE; + output_ratexp = true; break; case OPT_BOOLEAN_PRIORITIES: opt_pB = arg; diff --git a/tests/core/latex.test b/tests/core/latex.test index 6e94e14d6..61eeab993 100755 --- a/tests/core/latex.test +++ b/tests/core/latex.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2013, 2015, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -50,7 +50,8 @@ cat <<\EOF EOF ( ltlfilt --latex input --format='\texttt{%F:%L} & $%f$ \\'; genltl --go-theta=1..3 --latex \ - --format='\texttt{--%F:%L} & $%f$ \\') + --format='\texttt{--%F:%L} & $%f$ \\'; + randltl -S -n10 --latex 2 --format='\texttt{random %L} & $%f$ \\') cat <<\EOF \end{tabular} \end{document} diff --git a/tests/core/utf8.test b/tests/core/utf8.test index 45ba950f5..b0bfef043 100755 --- a/tests/core/utf8.test +++ b/tests/core/utf8.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2015, 2016, 2019 Laboratoire de Recherche et +# Copyright (C) 2012, 2013, 2015, 2016, 2019, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -70,3 +70,18 @@ ltlfilt -8 -f 'X[!]a' >out diff in out ltlfilt -8 -F in >out diff in out + +randltl --sere -8 --seed 0 --tree-size 8 a b c -n 10 > formulae +cat >expected <3]') +tc.assertEqual(spot.str_sere(pf.f), 'a[->3]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'a\\SereGoto{3}') +pf = spot.parse_infix_sere('(!b)[*];b;(!b)[*]') +tc.assertEqual(spot.str_sere(pf.f), 'b[=1]') +pf = spot.parse_infix_sere('b[=1]') +tc.assertEqual(spot.str_sere(pf.f), 'b[=1]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'b\\SereEqual{1}') +tc.assertEqual(spot.str_sclatex_sere(pf.f), 'b^{=1}') +pf = spot.parse_infix_sere('(!b)[*];b') +tc.assertEqual(spot.str_sere(pf.f), 'b[->]') +pf = spot.parse_infix_sere('b[->1]') +tc.assertEqual(spot.str_latex_sere(pf.f), 'b\\SereGoto{}') +tc.assertEqual(spot.str_sclatex_sere(pf.f), 'b^{\\to}') From de7d5a956fe2ec6968283f5194bb27a6d8ded576 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 28 Jul 2023 16:20:18 +0200 Subject: [PATCH 318/606] * .gitlab-ci.yml: temporary disable raspbian. --- .gitlab-ci.yml | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 348bacba1..424f7bc21 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -458,22 +458,26 @@ publish-unstable: - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline -raspbian: - stage: build - only: - - branches - except: - - /wip/ - tags: - - armv7 - script: - - autoreconf -vfi - - ./configure - - make - - make distcheck || { chmod -R u+w ./spot-*; false; } - artifacts: - when: always - paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log - - ./*.tar.gz + +# The SD card of our Raspberry failed. Disable this job until we +# can make it work again. +# +# raspbian: +# stage: build +# only: +# - branches +# except: +# - /wip/ +# tags: +# - armv7 +# script: +# - autoreconf -vfi +# - ./configure +# - make +# - make distcheck || { chmod -R u+w ./spot-*; false; } +# artifacts: +# when: always +# paths: +# - ./spot-*/_build/sub/tests/*/*.log +# - ./*.log +# - ./*.tar.gz From e37bc9e1ae1935f81196c31e62cb8888026f8493 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 31 Jul 2023 21:07:07 +0200 Subject: [PATCH 319/606] [buddy] fix cache index of bdd_forall Fix a 20 year old typo that caused a bug reported by Guillermo Perez. * src/bddop.c (bdd_forall, bdd_forallcomp): Fix the cache index. 
--- buddy/src/bddop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/buddy/src/bddop.c b/buddy/src/bddop.c index f54a39442..fd62ed9b8 100644 --- a/buddy/src/bddop.c +++ b/buddy/src/bddop.c @@ -2300,7 +2300,7 @@ RETURN {* The quantified BDD. *} */ BDD bdd_forall(BDD r, BDD var) { - return quantify(r, var, bddop_and, 0, CACHEID_EXIST); + return quantify(r, var, bddop_and, 0, CACHEID_FORALL); } /* @@ -2315,7 +2315,7 @@ RETURN {* The quantified BDD. *} */ BDD bdd_forallcomp(BDD r, BDD var) { - return quantify(r, var, bddop_and, 1, CACHEID_EXISTC); + return quantify(r, var, bddop_and, 1, CACHEID_FORALLC); } From bb95705d5251e97af646ada29b86cd986507a638 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 31 Jul 2023 21:11:39 +0200 Subject: [PATCH 320/606] mention the bug fixed in BuDDy * NEWS: Explain the bug fixed in previous patch and reported in issue #535. * THANKS: Add Guillermo. --- NEWS | 5 +++++ THANKS | 1 + 2 files changed, 6 insertions(+) diff --git a/NEWS b/NEWS index 609805130..888a05da4 100644 --- a/NEWS +++ b/NEWS @@ -14,6 +14,11 @@ New in spot 2.11.5.dev (not yet released) - Using "randltl -S" did not correctly go through the SERE printer functions. + - Our copy of BuDDy's bdd_forall() had a 20 year old typo that + caused cache entries from bdd_exist() and bdd_forall() to be + mixed. Spot was safe from this bug because it was only using + bdd_exist(). (Issue #535) + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/THANKS b/THANKS index 93155f9d1..7986c3875 100644 --- a/THANKS +++ b/THANKS @@ -21,6 +21,7 @@ Felix Klaedtke Florian Perlié-Long František Blahoudek Gerard J. Holzmann +Guillermo A. Perez Hashim Ali Heikki Tauriainen Henrich Lauko From f4b397a2bfbcf171e6b2ef9c663b34f92d122616 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 1 Aug 2023 12:19:47 +0200 Subject: [PATCH 321/606] Release Spot 2.11.6 * NEWS, configure.ac, doc/org/setup.org: Update version. --- NEWS | 6 +++--- configure.ac | 2 +- doc/org/setup.org | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/NEWS b/NEWS index 888a05da4..986e1be91 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,4 @@ -New in spot 2.11.5.dev (not yet released) - - Nothing yet. +New in spot 2.11.6 (2023-08-01) Bug fixes: @@ -19,6 +17,8 @@ New in spot 2.11.5.dev (not yet released) mixed. Spot was safe from this bug because it was only using bdd_exist(). (Issue #535) + - Work around recent Pandas and GCC changes. + New in spot 2.11.5 (2023-04-20) Bug fixes: diff --git a/configure.ac b/configure.ac index 09fe45364..9bfa98eac 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.5.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.6], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 255a01c3d..772ff2cc0 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: LASTDATE 2023-04-20 +#+MACRO: LASTDATE 2023-08-01 #+NAME: SPOT_VERSION #+BEGIN_SRC python :exports none :results value :wrap org -return "2.11.5" +return "2.11.6" #+END_SRC #+NAME: TARBALL_LINK From 41751b80a175e5110dae966a4c1399e87b94931c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 1 Aug 2023 14:22:32 +0200 Subject: [PATCH 322/606] * NEWS, configure.ac: Bump version to 2.11.6.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 986e1be91..455c4825b 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.11.6.dev (not yet released) + + Nothing yet. + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/configure.ac b/configure.ac index 9bfa98eac..41261d84c 100644 --- a/configure.ac +++ b/configure.ac @@ -21,7 +21,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.6], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.6.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 14347cdc52f7da6c141711b20665a9d2be62a078 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 3 Aug 2023 11:49:08 +0200 Subject: [PATCH 323/606] * tests/sanity/style.test: Don't use egrep. --- tests/sanity/style.test | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 325ebe78d..064079f37 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -267,23 +267,23 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do # std::list::size() can be O(n). Better use empty() whenever # possible, even for other containers. - e$GREP '(->|[.])size\(\) [=!]= 0|![a-zA-Z0-9_]*(->|[.])size\(\)|(if |while |assert)\([a-zA-Z0-9_]*(->|[.])size\(\)\)' $tmp && + $GREP -E '(->|[.])size\(\) [=!]= 0|![a-zA-Z0-9_]*(->|[.])size\(\)|(if |while |assert)\([a-zA-Z0-9_]*(->|[.])size\(\)\)' $tmp && diag 'Prefer empty() to check emptiness.' $GREP -E 'std::cerr.*<<.*endl' $tmp && diag 'std::cerr has unitbuf set; use \\n instead of endl' - e$GREP 'assert\((0|!".*")\)' $tmp && + $GREP -E 'assert\((0|!".*")\)' $tmp && diag 'Prefer SPOT_UNREACHABLE or SPOT_UNIMPLEMENTED.' - e$GREP '^[^=*<]*([+][+]|--);' $tmp && + $GREP -E '^[^=*<]*([+][+]|--);' $tmp && diag 'Take good habits: use ++i instead of i++ when you have the choice.' $GREP '[^a-zA-Z0-9_](\*[a-zA-Z0-9_]*)\.' $tmp && diag 'Use "x->y", not "(*x).y"' # we allow these functions only in ?...:... 
- e$GREP 'bdd_(false|true)[ ]*\(' $tmp | $GREP -v '[?:]' && + $GREP -E 'bdd_(false|true)[ ]*\(' $tmp | $GREP -v '[?:]' && diag 'Use bddfalse and bddtrue instead of bdd_false() and bdd_true()' res=`perl -ne '$/ = undef; @@ -300,7 +300,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do case $file in *.hh | *.hxx) - if e$GREP '(<<|>>)' $tmp >/dev/null; then + if $GREP -E '(<<|>>)' $tmp >/dev/null; then : else $GREP '#.*include.*' $tmp && @@ -338,7 +338,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do $GREP '^[ ]*class[ ]' $tmp && diag 'Private definitions must be in anonymous namespace.' fi - e$GREP ' ' $tmp && + $GREP ' ' $tmp && diag 'Use spaces instead of tabs.' case $file in */bin/*) ;; From 18478e663fa4e07294750a47141890de6a434b1a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 28 Aug 2023 15:42:19 +0200 Subject: [PATCH 324/606] relabel: introduce an overlapping relabeling version Related to issue #500 and issue #536. * spot/tl/relabel.hh (relabel_overlapping_bse): New function. * spot/tl/relabel.cc: Implement it. * bin/ltlfilt.cc: Add a --relabel-overlapping-bool option. * tests/core/ltlfilt.test: Test it. * NEWS: Mention it. --- NEWS | 8 ++++ bin/ltlfilt.cc | 51 +++++++++++++++----- spot/tl/relabel.cc | 103 +++++++++++++++++++++++++++------------- spot/tl/relabel.hh | 23 ++++++++- tests/core/ltlfilt.test | 70 ++++++++++++++++++++++++++- 5 files changed, 207 insertions(+), 48 deletions(-) diff --git a/NEWS b/NEWS index 99b1f1f40..af455e012 100644 --- a/NEWS +++ b/NEWS @@ -12,6 +12,10 @@ New in spot 2.11.6.dev (not yet released) autfilt input.hoa -o output-%l.hoa + - ltlfilt has a new option --relabel-overlapping-bool=abc|pnn that + will replace boolean subformulas by fresh atomic propositions even + if those subformulas share atomic propositions. + Library: - The following new trivial simplifications have been implemented for SEREs: @@ -29,6 +33,10 @@ New in spot 2.11.6.dev (not yet released) - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() that converts a BDD into a CNF instead of a DNF. + - spot::relabel_overlapping_bse() is a new function that will + replace boolean subformulas by fresh atomic propositions even if + those subformulas share atomic propositions. 
+ New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index ed2f2d08d..5ed7a2fe1 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -94,6 +94,7 @@ enum { OPT_REJECT_WORD, OPT_RELABEL, OPT_RELABEL_BOOL, + OPT_RELABEL_OVERLAP, OPT_REMOVE_WM, OPT_REMOVE_X, OPT_SAFETY, @@ -139,8 +140,12 @@ static const argp_option options[] = "relabel all atomic propositions, alphabetically unless " \ "specified otherwise", 0 }, { "relabel-bool", OPT_RELABEL_BOOL, "abc|pnn", OPTION_ARG_OPTIONAL, - "relabel Boolean subexpressions, alphabetically unless " \ - "specified otherwise", 0 }, + "relabel Boolean subexpressions that do not share atomic propositions," + " relabel alphabetically unless specified otherwise", 0 }, + { "relabel-overlapping-bool", OPT_RELABEL_OVERLAP, "abc|pnn", + OPTION_ARG_OPTIONAL, + "relabel Boolean subexpressions even if they share atomic propositions," + " relabel alphabetically unless specified otherwise", 0 }, { "define", OPT_DEFINE, "FILENAME", OPTION_ARG_OPTIONAL, "when used with --relabel or --relabel-bool, output the relabeling map " "using #define statements", 0 }, @@ -316,7 +321,10 @@ static bool recurrence = false; static bool persistence = false; static range size = { -1, -1 }; static range bsize = { -1, -1 }; -enum relabeling_mode { NoRelabeling = 0, ApRelabeling, BseRelabeling }; +enum relabeling_mode { NoRelabeling = 0, + ApRelabeling, + BseRelabeling, + OverlappingRelabeling }; static relabeling_mode relabeling = NoRelabeling; static spot::relabeling_style style = spot::Abc; static bool remove_x = false; @@ -358,6 +366,19 @@ parse_formula_arg(const std::string& input) return pf.f; } +static void +parse_relabeling_style(const char* arg, const char* optname) +{ + if (!arg || !strncasecmp(arg, "abc", 6)) + style = spot::Abc; + else if (!strncasecmp(arg, "pnn", 4)) + style = spot::Pnn; + else + error(2, 0, "invalid argument for --relabel%s: '%s'\n" + "expecting 'abc' or 'pnn'", optname, arg); +} + + static int parse_opt(int key, char* arg, struct argp_state*) { @@ -500,16 +521,16 @@ parse_opt(int key, char* arg, struct argp_state*) } break; case OPT_RELABEL: + relabeling = ApRelabeling; + parse_relabeling_style(arg, ""); + break; case OPT_RELABEL_BOOL: - relabeling = (key == OPT_RELABEL_BOOL ? BseRelabeling : ApRelabeling); - if (!arg || !strncasecmp(arg, "abc", 6)) - style = spot::Abc; - else if (!strncasecmp(arg, "pnn", 4)) - style = spot::Pnn; - else - error(2, 0, "invalid argument for --relabel%s: '%s'", - (key == OPT_RELABEL_BOOL ? "-bool" : ""), - arg); + relabeling = BseRelabeling; + parse_relabeling_style(arg, "-bool"); + break; + case OPT_RELABEL_OVERLAP: + relabeling = OverlappingRelabeling; + parse_relabeling_style(arg, "-overlapping-bool"); break; case OPT_REMOVE_WM: unabbreviate += "MW"; @@ -701,6 +722,12 @@ namespace f = spot::relabel_bse(f, style, &relmap); break; } + case OverlappingRelabeling: + { + relmap.clear(); + f = spot::relabel_overlapping_bse(f, style, &relmap); + break; + } case NoRelabeling: break; } diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index 26c7564c1..f57c0c919 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche et +// Copyright (C) 2012-2016, 2018-2020, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -80,7 +80,9 @@ namespace spot } }; - + // if subexp == false, matches APs + // if subexp == true, matches boolean subexps + template class relabeler { public: @@ -101,54 +103,89 @@ namespace spot formula rename(formula old) { + if constexpr (subexp) + { + // have we given a name to the negation of this formula? + auto neg = newname.find(formula::Not(old)); + if (neg != newname.end()) + return formula::Not(neg->second); + } + auto r = newname.emplace(old, nullptr); if (!r.second) - { - return r.first->second; - } - else - { - formula res = gen->next(); - r.first->second = res; - if (oldnames) - (*oldnames)[res] = old; - return res; - } + return r.first->second; + + formula res = gen->next(); + r.first->second = res; + if (oldnames) + (*oldnames)[res] = old; + return res; } formula visit(formula f) { - if (f.is(op::ap)) - return rename(f); + if ((!subexp && f.is(op::ap)) + || (subexp && f.is_boolean())) + { + return rename(f); + } + if (subexp && f.is(op::Or, op::And) && f[0].is_boolean()) + { + // Boolean terms are always beginning of And and Or, so + // the above test capture Or/And that some Boolean arguments + // and some non-Boolean arguments. + unsigned i = 0; + formula b = f.boolean_operands(&i); + unsigned sz = f.size(); + std::vector res; + res.reserve(sz - i + 1); + res.emplace_back(visit(b)); + for (; i < sz; ++i) + res.emplace_back(visit(f[i])); + return formula::multop(f.kind(), res); + } else - return f.map([this](formula f) - { - return this->visit(f); - }); + { + return f.map([this](formula f) + { + return this->visit(f); + }); + } } }; - } + template + formula + relabel_do(formula f, relabeling_style style, relabeling_map* m) + { + ap_generator* gen = nullptr; + switch (style) + { + case Pnn: + gen = new pnn_generator; + break; + case Abc: + gen = new abc_generator; + break; + } + relabeler r(gen, m); + return r.visit(f); + } + } formula relabel(formula f, relabeling_style style, relabeling_map* m) { - ap_generator* gen = nullptr; - switch (style) - { - case Pnn: - gen = new pnn_generator; - break; - case Abc: - gen = new abc_generator; - break; - } + return relabel_do(f, style, m); + } - relabeler r(gen, m); - return r.visit(f); + formula + relabel_overlapping_bse(formula f, relabeling_style style, relabeling_map* m) + { + return relabel_do(f, style, m); } namespace @@ -502,7 +539,7 @@ namespace spot } - class bse_relabeler final: public relabeler + class bse_relabeler final: public relabeler { public: const fset& c; diff --git a/spot/tl/relabel.hh b/spot/tl/relabel.hh index 384d1d43f..59efdf94b 100644 --- a/spot/tl/relabel.hh +++ b/spot/tl/relabel.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015, 2019 Laboratoire de Recherche et +// Copyright (C) 2012, 2013, 2015, 2019, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -34,6 +34,9 @@ namespace spot /// /// If \a m is non-null, it is filled with correspondence /// between the new names (keys) and the old names (values). + /// + /// \see relabel_bse + /// \see relabel_overlaping_bse SPOT_API formula relabel(formula f, relabeling_style style, relabeling_map* m = nullptr); @@ -45,9 +48,27 @@ namespace spot /// /// If \a m is non-null, it is filled with correspondence /// between the new names (keys) and the old names (values). 
+ /// + /// The relabel_overlapping_bse() will introduce a new atomic + /// proposition for each maximal Boolean subexpression encountered, + /// even if they overlap (i.e., share common atomic + /// propositions). For instance `(a & b & c) U (c & d & e)` will be + /// simply be relabeled as `p0 U p1`. This kind of renaming to not + /// preserves the + /// + /// The relabel_bse() version will make sure that the replaced + /// subexpressions do not share atomic propositions. For instance + /// `(a & b & c) U (c & d & e)` will be simply be relabeled as + /// `(p0 & p1) U (p1 & p2)`, were `p1` replaces `c` and the rest + /// is obvious. + /// + /// @{ SPOT_API formula relabel_bse(formula f, relabeling_style style, relabeling_map* m = nullptr); + SPOT_API formula + relabel_overlapping_bse(formula f, relabeling_style style, relabeling_map* m); + // @} /// \ingroup tl_rewriting /// \brief Replace atomic propositions of \a f by subformulas diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 43d50ce06..192a60fef 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -1,7 +1,7 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2020, 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2013-2020, 2022, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -361,6 +361,8 @@ G(d & e) | FG(Xf| !c) | h | i b & !Xc & e & (f | g) b & GF(a | c) & !GF!(a | c) F(a <-> b) -> (c xor d) +(a & b & c) U (c & d & e) +(a & b & c) U !(a & b & c) EOF cat >exp <out @@ -393,6 +397,14 @@ p0 || []p1 || <>[](p2 || Xp3) #define p0 ((c && !d) || (!c && d)) #define p1 ((a && !b) || (!a && b)) p0 || []p1 +#define p0 (a && b) +#define p1 (c) +#define p2 (d && e) +(p0 && p1) U (p1 && p2) +#define p0 (a) +#define p1 (b) +#define p2 (c) +(p0 && p1 && p2) U (!p0 || !p1 || !p2) EOF run 0 ltlfilt -s -u --nnf --relabel-bool=pnn --define in >out @@ -433,11 +445,53 @@ p0 && []<>(p1 || p2) && ![]<>!(p1 || p2) #define p2 (c) #define p3 (d) <>(p0 <-> p1) -> !(p2 <-> p3) +#define p0 (a) +#define p1 (b) +#define p2 (c) +#define p3 (d) +#define p4 (e) +(p0 && p1 && p2) U (p2 && p3 && p4) +#define p0 (a) +#define p1 (b) +#define p2 (c) +(p0 && p1 && p2) U !(p0 && p1 && p2) EOF run 0 ltlfilt -s -u --relabel=pnn --define in >out diff exp out +cat >exp < b) +#define p1 (c xor d) +Fp0 -> p1 +#define p0 (a & b & c) +#define p1 (c & d & e) +p0 U p1 +#define p0 (a & b & c) +p0 U !p0 +EOF + +run 0 ltlfilt -u --relabel-over=pnn --define in >out +diff exp out + + toolong='((p2=0) * (p3=1))' # work around the 80-col check cat >exp <exp <out diff exp out @@ -484,6 +548,8 @@ h | i | G(d & e) | FG(!c | Xf)@ b & e & (f | g) & !Xc@ b & GF(a | c) & !GF!(a | c)@ F(a <-> b) -> (c xor d)@ +(a & b & c) U (c & d & e)@ +(a & b & c) U !(a & b & c)@ EOF diff exp out From 110b052b7d9ba1cc198af7b3a827cef2adf9564c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 28 Aug 2023 22:45:44 +0200 Subject: [PATCH 325/606] translate: add a new relabel-overlap option Fixes issue #536. Also a part of issue #500. * spot/twaalgos/translate.hh, spot/twaalgos/translate.cc: Implement this new option. * bin/spot-x.cc, NEWS: Mention it. * tests/core/ltl2tgba2.test: Add the test case from issue #536. 
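To make the new relabel-overlap option concrete (an editorial sketch, not part of the patch: it assumes Spot's usual option_map/translator plumbing for -x style options, the formula is an arbitrary placeholder, and the defaults and semantics are those described in the NEWS and spot-x entries below):

    #include <iostream>
    #include <spot/misc/optionmap.hh>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>

    int main()
    {
      spot::option_map om;
      // Attempt overlapping relabeling once the formula still has 6 or
      // more atomic propositions after the relabel-bool pass; 0 disables it.
      om.parse_options("relabel-overlap=6");
      spot::translator trans(&om);
      spot::twa_graph_ptr aut =
        trans.run(spot::parse_formula("GFa & GFb & GFc"));
      std::cout << aut->num_states() << " states\n";
      return 0;
    }

On the command line, the same knob is reachable as 'ltl2tgba -x relabel-overlap=N', with N=0 disabling the rewriting as documented below.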
--- NEWS | 23 ++++++++++++++++ bin/spot-x.cc | 11 +++++++- spot/twaalgos/translate.cc | 54 +++++++++++++++++++++++++++++++------- spot/twaalgos/translate.hh | 5 ++-- tests/core/ltl2tgba2.test | 13 ++++++++- 5 files changed, 92 insertions(+), 14 deletions(-) diff --git a/NEWS b/NEWS index af455e012..251f5feb3 100644 --- a/NEWS +++ b/NEWS @@ -37,6 +37,29 @@ New in spot 2.11.6.dev (not yet released) replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. + - spot::translate() has a new -x option "relabel-overlap=M" that + augments the existing "relabel-bool=N". By default, N=4, M=8. + When the formula to translate has more than N atomic propositions, + relabel_bse() is first called to attempt to rename non-overlaping + boolean subexpressions (i.e., no shared atomic proposition) in + order to reduce the number of atomic proposition, a source of + explonential explosion in several places of the translation + pipeline. This relabel-bool optimization exists since Spot 2.4. + The new feature is that if, after relabel-bool, the formula still + has more than M atomic propositions, then spot::translate() now + attempts to relabel boolean subexpressions even if they have + overlapping atomic propositions, in an attempt to reduce the + number of atomic proposition even more. Doing so has the slightly + unfortunate side effect of hindering some simplifications (because + the new atomic propositions hide their interactions), but it + usually incures a large speedup. (See Issue #500, Issue #536.) + + For instance on Alexandre's laptop, running + 'ltlsynt --tlsf SPIReadManag.tlsf --aiger' + with Spot 2.11.6 used to produce an AIG circuit with 48 nodes in + 36 seconds; it now produce an AIG circuit with 53 nodes in only + 0.1 second. + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 964710dc1..19721daeb 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -196,7 +196,16 @@ disabled, it is just an upper bound otherwise.") }, with N atomic propositions or more will have its Boolean subformulas \ abstracted as atomic propositions during the translation to automaton. \ This relabeling can speeds the translation if a few Boolean subformulas \ -use a large number of atomic propositions. By default N=4. Setting \ +use a large number of atomic propositions. This relabeling make sure \ +the subexpression that are replaced do not share atomic propositions. \ +By default N=4. Setting this value to 0 will disable the rewriting.") }, + { DOC("relabel-overlap", "If set to a positive integer N, a formula \ +with N atomic propositions or more will have its Boolean subformulas \ +abstracted as atomic propositions during the translation to automaton. \ +This version does not care about overlapping atomic propositions, so \ +it can cause the created temporary automata to have incompatible \ +combinations of atomic propositions that will be eventually be removed. \ +This relabeling is attempted after relabel-bool. By default N=8. 
Setting \ this value to 0 will disable the rewriting.") }, { DOC("wdba-minimize", "Set to 0 to disable WDBA-minimization, to 1 to \ always try it, or 2 to attempt it only on syntactic obligations or on automata \ diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 339463426..7ad57347f 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020-2022 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -38,6 +38,7 @@ namespace spot { comp_susp_ = early_susp_ = skel_wdba_ = skel_simul_ = 0; relabel_bool_ = 4; + relabel_overlap_ = 8; tls_impl_ = -1; ltl_split_ = true; exprop_ = -1; @@ -47,6 +48,7 @@ namespace spot return; relabel_bool_ = opt->get("relabel-bool", 4); + relabel_overlap_ = opt->get("relabel-overlap", 6); comp_susp_ = opt->get("comp-susp", 0); if (comp_susp_ == 1) { @@ -480,15 +482,23 @@ namespace spot // 2) has some Boolean subformula // 3) relabel_bse() actually reduces the number of atomic // propositions. + // + // If the formula still has more than relabel_overlap_ APs after + // the above, we try the more aggressive relabel_overlapping_bse() + // function. However after applying this function, we might have + // false edges. relabeling_map m; formula to_work_on = *f; - if (relabel_bool_ > 0) + if (relabel_bool_ > 0 || relabel_overlap_ > 0) { std::set aps; atomic_prop_collect(to_work_on, &aps); unsigned atomic_props = aps.size(); - if (atomic_props >= (unsigned) relabel_bool_) + if ((relabel_bool_ + && atomic_props >= (unsigned) relabel_bool_) + || (relabel_overlap_ + && atomic_props >= (unsigned) relabel_overlap_)) { // Make a very quick simplification path before for // Boolean subformulas, only only syntactic rules. This @@ -507,14 +517,15 @@ namespace spot options.nenoform_stop_on_boolean = true; options.boolean_to_isop = false; tl_simplifier simpl(options, simpl_->get_dict()); - to_work_on = simpl.simplify(to_work_on); + formula simplified = to_work_on = simpl.simplify(to_work_on); // Do we have Boolean subformulas that are not atomic // propositions? bool has_boolean_sub = false; to_work_on.traverse([&](const formula& f) { - if (f.is_boolean()) + if (f.is_boolean() + && !f.is(op::ap, op::Not)) { has_boolean_sub = true; return true; @@ -524,11 +535,34 @@ namespace spot if (has_boolean_sub) { - formula relabeled = relabel_bse(to_work_on, Pnn, &m); - if (m.size() < atomic_props) - to_work_on = relabeled; - else - m.clear(); + if (relabel_bool_ + && atomic_props >= (unsigned) relabel_bool_) + { + formula relabeled = relabel_bse(to_work_on, Pnn, &m); + if (m.size() < atomic_props) + { + atomic_props = m.size(); + to_work_on = relabeled; + } + else + { + m.clear(); + } + } + if (relabel_overlap_ + && atomic_props >= (unsigned) relabel_overlap_) + { + relabeling_map m2; + formula relabeled = + relabel_overlapping_bse(simplified, Pnn, &m2); + if (m2.size() < atomic_props) + { + atomic_props = m2.size(); + to_work_on = relabeled; + std::swap(m, m2); + } + m2.clear(); + } } } } diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index 8428a2f22..4e534b1f7 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). 
+// Copyright (C) 2013-2018, 2020, 2022, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -150,6 +150,7 @@ namespace spot int skel_wdba_; int skel_simul_; int relabel_bool_; + int relabel_overlap_; int tls_impl_; bool gf_guarantee_ = true; bool gf_guarantee_set_ = false; diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 8397bbc85..6e64e7081 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -516,3 +516,14 @@ f='G((a -> X((!a U b) | G!a)) & (a -> X(G!a | (!a U c))) & (a -> X(G!a (a -> X(G!a | (!a U g))) & (a -> X(G!a | (!a U h)))) & (GFa <-> (GFb & GFc & GFd & GFe & GFf & GFg & GFh))' test 128 = `ltl2tgba -G -D "$f" --stats=%s` + +# This used to die from out of memory after 5 minutes. See issue #536. +f='(TRUE & (G F ~v21)& (( G F v39 -> G F v23))) -> (TRUE & +(G F (v1 -> (v41 & v29)))& (G F (v3 -> (v42 & v29)))& (G F (v5 +-> (v43 & v29)))& (G F (v7 -> (v44 & v29)))& (G F (v9 -> (v45 & +v29)))& (G F (v11 -> (v46 & v29)))& (G F (v13 -> (v47 & v29)))& +(G F (v15 -> (v48 & v29)))& (G F (v17 -> (v49 & v29)))& (G F (v19 +-> (v50 & v29)))& (G F (v41 | (v1 | (v3 | (v5 | (v7 | (v9 | +(v11 | (v13 | (v15 | (v17 | v19))))))))))))' +ltl2tgba -p'min even' -D -C "$f" --stats='%s %e'>out +test '22 288' = "`cat out`" From e2149fabf443391ad2f776cef458e5e6407ddc52 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 4 Sep 2023 17:51:26 +0200 Subject: [PATCH 326/606] determinize: work around overflow in reachability matrix indices Fixes #541, reported by David Dokoupil. * spot/twaalgos/determinize.cc: Disable use_simulation when the input has more than 2^16 SCCs.. Also rework the reachability matrix to store only its lower half triangle. * spot/twaalgos/determinize.hh, NEWS: Mention the limitation of use_simulation. * THANKS: Add David. --- NEWS | 8 +++ THANKS | 1 + spot/twaalgos/determinize.cc | 133 ++++++++++++++++++++--------------- spot/twaalgos/determinize.hh | 8 ++- 4 files changed, 91 insertions(+), 59 deletions(-) diff --git a/NEWS b/NEWS index 251f5feb3..d2c5ce5cb 100644 --- a/NEWS +++ b/NEWS @@ -60,6 +60,14 @@ New in spot 2.11.6.dev (not yet released) 36 seconds; it now produce an AIG circuit with 53 nodes in only 0.1 second. + Bugs fixed: + + - tgba_determinize()'s use_simulation option would cause it to + segfault on automata with more than 2^16 SCCs, due to overflows in + computations of indices in the reachability matrix for SCCs. + (Issue #541.) This has been fixed by disabled the use_simulation + optimization in this case. 
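The fix is easier to follow with the indexing written out (an editorial sketch, independent of the actual determinize.cc code shown below): SCC reachability is kept as a lower-triangular bit matrix, and "SCC j can reach SCC i" only needs to be stored for i <= j, because the SCC numbering guarantees that successors of an SCC have smaller numbers.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Entry (j, i), with i <= j, lives at offset j*(j+1)/2 + i.  The old
    // square-matrix indexing (i + scccount * j) overflows 32-bit unsigned
    // arithmetic as soon as scccount reaches 2^16, which is why the patch
    // also disables use_simulation above that size.
    struct triangular_bits
    {
      std::vector<char> m;
      explicit triangular_bits(std::size_t n)
        : m(n * (n + 1) / 2, 0)
      {
      }
      void set(std::size_t j, std::size_t i)
      {
        assert(i <= j);
        m[j * (j + 1) / 2 + i] = 1;
      }
      bool get(std::size_t j, std::size_t i) const
      {
        return i <= j && m[j * (j + 1) / 2 + i];
      }
    };

Storing only the lower half roughly halves the memory used by the matrix without changing what can be queried from it.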
+ New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/THANKS b/THANKS index 7986c3875..25fac900b 100644 --- a/THANKS +++ b/THANKS @@ -10,6 +10,7 @@ Caroline Lemieux Christian Dax Christopher Ziegler Clément Tamines +David Dokoupil David Müller Dávid Smolka Edmond Irani Liu diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index d2d35a824..2bc84cd6a 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et +// Copyright (C) 2015-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -820,30 +820,44 @@ namespace spot return nodes_ == other.nodes_ && braces_ == other.braces_; } - // res[i + scccount*j] = 1 iff SCC i is reachable from SCC j - std::vector - find_scc_paths(const scc_info& scc) + namespace { - unsigned scccount = scc.scc_count(); - std::vector res(scccount * scccount, 0); - for (unsigned i = 0; i != scccount; ++i) - res[i + scccount * i] = 1; - for (unsigned i = 0; i != scccount; ++i) + class reachability_matrix final + { + // Store a lower triangular matrix. + // (j can reach i) <=> (i<=j and m[j*(j+1)/2 + i]==1) + std::vector m; + public: + reachability_matrix(const scc_info& scc) { - unsigned ibase = i * scccount; - for (unsigned d: scc.succ(i)) + unsigned scccount = scc.scc_count(); + m.resize(scccount * (scccount + 1) / 2, 0); + for (unsigned i = 0; i < scccount; ++i) + m[(i * (i + 1) / 2) + i] = 1; + for (unsigned i = 1; i < scccount; ++i) { - // we necessarily have d < i because of the way SCCs are - // numbered, so we can build the transitive closure by - // just ORing any SCC reachable from d. - unsigned dbase = d * scccount; - for (unsigned j = 0; j != scccount; ++j) - res[ibase + j] |= res[dbase + j]; + unsigned ibase = i * (i + 1) / 2; + for (unsigned d: scc.succ(i)) + { + // we necessarily have d < i because of the way SCCs are + // numbered, so we can build the transitive closure by + // just ORing any SCC reachable from d. + unsigned dbase = d * (d + 1) / 2; + for (unsigned j = 0; j <= d; ++j) + m[ibase + j] |= m[dbase + j]; + } } } - return res; + + bool operator()(unsigned j, unsigned i) const + { + return i <= j && m[(j * (j + 1) / 2) + i]; + } + }; + } + twa_graph_ptr tgba_determinize(const const_twa_graph_ptr& a, bool pretty_print, bool use_scc, @@ -882,49 +896,56 @@ namespace spot scc_opt = scc_info_options::TRACK_SUCCS | scc_info_options::TRACK_STATES; scc_info scc = scc_info(aut, scc_opt); + // If we have too many SCCs, disable simulation-based checks, as + // computations to index the matrix would overflow. (Issue #541.) + if (scc.scc_count() >= (1 << 16)) + { + use_simulation = false; + implications.clear(); + } + // If use_simulation is false, implications is empty, so nothing is built std::vector> implies( implications.size(), std::vector(implications.size(), 0)); - { - std::vector is_connected = find_scc_paths(scc); - unsigned sccs = scc.scc_count(); - bool something_implies_something = false; - for (unsigned i = 0; i != implications.size(); ++i) - { - // NB spot::simulation() does not remove unreachable states, as it - // would invalidate the contents of 'implications'. - // so we need to explicitly test for unreachable states - // FIXME: based on the scc_info, we could remove the unreachable - // states, both in the input automaton and in 'implications' - // to reduce the size of 'implies'. 
- if (!scc.reachable_state(i)) - continue; - unsigned scc_of_i = scc.scc_of(i); - bool i_implies_something = false; - for (unsigned j = 0; j != implications.size(); ++j) - { - if (!scc.reachable_state(j)) - continue; - - bool i_implies_j = !is_connected[sccs * scc.scc_of(j) + scc_of_i] - && bdd_implies(implications[i], implications[j]); - implies[i][j] = i_implies_j; - i_implies_something |= i_implies_j; - } - // Clear useless lines. - if (!i_implies_something) - implies[i].clear(); - else - something_implies_something = true; - } - if (!something_implies_something) - { - implies.clear(); - use_simulation = false; - } - } + if (use_simulation) + { + reachability_matrix scc_can_reach(scc); + bool something_implies_something = false; + for (unsigned i = 0; i != implications.size(); ++i) + { + // NB spot::simulation() does not remove unreachable states, as it + // would invalidate the contents of 'implications'. + // so we need to explicitly test for unreachable states + // FIXME: based on the scc_info, we could remove the unreachable + // states, both in the input automaton and in 'implications' + // to reduce the size of 'implies'. + if (!scc.reachable_state(i)) + continue; + unsigned scc_of_i = scc.scc_of(i); + bool i_implies_something = false; + for (unsigned j = 0; j != implications.size(); ++j) + { + if (!scc.reachable_state(j)) + continue; + bool i_implies_j = !scc_can_reach(scc.scc_of(j), scc_of_i) + && bdd_implies(implications[i], implications[j]); + implies[i][j] = i_implies_j; + i_implies_something |= i_implies_j; + } + // Clear useless lines. + if (!i_implies_something) + implies[i].clear(); + else + something_implies_something = true; + } + if (!something_implies_something) + { + implies.clear(); + use_simulation = false; + } + } // Compute the support of each state std::vector support(aut->num_states()); diff --git a/spot/twaalgos/determinize.hh b/spot/twaalgos/determinize.hh index b047e8234..aa8196eaa 100644 --- a/spot/twaalgos/determinize.hh +++ b/spot/twaalgos/determinize.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2019-2021 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) 2015-2016, 2019-2021, 2023 Laboratoire de Recherche +// et Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -64,7 +64,9 @@ namespace spot /// /// \param use_simulation whether to simplify the construction based /// on simulation relations between states in - /// the original automaton. + /// the original automaton. This optimization + /// is automatically disabled on automata with + /// more than 2^16 SCCs. /// /// \param use_stutter whether to simplify the construction when the /// input automaton is known to be From 538afeb73be0fa5ad07a4efaf45af42c2b7a0cf8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Sep 2023 10:30:19 +0200 Subject: [PATCH 327/606] * spot/twaalgos/aiger.hh: Add missing include. --- spot/twaalgos/aiger.hh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index c89ae1ecb..bd0424e8e 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-21 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2020-2021, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -30,6 +30,7 @@ #include #include #include +#include // std::none_of #include From cbb981ffd500a8779c0c147bd6a3a264b72c29c7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 8 Sep 2023 12:02:42 +0200 Subject: [PATCH 328/606] python: add bindings for set of unsigned int Requested by Marek Jankola. * python/spot/impl.i: Add bindings for std::set. * tests/python/powerset.py: New file. * tests/Makefile.am: Add it. * THANKS: Add Marek. --- THANKS | 1 + python/spot/impl.i | 1 + tests/Makefile.am | 1 + tests/python/powerset.py | 31 +++++++++++++++++++++++++++++++ 4 files changed, 34 insertions(+) create mode 100644 tests/python/powerset.py diff --git a/THANKS b/THANKS index 25fac900b..ae3a37f8c 100644 --- a/THANKS +++ b/THANKS @@ -38,6 +38,7 @@ Juan Tzintzun Juraj Major Kristin Y. Rozier Marc Espie +Marek Jankola Martin Dieguez Lodeiro Matthias Heizmann Maxime Bouton diff --git a/python/spot/impl.i b/python/spot/impl.i index f95270e21..cb9318a1e 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -523,6 +523,7 @@ namespace std { %template(vectorint) vector; %template(pair_formula_vectorstring) pair>; %template(atomic_prop_set) set; + %template(setunsigned) set; %template(relabeling_map) map; } diff --git a/tests/Makefile.am b/tests/Makefile.am index 7e8a42347..db70e8810 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -441,6 +441,7 @@ TESTS_python = \ python/_partitioned_relabel.ipynb \ python/parity.py \ python/pdegen.py \ + python/powerset.py \ python/prodexpt.py \ python/_product_weak.ipynb \ python/_product_susp.ipynb \ diff --git a/tests/python/powerset.py b/tests/python/powerset.py new file mode 100644 index 000000000..91575c263 --- /dev/null +++ b/tests/python/powerset.py @@ -0,0 +1,31 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2023 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +import spot.gen as gen +from unittest import TestCase +tc = TestCase() + +# Make sure we can iterate on the states of a power_map. +a = gen.aut_pattern(gen.AUT_CYCLIST_TRACE_NBA, 1) +p = spot.power_map() +d = spot.tgba_powerset(a, p) +tc.assertEqual(p.states_of(0), (0,)) +tc.assertEqual(p.states_of(1), (0,1)) +tc.assertEqual(p.states_of(2), (0,2)) From 7149521f4899b6d5dcaba5fb9aa8af9c6215dd51 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 13 Sep 2023 11:31:49 +0200 Subject: [PATCH 329/606] relabel_bse: rework to simplify more patterns Rework the way we compute and use cut-points to catch more patterns we can rewrite. Also Use BDDs to check if a Boolean sub-expression is false or true. Fixes issue #540. * spot/tl/relabel.hh: Update documentation * spot/tl/relabel.cc (relabel_bse): Rework. * tests/core/ltlfilt.test: Add more test cases. * tests/python/_mealy.ipynb: Update. * NEWS: Mention the change. 
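Since the log above only names the improved patterns, here is what they look like from user code (an editorial sketch, not part of the patch: it assumes Spot's usual parse_formula() helper and formula printing; the expected results are the ones quoted in the NEWS entry and test cases further down):

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/tl/relabel.hh>

    int main()
    {
      // Both sides of the U share 'c', yet each side can still be
      // partially renamed: expected (p0 & p1) U (!p1 & p2).
      std::cout << spot::relabel_bse(
          spot::parse_formula("(a & b & c) U (!c & d & e)"),
          spot::Pnn) << '\n';
      // Boolean subformulas that are tautologies (or contradictions) are
      // now recognized via BDDs: expected 1 U p0, i.e. the "1 U c" case
      // from the NEWS with the remaining proposition renamed.
      std::cout << spot::relabel_bse(
          spot::parse_formula("((a & !b) | (a -> b)) U c"),
          spot::Pnn) << '\n';
      return 0;
    }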
--- NEWS | 5 + spot/tl/relabel.cc | 214 +++++++++++++++++++++----------------- spot/tl/relabel.hh | 8 +- tests/core/ltlfilt.test | 125 +++++++++++++++++++++- tests/python/_mealy.ipynb | 112 ++++++++++---------- 5 files changed, 305 insertions(+), 159 deletions(-) diff --git a/NEWS b/NEWS index d2c5ce5cb..8f147ad35 100644 --- a/NEWS +++ b/NEWS @@ -33,6 +33,11 @@ New in spot 2.11.6.dev (not yet released) - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() that converts a BDD into a CNF instead of a DNF. + - spot::relabel_bse() has been improved to better deal with more + cases. For instance '(a & b & c) U (!c & d & e)' is now + correctly reduced as '(p0 & p1) U (!p1 & p2)', and + '((a & !b) | (a->b)) U c' now becomes '1 U c'. (Issue #540.) + - spot::relabel_overlapping_bse() is a new function that will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index f57c0c919..7fe94842c 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -19,6 +19,7 @@ #include "config.h" #include +#include #include #include #include @@ -82,7 +83,7 @@ namespace spot // if subexp == false, matches APs // if subexp == true, matches boolean subexps - template + template class relabeler { public: @@ -90,6 +91,7 @@ namespace spot map newname; ap_generator* gen; relabeling_map* oldnames; + tl_simplifier tl; relabeler(ap_generator* gen, relabeling_map* m) : gen(gen), oldnames(m) @@ -115,6 +117,16 @@ namespace spot if (!r.second) return r.first->second; + if constexpr (use_bdd) + if (!old.is(op::ap)) + { + bdd b = tl.as_bdd(old); + if (b == bddtrue) + return r.first->second = formula::tt(); + if (b == bddfalse) + return r.first->second = formula::ff(); + } + formula res = gen->next(); r.first->second = res; if (oldnames) @@ -269,7 +281,7 @@ namespace spot // Furthermore, because we are already representing LTL formulas // with sharing of identical sub-expressions we can easily rename // a subexpression (such as c&d above) only once. However this - // scheme has two problems: + // scheme (done by relabel_overlapping_bse()) has two problems: // // A. It will not detect inter-dependent Boolean subexpressions. // For instance it will mistakenly relabel "(a & b) U (a & !b)" @@ -278,53 +290,40 @@ namespace spot // B. Because of our n-ary operators, it will fail to // notice that (a & b) is a sub-expression of (a & b & c). // - // The way we compute the subexpressions that can be relabeled is - // by transforming the formula syntax tree into an undirected - // graph, and computing the cut points of this graph. The cut - // points (or articulation points) are the nodes whose removal - // would split the graph in two components. To ensure that a - // Boolean operator is only considered as a cut point if it would - // separate all of its children from the rest of the graph, we - // connect all the children of Boolean operators. + // The way we compute the subexpressions that can be relabeled is by + // transforming the formula syntax tree into an undirected graph, + // and computing the cut-points of this graph. The cut-points (or + // articulation points) are the nodes whose removal would split the + // graph in two components; in our case, we extend this definition to + // also consider the leaves as cut-points. 
// - // For instance (a & b) U (c & d) has two (Boolean) cut points - // corresponding to the two AND operators: + // For instance ((a|b)&c&d)U(!d&e&f) is represented by + // the following graph, were cut-points are marked with *. // - // (a&b)U(c&d) - // ╱ ╲ - // a&b c&d - // ╱ ╲ ╱ ╲ - // a─────b c─────d + // ((a|b)&c&d)U(!d&e&f) + // ╱ ╲ + // ((a|b)&c&d)* (!d&e&f)* + // ╱ │ ╲ ╱ │ ╲ + // a|b* │ ╲ ! │ ╲ + // ╱ ╲ │ ╲ ╱ │ ╲ + // a* b* c* d e* f* // - // (The root node is also a cut point, but we only consider Boolean - // cut points for relabeling.) + // The relabeling of a formula is done in 3 passes: + // 1. Convert the formula's syntax tree into an undirected graph. + // 2. Compute the (Boolean) cut points of that graph, using the + // Hopcroft-Tarjan algorithm (see below for a reference). + // 3. Recursively scan the formula's tree until we reach + // a (Boolean) cut-point. If all the children of this node + // are cut-points, rename the node with a fresh label. + // If it's a n-ary operator, group all children that are + // and cut-points relabel them as a whole. // - // On the other hand, (a & b) U (b & !c) has only one Boolean - // cut-point which corresponds to the NOT operator: - // - // (a&b)U(b&!c) - // ╱ ╲ - // a&b b&!c - // ╱ ╲ ╱ ╲ - // a─────b────!c - // │ - // c - // - // Note that if the children of a&b and b&c were not connected, - // a&b and b&c would be considered as cut points because they - // separate "a" or "!c" from the rest of the graph. - // - // The relabeling of a formula is therefore done in 3 passes: - // 1. convert the formula's syntax tree into an undirected graph, - // adding links between children of Boolean operators - // 2. compute the (Boolean) cut points of that graph, using the - // Hopcroft-Tarjan algorithm (see below for a reference) - // 3. recursively scan the formula's tree until we reach - // either a (Boolean) cut point or an atomic proposition, and - // replace that node by a fresh atomic proposition. - // - // In the example above (a&b)U(b&!c), the last recursion - // stops on a, b, and !c, producing (p0&p1)U(p1&p2). + // On the above example, when processing the cut-point + // ((a|b)&c&d) we group its children that are cut-points + // (a|b)&c and rename this group as p0. Then d gets + // his own name p1, and when processing (!d&e&f) we group + // e&f because they are both cut-points, are rename them p1. + // The result is (p0 & p1) U (!p1 & p2). // // Problem #B above (handling of n-ary expression) need some // additional tricks. Consider (a&b&c&d) U X(c&d), and assume @@ -343,24 +342,19 @@ namespace spot // Convert the formula's syntax tree into an undirected graph // labeled by subformulas. - class formula_to_fgraph + class formula_to_fgraph final { public: fgraph& g; std::stack s; sub_formula_count_t& subcount; - formula_to_fgraph(fgraph& g, sub_formula_count_t& subcount): - g(g), subcount(subcount) - { - } + formula_to_fgraph(fgraph& g, sub_formula_count_t& subcount) + : g(g), subcount(subcount) + { + } - ~formula_to_fgraph() - { - } - - void - visit(formula f) + void visit(formula f) { { // Connect to parent @@ -411,27 +405,6 @@ namespace spot } for (; i < sz; ++i) visit(f[i]); - if (sz > 1 && f.is_boolean()) - { - // For Boolean nodes, connect all children in a - // loop. This way the node can only be a cut point - // if it separates all children from the reset of - // the graph (not only one). 
- formula pred = f[0]; - for (i = 1; i < sz; ++i) - { - formula next = f[i]; - // Note that we add an edge in both directions, - // as the cut point algorithm really need undirected - // graphs. (We used to do only one direction, and - // that turned out to be a bug.) - g[pred].emplace_back(next); - g[next].emplace_back(pred); - pred = next; - } - g[pred].emplace_back(f[0]); - g[f[0]].emplace_back(pred); - } done: s.pop(); } @@ -465,11 +438,12 @@ namespace spot // the ACM, 16 (6), June 1973. // // It differs from the original algorithm by returning only the - // Boolean cutpoints, and not dealing with the initial state + // Boolean cut-points, not dealing with the initial state // properly (our initial state will always be considered as a // cut-point, but since we only return Boolean cut-points it's // OK: if the top-most formula is Boolean we want to replace it - // as a whole). + // as a whole), and considering the atomic propositions that + // are leaves as cutpoints too. void cut_points(const fgraph& g, fset& c, formula start) { stack_t s; @@ -530,7 +504,14 @@ namespace spot data_entry& dgrand_parent = data[grand_parent]; if (dparent.low >= dgrand_parent.num // cut-point && grand_parent.is_boolean()) - c.insert(grand_parent); + { + c.insert(grand_parent); + // Also consider atomic propositions as + // cut-points if they are leaves. + if (parent.is(op::ap) + && g.find(parent)->second.size() == 1) + c.insert(parent); + } if (dparent.low < dgrand_parent.low) dgrand_parent.low = dparent.low; } @@ -539,7 +520,7 @@ namespace spot } - class bse_relabeler final: public relabeler + class bse_relabeler final: public relabeler { public: const fset& c; @@ -553,12 +534,45 @@ namespace spot using relabeler::visit; - formula - visit(formula f) + formula visit(formula f) { - if (f.is(op::ap) || (c.find(f) != c.end())) + if (f.is(op::ap)) return rename(f); + // This is Boolean cut-point? + // We can only relabel it if all its children are cut-points. + if (c.find(f) != c.end()) + { + unsigned fsz = f.size(); + assert(fsz > 0); // A cut point has children + if (fsz == 1 + || (fsz == 2 + && ((c.find(f[0]) != c.end()) + == (c.find(f[1]) != c.end())))) + return rename(f); + if (fsz > 2) + { + // cp[0] will contains non cut-points + // cp[1] will contain cut-points or atomic propositions + std::vector cp[2]; + cp[0].reserve(fsz); + cp[1].reserve(fsz); + for (unsigned i = 0; i < fsz; ++i) + { + formula cf = f[i]; + cp[c.find(cf) != c.end()].push_back(cf); + } + if (cp[0].empty() + || cp[1].empty()) + // all children are cut-points or non-cut-points + return rename(f); + formula cp1group = rename(formula::multop(f.kind(), cp[1])); + formula cp0group = visit(formula::multop(f.kind(), cp[0])); + return formula::multop(f.kind(), {cp1group, cp0group}); + } + } + + // Not a cut-point, recurse unsigned sz = f.size(); if (sz <= 2) return f.map([this](formula f) @@ -566,24 +580,24 @@ namespace spot return visit(f); }); - unsigned i = 0; - std::vector res; if (f.is_boolean() && sz > 2) - { - // If we have a Boolean formula with more than two - // children, like (a & b & c & d) where some children - // (assume {a,b}) are used only once, but some other - // (assume {c,d}) are used multiple time in the formula, - // then split that into ((a & b) & (c & d)) to give - // (a & b) a chance to be relabeled as a whole. 
- auto pair = split_used_once(f, subcount); - if (pair.second) - return formula::multop(f.kind(), { visit(pair.first), - visit(pair.second) }); - } + // If we have a Boolean formula with more than two + // children, like (a & b & c & d) where some children + // (assume {a,b}) are used only once, but some other + // (assume {c,d}) are used multiple time in the formula, + // then split that into ((a & b) & (c & d)) to give + // (a & b) a chance to be relabeled as a whole. + if (auto pair = split_used_once(f, subcount); pair.second) + { + formula left = visit(pair.first); + formula right = visit(pair.second); + return formula::multop(f.kind(), { left, right }); + } /// If we have a formula like (a & b & Xc), consider /// it as ((a & b) & Xc) in the graph to isolate the /// Boolean operands as a single node. + unsigned i = 0; + std::vector res; formula b = f.boolean_operands(&i); if (b && b != f) { @@ -630,6 +644,10 @@ namespace spot fset c; cut_points(g, c, f); + // std::cerr << "cut-points\n"; + // for (formula cp: c) + // std::cerr << " - " << cp << '\n'; + // Relabel the formula recursively, stopping // at cut-points or atomic propositions. ap_generator* gen = nullptr; diff --git a/spot/tl/relabel.hh b/spot/tl/relabel.hh index 59efdf94b..5d076f10c 100644 --- a/spot/tl/relabel.hh +++ b/spot/tl/relabel.hh @@ -53,13 +53,13 @@ namespace spot /// proposition for each maximal Boolean subexpression encountered, /// even if they overlap (i.e., share common atomic /// propositions). For instance `(a & b & c) U (c & d & e)` will be - /// simply be relabeled as `p0 U p1`. This kind of renaming to not - /// preserves the + /// simply be relabeled as `p0 U p1`. This kind of renaming does not + /// preserve the satisfiability of the input formula. /// /// The relabel_bse() version will make sure that the replaced /// subexpressions do not share atomic propositions. For instance - /// `(a & b & c) U (c & d & e)` will be simply be relabeled as - /// `(p0 & p1) U (p1 & p2)`, were `p1` replaces `c` and the rest + /// `(a & b & c) U (!c & d & e)` will be simply be relabeled as + /// `(p0 & p1) U (!p1 & p2)`, were `p1` replaces `c` and the rest /// is obvious. 
/// /// @{ diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 192a60fef..f28ee445d 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -363,6 +363,14 @@ b & GF(a | c) & !GF!(a | c) F(a <-> b) -> (c xor d) (a & b & c) U (c & d & e) (a & b & c) U !(a & b & c) +(a & b & c) U (!c & d & e) +((a | b) & c & d) U (!d & e & f) +((a | b) & d) U (!d & e & f) +(a & !a) | (b & !b) | (c & !c) +((a & !a) | (b & !b) | (c & !c)) U d +((a & !a) | (b & !b) | (c & e)) U d +((a & !b) | (!a & b)) U c +((a & !b) | (a->b)) U c EOF cat >exp <out @@ -405,6 +419,22 @@ p0 || []p1 #define p1 (b) #define p2 (c) (p0 && p1 && p2) U (!p0 || !p1 || !p2) +#define p0 (a && b) +#define p1 (c) +#define p2 (d && e) +(p0 && p1) U (!p1 && p2) +#define p0 (d) +#define p1 (a || b) +#define p2 (e && f) +(p0 && p1) U (!p0 && p2) +false +#define p0 (d) +p0 +#define p0 ((a && !a) || (b && !b) || (c && e)) +#define p1 (d) +p0 U p1 +#define p0 (c) +true U p0 EOF run 0 ltlfilt -s -u --nnf --relabel-bool=pnn --define in >out @@ -455,6 +485,48 @@ p0 && []<>(p1 || p2) && ![]<>!(p1 || p2) #define p1 (b) #define p2 (c) (p0 && p1 && p2) U !(p0 && p1 && p2) +#define p0 (a) +#define p1 (b) +#define p2 (c) +#define p3 (d) +#define p4 (e) +(p0 && p1 && p2) U (!p2 && p3 && p4) +#define p0 (c) +#define p1 (d) +#define p2 (a) +#define p3 (b) +#define p4 (e) +#define p5 (f) +(p0 && p1 && (p2 || p3)) U (!p1 && p4 && p5) +#define p0 (d) +#define p1 (a) +#define p2 (b) +#define p3 (e) +#define p4 (f) +(p0 && (p1 || p2)) U (!p0 && p3 && p4) +#define p0 (a) +#define p1 (b) +#define p2 (c) +(p0 && !p0) || (p1 && !p1) || (p2 && !p2) +#define p0 (a) +#define p1 (b) +#define p2 (c) +#define p3 (d) +((p0 && !p0) || (p1 && !p1) || (p2 && !p2)) U p3 +#define p0 (a) +#define p1 (b) +#define p2 (c) +#define p3 (e) +#define p4 (d) +((p2 && p3) || (p0 && !p0) || (p1 && !p1)) U p4 +#define p0 (a) +#define p1 (b) +#define p2 (c) +((p0 && !p1) || (!p0 && p1)) U p2 +#define p0 (a) +#define p1 (b) +#define p2 (c) +((p0 && !p1) || (p0 -> p1)) U p2 EOF run 0 ltlfilt -s -u --relabel=pnn --define in >out @@ -486,12 +558,13 @@ Fp0 -> p1 p0 U p1 #define p0 (a & b & c) p0 U !p0 +#define p0 ((a & !a) | (b & !b) | (c & !c)) +p0 EOF run 0 ltlfilt -u --relabel-over=pnn --define in >out diff exp out - toolong='((p2=0) * (p3=1))' # work around the 80-col check cat >exp <exp <out diff exp out @@ -550,6 +665,14 @@ b & GF(a | c) & !GF!(a | c)@ F(a <-> b) -> (c xor d)@ (a & b & c) U (c & d & e)@ (a & b & c) U !(a & b & c)@ +(a & b & c) U (!c & d & e)@ +(c & d & (a | b)) U (!d & e & f)@ +(d & (a | b)) U (!d & e & f)@ +(a & !a) | (b & !b) | (c & !c)@ +((a & !a) | (b & !b) | (c & !c)) U d@ +((a & !a) | (b & !b) | (c & e)) U d@ +((a & !b) | (!a & b)) U c@ +((a -> b) | (a & !b)) U c@ EOF diff exp out diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index ebeeaacb7..20ea5fd4e 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -129,7 +129,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f86481a2690> >" + " *' at 0x7f8877796550> >" ] }, "execution_count": 4, @@ -209,7 +209,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f85f45cbb70> >" + " *' at 0x7f8877796820> >" ] }, "execution_count": 6, @@ -283,7 +283,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f85f45cbb70> >" + " *' at 0x7f8877796820> >" ] }, "execution_count": 8, @@ -387,7 +387,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f861bfc8ae0> >" + " *' at 0x7f8877797b40> >" ] }, "execution_count": 9, @@ -532,7 +532,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f85f45efde0> >" + " *' at 
0x7f88777970f0> >" ] }, "execution_count": 10, @@ -600,15 +600,15 @@ " \n", " 0\n", " presat\n", - " 3868.95\n", - " 3.282e-06\n", - " 1.4388e-05\n", - " 0.000129765\n", - " 1.3759e-05\n", - " 9.499e-06\n", - " 8.73e-06\n", - " 9.01e-06\n", - " 6.6209e-05\n", + " 7.7176e-05\n", + " 2.863e-06\n", + " 1.6553e-05\n", + " 0.000186061\n", + " 7.753e-06\n", + " 1.0616e-05\n", + " 1.1804e-05\n", + " 8.101e-06\n", + " 6.7328e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -634,7 +634,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.000743251\n", + " 0.000496302\n", " 2\n", " 0\n", " 7\n", @@ -652,16 +652,16 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time incomp_time \\\n", - "0 presat 3868.95 3.282e-06 1.4388e-05 0.000129765 1.3759e-05 \n", + "0 presat 7.7176e-05 2.863e-06 1.6553e-05 0.000186061 7.753e-06 \n", "1 sat NaN NaN NaN NaN NaN \n", "\n", " split_all_let_time split_min_let_time split_cstr_time prob_init_build_time \\\n", - "0 9.499e-06 8.73e-06 9.01e-06 6.6209e-05 \n", + "0 1.0616e-05 1.1804e-05 8.101e-06 6.7328e-05 \n", "1 NaN NaN NaN NaN \n", "\n", " ... total_time n_classes n_refinement n_lit n_clauses n_iteration \\\n", "0 ... NaN NaN NaN NaN NaN NaN \n", - "1 ... 0.000743251 2 0 7 12 0 \n", + "1 ... 0.000496302 2 0 7 12 0 \n", "\n", " n_letters_part n_bisim_let n_min_states done \n", "0 3 2 NaN NaN \n", @@ -758,7 +758,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f861bfc8630> >" + " *' at 0x7f8877796eb0> >" ] }, "execution_count": 11, @@ -855,13 +855,13 @@ "1\n", "/\n", "\n", - "(!o0 & o1) | (o0 & !o1)\n", + "(o0 & !o1) | (!o0 & o1)\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f861bf9fb40> >" + " *' at 0x7f8877797120> >" ] }, "execution_count": 12, @@ -994,13 +994,13 @@ "5->2\n", "\n", "\n", - "(!o0 & o1) | (o0 & !o1)\n", + "(o0 & !o1) | (!o0 & o1)\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f861bf9f210> >" + " *' at 0x7f8877797990> >" ] }, "execution_count": 13, @@ -1067,15 +1067,15 @@ " \n", " 0\n", " presat\n", - " 3869.08\n", - " 3.213e-06\n", - " 9.079e-06\n", - " 9.5752e-05\n", - " 5.168e-06\n", - " 5.727e-06\n", - " 7.543e-06\n", - " 1.5784e-05\n", - " 4.0507e-05\n", + " 3.282e-06\n", + " 3.702e-06\n", + " 1.4248e-05\n", + " 0.000109094\n", + " 6.705e-06\n", + " 9.219e-06\n", + " 8.52e-06\n", + " 1.0407e-05\n", + " 3.2896e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1149,7 +1149,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.000399073\n", + " 0.00041242\n", " 2\n", " 0\n", " 17\n", @@ -1167,22 +1167,22 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 3869.08 3.213e-06 9.079e-06 9.5752e-05 \n", + "0 presat 3.282e-06 3.702e-06 1.4248e-05 0.000109094 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 5.168e-06 5.727e-06 7.543e-06 1.5784e-05 \n", + "0 6.705e-06 9.219e-06 8.52e-06 1.0407e-05 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", - " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", - "0 4.0507e-05 ... NaN NaN NaN NaN \n", - "1 NaN ... NaN 1 0 3 \n", - "2 NaN ... NaN 1 1 10 \n", - "3 NaN ... 0.000399073 2 0 17 \n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 3.2896e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 
0.00041242 2 0 17 \n", "\n", " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", "0 NaN NaN 1 1 NaN NaN \n", @@ -1286,7 +1286,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f861bfcdc00> >" + " *' at 0x7f8877797cc0> >" ] }, "execution_count": 14, @@ -1365,15 +1365,15 @@ " \n", " 0\n", " presat\n", - " 3869.14\n", - " 2.863e-06\n", - " 9.08e-06\n", - " 6.0622e-05\n", - " 4.679e-06\n", - " 5.308e-06\n", - " 8.59e-06\n", - " 7.962e-06\n", - " 4.0159e-05\n", + " 1.956e-06\n", + " 2.445e-06\n", + " 8.171e-06\n", + " 5.0007e-05\n", + " 4.819e-06\n", + " 6.077e-06\n", + " 5.797e-06\n", + " 4.33e-06\n", + " 2.242e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1447,7 +1447,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.000416464\n", + " 0.000252132\n", " 2\n", " 0\n", " 17\n", @@ -1465,22 +1465,22 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 3869.14 2.863e-06 9.08e-06 6.0622e-05 \n", + "0 presat 1.956e-06 2.445e-06 8.171e-06 5.0007e-05 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 4.679e-06 5.308e-06 8.59e-06 7.962e-06 \n", + "0 4.819e-06 6.077e-06 5.797e-06 4.33e-06 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", - "0 4.0159e-05 ... NaN NaN NaN NaN \n", + "0 2.242e-05 ... NaN NaN NaN NaN \n", "1 NaN ... NaN 1 0 3 \n", "2 NaN ... NaN 1 1 10 \n", - "3 NaN ... 0.000416464 2 0 17 \n", + "3 NaN ... 0.000252132 2 0 17 \n", "\n", " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", "0 NaN NaN 1 1 NaN NaN \n", @@ -1705,7 +1705,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.11.5" }, "vscode": { "interpreter": { From d96796121af28b4bb6958abb1b58ad34c178e817 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 13 Sep 2023 15:39:36 +0200 Subject: [PATCH 330/606] replace sprintf by snprintf MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was reported by Pierre Ganty, who said that sprintf is reported as deprecated on MacOS 13.5.2 (22G91). * spot/twa/acc.cc, spot/twaalgos/aiger.cc: Here. --- spot/twa/acc.cc | 2 +- spot/twaalgos/aiger.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 4c4013ce7..0549ce462 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -2134,7 +2134,7 @@ namespace spot if (*input != c) { char msg[20]; - sprintf(msg, "was expecting %c '.'", c); + std::snprintf(msg, sizeof msg, "was expecting '%c'.", c); syntax_error(input, msg); } ++input; diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 6b608dd59..7d724b124 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -2224,7 +2224,7 @@ namespace spot // vars are unsigned -> 10 digits at most char gate_buffer[3 * 10 + 5]; auto write_gate = [&](unsigned o, unsigned i0, unsigned i1) { - std::sprintf(gate_buffer, "%u %u %u\n", o, i0, i1); + std::snprintf(gate_buffer, sizeof gate_buffer, "%u %u %u\n", o, i0, i1); os << gate_buffer; }; // Count active gates From c2832cabfc594c2e37d0a6beebf535b185ddb389 Mon Sep 17 00:00:00 2001 From: Jonah Romero Date: Thu, 3 Aug 2023 11:54:49 +0200 Subject: [PATCH 331/606] split: add a new split_edge variant * spot/twaalgos/split.cc, spot/twaalgos/split.hh: Here. 
* tests/python/splitedge.py: New file. * tests/Makefile.am: Add it. --- spot/twaalgos/split.cc | 251 +++++++++++++++++++++++++++++++++++++- spot/twaalgos/split.hh | 17 ++- tests/Makefile.am | 1 + tests/python/splitedge.py | 215 ++++++++++++++++++++++++++++++++ 4 files changed, 479 insertions(+), 5 deletions(-) create mode 100644 tests/python/splitedge.py diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index d1ffbbecd..09a1a2eb0 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement +// de l'Epita. IMDEA Software Institute. // // This file is part of Spot, a model checking library. // @@ -25,9 +25,200 @@ #include #include +#include + +namespace std +{ + template<> + struct hash<::bdd> + { + size_t operator()(::bdd const& instance) const noexcept + { + return ::spot::bdd_hash{}(instance); + } + }; + + template<> + struct hash> + { + size_t operator()(pair const& x) const noexcept + { + size_t first_hash = std::hash()(x.first); + size_t second_hash = std::hash()(x.second); + size_t sum = second_hash + + 0x9e3779b9 + + (first_hash << 6) + + (first_hash >> 2); + return first_hash ^ sum; + } + }; +} namespace spot { + // We attempt to add a potentially new set of symbols defined as "value" to + // our current set of edge partitions, "current_set". We also specify a set + // of valid symbols considered + static void add_to_lower_bound_set_helper( + std::unordered_set& current_set, + bdd valid_symbol_set, + bdd value) + { + // This function's correctness is defined by the invariant, that we never + // add a bdd to our current set unless the bdd is disjoint from every other + // element in the current_set. In other words, we will only reach the final + // set.insert(value), if we can iterate over the whole of current_set + // without finding some set intersections + if (value == bddfalse) // Don't add empty sets, as they subsume everything + { + return; + } + for (auto sym : current_set) + { + // If a sym is a subset of value, recursively add the set of symbols + // defined in value, but not in sym. This ensures the two elements + // are disjoint. + if (bdd_implies(sym, value)) + { + add_to_lower_bound_set_helper( + current_set, valid_symbol_set, (value - sym) & valid_symbol_set); + return; + } + // If a sym is a subset of the value we're trying to add, then we + // remove the symbol and add the two symbols created by partitioning + // the sym with value. 
+ else if (bdd_implies(value, sym)) + { + current_set.erase(sym); + add_to_lower_bound_set_helper(current_set, + valid_symbol_set, + sym & value); + add_to_lower_bound_set_helper(current_set, + valid_symbol_set, + sym - value); + return; + } + } + // This line is only reachable if value is not a subset and doesn't + // subsume any element currently in our set + current_set.insert(value); + } + + static std::array create_possible_intersections( + bdd valid_symbol_set, + std::pair const& first, + std::pair const& second) + { + auto intermediate = second.first & valid_symbol_set; + auto intermediate2 = second.second & valid_symbol_set; + return { + first.first & intermediate, + first.second & intermediate, + first.first & intermediate2, + first.second & intermediate2, + }; + } + + using bdd_set = std::unordered_set; + using bdd_pair_set = std::unordered_set>; + + // Transforms each element of the basis into a complement pair, + // with a valid symbol set specified + static bdd_pair_set create_complement_pairs(std::vector const& basis, + bdd valid_symbol_set) + { + bdd_pair_set intersections; + for (auto& sym : basis) + { + auto intersection = sym & valid_symbol_set; + if (intersection != bddfalse) + { + auto negation = valid_symbol_set - intersection; + intersections.insert(std::make_pair(intersection, negation)); + } + } + return intersections; + } + + template + void iterate_possible_intersections(bdd_pair_set const& complement_pairs, + bdd valid_symbol_set, + Callable callable) + { + for (auto it = complement_pairs.begin(); it != complement_pairs.end(); ++it) + { + for (auto it2 = std::next(it); it2 != complement_pairs.end(); ++it2) + { + auto intersections = create_possible_intersections( + valid_symbol_set, *it, *it2); + for (auto& intersection : intersections) + { + callable(intersection); + } + } + } + } + + // Compute the lower set bound of a set. A valid symbol set is also + // provided to make sure that no symbol exists in the output if it is + // not also included in the valid symbol set + static bdd_set lower_set_bound(std::vector const& basis, + bdd valid_symbol_set) + { + auto complement_pairs = create_complement_pairs(basis, valid_symbol_set); + if (complement_pairs.size() == 1) + { + bdd_set lower_bound; + auto& pair = *complement_pairs.begin(); + if (pair.first != bddfalse + && bdd_implies(pair.first, valid_symbol_set)) + { + lower_bound.insert(pair.first); + } + if (pair.second != bddfalse + && bdd_implies(pair.second, valid_symbol_set)) + { + lower_bound.insert(pair.second); + } + return lower_bound; + } + else + { + bdd_set lower_bound; + iterate_possible_intersections(complement_pairs, valid_symbol_set, + [&](auto intersection) + { + add_to_lower_bound_set_helper(lower_bound, + valid_symbol_set, + intersection); + }); + + return lower_bound; + } + } + + // Partitions a symbol based on a list of other bdds called the basis. + // The resulting partition will have the property that for any paritioned + // element and any element element in the basis, the partitioned element will + // either by completely contained by that element of the basis, or completely + // disjoint. + static bdd_set generate_contained_or_disjoint_symbols(bdd sym, + std::vector const& basis) + { + auto lower_bound = lower_set_bound(basis, sym); + // If the sym was disjoint from everything in the basis, we'll be left with + // an empty lower_bound. To fix this, we will simply return a singleton, + // with sym as the only element. 
Notice, this singleton will satisfy the + // requirements of a return value from this function. Additionally, if the + // sym is false, that means nothing can traverse it, so we simply are left + // with no edges. + if (lower_bound.empty() && sym != bddfalse) + { + lower_bound.insert(sym); + } + return lower_bound; + } + twa_graph_ptr split_edges(const const_twa_graph_ptr& aut) { twa_graph_ptr out = make_twa_graph(aut->get_dict()); @@ -77,4 +268,60 @@ namespace spot } return out; } + + twa_graph_ptr split_edges(const const_twa_graph_ptr& aut, + std::vector const& basis) + { + twa_graph_ptr out = make_twa_graph(aut->get_dict()); + out->copy_acceptance_of(aut); + out->copy_ap_of(aut); + out->prop_copy(aut, twa::prop_set::all()); + out->new_states(aut->num_states()); + out->set_init_state(aut->get_init_state_number()); + + // We use a cache to avoid the costly loop around minterms_of(). + // Cache entries have the form (id, [begin, end]) where id is the + // number of a BDD that as been (or will be) split, and begin/end + // denotes a range of existing transition numbers that cover the + // split. + using cached_t = std::pair; + std::unordered_map split_cond; + internal::univ_dest_mapper uniq(out->get_graph()); + + for (auto& e: aut->edges()) + { + bdd const& cond = e.cond; + unsigned dst = e.dst; + + if (cond == bddfalse) + continue; + if (aut->is_univ_dest(dst)) + { + auto d = aut->univ_dests(dst); + dst = uniq.new_univ_dests(d.begin(), d.end()); + } + + auto& [begin, end] = split_cond[cond.id()]; + if (begin == end) + { + begin = out->num_edges() + 1; + auto split = generate_contained_or_disjoint_symbols(cond, + basis); + for (bdd minterm : split) + { + out->new_edge(e.src, dst, minterm, e.acc); + } + end = out->num_edges() + 1; + } + else + { + auto& g = out->get_graph(); + for (unsigned i = begin; i < end; ++i) + { + out->new_edge(e.src, dst, g.edge_storage(i).cond, e.acc); + } + } + } + return out; + } } diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index 7bfe7b02c..54490ab8b 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2017, 2018, 2020, 2023 Laboratoire de Recherche +// et Développement de l'Epita. IMDEA Software Institute. // // This file is part of Spot, a model checking library. // @@ -20,6 +20,7 @@ #pragma once #include +#include namespace spot { @@ -28,7 +29,17 @@ namespace spot /// /// Create a new version of the automaton where all edges are split /// so that they are all labeled by a conjunction of all atomic - /// propositions. After this we can consider that each edge of the + /// propositions. After this we can consider that each edge of the /// automate is a transition labeled by one letter. SPOT_API twa_graph_ptr split_edges(const const_twa_graph_ptr& aut); + + /// \ingroup twa_misc + /// \brief transform edges into transitions based on set of bdds + /// + /// Create a new version of the automaton where all edges are split + /// such that, for any transformed edge and any set of symbols in + /// the basis, the transformed edge is either completely disjoint + /// from the set of symbols, or it is a subset of them. 
+ SPOT_API twa_graph_ptr split_edges( + const const_twa_graph_ptr& aut, std::vector const& basis); } diff --git a/tests/Makefile.am b/tests/Makefile.am index db70e8810..3cca3e975 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -461,6 +461,7 @@ TESTS_python = \ python/simstate.py \ python/sonf.py \ python/split.py \ + python/splitedge.py \ python/streett_totgba.py \ python/streett_totgba2.py \ python/stutter.py \ diff --git a/tests/python/splitedge.py b/tests/python/splitedge.py new file mode 100644 index 000000000..fa9ff4358 --- /dev/null +++ b/tests/python/splitedge.py @@ -0,0 +1,215 @@ +#!/usr/bin/python3 +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2020-2022 Laboratoire de Recherche et Développement de +# l'EPITA. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +import spot, buddy +from unittest import TestCase +tc = TestCase() + +def create_aps(aut): + return [buddy.bdd_ithvar(aut.register_ap(ap.ap_name())) for ap in aut.ap()] + +def do_edge_test(aut, aps, edges_before, edges_after): + tc.assertEqual(aut.num_edges(), edges_before) + aut = spot.split_edges(aut, aps) + tc.assertEqual(aut.num_edges(), edges_after) + +aut = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[t] 0 +--END--""") + +aps = create_aps(aut) +do_edge_test(aut, aps, 1, 4) + +aut = spot.automaton(""" +HOA: v1 +States: 2 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[t] 0 +State: 1 +[0&1] 1 +--END--""") + +aps = create_aps(aut) +do_edge_test(aut, aps, 2, 5) + +aut = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +AP: 1 "a" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[f] 0 +--END--""") + +aps = create_aps(aut) +do_edge_test(aut, aps, 0, 0) + +aut = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[0|1] 1 +[!1] 2 +State: 1 +State: 2 +--END--""") +# Before: +# State: 0 +# {a, b, c, d} +# {a, b} +# After: +# State : 0 +# {a, b, c}, {d} +# {a, b} + +# a = 00 +# b = 10 +# c = 01 +# d = 11 + +aps = create_aps(aut) +# [{a, b, c}] +aps = [buddy.bdd_not(aps[0]) | buddy.bdd_not(aps[1])] +do_edge_test(aut, aps, 2, 3) + +aut = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[t] 1 +[!0] 2 +State: 1 +State: 2 +--END--""") +# Before: +# State: 0 +# {a, b, c, d} +# {a, c} +# After: +# State : 0 +# {a, b}, {c, d} +# {a}, {c} + +# a = 00 +# b = 10 +# c = 01 +# d = 11 + +aps = create_aps(aut) +# [{a, b}, {c, d}] +aps = [buddy.bdd_not(aps[1]), aps[1]] +do_edge_test(aut, aps, 2, 4) + +aut = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[t] 1 +[!0&!1 | !0&1] 2 +State: 1 +State: 2 +--END--""") +# Before: +# State: 0 +# {a, b, c, d} +# {a, c} +# After: +# State : 0 +# {a},{b},{c},{d} +# {a},{c} + +# a = 00 +# b = 10 +# c = 01 +# d = 11 + +aps = create_aps(aut) 
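+# Use every minterm over "a" and "b" as the splitting basis: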
+neg_aps = [buddy.bdd_not(a) for a in aps] +# [{a},{b},{c},{d}] +aps = [ + neg_aps[0] & neg_aps[1], + neg_aps[0] & aps[1], + aps[0] & neg_aps[1], + aps[0] & aps[1] +] + +do_edge_test(aut, aps, 2, 6) + +aut = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +AP: 2 "a" "b" +Acceptance: 1 Inf(0) +--BODY-- +State: 0 +[t] 1 +[!0&!1 | 0&!1] 2 +State: 1 +State: 2 +--END--""") +# Before +# State: 0 +# {a, b, c, d} +# {a, b} +# After: +# State : 0 +# {a, b, c}, {d} +# {a, b} + +# a = 00 +# b = 10 +# c = 01 +# d = 11 + +aps = create_aps(aut) +neg_aps = [buddy.bdd_not(a) for a in aps] +# [{a, b, c}, {d}] +aps = [ + neg_aps[0] | neg_aps[1], + aps[0] & aps[1] +] +do_edge_test(aut, aps, 2, 3) From d1c5b2efdf44397033ba4a9ed130482a8946a55e Mon Sep 17 00:00:00 2001 From: Jonah Romero Date: Thu, 3 Aug 2023 12:04:47 +0200 Subject: [PATCH 332/606] implement a FORQ-based inclusion check for SBAs * spot/twaalgos/forq_contains.cc, spot/twaalgos/forq_contains.hh: New files. * spot/twaalgos/Makefile.am, python/spot/impl.i: Add them. * tests/python/forq_contains.py: New file. * tests/Makefile.am: Add it. --- python/spot/impl.i | 2 + spot/twaalgos/Makefile.am | 2 + spot/twaalgos/forq_contains.cc | 1191 ++++++++++++++++++++++++++++++++ spot/twaalgos/forq_contains.hh | 51 ++ tests/Makefile.am | 1 + tests/python/forq_contains.py | 327 +++++++++ 6 files changed, 1574 insertions(+) create mode 100644 spot/twaalgos/forq_contains.cc create mode 100644 spot/twaalgos/forq_contains.hh create mode 100644 tests/python/forq_contains.py diff --git a/python/spot/impl.i b/python/spot/impl.i index cb9318a1e..2408486e6 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -123,6 +123,7 @@ #include #include #include +#include #include #include #include @@ -702,6 +703,7 @@ def state_is_accepting(self, src) -> "bool": %feature("flatnested") spot::twa_run::step; %include %template(list_step) std::list; +%include %include %include %include diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index 57ae8ce9f..7a09928f1 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -51,6 +51,7 @@ twaalgos_HEADERS = \ dualize.hh \ emptiness.hh \ emptiness_stats.hh \ + forq_contains.hh \ game.hh \ genem.hh \ gfguarantee.hh \ @@ -124,6 +125,7 @@ libtwaalgos_la_SOURCES = \ dtwasat.cc \ dualize.cc \ emptiness.cc \ + forq_contains.cc \ genem.cc \ gfguarantee.cc \ gv04.cc \ diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc new file mode 100644 index 000000000..a396784d6 --- /dev/null +++ b/spot/twaalgos/forq_contains.cc @@ -0,0 +1,1191 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2023 Laboratoire de Recherche et Développement +// de l'Epita. IMDEA Software Institute. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
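+
+// This file implements a FORQ-based language-inclusion check for
+// non-alternating Büchi automata; the public entry points are
+// difference_word_forq() and contains_forq(), declared in forq_contains.hh.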
+ +#include "config.h" +#include +#include "forq_contains.hh" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace spot::forq +{ + // Wrapper to represent the edges along a given edge in our automaton. + // As a result, we can abstract the bdds usually used to represent the + // edges as a set of symbols. + namespace + { + class symbol_set + { + friend class ::std::hash; + public: + symbol_set(bdd symbols); + static symbol_set empty(); + + bool operator==(symbol_set const& other) const; + bool contains(symbol_set const& other) const; + bdd const& data() const; + + private: + symbol_set() = default; + bdd symbols; + }; + } + using symbol_set_pair = std::pair; +} + +namespace std +{ + template<> + struct hash<::spot::forq::symbol_set> + { + size_t operator()(::spot::forq::symbol_set const& instance) const noexcept + { + return ::spot::bdd_hash{}(instance.symbols); + } + }; + + template <> + class hash<::spot::forq::symbol_set_pair> + { + public : + size_t operator()(::spot::forq::symbol_set_pair const& x) const noexcept + { + size_t first_hash = std::hash<::spot::forq::symbol_set>()(x.first); + size_t second_hash = std::hash<::spot::forq::symbol_set>()(x.second); + size_t sum = 0x9e3779b9 + + second_hash + + (first_hash << 6) + + (first_hash >> 2); + return sum ^ first_hash; + } + }; +} + +namespace spot::forq +{ + namespace + { + class word + { + friend class ::std::hash; + public: + word(symbol_set sym); + static word empty(); + + word operator+(symbol_set const& other) const; + bool operator==(word const& other) const; + + auto begin() const + { + return symbols.begin(); + } + auto end() const + { + return symbols.end(); + } + + private: + word() = default; + std::vector symbols; + }; + } +} + + +namespace std +{ + template<> + struct hash<::spot::forq::word> + { + size_t operator()(::spot::forq::word const& instance) const noexcept + { + std::size_t seed = instance.symbols.size(); + for (auto const& sym : instance.symbols) + { + size_t x = std::hash<::spot::forq::symbol_set>{}(sym); + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = (x >> 16) ^ x; + seed ^= x + 0x9e3779b9 + (seed << 6) + (seed >> 2); + } + return seed; + } + }; +} + +namespace spot::forq +{ + using state = size_t; + using edge = std::pair; + using const_graph = ::spot::const_twa_graph_ptr; + + struct final_edge + { + symbol_set acc; + state src, dst; + + bool operator==(final_edge const& other) const + { + return acc == other.acc && src == other.src && dst == other.dst; + } + + struct hash + { + size_t operator()(::spot::forq::final_edge const& other) const noexcept + { + size_t lhs = std::hash{}(other.src); + size_t sum = std::hash{}(other.dst) + + 0x9e3779b9 + + (lhs << 6) + + (lhs >> 2); + return lhs ^ sum; + } + }; + }; + + + using final_edge_set = std::unordered_set; + using outgoing_list = std::vector>>; + + struct forq_context + { + using ipost_calculations = std::vector>>>; + public: + struct + { + const_graph aut; + final_edge_set final_edges; + outgoing_list outgoing; + } A, B; + + struct + { + ipost_calculations precomputed_ipost; + } mutable cache; + }; + + namespace util + { + std::vector get_final_states(const_graph const& automata); + forq_context create_forq_context(const_graph const& A, + const_graph const& B); + + ::spot::twa_word_ptr as_twa_word_ptr( + ::spot::bdd_dict_ptr const&, + 
::spot::forq::word const& stem, + ::spot::forq::word const& cycle); + } + +// concepts would be nice +#define TEMPLATE_QUASI_TYPE(x) \ + typename x, typename = std::enable_if_t, x>> + + template + struct quasi_order + { + virtual bool operator<=(T const&) const = 0; + }; + + // State_entry's hash and equality operator should only depend on the actual + // word, since the actual set is what's used to compare it, and is auxillary + template + struct state_entry { + T set; + word w; + bool operator==(state_entry const& other) const + { + return w == other.w; + } + }; +} + +namespace std +{ + template + struct hash> + { + size_t operator()(spot::forq::state_entry const& other) const noexcept + { + return std::hash{}(other.w); + } + }; +} + +namespace spot::forq +{ + namespace + { + template + class post_variable + { + using state_entry_t = state_entry; + using state_set_t = std::unordered_set>; + public: + bool empty() const + { + return state_vector.empty(); + } + //state_set_t& operator[](state s) { return state_vector[s]; } + state_set_t const& operator[](state s) const + { + return state_vector[s]; + } + + void add(state s, std::shared_ptr entry) + { + state_vector[s].insert(std::move(entry)); + } + + bool add_if_min(state s, + std::shared_ptr const& entry) + { + state_set_t& old_state_set = state_vector[s]; + for (auto it = old_state_set.begin(); it != old_state_set.end();) + { + if ((*it)->set <= entry->set) + return false; + if (entry->set <= (*it)->set) + it = old_state_set.erase(it); + else + ++it; + } + old_state_set.insert(entry); + return true; + } + + bool add_if_max(state s, + std::shared_ptr const& entry) + { + state_set_t& old_state_set = state_vector[s]; + for (auto it = old_state_set.begin(); it != old_state_set.end();) + { + if (entry->set <= (*it)->set) + return false; + if ((*it)->set <= entry->set) + it = old_state_set.erase(it); + else + ++it; + } + old_state_set.insert(entry); + return true; + } + + auto begin() + { + return state_vector.begin(); + } + + auto begin() const + { + return state_vector.begin(); + } + + auto end() + { + return state_vector.end(); + } + + auto end() const + { + return state_vector.end(); + } + + private: + mutable std::unordered_map state_vector; + }; + } + + struct state_set : quasi_order + { + public: + + state_set(bool reversed); + state_set(state single_state, bool reversed); + + void insert(state s); + size_t size() const; + state_set& operator+=(std::set const& other); + bool operator<=(state_set const& other) const override; + + auto begin() + { + return set.begin(); + } + + auto begin() const + { + return set.begin(); + } + + auto end() + { + return set.end(); + } + + auto end() const + { + return set.end(); + } + + private: + std::set set; + bool reversed; + }; + + template class post_base + { + using state_entry_t = state_entry; + using state_set_t = std::unordered_set>; + public: + + post_base(forq_context const& context) : context(context) + { + } + + state_set_t const& operator[](state s) const + { + return basis[s]; + } + + protected: + forq_context const& context; + post_variable basis; + void evaluate(post_variable initial_updates); + + private: + // gets new cxt or tgt based on current state + virtual quasi_type post(quasi_type const& current_state, + symbol_set const& edge_expr) const = 0; + + // adds or removes elements from basis, in order to form a proper basis + virtual bool add_extreme(state current_state, + std::shared_ptr const& entry) = 0; + + // Uses updates in order to iteratively construct the basis + bool 
apply_step(post_variable& updates); + }; + + template + void post_base::evaluate(post_variable initial) + { + while (apply_step(initial)); + } + + template + bool post_base::apply_step(post_variable& updates) + { + post_variable buffer; + + for (auto& [from_a, word_set] : updates) + { + // Each state has a set of words, where each word has associated + // cxt or tgt in order to compare them, so state_set would like + // { , , } + for (auto& word_pair : word_set) + { + auto& [quazi_b_set, current_word] = *word_pair; + + // Look at all the successors for a given state (.p), + // in the form (symbol, successors) + for (auto& [sym_a, succ_a] : context.A.outgoing[from_a]) + { + auto new_entry = std::make_shared( + state_entry_t{ + post(quazi_b_set, sym_a), current_word + sym_a + }); + if (add_extreme(succ_a, new_entry)) + { + buffer.add(succ_a, new_entry); + } + } + } + } + updates = std::move(buffer); + return !updates.empty(); + } + namespace + { + class post_i : public post_base + { + using entry_ptr = std::shared_ptr>; + public: + static post_i create(forq_context const& context, + state A_initial, state B_initial); + + static post_i create_reversed(forq_context const& context, + state A_initial, state B_initial); + + using post_base::operator[]; + private: + post_i(forq_context const& context, + state A_initial, state B_initial, bool reversed); + + state_set post(state_set const& current, + symbol_set const& sym) const override; + bool add_extreme(state A_initial, entry_ptr const& entry) override; + + bool is_reversed; + }; + + class context_set : quasi_order + { + public: + context_set() = default; + + bool operator<=(context_set const& other) const override; + + void add(state initial, std::set> const& other); + bool relevance_test(state_set const& set) const; + + template + void iterate(Callable callable) const + { + for (auto& [state1, set] : states) + { + for (auto [state2, flag] : set) + { + callable(state1, state2, flag); + } + } + } + + private: + mutable std::map>> states; + }; + + class post_f : public post_base + { + using entry_ptr = std::shared_ptr>; + public: + static post_f create(forq_context const& context, + state A_initial, state_set const& b_tgt); + + using post_base::operator[]; + + private: + using post_intermediate = std::set>; + post_f(forq_context const& context, + state A_initial, state_set const& B_initial); + + context_set post(context_set const& current, + symbol_set const& sym) const override; + bool add_extreme(state A_initial, entry_ptr const& entry) override; + + context_set compute_cxt_b(state_set const& b_tgt, + symbol_set const& sym) const; + bool is_final_edge(symbol_set const& sym, state src, state dst) const; + + post_intermediate get_post_set(bool already_accepting, + symbol_set const& a_sym, + state from_b) const; + }; + } + + enum class forq_status + { + FORQ_OKAY, // The forq works as expected + FORQ_INVALID_AC_COND, // The automata passed do not + // use buchi acceptance conditions + FORQ_INCOMPATIBLE_DICTS, // The two automata are using + // different bdd_dict objects + FORQ_INCOMPATIBLE_AP, // The two automata are using + // different atomic propositions + FORQ_INVALID_INPUT_BA, // The two automata passed are + // nullptrs and are invalid + FORQ_INVALID_RESULT_PTR // The pointer forq_result, that + // was passed into function + // contains_forq, cannot be nullptr + }; + + struct forq_result + { + // Whether language of graph A is included in B + bool included; + // If the language of graph A is not included in B, + // a counter example is 
provided + spot::twa_word_ptr counter_example; + }; + + // Returns a human-readable string given a forq_status, + // which can be aquired through a call to contains_forq + static const char* forq_status_message(forq_status status) + { + switch (status) + { + case forq_status::FORQ_OKAY: + return "Forq was able to properly run on the two buchi automata."; + case forq_status::FORQ_INVALID_AC_COND: + return "Forq only operates on automata with buchi " + "acceptance conditions."; + case forq_status::FORQ_INCOMPATIBLE_DICTS: + return "The two input graphs must utilize the same twa_dict."; + case forq_status::FORQ_INCOMPATIBLE_AP: + return "The two input graphs must utilize the same set of atomic" + "propositions defined in their shared twa_dict."; + case forq_status::FORQ_INVALID_INPUT_BA: + return "One of the two buchi automata passed in was a nullptr."; + case forq_status::FORQ_INVALID_RESULT_PTR: + return "The result pointer passed in was a nullptr."; + default: + return "Unknown Forq Status Code."; + } + } + + static forq_status valid_automata(const_graph const& A, + const_graph const& B, + forq_result* result) + { + if (!result) + { + return forq_status::FORQ_INVALID_RESULT_PTR; + } + if (!A || !B) + { + return forq_status::FORQ_INVALID_INPUT_BA; + } + + const auto buchi_acceptance = spot::acc_cond::acc_code::buchi(); + auto accept_A = A->get_acceptance(); + auto accept_B = B->get_acceptance(); + + if (accept_A != buchi_acceptance || accept_B != buchi_acceptance) + { + return forq_status::FORQ_INVALID_AC_COND; + } + if (A->get_dict() != B->get_dict()) + { + return forq_status::FORQ_INCOMPATIBLE_DICTS; + } + if (A->ap() != B->ap()) + { + return forq_status::FORQ_INCOMPATIBLE_AP; + } + return forq_status::FORQ_OKAY; + } + + static forq_status create_result(forq_result* result, + spot::twa_word_ptr counter_example = nullptr) + { + result->included = static_cast(counter_example); + result->counter_example = std::move(counter_example); + return forq_status::FORQ_OKAY; + } + + struct forq_setup + { + forq::forq_context context; + forq::post_i post_i_forward; + forq::post_i post_i_reverse; + }; + + static forq_setup create_forq_setup(forq::const_graph A, forq::const_graph B) + { + auto context = forq::util::create_forq_context(A, B); + auto A_initial = A->get_init_state_number(), + B_initial = B->get_init_state_number(); + + auto post_i_forward = forq::post_i::create(context, A_initial, B_initial); + auto post_i_reverse = forq::post_i::create_reversed(context, + A_initial, B_initial); + + return forq_setup{ + context, post_i_forward, post_i_reverse + }; + } + + namespace + { + class final_state_result + { + public: + static final_state_result failure(spot::twa_word_ptr counter_example) + { + return final_state_result(std::move(counter_example)); + } + + static final_state_result success() + { + return final_state_result(nullptr); + } + + bool should_continue() const + { + return counter_example == nullptr; + } + + spot::twa_word_ptr const& get_counter_example() const + { + return counter_example; + } + private: + final_state_result(spot::twa_word_ptr counter_example) + : counter_example(std::move(counter_example)) + { + } + spot::twa_word_ptr counter_example = nullptr; + }; + } + + static spot::twa_word_ptr find_counter_example(forq::state src, + state_set const& W, + word const& word_of_v, + forq_setup const& setup) + { + for (auto& u_ptr : setup.post_i_forward[src]) + { + auto& [U, word_of_u] = *u_ptr; + if (U <= W) + { + auto shared_dict = setup.context.A.aut->get_dict(); + auto current_word = 
util::as_twa_word_ptr(shared_dict, word_of_u, + word_of_v); + if (!setup.context.B.aut->intersects(current_word->as_automaton())) + { + return current_word; + } + } + } + return nullptr; + } + + static final_state_result run_from_final_state(forq::state src, + forq_setup const& setup) + { + auto& context = setup.context; + for (auto& w_ptr : setup.post_i_reverse[src]) + { + auto& [W, word_of_w] = *w_ptr; + auto new_post_f = forq::post_f::create(context, src, W); + + for (auto& v_ptr : new_post_f[src]) + { + auto& [V, word_of_v] = *v_ptr; + if (!V.relevance_test(W)) + continue; + auto counter_example = find_counter_example(src, W, + word_of_v, + setup); + if (counter_example) + { + return final_state_result::failure(std::move(counter_example)); + } + } + } + return final_state_result::success(); + } + + static forq_status forq_impl(const_graph const& A, + const_graph const& B, forq_result* result) + { + if (auto rc = valid_automata(A, B, result); rc != forq_status::FORQ_OKAY) + { + return rc; + } + forq_setup setup = create_forq_setup(A, B); + + for (auto src : util::get_final_states(A)) + { + auto final_state_result = run_from_final_state(src, setup); + if (!final_state_result.should_continue()){ + return create_result(result, final_state_result.get_counter_example()); + } + } + return create_result(result); + } +} + +namespace spot +{ + twa_word_ptr difference_word_forq(forq::const_graph lhs, + forq::const_graph rhs) + { + forq::forq_result result; + auto rc = forq::forq_impl(lhs, rhs, &result); + if (rc != forq::forq_status::FORQ_OKAY) + { + throw std::runtime_error(forq::forq_status_message(rc)); + } + return result.counter_example; + } + + bool contains_forq(forq::const_graph lhs, forq::const_graph rhs) + { + return !difference_word_forq(lhs, rhs); + } +} + +namespace spot::forq::util +{ + // In spot, there exists acceptance sets and an edge can be part of any + // number of them. However, forq only operates on automata with a single + // acceptance set i.e {0}. + static bool is_final_edge(::spot::twa_graph::edge_storage_t const& edge) + { + return edge.acc == ::spot::acc_cond::mark_t({0}); + } + + // We consider any state as final if it's the source of an edge that's + // considered accepting + std::vector get_final_states(const_graph const& automata) + { + std::unordered_set states; + for (auto& edge_storage : automata->edges()) + { + if (is_final_edge(edge_storage)) + { + states.insert(edge_storage.src); + } + } + return std::vector(states.begin(), states.end()); + } + + static final_edge_set get_final_edges(const_graph const& automata) + { + final_edge_set edges; + for (auto& edge_storage : automata->edges()) + { + if (is_final_edge(edge_storage)) + { + edges.insert(final_edge{ + symbol_set(edge_storage.cond), edge_storage.src, edge_storage.dst + }); + } + } + return edges; + } + + // Create a fast query structure for determining the outgoing states of a + // given state. 
This structure is indexed by the source node, which returns + // a list of outgoing edges represented as a pair: + // [set of symbols that allow the transition, the destination state] + static outgoing_list generate_outgoing_states(const_graph const& A) + { + outgoing_list all_states; + all_states.resize(A->num_states()); + for (auto& edge_storage : A->edges()) + { + auto& outgoing = all_states[edge_storage.src]; + outgoing.emplace_back(symbol_set(edge_storage.cond), edge_storage.dst); + } + return all_states; + } + + // Create a list of bdds, where each corresponds to an edge in B + static std::vector create_edge_splitting_basis(const_graph const& B) + { + auto edges = B->edges(); + std::unordered_set out; + std::transform(edges.begin(), edges.end(), std::inserter(out, out.begin()), + [](auto& edge) + { + return edge.cond; + }); + return std::vector(out.begin(), out.end()); + } + + forq_context create_forq_context(const_graph const& A, const_graph const& B) + { + forq_context retval; + retval.B.aut = B; + retval.B.outgoing = util::generate_outgoing_states(B); + retval.B.final_edges = get_final_edges(B); + + retval.A.aut = split_edges(A, create_edge_splitting_basis(B)); + retval.A.outgoing = util::generate_outgoing_states(retval.A.aut); + retval.A.final_edges = get_final_edges(retval.A.aut); + retval.cache.precomputed_ipost.resize(B->num_states()); + return retval; + } + + twa_word_ptr as_twa_word_ptr(bdd_dict_ptr const& dict, + word const& stem, + word const& cycle) + { + auto new_word = ::spot::make_twa_word(dict); + for (auto& symbol : stem) + { + new_word->prefix.push_back(symbol.data()); + } + for (auto& symbol : cycle) + { + new_word->cycle.push_back(symbol.data()); + } + return new_word; + } +} + +namespace spot::forq +{ + symbol_set symbol_set::empty() + { + return symbol_set(); + } + + symbol_set::symbol_set(bdd symbols) : symbols(symbols) + { + } + + bool symbol_set::operator==(symbol_set const& other) const + { + return other.symbols == symbols; + } + + bool symbol_set::contains(symbol_set const& other) const + { + return bdd_implies(other.symbols, symbols); + } + + bdd const& symbol_set::data() const + { + return symbols; + } + + word::word(symbol_set sym) + { + symbols.emplace_back(std::move(sym)); + } + + word word::empty() + { + return word(); + } + + word word::operator+(symbol_set const& other) const + { + auto temp = *this; + if (temp.symbols.size() == 1 + && temp.symbols.front() == symbol_set::empty()) + { + temp.symbols.front() = other; + } + else + { + temp.symbols.push_back(other); + } + return temp; + } + + bool word::operator==(word const& other) const + { + return symbols == other.symbols; + } + + post_f post_f::create(forq_context const& context, + state a_initial, state_set const& b_tgt) + { + return post_f(context, a_initial, b_tgt); + } + + post_f::post_f(forq_context const& context, + state a_initial, state_set const& b_tgt) + : post_base(context) + { + // This is different from post_i initialization, as we have a lot more + // starting states to check (all states of b_tgt), and additionally, we're + // looking at the successors of these nodes, as we want to determine the + // period and not include any of the stem. 
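+    // Concretely, only the successors of a_initial reached through an
+    // accepting edge of A seed the fixpoint below, and each seed stores the
+    // context of b_tgt after reading the symbol of that first edge.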
+ post_variable updates; + for (auto& [sym_a, dst] : context.A.outgoing[a_initial]) + { + auto it = std::find_if (context.A.final_edges.begin(), + context.A.final_edges.end(), + [&sym_a = sym_a, a_initial, dst=dst](auto& other) + { + return other.acc.contains(sym_a) + && other.src == a_initial + && other.dst == dst; + }); + + if (it != context.A.final_edges.end()) + { + auto initial = std::make_shared>( + state_entry{ + compute_cxt_b(b_tgt, sym_a), word(sym_a) + }); + updates.add_if_min(dst, initial); + basis .add_if_min(dst, initial); + } + } + evaluate(std::move(updates)); + } + + post_f::post_intermediate post_f::get_post_set(bool already_accepting, + symbol_set const& a_sym, + state from_b) const + { + auto new_set = std::set>{}; + for (auto& [sym, dst] : context.B.outgoing[from_b]) + { + if (sym.contains(a_sym)) + { + new_set.emplace(std::make_pair(dst, false)); + if (already_accepting || is_final_edge(sym, from_b, dst)) + new_set.emplace(std::make_pair(dst, true)); + } + } + return new_set; + } + + bool post_f::is_final_edge(symbol_set const& sym, state src, state dst) const + { + auto it = context.B.final_edges.find(final_edge{sym, src, dst}); + return it != context.B.final_edges.end(); + } + + context_set post_f::compute_cxt_b(state_set const& b_tgt, + symbol_set const& a_sym) const + { + // Algorithm deviation + context_set context_b; + for (auto from_b : b_tgt) + { + auto post_set = get_post_set(false, a_sym, from_b); + context_b.add(from_b, std::move(post_set)); + } + return context_b; + } + + context_set post_f::post(context_set const& current, + symbol_set const& a_sym) const + { + context_set post_b; + current.iterate([&](state init_b, state from_b, bool already_accepting) + { + auto post_set = get_post_set(already_accepting, a_sym, from_b); + post_b.add(init_b, std::move(post_set)); + }); + return post_b; + } + + bool post_f::add_extreme(state a_initial, entry_ptr const& entry) + { + return basis.add_if_min(a_initial, entry); + } + + post_i post_i::create(forq_context const& context, + state A_initial, state B_initial) + { + return post_i(context, std::move(A_initial), B_initial, false); + } + + post_i post_i::create_reversed(forq_context const& context, + state A_initial, state B_initial) + { + return post_i(context, std::move(A_initial), B_initial, true); + } + + post_i::post_i(forq_context const& context, state A_initial, + state B_initial, bool reversed) + : post_base(context), is_reversed(reversed) + { + post_variable updates; + auto initial = std::make_shared>( + state_entry{ + state_set{B_initial, reversed}, + word::empty() + }); + updates.add(A_initial, initial); + basis .add(A_initial, initial); + evaluate(std::move(updates)); + } + + state_set post_i::post(state_set const& current, + symbol_set const& a_sym) const + { + // Algorithm deviation + // Take current Tgt_B(w) and return Tgt_B(w + sym) + state_set post_b{is_reversed}; + for (state from_b : current) + { + auto& precomputed = context.cache.precomputed_ipost[from_b][a_sym]; + if (!precomputed.has_value()) + { + std::set temp; + for (auto& [sym, dst] : context.B.outgoing[from_b]) + { + // If there exists overlap between the two, then there + // exists some symbols within a_sym that can bring us + // to the new state dst + if (sym.contains(a_sym)) + { + temp.insert(dst); + } + } + precomputed = std::move(temp); + } + post_b += precomputed.value(); + } + return post_b; + } + + bool post_i::add_extreme(state A_initial, entry_ptr const& entry) + { + if (is_reversed) + { + return basis.add_if_max(A_initial, entry); 
+ } + else + { + return basis.add_if_min(A_initial, entry); + } + } + + static bool operator<=( + std::set> const& f, + std::set> const& s) + { + return std::includes(s.begin(), s.end(), f.begin(), f.end()); + } + + static bool operator<=( + std::set> const& f, + state_set const& set) + { + if (set.size() < f.size()) + return false; + auto first1 = set.begin(), last1 = set.end(); + auto first2 = f.begin(), last2 = f.begin(); + + for (; first2 != last2; ++first1) + { + if (first1 == last1 || first2->first < *first1) + { + return false; + } + if (first2->first == *first1) + { + ++first2; + } + } + return true; + } + + void context_set::add(state initial, + std::set> const& other) + { + states[initial].insert(other.begin(), other.end()); + } + + bool context_set::relevance_test(state_set const& W) const + { + for (auto& [s1, quazi] : states) + { + if (!(quazi <= W)) + return false; + } + return true; + } + + bool context_set::operator<=(context_set const& other) const + { + for (auto& [s, set] : states) + { + if (!(set <= other.states[s])) + return false; + } + return true; + + if (other.states.size() != states.size()) + return false; + auto first1 = other.states.begin(), last1 = other.states.end(); + auto first2 = states.begin(), last2 = states.begin(); + + for (; first2 != last2; ++first1) + { + if (first1 == last1 || first2->first < first1->first) + return false; + if (first2->first == first1->first) + { + if (!(first2->second <= first1->second)) + return false; + ++first2; + } + } + return true; + } + + state_set::state_set(state single_state, bool reversed) + : set(std::set{single_state}), reversed(reversed) + { + } + + state_set::state_set(bool reversed) + : reversed(reversed) + { + } + + void state_set::insert(state s) + { + set.insert(s); + } + + size_t state_set::size() const + { + return set.size(); + } + + state_set& state_set::operator+=(std::set const& other) + { + set.insert(other.begin(), other.end()); + return *this; + } + + bool state_set::operator<=(state_set const& other) const + { + // determines if the "this" set is a subset of "other" set + if (other.set.size() < this->set.size()) + return false; + if (reversed) + { + return std::includes(other.set.rbegin(), other.set.rend(), + this->set.rbegin(), this->set.rend(), + std::greater{}); + } + else + { + return std::includes(other.begin(), other.end(), + this->begin(), this->end()); + } + } +} diff --git a/spot/twaalgos/forq_contains.hh b/spot/twaalgos/forq_contains.hh new file mode 100644 index 000000000..24e2e8065 --- /dev/null +++ b/spot/twaalgos/forq_contains.hh @@ -0,0 +1,51 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2023 Laboratoire de Recherche et Développement +// de l'Epita. IMDEA Software Institute. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once +#include +#include + +namespace spot +{ + /// \brief Returns a word accepted by \a left that is rejected by \a right, + /// or nullptr. 
+ /// + /// This implements the language containment algorithm from + /// \cite{doveriFORQBasedLanguageInclusion2022} + /// to check whether L(left)⊆L(right), in which case, it returns nullptr. + /// Otherwise, it returns a counterexample, i.e., a word that is accepted + /// by $L(left)\setminus L(right)$, hence the name of the function. + /// + /// \pre Automata \a left and \a right should be + /// non-alternating state-based Büchi-automata. + SPOT_API twa_word_ptr difference_word_forq( + const_twa_graph_ptr left, spot::const_twa_graph_ptr right); + + /// \brief Returns a boolean value indicating + /// whether \a left is included in the language of \a right. + /// + /// This implements the language containment algorithm from + /// \cite{doveriFORQBasedLanguageInclusion2022} + /// to check whether L(left)⊆L(right). + /// + /// \pre Automata \a left and \a right should be + /// non-alternating state-based Büchi-automata. + SPOT_API bool contains_forq( + const_twa_graph_ptr left, const_twa_graph_ptr right); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 3cca3e975..b26f128ed 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -416,6 +416,7 @@ TESTS_python = \ python/dualize.py \ python/ecfalse.py \ python/except.py \ + python/forq_contains.py \ python/game.py \ python/gen.py \ python/genem.py \ diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py new file mode 100644 index 000000000..be510e66c --- /dev/null +++ b/tests/python/forq_contains.py @@ -0,0 +1,327 @@ +#!/usr/bin/python3 +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
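+
+# These tests compare contains_forq() with the generic contains() on pairs
+# of state-based Büchi automata, including pairs with equivalent languages.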
+ +import spot +from unittest import TestCase +tc = TestCase() + +def do_test(subset, superset, expected=True): + result = spot.contains_forq(subset, superset) + truth = spot.contains(superset, subset) + tc.assertTrue(truth == expected) + tc.assertTrue(result == truth) + +def do_symmetric_test(subset, superset): + do_test(subset, superset, True) + do_test(superset, subset, False) + +always_true = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[t] 0 {0} +--END--""") + +one = spot.automaton(""" +HOA: v1 +States: 2 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[0|1] 1 +State: 1 +[0] 1 +[!0] 1 {0} +--END--""") + +both = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[0] 0 {0} +[!0] 0 {0} +--END--""") + +do_test(both, always_true) +do_test(always_true, both) +do_symmetric_test(one, always_true) + +superset = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[!0|!1] 1 +[0 & 1] 2 +State: 1 +[t] 1 {0} +State: 2 +[t] 2 {0} +--END--""") + +subset = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[!0] 1 +[!0&1 | 0&!1] 2 +State: 1 +[t] 1 {0} +State: 2 +[t] 2 {0} +--END--""") + +do_symmetric_test(subset, superset) + +subset = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +AP: 1 "__ap832" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 {0} +[!0] 0 +--END--""") + +superset = spot.automaton(""" +HOA: v1 +States: 1 +Start: 0 +AP: 1 "__ap832" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 {0} +[0] 0 +--END--""") + +do_test(subset, superset, False) +do_test(superset, subset, False) + +subset = spot.automaton(""" +HOA: v1 +States: 2 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[!0|!1] 1 +State: 1 +[t] 1 {0} +--END--""") +superset = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +--BODY-- +State: 0 +[!0&1 | !0&!1] 1 +[!0&1 | 0&!1] 2 +State: 1 +[t] 1 {0} +State: 2 +[t] 2 {0} +--END--""") + +# Equivlent Languages +do_test(subset, superset) +do_test(superset, subset) + +superset = spot.automaton(""" +HOA: v1 +States: 20 +Start: 0 +AP: 4 "__ap876" "__ap877" "__ap878" "__ap879" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[0&!1&2&!3] 1 +State: 1 +[!0&!1&!2] 2 +[!0&1&!2] 3 +[!0&1&!2] 4 +[!0&!1&!2&!3] 5 +[!0&1&!2&!3] 6 +State: 2 +[!0&!1&!2] 2 +[!0&!1&!2&!3] 5 +[!0&1&!2&!3] 6 +[!0&1&!2] 7 +State: 3 +[!0&!1&!2] 3 +[0&!1&2&!3] 4 +State: 4 +[!0&!1&!2] 4 +[!0&!2&!3] 6 +[!0&1&!2] 7 +State: 5 +[!0&!2&3] 5 +[!0&!2&!3] 6 +State: 6 +[!0&!1&!2&3] 5 +[!0&!1&!2&!3 | !0&1&!2&3] 6 +[!0&1&!2&!3] 8 +State: 7 +[!0&!2&!3] 6 +[!0&!2] 7 +[0&!1&2&!3] 9 +State: 8 +[!0&!2&3] 6 +[!0&!2&!3] 8 +[0&!1&2&!3] 10 +State: 9 +[!0&!2&!3] 6 +[!0&!1&!2] 9 +[!0&1&!2] 11 +State: 10 +[!0&!1&!2&!3] 12 +[!0&1&!2&!3] 13 +State: 11 +[!0&!2&!3] 6 +[!0&!2] 11 +[0&!1&2&!3] 14 +State: 12 +[!0&!1&!2&!3] 12 +[!0&1&!2&!3] 15 +State: 13 +[0&!1&2&!3] 12 +[!0&!1&!2&!3] 13 +[!0&1&!2&!3] 15 +State: 14 +[!0&!1&!2&!3] 6 +[!0&1&!2&!3] 8 +[!0&!1&!2] 9 +[!0&1&!2] 16 +State: 15 +[!0&!2&!3] 15 +[0&!1&2&!3] 17 +State: 16 +[!0&!1&!2&!3] 6 +[!0&1&!2&!3] 8 
+[!0&!1&!2] 11 +[0&!1&2&!3] 14 +[!0&1&!2] 16 +State: 17 +[!0&!1&!2&!3] 17 +[!0&1&!2&!3] 18 +State: 18 +[!0&!2&!3] 18 +[0&!1&2&!3] 19 +State: 19 {0} +[!0&!1&!2&!3] 17 +[!0&1&!2&!3] 18 +--END--""") + +subset = spot.automaton(""" +HOA: v1 +States: 12 +Start: 0 +AP: 4 "__ap876" "__ap877" "__ap878" "__ap879" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[0&!1&2&!3] 1 +State: 1 +[!0&!1&!2&!3] 1 +[!0&!1&!2&3] 2 +[!0&1&!2&!3] 3 +[!0&1&!2&3] 4 +State: 2 +[!0&!1&!2] 2 +[!0&1&!2&3] 4 +[!0&1&!2&!3] 5 +State: 3 +[!0&!2&!3] 3 +[!0&!1&!2&3] 4 +[!0&1&!2&3] 5 +[0&!1&2&!3] 6 +State: 4 +[!0&!1&!2 | !0&!2&3] 4 +[!0&1&!2&!3] 5 +State: 5 +[!0&!1&!2&3] 4 +[!0&1&!2 | !0&!2&!3] 5 +[0&!1&2&!3] 7 +State: 6 +[!0&!1&!2&3] 2 +[!0&1&!2&!3] 3 +[!0&1&!2&3] 5 +[!0&!1&!2&!3] 8 +[!0&!1&!2&!3] 9 +[!0&1&!2&!3] 10 +State: 7 +[!0&!1&!2&!3] 1 +[!0&!1&!2&3] 2 +[!0&1&!2&!3] 3 +[!0&1&!2&3] 4 +[!0&1&!2&!3] 10 +State: 8 +[!0&!1&!2&!3] 8 +[!0&1&!2&!3] 10 +State: 9 +[!0&!1&!2&3] 2 +[!0&1&!2&!3] 3 +[!0&1&!2&3] 5 +[!0&!1&!2&!3] 9 +State: 10 +[!0&!2&!3] 10 +[0&!1&2&!3] 11 +State: 11 {0} +[!0&!1&!2&!3] 8 +[!0&1&!2&!3] 10 +--END--""") + +do_symmetric_test(subset, superset) + From ad22eb3e650549ec66a9fa7f2631f504427a7f15 Mon Sep 17 00:00:00 2001 From: Jonah Romero Date: Tue, 8 Aug 2023 12:08:28 +0200 Subject: [PATCH 333/606] add enviroment variables for FORQ algorithm * AUTHORS: added Jonah Romero * bin/man/spot-x.x: Added the enviroment variables, SPOT_EXCLUSIVE_WORD and SPOT_CONTAINMENT_CHECK * doc/spot.bib: Added paper citation for FORQ inclusion algorithm * spot/twa/twa.cc: Modified exclusive_word to also use FORQ * spot/twaalgos/contains.cc: Modified contains to also use FORQ --- AUTHORS | 1 + bin/man/spot-x.x | 36 +++++++++++++++++++++++++++++ doc/spot.bib | 13 +++++++++++ spot/twa/twa.cc | 48 +++++++++++++++++++++++++++++++++++---- spot/twaalgos/contains.cc | 31 ++++++++++++++++++++++++- 5 files changed, 123 insertions(+), 6 deletions(-) diff --git a/AUTHORS b/AUTHORS index b9b029f9c..d33ca89e1 100644 --- a/AUTHORS +++ b/AUTHORS @@ -18,6 +18,7 @@ Guillaume Sadegh Heikki Tauriainen Henrich Lauko Jérôme Dubois +Jonah Romero Laurent Xu Maximilien Colange Philipp Schlehuber diff --git a/bin/man/spot-x.x b/bin/man/spot-x.x index b961a8ba3..c6091c339 100644 --- a/bin/man/spot-x.x +++ b/bin/man/spot-x.x @@ -50,6 +50,14 @@ time. Note that it restarts all the encoding each time. If this variable is set to any value, statistics about BDD garbage collection and resizing will be output on standard error. +.TP +\fSPOT_CONTAINMENT_CHECK\fR +Specifies which inclusion algorithm spot should use. This can currently +take on 1 of 2 values: 0 for the legacy implementation, and 1 for the +forq implementation [6] (See bibliography below). Forq uses buchi +automata in order to determine inclusion, and will default to the legacy +version if these constraints are not satisfied. + .TP \fBSPOT_DEFAULT_FORMAT\fR Set to a value of \fBdot\fR or \fBhoa\fR to override the default @@ -95,6 +103,14 @@ The contents of this variable is added to any dot output, immediately before the first state is output. This makes it easy to override global attributes of the graph. +.TP +\fSPOT_EXCLUSIVE_WORD\fR +Specifies which algorithm spot should use for exclusive_word. This can +currently take on 1 of 2 values: 0 for the legacy implementation, and 1 +for the forq implementation [6] (See bibliography below). 
Forq assumes Büchi
+automata in order to find an exclusive word, and will fall back to the
+legacy implementation if the automata do not satisfy this constraint.
+
 .TP
 \fBSPOT_HOA_TOLERANT\fR
 If this variable is set, a few sanity checks performed by the HOA
@@ -317,6 +333,26 @@ Describes (among other things) the constructions used for
 translating formulas of the form GF(guarantee) or FG(safety), that can be
 disabled with \fB-x gf-guarantee=0\fR.
 
+.TP
+6.
+Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas:
+FORQ-Based Language Inclusion Formal Testing.
+Proceedings of CAV'22. LNCS 13372.
+
+We propose a novel algorithm to decide the language inclusion between
+(nondeterministic) Büchi automata, a PSpace-complete problem. Our approach,
+like others before, leverages a notion of quasiorder to prune the search for
+a counterexample by discarding candidates which are subsumed by others for
+the quasiorder. Discarded candidates are guaranteed to not compromise the
+completeness of the algorithm. The novelty of our work lies in the quasiorder
+used to discard candidates. We introduce FORQs (family of right quasiorders)
+that we obtain by adapting the notion of family of right congruences put
+forward by Maler and Staiger in 1993. We define a FORQ-based inclusion
+algorithm which we prove correct and instantiate it for a specific FORQ,
+called the structural FORQ, induced by the Büchi automaton to the right of
+the inclusion sign. The resulting implementation, called Forklift, scales up
+better than the state-of-the-art on a variety of benchmarks including
+benchmarks from program verification and theorem proving for word
+combinatorics.

 [SEE ALSO]
 .BR ltl2tgba (1)

diff --git a/doc/spot.bib b/doc/spot.bib
index 6193cb1a2..2ca89c721 100644
--- a/doc/spot.bib
+++ b/doc/spot.bib
@@ -336,6 +336,19 @@
   month         = aug
 }
 
+@inproceedings{doveriFORQBasedLanguageInclusion2022,
+  title = {{{FORQ-Based Language Inclusion Formal Testing}}},
+  booktitle = {{{CAV}}'22: {{Proc}}. 32nd {{Int}}. {{Conf}}. 
on {{Computer Aided Verification}}}, + author = {Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas}, + year = {2022}, + volume = {13372}, + pages = {109--129}, + publisher = {{Springer International Publishing}}, + doi = {10.1007/978-3-031-13188-2_6}, + urldate = {2022-09-15}, + isbn = {978-3-031-13187-5 978-3-031-13188-2} +} + @InProceedings{ duret.11.vecos, author = {Alexandre Duret-Lutz}, title = {{LTL} Translation Improvements in {Spot}}, diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index a2ffbc70d..8ebb761f7 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -228,20 +229,57 @@ namespace spot return b->intersecting_run(complement(ensure_graph(a))); } + static bool + is_buchi_automata(const_twa_graph_ptr const& aut) + { + return spot::acc_cond::acc_code::buchi() == aut->get_acceptance(); + } + twa_word_ptr twa::exclusive_word(const_twa_ptr other) const { const_twa_ptr a = shared_from_this(); const_twa_ptr b = other; + enum class containment_type : unsigned { LEGACY = 0, FORQ }; + static containment_type containment = [&]() + { + char* s = getenv("SPOT_EXCLUSIVE_WORD"); + // We expect a single digit that represents a valid enumeration value + if (!s) + return containment_type::LEGACY; + else if (*s == '\0' || *(s + 1) != '\0' || *s < '0' || *s > '1') + throw std::runtime_error("Invalid value for enviroment variable: " + "SPOT_EXCLUSIVE_WORD"); + else + return static_cast(*s - '0'); + }(); + // We have to find a word in A\B or in B\A. When possible, let's // make sure the first automaton we complement is deterministic. - if (auto aa = std::dynamic_pointer_cast(a)) - if (is_deterministic(aa)) + auto a_twa_as_graph = std::dynamic_pointer_cast(a); + auto b_twa_as_graph = std::dynamic_pointer_cast(a); + if (a_twa_as_graph) + if (is_deterministic(a_twa_as_graph)) std::swap(a, b); - if (auto word = a->intersecting_word(complement(ensure_graph(b)))) - return word; - return b->intersecting_word(complement(ensure_graph(a))); + + bool uses_buchi = is_buchi_automata(a_twa_as_graph) + && is_buchi_automata(b_twa_as_graph); + if (containment == containment_type::FORQ + && uses_buchi + && a_twa_as_graph + && b_twa_as_graph) + { + if (auto word = difference_word_forq(a_twa_as_graph, b_twa_as_graph)) + return word; + return difference_word_forq(b_twa_as_graph, a_twa_as_graph); + } + else + { + if (auto word = a->intersecting_word(complement(ensure_graph(b)))) + return word; + return b->intersecting_word(complement(ensure_graph(a))); + } } void diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index cf2680d01..d30904aa6 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -19,6 +19,7 @@ #include "config.h" #include +#include #include #include #include @@ -34,9 +35,37 @@ namespace spot } } + static bool is_buchi_automata(const_twa_graph_ptr const& aut) + { + return spot::acc_cond::acc_code::buchi() == aut->get_acceptance(); + } + bool contains(const_twa_graph_ptr left, const_twa_ptr right) { - return !complement(left)->intersects(right); + enum class containment_type : unsigned { LEGACY = 0, FORQ }; + static containment_type containment = [&]() + { + char* s = getenv("SPOT_CONTAINMENT_CHECK"); + // We expect a single digit that represents a valid enumeration value + if (!s) + return containment_type::LEGACY; + else if (*s == '\0' || *(s + 1) != '\0' || *s < '0' || *s > '1') + throw std::runtime_error("Invalid value for enviroment variable: " + "SPOT_CONTAINMENT_CHECK"); + else + 
return static_cast(*s - '0'); + }(); + + auto as_graph = std::dynamic_pointer_cast(right); + bool uses_buchi = is_buchi_automata(left) && is_buchi_automata(as_graph); + if (containment == containment_type::FORQ && uses_buchi && as_graph) + { + return contains_forq(left, as_graph); + } + else + { + return !complement(left)->intersects(right); + } } bool contains(const_twa_graph_ptr left, formula right) From 28a6471efbae8ebea5568bb650f0c4d3b4501168 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Sep 2023 11:39:13 +0200 Subject: [PATCH 334/606] forq: fix bib entry and bind the doxygen doc to a group * doc/spot.bib: Reformat the FORQ reference in the style of the rest of the bibliographic file. * spot/twaalgos/forq_contains.hh: Adjust, and add missing \ingroup. --- doc/spot.bib | 29 +++++++++++++++-------------- spot/twaalgos/forq_contains.hh | 34 ++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/doc/spot.bib b/doc/spot.bib index 2ca89c721..c645c8156 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -1,3 +1,4 @@ + @InProceedings{ babiak.12.tacas, author = {Tom{\'a}{\v{s}} Babiak and Mojm{\'i}r K{\v{r}}et{\'i}nsk{\'y} and Vojt{\v{e}}ch {\v{R}}eh{\'a}k @@ -185,7 +186,7 @@ month = apr, volume = {13244}, pages = {99--117}, - doi = {10.1007/978-3-030-99527-0_6}, + doi = {10.1007/978-3-030-99527-0_6} } @InProceedings{ cerna.03.mfcs, @@ -336,17 +337,17 @@ month = aug } -@inproceedings{doveriFORQBasedLanguageInclusion2022, - title = {{{FORQ-Based Language Inclusion Formal Testing}}}, - booktitle = {{{CAV}}'22: {{Proc}}. 32nd {{Int}}. {{Conf}}. on {{Computer Aided Verification}}}, - author = {Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas}, - year = {2022}, - volume = {13372}, - pages = {109--129}, - publisher = {{Springer International Publishing}}, - doi = {10.1007/978-3-031-13188-2_6}, - urldate = {2022-09-15}, - isbn = {978-3-031-13187-5 978-3-031-13188-2} +@InProceedings{ doveri.22.cav, + title = {{FORQ}-Based Language Inclusion Formal Testing}, + booktitle = {Proceedings of the 32nd International Conference on + Computer Aided Verification (CAV'22)}, + author = {Kyveli Doveri and Pierre Ganty and Nicolas Mazzocchi}, + year = {2022}, + volume = {13372}, + pages = {109--129}, + publisher = {Springer International Publishing}, + doi = {10.1007/978-3-031-13188-2_6}, + month = aug } @InProceedings{ duret.11.vecos, @@ -1099,7 +1100,8 @@ publisher = {Elsevier}, editor = {Rance Cleaveland and Hubert Garavel}, year = {2002}, - month = jul, pdf = {adl/duret.16.atva.pdf}, + month = jul, + pdf = {adl/duret.16.atva.pdf}, abstract = {Checking liveness properties with partial-order reductions requires a cycle proviso to ensure that an action cannot be postponed forever. The proviso forces each cycle to contain @@ -1111,7 +1113,6 @@ the source of this edge, while this paper also explores the expansion of the destination and the use of SCC-based information.}, - address = {M{\'a}laga, Spain}, doi = {10.1016/S1571-0661(04)80409-2} } diff --git a/spot/twaalgos/forq_contains.hh b/spot/twaalgos/forq_contains.hh index 24e2e8065..3d49c5498 100644 --- a/spot/twaalgos/forq_contains.hh +++ b/spot/twaalgos/forq_contains.hh @@ -23,29 +23,31 @@ namespace spot { + /// \ingroup containment /// \brief Returns a word accepted by \a left that is rejected by \a right, /// or nullptr. 
/// - /// This implements the language containment algorithm from - /// \cite{doveriFORQBasedLanguageInclusion2022} - /// to check whether L(left)⊆L(right), in which case, it returns nullptr. - /// Otherwise, it returns a counterexample, i.e., a word that is accepted - /// by $L(left)\setminus L(right)$, hence the name of the function. + /// This implements a FORQ-based language containment algorithm + /// \cite doveri.22.cav to check whether L(left)⊆L(right), in which + /// case, it returns nullptr. Otherwise, it returns a + /// counterexample, i.e., a word that is accepted by + /// $L(left)\setminus L(right)$, hence the name of the function. /// - /// \pre Automata \a left and \a right should be - /// non-alternating state-based Büchi-automata. - SPOT_API twa_word_ptr difference_word_forq( - const_twa_graph_ptr left, spot::const_twa_graph_ptr right); + /// \pre Automata \a left and \a right should be non-alternating + /// Büchi-automata. + SPOT_API + twa_word_ptr difference_word_forq(const_twa_graph_ptr left, + const_twa_graph_ptr right); + /// \ingroup containment /// \brief Returns a boolean value indicating /// whether \a left is included in the language of \a right. /// - /// This implements the language containment algorithm from - /// \cite{doveriFORQBasedLanguageInclusion2022} - /// to check whether L(left)⊆L(right). + /// This implements a FORQ-based language containment algorithm + /// to check whether L(left)⊆L(right). \cite doveri.22.cav /// - /// \pre Automata \a left and \a right should be - /// non-alternating state-based Büchi-automata. - SPOT_API bool contains_forq( - const_twa_graph_ptr left, const_twa_graph_ptr right); + /// \pre Automata \a left and \a right should be non-alternating + /// Büchi-automata. + SPOT_API + bool contains_forq(const_twa_graph_ptr left, const_twa_graph_ptr right); } From 3861c045812cd4428fb572b0db6e4ecbf9d1ef7b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 5 Sep 2023 18:03:09 +0200 Subject: [PATCH 335/606] forq: fix Buchi acceptance test * spot/twa/twa.cc: Here. * spot/twaalgos/forq_contains.cc: And there. Also simplify the handling code by simply throwing the exception when the error is detected. --- spot/twa/twa.cc | 21 ++--- spot/twaalgos/forq_contains.cc | 145 +++++---------------------------- 2 files changed, 28 insertions(+), 138 deletions(-) diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index 8ebb761f7..a5288ac3c 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014-2019, 2021, 2022 Laboratoire de Recherche et +// Copyright (C) 2011, 2014-2019, 2021, 2022, 2023 Laboratoire de Recherche et // Developpement de l'EPITA (LRDE). // Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -229,12 +229,6 @@ namespace spot return b->intersecting_run(complement(ensure_graph(a))); } - static bool - is_buchi_automata(const_twa_graph_ptr const& aut) - { - return spot::acc_cond::acc_code::buchi() == aut->get_acceptance(); - } - twa_word_ptr twa::exclusive_word(const_twa_ptr other) const { @@ -256,19 +250,18 @@ namespace spot }(); // We have to find a word in A\B or in B\A. When possible, let's - // make sure the first automaton we complement is deterministic. + // make sure the first automaton we complement, i.e., b, is deterministic. 
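+    // (Complementing a deterministic automaton is considerably cheaper
+    // than complementing a nondeterministic one.)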
auto a_twa_as_graph = std::dynamic_pointer_cast(a); - auto b_twa_as_graph = std::dynamic_pointer_cast(a); + auto b_twa_as_graph = std::dynamic_pointer_cast(b); if (a_twa_as_graph) if (is_deterministic(a_twa_as_graph)) std::swap(a, b); - bool uses_buchi = is_buchi_automata(a_twa_as_graph) - && is_buchi_automata(b_twa_as_graph); if (containment == containment_type::FORQ - && uses_buchi - && a_twa_as_graph - && b_twa_as_graph) + && a_twa_as_graph + && b_twa_as_graph + && a_twa_as_graph->acc().is_buchi() + && b_twa_as_graph->acc().is_buchi()) { if (auto word = difference_word_forq(a_twa_as_graph, b_twa_as_graph)) return word; diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index a396784d6..adf7c597a 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -19,25 +19,19 @@ #include "config.h" #include -#include "forq_contains.hh" +#include -#include #include -#include -#include #include #include -#include #include #include -#include #include #include #include #include #include #include -#include #include #include #include @@ -522,96 +516,6 @@ namespace spot::forq }; } - enum class forq_status - { - FORQ_OKAY, // The forq works as expected - FORQ_INVALID_AC_COND, // The automata passed do not - // use buchi acceptance conditions - FORQ_INCOMPATIBLE_DICTS, // The two automata are using - // different bdd_dict objects - FORQ_INCOMPATIBLE_AP, // The two automata are using - // different atomic propositions - FORQ_INVALID_INPUT_BA, // The two automata passed are - // nullptrs and are invalid - FORQ_INVALID_RESULT_PTR // The pointer forq_result, that - // was passed into function - // contains_forq, cannot be nullptr - }; - - struct forq_result - { - // Whether language of graph A is included in B - bool included; - // If the language of graph A is not included in B, - // a counter example is provided - spot::twa_word_ptr counter_example; - }; - - // Returns a human-readable string given a forq_status, - // which can be aquired through a call to contains_forq - static const char* forq_status_message(forq_status status) - { - switch (status) - { - case forq_status::FORQ_OKAY: - return "Forq was able to properly run on the two buchi automata."; - case forq_status::FORQ_INVALID_AC_COND: - return "Forq only operates on automata with buchi " - "acceptance conditions."; - case forq_status::FORQ_INCOMPATIBLE_DICTS: - return "The two input graphs must utilize the same twa_dict."; - case forq_status::FORQ_INCOMPATIBLE_AP: - return "The two input graphs must utilize the same set of atomic" - "propositions defined in their shared twa_dict."; - case forq_status::FORQ_INVALID_INPUT_BA: - return "One of the two buchi automata passed in was a nullptr."; - case forq_status::FORQ_INVALID_RESULT_PTR: - return "The result pointer passed in was a nullptr."; - default: - return "Unknown Forq Status Code."; - } - } - - static forq_status valid_automata(const_graph const& A, - const_graph const& B, - forq_result* result) - { - if (!result) - { - return forq_status::FORQ_INVALID_RESULT_PTR; - } - if (!A || !B) - { - return forq_status::FORQ_INVALID_INPUT_BA; - } - - const auto buchi_acceptance = spot::acc_cond::acc_code::buchi(); - auto accept_A = A->get_acceptance(); - auto accept_B = B->get_acceptance(); - - if (accept_A != buchi_acceptance || accept_B != buchi_acceptance) - { - return forq_status::FORQ_INVALID_AC_COND; - } - if (A->get_dict() != B->get_dict()) - { - return forq_status::FORQ_INCOMPATIBLE_DICTS; - } - if (A->ap() != B->ap()) - { - return 
forq_status::FORQ_INCOMPATIBLE_AP; - } - return forq_status::FORQ_OKAY; - } - - static forq_status create_result(forq_result* result, - spot::twa_word_ptr counter_example = nullptr) - { - result->included = static_cast(counter_example); - result->counter_example = std::move(counter_example); - return forq_status::FORQ_OKAY; - } - struct forq_setup { forq::forq_context context; @@ -707,32 +611,11 @@ namespace spot::forq word_of_v, setup); if (counter_example) - { - return final_state_result::failure(std::move(counter_example)); - } + return final_state_result::failure(std::move(counter_example)); } } return final_state_result::success(); } - - static forq_status forq_impl(const_graph const& A, - const_graph const& B, forq_result* result) - { - if (auto rc = valid_automata(A, B, result); rc != forq_status::FORQ_OKAY) - { - return rc; - } - forq_setup setup = create_forq_setup(A, B); - - for (auto src : util::get_final_states(A)) - { - auto final_state_result = run_from_final_state(src, setup); - if (!final_state_result.should_continue()){ - return create_result(result, final_state_result.get_counter_example()); - } - } - return create_result(result); - } } namespace spot @@ -740,13 +623,27 @@ namespace spot twa_word_ptr difference_word_forq(forq::const_graph lhs, forq::const_graph rhs) { - forq::forq_result result; - auto rc = forq::forq_impl(lhs, rhs, &result); - if (rc != forq::forq_status::FORQ_OKAY) + if (!lhs || !rhs) + throw std::runtime_error("One of the two automata passed was a nullptr."); + if (!lhs->acc().is_buchi() || !rhs->acc().is_buchi()) + throw std::runtime_error("Forq only operates on automata with Büchi " + "acceptance conditions."); + if (lhs->get_dict() != rhs->get_dict()) + throw std::runtime_error + ("The two input graphs must utilize the same twa_dict."); + if (lhs->ap() != rhs->ap()) + throw std::runtime_error("The two input graphs must use the same set " + "of APs"); + + forq::forq_setup setup = forq::create_forq_setup(lhs, rhs); + + for (auto src: forq::util::get_final_states(lhs)) { - throw std::runtime_error(forq::forq_status_message(rc)); + auto final_state_result = forq::run_from_final_state(src, setup); + if (!final_state_result.should_continue()) + return final_state_result.get_counter_example(); } - return result.counter_example; + return nullptr; } bool contains_forq(forq::const_graph lhs, forq::const_graph rhs) From ca4e6c4b481dd2322f4bd36a4bc98b99c751f8ae Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 7 Sep 2023 16:01:46 +0200 Subject: [PATCH 336/606] forq: swap arguments of contains_forq * spot/twaalgos/forq_contains.hh, spot/twaalgos/forq_contains.cc (contains_forq): Swap arguments so they follow the same order as contains(). * tests/python/forq_contains.py: Adjust. 
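
For illustration only (this snippet is not part of the patch; it merely mirrors
the calling pattern exercised in tests/python/forq_contains.py), the adjusted
argument order can be used from Python roughly as follows:

  import spot
  # contains_forq() now follows the same argument order as spot.contains():
  # contains_forq(a, b) asks whether L(a) includes L(b).  Both operands are
  # translated with Büchi acceptance, since contains_forq() requires it.
  a = spot.translate('GFa | GFb', 'buchi')
  b = spot.translate('GFa & GFb', 'buchi')
  assert spot.contains_forq(a, b) == spot.contains(a, b)   # both True
  assert spot.contains_forq(b, a) == spot.contains(b, a)   # both False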
--- spot/twaalgos/forq_contains.cc | 4 ++-- spot/twaalgos/forq_contains.hh | 4 ++-- tests/python/forq_contains.py | 3 +-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index adf7c597a..ccb635839 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -630,7 +630,7 @@ namespace spot "acceptance conditions."); if (lhs->get_dict() != rhs->get_dict()) throw std::runtime_error - ("The two input graphs must utilize the same twa_dict."); + ("The two input automata must use the same twa_dict."); if (lhs->ap() != rhs->ap()) throw std::runtime_error("The two input graphs must use the same set " "of APs"); @@ -648,7 +648,7 @@ namespace spot bool contains_forq(forq::const_graph lhs, forq::const_graph rhs) { - return !difference_word_forq(lhs, rhs); + return !difference_word_forq(rhs, lhs); } } diff --git a/spot/twaalgos/forq_contains.hh b/spot/twaalgos/forq_contains.hh index 3d49c5498..9e7c792af 100644 --- a/spot/twaalgos/forq_contains.hh +++ b/spot/twaalgos/forq_contains.hh @@ -41,10 +41,10 @@ namespace spot /// \ingroup containment /// \brief Returns a boolean value indicating - /// whether \a left is included in the language of \a right. + /// whether the language of \a left includes in the language of \a right. /// /// This implements a FORQ-based language containment algorithm - /// to check whether L(left)⊆L(right). \cite doveri.22.cav + /// to check whether L(left)⊇L(right). \cite doveri.22.cav /// /// \pre Automata \a left and \a right should be non-alternating /// Büchi-automata. diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py index be510e66c..3e47bec87 100644 --- a/tests/python/forq_contains.py +++ b/tests/python/forq_contains.py @@ -23,7 +23,7 @@ from unittest import TestCase tc = TestCase() def do_test(subset, superset, expected=True): - result = spot.contains_forq(subset, superset) + result = spot.contains_forq(superset, subset) truth = spot.contains(superset, subset) tc.assertTrue(truth == expected) tc.assertTrue(result == truth) @@ -324,4 +324,3 @@ State: 11 {0} --END--""") do_symmetric_test(subset, superset) - From 05d7622f8f67616f0c207b01ba3bb2ab96b73685 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 7 Sep 2023 17:36:09 +0200 Subject: [PATCH 337/606] forq: make it easier to select contains's version * spot/twaalgos/contains.hh, spot/twaalgos/contains.cc (containment_select_version): New function. (contains): Use it. * spot/twa/twa.cc (exclusive_word): Likewise. * bin/autfilt.cc (--included-in): Adjust to use forq depending on containement_select_version. * bin/man/spot-x.x: Adjust documentation of CONTAINMENT_SELECT_VERSION. * tests/core/included.test, tests/python/forq_contains.py: Add more tests. * NEWS: Mention the new feature. --- NEWS | 13 +++++++ bin/autfilt.cc | 30 ++++++++++++--- bin/man/spot-x.x | 31 +++++---------- spot/twa/twa.cc | 34 +++++------------ spot/twaalgos/contains.cc | 72 +++++++++++++++++++++-------------- spot/twaalgos/contains.hh | 30 ++++++++++++++- tests/core/included.test | 26 ++++++++++++- tests/python/forq_contains.py | 25 ++++++++++++ 8 files changed, 178 insertions(+), 83 deletions(-) diff --git a/NEWS b/NEWS index 8f147ad35..61f7c6df7 100644 --- a/NEWS +++ b/NEWS @@ -65,6 +65,19 @@ New in spot 2.11.6.dev (not yet released) 36 seconds; it now produce an AIG circuit with 53 nodes in only 0.1 second. 
+ - spot::contains_forq() is a implementation of the paper "FORQ-Based + Language Inclusion Formal Testing" (Doveri, Ganty, Mazzocchi; + CAV'22) contributed by Jonah Romero. + + - spot::contains() still default to the complementation-based + algorithm, however by calling + spot::containment_select_version("forq") or by setting + SPOT_CONTAINMENT_CHECK=forq in the environment, the + spot::contains_forq() implementation will be used instead when + applicable (inclusion between Büchi automata). + + The above also impacts autfilt --included-in option. + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/bin/autfilt.cc b/bin/autfilt.cc index eec2246b3..820b37f49 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -984,12 +985,24 @@ parse_opt(int key, char* arg, struct argp_state*) break; case OPT_INCLUDED_IN: { - auto aut = ensure_deterministic(read_automaton(arg, opt->dict), true); - aut = spot::dualize(aut); - if (!opt->included_in) - opt->included_in = aut; + auto aut = read_automaton(arg, opt->dict); + if (spot::containment_select_version() == 0) + { + aut = spot::complement(aut); + if (!aut->is_existential()) + aut = spot::remove_alternation(aut); + if (!opt->included_in) + opt->included_in = aut; + else + opt->included_in = ::product_or(opt->included_in, aut); + } else - opt->included_in = ::product_or(opt->included_in, aut); + { + if (opt->included_in) + error(2, 0, "FORQ-based inclusion check only works " + "with one inclusion-test at a time"); + opt->included_in = aut; + } } break; case OPT_INHERENTLY_WEAK_SCCS: @@ -1519,7 +1532,12 @@ namespace if (opt->intersect) matched &= aut->intersects(opt->intersect); if (opt->included_in) - matched &= !aut->intersects(opt->included_in); + { + if (spot::containment_select_version() == 0) + matched &= !aut->intersects(opt->included_in); + else + matched &= spot::contains(opt->included_in, aut); + } if (opt->equivalent_pos) matched &= !aut->intersects(opt->equivalent_neg) && spot::contains(aut, opt->equivalent_pos); diff --git a/bin/man/spot-x.x b/bin/man/spot-x.x index c6091c339..b9e3c7166 100644 --- a/bin/man/spot-x.x +++ b/bin/man/spot-x.x @@ -51,12 +51,13 @@ If this variable is set to any value, statistics about BDD garbage collection and resizing will be output on standard error. .TP -\fSPOT_CONTAINMENT_CHECK\fR -Specifies which inclusion algorithm spot should use. This can currently -take on 1 of 2 values: 0 for the legacy implementation, and 1 for the -forq implementation [6] (See bibliography below). Forq uses buchi -automata in order to determine inclusion, and will default to the legacy -version if these constraints are not satisfied. +\fBSPOT_CONTAINMENT_CHECK\fR +Specifies which inclusion algorithm Spot should use. If the variable +is unset, or set to \fB"default"\fR, containment checks are done +using a complementation-based procedure. If the variable is set to +\fB"forq"\fR, and FORQ-based containment check is used for Büchi automata +(the default procedure is still used for non-Büchi automata). See +[6] in the bibliography below. .TP \fBSPOT_DEFAULT_FORMAT\fR @@ -335,24 +336,12 @@ disabled with \fB-x gf-guarantee=0\fR. .TP 6. -Doveri, Kyveli and Ganty, Pierre and Mazzocchi, Nicolas: +Kyveli Doveri and Pierre Ganty and Nicolas Mazzocchi: FORQ-Based Language Inclusion Formal Testing. Proceedings of CAV'22. LNCS 13372. 
-We propose a novel algorithm to decide the language inclusion between -(nondeterministic) Büchi automata, a PSpace-complete problem. Our approach, -like others before, leverage a notion of quasiorder to prune the search for a -counterexample by discarding candidates which are subsumed by others for the -quasiorder.Discarded candidates are guaranteed to not compromise the -completeness of the algorithm. The novelty of our work lies in the quasiorder k -used to discard candidates. We introduce FORQs (family of right quasiorders) -that we obtain by adapting the notion of family of right congruences put forward -by Maler and Staiger in 1993. We define a FORQ-based inclusion algorithm which -we prove correct and instantiate it for a specific FORQ, called the structural -FORQ, induced by the B\"uchi automaton to the right of the inclusion sign. The -resulting implementation, called Forklift, scales up better than the -state-of-the-art on a variety of benchmarks including benchmarks from program -verification and theorem proving for word combinatorics. +The containment check implemented as spot::contains_forq(), and +used for Büchi automata when \fBSPOT_CONTAINMENT_CHECK=forq\fR. [SEE ALSO] .BR ltl2tgba (1) diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index a5288ac3c..c1c113e87 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -213,6 +214,7 @@ namespace spot return make_twa_graph(aut_in, twa::prop_set::all()); } } + twa_run_ptr twa::exclusive_run(const_twa_ptr other) const { @@ -235,37 +237,21 @@ namespace spot const_twa_ptr a = shared_from_this(); const_twa_ptr b = other; - enum class containment_type : unsigned { LEGACY = 0, FORQ }; - static containment_type containment = [&]() - { - char* s = getenv("SPOT_EXCLUSIVE_WORD"); - // We expect a single digit that represents a valid enumeration value - if (!s) - return containment_type::LEGACY; - else if (*s == '\0' || *(s + 1) != '\0' || *s < '0' || *s > '1') - throw std::runtime_error("Invalid value for enviroment variable: " - "SPOT_EXCLUSIVE_WORD"); - else - return static_cast(*s - '0'); - }(); - // We have to find a word in A\B or in B\A. When possible, let's // make sure the first automaton we complement, i.e., b, is deterministic. - auto a_twa_as_graph = std::dynamic_pointer_cast(a); - auto b_twa_as_graph = std::dynamic_pointer_cast(b); - if (a_twa_as_graph) + if (auto a_twa_as_graph = std::dynamic_pointer_cast(a)) if (is_deterministic(a_twa_as_graph)) std::swap(a, b); - if (containment == containment_type::FORQ - && a_twa_as_graph - && b_twa_as_graph - && a_twa_as_graph->acc().is_buchi() - && b_twa_as_graph->acc().is_buchi()) + if (containment_select_version() == 1 + && a->acc().is_buchi() + && b->acc().is_buchi()) { - if (auto word = difference_word_forq(a_twa_as_graph, b_twa_as_graph)) + auto ag = ensure_graph(a); + auto bg = ensure_graph(b); + if (auto word = difference_word_forq(ag, bg)) return word; - return difference_word_forq(b_twa_as_graph, a_twa_as_graph); + return difference_word_forq(bg, ag); } else { diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index d30904aa6..7170fbedf 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de -// l'Epita. +// Copyright (C) 2018, 2019, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita. 
// // This file is part of Spot, a model checking library. // @@ -33,39 +33,25 @@ namespace spot { return ltl_to_tgba_fm(f, dict); } - } - static bool is_buchi_automata(const_twa_graph_ptr const& aut) - { - return spot::acc_cond::acc_code::buchi() == aut->get_acceptance(); + static const_twa_graph_ptr + ensure_graph(const const_twa_ptr& aut_in) + { + const_twa_graph_ptr aut = + std::dynamic_pointer_cast(aut_in); + if (aut) + return aut; + return make_twa_graph(aut_in, twa::prop_set::all()); + } } bool contains(const_twa_graph_ptr left, const_twa_ptr right) { - enum class containment_type : unsigned { LEGACY = 0, FORQ }; - static containment_type containment = [&]() - { - char* s = getenv("SPOT_CONTAINMENT_CHECK"); - // We expect a single digit that represents a valid enumeration value - if (!s) - return containment_type::LEGACY; - else if (*s == '\0' || *(s + 1) != '\0' || *s < '0' || *s > '1') - throw std::runtime_error("Invalid value for enviroment variable: " - "SPOT_CONTAINMENT_CHECK"); - else - return static_cast(*s - '0'); - }(); - - auto as_graph = std::dynamic_pointer_cast(right); - bool uses_buchi = is_buchi_automata(left) && is_buchi_automata(as_graph); - if (containment == containment_type::FORQ && uses_buchi && as_graph) - { - return contains_forq(left, as_graph); - } + if (containment_select_version() == 1 + && left->acc().is_buchi() && right->acc().is_buchi()) + return contains_forq(left, ensure_graph(right)); else - { - return !complement(left)->intersects(right); - } + return !complement(left)->intersects(right); } bool contains(const_twa_graph_ptr left, formula right) @@ -111,4 +97,32 @@ namespace spot { return contains(right, left) && contains(left, right); } + + int containment_select_version(const char* version) + { + static int pref = -1; + const char *env = nullptr; + if (!version && pref < 0) + version = env = getenv("SPOT_CONTAINMENT_CHECK"); + if (version) + { + if (!strcasecmp(version, "default")) + pref = 0; + else if (!strcasecmp(version, "forq")) + pref = 1; + else + { + const char* err = ("containment_select_version(): argument" + " should be one of {default,forq}"); + if (env) + err = "SPOT_CONTAINMENT_CHECK should be one of {default,forq}"; + throw std::runtime_error(err); + } + } + else if (pref < 0) + { + pref = 0; + } + return pref; + } } diff --git a/spot/twaalgos/contains.hh b/spot/twaalgos/contains.hh index a1d64f1b1..5c30a66b9 100644 --- a/spot/twaalgos/contains.hh +++ b/spot/twaalgos/contains.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de -// l'Epita. +// Copyright (C) 2018, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -62,4 +62,30 @@ namespace spot SPOT_API bool are_equivalent(formula left, const_twa_graph_ptr right); SPOT_API bool are_equivalent(formula left, formula right); /// @} + + /// \ingroup containment + /// + /// Query, or change the version of the containment check to use by + /// contains() or twa::exclusive_run(). + /// + /// By default those containment checks use a complementation-based + /// algorithm that is generic that work on any acceptance condition. + /// Alternative algorithms such as contains_forq() are available, + /// for Büchi automata, but are not used by default. 
+ /// + /// When calling this function \a version can be: + /// - "default" to force the above default containment checks to be used + /// - "forq" to use contains_forq() when possible + /// - nullptr do not modify the preference. + /// + /// If the first call to containement_select_version() is done with + /// nullptr as an argument, then the value of the + /// SPOT_CONTAINMENT_CHECK environment variable is used instead. + /// + /// In all cases, the preferred containment check is returned as an + /// integer. This integer is meant to be used by Spot's algorithms + /// to select the desired containment check to apply, but it's + /// encoding (currently 1 for FORQ, 0 for default) should be + /// regarded as an implementation detail subject to change. + SPOT_API int containment_select_version(const char* version = nullptr); } diff --git a/tests/core/included.test b/tests/core/included.test index 3574af9e3..e9f2cea08 100755 --- a/tests/core/included.test +++ b/tests/core/included.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2022, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -38,6 +38,20 @@ run 0 autfilt -q gab.hoa --included-in fga.hoa --included-in fgb.hoa run 1 autfilt -q ga.hoa --included-in fga.hoa --included-in fgb.hoa run 0 autfilt -q false.hoa --included-in fga.hoa +SPOT_CONTAINMENT_CHECK=forq run 0 autfilt -q fga.hoa --included-in gfa.hoa +SPOT_CONTAINMENT_CHECK=forq run 0 autfilt -q fga.hoa --included-in fga.hoa +SPOT_CONTAINMENT_CHECK=forq run 1 autfilt -q gfa.hoa --included-in fga.hoa +SPOT_CONTAINMENT_CHECK=forq \ + run 2 autfilt -q gab.hoa --included-in fga.hoa --included-in fgb.hoa +SPOT_CONTAINMENT_CHECK=forq \ + run 2 autfilt -q ga.hoa --included-in fga.hoa --included-in fgb.hoa +SPOT_CONTAINMENT_CHECK=forq run 0 autfilt -q false.hoa --included-in fga.hoa + +SPOT_CONTAINMENT_CHECK=error \ + autfilt -q fga.hoa --included-in gfa.hoa >err && exit 1 +test $? -eq 2 +grep 'SPOT_CONTAINMENT_CHECK.*forq' error + run 1 autfilt -q gfa.hoa --equivalent-to fga.hoa run 1 autfilt -q fga.hoa --equivalent-to gfa.hoa @@ -61,6 +75,7 @@ ltl2tgba '!(a U c)' | autfilt --product-or a1.hoa > out.hoa ltl2tgba true | autfilt out.hoa --equivalent-to - && exit 1 # In Spot 2.10, the following was very slow. 
+export SPOT_CONTAINMENT_CHECK=default for n in 1 2 4 8 16 512 1024 2048 4096 8192; do genaut --cyclist-trace-nba=$n > trace.hoa genaut --cyclist-proof-dba=$n > proof.hoa @@ -68,4 +83,13 @@ for n in 1 2 4 8 16 512 1024 2048 4096 8192; do autfilt -q --included-in=proof.hoa trace.hoa && exit 1 done +# The forq-based version does not scale well on this particular test +export SPOT_CONTAINMENT_CHECK=forq +for n in 1 2 4 8 16 128; do + genaut --cyclist-trace-nba=$n > trace.hoa + genaut --cyclist-proof-dba=$n > proof.hoa + autfilt -q --included-in=trace.hoa proof.hoa || exit 1 + autfilt -q --included-in=proof.hoa trace.hoa && exit 1 +done + : diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py index 3e47bec87..8525f3f92 100644 --- a/tests/python/forq_contains.py +++ b/tests/python/forq_contains.py @@ -324,3 +324,28 @@ State: 11 {0} --END--""") do_symmetric_test(subset, superset) + + +tba = spot.translate('GFa') +tgba = spot.translate('GFa & GFb') +tc.assertTrue(spot.contains(tba, tgba)) +try: + spot.containment_select_version("fork") +except RuntimeError as e: + tc.assertIn("forq", str(e)) +else: + raise RuntimeError("missing exception") +spot.containment_select_version("forq") +tc.assertTrue(spot.contains(tba, tgba)) # does not call contains_forq +try: + spot.contains_forq(tba, tgba) # because contains_forq wants Büchi +except RuntimeError as e: + tc.assertIn("Büchi", str(e)) +else: + raise RuntimeError("missing exception") + +# This shows that exclusive word also depend on +# containment_select_version() +tc.assertEqual(str(one.exclusive_word(both)), "!a & !b; cycle{a}") +spot.containment_select_version("default") +tc.assertEqual(str(one.exclusive_word(both)), "cycle{a}") From 6eff384fca3a1b32711384b53f590508e1eba01c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 14 Sep 2023 11:23:57 +0200 Subject: [PATCH 338/606] forq: remove the relevance test Looks like the comparison operator between std::set> and std::set had a few issue. This is part of an optimization that Pierre Ganty prefers to see removed, so I'm just removing that code. For reference, changing the removed operator<= to the following also seem to fix all tests. static bool operator<=(std::set> const& f, state_set const& set) { auto first1 = set.begin(), last1 = set.end(); auto first2 = f.begin(), last2 = f.end(); for (; first2 != last2; ++first1) if (first1 == last1 || first2->first < *first1) { return false; } else if (first2->first == *first1) { ++first2; // Some states of f may appear twice because of the attached // Boolean. if (first2 != last2 && first2->first == *first1) ++first2; } return true; } * spot/twaalgos/forq_contains.cc: Remove relevance-based optimization. 
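
As a rough sketch (plain Python, not Spot code) of the relation the removed
relevance test appears to have been computing — every state occurring as the
first component of a pair in f must also belong to the plain state set — and
of the duplicate-state subtlety mentioned above:

  # f pairs each state with a Boolean, so the same state may occur twice;
  # a naive parallel walk over both sorted sets goes out of sync on such
  # duplicates, which is what the operator<= above has to skip explicitly.
  f = {(1, False), (1, True), (3, False)}
  s = {1, 3, 4}
  assert {q for q, _ in f} <= s    # the intended "f <= s" relation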
--- spot/twaalgos/forq_contains.cc | 59 ++-------------------------------- 1 file changed, 2 insertions(+), 57 deletions(-) diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index ccb635839..94bad76a6 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -470,7 +470,6 @@ namespace spot::forq bool operator<=(context_set const& other) const override; void add(state initial, std::set> const& other); - bool relevance_test(state_set const& set) const; template void iterate(Callable callable) const @@ -605,8 +604,6 @@ namespace spot::forq for (auto& v_ptr : new_post_f[src]) { auto& [V, word_of_v] = *v_ptr; - if (!V.relevance_test(W)) - continue; auto counter_example = find_counter_example(src, W, word_of_v, setup); @@ -968,52 +965,18 @@ namespace spot::forq } } - static bool operator<=( - std::set> const& f, - std::set> const& s) + static bool operator<=(std::set> const& f, + std::set> const& s) { return std::includes(s.begin(), s.end(), f.begin(), f.end()); } - static bool operator<=( - std::set> const& f, - state_set const& set) - { - if (set.size() < f.size()) - return false; - auto first1 = set.begin(), last1 = set.end(); - auto first2 = f.begin(), last2 = f.begin(); - - for (; first2 != last2; ++first1) - { - if (first1 == last1 || first2->first < *first1) - { - return false; - } - if (first2->first == *first1) - { - ++first2; - } - } - return true; - } - void context_set::add(state initial, std::set> const& other) { states[initial].insert(other.begin(), other.end()); } - bool context_set::relevance_test(state_set const& W) const - { - for (auto& [s1, quazi] : states) - { - if (!(quazi <= W)) - return false; - } - return true; - } - bool context_set::operator<=(context_set const& other) const { for (auto& [s, set] : states) @@ -1022,24 +985,6 @@ namespace spot::forq return false; } return true; - - if (other.states.size() != states.size()) - return false; - auto first1 = other.states.begin(), last1 = other.states.end(); - auto first2 = states.begin(), last2 = states.begin(); - - for (; first2 != last2; ++first1) - { - if (first1 == last1 || first2->first < first1->first) - return false; - if (first2->first == first1->first) - { - if (!(first2->second <= first1->second)) - return false; - ++first2; - } - } - return true; } state_set::state_set(state single_state, bool reversed) From 6ac2416e5de0af3ee429690dfa290f84edd2657c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 13 Sep 2023 17:46:48 +0200 Subject: [PATCH 339/606] forq: remove the "same AP set" restriction * spot/twaalgos/forq_contains.cc: Remove the check. * tests/python/forq_contains.py: Add two test cases for this. 
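
Not part of the patch, but as a sketch of what lifting the restriction makes
possible: the two operands of contains_forq() may now use different sets of
atomic propositions (this mirrors the new test cases):

  import spot
  x = spot.translate('GFa', 'buchi')          # APs: {a}
  y = spot.translate('GFa & GFb', 'buchi')    # APs: {a, b}
  # Before this change, these calls would raise because the AP sets differ.
  assert spot.contains_forq(x, y)             # L(GFa & GFb) ⊆ L(GFa)
  assert not spot.contains_forq(y, x)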
--- spot/twaalgos/forq_contains.cc | 3 --- tests/python/forq_contains.py | 12 ++++++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 94bad76a6..6292dbe76 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -628,9 +628,6 @@ namespace spot if (lhs->get_dict() != rhs->get_dict()) throw std::runtime_error ("The two input automata must use the same twa_dict."); - if (lhs->ap() != rhs->ap()) - throw std::runtime_error("The two input graphs must use the same set " - "of APs"); forq::forq_setup setup = forq::create_forq_setup(lhs, rhs); diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py index 8525f3f92..5c94c3946 100644 --- a/tests/python/forq_contains.py +++ b/tests/python/forq_contains.py @@ -349,3 +349,15 @@ else: tc.assertEqual(str(one.exclusive_word(both)), "!a & !b; cycle{a}") spot.containment_select_version("default") tc.assertEqual(str(one.exclusive_word(both)), "cycle{a}") + +tba2 = spot.translate('GFa & GFb', "buchi") +spot.containment_select_version("default") +tc.assertTrue(spot.contains(tba, tba2)) +tc.assertFalse(spot.contains(tba2, tba)) +spot.containment_select_version("forq") +tc.assertTrue(spot.contains(tba, tba2)) +tc.assertFalse(spot.contains(tba2, tba)) + +a = spot.translate("(p0 & p2) -> G!p1", "buchi") +b = spot.translate("p0 -> G!p1", "buchi") +do_symmetric_test(b, a) From abca0f7fd92e2b93f8618211700972f3c27750c7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 15 Sep 2023 11:25:48 +0200 Subject: [PATCH 340/606] * spot/tl/formula.hh: Typo in comment. --- spot/tl/formula.hh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 074ec8b02..0c7377e1c 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -64,7 +64,7 @@ // Use #if SPOT_HAS_STRONG_X in code that need to be backward // compatible with older Spot versions. # define SPOT_HAS_STRONG_X 1 -// You me #define SPOT_WANT_STRONG_X yourself before including +// You may #define SPOT_WANT_STRONG_X yourself before including // this file to force the use of STRONG_X # define SPOT_WANT_STRONG_X 1 #endif From 202ab92d1d49b24031fd3a5616e89b1a0fc10580 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 19 Sep 2023 09:53:22 +0200 Subject: [PATCH 341/606] ltlsynt: detect APs with constant polarity This implements the first point of issue #529. * spot/tl/apcollect.cc, spot/tl/apcollect.hh (collect_litterals): New function. * bin/ltlsynt.cc: Implement the --polarity option, use collect_litterals() to simplify the specification, finally patch the game, Mealy, or Aiger output. * spot/twaalgos/aiger.cc, spot/twaalgos/aiger.hh: Take a relabeling_map has argument to specify extra APs. * tests/core/ltlsynt.test, tests/core/ltlsynt2.test: Adjust test cases. 
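
As an illustration of the polarity information this relies on, here is a
hypothetical Python session; it assumes the new collect_litterals() is
reachable from the Python bindings like the rest of apcollect.hh (an
assumption), and it reuses the example from the function's documentation:

  import spot
  # collect_litterals() records each atomic proposition together with the
  # polarity of its occurrences; an output AP seen with a single polarity
  # can then be replaced by a constant before the synthesis pipeline runs.
  lits = spot.collect_litterals(spot.formula('G(a -> b) & X(!b & c)'))
  print(sorted(str(l) for l in lits))   # ['!a', '!b', 'b', 'c']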
--- NEWS | 7 ++ bin/ltlsynt.cc | 165 +++++++++++++++++++++++++++++++-------- spot/tl/apcollect.cc | 70 ++++++++++++++++- spot/tl/apcollect.hh | 12 ++- spot/twaalgos/aiger.cc | 66 +++++++++++++--- spot/twaalgos/aiger.hh | 19 ++++- tests/core/ltlsynt.test | 113 +++++++++++++++++++++------ tests/core/ltlsynt2.test | 2 +- 8 files changed, 378 insertions(+), 76 deletions(-) diff --git a/NEWS b/NEWS index 61f7c6df7..b6ad95362 100644 --- a/NEWS +++ b/NEWS @@ -16,6 +16,13 @@ New in spot 2.11.6.dev (not yet released) will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. + - ltlsynt will no check for output atomic propositions that always + have the same polarity in the specification. When this happens, + these output APs are replaced by true or false before running the + synthesis pipeline, and the resulting game, Mealy machine, or + Aiger circuit is eventually patched to include that constant + output. This can be disabled with --polarity=no. + Library: - The following new trivial simplifications have been implemented for SEREs: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 35ac4194b..d2d85caa7 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -55,6 +55,7 @@ enum OPT_HIDE, OPT_INPUT, OPT_OUTPUT, + OPT_POLARITY, OPT_PRINT, OPT_PRINT_AIGER, OPT_PRINT_HOA, @@ -101,6 +102,9 @@ static const argp_option options[] = { "decompose", OPT_DECOMPOSE, "yes|no", 0, "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, + { "polarity", OPT_POLARITY, "yes|no", 0, + "whether to remove atomic propositions that always have the same " + "polarity in the formula to speed things up (enabled by default)", 0 }, { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0, "simplification to apply to the controller (no) nothing, " "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based " @@ -236,6 +240,7 @@ static bool decompose_values[] = }; ARGMATCH_VERIFY(decompose_args, decompose_values); bool opt_decompose_ltl = true; +bool opt_polarity = true; static const char* const simplify_args[] = { @@ -268,8 +273,35 @@ namespace }; static void - dispatch_print_hoa(const spot::const_twa_graph_ptr& game) + dispatch_print_hoa(spot::twa_graph_ptr& game, + const std::vector* input_aps = nullptr, + const spot::relabeling_map* rm = nullptr) { + if (rm && !rm->empty()) // Add any AP we removed + { + assert(input_aps); + auto& sp = spot::get_state_players(game); + + bdd add = bddtrue; + for (auto [k, v]: *rm) + { + int i = game->register_ap(k); + // skip inputs + if (std::find(input_aps->begin(), input_aps->end(), + k.ap_name()) != input_aps->end()) + continue; + if (v.is_tt()) + add &= bdd_ithvar(i); + else if (v.is_ff()) + add &= bdd_nithvar(i); + } + for (auto& e: game->edges()) + if (sp[e.src]) + e.cond &= add; + set_synthesis_outputs(game, + get_synthesis_outputs(game) + & bdd_support(add)); + } if (opt_dot) spot::print_dot(std::cout, game, opt_print_hoa_args); else if (opt_print_pg) @@ -355,10 +387,11 @@ namespace } static int - solve_formula(const spot::formula& f, + solve_formula(spot::formula original_f, const std::vector& input_aps, const std::vector& output_aps) { + spot::formula f = original_f; if (opt_csv) // reset benchmark data gi->bv = spot::synthesis_info::bench_var(); spot::stopwatch sw; @@ -371,47 +404,92 @@ namespace gi->bv->total_time = sw.stop(); }; + // Check if some output propositions are always in positive form, + // or always in negative form. 
+ // In syntcomp, this occurs more frequently for input variables than + // output variable. See issue #529 for some examples. + spot::relabeling_map rm; + if (opt_polarity) + { + std::set lits = spot::collect_litterals(f); + for (const std::string& ap: output_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos && !has_neg) + rm[pos] = spot::formula::tt(); + else if (has_neg && !has_pos) + rm[pos] = spot::formula::ff(); + } + for (const std::string& ap: input_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos && !has_neg) + rm[pos] = spot::formula::ff(); + else if (has_neg && !has_pos) + rm[pos] = spot::formula::tt(); + } + if (!rm.empty()) + { + if (gi->verbose_stream) + { + *gi->verbose_stream << ("the following APs are polarized, " + "they can be replaced by constants:\n"); + for (auto [k, v]: rm) + *gi->verbose_stream << " " << k << " := " << v <<'\n'; + } + f = spot::relabel_apply(f, &rm); + if (gi->verbose_stream) + *gi->verbose_stream << "new formula: " << f << '\n'; + } + } + std::vector sub_form; std::vector> sub_outs; if (opt_decompose_ltl) { auto subs = split_independant_formulas(f, output_aps); if (gi->verbose_stream) - { - *gi->verbose_stream << "there are " - << subs.first.size() - << " subformulas\n"; - } + { + *gi->verbose_stream << "there are " + << subs.first.size() + << " subformulas\n"; + } if (subs.first.size() > 1) - { - sub_form = subs.first; - sub_outs = subs.second; - } + { + sub_form = subs.first; + sub_outs = subs.second; + } } // When trying to split the formula, we can apply transformations that // increase its size. This is why we will use the original formula if it // has not been cut. 
if (sub_form.empty()) - { - sub_form = { f }; - sub_outs.resize(1); - std::transform(output_aps.begin(), output_aps.end(), - std::inserter(sub_outs[0], sub_outs[0].begin()), - [](const std::string& name) { - return spot::formula::ap(name); - }); - } + { + sub_form = { f }; + sub_outs.resize(1); + for (const std::string& apstr: output_aps) + { + spot::formula ap = spot::formula::ap(apstr); + if (rm.find(ap) == rm.end()) + sub_outs[0].insert(ap); + } + } std::vector> sub_outs_str; std::transform(sub_outs.begin(), sub_outs.end(), std::back_inserter(sub_outs_str), - [](const auto& forms) - { - std::vector r; - r.reserve(forms.size()); - for (auto f : forms) - r.push_back(f.ap_name()); - return r; - }); + [](const auto& forms) { + std::vector r; + r.reserve(forms.size()); + for (auto f: forms) + r.push_back(f.ap_name()); + return r; + }); assert((sub_form.size() == sub_outs.size()) && (sub_form.size() == sub_outs_str.size())); @@ -463,7 +541,7 @@ namespace } if (want_game) { - dispatch_print_hoa(arena); + dispatch_print_hoa(arena, &input_aps, &rm); continue; } if (!spot::solve_game(arena, *gi)) @@ -552,7 +630,7 @@ namespace sw2.start(); saig = spot::mealy_machines_to_aig(mealy_machines, opt_print_aiger, input_aps, - sub_outs_str); + sub_outs_str, &rm); if (gi->bv) { gi->bv->aig_time = sw2.stop(); @@ -584,6 +662,27 @@ namespace for (size_t i = 1; i < mealy_machines.size(); ++i) tot_strat = spot::mealy_product(tot_strat, mealy_machines[i].mealy_like); + if (!rm.empty()) // Add any AP we removed + { + bdd add = bddtrue; + for (auto [k, v]: rm) + { + int i = tot_strat->register_ap(k); + // skip inputs (they are don't care) + if (std::find(input_aps.begin(), input_aps.end(), k.ap_name()) + != input_aps.end()) + continue; + if (v.is_tt()) + add &= bdd_ithvar(i); + else if (v.is_ff()) + add &= bdd_nithvar(i); + } + for (auto& e: tot_strat->edges()) + e.cond &= add; + set_synthesis_outputs(tot_strat, + get_synthesis_outputs(tot_strat) + & bdd_support(add)); + } printer.print(tot_strat, timer_printer_dummy); } @@ -597,7 +696,7 @@ namespace // TODO: different options to speed up verification?! spot::translator trans(gi->dict, &gi->opt); - auto neg_spec = trans.run(spot::formula::Not(f)); + auto neg_spec = trans.run(spot::formula::Not(original_f)); if (saig) { // Test the aiger @@ -952,6 +1051,10 @@ parse_opt(int key, char *arg, struct argp_state *) split_aps(arg, *all_output_aps); break; } + case OPT_POLARITY: + opt_polarity = XARGMATCH("--polarity", arg, + decompose_args, decompose_values); + break; case OPT_PRINT: opt_print_pg = true; gi->force_sbacc = true; diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index e1f11372d..74790f1c4 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2015, 2018 Laboratoire de Recherche et +// Copyright (C) 2012, 2014, 2015, 2018, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -63,4 +63,72 @@ namespace spot res &= bdd_ithvar(a->register_ap(f)); return res; } + + atomic_prop_set collect_litterals(formula f) + { + atomic_prop_set res; + + // polirity: 0 = negative, 1 = positive, 2 or more = both. 
+ auto rec = [&res](formula f, unsigned polarity, auto self) + { + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::eword: + return; + case op::ap: + if (polarity != 0) + res.insert(f); + if (polarity != 1) + res.insert(formula::Not(f)); + return; + case op::Not: + case op::NegClosure: + case op::NegClosureMarked: + self(f[0], polarity ^ 1, self); + return; + case op::Xor: + case op::Equiv: + self(f[0], 2, self); + self(f[1], 2, self); + return; + case op::Implies: + case op::UConcat: + self(f[0], polarity ^ 1, self); + self(f[1], polarity, self); + return; + case op::U: + case op::R: + case op::W: + case op::M: + case op::EConcat: + case op::EConcatMarked: + self(f[0], polarity, self); + self(f[1], polarity, self); + return; + case op::X: + case op::F: + case op::G: + case op::Closure: + case op::Or: + case op::OrRat: + case op::And: + case op::AndRat: + case op::AndNLM: + case op::Concat: + case op::Fusion: + case op::Star: + case op::FStar: + case op::first_match: + case op::strong_X: + for (formula c: f) + self(c, polarity, self); + return; + } + }; + rec(f, 1, rec); + return res; + } + } diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index 012916381..42788dc9c 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et +// Copyright (C) 2012, 2013, 2014, 2015, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -59,5 +59,15 @@ namespace spot SPOT_API bdd atomic_prop_collect_as_bdd(formula f, const twa_ptr& a); + + /// \brief Collect the litterals occuring in f + /// + /// This function records each atomic proposition occurring in f + /// along with the polarity of its occurrence. For instance if the + /// formula is `G(a -> b) & X(!b & c)`, then this will output `{!a, + /// b, !b, c}`. + SPOT_API + atomic_prop_set collect_litterals(formula f); + /// @} } diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 7d724b124..e4ba12444 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1541,10 +1541,11 @@ namespace // outputs into an Aig static aig_ptr auts_to_aiger(const std::vector>& - strat_vec, + strat_vec, const char* mode, const std::vector& unused_ins = {}, - const std::vector& unused_outs = {}) + const std::vector& unused_outs = {}, + const relabeling_map* rm = nullptr) { // The aiger circuit can currently noly encode separated mealy machines @@ -1619,6 +1620,23 @@ namespace unused_outs.cbegin(), unused_outs.cend()); + if (rm) + // If we have removed some APs from the original formula, they + // might have dropped out of the output_names list (depending on + // how we split the formula), but they should not have dropped + // from the input_names list. So let's fix the output_names + // lists by adding anything that's not an input and not already + // there. 
+ for (auto [k, v]: *rm) + { + const std::string s = k.ap_name(); + if (std::find(input_names_all.begin(), input_names_all.end(), s) + == input_names_all.end() + && std::find(output_names_all.begin(), output_names_all.end(), s) + == output_names_all.end()) + output_names_all.push_back(s); + } + // Decide on which outcond to use // The edges of the automaton all have the form in&out // due to the unsplit @@ -1962,7 +1980,7 @@ namespace } //Use the best sol circuit.reapply_(sf, ss); - trace << "Finished encoding, reasssigning\n" + trace << "Finished encoding, reassigning\n" << "Final gate count is " << circuit.num_gates() << '\n'; // Reset them for (unsigned i = 0; i < n_outs; ++i) @@ -1970,7 +1988,25 @@ namespace // Add the unused propositions const unsigned n_outs_all = output_names_all.size(); for (unsigned i = n_outs; i < n_outs_all; ++i) - circuit.set_output(i, circuit.aig_false()); + if (rm) + { + if (auto to = rm->find(formula::ap(output_names_all[i])); + to != rm->end()) + { + if (to->second.is_tt()) + { + circuit.set_output(i, circuit.aig_true()); + continue; + } + else if (to->second.is_ff()) + { + circuit.set_output(i, circuit.aig_false()); + continue; + } + } + } + else + circuit.set_output(i, circuit.aig_false()); for (unsigned i = 0; i < n_latches; ++i) circuit.set_next_latch(i, bdd2var_min(latch[i], bddfalse)); return circuit_ptr; @@ -2002,8 +2038,9 @@ namespace spot aig_ptr mealy_machine_to_aig(const twa_graph_ptr &m, const char *mode, - const std::vector& ins, - const std::vector& outs) + const std::vector& ins, + const std::vector& outs, + const relabeling_map* rm) { if (!m) throw std::runtime_error("mealy_machine_to_aig(): " @@ -2036,19 +2073,20 @@ namespace spot } // todo Some additional checks? return auts_to_aiger({{m, get_synthesis_outputs(m)}}, mode, - unused_ins, unused_outs); + unused_ins, unused_outs, rm); } aig_ptr mealy_machine_to_aig(mealy_like& m, const char *mode, const std::vector& ins, - const std::vector& outs) + const std::vector& outs, + const relabeling_map* rm) { if (m.success != mealy_like::realizability_code::REALIZABLE_REGULAR) throw std::runtime_error("mealy_machine_to_aig(): " "Can only handle regular mealy machine, yet."); - return mealy_machine_to_aig(m.mealy_like, mode, ins, outs); + return mealy_machine_to_aig(m.mealy_like, mode, ins, outs, rm); } aig_ptr @@ -2107,7 +2145,8 @@ namespace spot mealy_machines_to_aig(const std::vector& m_vec, const char *mode, const std::vector& ins, - const std::vector>& outs) + const std::vector>& outs, + const relabeling_map* rm) { if (m_vec.empty()) throw std::runtime_error("mealy_machines_to_aig(): No strategy given."); @@ -2164,14 +2203,15 @@ namespace spot if (!used_aps.count(ai)) unused_ins.push_back(ai); - return auts_to_aiger(new_vec, mode, unused_ins, unused_outs); + return auts_to_aiger(new_vec, mode, unused_ins, unused_outs, rm); } aig_ptr mealy_machines_to_aig(const std::vector& strat_vec, const char* mode, const std::vector& ins, - const std::vector>& outs) + const std::vector>& outs, + const relabeling_map* rm) { // todo extend to TGBA and possibly others const unsigned ns = strat_vec.size(); @@ -2205,7 +2245,7 @@ namespace spot "success identifier."); } } - return mealy_machines_to_aig(m_machines, mode, ins, outs_used); + return mealy_machines_to_aig(m_machines, mode, ins, outs_used, rm); } std::ostream & diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index bd0424e8e..77ef2d827 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -25,6 +25,7 @@ #include #include 
#include +#include #include #include @@ -436,20 +437,25 @@ namespace spot /// If \a ins and \a outs are specified, the named-property /// synthesis-output is ignored and all properties in \a ins and \a /// outs are guaranteed to appear in the aiger circuit. + /// + /// If \a rm is given and is not empty, it can be used to specify how + /// unused output should be encoded by mapping them to some constant. ///@{ SPOT_API aig_ptr mealy_machine_to_aig(const const_twa_graph_ptr& m, const char* mode); SPOT_API aig_ptr mealy_machine_to_aig(const twa_graph_ptr& m, const char *mode, const std::vector& ins, - const std::vector& outs); + const std::vector& outs, + const relabeling_map* rm = nullptr); SPOT_API aig_ptr mealy_machine_to_aig(const mealy_like& m, const char* mode); SPOT_API aig_ptr mealy_machine_to_aig(mealy_like& m, const char *mode, const std::vector& ins, - const std::vector& outs); + const std::vector& outs, + const relabeling_map* rm = nullptr); ///@} /// \ingroup synthesis @@ -465,6 +471,9 @@ namespace spot /// during the call to ltl_to_game() are absent. /// If \a ins and \a outs are used, all properties they list are /// guaranteed to appear in the aiger circuit. + /// + /// If \a rm is given and is not empty, it can be used to specify how + /// unused output should be encoded by mapping them to some constant. /// @{ SPOT_API aig_ptr mealy_machines_to_aig(const std::vector& m_vec, @@ -476,12 +485,14 @@ namespace spot mealy_machines_to_aig(const std::vector& m_vec, const char* mode, const std::vector& ins, - const std::vector>& outs); + const std::vector>& outs, + const relabeling_map* rm = nullptr); SPOT_API aig_ptr mealy_machines_to_aig(const std::vector& m_vec, const char* mode, const std::vector& ins, - const std::vector>& outs); + const std::vector>& outs, + const relabeling_map* rm = nullptr); /// @} /// \ingroup twa_io diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 02d248754..ae476e71d 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -227,15 +227,19 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < G(i1 <-> o0) there are 1 subformulas -trying to create strategy directly for G(Fi0 & Fi1 & Fi2) -> G(i1 <-> o0) +trying to create strategy directly for GFi1 -> G(i1 <-> o0) direct strategy might exist but was not found. translating formula done in X seconds -automaton has 2 states and 3 colors +automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 4 states, 1 colors +DPA has 2 states, 1 colors split inputs and outputs done in X seconds -automaton has 12 states +automaton has 6 states solving game with acceptance: co-Büchi game solved in X seconds EOF @@ -386,7 +390,8 @@ State: 2 [!0] 2 --END-- EOF -ltlsynt --outs=p0 -x tls-impl=0 --simpl=no -f '!XXF(p0 & (p0 M Gp0))' > out +ltlsynt --outs=p0 -x tls-impl=0 --polar=no --simpl=no \ + -f '!XXF(p0 & (p0 M Gp0))' > out diff out exp cat >exp < out +ltlsynt --outs=p0 -x tls-impl=1 --polar=no -f '!XXF(p0 & (p0 M Gp0))' > out +diff out exp +ltlsynt --outs=p0 -x tls-impl=0 -f '!XXF(p0 & (p0 M Gp0))' > out diff out exp ltlsynt --outs=p0 -f '!XXF(p0 & (p0 M Gp0))' > out @@ -637,19 +644,19 @@ tanslating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds -trying to create strategy directly for Gc +trying to create strategy directly for G(c <-> d) direct strategy was found. 
direct strat has 1 states, 1 edges and 0 colors simplification took X seconds EOF -ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out +ltlsynt -f '(GFa <-> GFb) && (G(c <-> d))' --outs=b,c --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp -# Try to find a direct strategy for (GFa <-> GFb) & Gc. The order should not -# impact the result -for f in "(GFa <-> GFb) & Gc" "(GFb <-> GFa) & Gc" \ - "Gc & (GFa <-> GFb)" "Gc & (GFb <-> GFa)" +# Try to find a direct strategy for (GFa <-> GFb) & G(c <-> d). The +# order should not impact the result +for f in "(GFa <-> GFb) & G(c <-> d)" "(GFb <-> GFa) & G(c <-> d)" \ + "G(c <-> d) & (GFa <-> GFb)" "G(c <-> d) & (GFb <-> GFa)" do cat >exp <exp < GFa) & G(a & c) +the following APs are polarized, they can be replaced by constants: + c := 1 +new formula: (GFb <-> GFa) & Ga +trying to create strategy directly for (GFb <-> GFa) & Ga no strategy exists. EOF ltlsynt -f '(GFb <-> GFa) && G(a&c)' --outs=b,c --verbose\ @@ -747,8 +757,43 @@ game solved in X seconds simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF -ltlsynt -f '((a|x) & (b | y) & b) => (x & y)' --outs="x,y" --aiger=ite\ - --verify --verbose 2> out +ltlsynt -f '((a|x) & (b | y) & b) => (x & y)' \ + --outs="x,y" --aiger=ite --pol=no --verify --verbose 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +cat >exp < (x & y)' \ + --outs="x,y" --aiger=ite --verify --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -765,7 +810,8 @@ direct strat has 1 states, 1 edges and 0 colors simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF -ltlsynt -f 'G!(!x | !y)' --outs="x, y" --aiger=ite --verify --verbose 2> out +ltlsynt -f 'G!(!x | !y)' --outs="x, y" --pol=no --aiger=ite \ + --verify --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -775,7 +821,8 @@ there are 2 subformulas trying to create strategy directly for G!a no strategy exists. EOF -ltlsynt -f '!F(a|b)' --outs=b --decompose=yes --aiger --verbose 2> out || true +ltlsynt -f '!F(a|b)' --outs=b --pol=no --decompose=yes \ + --aiger --verbose 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -786,7 +833,7 @@ trying to create strategy directly for Ga no strategy exists. EOF ltlsynt -f 'G!(a -> b)' --outs=b --decompose=yes --aiger\ - --verbose 2> out || true + --pol=no --verbose 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -807,7 +854,7 @@ simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f '(a & b) U (b & c)' --outs=b,c --decompose=yes --aiger --verbose\ - --verify 2> out + --pol=no --verify 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -851,7 +898,7 @@ simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f 'a => (b & c & d)' --outs=b,c,d, --decompose=yes\ - --verbose --aiger 2> out + --pol=no --verbose --aiger 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -862,7 +909,7 @@ trying to create strategy directly for G!a no strategy exists. 
EOF ltlsynt -f '!(F(a | b))' --outs=b, --decompose=yes \ - --verbose --aiger 2> out || true + --verbose --pol=no --aiger 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -887,7 +934,7 @@ ltlsynt --outs="$OUT" -f "$LTL" --aiger=both+ud\ --algo=lar | grep "aag 34 2 3 2 29" ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ - --verbose --realizability 2> out + --verbose --pol=no --realizability 2> out cat >exp < GFb)' --outs=b,c --decompose=yes\ +ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes --pol=no \ --verbose --realizability --bypass=no 2> out cat >exp < GFb) && (Gc)' --outs=b,c --verbose --bypass=no\ - --algo=acd 2> out + --algo=acd --pol=no 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -967,7 +1014,7 @@ solving game with acceptance: Büchi game solved in X seconds EOF ltlsynt -f "G(o1) & (GFi <-> GFo1)" --outs="o1" --verbose\ - --bypass=yes 2> out || true + --bypass=yes --pol=no 2> out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -985,6 +1032,22 @@ solving game with acceptance: Streett 1 game solved in X seconds simplification took X seconds EOF +ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ + --bypass=yes --pol=no 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +cat >exp < GFo1 +there are 1 subformulas +trying to create strategy directly for GFi <-> GFo1 +tanslating formula done in X seconds +direct strategy was found. +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds +EOF ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ --bypass=yes 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index 546cb0d27..f6c7787fe 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -61,7 +61,7 @@ G(i1 <-> Xo1),lar,1,3 F(i1 xor i2) <-> Fo1,lar,1,2 i1 <-> F(o1 xor o2),lar,1,3 Fi1 <-> Go2,lar,0,0 -o1 & F(i1 <-> o2),lar,1,4 +o1 & F(i1 <-> o2),lar,1,2 EOF diff filtered.csv expected From 858629dd3ad9b13e253f187cc1ea71e098c3d911 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 20 Sep 2023 00:00:15 +0200 Subject: [PATCH 342/606] twagraph: fix merge_states() on automata without edges This corner case was simply causing segfaults. * tests/python/mergedge.py: Add a test case. * spot/twa/twagraph.cc (merge_states): Add special handling for the case where the automaton has no edges. --- spot/twa/twagraph.cc | 23 ++++++++++++++++++----- tests/python/mergedge.py | 22 +++++++++++++++++++++- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 3f74d4d99..50145803d 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -363,6 +363,24 @@ namespace spot throw std::runtime_error( "twa_graph::merge_states() does not work on alternating automata"); + const unsigned n_states = num_states(); + + const auto& e_vec = edge_vector(); + unsigned n_edges = e_vec.size(); + if (n_edges <= 1) + { + if (n_states == 1) + return 0; + // We don't have a very convenient way to resize the state + // vector. 
+ std::vector remap(n_states, -1U); + remap[0] = 0; + get_graph().defrag_states(remap, 1); + SPOT_ASSERT(num_states() == 1); + set_init_state(0); + return n_states - 1; + } + #ifdef ENABLE_PTHREAD const unsigned nthreads = ppolicy.nthreads(); #else @@ -387,8 +405,6 @@ namespace spot }, nthreads); g_.chain_edges_(); - const unsigned n_states = num_states(); - // Edges are nicely chained and there are no erased edges // -> We can work with the edge_vector @@ -406,9 +422,6 @@ namespace spot for (unsigned i = 0; i < n_states; ++i) hash_of_state.push_back(i); - const auto& e_vec = edge_vector(); - unsigned n_edges = e_vec.size(); - // For each state we need 4 indices of the edge vector // [first, first_non_sfirst_selflooplfloop, first_selfloop, end] // The init value makes sure nothing is done for dead end states diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index b3e934946..e2c88874e 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020-2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2020-2023 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -56,6 +56,26 @@ aut.merge_edges() tc.assertEqual(aut.num_edges(), 5) tc.assertTrue(spot.is_deterministic(aut)) +aut = spot.automaton(""" +HOA: v1 +States: 3 +Start: 2 +AP: 0 +Acceptance: 0 t +--BODY-- +State: 0 +State: 1 +State: 2 +--END--""") +tc.assertEqual(aut.num_states(), 3) +tc.assertEqual(aut.num_edges(), 0) +tc.assertEqual(aut.get_init_state_number(), 2) +tc.assertEqual(aut.merge_states(), 2); +tc.assertEqual(aut.num_states(), 1) +tc.assertEqual(aut.num_edges(), 0) +tc.assertEqual(aut.get_init_state_number(), 0) +tc.assertEqual(aut.merge_states(), 0); + for nthread in range(1, 16, 2): aut = spot.automaton(""" HOA: v1 From 6dc11b4715957b07ef8d2e392945b8014667cdc9 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 20 Jun 2023 15:10:00 +0200 Subject: [PATCH 343/606] notebooks: correction of typos * tests/python/_partitioned_relabel.ipynb, tests/python/_product_weak.ipynb, tests/python/acc_cond.ipynb, tests/python/aliases.ipynb, tests/python/automata.ipynb, tests/python/cav22-figs.ipynb, tests/python/contains.ipynb, tests/python/decompose.ipynb, tests/python/formulas.ipynb, tests/python/games.ipynb, tests/python/highlighting.ipynb, tests/python/ltsmin-dve.ipynb, tests/python/parity.ipynb, tests/python/product.ipynb, tests/python/satmin.ipynb, tests/python/stutter-inv.ipynb, tests/python/synthesis.ipynb, tests/python/twagraph-internals.ipynb, tests/python/word.ipynb, tests/python/zlktree.ipynb: here --- tests/python/_partitioned_relabel.ipynb | 2 +- tests/python/_product_weak.ipynb | 2 +- tests/python/acc_cond.ipynb | 50 ++++++++++++++--- tests/python/aliases.ipynb | 4 +- tests/python/automata.ipynb | 4 +- tests/python/cav22-figs.ipynb | 4 +- tests/python/contains.ipynb | 4 +- tests/python/decompose.ipynb | 31 +++++++++-- tests/python/formulas.ipynb | 4 +- tests/python/games.ipynb | 6 +- tests/python/highlighting.ipynb | 2 +- tests/python/ltsmin-dve.ipynb | 4 +- tests/python/parity.ipynb | 6 +- tests/python/product.ipynb | 10 ++-- tests/python/satmin.ipynb | 2 +- tests/python/stutter-inv.ipynb | 72 +++++++++++++++++------- tests/python/synthesis.ipynb | 10 ++-- tests/python/twagraph-internals.ipynb | 32 ++++++++++- tests/python/word.ipynb | 2 +- tests/python/zlktree.ipynb | 73 ++++++++++++++++++++----- 20 files changed, 242 
insertions(+), 82 deletions(-) diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb index b7f1c4380..549eb04be 100644 --- a/tests/python/_partitioned_relabel.ipynb +++ b/tests/python/_partitioned_relabel.ipynb @@ -1047,7 +1047,7 @@ "\n", "concerned_aps = a & b # concerned aps are given as a conjunction of positive aps\n", "# As partitioning can be exponentially costly,\n", - "# one can limit the number of new letters generated before abadoning\n", + "# one can limit the number of new letters generated before abandoning\n", "# This can be done either as a hard limit and/or as the number of current condition\n", "# times a factor\n", "relabel_dict = spot.partitioned_relabel_here(aut, False, 1000, 1000, concerned_aps)\n", diff --git a/tests/python/_product_weak.ipynb b/tests/python/_product_weak.ipynb index e52b9b793..745688fa1 100644 --- a/tests/python/_product_weak.ipynb +++ b/tests/python/_product_weak.ipynb @@ -1819,7 +1819,7 @@ "autslen = len(auts)\n", "# In a previous version we used to iterate over all possible left automata with \"for left in auts:\"\n", "# however we had trouble with Jupyter on i386, where running the full loop abort with some low-level \n", - "# exeptions from Jupyter client. Halving the loop helped for some times, but then the timeout\n", + "# exceptions from Jupyter client. Halving the loop helped for some times, but then the timeout\n", "# came back. So we do one left automaton at a time.\n", "left = auts[0]\n", "display(left)\n", diff --git a/tests/python/acc_cond.ipynb b/tests/python/acc_cond.ipynb index 492c416ca..f2773938b 100644 --- a/tests/python/acc_cond.ipynb +++ b/tests/python/acc_cond.ipynb @@ -11,6 +11,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -32,9 +33,9 @@ "\n", "Note that the number of sets given can be larger than what is actually needed by the acceptance formula.\n", "\n", - "Transitions in automata can be tagged as being part of some member sets, and a path in the automaton is accepting if the set of acceptance sets visited along this path satify the acceptance condition.\n", + "Transitions in automata can be tagged as being part of some member sets, and a path in the automaton is accepting if the set of acceptance sets visited along this path satisfy the acceptance condition.\n", "\n", - "Definining acceptance conditions in Spot involves three different types of C++ objects:\n", + "Defining acceptance conditions in Spot involves three different types of C++ objects:\n", "\n", "- `spot::acc_cond` is used to represent an acceptance condition, that is: a number of sets and a formula.\n", "- `spot::acc_cond::acc_code`, is used to represent Boolean formula for the acceptance condition using a kind of byte code (hence the name)\n", @@ -99,10 +100,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "As seen above, the sequence of set numbers can be specified using a list or a tuple. While from the Python language point of view, using a tuple is faster than using a list, the overhead to converting all the arguments from Python to C++ and then converting the resuslting back from C++ to Python makes this difference completely negligeable. In the following, we opted to use lists, because brackets are more readable than nested parentheses." + "As seen above, the sequence of set numbers can be specified using a list or a tuple. 
While from the Python language point of view, using a tuple is faster than using a list, the overhead to converting all the arguments from Python to C++ and then converting the resulting back from C++ to Python makes this difference completely negligible. In the following, we opted to use lists, because brackets are more readable than nested parentheses." ] }, { @@ -129,6 +131,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -191,6 +194,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -216,6 +220,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -247,6 +252,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -274,6 +280,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -324,6 +331,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -351,12 +359,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## `acc_code`\n", "\n", - "`acc_code` encodes the formula of the acceptance condition using a kind of bytecode that basically corresponds to an encoding in [reverse Polish notation](http://en.wikipedia.org/wiki/Reverse_Polish_notation) in which conjunctions of `Inf(n)` terms, and disjunctions of `Fin(n)` terms are grouped. In particular, the frequently-used genaralized-Büchi acceptance conditions (like `Inf(0)&Inf(1)&Inf(2)`) are always encoded as a single term (like `Inf({0,1,2})`).\n", + "`acc_code` encodes the formula of the acceptance condition using a kind of bytecode that basically corresponds to an encoding in [reverse Polish notation](http://en.wikipedia.org/wiki/Reverse_Polish_notation) in which conjunctions of `Inf(n)` terms, and disjunctions of `Fin(n)` terms are grouped. In particular, the frequently-used generalized-Büchi acceptance conditions (like `Inf(0)&Inf(1)&Inf(2)`) are always encoded as a single term (like `Inf({0,1,2})`).\n", "\n", "The simplest way to construct an `acc_code` by passing a string that represent the formula to build." ] @@ -391,6 +400,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -418,6 +428,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -444,6 +455,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -471,6 +483,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -539,6 +552,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -589,6 +603,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -615,6 +630,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -727,6 +743,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -762,6 +779,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -791,10 +809,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "The `used_inf_fin_sets()` returns a pair of marks instead, the first one with all sets occuring in `Inf`, and the second one with all sets appearing in `Fin`." + "The `used_inf_fin_sets()` returns a pair of marks instead, the first one with all sets occurring in `Inf`, and the second one with all sets appearing in `Fin`." 
] }, { @@ -818,6 +837,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -853,6 +873,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -888,6 +909,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -956,6 +978,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1026,6 +1049,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1054,6 +1078,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1083,6 +1108,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1112,6 +1138,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1143,6 +1170,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1174,6 +1202,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1206,6 +1235,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1234,6 +1264,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1261,6 +1292,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1288,10 +1320,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "For convencience, the `accepting()` method of `acc_cond` delegates to that of the `acc_code`. \n", + "For convenience, the `accepting()` method of `acc_cond` delegates to that of the `acc_code`. \n", "Any set passed to `accepting()` that is not used by the acceptance formula has no influence." ] }, @@ -1317,6 +1350,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1385,6 +1419,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1411,12 +1446,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "`fin_one()` return the number of one color `x` that appears as `Fin(x)` in the formula, or `-1` if the formula is Fin-less.\n", "\n", - "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` used to appear but where `Fin(x)` have been replaced by `true`, and `Inf(x)` by `false`. Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is prefered to 2." + "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` used to appear but where `Fin(x)` have been replaced by `true`, and `Inf(x)` by `false`. Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is preferred to 2." 
] }, { diff --git a/tests/python/aliases.ipynb b/tests/python/aliases.ipynb index 4cca2fa86..7b3194335 100644 --- a/tests/python/aliases.ipynb +++ b/tests/python/aliases.ipynb @@ -18,7 +18,7 @@ "id": "4dc12445", "metadata": {}, "source": [ - "Aliases is a feature of the HOA format that allows Boolean formulas to be named and reused to label automata. This can be helpful to reduce the size of a file, but it can also be abused to \"fake\" arbritary alphabets by using an alphabet of $n$ aliases encoded over $\\log_2(n)$ atomic propositions. \n", + "Aliases is a feature of the HOA format that allows Boolean formulas to be named and reused to label automata. This can be helpful to reduce the size of a file, but it can also be abused to \"fake\" arbitrary alphabets by using an alphabet of $n$ aliases encoded over $\\log_2(n)$ atomic propositions. \n", "\n", "Spot knows how to read HOA files containing aliases since version 2.0. However support for producing files with aliases was only added in version 2.11.\n", "\n", @@ -653,7 +653,7 @@ "source": [ "Notice how `p0` and `!p0` were rewritten as disjunction of aliases because no direct aliases could be found for them. \n", "\n", - "Generaly, the display code tries to format formulas as a sum of product. It wil recognize conjunctions and disjunctions of aliases, but if it fails, it will resort to printing the original atomic propositions (maybe mixed with aliases)." + "Generally, the display code tries to format formulas as a sum of product. It will recognize conjunctions and disjunctions of aliases, but if it fails, it will resort to printing the original atomic propositions (maybe mixed with aliases)." ] }, { diff --git a/tests/python/automata.ipynb b/tests/python/automata.ipynb index ee62dd0bd..4497408bc 100644 --- a/tests/python/automata.ipynb +++ b/tests/python/automata.ipynb @@ -194,7 +194,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The call the `spot.setup()` in the first cells has installed a default style for the graphviz output. If you want to change this style temporarily, you can call the `show(style)` method explicitely. For instance here is a vertical layout with the default font of GraphViz." + "The call the `spot.setup()` in the first cells has installed a default style for the graphviz output. If you want to change this style temporarily, you can call the `show(style)` method explicitly. For instance here is a vertical layout with the default font of GraphViz." ] }, { @@ -3013,7 +3013,7 @@ } ], "source": [ - "# Using +1 in the display options is a convient way to shift the \n", + "# Using +1 in the display options is a convenient way to shift the \n", "# set numbers in the output, as an aid in reading the product.\n", "a1 = spot.translate('GF(a <-> Xa)')\n", "print(a1.prop_weak())\n", diff --git a/tests/python/cav22-figs.ipynb b/tests/python/cav22-figs.ipynb index ea84319a2..d232bebe2 100644 --- a/tests/python/cav22-figs.ipynb +++ b/tests/python/cav22-figs.ipynb @@ -990,7 +990,7 @@ "source": [ "# Figure 3\n", "\n", - "Fig. 3 shows an example of game generated by `ltlsynt` from the LTL specification of a reactive controler, and then how this game can be encoded into an And-Inverter-Graph.\n", + "Fig. 
3 shows an example of game generated by `ltlsynt` from the LTL specification of a reactive controller, and then how this game can be encoded into an And-Inverter-Graph.\n", "First we retrieve the game generated by `ltlsynt` (any argument passed to `spot.automaton` is interpreted as a command if it ends with a pipe), then we solve it to compute a possible winning strategy. \n", "\n", "Player 0 plays from round states and tries to violate the acceptance condition; Player 1 plays from diamond states and tries to satisfy the acceptance condition. Once a game has been solved, the `highlight_strategy` function will decorate the automaton with winning region and computed strategies for player 0 and 1 in red and green respectively. Therefore this game is winning for player 1 from the initial state.\n", @@ -1295,7 +1295,7 @@ "metadata": {}, "source": [ "The `solved_game_to_mealy()` shown in the paper does not always produce the same type of output, so it is\n", - "better to explicitely call `solved_game_to_split_mealy()` or `solved_game_to_separated_mealy()` depending on the type of output one need. We also show how to use the `reduce_mealy()` method to simplify one." + "better to explicitly call `solved_game_to_split_mealy()` or `solved_game_to_separated_mealy()` depending on the type of output one need. We also show how to use the `reduce_mealy()` method to simplify one." ] }, { diff --git a/tests/python/contains.ipynb b/tests/python/contains.ipynb index ca8e65bdc..b16d896db 100644 --- a/tests/python/contains.ipynb +++ b/tests/python/contains.ipynb @@ -252,7 +252,7 @@ "source": [ "# Containement checks between formulas with cache\n", "\n", - "In the case of containement checks between formulas, `language_containement_checker` instances provide similar services, but they cache automata representing the formulas checked. This should be prefered when performing several containement checks using the same formulas." + "In the case of containement checks between formulas, `language_containement_checker` instances provide similar services, but they cache automata representing the formulas checked. This should be preferred when performing several containement checks using the same formulas." ] }, { @@ -312,7 +312,7 @@ "\n", "Assume you have computed two automata, that `are_equivalent(a1, a2)` returns `False`, and you want to know why. (This often occur when debugging some algorithm that produce an automaton that is not equivalent to which it should.) The automaton class has a method called `a1.exclusive_run(a2)` that can help with this task: it returns a run that recognizes a word is is accepted by one of the two automata but not by both. The method `a1.exclusive_word(a2)` will return just a word.\n", "\n", - "For instance let's find a word that is exclusive between `aut_f` and `aut_g`. (The adjective *exlusive* is a reference to the *exclusive or* operator: the word belongs to L(aut_f) \"xor\" it belongs to L(aut_g).)" + "For instance let's find a word that is exclusive between `aut_f` and `aut_g`. 
(The adjective *exclusive* is a reference to the *exclusive or* operator: the word belongs to L(aut_f) \"xor\" it belongs to L(aut_g).)" ] }, { diff --git a/tests/python/decompose.ipynb b/tests/python/decompose.ipynb index d7c2061a2..898ed3f91 100644 --- a/tests/python/decompose.ipynb +++ b/tests/python/decompose.ipynb @@ -12,6 +12,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -28,9 +29,9 @@ "- an accepting SCC is **strictly inherently weak** if it is *inherently weak* and not complete (in other words: *weak* but not *terminal*)\n", "- an accepting SCC is **strong** if it is not inherently weak.\n", "\n", - "The strengths **strong**, **stricly inherently weak**, and **inherently terminal** define a partition of all accepting SCCs. The following Büchi automaton has 4 SCCs, and its 3 accepting SCCs show an example of each strength.\n", + "The strengths **strong**, **strictly inherently weak**, and **inherently terminal** define a partition of all accepting SCCs. The following Büchi automaton has 4 SCCs, and its 3 accepting SCCs show an example of each strength.\n", "\n", - "Note: the reason we use the word *inherently* is that the *weak* and *terminal* properties are usually defined syntactically: an accepting SCC would be weak if all its transitions belong to the same acceptance sets. This syntactic criterion is a sufficient condition for an accepting SCC to not have any rejecting cycle, but it is not necessary. Hence a *weak* SCC is *inherently weak*; but while an *inherently weak* SCC is not necessarily *weak*, it can be modified to be *weak* without alterning the langage." + "Note: the reason we use the word *inherently* is that the *weak* and *terminal* properties are usually defined syntactically: an accepting SCC would be weak if all its transitions belong to the same acceptance sets. This syntactic criterion is a sufficient condition for an accepting SCC to not have any rejecting cycle, but it is not necessary. Hence a *weak* SCC is *inherently weak*; but while an *inherently weak* SCC is not necessarily *weak*, it can be modified to be *weak* without altering the language." ] }, { @@ -214,6 +215,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -225,7 +227,7 @@ "- `w`: (strictly inherently) weak\n", "- `s`: strong\n", "\n", - "For instance if we want to preserve only the **strictly inherently weak** part of this automaton, we should get only the SCC with the self-loop on $b$, and the SCC above it so that we can reach it. However the SCC above is not stricly weak, so it should not accept any word in the new automaton." + "For instance if we want to preserve only the **strictly inherently weak** part of this automaton, we should get only the SCC with the self-loop on $b$, and the SCC above it so that we can reach it. However the SCC above is not strictly weak, so it should not accept any word in the new automaton." 
] }, { @@ -343,6 +345,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -502,6 +505,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -600,6 +604,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -682,6 +687,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1124,6 +1130,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1536,6 +1543,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2355,12 +2363,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Note how the two weak automata (i.e., stricly weak and terminal) are now using a Büchi acceptance condition (because that is sufficient for weak automata) while the strong automaton inherited the original acceptance condition.\n", + "Note how the two weak automata (i.e., strictly weak and terminal) are now using a Büchi acceptance condition (because that is sufficient for weak automata) while the strong automaton inherited the original acceptance condition.\n", "\n", - "When extracting multiple strengths and one of the strength is **strong**, we preserve the original acceptance. For instance extracting **strong** and **inherently terminal** gives the following automaton, where only **stricly inherently weak** SCCs have become rejecting." + "When extracting multiple strengths and one of the strength is **strong**, we preserve the original acceptance. For instance extracting **strong** and **inherently terminal** gives the following automaton, where only **strictly inherently weak** SCCs have become rejecting." ] }, { @@ -2741,6 +2750,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3188,6 +3198,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4219,6 +4230,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4566,6 +4578,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4577,7 +4590,7 @@ "\n", "### `Acceptance: 0 t`\n", "\n", - "This occur frequently whant translating LTL formulas that are safety properties:" + "This occur frequently when translating LTL formulas that are safety properties:" ] }, { @@ -4863,6 +4876,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4957,6 +4971,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4964,6 +4979,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5127,6 +5143,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5509,6 +5526,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5854,6 +5872,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 7075cf653..714220c2a 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -164,7 +164,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you prefer to print the string in another syntax, you may use the `to_str()` method, with an argument that indicates the output format to use. The `latex` format assumes that you will the define macros such as `\\U`, `\\R` to render all operators as you wish. 
On the otherhand, the `sclatex` (with `sc` for self-contained) format hard-codes the rendering of each of those operators: this is almost the same output that is used to render formulas using MathJax in a notebook. `sclatex` and `mathjax` only differ in the rendering of double-quoted atomic propositions." + "If you prefer to print the string in another syntax, you may use the `to_str()` method, with an argument that indicates the output format to use. The `latex` format assumes that you will the define macros such as `\\U`, `\\R` to render all operators as you wish. On the other hand, the `sclatex` (with `sc` for self-contained) format hard-codes the rendering of each of those operators: this is almost the same output that is used to render formulas using MathJax in a notebook. `sclatex` and `mathjax` only differ in the rendering of double-quoted atomic propositions." ] }, { @@ -342,7 +342,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Similarly, `is_syntactic_stutter_invariant()` tells wether the structure of the formula guarranties it to be stutter invariant. For LTL formula, this means the `X` operator should not be used. For PSL formula, this function capture all formulas built using the [siPSL grammar](http://www.daxc.de/eth/paper/09atva.pdf)." + "Similarly, `is_syntactic_stutter_invariant()` tells whether the structure of the formula guaranties it to be stutter invariant. For LTL formula, this means the `X` operator should not be used. For PSL formula, this function capture all formulas built using the [siPSL grammar](http://www.daxc.de/eth/paper/09atva.pdf)." ] }, { diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index 9ec8bb76e..863e7efdc 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -228,7 +228,7 @@ "metadata": {}, "source": [ "The `set_state_players()` function takes a list of owner for each of the states in the automaton. In the output,\n", - "states from player 0 use circles, ellispes, or rectangle with rounded corners (mnemonic: 0 is round) while states from player 1 have a losanse shape (1 has only straight lines). \n", + "states from player 0 use circles, ellipses, or rectangle with rounded corners (mnemonic: 0 is round) while states from player 1 have a losanse shape (1 has only straight lines). \n", "\n", "\n", "State ownership can also be manipulated by the following functions:" @@ -914,7 +914,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In the graphical output, player 0 is represented by circles (or ellipses or rounded rectangles depending on the situations), while player 1's states are diamond shaped. In the case of `ltlsynt`, player 0 plays the role of the environment, and player 1 plays the role of the controler.\n", + "In the graphical output, player 0 is represented by circles (or ellipses or rounded rectangles depending on the situations), while player 1's states are diamond shaped. In the case of `ltlsynt`, player 0 plays the role of the environment, and player 1 plays the role of the controller.\n", "\n", "In the HOA output, a header `spot-state-player` (or `spot.state-player` in HOA 1.1) lists the owner of each state." ] @@ -1670,7 +1670,7 @@ "\n", "The parity game solver now supports \"local\" and global solutions.\n", "\n", - "- \"Local\" solutions are the ones computed so far. A strategy is only computed for the part of the automaton that is rachable from the initial state\n", + "- \"Local\" solutions are the ones computed so far. 
A strategy is only computed for the part of the automaton that is reachable from the initial state\n", "- Global solutions can now be obtained by setting the argument \"solve_globally\" to true. In this case a strategy will be computed even for states not reachable in the original automaton.\n" ] }, diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index 12077b8c9..15f9047a8 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -153,7 +153,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Using these numbers you can selectively hightlight some transitions. The second argument is a color number (from a list of predefined colors)." + "Using these numbers you can selectively highlight some transitions. The second argument is a color number (from a list of predefined colors)." ] }, { diff --git a/tests/python/ltsmin-dve.ipynb b/tests/python/ltsmin-dve.ipynb index 63df611c9..05530ca8e 100644 --- a/tests/python/ltsmin-dve.ipynb +++ b/tests/python/ltsmin-dve.ipynb @@ -93,7 +93,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Compiling the model creates all several kinds of files. The `test1.dve` file is converted into a C++ source code `test1.dve.cpp` which is then compiled into a shared library `test1.dve2c`. Becauce `spot.ltsmin.load()` has already loaded this shared library, all those files can be erased. If you do not erase the files, `spot.ltsmin.load()` will use the timestamps to decide whether the library should be recompiled or not everytime you load the library.\n", + "Compiling the model creates all several kinds of files. The `test1.dve` file is converted into a C++ source code `test1.dve.cpp` which is then compiled into a shared library `test1.dve2c`. Because `spot.ltsmin.load()` has already loaded this shared library, all those files can be erased. If you do not erase the files, `spot.ltsmin.load()` will use the timestamps to decide whether the library should be recompiled or not everytime you load the library.\n", "\n", "For editing and loading DVE file from a notebook, it is a better to use the `%%dve` as shown next." ] @@ -1832,7 +1832,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can load this as a Kripke structure by passing the `want_kripke` option to `spot.automaton()`. The type `kripke_graph` stores the Kripke structure explicitely (like a `twa_graph` stores an automaton explicitely), so you may want to avoid it for very large modelsand use it only for development." + "You can load this as a Kripke structure by passing the `want_kripke` option to `spot.automaton()`. The type `kripke_graph` stores the Kripke structure explicitly (like a `twa_graph` stores an automaton explicitly), so you may want to avoid it for very large modelsand use it only for development." ] }, { diff --git a/tests/python/parity.ipynb b/tests/python/parity.ipynb index 56d6af350..09d4b133e 100644 --- a/tests/python/parity.ipynb +++ b/tests/python/parity.ipynb @@ -888,7 +888,7 @@ "source": [ "## Changing the **kind**\n", "\n", - "Generaly to go from `parity max` to `parity min`, it suffices to reverse the order of vertices.\n", + "Generally to go from `parity max` to `parity min`, it suffices to reverse the order of vertices.\n", "\n", "### max odd 5 → min odd 5:" ] @@ -3446,7 +3446,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here `Streett 1` is just a synonym for `parity max odd 2`. 
(Spot's automaton printer cannot guess which name should be prefered.)" + "Here `Streett 1` is just a synonym for `parity max odd 2`. (Spot's automaton printer cannot guess which name should be preferred.)" ] }, { @@ -4231,7 +4231,7 @@ "2. a Boolean indicating whether the output should be colored (`True`), or if transition with no color can be used (`False`).\n", "3. a Boolean indicating whether the output should be layered, i.e., in a max parity automaton, that means the color of a transition should be the maximal color visited by all cycles going through it.\n", "\n", - "By default, the second argument is `False`, because acceptance sets is a scarse ressource in Spot. The third argument also defaults to `False`, but for empircal reason: adding more colors like this tends to hinder simulation-based reductions." + "By default, the second argument is `False`, because acceptance sets is a scarce resource in Spot. The third argument also defaults to `False`, but for empirical reason: adding more colors like this tends to hinder simulation-based reductions." ] }, { diff --git a/tests/python/product.ipynb b/tests/python/product.ipynb index 5b60aae79..90dde8243 100644 --- a/tests/python/product.ipynb +++ b/tests/python/product.ipynb @@ -798,7 +798,7 @@ "\n", "First, we build a product without taking care of the acceptance sets. We just want to get the general shape of the algorithm.\n", "\n", - "We will build an automaton of type `twa_graph`, i.e., an automaton represented explicitely using a graph. In those automata, states are numbered by integers, starting from `0`. (Those states can also be given a different name, which is why the the `product()` shows us something that appears to be labeled by pairs, but the real identifier of each state is an integer.)\n", + "We will build an automaton of type `twa_graph`, i.e., an automaton represented explicitly using a graph. In those automata, states are numbered by integers, starting from `0`. (Those states can also be given a different name, which is why the the `product()` shows us something that appears to be labeled by pairs, but the real identifier of each state is an integer.)\n", "\n", "We will use a dictionary to keep track of the association between a pair `(ls,rs)` of input states, and its number in the output." ] @@ -1145,7 +1145,7 @@ "source": [ "def product1(left, right):\n", " # A bdd_dict object associates BDD variables (that are \n", - " # used in BDDs labeleing the edges) to atomic propositions.\n", + " # used in BDDs labeling the edges) to atomic propositions.\n", " bdict = left.get_dict()\n", " # If the two automata do not have the same BDD dict, then\n", " # we cannot easily detect compatible transitions.\n", @@ -1235,7 +1235,7 @@ "source": [ "## Second attempt: a working product\n", "\n", - "This fixes the list of atomtic propositions, as discussed above, and also sets the correct acceptance condition.\n", + "This fixes the list of atomic propositions, as discussed above, and also sets the correct acceptance condition.\n", "The `set_acceptance` method takes two arguments: a number of sets, and an acceptance function. In our case, both of these arguments are readily computed from the number of states and acceptance functions of the input automata." 
] }, @@ -1671,7 +1671,7 @@ "\n", "The former point could be addressed by calling `set_state_names()` and passing an array of strings: if a state number is smaller than the size of that array, then the string at that position will be displayed instead of the state number in the dot output. However we can do even better by using `set_product_states()` and passing an array of pairs of states. Besides the output routines, some algorithms actually retrieve this vector of pair of states to work on the product.\n", "\n", - "Regarding the latter point, consider for instance the deterministic nature of these automata. In Spot an automaton is deterministic if it is both existential (no universal branching) and universal (no non-deterministic branching). In our case we will restrict the algorithm to existantial input (by asserting `is_existential()` on both operands), so we can consider that the `prop_universal()` property is an indication of determinism:" + "Regarding the latter point, consider for instance the deterministic nature of these automata. In Spot an automaton is deterministic if it is both existential (no universal branching) and universal (no non-deterministic branching). In our case we will restrict the algorithm to existential input (by asserting `is_existential()` on both operands), so we can consider that the `prop_universal()` property is an indication of determinism:" ] }, { @@ -1710,7 +1710,7 @@ "\n", " result.prop_universal(left.prop_universal() and right.prop_universal())\n", "\n", - "because the results the `prop_*()` family of functions take and return instances of `spot.trival` values. These `spot.trival`, can, as their name implies, take one amongst three values representing `yes`, `no`, and `maybe`. `yes` and `no` should be used when we actually know that the automaton is deterministic or not (not deterministic meaning that there actually exists some non determinitic state in the automaton), and `maybe` when we do not know. \n", + "because the results the `prop_*()` family of functions take and return instances of `spot.trival` values. These `spot.trival`, can, as their name implies, take one amongst three values representing `yes`, `no`, and `maybe`. `yes` and `no` should be used when we actually know that the automaton is deterministic or not (not deterministic meaning that there actually exists some non deterministic state in the automaton), and `maybe` when we do not know. \n", "\n", "The one-liner above is wrong for two reasons:\n", "\n", diff --git a/tests/python/satmin.ipynb b/tests/python/satmin.ipynb index ff14b3195..4442e5495 100644 --- a/tests/python/satmin.ipynb +++ b/tests/python/satmin.ipynb @@ -1250,7 +1250,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "But do we really need 2 rabin pairs? Let's ask if we can get an equivalent with only one pair. (Note that reducing the number of pairs might require more state, but the `sat_minimize()` function will never attempt to add state unless explicitely instructed to do so. In this case we are therefore looking for a state-based Rabin-1 automaton with at most 4 states.)" + "But do we really need 2 Rabin pairs? Let's ask if we can get an equivalent with only one pair. (Note that reducing the number of pairs might require more state, but the `sat_minimize()` function will never attempt to add state unless explicitly instructed to do so. 
In this case we are therefore looking for a state-based Rabin-1 automaton with at most 4 states.)" ] }, { diff --git a/tests/python/stutter-inv.ipynb b/tests/python/stutter-inv.ipynb index 4e0f142f1..4b02a9c0f 100644 --- a/tests/python/stutter-inv.ipynb +++ b/tests/python/stutter-inv.ipynb @@ -12,17 +12,19 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Stutter-invariant languages\n", "\n", - "A language $L$ is said to be _stutter-invariant_ iff $\\ell_0\\ldots\\ell_{i-1}\\ell_i\\ell_{i+1}\\ldots\\in L \\iff \\ell_0\\ldots\\ell_{i-1}\\ell_i\\ell_i\\ell_{i+1}\\ldots\\in L$, i.e., if duplicating a letter in a word or removing a duplicated letter does not change the membership of that word to $L$. These languages are also called _stutter-insensitive_. We use the adjective _sutter-sensitive_ to describe a language that is not stutter-invariant. Of course we can extend this vocabulary to LTL formulas or automata that represent stutter-invariant languages.\n", + "A language $L$ is said to be _stutter-invariant_ iff $\\ell_0\\ldots\\ell_{i-1}\\ell_i\\ell_{i+1}\\ldots\\in L \\iff \\ell_0\\ldots\\ell_{i-1}\\ell_i\\ell_i\\ell_{i+1}\\ldots\\in L$, i.e., if duplicating a letter in a word or removing a duplicated letter does not change the membership of that word to $L$. These languages are also called _stutter-insensitive_. We use the adjective _stutter-sensitive_ to describe a language that is not stutter-invariant. Of course we can extend this vocabulary to LTL formulas or automata that represent stutter-invariant languages.\n", "\n", "Stutter-invariant languages play an important role in model checking. When verifying a stutter-invariant specification against a system, we know that we have some freedom in how we discretize the time in the model: as long as we do not hide changes of model variables that are observed by the specification, we can merge multiple steps of the model. This, combined by careful analysis of actions of the model that are independent, is the basis for a set of techniques known as _partial-order reductions_ (POR) that postpone the visit of some successors in the model, because we know we can always visit them later." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -68,6 +70,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -95,13 +98,15 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Of course this `is_stutter_invariant()` function first checks whether the formula is `X`-free before wasting time building automata, so if you want to detect stutter-invariant formulas in your model checker, this is the only function to use. Also, if you hapen to already have an automaton `aut_g` for `g`, you should pass it as a second argument to avoid it being recomputed: `spot.is_stutter_invariant(g, aut_g)`." + "Of course this `is_stutter_invariant()` function first checks whether the formula is `X`-free before wasting time building automata, so if you want to detect stutter-invariant formulas in your model checker, this is the only function to use. Also, if you happen to already have an automaton `aut_g` for `g`, you should pass it as a second argument to avoid it being recomputed: `spot.is_stutter_invariant(g, aut_g)`." 
] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -132,6 +137,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -139,10 +145,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Similarly to formulas, automata use a few bits to store some known properties about themselves, like whether they represent a stutter-invariant language. This property can be checked with the `prop_stutter_invariant()` method, but that returns a `trival` instance (i.e., yes, no, or maybe). Some algorithms will update that property whenever that is cheap or expliclitely asked for. For instance `spot.translate()` only sets the property if the translated formula is `X`-free." + "Similarly to formulas, automata use a few bits to store some known properties about themselves, like whether they represent a stutter-invariant language. This property can be checked with the `prop_stutter_invariant()` method, but that returns a `trival` instance (i.e., yes, no, or maybe). Some algorithms will update that property whenever that is cheap or explicitly asked for. For instance `spot.translate()` only sets the property if the translated formula is `X`-free." ] }, { @@ -164,6 +171,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -188,10 +196,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Note that `prop_stutter_invariant()` was updated as a side-effect so that any futher call to `is_stutter_invariant()` with this automaton will be instantaneous." + "Note that `prop_stutter_invariant()` was updated as a side-effect so that any further call to `is_stutter_invariant()` with this automaton will be instantaneous." ] }, { @@ -212,10 +221,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "You have to be aware of this property being set in your back because if while playing with `is_stutter_invariant()` you the incorrect formula for an automaton by mistake, the automaton will have its property set incorrectly, and running `is_stutter_inariant()` with the correct formula will simply return the cached property.\n", + "You have to be aware of this property being set in your back because if while playing with `is_stutter_invariant()` you the incorrect formula for an automaton by mistake, the automaton will have its property set incorrectly, and running `is_stutter_invariant()` with the correct formula will simply return the cached property.\n", "\n", "In doubt, you can always reset the property as follows:" ] @@ -239,6 +249,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -331,17 +342,19 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Explaining why a formula is not sutter-invariant" + "## Explaining why a formula is not stutter-invariant" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "As explained in our [Spin'15 paper](https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf) the sutter-invariant checks are implemented using simple operators suchs as `spot.closure(aut)`, that augment the language of L by adding words that can be obtained by removing duplicated letters, and `spot.sl(aut)` or `spot.sl2(aut)` that both augment the language that L by adding words that can be obtained by duplicating letters. 
The default `is_stutter_invariant()` function is implemented as `spot.product(spot.closure(aut), spot.closure(neg_aut)).is_empty()`, but that is just one possible implementation selected because it was more efficient.\n", + "As explained in our [Spin'15 paper](https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf) the stutter-invariant checks are implemented using simple operators such as `spot.closure(aut)`, that augment the language of L by adding words that can be obtained by removing duplicated letters, and `spot.sl(aut)` or `spot.sl2(aut)` that both augment the language that L by adding words that can be obtained by duplicating letters. The default `is_stutter_invariant()` function is implemented as `spot.product(spot.closure(aut), spot.closure(neg_aut)).is_empty()`, but that is just one possible implementation selected because it was more efficient.\n", "\n", "Using these bricks, we can modify the original algorithm so it uses a counterexample to explain why a formula is stutter-sensitive." ] @@ -394,22 +407,25 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Note that a variant of the above explanation procedure is already integerated in our [on-line LTL translator tool](https://spot.lrde.epita.fr/app/) (use the study tab)." + "Note that a variant of the above explanation procedure is already integrated in our [on-line LTL translator tool](https://spot.lrde.epita.fr/app/) (use the study tab)." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Detecting stutter-invariant states\n", "\n", - "Even if the language of an automaton is not sutter invariant, some of its states may recognize a stutter-invariant language. (We assume the language of a state is the language the automaton would have when starting from this state.)" + "Even if the language of an automaton is not stutter invariant, some of its states may recognize a stutter-invariant language. (We assume the language of a state is the language the automaton would have when starting from this state.)" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -616,6 +632,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -645,10 +662,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "For convenience, the `highligh_...()` version colors the stutter-invariant states of the automaton for display.\n", + "For convenience, the `highlight_...()` version colors the stutter-invariant states of the automaton for display.\n", "(That 5 is the color number for red in Spot's hard-coded palette.)" ] }, @@ -825,6 +843,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -832,6 +851,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -839,10 +859,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "This second example illustrates the fact that a state can be marked if it it not sutter-invariant but appear below a stutter-invariant state. We build our example automaton as the disjuction of the following two stutter-sensitive formulas, whose union is equivalent to the sutter-invariant formula `GF!a`." + "This second example illustrates the fact that a state can be marked if it is not stutter-invariant but appear below a stutter-invariant state. 
We build our example automaton as the disjuction of the following two stutter-sensitive formulas, whose union is equivalent to the stutter-invariant formula `GF!a`." ] }, { @@ -878,6 +899,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1082,6 +1104,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1286,11 +1309,12 @@ "spot.highlight_stutter_invariant_states(aut, g, 5)\n", "display(aut)\n", "# The stutter_invariant property is set on AUT as a side effect\n", - "# of calling sutter_invariant_states() or any variant of it.\n", + "# of calling stutter_invariant_states() or any variant of it.\n", "assert(aut.prop_stutter_invariant().is_true())" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1415,6 +1439,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1422,10 +1447,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Sutter-invariance at the letter level\n", + "## Stutter-invariance at the letter level\n", "\n", "Instead of marking each state as stuttering or not, we can list the letters that we can stutter in each state.\n", "More precisely, a state $q$ is _stutter-invariant for letter $a$_ if the membership to $L(q)$ of any word starting with $a$ is preserved by the operations that duplicate letters or remove duplicates. \n", @@ -1547,12 +1573,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "The `stutter_invariant_letters()` functions returns a vector of BDDs indexed by state numbers. The BDD at index $q$ specifies all letters $\\ell$ for which state $q$ would be stuttering. Note that if $q$ is stutter-invariant or reachable from a stutter-invariant state, the associated BDD will be `bddtrue` (printed as `1` below).\n", "\n", - "This interface is a bit inconveniant to use interactively, due to the fact that we need a `spot.bdd_dict` object to print a BDD." + "This interface is a bit inconvenient to use interactively, due to the fact that we need a `spot.bdd_dict` object to print a BDD." ] }, { @@ -1578,6 +1605,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1585,7 +1613,7 @@ "\n", "Consider the following automaton, which is a variant of our second example above. \n", "\n", - "The language accepted from state (2) is `!GF(a & Xa) & GF!a` (this can be simplified to `FG(!a | X!a)`), while the language accepted from state (0) is `GF(a & Xa) & GF!a`. Therefore. the language accepted from state (5) is `a & X(GF!a)`. Since this is equivalent to `a & GF(!a)` state (5) recognizes stutter-invariant language, but as we can see, it is not the case that all states below (5) are also marked. In fact, states (0) can also be reached via states (7) and (6), recognizing respectively `(a & X(a & GF!a)) | (!a & X(!a & GF(a & Xa) & GF!a))` and `!a & GF(a & Xa) & GF!a))`, i.e., two stutter-sentive languages." + "The language accepted from state (2) is `!GF(a & Xa) & GF!a` (this can be simplified to `FG(!a | X!a)`), while the language accepted from state (0) is `GF(a & Xa) & GF!a`. Therefore. the language accepted from state (5) is `a & X(GF!a)`. Since this is equivalent to `a & GF(!a)` state (5) recognizes stutter-invariant language, but as we can see, it is not the case that all states below (5) are also marked. 
In fact, states (0) can also be reached via states (7) and (6), recognizing respectively `(a & X(a & GF!a)) | (!a & X(!a & GF(a & Xa) & GF!a))` and `!a & GF(a & Xa) & GF!a))`, i.e., two stutter-sensitive languages." ] }, { @@ -1817,6 +1845,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1845,11 +1874,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "In cases where we prefer to have a forward-closed set of stutter-invariant states, it is always possible to duplicate\n", - "the problematic states. The `make_stutter_invariant_foward_closed_inplace()` modifies the automaton in place, and also returns an updated copie of the vector of stutter-invariant states." + "the problematic states. The `make_stutter_invariant_forward_closed_inplace()` modifies the automaton in place, and also returns an updated copy of the vector of stutter-invariant states." ] }, { @@ -2128,10 +2158,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Now, state 0 is no longuer a problem." + "Now, state 0 is no longer a problem." ] }, { @@ -2155,10 +2186,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Let's see how infrequently the set of stutter-invarant states is not closed." + "Let's see how infrequently the set of stutter-invariant states is not closed." ] }, { @@ -2277,6 +2309,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2304,10 +2337,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Here is the percentage of stutter-invarant states." + "Here is the percentage of stutter-invariant states." ] }, { diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index ba1b562cc..b7edb2752 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -28,10 +28,10 @@ "The process is decomposed in three steps:\n", "- Creating the game\n", "- Solving the game\n", - "- Simplifying the winnning strategy\n", + "- Simplifying the winning strategy\n", "- Building the circuit from the strategy\n", "\n", - "Each of these steps is parametrized by a structure called `synthesis_info`. This structure stores some additional data needed to pass fine-tuning options or to store statistics.\n", + "Each of these steps is parameterized by a structure called `synthesis_info`. This structure stores some additional data needed to pass fine-tuning options or to store statistics.\n", "\n", "The `ltl_to_game` function takes the LTL specification, and the list of controllable atomic propositions (or output signals). It returns a two-player game, where player 0 plays the input variables (and wants to invalidate the acceptance condition), and player 1 plays the output variables (and wants to satisfy the output condition). The conversion from LTL to parity automata can use one of many algorithms, and can be specified in the `synthesis_info` structure (this works like the `--algo=` option of `ltlsynt`)." ] @@ -2238,7 +2238,7 @@ "id": "9d8d52f6", "metadata": {}, "source": [ - "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." + "If needed, a separated Mealy machine can be turned into game shape using `split_separated_mealy()`, which is more efficient than `split_2step()`." 
] }, { @@ -2744,7 +2744,7 @@ "id": "5c2b0b78", "metadata": {}, "source": [ - "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). In that case those \n", + "It can happen that propositions declared as output are omitted in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). In that case those \n", "values can take arbitrary values.\n", "\n", "For instance so following constraint mention `o1` and `i1`, but those atomic proposition are actually unconstrained (`F(... U x)` can be simplified to `Fx`). Without any indication, the circuit built will ignore those variables:" @@ -3412,7 +3412,7 @@ "2. Combine the mealy machines into one before passing it to `mealy_machine_to aig(). This currently only supports input complete machines of the same type (mealy/separated mealy/split mealy)\n", "\n", "Note that the method version is usually preferable as it is faster.\n", - "Also note that in order for this to work, all mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options strucuture." + "Also note that in order for this to work, all mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options structure." ] }, { diff --git a/tests/python/twagraph-internals.ipynb b/tests/python/twagraph-internals.ipynb index 654aa7343..21b58cb8c 100644 --- a/tests/python/twagraph-internals.ipynb +++ b/tests/python/twagraph-internals.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -20,6 +21,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -27,6 +29,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -164,10 +167,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "The graphical representation above is just a convenient representation of that automaton and hides some details. Internally, this automaton is stored as two vectors plus some additional data. All of those can be displayed using the `show_storage()` method. The two vectors are the `states` and `edges` vectors. The additional data gives the initial state, number of acceptance sets, acceptance condition, list of atomic propositions, as well as a bunch of [property flags](https://spot.lrde.epita.fr/concepts.html#property-flags) on the automaton. All those properties default to `maybe`, but some algorithms will turn them to `yes` or `no` whenever that property can be decided at very low cost (usually a side effect of the algorithm). In this example we asked for a deterministic automaton, so the output of the construction is necessarily `universal` (this means no existantial branching, hence deterministic for our purpose), and this property implies `unambiguous` and `semi_deterministic`." + "The graphical representation above is just a convenient representation of that automaton and hides some details. Internally, this automaton is stored as two vectors plus some additional data. All of those can be displayed using the `show_storage()` method. The two vectors are the `states` and `edges` vectors. 
The additional data gives the initial state, number of acceptance sets, acceptance condition, list of atomic propositions, as well as a bunch of [property flags](https://spot.lrde.epita.fr/concepts.html#property-flags) on the automaton. All those properties default to `maybe`, but some algorithms will turn them to `yes` or `no` whenever that property can be decided at very low cost (usually a side effect of the algorithm). In this example we asked for a deterministic automaton, so the output of the construction is necessarily `universal` (this means no existential branching, hence deterministic for our purpose), and this property implies `unambiguous` and `semi_deterministic`." ] }, { @@ -417,6 +421,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -801,6 +806,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -832,6 +838,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1199,6 +1206,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1539,6 +1547,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1546,6 +1555,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1952,10 +1962,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Such an inconsistency will cause many issues when the automaton is passed to algorithm with specialized handling of universal automata. When writing an algorithm that modify the automaton, it is your responsibility to update the property bits as well. In this case it could be fixed by calling `aut.prop_universal(False); aut.prop_unambiguous(spot.trival_maybe()); ...` for each property, or by reseting all properties to `maybe` with `prop_reset()`:" + "Such an inconsistency will cause many issues when the automaton is passed to algorithm with specialized handling of universal automata. When writing an algorithm that modify the automaton, it is your responsibility to update the property bits as well. In this case it could be fixed by calling `aut.prop_universal(False); aut.prop_unambiguous(spot.trival_maybe()); ...` for each property, or by resetting all properties to `maybe` with `prop_reset()`:" ] }, { @@ -2019,6 +2030,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2290,6 +2302,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2319,6 +2332,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2351,6 +2365,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2378,6 +2393,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2601,6 +2617,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3069,6 +3086,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3574,12 +3592,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Here we have created two universal transitions: `0->[0,2]` and `2->[0,1]`. The destination groups `[0,2]` and `[0,1]` are stored in a integer vector called `dests`. Each group is encoded by its size immediately followed by the state numbers of the destinations. So group `[0,2]` get encoded as `2,0,2` at position `0` of `dests`, and group `[0,1]` is encoded as `2,0,1` at position `3`. 
Each group is denoted by the index of its size in the `dests` vector. When an edge targets a destination group, the complement of that destination index is written in the `dst` field of the `edges` entry, hence that `~0` and `~3` that appear here. Using a complement like this allows us to quickly detect universal edges by looking at the sign bit if their `dst` entry.\n", "\n", - "To work on alternating automata, one can no longuer just blindingly use the `dst` field of outgoing iterations:" + "To work on alternating automata, one can no longer just blindingly use the `dst` field of outgoing iterations:" ] }, { @@ -3609,6 +3628,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3642,6 +3662,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3675,6 +3696,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4238,6 +4260,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5302,6 +5325,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5309,6 +5333,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5611,6 +5636,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ diff --git a/tests/python/word.ipynb b/tests/python/word.ipynb index 4e02bf224..aacc1280a 100644 --- a/tests/python/word.ipynb +++ b/tests/python/word.ipynb @@ -243,7 +243,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To convert the run into a word, using `spot.twa_word()`. Note that our runs are labeled by Boolean formulas that are not necessarily a conjunction of all involved litterals. The word is just the projection of the run on its labels." + "To convert the run into a word, using `spot.twa_word()`. Note that our runs are labeled by Boolean formulas that are not necessarily a conjunction of all involved literals. The word is just the projection of the run on its labels." ] }, { diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index c9eb3503d..5424366be 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -12,6 +12,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "fb656100", "metadata": {}, @@ -21,17 +22,18 @@ "\n", "These two structures are used to decompose an acceptance condition (or automaton) into trees that alternate accepting and rejecting elements in order to help converting an automaton to parity acceptance. Spot implements those structures, includes some display code to better explore them iteractively, and finally use them to implement a transformation to parity acceptance.\n", "\n", - "For a formal tratement of these, in a slightly different formalism, see [Optimal Transformations of Games and Automata Using Muller Conditions](https://arxiv.org/abs/2011.13041) by Casares, Colcombet, and Fijalkow. In Spot those definitions have been adapted to use Emerson-Lei acceptance, and support transitions labeled by multiple colors (the main differences are for the Zielonka Tree, the ACD is almost identical)." + "For a formal treatment of these, in a slightly different formalism, see [Optimal Transformations of Games and Automata Using Muller Conditions](https://arxiv.org/abs/2011.13041) by Casares, Colcombet, and Fijalkow. 
In Spot those definitions have been adapted to use Emerson-Lei acceptance, and support transitions labeled by multiple colors (the main differences are for the Zielonka Tree, the ACD is almost identical)." ] }, { + "attachments": {}, "cell_type": "markdown", "id": "4e8b5d3f", "metadata": {}, "source": [ "# Zielonka Tree\n", "\n", - "The Zielonka tree is built from an acceptance formula and is labeled by sets of colors. The root contains all colors used in the formula. If seing infinitely all colors of one node would satisfy the acceptance condition, we say that the node is accepting and draw it with an ellipse, otherwise is is rejecting and drawn with a rectangle. The children of an accepting (resp. rejecting) node, are the largest subsets of colors that are rejecting (resp. accepting).\n", + "The Zielonka tree is built from an acceptance formula and is labeled by sets of colors. The root contains all colors used in the formula. If seeing infinitely all colors of one node would satisfy the acceptance condition, we say that the node is accepting and draw it with an ellipse, otherwise is is rejecting and drawn with a rectangle. The children of an accepting (resp. rejecting) node, are the largest subsets of colors that are rejecting (resp. accepting).\n", "\n", "Here is an example:" ] @@ -231,6 +233,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "d7629725", "metadata": {}, @@ -241,7 +244,7 @@ "\n", "This tree is also layered: all nodes in each layers are alternatively rejecting and accepting. Layers are numbered incrementally from 0 at the root. In this example, leaves are in layer 3. Since it is conventional to put the root at the top, we will say that a node is high in the tree when it has a small level.\n", "\n", - "In this example, odd levels are accepting: we say the tree is odd. On another example, it could be the other way arround. The `is_even()` method tells us which way it is." + "In this example, odd levels are accepting: we say the tree is odd. On another example, it could be the other way around. The `is_even()` method tells us which way it is." ] }, { @@ -266,6 +269,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "15fbd4e6", "metadata": {}, @@ -295,6 +299,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "de4cdc45", "metadata": {}, @@ -329,6 +334,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "4c3bf70b", "metadata": {}, @@ -358,6 +364,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "0d865f30", "metadata": {}, @@ -387,6 +394,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "8b6b3928", "metadata": {}, @@ -430,16 +438,18 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "656e05f4", "metadata": {}, "source": [ - "If we imagine an run looping on tree transitions labeled by `[1]`, `[0]`, `[3]`, we know (from the original acceptance condition) that it should be accepting. Infinite repetition of the `step()` procedure will emit many levels, but the smallest level we see infinitely often is `1`. It corresponds to node 2, labeled by `{0,1,3}`: this is the highest node we visit infinitely often while steping through this tree in a loop.\n", + "If we imagine an run looping on tree transitions labeled by `[1]`, `[0]`, `[3]`, we know (from the original acceptance condition) that it should be accepting. Infinite repetition of the `step()` procedure will emit many levels, but the smallest level we see infinitely often is `1`. 
It corresponds to node 2, labeled by `{0,1,3}`: this is the highest node we visit infinitely often while stepping through this tree in a loop.\n", "\n", "Similarly, a loop of two transitions labeled by `[1]` and `[3]` should be rejecting. Stepping through the tree will emit infinitely many 2, a rejecting level." ] }, { + "attachments": {}, "cell_type": "markdown", "id": "5c7014e9", "metadata": {}, @@ -450,6 +460,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "2750cb1d", "metadata": {}, @@ -1078,11 +1089,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "ea452913", "metadata": {}, "source": [ - "Here the parity automaton output has as many proprities as there are levels in the Zielonka tree.\n", + "Here the parity automaton output has as many priorities as there are levels in the Zielonka tree.\n", "\n", "The call to `copy_state_names_from()` above causes the states to be labeled by strings of the form `orig#leaf` when `orig` is the original state number, and `leaf` is a leaf of the Zielonka tree.\n", "\n", @@ -1111,6 +1123,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "da8d9e97", "metadata": {}, @@ -1716,6 +1729,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "9bf70138", "metadata": {}, @@ -1756,6 +1770,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "147a71a6", "metadata": {}, @@ -2718,6 +2733,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "77db26c3", "metadata": {}, @@ -2727,7 +2743,7 @@ "The `zielonka_tree` class accepts a few options that can alter its behaviour.\n", "\n", "Options `CHECK_RABIN`, `CHECK_STREETT`, `CHECK_PARITY` can be combined with\n", - "`ABORT_WRONG_SHAPE` to abort the construction as soon as it is detected that the Zielonka tree has the wrong shape. When this happens, the number of branchs of the tree is set to 0.\n", + "`ABORT_WRONG_SHAPE` to abort the construction as soon as it is detected that the Zielonka tree has the wrong shape. When this happens, the number of branches of the tree is set to 0.\n", "\n", "For instance we can check that the original acceptance condition does not behaves like a Parity condition." 
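A minimal sketch of that check from Python, with caveats: the `spot.zielonka_tree_options_*` flag names, the constructor argument, and the branch-count accessor below are extrapolated from the naming pattern above and are assumptions, not something taken from the notebook itself.

    import spot
    # an acceptance condition used purely as an illustration (Rabin-like)
    code = spot.acc_code('(Fin(0) & Inf(1)) | (Fin(2) & Inf(3))')
    opts = (spot.zielonka_tree_options_CHECK_PARITY
            | spot.zielonka_tree_options_ABORT_WRONG_SHAPE)
    zt = spot.zielonka_tree(code, opts)
    # 0 branches would mean the construction was aborted because the
    # condition is not parity-shaped
    print(zt.num_branches())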
] @@ -2755,6 +2771,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "4786f64c", "metadata": {}, @@ -2941,6 +2958,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "9d7688b3", "metadata": {}, @@ -2949,6 +2967,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "75838579", "metadata": {}, @@ -2985,6 +3004,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "cb546bc2", "metadata": {}, @@ -4229,6 +4249,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "0f2f00c4", "metadata": {}, @@ -4260,6 +4281,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "3a3db431", "metadata": {}, @@ -4270,6 +4292,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "6595333d", "metadata": {}, @@ -4327,6 +4350,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "1c6d4fe9", "metadata": {}, @@ -4421,6 +4445,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "ad201f45", "metadata": { @@ -4439,6 +4464,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "04d7cc51", "metadata": {}, @@ -4468,6 +4494,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "1015abb6", "metadata": {}, @@ -4476,6 +4503,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "b89a6186", "metadata": {}, @@ -5106,6 +5134,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "f039aeaa", "metadata": {}, @@ -5135,6 +5164,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "07aaab3a", "metadata": {}, @@ -5746,11 +5776,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "f14ee428", "metadata": {}, "source": [ - "This transformation can have substantiually fewer states than the one based on Zielonka tree, because the branches are actually restricted to only those that matter for a given state." + "This transformation can have substantially fewer states than the one based on Zielonka tree, because the branches are actually restricted to only those that matter for a given state." ] }, { @@ -5796,6 +5827,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "ce62b966", "metadata": {}, @@ -5804,6 +5836,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "e3d0ff64", "metadata": {}, @@ -5842,19 +5875,21 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "09ec9887", "metadata": {}, "source": [ - "Calling the above methods withtout passing the relevant option will raise an exception." + "Calling the above methods without passing the relevant option will raise an exception." ] }, { + "attachments": {}, "cell_type": "markdown", "id": "816ee0eb", "metadata": {}, "source": [ - "Additonally, when the goal is only to check some typeness, the construction of the ACD can be aborted as soon as the typeness is found to be wrong. This can be enabled by passing the additional option `spot.acd_options_ABORT_WRONG_SHAPE`. In case the construction is aborted the ACD forest will be erased (to make sure it is not used), and `node_count()` will return 0." + "Additionally, when the goal is only to check some typeness, the construction of the ACD can be aborted as soon as the typeness is found to be wrong. This can be enabled by passing the additional option `spot.acd_options_ABORT_WRONG_SHAPE`. In case the construction is aborted the ACD forest will be erased (to make sure it is not used), and `node_count()` will return 0." 
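A hedged sketch of what an aborted construction might look like from Python: only `spot.acd_options_ABORT_WRONG_SHAPE` and `node_count()` come from the text above; the `spot.acd()` constructor call, its arguments, and the `CHECK_PARITY` flag are assumptions.

    import spot
    aut = spot.translate('(GFa & GFb) | GFc', 'generic')  # illustrative automaton
    a = spot.acd(aut, spot.acd_options_CHECK_PARITY
                      | spot.acd_options_ABORT_WRONG_SHAPE)
    if a.node_count() == 0:
        print("ACD construction aborted: acceptance is not parity-typeable")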
] }, { @@ -5904,16 +5939,17 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "f380ca5f", "metadata": {}, "source": [ "## State-based transformation\n", "\n", - "The ACD usage can be modified slightly in order to produce a state-based automaton. The rules for stepping through the ACD are similar, except that when we detect that a cycle through all children of a node has been done, we return the current node without going to the leftmost leave of the next children. When stepping a transition from a node a child, we should act as if we were in the leftmost child of that node containing the source of tha transition. Stepping through a transition this way do not emit any color, instead the color of a state will be the level of its associated node.\n", + "The ACD usage can be modified slightly in order to produce a state-based automaton. The rules for stepping through the ACD are similar, except that when we detect that a cycle through all children of a node has been done, we return the current node without going to the leftmost leave of the next children. When stepping a transition from a node a child, we should act as if we were in the leftmost child of that node containing the source of that transition. Stepping through a transition this way do not emit any color, instead the color of a state will be the level of its associated node.\n", "(This modified transformation do not claim to be optimal, unlike the transition-based version.)\n", "\n", - "The `ORDER_HEURISTIC` used below will be explained in the next section, it justs alters ordering of children of the ACD in a way that the state-based transformation prefers." + "The `ORDER_HEURISTIC` used below will be explained in the next section, it just alters ordering of children of the ACD in a way that the state-based transformation prefers." ] }, { @@ -7165,6 +7201,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "98a1474c", "metadata": {}, @@ -7258,11 +7295,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "c9725681", "metadata": {}, "source": [ - "The state-based version of the ACD transformation is `acd_transform_sbacc()`. In the output, the cycle between `2#8` and `2#0` corresponds to the repeatitions of edges 9 and 10 we stepped through above." + "The state-based version of the ACD transformation is `acd_transform_sbacc()`. In the output, the cycle between `2#8` and `2#0` corresponds to the repetitions of edges 9 and 10 we stepped through above." ] }, { @@ -8081,6 +8119,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "5b770f23", "metadata": {}, @@ -8108,6 +8147,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "9f39b61d", "metadata": {}, @@ -8649,6 +8689,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "03f0eda7", "metadata": {}, @@ -8657,11 +8698,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "45596824", "metadata": {}, "source": [ - "The `ORDER_HEURISTIC` option of the ACD construction, attemps to order the children of a node by decreasing number of number of successors that are out of the node. It is activated by default inside `spot.acd_transform_sbacc()`." + "The `ORDER_HEURISTIC` option of the ACD construction, attempts to order the children of a node by decreasing number of number of successors that are out of the node. It is activated by default inside `spot.acd_transform_sbacc()`." 
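If the heuristic has to be requested explicitly (for instance when building the ACD by hand rather than through the state-based transformation), the call might look as follows; the `spot.acd_options_ORDER_HEURISTIC` name and the exact signatures are extrapolated from the option-naming pattern above and should be treated as assumptions.

    import spot
    aut = spot.translate('(GFa & GFb) | GFc', 'generic')  # illustrative automaton
    # acd_transform_sbacc() already enables the heuristic by default,
    # so this is only needed when constructing the ACD separately.
    a = spot.acd(aut, spot.acd_options_ORDER_HEURISTIC)
    sb = spot.acd_transform_sbacc(aut)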
] }, { @@ -9170,6 +9212,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "15f094c0", "metadata": {}, @@ -10220,6 +10263,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "36629c32", "metadata": {}, @@ -11118,11 +11162,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "7d638d20", "metadata": {}, "source": [ - "An issue in Spot is to always ensure that property bits of automata (cleaming that an automaton is weak, inherently weak, deterministic, etc.) are properly preserved or reset.\n", + "An issue in Spot is to always ensure that property bits of automata (claiming that an automaton is weak, inherently weak, deterministic, etc.) are properly preserved or reset.\n", "\n", "Here if the input is inherently weak, the output should be weak. " ] From 70812046d2fed8e9a775209791ebd8e2a7a24e04 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 26 Sep 2023 17:10:05 +0200 Subject: [PATCH 344/606] ltlsynt: do a fixpoint around the polarity simplifications * bin/ltlsynt.cc: Here. * tests/core/ltlsynt.test: Adjust. --- bin/ltlsynt.cc | 92 ++++++++++++++++++++++++----------------- tests/core/ltlsynt.test | 31 ++++---------- 2 files changed, 62 insertions(+), 61 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index d2d85caa7..59fe52494 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -408,46 +408,62 @@ namespace // or always in negative form. // In syntcomp, this occurs more frequently for input variables than // output variable. See issue #529 for some examples. + spot::relabeling_map rm; + bool first_dap = true; + auto display_ap = [&rm, &first_dap](spot::formula p) + { + if (SPOT_LIKELY(!gi->verbose_stream)) + return; + if (first_dap) + { + *gi->verbose_stream << ("the following APs are polarized, " + "they can be replaced by constants:\n"); + first_dap = false; + } + *gi->verbose_stream << " " << p << " := " << rm[p] <<'\n'; + }; + spot::formula oldf; if (opt_polarity) - { - std::set lits = spot::collect_litterals(f); - for (const std::string& ap: output_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos && !has_neg) - rm[pos] = spot::formula::tt(); - else if (has_neg && !has_pos) - rm[pos] = spot::formula::ff(); - } - for (const std::string& ap: input_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos && !has_neg) - rm[pos] = spot::formula::ff(); - else if (has_neg && !has_pos) - rm[pos] = spot::formula::tt(); - } - if (!rm.empty()) - { - if (gi->verbose_stream) - { - *gi->verbose_stream << ("the following APs are polarized, " - "they can be replaced by constants:\n"); - for (auto [k, v]: rm) - *gi->verbose_stream << " " << k << " := " << v <<'\n'; - } - f = spot::relabel_apply(f, &rm); - if (gi->verbose_stream) - *gi->verbose_stream << "new formula: " << f << '\n'; - } - } + do + { + bool rm_has_new_terms = false; + std::set lits = spot::collect_litterals(f); + for (const std::string& ap: output_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos ^ has_neg) + { + rm[pos] = has_pos ? 
spot::formula::tt() : spot::formula::ff(); + rm_has_new_terms = true; + display_ap(pos); + } + } + for (const std::string& ap: input_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos ^ has_neg) + { + rm[pos] = has_neg ? spot::formula::tt() : spot::formula::ff(); + rm_has_new_terms = true; + display_ap(pos); + } + } + oldf = f; + if (rm_has_new_terms) + { + f = spot::relabel_apply(f, &rm); + if (gi->verbose_stream) + *gi->verbose_stream << "new formula: " << f << '\n'; + } + } + while (oldf != f); std::vector sub_form; std::vector> sub_outs; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index ae476e71d..cd48cf18e 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -764,31 +764,16 @@ diff outx exp cat >exp < Date: Mon, 2 Oct 2023 11:54:34 +0200 Subject: [PATCH 345/606] sccinfo: implement PROCESS_UNREACHABLE_STATES This is actually used by next patch. * spot/twaalgos/sccinfo.cc, spot/twaalgos/sccinfo.hh: Here. * tests/python/sccinfo.py: Add a small test case. * NEWS: Mention it. --- NEWS | 3 +++ spot/twaalgos/sccinfo.cc | 24 ++++++++++++++++-------- spot/twaalgos/sccinfo.hh | 9 ++++++++- tests/python/sccinfo.py | 39 ++++++++++++++++++++++++++++++++++++++- 4 files changed, 65 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index b6ad95362..1da2a9cde 100644 --- a/NEWS +++ b/NEWS @@ -85,6 +85,9 @@ New in spot 2.11.6.dev (not yet released) The above also impacts autfilt --included-in option. + - spot::scc_info has a new option PROCESS_UNREACHABLE_STATES that + causes it to enumerated even unreachable SCCs. + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/spot/twaalgos/sccinfo.cc b/spot/twaalgos/sccinfo.cc index 7d82c6b81..3abc6fcbd 100644 --- a/spot/twaalgos/sccinfo.cc +++ b/spot/twaalgos/sccinfo.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -206,13 +206,21 @@ namespace spot } }; - // Setup depth-first search from the initial state. But we may - // have a conjunction of initial state in alternating automata. - if (initial_state_ == -1U) - initial_state_ = aut->get_init_state_number(); - for (unsigned init: aut->univ_dests(initial_state_)) - push_init(init); - + if (!!(options & scc_info_options::PROCESS_UNREACHABLE_STATES)) + { + unsigned e = aut->num_states(); + for (unsigned i = 0; i < e; ++i) + push_init(i); + } + else + { + // Setup depth-first search from the initial state. But we may + // have a conjunction of initial state in alternating automata. + if (initial_state_ == -1U) + initial_state_ = aut->get_init_state_number(); + for (unsigned init: aut->univ_dests(initial_state_)) + push_init(init); + } while (!init_states.empty()) { unsigned init = init_states.front(); diff --git a/spot/twaalgos/sccinfo.hh b/spot/twaalgos/sccinfo.hh index 2de9a1cb8..dc275c517 100644 --- a/spot/twaalgos/sccinfo.hh +++ b/spot/twaalgos/sccinfo.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -393,6 +393,13 @@ namespace spot /// Conditionally track states if the acceptance conditions uses Fin. /// This is sufficiant for determine_unknown_acceptance(). TRACK_STATES_IF_FIN_USED = 8, + /// Also compute SCCs for the unreachable states. When this is + /// used, SCCs are first enumerated from state 0, and then from + /// the next unvisited states. In other words the initial state + /// does not play any role. If STOP_ON_ACC is used with + /// PROCESS_UNREACHABLE_STATES, the enumeration will stop as soon + /// as an SCC is found, but that SCC might not be reachable. + PROCESS_UNREACHABLE_STATES = 16, /// Default behavior: explore everything and track states and succs. ALL = TRACK_STATES | TRACK_SUCCS, }; diff --git a/tests/python/sccinfo.py b/tests/python/sccinfo.py index f8ade7e4b..197dd7254 100644 --- a/tests/python/sccinfo.py +++ b/tests/python/sccinfo.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et +# Copyright (C) 2017, 2021, 2022, 2023 Laboratoire de Recherche et # Développement de l'EPITA. # # This file is part of Spot, a model checking library. @@ -114,3 +114,40 @@ tc.assertTrue(si.is_accepting_scc(0)) tc.assertFalse(si.is_rejecting_scc(0)) tc.assertTrue(si.is_rejecting_scc(1)) tc.assertFalse(si.is_accepting_scc(1)) + +a = spot.automaton(""" +HOA: v1 +States: 4 +Start: 0 +AP: 1 "a" +Acceptance: 2 Inf(0)&Fin(1) +--BODY-- +State: 0 +[t] 0 {1} +[t] 1 {0} +State: 1 +[t] 1 {1} +[t] 0 {1} +State: 2 +[t] 2 {1} +[t] 3 {0} +State: 3 +[t] 3 {1} +[t] 2 +--END-- +""") +si = spot.scc_info(a) +si.determine_unknown_acceptance() +tc.assertEqual(si.scc_count(), 1) +tc.assertFalse(si.is_accepting_scc(0)) +tc.assertTrue(si.is_rejecting_scc(0)) +si = spot.scc_info_with_options \ + (a, + spot.scc_info_options_PROCESS_UNREACHABLE_STATES | + spot.scc_info_options_TRACK_STATES) +si.determine_unknown_acceptance() +tc.assertEqual(si.scc_count(), 2) +tc.assertTrue(si.is_accepting_scc(1)) +tc.assertFalse(si.is_rejecting_scc(1)) +tc.assertTrue(si.is_rejecting_scc(0)) +tc.assertFalse(si.is_accepting_scc(0)) From 9bf1edd80d8da5e7c8726e7b9397f74575f10e12 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Oct 2023 14:11:45 +0200 Subject: [PATCH 346/606] ltlsynt: add option --global-equivalence Fixes issue #529. * spot/tl/apcollect.hh, spot/tl/apcollect.cc (collect_equivalent_literals): New function. * python/spot/impl.i: Adjust. * spot/tl/formula.hh, spot/tl/formula.cc (formula_ptr_less_than_bool_first): New comparison function. * spot/twaalgos/aiger.hh, spot/twaalgos/aiger.cc: Adjust to deal with equivalent assignments. * bin/ltlsynt.cc: Implement the new option. * tests/core/ltlsynt.test: Adjust test cases. --- NEWS | 18 ++- bin/ltlsynt.cc | 257 ++++++++++++++++++++++++++++++++-------- python/spot/impl.i | 1 + spot/tl/apcollect.cc | 152 +++++++++++++++++++++++- spot/tl/apcollect.hh | 13 +- spot/tl/formula.cc | 9 +- spot/tl/formula.hh | 8 +- spot/twaalgos/aiger.cc | 66 +++++++++++ spot/twaalgos/aiger.hh | 9 ++ tests/core/ltlsynt.test | 52 ++++++-- 10 files changed, 515 insertions(+), 70 deletions(-) diff --git a/NEWS b/NEWS index 1da2a9cde..aa16106f3 100644 --- a/NEWS +++ b/NEWS @@ -16,13 +16,25 @@ New in spot 2.11.6.dev (not yet released) will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. - - ltlsynt will no check for output atomic propositions that always - have the same polarity in the specification. 
When this happens, - these output APs are replaced by true or false before running the + - ltlsynt will now check for atomic propositions that always have + the same polarity in the specification. When this happens, these + output APs are replaced by true or false before running the synthesis pipeline, and the resulting game, Mealy machine, or Aiger circuit is eventually patched to include that constant output. This can be disabled with --polarity=no. + - ltlsynt will now check for atomic propositions that are specified + as equivalent. When this is detected, equivalent atomic + propositions are replaced by one representative of their class, to + limit the number of different APs processed by the synthesis + pipeline. The resulting game, Mealy machine, or Aiger circuit is + eventually patched to include the removed APs. This optimization + can be disabled with --global-equivalence=no. As an exception, an + equivalence between input and output signals (such as G(in<->out)) + will be ignored if ltlsynt is configured to output a game (because + patching the game a posteriori is cumbersome if the equivalence + concerns different players). + Library: - The following new trivial simplifications have been implemented for SEREs: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 59fe52494..05d190994 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,7 @@ enum OPT_DECOMPOSE, OPT_DOT, OPT_FROM_PGAME, + OPT_GEQUIV, OPT_HIDE, OPT_INPUT, OPT_OUTPUT, @@ -105,6 +107,9 @@ static const argp_option options[] = { "polarity", OPT_POLARITY, "yes|no", 0, "whether to remove atomic propositions that always have the same " "polarity in the formula to speed things up (enabled by default)", 0 }, + { "global-equivalence", OPT_GEQUIV, "yes|no", 0, + "whether to remove atomic propositions that are always equivalent to " + "another one (enabled by default)", 0 }, { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0, "simplification to apply to the controller (no) nothing, " "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based " @@ -241,6 +246,7 @@ static bool decompose_values[] = ARGMATCH_VERIFY(decompose_args, decompose_values); bool opt_decompose_ltl = true; bool opt_polarity = true; +bool opt_gequiv = true; static const char* const simplify_args[] = { @@ -265,6 +271,11 @@ ARGMATCH_VERIFY(simplify_args, simplify_values); namespace { + static bool want_game() + { + return opt_print_pg || opt_print_hoa; + } + auto str_tolower = [] (std::string s) { std::transform(s.begin(), s.end(), s.begin(), @@ -272,12 +283,17 @@ namespace return s; }; + static void dispatch_print_hoa(spot::twa_graph_ptr& game, const std::vector* input_aps = nullptr, const spot::relabeling_map* rm = nullptr) { - if (rm && !rm->empty()) // Add any AP we removed + // Add any AP we removed. This is a game, so player moves are + // separated. Consequently at this point we cannot deal with + // removed signals such as "o1 <-> i2": if the game has to be + // printed, we can only optimize for signals such as o1 <-> o2. 
+ if (rm && !rm->empty()) { assert(input_aps); auto& sp = spot::get_state_players(game); @@ -294,6 +310,15 @@ namespace add &= bdd_ithvar(i); else if (v.is_ff()) add &= bdd_nithvar(i); + else + { + bdd bv; + if (v.is(spot::op::ap)) + bv = bdd_ithvar(game->register_ap(v.ap_name())); + else // Not Ap + bv = bdd_nithvar(game->register_ap(v[0].ap_name())); + add &= bdd_biimp(bdd_ithvar(i), bv); + } } for (auto& e: game->edges()) if (sp[e.src]) @@ -417,53 +442,156 @@ namespace return; if (first_dap) { - *gi->verbose_stream << ("the following APs are polarized, " - "they can be replaced by constants:\n"); + *gi->verbose_stream + << "the following signals can be temporarily removed:\n"; first_dap = false; } *gi->verbose_stream << " " << p << " := " << rm[p] <<'\n'; }; spot::formula oldf; - if (opt_polarity) - do - { - bool rm_has_new_terms = false; - std::set lits = spot::collect_litterals(f); - for (const std::string& ap: output_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos ^ has_neg) - { - rm[pos] = has_pos ? spot::formula::tt() : spot::formula::ff(); - rm_has_new_terms = true; - display_ap(pos); - } - } - for (const std::string& ap: input_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos ^ has_neg) - { - rm[pos] = has_neg ? spot::formula::tt() : spot::formula::ff(); - rm_has_new_terms = true; - display_ap(pos); - } - } - oldf = f; - if (rm_has_new_terms) - { - f = spot::relabel_apply(f, &rm); - if (gi->verbose_stream) - *gi->verbose_stream << "new formula: " << f << '\n'; - } - } - while (oldf != f); + if (opt_polarity || opt_gequiv) + { + robin_hood::unordered_set ap_inputs; + for (const std::string& ap: input_aps) + ap_inputs.insert(spot::formula::ap(ap)); + + do + { + bool rm_has_new_terms = false; + oldf = f; + + if (opt_polarity) + { + std::set lits = spot::collect_literals(f); + for (const std::string& ap: output_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos ^ has_neg) + { + rm[pos] = + has_pos ? spot::formula::tt() : spot::formula::ff(); + rm_has_new_terms = true; + display_ap(pos); + } + } + for (const std::string& ap: input_aps) + { + spot::formula pos = spot::formula::ap(ap); + spot::formula neg = spot::formula::Not(pos); + bool has_pos = lits.find(pos) != lits.end(); + bool has_neg = lits.find(neg) != lits.end(); + if (has_pos ^ has_neg) + { + rm[pos] = + has_neg ? spot::formula::tt() : spot::formula::ff(); + rm_has_new_terms = true; + display_ap(pos); + } + } + if (rm_has_new_terms) + { + f = spot::relabel_apply(f, &rm); + if (gi->verbose_stream) + *gi->verbose_stream << "new formula: " << f << '\n'; + rm_has_new_terms = false; + } + } + if (opt_gequiv) + { + // check for equivalent terms + spot::formula_ptr_less_than_bool_first cmp; + for (std::vector& equiv: + spot::collect_equivalent_literals(f)) + { + // For each set of equivalent literals, we want to + // pick a representative. That representative + // should be an input if one of the literal is an + // input. (If we have two inputs or more, the + // formula is not realizable.) 
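+                      // (For instance a specification with a conjunct
+                      // G(i1 <-> i2), where i1 and i2 are both inputs,
+                      // cannot be enforced by any controller: the
+                      // environment alone decides whether i1 and i2
+                      // agree.)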
+ spot::formula repr = nullptr; + bool repr_is_input = false; + spot::formula input_seen = nullptr; + for (spot::formula lit: equiv) + { + spot::formula ap = lit; + if (ap.is(spot::op::Not)) + ap = ap[0]; + if (ap_inputs.find(ap) != ap_inputs.end()) + { + if (input_seen) + { + // ouch! we have two equivalent inputs. + // This means the formula is simply + // unrealizable. Make it false for the + // rest of the algorithm. + f = spot::formula::ff(); + goto done; + } + input_seen = lit; + // Normally, we want the input to be the + // representative. However as a special + // case, we ignore the input literal from + // the set if we are asked to print a + // game. Fixing the game to add a i<->o + // equivalence would require more code + // than I care to write. + // + // So if the set was {i,o1,o2}, instead + // of the desirable + // o1 := i + // o2 := i + // we only do + // o2 := o1 + // when printing games. + if (!want_game()) + { + repr_is_input = true; + repr = lit; + } + } + else if (!repr_is_input && (!repr || cmp(ap, repr))) + repr = lit; + } + // now map equivalent each atomic proposition to the + // representative + spot::formula not_repr = spot::formula::Not(repr); + for (spot::formula lit: equiv) + { + // input or representative are not removed + // (we have repr != input_seen either when input_seen + // is nullptr, or if want_game is true) + if (lit == repr || lit == input_seen) + continue; + if (lit.is(spot::op::Not)) + { + spot::formula ap = lit[0]; + rm[ap] = not_repr; + display_ap(ap); + } + else + { + rm[lit] = repr; + display_ap(lit); + } + rm_has_new_terms = true; + } + } + if (rm_has_new_terms) + { + f = spot::relabel_apply(f, &rm); + if (gi->verbose_stream) + *gi->verbose_stream << "new formula: " << f << '\n'; + rm_has_new_terms = false; + } + } + } + while (oldf != f); + done: + /* can't have a label followed by closing brace */; + } std::vector sub_form; std::vector> sub_outs; @@ -510,8 +638,6 @@ namespace assert((sub_form.size() == sub_outs.size()) && (sub_form.size() == sub_outs_str.size())); - const bool want_game = opt_print_pg || opt_print_hoa; - std::vector arenas; auto sub_f = sub_form.begin(); @@ -528,7 +654,7 @@ namespace }; // If we want to print a game, // we never use the direct approach - if (!want_game && opt_bypass) + if (!want_game() && opt_bypass) m_like = spot::try_create_direct_strategy(*sub_f, *sub_o, *gi, !opt_real); @@ -555,7 +681,7 @@ namespace assert((spptr->at(arena->get_init_state_number()) == false) && "Env needs first turn"); } - if (want_game) + if (want_game()) { dispatch_print_hoa(arena, &input_aps, &rm); continue; @@ -615,7 +741,7 @@ namespace } // If we only wanted to print the game we are done - if (want_game) + if (want_game()) { safe_tot_time(); return 0; @@ -681,6 +807,7 @@ namespace if (!rm.empty()) // Add any AP we removed { bdd add = bddtrue; + bdd additional_outputs = bddtrue; for (auto [k, v]: rm) { int i = tot_strat->register_ap(k); @@ -689,15 +816,39 @@ namespace != input_aps.end()) continue; if (v.is_tt()) - add &= bdd_ithvar(i); + { + bdd bv = bdd_ithvar(i); + additional_outputs &= bv; + add &= bv; + } else if (v.is_ff()) - add &= bdd_nithvar(i); + { + additional_outputs &= bdd_ithvar(i); + add &= bdd_nithvar(i); + } + else + { + bdd left = bdd_ithvar(i); // this is necessarily an output + additional_outputs &= left; + bool pos = v.is(spot::op::ap); + const std::string apname = + pos ? 
v.ap_name() : v[0].ap_name(); + bdd right = bdd_ithvar(tot_strat->register_ap(apname)); + // right might be an input + if (std::find(input_aps.begin(), input_aps.end(), apname) + == input_aps.end()) + additional_outputs &= right; + if (pos) + add &= bdd_biimp(left, right); + else + add &= bdd_xor(left, right); + } } for (auto& e: tot_strat->edges()) e.cond &= add; set_synthesis_outputs(tot_strat, get_synthesis_outputs(tot_strat) - & bdd_support(add)); + & additional_outputs); } printer.print(tot_strat, timer_printer_dummy); } @@ -1052,6 +1203,10 @@ parse_opt(int key, char *arg, struct argp_state *) case OPT_FROM_PGAME: jobs.emplace_back(arg, job_type::AUT_FILENAME); break; + case OPT_GEQUIV: + opt_gequiv = XARGMATCH("--global-equivalence", arg, + decompose_args, decompose_values); + break; case OPT_HIDE: show_status = false; break; diff --git a/python/spot/impl.i b/python/spot/impl.i index 2408486e6..668ccff89 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -524,6 +524,7 @@ namespace std { %template(vectorint) vector; %template(pair_formula_vectorstring) pair>; %template(atomic_prop_set) set; + %template(vectorofvectorofformulas) vector>; %template(setunsigned) set; %template(relabeling_map) map; } diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 74790f1c4..87d5e5b3e 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -21,9 +21,13 @@ // along with this program. If not, see . #include "config.h" +#include #include #include +#include #include +#include +#include namespace spot { @@ -64,7 +68,7 @@ namespace spot return res; } - atomic_prop_set collect_litterals(formula f) + atomic_prop_set collect_literals(formula f) { atomic_prop_set res; @@ -131,4 +135,150 @@ namespace spot return res; } + std::vector> + collect_equivalent_literals(formula f) + { + std::map l2s; + // represent the implication graph as a twa_graph so we cab reuse + // scc_info. Literals can be converted to states using the l2s + // map. 
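+    // For instance, on G(a <-> b) the op::Equiv case below records the
+    // edges a->b, !b->!a, !a->!b and b->a, so scc_info later groups
+    // {a, b} and {!a, !b} into two dual equivalence classes.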
+ twa_graph_ptr igraph = make_twa_graph(make_bdd_dict()); + + auto state_of = [&](formula a) + { + auto [it, b] = l2s.insert({a, 0}); + if (b) + it->second = igraph->new_state(); + return it->second; + }; + + auto implies = [&](formula a, formula b) + { + unsigned pos_a = state_of(a); + unsigned neg_a = state_of(formula::Not(a)); + unsigned pos_b = state_of(b); + unsigned neg_b = state_of(formula::Not(b)); + igraph->new_edge(pos_a, pos_b, bddtrue); + igraph->new_edge(neg_b, neg_a, bddtrue); + }; + + auto collect = [&](formula f, bool in_g, auto self) + { + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::eword: + case op::ap: + case op::UConcat: + case op::Not: + case op::NegClosure: + case op::NegClosureMarked: + case op::U: + case op::R: + case op::W: + case op::M: + case op::EConcat: + case op::EConcatMarked: + case op::X: + case op::F: + case op::Closure: + case op::OrRat: + case op::AndRat: + case op::AndNLM: + case op::Concat: + case op::Fusion: + case op::Star: + case op::FStar: + case op::first_match: + case op::strong_X: + return; + case op::Xor: + if (in_g && f[0].is_literal() && f[1].is_literal()) + { + implies(f[0], formula::Not(f[1])); + implies(formula::Not(f[0]), f[1]); + } + return; + case op::Equiv: + if (in_g && f[0].is_literal() && f[1].is_literal()) + { + implies(f[0], f[1]); + implies(formula::Not(f[0]), formula::Not(f[1])); + } + return; + case op::Implies: + if (in_g && f[0].is_literal() && f[1].is_literal()) + implies(f[0], f[1]); + return; + case op::G: + self(f[0], true, self); + return; + case op::Or: + if (f.size() == 2 && f[0].is_literal() && f[1].is_literal()) + implies(formula::Not(f[0]), f[1]); + return; + case op::And: + for (formula c: f) + self(c, in_g, self); + return; + } + }; + collect(f, false, collect); + + scc_info si(igraph, scc_info_options::PROCESS_UNREACHABLE_STATES); + + // print_hoa(std::cerr, igraph); + + // Build sets of equivalent literals. + unsigned nscc = si.scc_count(); + std::vector> scc(nscc); + for (auto [f, i]: l2s) + scc[si.scc_of(i)].push_back(f); + + // For each set, we will decide if we keep it or not. + std::vector keep(nscc, true); + + for (unsigned i = 0; i < nscc; ++i) + { + if (keep[i] == false) + continue; + // We don't keep trivial SCCs + if (scc[i].size() <= 1) + { + keep[i] = false; + continue; + } + // Each SCC will appear twice. Because if {a,!b,c,!d,!e} are + // equivalent literals, then so are {!a,b,!c,d,e}. We will + // keep the SCC with the fewer negation if we can. + unsigned neg_count = 0; + for (formula f: scc[i]) + { + SPOT_ASSUME(f != nullptr); + neg_count += f.is(op::Not); + } + if (neg_count > scc[i].size()/2) + { + keep[i] = false; + continue; + } + // We will keep the current SCC. Just + // mark the dual one for removal. 
+ keep[si.scc_of(state_of(formula::Not(*scc[i].begin())))] = false; + } + // purge unwanted SCCs + unsigned j = 0; + for (unsigned i = 0; i < nscc; ++i) + { + if (keep[i] == false) + continue; + if (i > j) + scc[j] = std::move(scc[i]); + ++j; + } + scc.resize(j); + return scc; + } + } diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index 42788dc9c..7cc8ccb3c 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -24,6 +24,7 @@ #include #include +#include #include #include @@ -60,14 +61,22 @@ namespace spot atomic_prop_collect_as_bdd(formula f, const twa_ptr& a); - /// \brief Collect the litterals occuring in f + /// \brief Collect the literals occuring in f /// /// This function records each atomic proposition occurring in f /// along with the polarity of its occurrence. For instance if the /// formula is `G(a -> b) & X(!b & c)`, then this will output `{!a, /// b, !b, c}`. SPOT_API - atomic_prop_set collect_litterals(formula f); + atomic_prop_set collect_literals(formula f); + /// \brief Collect equivalent APs + /// + /// Looks for patterns like `...&G(...&(x->y)&...)&...` or + /// other forms of constant implications, then build a graph + /// of implications to compute equivalence classes of literals. + SPOT_API + std::vector> + collect_equivalent_literals(formula f); /// @} } diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 370a50e8f..294c8cb5b 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2021, 2022 Laboratoire de Recherche et +// Copyright (C) 2015-2019, 2021, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -2071,4 +2071,11 @@ namespace spot { return print_psl(os, f); } + + bool + formula_ptr_less_than_bool_first::operator()(const formula& left, + const formula& right) const + { + return operator()(left.ptr_, right.ptr_); + } } diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 0c7377e1c..7b7a5c174 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -652,6 +652,8 @@ namespace spot SPOT_API int atomic_prop_cmp(const fnode* f, const fnode* g); + class SPOT_API formula; + struct formula_ptr_less_than_bool_first { bool @@ -718,7 +720,10 @@ namespace spot right->dump(ord); return old.str() < ord.str(); } - }; + + SPOT_API bool + operator()(const formula& left, const formula& right) const; +}; #endif // SWIG @@ -726,6 +731,7 @@ namespace spot /// \brief Main class for temporal logic formula class SPOT_API formula final { + friend struct formula_ptr_less_than_bool_first; const fnode* ptr_; public: /// \brief Create a formula from an fnode. diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index e4ba12444..455400f7d 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1986,6 +1986,21 @@ namespace for (unsigned i = 0; i < n_outs; ++i) circuit.set_output(i, bdd2var_min(out[i], out_dc[i])); // Add the unused propositions + // + // RM contains assignments like + // out1 := 1 + // out2 := 0 + // out3 := in1 + // out4 := !out3 + // but it is possible that the rhs could refer to a variable + // that is not yet defined because of the ordering. For + // this reason, the first pass will store signals it could not + // complete in the POSTPONE vector. + // + // In that vector, (u,v,b) means that output u should be mapped to + // the same formula as output v, possibly negated (if b). 
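+      // The fixpoint loop further below keeps re-scanning POSTPONE:
+      // each pass defines the outputs whose representative has become
+      // available, and we give up (and throw) if a pass makes no
+      // progress.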
+ std::vector> postpone; + const unsigned n_outs_all = output_names_all.size(); for (unsigned i = n_outs; i < n_outs_all; ++i) if (rm) @@ -2003,10 +2018,61 @@ namespace circuit.set_output(i, circuit.aig_false()); continue; } + else + { + formula repr = to->second; + bool neg_repr = false; + if (repr.is(op::Not)) + { + neg_repr = true; + repr = repr[0]; + } + // is repr an input? + if (auto it = std::find(input_names_all.begin(), + input_names_all.end(), + repr.ap_name()); + it != input_names_all.end()) + { + unsigned ivar = + circuit.input_var(it - input_names_all.begin(), + neg_repr); + circuit.set_output(i, ivar); + } + // is repr an output? + else if (auto it = std::find(output_names_all.begin(), + output_names_all.end(), + repr.ap_name()); + it != output_names_all.end()) + { + unsigned outnum = it - output_names_all.begin(); + unsigned outvar = circuit.output(outnum); + if (outvar == -1u) + postpone.emplace_back(i, outnum, neg_repr); + else + circuit.set_output(i, outvar + neg_repr); + } + } } } else circuit.set_output(i, circuit.aig_false()); + unsigned postponed = postpone.size(); + while (postponed) + { + unsigned postponed_again = 0; + for (auto [u, v, b]: postpone) + { + unsigned outvar = circuit.output(v); + if (outvar == -1u) + ++postponed_again; + else + circuit.set_output(u, outvar + b); + } + if (postponed_again >= postponed) + throw std::runtime_error("aiger encoding bug: " + "postponed output shunts not decreasing"); + postponed = postponed_again; + } for (unsigned i = 0; i < n_latches; ++i) circuit.set_next_latch(i, bdd2var_min(latch[i], bddfalse)); return circuit_ptr; diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index 77ef2d827..9f55f3f00 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -174,6 +174,15 @@ namespace spot [](unsigned o){return o == -1u; })); return outputs_; } + + /// \brief return the variable associated to output \a num + /// + /// This will be equal to -1U if aig::set_output() hasn't been called. + unsigned output(unsigned num) const + { + return outputs_[num]; + } + /// \brief Get the set of output names const std::vector& output_names() const { diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index cd48cf18e..d6f5815f5 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -227,7 +227,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < G(i1 <-> o0) @@ -638,16 +638,15 @@ grep "one of --ins or --outs" stderr # Try to find a direct strategy for GFa <-> GFb and a direct strategy for # Gc cat >exp < GFb +there are 1 subformulas trying to create strategy directly for GFa <-> GFb tanslating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds -trying to create strategy directly for G(c <-> d) -direct strategy was found. 
-direct strat has 1 states, 1 edges and 0 colors -simplification took X seconds EOF ltlsynt -f '(GFa <-> GFb) && (G(c <-> d))' --outs=b,c --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -658,14 +657,15 @@ diff outx exp for f in "(GFa <-> GFb) & G(c <-> d)" "(GFb <-> GFa) & G(c <-> d)" \ "G(c <-> d) & (GFa <-> GFb)" "G(c <-> d) & (GFb <-> GFa)" do -cat >exp <exp < out + ltlsynt -f "$f" --outs=b,c --verbose --decompose=0 \ + --global-equiv=no --verify 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp done @@ -673,7 +673,7 @@ done # # Ltlsynt should be able to detect that G(a&c) is not input-complete so it is # # impossible to find a strategy. cat >exp < GFa) & Ga trying to create strategy directly for (GFb <-> GFa) & Ga @@ -763,7 +763,7 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < outx diff outx exp cat >exp < GFo1 there are 1 subformulas @@ -1038,6 +1038,36 @@ ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +# Test the loop around polarity/global-equiv +cat >exp < o) & G(o <-> o2) & G(!o | !o3) & GFo3 + o := i + o2 := i +new formula: GFo3 & G(!i | !o3) + i := 1 +new formula: GFo3 & G!o3 +there are 1 subformulas +trying to create strategy directly for GFo3 & G!o3 +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 1 states and 0 colors +LAR construction done in X seconds +DPA has 1 states, 0 colors +split inputs and outputs done in X seconds +automaton has 3 states +solving game with acceptance: co-Büchi +game solved in X seconds +UNREALIZABLE +EOF +ltlsynt -f 'G(o<->i) & G(o2 <-> o) & G(!o | !o3) & G(r3 -> Fo3)' \ + --ins=i,r3 --verbose 2>out 1>&2 && exit 1 +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + + # Test --dot and --hide-status ltlsynt -f 'i <-> Fo' --ins=i --aiger --dot | grep arrowhead=dot ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot | grep 'shape="diamond"' From 02f9f0a1c936fcea505c0288e4d8d16942841a39 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 3 Oct 2023 16:25:19 +0200 Subject: [PATCH 347/606] * .gitlab-ci.yml: Activate Raspbian again. --- .gitlab-ci.yml | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 424f7bc21..b0d4f48ed 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -459,25 +459,22 @@ publish-unstable: - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline -# The SD card of our Raspberry failed. Disable this job until we -# can make it work again. -# -# raspbian: -# stage: build -# only: -# - branches -# except: -# - /wip/ -# tags: -# - armv7 -# script: -# - autoreconf -vfi -# - ./configure -# - make -# - make distcheck || { chmod -R u+w ./spot-*; false; } -# artifacts: -# when: always -# paths: -# - ./spot-*/_build/sub/tests/*/*.log -# - ./*.log -# - ./*.tar.gz +raspbian: + stage: build + only: + - branches + except: + - /wip/ + tags: + - armv7 + script: + - autoreconf -vfi + - ./configure + - make + - make distcheck || { chmod -R u+w ./spot-*; false; } + artifacts: + when: always + paths: + - ./spot-*/_build/sub/tests/*/*.log + - ./*.log + - ./*.tar.gz From 9e40a32fd1cdaf11b2b80c711fe55077f4bea859 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 Oct 2023 17:22:35 +0200 Subject: [PATCH 348/606] * .gitlab-ci.yml: Add a centos7 build. 
--- .gitlab-ci.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b0d4f48ed..c9ed140ff 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -478,3 +478,29 @@ raspbian: - ./spot-*/_build/sub/tests/*/*.log - ./*.log - ./*.tar.gz + +centos7: + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + GIT_STRATEGY: none + only: + - branches + except: + - /wip/ + image: gitlab-registry.lre.epita.fr/spot/buildenv/centos7 + script: + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - mkdir build-$VERSION + - cd build-$VERSION + - scl enable devtoolset-7 "../spot-$VERSION/configure --enable-devel" + - scl enable devtoolset-7 make + - scl enable devtoolset-7 "make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-devel --enable-optimizations'" + artifacts: + when: on_failure + paths: + - build-*/spot-*/_build/sub/tests/*/*.log + - build-*/*.log From f2d034130a04f01c0a529780bddedd99abcb9852 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 9 Oct 2023 17:53:12 +0200 Subject: [PATCH 349/606] introduce realizability_simplifier to share more of ltlsynt's code * spot/tl/apcollect.hh, spot/tl/apcollect.cc (realizability_simplifier): New class, built from code existing in ltlsynt, so that other tools may use this too. * bin/ltlsynt.cc: Use realizability_simplifier. * spot/twaalgos/aiger.cc, spot/twaalgos/aiger.hh: Adjust to use realizability_simplifier instead of relabeling_map. * NEWS: Mention the new class. --- NEWS | 4 + bin/ltlsynt.cc | 279 +++++------------------------------------ spot/tl/apcollect.cc | 231 ++++++++++++++++++++++++++++++++++ spot/tl/apcollect.hh | 46 +++++++ spot/twaalgos/aiger.cc | 142 ++++++++++----------- spot/twaalgos/aiger.hh | 14 +-- 6 files changed, 386 insertions(+), 330 deletions(-) diff --git a/NEWS b/NEWS index aa16106f3..cc61c1236 100644 --- a/NEWS +++ b/NEWS @@ -100,6 +100,10 @@ New in spot 2.11.6.dev (not yet released) - spot::scc_info has a new option PROCESS_UNREACHABLE_STATES that causes it to enumerated even unreachable SCCs. + - spot::realizability_simplifier is a new class that performs the + removal of superfluous APs that is now performed by ltlsynt + (search for --polarity and --global-equivalence above). + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 05d190994..0ff44625d 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -286,47 +286,15 @@ namespace static void dispatch_print_hoa(spot::twa_graph_ptr& game, - const std::vector* input_aps = nullptr, - const spot::relabeling_map* rm = nullptr) + const spot::realizability_simplifier* rs = nullptr) { // Add any AP we removed. This is a game, so player moves are // separated. Consequently at this point we cannot deal with // removed signals such as "o1 <-> i2": if the game has to be // printed, we can only optimize for signals such as o1 <-> o2. 
- if (rm && !rm->empty()) - { - assert(input_aps); - auto& sp = spot::get_state_players(game); + if (rs) + rs->patch_game(game); - bdd add = bddtrue; - for (auto [k, v]: *rm) - { - int i = game->register_ap(k); - // skip inputs - if (std::find(input_aps->begin(), input_aps->end(), - k.ap_name()) != input_aps->end()) - continue; - if (v.is_tt()) - add &= bdd_ithvar(i); - else if (v.is_ff()) - add &= bdd_nithvar(i); - else - { - bdd bv; - if (v.is(spot::op::ap)) - bv = bdd_ithvar(game->register_ap(v.ap_name())); - else // Not Ap - bv = bdd_nithvar(game->register_ap(v[0].ap_name())); - add &= bdd_biimp(bdd_ithvar(i), bv); - } - } - for (auto& e: game->edges()) - if (sp[e.src]) - e.cond &= add; - set_synthesis_outputs(game, - get_synthesis_outputs(game) - & bdd_support(add)); - } if (opt_dot) spot::print_dot(std::cout, game, opt_print_hoa_args); else if (opt_print_pg) @@ -429,168 +397,26 @@ namespace gi->bv->total_time = sw.stop(); }; - // Check if some output propositions are always in positive form, - // or always in negative form. - // In syntcomp, this occurs more frequently for input variables than - // output variable. See issue #529 for some examples. - - spot::relabeling_map rm; - bool first_dap = true; - auto display_ap = [&rm, &first_dap](spot::formula p) - { - if (SPOT_LIKELY(!gi->verbose_stream)) - return; - if (first_dap) - { - *gi->verbose_stream - << "the following signals can be temporarily removed:\n"; - first_dap = false; - } - *gi->verbose_stream << " " << p << " := " << rm[p] <<'\n'; - }; - spot::formula oldf; + // Attempt to remove superfluous atomic propositions + spot::realizability_simplifier* rs = nullptr; if (opt_polarity || opt_gequiv) { - robin_hood::unordered_set ap_inputs; - for (const std::string& ap: input_aps) - ap_inputs.insert(spot::formula::ap(ap)); - - do + unsigned opt = 0; + if (opt_polarity) + opt |= spot::realizability_simplifier::polarity; + if (opt_gequiv) { - bool rm_has_new_terms = false; - oldf = f; - - if (opt_polarity) - { - std::set lits = spot::collect_literals(f); - for (const std::string& ap: output_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos ^ has_neg) - { - rm[pos] = - has_pos ? spot::formula::tt() : spot::formula::ff(); - rm_has_new_terms = true; - display_ap(pos); - } - } - for (const std::string& ap: input_aps) - { - spot::formula pos = spot::formula::ap(ap); - spot::formula neg = spot::formula::Not(pos); - bool has_pos = lits.find(pos) != lits.end(); - bool has_neg = lits.find(neg) != lits.end(); - if (has_pos ^ has_neg) - { - rm[pos] = - has_neg ? spot::formula::tt() : spot::formula::ff(); - rm_has_new_terms = true; - display_ap(pos); - } - } - if (rm_has_new_terms) - { - f = spot::relabel_apply(f, &rm); - if (gi->verbose_stream) - *gi->verbose_stream << "new formula: " << f << '\n'; - rm_has_new_terms = false; - } - } - if (opt_gequiv) - { - // check for equivalent terms - spot::formula_ptr_less_than_bool_first cmp; - for (std::vector& equiv: - spot::collect_equivalent_literals(f)) - { - // For each set of equivalent literals, we want to - // pick a representative. That representative - // should be an input if one of the literal is an - // input. (If we have two inputs or more, the - // formula is not realizable.) 
- spot::formula repr = nullptr; - bool repr_is_input = false; - spot::formula input_seen = nullptr; - for (spot::formula lit: equiv) - { - spot::formula ap = lit; - if (ap.is(spot::op::Not)) - ap = ap[0]; - if (ap_inputs.find(ap) != ap_inputs.end()) - { - if (input_seen) - { - // ouch! we have two equivalent inputs. - // This means the formula is simply - // unrealizable. Make it false for the - // rest of the algorithm. - f = spot::formula::ff(); - goto done; - } - input_seen = lit; - // Normally, we want the input to be the - // representative. However as a special - // case, we ignore the input literal from - // the set if we are asked to print a - // game. Fixing the game to add a i<->o - // equivalence would require more code - // than I care to write. - // - // So if the set was {i,o1,o2}, instead - // of the desirable - // o1 := i - // o2 := i - // we only do - // o2 := o1 - // when printing games. - if (!want_game()) - { - repr_is_input = true; - repr = lit; - } - } - else if (!repr_is_input && (!repr || cmp(ap, repr))) - repr = lit; - } - // now map equivalent each atomic proposition to the - // representative - spot::formula not_repr = spot::formula::Not(repr); - for (spot::formula lit: equiv) - { - // input or representative are not removed - // (we have repr != input_seen either when input_seen - // is nullptr, or if want_game is true) - if (lit == repr || lit == input_seen) - continue; - if (lit.is(spot::op::Not)) - { - spot::formula ap = lit[0]; - rm[ap] = not_repr; - display_ap(ap); - } - else - { - rm[lit] = repr; - display_ap(lit); - } - rm_has_new_terms = true; - } - } - if (rm_has_new_terms) - { - f = spot::relabel_apply(f, &rm); - if (gi->verbose_stream) - *gi->verbose_stream << "new formula: " << f << '\n'; - rm_has_new_terms = false; - } - } + if (want_game()) + opt |= spot::realizability_simplifier::global_equiv_output_only; + else + opt |= spot::realizability_simplifier::global_equiv; } - while (oldf != f); - done: - /* can't have a label followed by closing brace */; + rs = + new spot::realizability_simplifier(original_f, + input_aps, + opt, + gi ? 
gi->verbose_stream : nullptr); + f = rs->simplified_formula(); } std::vector sub_form; @@ -617,11 +443,18 @@ namespace { sub_form = { f }; sub_outs.resize(1); - for (const std::string& apstr: output_aps) + if (rs) { - spot::formula ap = spot::formula::ap(apstr); - if (rm.find(ap) == rm.end()) - sub_outs[0].insert(ap); + robin_hood::unordered_set removed_outputs; + for (auto [from, from_is_input, to] : rs->get_mapping()) + if (!from_is_input) + removed_outputs.insert(from); + for (const std::string& apstr: output_aps) + { + spot::formula ap = spot::formula::ap(apstr); + if (removed_outputs.find(ap) == removed_outputs.end()) + sub_outs[0].insert(ap); + } } } std::vector> sub_outs_str; @@ -683,7 +516,7 @@ namespace } if (want_game()) { - dispatch_print_hoa(arena, &input_aps, &rm); + dispatch_print_hoa(arena, rs); continue; } if (!spot::solve_game(arena, *gi)) @@ -772,7 +605,7 @@ namespace sw2.start(); saig = spot::mealy_machines_to_aig(mealy_machines, opt_print_aiger, input_aps, - sub_outs_str, &rm); + sub_outs_str, rs); if (gi->bv) { gi->bv->aig_time = sw2.stop(); @@ -804,52 +637,8 @@ namespace for (size_t i = 1; i < mealy_machines.size(); ++i) tot_strat = spot::mealy_product(tot_strat, mealy_machines[i].mealy_like); - if (!rm.empty()) // Add any AP we removed - { - bdd add = bddtrue; - bdd additional_outputs = bddtrue; - for (auto [k, v]: rm) - { - int i = tot_strat->register_ap(k); - // skip inputs (they are don't care) - if (std::find(input_aps.begin(), input_aps.end(), k.ap_name()) - != input_aps.end()) - continue; - if (v.is_tt()) - { - bdd bv = bdd_ithvar(i); - additional_outputs &= bv; - add &= bv; - } - else if (v.is_ff()) - { - additional_outputs &= bdd_ithvar(i); - add &= bdd_nithvar(i); - } - else - { - bdd left = bdd_ithvar(i); // this is necessarily an output - additional_outputs &= left; - bool pos = v.is(spot::op::ap); - const std::string apname = - pos ? 
v.ap_name() : v[0].ap_name(); - bdd right = bdd_ithvar(tot_strat->register_ap(apname)); - // right might be an input - if (std::find(input_aps.begin(), input_aps.end(), apname) - == input_aps.end()) - additional_outputs &= right; - if (pos) - add &= bdd_biimp(left, right); - else - add &= bdd_xor(left, right); - } - } - for (auto& e: tot_strat->edges()) - e.cond &= add; - set_synthesis_outputs(tot_strat, - get_synthesis_outputs(tot_strat) - & additional_outputs); - } + if (rs) // Add any AP we removed + rs->patch_mealy(tot_strat); printer.print(tot_strat, timer_printer_dummy); } diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 87d5e5b3e..0a2c2d259 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -28,6 +28,9 @@ #include #include #include +#include +#include +#include namespace spot { @@ -281,4 +284,232 @@ namespace spot return scc; } + realizability_simplifier::realizability_simplifier + (formula f, const std::vector& inputs, + unsigned options, std::ostream* verbose) + { + bool first_mapping = true; + relabeling_map rm; + auto add_to_mapping = [&](formula from, bool from_is_input, formula to) + { + mapping_.emplace_back(from, from_is_input, to); + rm[from] = to; + if (SPOT_LIKELY(!verbose)) + return; + if (first_mapping) + { + *verbose << "the following signals can be temporarily removed:\n"; + first_mapping = false; + } + *verbose << " " << from << " := " << to <<'\n'; + }; + global_equiv_output_only_ = + (options & global_equiv_output_only) == global_equiv_output_only; + + robin_hood::unordered_set ap_inputs; + for (const std::string& ap: inputs) + ap_inputs.insert(spot::formula::ap(ap)); + + formula oldf; + f_ = f; + do + { + bool rm_has_new_terms = false; + oldf = f_; + + if (options & polarity) + { + // Check if some output propositions are always in + // positive form, or always in negative form. In + // syntcomp, this occurs more frequently for input + // variables than output variable. See issue #529 for + // some examples. + std::set lits = spot::collect_literals(f_); + for (const formula& lit: lits) + if (lits.find(spot::formula::Not(lit)) == lits.end()) + { + formula ap = lit; + bool neg = false; + if (lit.is(op::Not)) + { + ap = lit[0]; + neg = true; + } + bool is_input = ap_inputs.find(ap) != ap_inputs.end(); + formula to = (is_input == neg) + ? spot::formula::tt() : spot::formula::ff(); + add_to_mapping(ap, is_input, to); + rm_has_new_terms = true; + } + if (rm_has_new_terms) + { + f_ = spot::relabel_apply(f_, &rm); + if (verbose) + *verbose << "new formula: " << f_ << '\n'; + rm_has_new_terms = false; + } + } + if (options & global_equiv) + { + // check for equivalent terms + spot::formula_ptr_less_than_bool_first cmp; + for (std::vector& equiv: + spot::collect_equivalent_literals(f_)) + { + // For each set of equivalent literals, we want to + // pick a representative. That representative + // should be an input if one of the literal is an + // input. (If we have two inputs or more, the + // formula is not realizable.) + spot::formula repr = nullptr; + bool repr_is_input = false; + spot::formula input_seen = nullptr; + for (spot::formula lit: equiv) + { + spot::formula ap = lit; + if (ap.is(spot::op::Not)) + ap = ap[0]; + if (ap_inputs.find(ap) != ap_inputs.end()) + { + if (input_seen) + { + // ouch! we have two equivalent inputs. + // This means the formula is simply + // unrealizable. Make it false for the + // rest of the algorithm. 
+ f = spot::formula::ff(); + return; + } + input_seen = lit; + // Normally, we want the input to be the + // representative. However as a special + // case, we ignore the input literal from + // the set if we are asked to print a + // game. Fixing the game to add a i<->o + // equivalence would require more code + // than I care to write. + // + // So if the set was {i,o1,o2}, instead + // of the desirable + // o1 := i + // o2 := i + // we only do + // o2 := o1 + // when printing games. + if (!global_equiv_output_only_) + { + repr_is_input = true; + repr = lit; + } + } + else if (!repr_is_input && (!repr || cmp(ap, repr))) + repr = lit; + } + // now map equivalent each atomic proposition to the + // representative + spot::formula not_repr = spot::formula::Not(repr); + for (spot::formula lit: equiv) + { + // input or representative are not removed + // (we have repr != input_seen either when input_seen + // is nullptr, or if want_game is true) + if (lit == repr || lit == input_seen) + continue; + SPOT_ASSUME(lit != nullptr); + if (lit.is(spot::op::Not)) + add_to_mapping(lit[0], repr_is_input, not_repr); + else + add_to_mapping(lit, repr_is_input, repr); + rm_has_new_terms = true; + } + } + if (rm_has_new_terms) + { + f_ = spot::relabel_apply(f_, &rm); + if (verbose) + *verbose << "new formula: " << f_ << '\n'; + rm_has_new_terms = false; + } + } + } + while (oldf != f_); + } + + void realizability_simplifier::patch_mealy(twa_graph_ptr mealy) const + { + bdd add = bddtrue; + bdd additional_outputs = bddtrue; + for (auto [k, k_is_input, v]: mapping_) + { + int i = mealy->register_ap(k); + // skip inputs (they are don't care) + if (k_is_input) + continue; + if (v.is_tt()) + { + bdd bv = bdd_ithvar(i); + additional_outputs &= bv; + add &= bv; + } + else if (v.is_ff()) + { + additional_outputs &= bdd_ithvar(i); + add &= bdd_nithvar(i); + } + else + { + bdd left = bdd_ithvar(i); // this is necessarily an output + additional_outputs &= left; + bool pos = v.is(spot::op::ap); + const std::string apname = + pos ? 
v.ap_name() : v[0].ap_name(); + bdd right = bdd_ithvar(mealy->register_ap(apname)); + if (pos) + add &= bdd_biimp(left, right); + else + add &= bdd_xor(left, right); + } + } + for (auto& e: mealy->edges()) + e.cond &= add; + set_synthesis_outputs(mealy, + get_synthesis_outputs(mealy) + & additional_outputs); + } + + void realizability_simplifier::patch_game(twa_graph_ptr game) const + { + if (SPOT_UNLIKELY(!global_equiv_output_only_)) + throw std::runtime_error("realizability_simplifier::path_game() requires " + "option global_equiv_output_only"); + + auto& sp = spot::get_state_players(game); + bdd add = bddtrue; + for (auto [k, k_is_input, v]: mapping_) + { + int i = game->register_ap(k); + if (k_is_input) + continue; + if (v.is_tt()) + add &= bdd_ithvar(i); + else if (v.is_ff()) + add &= bdd_nithvar(i); + else + { + bdd bv; + if (v.is(spot::op::ap)) + bv = bdd_ithvar(game->register_ap(v.ap_name())); + else // Not Ap + bv = bdd_nithvar(game->register_ap(v[0].ap_name())); + add &= bdd_biimp(bdd_ithvar(i), bv); + } + } + for (auto& e: game->edges()) + if (sp[e.src]) + e.cond &= add; + set_synthesis_outputs(game, + get_synthesis_outputs(game) + & bdd_support(add)); + } + } diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index 7cc8ccb3c..b155de551 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -78,5 +78,51 @@ namespace spot SPOT_API std::vector> collect_equivalent_literals(formula f); + + + + /// \brief Simplify a reactive specification, preserving realizability + class SPOT_API realizability_simplifier final + { + public: + enum realizability_simplifier_option { + /// \brief remove APs with single polarity + polarity = 0b1, + /// \brief remove equivalent APs + global_equiv = 0b10, + /// \brief likewise, but don't consider equivalent input and output + global_equiv_output_only = 0b110, + }; + + realizability_simplifier(formula f, + const std::vector& inputs, + unsigned options = polarity | global_equiv, + std::ostream* verbose = nullptr); + + /// \brief Return the simplified formula. + formula simplified_formula() const + { + return f_; + } + + /// \brief Returns a vector of (from,from_is_input,to) + const std::vector>& get_mapping() const + { + return mapping_; + } + + /// \brief Patch a Mealy machine to add the missing APs. + void patch_mealy(twa_graph_ptr mealy) const; + + /// \brief Patch a game to add the missing APs. + void patch_game(twa_graph_ptr mealy) const; + + private: + void add_to_mapping(formula from, bool from_is_input, formula to); + std::vector> mapping_; + formula f_; + bool global_equiv_output_only_; + }; + /// @} } diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 455400f7d..49ac54997 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1545,7 +1545,7 @@ namespace const char* mode, const std::vector& unused_ins = {}, const std::vector& unused_outs = {}, - const relabeling_map* rm = nullptr) + const realizability_simplifier* rs = nullptr) { // The aiger circuit can currently noly encode separated mealy machines @@ -1620,19 +1620,19 @@ namespace unused_outs.cbegin(), unused_outs.cend()); - if (rm) + if (rs) // If we have removed some APs from the original formula, they // might have dropped out of the output_names list (depending on // how we split the formula), but they should not have dropped // from the input_names list. So let's fix the output_names // lists by adding anything that's not an input and not already // there. 
- for (auto [k, v]: *rm) + for (auto [k, k_is_input, v]: rs->get_mapping()) { + if (k_is_input) + continue; const std::string s = k.ap_name(); - if (std::find(input_names_all.begin(), input_names_all.end(), s) - == input_names_all.end() - && std::find(output_names_all.begin(), output_names_all.end(), s) + if (std::find(output_names_all.begin(), output_names_all.end(), s) == output_names_all.end()) output_names_all.push_back(s); } @@ -1985,93 +1985,79 @@ namespace // Reset them for (unsigned i = 0; i < n_outs; ++i) circuit.set_output(i, bdd2var_min(out[i], out_dc[i])); - // Add the unused propositions - // - // RM contains assignments like + + // Set unused signal to false by default + const unsigned n_outs_all = output_names_all.size(); + for (unsigned i = n_outs; i < n_outs_all; ++i) + circuit.set_output(i, circuit.aig_false()); + + // RS may contains assignments for unused signals, such as // out1 := 1 // out2 := 0 // out3 := in1 // out4 := !out3 - // but it is possible that the rhs could refer to a variable - // that is not yet defined because of the ordering. For - // this reason, the first pass will store signals it could not - // complete in the POSTPONE vector. - // - // In that vector, (u,v,b) means that output u should be mapped to - // the same formula as output v, possibly negated (if b). - std::vector> postpone; + // But because the formula is simplified in a loop (forcing + // some of those values in the formula reveal more values to + // be forced), it is possible that the rhs refers to a variable + // that is forced later in the mapping. Therefore the mapping + // should be applied in reverse order. + if (rs) + { + auto mapping = rs->get_mapping(); + for (auto it = mapping.rbegin(); it != mapping.rend(); ++it) + { + auto [from, from_is_input, to] = *it; + if (from_is_input) + continue; - const unsigned n_outs_all = output_names_all.size(); - for (unsigned i = n_outs; i < n_outs_all; ++i) - if (rm) - { - if (auto to = rm->find(formula::ap(output_names_all[i])); - to != rm->end()) - { - if (to->second.is_tt()) - { - circuit.set_output(i, circuit.aig_true()); - continue; - } - else if (to->second.is_ff()) - { - circuit.set_output(i, circuit.aig_false()); - continue; - } - else - { - formula repr = to->second; - bool neg_repr = false; + auto j = std::find(output_names_all.begin(), + output_names_all.end(), + from.ap_name()); + assert(j != output_names_all.end()); + int i = j - output_names_all.begin(); + if (to.is_tt()) + { + circuit.set_output(i, circuit.aig_true()); + continue; + } + else if (to.is_ff()) + { + circuit.set_output(i, circuit.aig_false()); + continue; + } + else + { + formula repr = to; + bool neg_repr = false; if (repr.is(op::Not)) { neg_repr = true; repr = repr[0]; } // is repr an input? - if (auto it = std::find(input_names_all.begin(), + if (auto it2 = std::find(input_names_all.begin(), input_names_all.end(), repr.ap_name()); - it != input_names_all.end()) + it2 != input_names_all.end()) { unsigned ivar = - circuit.input_var(it - input_names_all.begin(), + circuit.input_var(it2 - input_names_all.begin(), neg_repr); circuit.set_output(i, ivar); } // is repr an output? 
- else if (auto it = std::find(output_names_all.begin(), - output_names_all.end(), - repr.ap_name()); - it != output_names_all.end()) + else { - unsigned outnum = it - output_names_all.begin(); + assert(std::find(output_names_all.begin(), + output_names_all.end(), + repr.ap_name()) == + output_names_all.end()); + unsigned outnum = it2 - output_names_all.begin(); unsigned outvar = circuit.output(outnum); - if (outvar == -1u) - postpone.emplace_back(i, outnum, neg_repr); - else - circuit.set_output(i, outvar + neg_repr); + circuit.set_output(i, outvar + neg_repr); } - } - } - } - else - circuit.set_output(i, circuit.aig_false()); - unsigned postponed = postpone.size(); - while (postponed) - { - unsigned postponed_again = 0; - for (auto [u, v, b]: postpone) - { - unsigned outvar = circuit.output(v); - if (outvar == -1u) - ++postponed_again; - else - circuit.set_output(u, outvar + b); + } } - if (postponed_again >= postponed) - throw std::runtime_error("aiger encoding bug: " - "postponed output shunts not decreasing"); - postponed = postponed_again; } for (unsigned i = 0; i < n_latches; ++i) circuit.set_next_latch(i, bdd2var_min(latch[i], bddfalse)); @@ -2106,7 +2092,7 @@ namespace spot mealy_machine_to_aig(const twa_graph_ptr &m, const char *mode, const std::vector& ins, const std::vector& outs, - const relabeling_map* rm) + const realizability_simplifier* rs) { if (!m) throw std::runtime_error("mealy_machine_to_aig(): " @@ -2139,20 +2125,20 @@ namespace spot } // todo Some additional checks? return auts_to_aiger({{m, get_synthesis_outputs(m)}}, mode, - unused_ins, unused_outs, rm); + unused_ins, unused_outs, rs); } aig_ptr mealy_machine_to_aig(mealy_like& m, const char *mode, const std::vector& ins, const std::vector& outs, - const relabeling_map* rm) + const realizability_simplifier* rs) { if (m.success != mealy_like::realizability_code::REALIZABLE_REGULAR) throw std::runtime_error("mealy_machine_to_aig(): " "Can only handle regular mealy machine, yet."); - return mealy_machine_to_aig(m.mealy_like, mode, ins, outs, rm); + return mealy_machine_to_aig(m.mealy_like, mode, ins, outs, rs); } aig_ptr @@ -2212,7 +2198,7 @@ namespace spot const char *mode, const std::vector& ins, const std::vector>& outs, - const relabeling_map* rm) + const realizability_simplifier* rs) { if (m_vec.empty()) throw std::runtime_error("mealy_machines_to_aig(): No strategy given."); @@ -2269,7 +2255,7 @@ namespace spot if (!used_aps.count(ai)) unused_ins.push_back(ai); - return auts_to_aiger(new_vec, mode, unused_ins, unused_outs, rm); + return auts_to_aiger(new_vec, mode, unused_ins, unused_outs, rs); } aig_ptr @@ -2277,7 +2263,7 @@ namespace spot const char* mode, const std::vector& ins, const std::vector>& outs, - const relabeling_map* rm) + const realizability_simplifier* rs) { // todo extend to TGBA and possibly others const unsigned ns = strat_vec.size(); @@ -2311,7 +2297,7 @@ namespace spot "success identifier."); } } - return mealy_machines_to_aig(m_machines, mode, ins, outs_used, rm); + return mealy_machines_to_aig(m_machines, mode, ins, outs_used, rs); } std::ostream & diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index 9f55f3f00..4737a80be 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include @@ -447,7 +447,7 @@ namespace spot /// synthesis-output is ignored and all properties in \a ins and \a /// outs are guaranteed to appear in the aiger circuit. 
/// - /// If \a rm is given and is not empty, it can be used to specify how + /// If \a rs is given and is not empty, it can be used to specify how /// unused output should be encoded by mapping them to some constant. ///@{ SPOT_API aig_ptr @@ -456,7 +456,7 @@ namespace spot mealy_machine_to_aig(const twa_graph_ptr& m, const char *mode, const std::vector& ins, const std::vector& outs, - const relabeling_map* rm = nullptr); + const realizability_simplifier* rs = nullptr); SPOT_API aig_ptr mealy_machine_to_aig(const mealy_like& m, const char* mode); @@ -464,7 +464,7 @@ namespace spot mealy_machine_to_aig(mealy_like& m, const char *mode, const std::vector& ins, const std::vector& outs, - const relabeling_map* rm = nullptr); + const realizability_simplifier* rs = nullptr); ///@} /// \ingroup synthesis @@ -481,7 +481,7 @@ namespace spot /// If \a ins and \a outs are used, all properties they list are /// guaranteed to appear in the aiger circuit. /// - /// If \a rm is given and is not empty, it can be used to specify how + /// If \a rs is given and is not empty, it can be used to specify how /// unused output should be encoded by mapping them to some constant. /// @{ SPOT_API aig_ptr @@ -495,13 +495,13 @@ namespace spot const char* mode, const std::vector& ins, const std::vector>& outs, - const relabeling_map* rm = nullptr); + const realizability_simplifier* rs = nullptr); SPOT_API aig_ptr mealy_machines_to_aig(const std::vector& m_vec, const char* mode, const std::vector& ins, const std::vector>& outs, - const relabeling_map* rm = nullptr); + const realizability_simplifier* rs = nullptr); /// @} /// \ingroup twa_io From 1a2746e182af40d12759cd10567aa1e25287ca6a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 12 Oct 2023 16:05:27 +0200 Subject: [PATCH 350/606] sbacc: ignore false edges and unreachable states * spot/twaalgos/sbacc.cc: Here. --- spot/twaalgos/sbacc.cc | 46 +++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/spot/twaalgos/sbacc.cc b/spot/twaalgos/sbacc.cc index 5f93e0584..b23c95b6d 100644 --- a/spot/twaalgos/sbacc.cc +++ b/spot/twaalgos/sbacc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2018, 2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -59,29 +59,31 @@ namespace spot // about a possible uninitialized use later. unsigned true_state_last = unsigned(); for (auto& e: old->edges()) - for (unsigned d: old->univ_dests(e.dst)) - if (si.scc_of(e.src) == si.scc_of(d)) - { - common_in[d] &= e.acc; - common_out[e.src] &= e.acc; - // Any state with an accepting edge labeled by true is a - // "true state". We will merge all true states, and - // ignore other outgoing edges. See issue #276 for an - // example. - if (e.src == e.dst && e.cond == bddtrue - && old->acc().accepting(e.acc)) - { - true_state[d] = true; - true_state_acc = e.acc; - true_state_last = e.src; - } - } + if (SPOT_LIKELY(e.cond != bddfalse && si.reachable_state(e.src))) + for (unsigned d: old->univ_dests(e.dst)) + if (si.scc_of(e.src) == si.scc_of(d)) + { + common_in[d] &= e.acc; + common_out[e.src] &= e.acc; + // Any state with an accepting edge labeled by true is a + // "true state". We will merge all true states, and + // ignore other outgoing edges. See issue #276 for an + // example. 
+ if (e.src == e.dst && e.cond == bddtrue + && old->acc().accepting(e.acc)) + { + true_state[d] = true; + true_state_acc = e.acc; + true_state_last = e.src; + } + } for (unsigned s = 0; s < ns; ++s) common_out[s] |= common_in[s]; for (auto& e: old->edges()) - for (unsigned d: old->univ_dests(e.dst)) - if (si.scc_of(e.src) == si.scc_of(d)) - one_in[d] = e.acc - common_out[e.src]; + if (SPOT_LIKELY(e.cond != bddfalse && si.reachable_state(e.src))) + for (unsigned d: old->univ_dests(e.dst)) + if (si.scc_of(e.src) == si.scc_of(d)) + one_in[d] = e.acc - common_out[e.src]; auto res = make_twa_graph(old->get_dict()); res->copy_ap_of(old); @@ -159,6 +161,8 @@ namespace spot bool maybe_accepting = !si.is_rejecting_scc(scc_src); for (auto& t: old->out(one.first.first)) { + if (SPOT_UNLIKELY(t.cond == bddfalse)) + continue; std::vector dests; for (unsigned d: old->univ_dests(t.dst)) { From 75f3a5f2c5f679221212851816b55762bfc2b6d7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 15 Oct 2023 00:21:12 +0200 Subject: [PATCH 351/606] Fix warnings with GCC 7 on Centos 7 * spot/twa/twagraph.cc: Mark two variables as unused. * spot/twaalgos/aiger.cc: Avoid spurious nullptr dereference warning, and mark more variable as unused. * spot/twaalgos/forq_contains.cc (word::operator==): Mark as maybe_unused. * bin/ltlsynt.cc, spot/twaalgos/relabel.cc, spot/twaalgos/mealy_machine.cc, spot/twaalgos/synthesis.cc, spot/twaalgos/zlktree.cc: Avoid unused variables warnings. * spot/twaalgos/toparity.cc: Remove uses of std::optional, they were not necessary, and they trigger spurious warnings in GCC 7. Also work around a spurious "potential nullptr deref". * tests/core/twacube.cc: Fix another potential nullptr warning. * spot/twaalgos/simulation.cc: Work a around GCC 6/7 bug. 
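Most of the hunks below silence GCC 7's unused-variable warnings on structured
bindings by consuming the intentionally ignored names with a void cast.  A
minimal sketch of that idiom, not taken from the patch itself (the function and
variable names are invented for illustration):

    #include <tuple>

    // GCC 7 warns that `unused` is never read after the destructuring;
    // casting it to void silences the warning without changing behavior,
    // in the same way as the `(void) to;`, `(void) nsl1;` and `(void) sz;`
    // casts added below.
    static int sum_of_first_two(const std::tuple<int, int, int>& t)
    {
      auto [a, b, unused] = t;
      (void) unused;
      return a + b;
    }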
--- bin/ltlsynt.cc | 7 +- spot/twa/twagraph.cc | 2 + spot/twaalgos/aiger.cc | 22 +++-- spot/twaalgos/forq_contains.cc | 1 + spot/twaalgos/mealy_machine.cc | 5 ++ spot/twaalgos/relabel.cc | 8 +- spot/twaalgos/simulation.cc | 12 +++ spot/twaalgos/synthesis.cc | 6 +- spot/twaalgos/toparity.cc | 148 ++++++++++++++------------------- spot/twaalgos/zlktree.cc | 4 +- tests/core/twacube.cc | 7 +- 11 files changed, 115 insertions(+), 107 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 0ff44625d..fb24a4e6b 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -447,8 +447,11 @@ namespace { robin_hood::unordered_set removed_outputs; for (auto [from, from_is_input, to] : rs->get_mapping()) - if (!from_is_input) - removed_outputs.insert(from); + { + (void) to; + if (!from_is_input) + removed_outputs.insert(from); + } for (const std::string& apstr: output_aps) { spot::formula ap = spot::formula::ap(apstr); diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 50145803d..e7a875680 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -531,7 +531,9 @@ namespace spot }; auto [i1, nsl1, sl1, e1] = e_idx[s1]; + (void) nsl1; auto [i2, nsl2, sl2, e2] = e_idx[s2]; + (void) nsl2; unsigned n_trans = e1 - i1; if ((e2 - i2) != n_trans) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 49ac54997..df3d05bdb 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -509,6 +509,7 @@ namespace spot { // Do some check_ups auto& [gates, vardict, _] = ss; + (void)_; trace << "Reapplying sf: " << sf.first << "; " << sf.second << "\nwith " << gates.size() << " additional gates.\n\n"; assert(gates.size() == vardict.size()); @@ -993,6 +994,7 @@ namespace spot { auto [it, ins] = occur_map.try_emplace({term[i], term[j]} , 0); + (void)ins; it->second += 1; } }; @@ -1169,12 +1171,14 @@ namespace spot aig_ptr res_ptr = std::make_shared(in_names__, out_names__, next_latches__.size(), dict); + // For some reason g++-7 thinks res_ptr could be null. + SPOT_ASSUME(res_ptr); aig& circ = *res_ptr; - res_ptr->max_var_ = + circ.max_var_ = (in_names__.size() + next_latches__.size() + gates__.size())*2; - std::swap(res_ptr->next_latches_, next_latches__); - std::swap(res_ptr->outputs_, outputs__); - std::swap(res_ptr->and_gates_, gates__); + std::swap(circ.next_latches_, next_latches__); + std::swap(circ.outputs_, outputs__); + std::swap(circ.and_gates_, gates__); // Create all the bdds/vars // true/false/latches/inputs already exist @@ -1520,7 +1524,10 @@ namespace { // We do not care about an output if it does not appear in the bdd for (auto& [_, dc_v] : out_dc_vec) - dc_v = true; + { + (void) _; + dc_v = true; + } v.clear(); while (b != bddtrue) { @@ -1591,8 +1598,8 @@ namespace if (bdd_implies(all_outputs, b)) // ap is an output AP { output_names.push_back(ap.ap_name()); - auto [it, inserted] = bddvar_to_num.try_emplace(bddvar, - i_out++); + bool inserted = bddvar_to_num.try_emplace(bddvar, + i_out++).second; if (SPOT_UNLIKELY(!inserted)) throw std::runtime_error("Intersecting outputs"); } @@ -1629,6 +1636,7 @@ namespace // there. 
for (auto [k, k_is_input, v]: rs->get_mapping()) { + (void) v; if (k_is_input) continue; const std::string s = k.ap_name(); diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 6292dbe76..1e397106f 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -797,6 +797,7 @@ namespace spot::forq return temp; } + [[maybe_unused]] bool word::operator==(word const& other) const { return symbols == other.symbols; diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 25bab05a9..fc36a42f1 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1326,6 +1326,8 @@ namespace if (inserted) trace << "Register oc " << it->first << ", " << it->second << " for state " << s1 << '\n'; +#else + (void)inserted; #endif } // Are two player condition ids states incompatible @@ -1952,6 +1954,7 @@ namespace // construction of the sat-problem latter on depends on it for (auto&& [id, pv] : sim_map) { + (void)id; // We want front (the representative) to be the smallest std::sort(pv.second.begin(), pv.second.end()); bs.emplace_back(std::move(pv.second)); @@ -2562,6 +2565,7 @@ namespace picosat_push(lm.psat_); for (auto& [_, clause] : state_cover_clauses) { + (void)_; // Clause is not nullterminated! clause.push_back(0); picosat_add_lits(lm.psat_, clause.data()); @@ -3278,6 +3282,7 @@ namespace bdd repr = red.minimal_letters_vec[group][idx]; const auto& [_, repr_letters] = red.minimal_letters[group].at(repr); + (void)_; // The class of letters is the first set for (int id : repr_letters) { diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index ba5e4ed14..2ba9e7d52 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2015-2018, 2020, 2022, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -184,9 +184,7 @@ namespace spot continue; } - auto [_, ins] = - all_cond_id2idx.try_emplace(e.cond.id(), all_cond.size()); - if (ins) + if (all_cond_id2idx.try_emplace(e.cond.id(), all_cond.size()).second) { all_cond.push_back(e.cond); if (all_cond.size() > max_letter) diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index ed53929b3..404a8eebc 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -33,6 +33,14 @@ #include #include +// Work around GCC bug 80947 (dominates_edge is causing spurious +// visibility warnings) +#if __GNUC__ <= 7 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wattributes" +#endif + + // Simulation-based reduction, implemented using bdd-based signatures. // // The signature of a state is a Boolean function (implemented as a @@ -1554,3 +1562,7 @@ namespace spot return reduce_iterated_(aut); } } // End namespace spot. 
+ +#if __GNUC__ <= 7 +#pragma GCC diagnostic pop +#endif diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index aef11d27b..0aff98880 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1393,7 +1393,7 @@ namespace spot std::swap(left_outs, right_outs); } - auto [_, g_outs] = form2props.aps_of(f_g); + std::set g_outs = form2props.aps_of(f_g).second; if (are_intersecting(g_outs, right_outs)) return ret_sol_maybe(); @@ -1480,7 +1480,7 @@ namespace spot auto res = make_twa_graph(dict); bdd output_bdd = bddtrue; - auto [ins_f, _] = form2props.aps_of(f_g); + std::set ins_f = form2props.aps_of(f_g).first; for (auto &out : output_aps) output_bdd &= bdd_ithvar(res->register_ap(out)); @@ -1697,7 +1697,7 @@ namespace // anonymous for subsformula continue; done_ass[i] = true; auto &ass = assumptions_split[i]; - auto [left_aps, right_aps] = form2props.aps_of(ass); + std::set left_aps = form2props.aps_of(ass).first; // If an assumption hasn't any decRelProp, it is considered as // a free assumption. if (!are_intersecting(left_aps, decRelProps_ins)) diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index a82c7d57a..7abec3e15 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018-2020, 2022 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2018-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -31,7 +31,6 @@ #include #include -#include namespace std { @@ -288,6 +287,8 @@ namespace spot was_able_to_color, max_color)) { auto res = make_twa_graph(aut, twa::prop_set::all()); + // GCC 7 warns about potential null pointer dereference. + SPOT_ASSUME(res); auto &res_vector = res->edge_vector(); unsigned rv_size = res_vector.size(); for (unsigned i = 1; i < rv_size; ++i) @@ -796,9 +797,9 @@ namespace spot // Tells if we are constructing a parity max odd bool is_odd_ = false; // min_color used in the automaton + 1 (result of max_set). - std::optional min_color_used_; - std::optional max_color_scc_; - std::optional max_color_used_; + unsigned min_color_used_ = -1U; + unsigned max_color_scc_ = 0; + unsigned max_color_used_ = 0; std::vector state_to_res_; std::vector res_to_aut_; // Map a state of aut_ to every copy of this state. Used by a recursive call @@ -976,7 +977,7 @@ namespace spot edge_cache = nullptr) { // In a parity automaton we just need the maximal value - auto simax = mark.max_set(); + unsigned simax = mark.max_set(); const bool need_cache = edge_cache != nullptr && can_merge_edge; long long key = 0; @@ -999,24 +1000,9 @@ namespace spot assert(res_src != -1U); assert(res_dst != -1U); - // No edge already done in the current scc. 
- if (!max_color_scc_.has_value()) - max_color_scc_.emplace(simax); - else - max_color_scc_.emplace(std::max(*max_color_scc_, simax)); - - // If it is the first edge of the result - if (!min_color_used_.has_value()) - { - assert(!max_color_used_.has_value()); - max_color_used_.emplace(simax); - min_color_used_.emplace(simax); - } - else - { - min_color_used_.emplace(std::min(*min_color_used_, simax)); - max_color_used_.emplace(std::max(*max_color_used_, simax)); - } + max_color_scc_ = std::max(max_color_scc_, simax); + min_color_used_ = std::min(min_color_used_, simax); + max_color_used_ = std::max(max_color_used_, simax); auto new_edge_num = res_->new_edge(res_src, res_dst, cond, simplified); if (need_cache) @@ -1478,31 +1464,30 @@ namespace spot return; is_odd_ = true; // We can reduce if we don't have an edge without color. - bool can_reduce = (min_color_used_.has_value() && *min_color_used_ != 0); + bool can_reduce = (min_color_used_ != -1U) && (min_color_used_ != 0); int shift; if (can_reduce) - shift = -1 * (*min_color_used_ - (*min_color_used_ % 2) + 1); + shift = - (min_color_used_ | 1); else shift = 1; // If we cannot decrease and we already the the maximum color, we don't // have to try. Constructs a mark_t to avoid to make report_too_many_sets // public. - if (!can_reduce && max_color_used_.value_or(-1) + shift == MAX_ACCSETS) + if (!can_reduce && max_color_used_ + shift >= MAX_ACCSETS) acc_cond::mark_t {SPOT_MAX_ACCSETS}; - if (max_color_used_.has_value()) - *max_color_used_ += shift; - if (min_color_used_.has_value()) - *min_color_used_ += shift; + max_color_used_ += shift; + if (min_color_used_ < -1U) + min_color_used_ += shift; for (auto &e : res_->edges()) - { - auto new_val = e.acc.max_set() - 1 + shift; - if (new_val != -1U) - e.acc = { new_val }; - else - e.acc = {}; - } + { + auto new_val = e.acc.max_set() - 1 + shift; + if (new_val != -1U) + e.acc = { new_val }; + else + e.acc = {}; + } } template @@ -1877,29 +1862,22 @@ namespace spot bool old_pp_gen = opt_.parity_prefix_general; opt_.parity_prefix_general = false; - auto max_scc_color_rec = max_color_scc_; + unsigned max_scc_color_rec = max_color_scc_; for (auto x : sub.split_aut({removed_cols})) - { - x->set_acceptance(new_cond); - process_scc(x, algorithm::PARITY_PREFIX); - if (max_color_scc_.has_value()) { - if (!max_scc_color_rec.has_value()) - max_scc_color_rec.emplace(*max_color_scc_); - else - max_scc_color_rec.emplace( - std::max(*max_scc_color_rec, *max_color_scc_)); + x->set_acceptance(new_cond); + process_scc(x, algorithm::PARITY_PREFIX); + max_scc_color_rec = std::max(max_scc_color_rec, max_color_scc_); } - } opt_.parity_prefix = true; opt_.parity_prefix_general = old_pp_gen; - assert(max_scc_color_rec.has_value()); - auto max_used_is_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + assert(max_scc_color_rec > 0); + bool max_used_is_accepting = ((max_scc_color_rec - 1) % 2) == is_odd_; bool last_prefix_acc = (prefixes.size() % 2) != first_is_accepting; unsigned m = prefixes.size() + (max_used_is_accepting != last_prefix_acc) - + *max_scc_color_rec - 1; + + max_scc_color_rec - 1; auto sub_aut_orig = sub_aut->get_named_prop>("original-states"); assert(sub_aut_orig); @@ -1914,8 +1892,7 @@ namespace spot const unsigned col = m - pos; // As it is a parity prefix we should never get a lower value than // the color recursively produced. 
- assert(!max_scc_color_rec.has_value() || *max_scc_color_rec == 0 - || col + 1 > *max_scc_color_rec); + assert(col >= max_scc_color_rec); unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) if (col != -1U) @@ -2003,7 +1980,7 @@ namespace spot bool old_pp = opt_.parity_prefix; opt_.parity_prefix = false; - auto max_scc_color_rec = max_color_scc_; + unsigned max_scc_color_rec = max_color_scc_; scc_info lower_scc(sub_aut, scc_info_options::TRACK_STATES); scc_info_to_parity sub(lower_scc, keep); state_to_nums_ = @@ -2011,11 +1988,7 @@ namespace spot for (auto x : sub.split_aut(keep)) { process_scc(x, algorithm::PARITY_PREFIX_GENERAL); - if (!max_scc_color_rec.has_value()) - max_scc_color_rec = max_color_scc_; - else if (max_color_scc_.has_value()) - max_scc_color_rec.emplace( - std::max(*max_scc_color_rec, *max_color_scc_)); + max_scc_color_rec = std::max(max_scc_color_rec, max_color_scc_); } // restore options @@ -2043,11 +2016,11 @@ namespace spot const bool min_prefix_accepting = (min_set_prefix % 2) == start_inf; // max_scc_color_rec has a value as the automaton is not parity-type, // so there was a recursive paritisation - assert(max_scc_color_rec.has_value()); - const bool max_rec_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + assert(max_scc_color_rec > 0); + const bool max_rec_accepting = ((max_scc_color_rec - 1) % 2) == is_odd_; const bool same_prio = min_prefix_accepting == max_rec_accepting; const unsigned delta = - min_set_prefix - (*max_scc_color_rec + 1) - !same_prio; + min_set_prefix - (max_scc_color_rec + 1) - !same_prio; auto sub_aut_orig = sub_aut->get_named_prop>("original-states"); @@ -2248,7 +2221,7 @@ namespace spot const algorithm none_algo = algorithm::NONE) { // Init the maximal color produced when processing this SCC. - max_color_scc_.reset(); + max_color_scc_ = 0; // If the sub_automaton is "empty", we don't need to apply an algorithm. if (sub_aut->num_edges() == 0) { @@ -2386,29 +2359,29 @@ namespace spot res_ = make_twa_graph(aut_->get_dict()); res_->copy_ap_of(aut_); const unsigned num_scc = si_.scc_count(); - auto orig_aut = + std::vector* orig_aut = aut_->get_named_prop>("original-states"); - std::optional> orig_st; + std::vector orig_st; if (orig_aut) - { - orig_st.emplace(std::vector{*orig_aut}); - std::const_pointer_cast(aut_) - ->set_named_prop("original-states", nullptr); - } + { + orig_st = std::move(*orig_aut); + std::const_pointer_cast(aut_) + ->set_named_prop("original-states", nullptr); + } auto sccs = si_.split_aut(); for (unsigned scc = 0; scc < num_scc; ++scc) - { - auto sub_automaton = sccs[scc]; - process_scc(sub_automaton); - } + { + auto sub_automaton = sccs[scc]; + process_scc(sub_automaton); + } link_sccs(); // During the execution, to_parity works on its own // original-states and we must combine it with the property original // states of aut_ to propagate the information. - if (orig_st) + if (orig_aut) for (unsigned i = 0; i < orig_->size(); ++i) - (*orig_)[i] = (*orig_aut)[(*orig_)[i]]; + (*orig_)[i] = orig_st[(*orig_)[i]]; res_->set_named_prop("original-states", orig_); if (opt_.pretty_print) res_->set_named_prop("state-names", names_); @@ -2421,16 +2394,17 @@ namespace spot res_->purge_unreachable_states(); // A special case is an automaton without edge. It implies // max_color_used_ has not value so we need to test it. 
- if (!max_color_used_.has_value()) - { - assert(aut_->num_edges() == 0); - res_->set_acceptance(acc_cond(acc_cond::acc_code::f())); - } + if (max_color_used_ == 0) + { + assert(aut_->num_edges() == 0); + res_->set_acceptance(acc_cond(acc_cond::acc_code::f())); + } else - { - res_->set_acceptance(acc_cond( - acc_cond::acc_code::parity(true, is_odd_, *max_color_used_))); - } + { + res_->set_acceptance(acc_cond(acc_cond::acc_code:: + parity(true, is_odd_, + max_color_used_))); + } if (opt_.datas) { constexpr std::array diff --git a/spot/twaalgos/zlktree.cc b/spot/twaalgos/zlktree.cc index f31c46896..da6a5e208 100644 --- a/spot/twaalgos/zlktree.cc +++ b/spot/twaalgos/zlktree.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de +// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Developpement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -669,6 +669,7 @@ namespace spot // seen_dup. for (auto& [sz, bv, colors, minstate]: out) { + (void) sz; (void) colors; (void) minstate; seen_src->clear_all(); // local source of the node @@ -735,6 +736,7 @@ namespace spot std::unique_ptr cur(make_bitvect(nstates)); for (const auto& [sz, bv, colors, minstate]: out) { + (void) sz; (void) colors; (void) minstate; cur->clear_all(); diff --git a/tests/core/twacube.cc b/tests/core/twacube.cc index 31eeaed69..1a2cd73a8 100644 --- a/tests/core/twacube.cc +++ b/tests/core/twacube.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2015, 2016, 2018, 2020, 2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -78,7 +78,10 @@ int main() const std::vector& aps = aut->ap(); unsigned int seed = 17; - for (auto it = aut->succ(2); !it->done(); it->next()) + auto it = aut->succ(2); + SPOT_ASSUME(it); // GCC 7 warns about potential nullptr. + for (; !it->done(); it->next()) + for (; !it->done(); it->next()) { auto& t = aut->trans_storage(it, seed); auto& d = aut->trans_data(it, seed); From 9bdc5000138dd4b8cb75435ee5af024a4d84fb04 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 26 Oct 2023 17:58:12 +0200 Subject: [PATCH 352/606] powerset: speedup computation for singleton with single edge * spot/twaalgos/powerset.cc: Here. --- spot/twaalgos/powerset.cc | 64 ++++++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 21 deletions(-) diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index 326de7c76..c69ffd75d 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -192,7 +192,7 @@ namespace spot typedef power_map::power_state power_state; - typedef std::unordered_map power_set; + typedef std::unordered_map power_set; power_set seen; std::vector toclean; @@ -235,11 +235,50 @@ namespace spot // outgoing map auto om = std::unique_ptr(make_bitvect_array(ns, nc)); + // Map a bitvector to a state number, possibly creating the state. 
+ auto to_state = [&](bitvect* dst) { + if (acc_sinks && dst->intersects(*acc_sinks)) + *dst = *acc_sinks; + auto i = seen.find(dst); + if (i != seen.end()) + return i->second; + unsigned dst_num = res->new_state(); + auto dst2 = dst->clone(); + seen[dst2] = dst_num; + toclean.emplace_back(dst2); + auto ps = bv_to_ps(dst); + assert(pm.map_.size() == dst_num); + pm.map_.emplace_back(std::move(ps)); + return dst_num; + }; + auto& graph = aut->get_graph(); for (unsigned src_num = 0; src_num < res->num_states(); ++src_num) { - om->clear_all(); - const power_state& src = pm.states_of(src_num); + unsigned srcsz = src.size(); + if (srcsz == 0) + continue; + om->clear_all(); + // If the meta-state is a singleton {st} with can avoid + // some bitvector work in case st has 0 or 1 edge. + if (srcsz == 1) + { + unsigned st = *src.begin(); + auto& st_storage = graph.state_storage(st); + unsigned e = st_storage.succ; + if (SPOT_UNLIKELY(e == 0U)) // no edge + continue; + if (e == st_storage.succ_tail) // single edge + { + auto& ed_storage = graph.edge_storage(e); + bitvect& bv = om->at(0); + bv.set(ed_storage.dst); + res->new_edge(src_num, to_state(&bv), ed_storage.cond); + // Don't bother with the aborter here, as this path is + // clearly not exploding. + continue; + } + } for (auto s: src) { size_t base = index(s) * nc; @@ -251,24 +290,7 @@ namespace spot auto dst = &om->at(c); if (dst->is_fully_clear()) continue; - if (acc_sinks && dst->intersects(*acc_sinks)) - *dst = *acc_sinks; - auto i = seen.find(dst); - unsigned dst_num; - if (i != seen.end()) - { - dst_num = i->second; - } - else - { - dst_num = res->new_state(); - auto dst2 = dst->clone(); - seen[dst2] = dst_num; - toclean.emplace_back(dst2); - auto ps = bv_to_ps(dst); - assert(pm.map_.size() == dst_num); - pm.map_.emplace_back(std::move(ps)); - } + unsigned dst_num = to_state(dst); res->new_edge(src_num, dst_num, num2bdd[c]); if (aborter && aborter->too_large(res)) { From 127cb89cad89d324ffc9aed15a738688ff8c71e3 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Wed, 8 Nov 2023 09:57:16 +0100 Subject: [PATCH 353/606] Remove binary integer literals Remove this notation because Swig only supports it since version 4.0.0, whereas Spot requires a version greater than or equal to 3.0.2. * spot/tl/apcollect.hh: Here. --- spot/tl/apcollect.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index b155de551..42424b4ac 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -87,11 +87,11 @@ namespace spot public: enum realizability_simplifier_option { /// \brief remove APs with single polarity - polarity = 0b1, + polarity = 1, /// \brief remove equivalent APs - global_equiv = 0b10, + global_equiv = 2, /// \brief likewise, but don't consider equivalent input and output - global_equiv_output_only = 0b110, + global_equiv_output_only = 6, }; realizability_simplifier(formula f, From ac0503526752a8a3351cf3cea49e1a3906543cbc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 7 Nov 2023 17:17:58 +0100 Subject: [PATCH 354/606] product_susp: fix handling of unsatisfiable/universal acceptances MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Part of issue #546 reported by Rüdiger Ehlers * spot/twaalgos/product.cc (product_susp): Fix detection and handling of unsatisfiable/universal acceptances. * tests/python/_product_susp.ipynb: Add test cases. 
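The corner case addressed here is a right operand whose acceptance condition is
universal or unsatisfiable without being literally t or f (for instance
Fin(1) | Inf(1)).  The new notebook cells exercise this from Python; the
following standalone C++ reproducer is a sketch in the same spirit (it is not
part of the patch, and product_susp(), are_equivalent(), translator and
parse_formula() are assumed from Spot's existing public API):

    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/twaalgos/translate.hh>
    #include <spot/twaalgos/product.hh>
    #include <spot/twaalgos/contains.hh>

    int main()
    {
      spot::translator trans;
      spot::twa_graph_ptr left = trans.run(spot::parse_formula("Ga | GF!a"));

      // A one-state automaton accepting every word, whose acceptance
      // condition is the universal formula "Fin(0) | Inf(0)" rather
      // than the equivalent "t".
      spot::twa_graph_ptr right = trans.run(spot::parse_formula("1"));
      right->set_acceptance(spot::acc_cond(1,
                              spot::acc_cond::acc_code("Fin(0) | Inf(0)")));

      // Suspending a universal automaton must not change the language:
      // the product should remain equivalent to `left`.
      spot::twa_graph_ptr prod = spot::product_susp(left, right);
      std::cout << (spot::are_equivalent(prod, left) ? "OK" : "KO") << '\n';
      return 0;
    }

With the fix, such trivially universal or unsatisfiable acceptance conditions
take the same early exit as plain t/f acceptance (the new `trivial:` branch in
product.cc).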
--- NEWS | 4 + spot/twaalgos/product.cc | 49 +- tests/python/_product_susp.ipynb | 1269 +++++++++++++++++++++++++++++- 3 files changed, 1265 insertions(+), 57 deletions(-) diff --git a/NEWS b/NEWS index cc61c1236..df7bf8dd3 100644 --- a/NEWS +++ b/NEWS @@ -112,6 +112,10 @@ New in spot 2.11.6.dev (not yet released) (Issue #541.) This has been fixed by disabled the use_simulation optimization in this case. + - product_or_susp() and product_susp() would behave incorrectly in + presence of unsatisifable or universal acceptance conditions that + were not f or t. (Part of issue #546.) + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index 243f3768c..b337a5a43 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2020, 2022 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2020, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -607,33 +607,36 @@ namespace spot throw std::runtime_error("product_susp(): left and right automata " "should share their bdd_dict"); - auto false_or_left = [&] (bool ff) - { - if (ff) - { - auto res = make_twa_graph(left->get_dict()); - res->new_state(); - res->prop_terminal(true); - res->prop_stutter_invariant(true); - res->prop_universal(true); - res->prop_complete(false); - return res; - } - return make_twa_graph(left, twa::prop_set::all()); - }; + auto const_automaton = [&left] (bool is_true) + { + auto res = make_twa_graph(left->get_dict()); + res->new_state(); + res->prop_terminal(true); + res->prop_stutter_invariant(true); + res->prop_universal(true); + res->prop_complete(is_true); + if (is_true) + res->new_edge(0, 0, bddtrue); + return res; + }; // We assume RIGHT is suspendable, but we want to deal with some // trivial true/false cases so we can later assume right has // more than one acceptance set. // Note: suspendable with "t" acceptance = universal language. - if (SPOT_UNLIKELY(right->num_sets() == 0)) + if (SPOT_UNLIKELY(right->num_sets() == 0 + || right->num_edges() == 0 + || right->acc().is_t() + || right->acc().is_f())) { - if (and_acc) - return false_or_left(right->is_empty()); - else if (right->is_empty()) // left OR false = left + trivial: + if (and_acc && right->is_empty()) + return const_automaton(false); + else if (!and_acc && !right->is_empty()) + // suspendable with "t" acceptance = universal language. 
+ return const_automaton(true); + else // left AND true = left; left OR false = left return make_twa_graph(left, twa::prop_set::all()); - else // left OR true = true - return make_twa_graph(right, twa::prop_set::all()); } auto res = make_twa_graph(left->get_dict()); @@ -644,8 +647,8 @@ namespace spot res->prop_state_acc(left->prop_state_acc() && right->prop_state_acc()); auto rightunsatmark = right->acc().unsat_mark(); - if (SPOT_UNLIKELY(!rightunsatmark.first)) - return false_or_left(and_acc); + if (SPOT_UNLIKELY(!rightunsatmark.first)) // right is universal + goto trivial; acc_cond::mark_t rejmark = rightunsatmark.second; if (leftweak) diff --git a/tests/python/_product_susp.ipynb b/tests/python/_product_susp.ipynb index a0de91f95..cba4d3bf6 100644 --- a/tests/python/_product_susp.ipynb +++ b/tests/python/_product_susp.ipynb @@ -607,13 +607,15 @@ "source": [ "left = spot.translate('(Ga | Gb | Gc)')\n", "right = spot.translate('GFa')\n", - "test(left, right)\n" + "test(left, right)" ] }, { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -624,11 +626,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1129,11 +1131,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1717,11 +1719,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1932,11 +1934,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2719,7 +2721,8 @@ } ], "source": [ - "test(spot.translate('(Ga | GF!a)', 'det', 'gen'), spot.translate('GF(a <-> !Xa)'))" + "a = spot.translate('(Ga | GF!a)', 'det', 'gen')\n", + "test(a, spot.translate('GF(a <-> !Xa)'))" ] }, { @@ -3089,13 +3092,1218 @@ } ], "source": [ - "test(spot.translate('(Ga | GF!a)'), spot.translate('true', 'monitor'))" + "a = spot.translate('(Ga | GF!a)')\n", + "tt = spot.translate('true', 'monitor')\n", + "test(a, tt)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
[graphviz SVG renderings of the automata displayed by test() omitted]"
       ]
      }
     ],
     "source": [
      "tt.set_acceptance(spot.acc_cond(1, \"t\")) # issue 546\n",
      "test(a, tt)"
     ]
    },
    {
     "cell_type": "code",
     "execution_count": 8,
     "metadata": {},
     "outputs": [
      "[graphviz SVG renderings of the automata displayed by test() omitted]"
     ],
     "source": [
      "tt.set_acceptance(spot.acc_cond(1, \"Fin(1) | Inf(1)\")) # issue 546\n",
      "test(a, tt)"
     ]
    },
    {
     "cell_type": "code",
     "execution_count": 9,
     "metadata": {},
     "outputs": [
      "[graphviz SVG renderings omitted; the automaton shown carries the
       unsatisfiable acceptance Fin(1) & Inf(1), and the notebook diff is
       truncated at this point]"
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tt.set_acceptance(spot.acc_cond(1, \"Fin(1) & Inf(1)\"))\n", + "test(a, tt)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -3458,20 +4666,13 @@ } ], "source": [ - "test(spot.translate('(Ga | GF!a)'), spot.translate('false'))" + "test(a, spot.translate('false'))" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -3485,7 +4686,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1+" + "version": "3.11.6" } }, "nbformat": 4, From 67b5d2aa9a4cde950e7d78ef8126b9e59dd094c8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 9 Nov 2023 22:13:05 +0100 Subject: [PATCH 355/606] fix several algorithms that incorrectly preserved !weak This massive set of changes was triggered by issue #546. In addition to the better handling of !weak, this also adds some weak properties in a few places. * spot/twaalgos/product.cc (product_aux): Throw some exception if an automaton with t or f acceptance has the !weak property. This is a cheap sanity check to help detect algorithms that incorrectly assumed !weak input would necessarily become !weak output. * spot/twaalgos/hoa.cc (print_hoa): Likewise, also do not assume that terminal implies very-weak. * spot/parseaut/parseaut.yy: Add several diagnostics for similar cases. E.g., a one-state automaton cannot be declared as !very-weak. * tests/core/parseaut.test: Check those new diagnostics. * spot/twa/twa.cc (twa::intersecting_run): Temporary remove the weak property by setting it to maybe, not to false. * spot/twaalgos/minimize.cc, spot/twaalgos/parity.cc, spot/twaalgos/sccfilter.cc, spot/twaalgos/simulation.cc: Account for the fact that these algorithm may in fact improve the weakness. * spot/twaalgos/strength.cc: Only look at colors used by the acceptance condition when deciding weakness. * spot/twaalgos/synthesis.cc: Declare the strategy as weak. * bin/randaut.cc: Add weak to automata with t/f acceptance. * spot/kripke/kripke.hh: Make kripke structures as weak. * tests/core/acc_word.test, tests/core/alternating.test, tests/core/complement.test, tests/core/complete.test, tests/core/ltlsynt.test, tests/core/randomize.test, tests/core/readsave.test, tests/core/remfin.test, tests/core/sccsimpl.test, tests/core/strength.test, tests/core/wdba2.test, tests/ltsmin/kripke.test, tests/python/automata-io.ipynb, tests/python/automata.ipynb, tests/python/dbranch.py, tests/python/highlighting.ipynb, tests/python/kripke.py, tests/python/ltsmin-dve.ipynb, tests/python/mealy.py, tests/python/simstate.py: Adjust all these test cases. * NEWS: Mention the fixes. 
---
 NEWS                            |  12 ++
 bin/randaut.cc                  |   2 +
 spot/kripke/kripke.hh           |   5 +-
 spot/parseaut/parseaut.yy       |  24 ++++
 spot/twa/twa.cc                 |   4 +-
 spot/twaalgos/hoa.cc            |   9 +-
 spot/twaalgos/minimize.cc       |  10 ++
 spot/twaalgos/parity.cc         |   9 +-
 spot/twaalgos/product.cc        |  16 +++
 spot/twaalgos/sccfilter.cc      |  21 +++-
 spot/twaalgos/simulation.cc     |  10 +-
 spot/twaalgos/strength.cc       |  10 +-
 spot/twaalgos/synthesis.cc      |   4 +
 tests/core/acc_word.test        |   4 +-
 tests/core/alternating.test     |   2 +-
 tests/core/complement.test      |  10 +-
 tests/core/complete.test        |   6 +-
 tests/core/ltlsynt.test         |   2 +-
 tests/core/parseaut.test        |  23 +++-
 tests/core/randomize.test       |   6 +-
 tests/core/readsave.test        |  10 +-
 tests/core/remfin.test          |   6 +-
 tests/core/sccsimpl.test        |   6 +-
 tests/core/strength.test        |   6 +-
 tests/core/wdba2.test           |   3 +-
 tests/ltsmin/kripke.test        |   6 +-
 tests/python/automata-io.ipynb  |   4 +-
 tests/python/automata.ipynb     | 187 ++++++++++++++++----------
 tests/python/dbranch.py         |   2 +-
 tests/python/highlighting.ipynb |   4 +-
 tests/python/kripke.py          |  10 +-
 tests/python/ltsmin-dve.ipynb   |   2 +-
 tests/python/mealy.py           |   8 +-
 tests/python/simstate.py        |   8 +-
 34 files changed, 287 insertions(+), 164 deletions(-)

diff --git a/NEWS b/NEWS
index df7bf8dd3..928adf584 100644
--- a/NEWS
+++ b/NEWS
@@ -116,6 +116,18 @@ New in spot 2.11.6.dev (not yet released)
     presence of unsatisifable or universal acceptance conditions that
     were not f or t.  (Part of issue #546.)
 
+  - Several algorithms were incorrectly dealing with the "!weak"
+    property.  Because they (rightly) assumed that a weak input would
+    produce a weak output, they also wrongly assumed that a non-weak
+    input would produce a non-weak output.  Unfortunately, removing
+    states or edges in a non-weak automaton can lead to a weak
+    automaton.  The incorrect property led to issue #546.  In addition
+    to fixing several algorithms, product() and print_hoa() will now
+    raise an exception if an automaton with t or f acceptance is
+    declared !weak.  (This cheap check will not catch all automata
+    incorrectly labeled by !weak, but helps detect some issues
+    nonetheless.)
+
+
 New in spot 2.11.6 (2023-08-01)
 
 Bug fixes:

diff --git a/bin/randaut.cc b/bin/randaut.cc
index 4e2065c2a..ec1a06a88 100644
--- a/bin/randaut.cc
+++ b/bin/randaut.cc
@@ -385,6 +385,8 @@ main(int argc, char** argv)
 
   if (opt_acceptance)
     aut->set_acceptance(accs, code);
+  if (aut->acc().is_t() || aut->acc().is_f())
+    aut->prop_weak(true);
 
   if (opt_uniq)
     {
diff --git a/spot/kripke/kripke.hh b/spot/kripke/kripke.hh
index 8b039f37c..ed540ac41 100644
--- a/spot/kripke/kripke.hh
+++ b/spot/kripke/kripke.hh
@@ -1,6 +1,6 @@
 // -*- coding: utf-8 -*-
-// Copyright (C) 2009, 2010, 2013, 2014, 2016, 2017, 2019, 2020 Laboratoire
-// de Recherche et Developpement de l'Epita
+// Copyright (C) 2009, 2010, 2013, 2014, 2016, 2017, 2019, 2020, 2023
+// Laboratoire de Recherche et Developpement de l'Epita
 //
 // This file is part of Spot, a model checking library.
// @@ -180,6 +180,7 @@ namespace spot kripke(const bdd_dict_ptr& d) : fair_kripke(d) { + prop_weak(true); } virtual ~kripke(); diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 5b8792e96..c52beb1e3 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -691,6 +691,19 @@ header: format-version header-items } if (t != e) a->prop_terminal(t->second.val); + if (a->acc().is_t() || a->acc().is_f()) + { + if (w != e && !w->second.val) + error(w->second.loc, "an automaton with this condition" + " is necessarily weak"); + if (iw != e && !iw->second.val) + error(iw->second.loc, "an automaton with this condition" + " is necessarily inherently-weak"); + if (vw != e && !vw->second.val + && (res.states == 0 || res.states == 1)) + error(vw->second.loc, "an automaton with 0 or 1 state " + "is necessarily very-weak"); + } auto u = p.find("unambiguous"); if (u != e) { @@ -2743,6 +2756,17 @@ static void fix_properties(result_& r) if (r.acc_style == State_Acc || (r.acc_style == Mixed_Acc && !r.trans_acc_seen)) r.aut_or_ks->prop_state_acc(true); + if (r.aut_or_ks->acc().is_t() || r.aut_or_ks->acc().is_f()) + { + r.aut_or_ks->prop_weak(true); + unsigned ns; + if (r.opts.want_kripke) + ns = r.h->ks->num_states(); + else + ns = r.h->aut->num_states(); + if (ns == 0 || ns == 1) + r.aut_or_ks->prop_very_weak(true); + } } static void check_version(const result_& r) diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index c1c113e87..6ada4b6f4 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -171,8 +171,8 @@ namespace spot // for weak automata, otherwise project(g1) would be unable to // compute the correct marks. See issue #471. It's OK to // optimize the right part if g2 is weak. - spot::trival g1weak = g1->prop_weak(); - std::const_pointer_cast(g1)->prop_weak(false); + trival g1weak = g1->prop_weak(); + std::const_pointer_cast(g1)->prop_weak(trival::maybe()); auto run = generic_accepting_run(product(g1, g2)); std::const_pointer_cast(g1)->prop_weak(g1weak); if (!run) diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index e6147afda..c8e7b13b9 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2022 Laboratoire de Recherche et +// Copyright (C) 2014-2023 Laboratoire de Recherche et // Developpement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -507,6 +507,11 @@ namespace spot } } + if (SPOT_UNLIKELY(aut->prop_weak().is_false() + && (aut->acc().is_t() || aut->acc().is_f()))) + throw std::runtime_error("print_hoa(): automaton is declared not weak, " + "but the acceptance makes this impossible"); + metadata md(aut, implicit_labels, state_labels); if (acceptance == Hoa_Acceptance_States && !md.has_state_acc) @@ -724,7 +729,7 @@ namespace spot } if (aut->prop_terminal()) prop(" terminal"); - if (aut->prop_very_weak() && (verbose || aut->prop_terminal() != true)) + if (aut->prop_very_weak()) prop(" very-weak"); if (aut->prop_weak() && (verbose || (aut->prop_terminal() != true && aut->prop_very_weak() != true))) diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 1ac961d46..889028e57 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -556,6 +556,16 @@ namespace spot // add a quick check inside minimize_dfa. 
if (a->prop_terminal()) res->prop_terminal(true); + else if (a->num_states() == 1) + { + // If thie automaton has only one state, check w + for (auto& e: a->out(0)) + if (e.cond == bddtrue && a->acc().accepting(e.acc)) + { + res->prop_terminal(true); + break; + } + } return res; } diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index c8507ac53..98ac010fe 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2019, 2022 Laboratoire de Recherche et +// Copyright (C) 2016, 2018, 2019, 2022, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -649,6 +649,13 @@ namespace spot e.acc = acc_cond::mark_t({n}); } + // Reducing the number of colors could turn a non-weak automaton + // into a weak one + if (aut->prop_weak().is_false()) + aut->prop_weak(trival::maybe()); + if (aut->prop_very_weak().is_false()) + aut->prop_very_weak(trival::maybe()); + return aut; } } diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index b337a5a43..dc12b34f8 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -27,6 +27,8 @@ #include #include +using namespace std::string_literals; + namespace spot { namespace @@ -102,6 +104,13 @@ namespace spot enum acc_op { and_acc, or_acc, xor_acc, xnor_acc }; + [[noreturn]] static + void report_should_be_weak(const char* what) + { + std::string s = what + " automaton is declared not weak, " + "but the acceptance makes this impossible"s; + throw std::runtime_error(s); + } static twa_graph_ptr product_aux(const const_twa_graph_ptr& left, @@ -128,6 +137,13 @@ namespace spot bool leftweak = left->prop_weak().is_true(); bool rightweak = right->prop_weak().is_true(); + if (SPOT_UNLIKELY(!leftweak && left->prop_weak().is_false() + && (lacc.is_t() || lacc.is_f()))) + report_should_be_weak("product: left"); + if (SPOT_UNLIKELY(!rightweak && right->prop_weak().is_false() + && (racc.is_t() || racc.is_f()))) + report_should_be_weak("product: right"); + // The conjunction of two co-Büchi automata is a co-Büchi automaton. // The disjunction of two Büchi automata is a Büchi automaton. // diff --git a/spot/twaalgos/sccfilter.cc b/spot/twaalgos/sccfilter.cc index 65e022453..20dea3f7e 100644 --- a/spot/twaalgos/sccfilter.cc +++ b/spot/twaalgos/sccfilter.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2009-2018, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -428,6 +428,15 @@ namespace spot res = scc_filter_apply>>(aut, given_si); res->prop_copy(aut, { true, true, false, true, false, true }); + if (res->num_edges() != aut->num_edges()) + { + if (res->prop_weak().is_false()) + res->prop_weak(trival::maybe()); + if (res->prop_very_weak().is_false()) + res->prop_very_weak(trival::maybe()); + } + if (res->prop_weak().is_true() && res->num_states() <= 1) + res->prop_very_weak(true); return res; } @@ -496,6 +505,16 @@ namespace spot if (!given_si) delete si; } + else + if (res->num_edges() != aut->num_edges()) + { + if (res->prop_weak().is_false()) + res->prop_weak(trival::maybe()); + if (res->prop_very_weak().is_false()) + res->prop_very_weak(trival::maybe()); + } + if (res->prop_weak().is_true() && res->num_states() <= 1) + res->prop_very_weak(true); return res; } diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index 404a8eebc..aa3ed2a15 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -761,11 +761,19 @@ namespace spot delete gb; res->prop_copy(original_, { false, // state-based acc forced below - true, // weakness preserved, + true, // weakness preserved false, true, // determinism improved true, // completeness preserved true, // stutter inv. }); + + // weakness can actually be improved + if (res->prop_weak().is_false()) + res->prop_weak(trival::maybe()); + if (res->prop_very_weak().is_false()) + res->prop_very_weak(trival::maybe()); + if (res->prop_inherently_weak().is_false()) + res->prop_inherently_weak(trival::maybe()); // !unambiguous and !semi-deterministic are not preserved if (!Cosimulation && nb_minato == nb_minterms) // Note that nb_minato != nb_minterms does not imply diff --git a/spot/twaalgos/strength.cc b/spot/twaalgos/strength.cc index 1ef93a7c2..8140b1b0a 100644 --- a/spot/twaalgos/strength.cc +++ b/spot/twaalgos/strength.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2011, 2013-2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) +// Copyright (C) 2010-2011, 2013-2018, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE) // // This file is part of Spot, a model checking library. 
// @@ -43,6 +43,8 @@ namespace spot if (inweak) si->determine_unknown_acceptance(); + acc_cond::mark_t mask = aut->get_acceptance().used_sets(); + bool is_inweak = true; bool is_weak = true; bool is_single_state_scc = true; @@ -66,9 +68,9 @@ namespace spot if (first) { first = false; - m = t.acc; + m = t.acc & mask; } - else if (m != t.acc) + else if (m != (t.acc & mask)) { is_weak = false; if (!inweak) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 0aff98880..11a154da3 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1469,6 +1469,9 @@ namespace spot edge.acc = {}; } res->set_acceptance(acc_cond::acc_code::t()); + res->prop_weak(true); + if (res->prop_terminal().is_false()) + res->prop_terminal(trival::maybe()); res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); return ret_sol_exists(res); @@ -1478,6 +1481,7 @@ namespace spot if (!want_strategy) return ret_sol_exists(nullptr); auto res = make_twa_graph(dict); + res->prop_weak(true); bdd output_bdd = bddtrue; std::set ins_f = form2props.aps_of(f_g).first; diff --git a/tests/core/acc_word.test b/tests/core/acc_word.test index 5f3b6880b..4fc5a5602 100644 --- a/tests/core/acc_word.test +++ b/tests/core/acc_word.test @@ -79,7 +79,7 @@ AP: 2 "b" "a" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc !complete -properties: deterministic stutter-invariant terminal +properties: deterministic stutter-invariant terminal very-weak spot.highlight.edges: 1 1 2 1 --BODY-- State: 0 {0} @@ -113,7 +113,7 @@ AP: 2 "a" "b" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic stutter-invariant terminal +properties: deterministic stutter-invariant terminal very-weak spot.highlight.edges: 1 3 2 3 5 3 6 3 7 2 8 2 --BODY-- State: 0 {0} diff --git a/tests/core/alternating.test b/tests/core/alternating.test index df4e47624..6706eddc8 100755 --- a/tests/core/alternating.test +++ b/tests/core/alternating.test @@ -472,7 +472,7 @@ Start: 0 AP: 1 "a" acc-name: all Acceptance: 0 t -properties: trans-labels explicit-labels state-acc deterministic +properties: trans-labels explicit-labels state-acc deterministic weak --BODY-- State: 0 [!0] 1 diff --git a/tests/core/complement.test b/tests/core/complement.test index eb3902d28..ebaafa1c2 100755 --- a/tests/core/complement.test +++ b/tests/core/complement.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2019, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2015-2019, 2021, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -41,7 +41,7 @@ AP: 0 acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc complete -properties: deterministic +properties: deterministic very-weak --BODY-- State: 0 [t] 0 @@ -74,7 +74,7 @@ AP: 1 "a" acc-name: co-Buchi Acceptance: 1 Fin(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic weak +properties: deterministic very-weak --BODY-- State: 0 [0] 2 @@ -93,7 +93,7 @@ AP: 1 "a" acc-name: co-Buchi Acceptance: 1 Fin(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic stutter-invariant weak +properties: deterministic stutter-invariant very-weak --BODY-- State: 0 {0} [t] 0 diff --git a/tests/core/complete.test b/tests/core/complete.test index 7f557f67c..1d1be12bc 100755 --- a/tests/core/complete.test +++ b/tests/core/complete.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017, 2022 Laboratoire de Recherche et Développement +# Copyright (C) 2015-2017, 2022, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -151,7 +151,7 @@ AP: 1 "a" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc complete -properties: deterministic +properties: deterministic weak --BODY-- State: 0 {0} [0] 1 @@ -169,7 +169,7 @@ AP: 1 "a" acc-name: none Acceptance: 0 f properties: trans-labels explicit-labels state-acc complete -properties: deterministic +properties: deterministic weak --BODY-- State: 0 [t] 1 diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index d6f5815f5..f11e1dbd1 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -611,7 +611,7 @@ Start: 0 AP: 3 "c" "a" "b" acc-name: all Acceptance: 0 t -properties: trans-labels explicit-labels state-acc deterministic +properties: trans-labels explicit-labels state-acc deterministic weak controllable-AP: 0 --BODY-- State: 0 diff --git a/tests/core/parseaut.test b/tests/core/parseaut.test index 7dabd563d..52748b07c 100755 --- a/tests/core/parseaut.test +++ b/tests/core/parseaut.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et +# Copyright (C) 2014-2018, 2020-2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -664,7 +664,7 @@ Start: 0
 AP: 1 "a"
 acc-name: all
 Acceptance: 0 t
-properties: trans-labels explicit-labels state-acc deterministic
+properties: trans-labels explicit-labels state-acc deterministic weak
 --BODY--
 State: 0
 [0] 1
@@ -1322,7 +1322,7 @@ AP: 2 "a" "b"
 acc-name: all
 Acceptance: 0 t
 properties: trans-labels explicit-labels state-acc complete
-properties: deterministic
+properties: deterministic weak
 --BODY--
 State: 0
 [0&!1] 0
@@ -1778,7 +1778,7 @@ Start: 0
 AP: 1 "a"
 Acceptance: 1 t
 properties: trans-labels explicit-labels trans-acc complete
-properties: deterministic
+properties: deterministic weak
 --BODY--
 State: 0 "F(a)"
 [0] 1 {0}
@@ -1844,7 +1844,7 @@ AP: 1 "a"
 acc-name: all
 Acceptance: 0 t
 properties: trans-labels explicit-labels state-acc complete
-properties: deterministic
+properties: deterministic weak
 --BODY--
 State: 0 "F(a)"
 [0] 1
@@ -2079,6 +2079,15 @@ properties: complete !weak very-weak
 --BODY--
 State: 0 0
 --END--
+HOA: v1
+States: 1
+Start: 0
+AP: 0
+Acceptance: 0 t
+properties: complete !very-weak
+--BODY--
+State: 0 0
+--END--
 EOF
 expecterr input <
[The rest of this hunk, together with the diffs for tests/core/randomize.test,
 readsave.test, remfin.test, sccsimpl.test, strength.test, wdba2.test,
 tests/ltsmin/kripke.test, and tests/python/automata-io.ipynb announced in the
 diffstat, was lost when this patch was extracted and cannot be reconstructed
 here.]
diff --git a/tests/python/automata.ipynb b/tests/python/automata.ipynb
[Elided: most of this diff merely refreshes object addresses
 (0x7f6a00215... -> 0x7f773078f...) and SVG identifiers embedded in the saved
 notebook outputs; the SVG markup itself was lost in extraction.  The content
 changes that survive are the removal of a leftover debug output and of the
 "print(a1.prop_weak())" line from the product example (cell 22), the addition
 of an empty trailing cell, and the usual kernelspec/version refresh:]
@@ -2582,13 +2582,6 @@
    "execution_count": 22,
    "metadata": {},
    "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "no\n"
-     ]
-    },
     {
      "data": {
       "text/html": [
...
     "# Using +1 in the display options is a convenient way to shift the \n",
     "# set numbers in the output, as an aid in reading the product.\n",
     "a1 = spot.translate('GF(a <-> Xa)')\n",
-    "print(a1.prop_weak())\n",
     "a2 = spot.translate('a U b & GFc')\n",
     "display_inline(a1.show('.t'), a2.show('.t+1'))\n",
     "# the product should display pairs of states, unless asked not to (using '1').\n",
...
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -3707,7 +3706,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.2"
+   "version": "3.11.6"
   }
  },
  "nbformat": 4,
diff --git a/tests/python/dbranch.py b/tests/python/dbranch.py
index 268c4a3c6..99c81bdf6 100644
--- a/tests/python/dbranch.py
+++ b/tests/python/dbranch.py
@@ -65,7 +65,7 @@ Start: 0
 AP: 3 "b" "a" "c"
 acc-name: all
 Acceptance: 0 t
-properties: trans-labels explicit-labels state-acc univ-branch
+properties: trans-labels explicit-labels state-acc univ-branch weak
 --BODY--
 State: 0
 [1] 1
diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb
index 15f9047a8..6d9ac1ed8 100644
--- a/tests/python/highlighting.ipynb
+++ b/tests/python/highlighting.ipynb
@@ -506,7 +506,7 @@
 "acc-name: Buchi\n",
 "Acceptance: 1 Inf(0)\n",
 "properties: trans-labels explicit-labels state-acc deterministic\n",
-
"properties: stutter-invariant terminal\n", + "properties: stutter-invariant terminal very-weak\n", "--BODY--\n", "State: 0 {0}\n", "[t] 0\n", @@ -526,7 +526,7 @@ "acc-name: Buchi\n", "Acceptance: 1 Inf(0)\n", "properties: trans-labels explicit-labels state-acc !complete\n", - "properties: deterministic stutter-invariant terminal\n", + "properties: deterministic stutter-invariant terminal very-weak\n", "spot.highlight.states: 0 0 1 5 2 5\n", "spot.highlight.edges: 2 1 4 1 5 1 6 2\n", "--BODY--\n", diff --git a/tests/python/kripke.py b/tests/python/kripke.py index 3670f592d..2d65ebc5d 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2019, 2022, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -45,7 +45,7 @@ Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t -properties: state-labels explicit-labels state-acc +properties: state-labels explicit-labels state-acc weak --BODY-- State: [0&1] 0 0 @@ -65,7 +65,7 @@ Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t -properties: state-labels explicit-labels state-acc +properties: state-labels explicit-labels state-acc weak --BODY-- State: [0&1] 0 "s0" 0 @@ -83,7 +83,7 @@ Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t -properties: state-labels explicit-labels state-acc +properties: state-labels explicit-labels state-acc weak --BODY-- State: [0&1] 0 "s0" 0 diff --git a/tests/python/ltsmin-dve.ipynb b/tests/python/ltsmin-dve.ipynb index 05530ca8e..7b474ce30 100644 --- a/tests/python/ltsmin-dve.ipynb +++ b/tests/python/ltsmin-dve.ipynb @@ -1773,7 +1773,7 @@ "AP: 3 \"a<1\" \"b>2\" \"dead\"\n", "acc-name: all\n", "Acceptance: 0 t\n", - "properties: state-labels explicit-labels state-acc\n", + "properties: state-labels explicit-labels state-acc weak\n", "--BODY--\n", "State: [0&!1&!2] 0 \"a=0, b=0, Q=0\"\n", "1 2\n", diff --git a/tests/python/mealy.py b/tests/python/mealy.py index 7f6070146..ef61daca9 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de +# Copyright (C) 2021-2023 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -557,7 +557,7 @@ Start: 0 AP: 2 "a" "b" acc-name: all Acceptance: 0 t -properties: trans-labels explicit-labels state-acc deterministic +properties: trans-labels explicit-labels state-acc deterministic weak controllable-AP: 1 --BODY-- State: 0 @@ -598,7 +598,7 @@ Start: 0 AP: 2 "a" "b" acc-name: all Acceptance: 0 t -properties: trans-labels explicit-labels state-acc deterministic +properties: trans-labels explicit-labels state-acc deterministic weak controllable-AP: 1 --BODY-- State: 0 @@ -677,4 +677,4 @@ spot.minimize_mealy(aut, 1) auts = spot.split_2step(aut) spot.minimize_mealy(auts, -1) spot.minimize_mealy(auts, 0) -spot.minimize_mealy(auts, 1) \ No newline at end of file +spot.minimize_mealy(auts, 1) diff --git a/tests/python/simstate.py b/tests/python/simstate.py index b0b62267d..4874fd478 100644 --- a/tests/python/simstate.py +++ b/tests/python/simstate.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2020-2022 Laboratoire de Recherche +# Copyright (C) 2015, 2017-2018, 2020-2023 Laboratoire de Recherche # et Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -539,10 +539,10 @@ States: 1 Start: 0 AP: 1 "a" Acceptance: 1 t -properties: trans-labels explicit-labels state-acc colored -properties: deterministic +properties: trans-labels explicit-labels state-acc deterministic +properties: very-weak --BODY-- -State: 0 {0} +State: 0 [0] 0 --END--''') From 5826a400637df677b4e8d36d6a47faeff0cb28a6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 9 Nov 2023 23:19:39 +0100 Subject: [PATCH 356/606] add test case for issue #546 This, and the previous two patches, fixes issue #546. * tests/core/ltlcross.test: Add a test case. --- tests/core/ltlcross.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index ebe20fb26..faa386cb9 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -68,3 +68,6 @@ ltlcross --verbose ltl2tgba ltl2tgba \ # Issue #524. ltlcross ltl2tgba -f '!(X(v3 | G!v5) | ((Xv5 & !(v5 & !X!v3)) U !v5))' + +# Issue #546. +ltlcross 'ltl2tgba --medium -p' 'ltl2tgba -p' -f 'a | FGa | GF(!b | Gb)' From 13f66e55af581084e25b03289ac0e57fa40f2f41 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 10 Nov 2023 23:00:19 +0100 Subject: [PATCH 357/606] * tests/core/twacube.cc: Remove duplicate line. --- tests/core/twacube.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/core/twacube.cc b/tests/core/twacube.cc index 1a2cd73a8..43118d6a5 100644 --- a/tests/core/twacube.cc +++ b/tests/core/twacube.cc @@ -80,7 +80,6 @@ int main() unsigned int seed = 17; auto it = aut->succ(2); SPOT_ASSUME(it); // GCC 7 warns about potential nullptr. - for (; !it->done(); it->next()) for (; !it->done(); it->next()) { auto& t = aut->trans_storage(it, seed); From 997f7ec7fbd064182f7af6860b9099e5696dc9cf Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 Nov 2023 15:40:24 +0100 Subject: [PATCH 358/606] gfguarantee: fix handling of true/false languages Fixes #546 (again). * spot/twaalgos/gfguarantee.cc (g_f_terminal_inplace): Detect true/false languages early, so that we do not tag them as non-inherently-weak. * tests/core/ltlcross.test: Improve test case. 
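[For illustration only — not part of the patch: the strengthened ltlcross
test below can be approximated from Python, assuming a build that includes
this fix; the option strings are only meant to mimic "ltl2tgba --medium -p"
and "ltl2tgba -p".]

    import spot

    f = 'a | FGa | GF(!b | Gb)'
    a1 = spot.translate(f, 'parity', 'medium')
    a2 = spot.translate(f, 'parity')
    # Both translations must recognize the same language.
    print(spot.are_equivalent(a1, a2))   # expected: True
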
--- spot/twaalgos/gfguarantee.cc | 13 ++++++++++++- tests/core/ltlcross.test | 4 +++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/gfguarantee.cc b/spot/twaalgos/gfguarantee.cc index f0aa58274..f25c378c4 100644 --- a/spot/twaalgos/gfguarantee.cc +++ b/spot/twaalgos/gfguarantee.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2018, 2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -61,6 +61,11 @@ namespace spot if (!is_terminal_automaton(aut, &si, true)) throw std::runtime_error("g_f_terminal() expects a terminal automaton"); + // If a terminal automaton has only one SCC, it is either + // universal or empty. In both cases G(automaton)=automaton. + if (si.scc_count() == 1) + return aut; + unsigned ns = si.scc_count(); std::vector term(ns, false); for (unsigned n = 0; n < ns; ++n) @@ -69,6 +74,9 @@ namespace spot aut->prop_keep({ false, false, true, false, true, true }); aut->prop_state_acc(state_based); + // The case where the input automaton is universal or empty has + // already been dealt with, before do_g_f_terminal_inplace was + // called. aut->prop_inherently_weak(false); aut->set_buchi(); @@ -477,8 +485,11 @@ namespace spot } twa_graph_ptr aut = ltl_to_tgba_fm(f, dict, true); twa_graph_ptr reduced = minimize_obligation(aut, f); + + // If f was not an obligation, we cannot deal with it here. if (reduced == aut) return nullptr; + scc_info si(reduced); if (!is_terminal_automaton(reduced, &si, true)) return nullptr; diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index faa386cb9..381cf7a66 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -70,4 +70,6 @@ ltlcross --verbose ltl2tgba ltl2tgba \ ltlcross ltl2tgba -f '!(X(v3 | G!v5) | ((Xv5 & !(v5 & !X!v3)) U !v5))' # Issue #546. -ltlcross 'ltl2tgba --medium -p' 'ltl2tgba -p' -f 'a | FGa | GF(!b | Gb)' +ltlcross 'ltl2tgba --medium -p' 'ltl2tgba -p' 'ltl2tgba --medium -D -p' \ + -f 'a | FGa | GF(!b | Gb)' \ + -f '(~ v1 U ~ v5) | G(F v9 & F G v1) | G F(~ v7 | G v7 | G v3)' From b7a0a8c324f81072dec374645120e0439403a409 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 Nov 2023 16:52:41 +0100 Subject: [PATCH 359/606] gfguarante: update citation * spot/twaalgos/gfguarantee.hh: Properly cite the LICS'18 paper. * doc/spot.bib: Add the entry. --- doc/spot.bib | 15 ++++++++++++++- spot/twaalgos/gfguarantee.hh | 28 ++++++++++++++-------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/doc/spot.bib b/doc/spot.bib index c645c8156..3f24e40be 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -1,4 +1,3 @@ - @InProceedings{ babiak.12.tacas, author = {Tom{\'a}{\v{s}} Babiak and Mojm{\'i}r K{\v{r}}et{\'i}nsk{\'y} and Vojt{\v{e}}ch {\v{R}}eh{\'a}k @@ -457,6 +456,20 @@ doi = {10.1007/978-3-642-01702-5_17} } +@InProceedings{ esparza.18.lics, + author = {Javier Esparza and Jan K\v{r}et{\'{\i}}nsk{\'{y}} and + Salomon Sickert}, + title = {One Theorem to Rule Them All: {A} Unified Translation of + {LTL} into $\omega$-Automata}, + booktitle = {Proceedings of the 33rd Annual {ACM/IEEE} Symposium on + Logic in Computer Science (LICS'18)}, + pages = {384--393}, + year = {2018}, + editor = {Anuj Dawar and Erich Gr{\"{a}}del}, + publisher = {ACM}, + doi = {10.1145/3209108.3209161} +} + @InProceedings{ etessami.00.concur, author = {Kousha Etessami and Gerard J. 
Holzmann}, title = {Optimizing {B\"u}chi Automata}, diff --git a/spot/twaalgos/gfguarantee.hh b/spot/twaalgos/gfguarantee.hh index 40cb16f97..32edae439 100644 --- a/spot/twaalgos/gfguarantee.hh +++ b/spot/twaalgos/gfguarantee.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2018, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -30,17 +30,17 @@ namespace spot /// \brief Given a terminal automaton \a f_terminal recognizing /// some formula F(φ), modify it to recognize GF(φ). /// - /// If \a state_based is set, the automaton all terminal states are - /// replaced by a unique accepting state that has the same outgoing - /// transitions as the initial state, and the initial state is - /// actually relocated to that accepting state. The latter point is - /// not necessary, but it favors shorter accepting cycles. + /// If \a state_based is set, the automaton's terminal states are + /// all replaced by a unique accepting state that has the same + /// outgoing transitions as the initial state, and the initial state + /// is actually relocated to that accepting state. The latter point + /// is not necessary, but it favors shorter accepting cycles. /// /// If \a state_based is not set, all transition going to terminal /// states are made accepting and redirected to the initial state. /// /// This construction is inspired by a similar construction in the - /// LICS'18 paper by J. Esparza, J. Křetínský, and S. Sickert. + /// LICS'18 paper by Esparza et al. \cite esparza.18.lics SPOT_API twa_graph_ptr g_f_terminal_inplace(twa_graph_ptr f_terminal, bool state_based = false); @@ -57,12 +57,12 @@ namespace spot /// Return nullptr if the input formula is not of the supported /// form. /// - /// This construction generalizes a construction in the LICS'18 - /// paper of J. Esparza, J. Křetínský, and S. Sickert. This version - /// will work if Φ represent a safety property, even if it is not a - /// syntactic safety. When building deterministic transition-based - /// automata, it will also try to remove useless trivial components - /// at the beginning of wdba(A_Φ). + /// This construction generalizes a construction in a LICS'18 + /// paper by Esparza et al. \cite esparza.18.lics + /// This version will work if Φ represents a safety property, even if + /// it is not a syntactic safety. When building deterministic + /// transition-based automata, it will also try to remove useless + /// trivial components at the beginning of wdba(A_Φ). SPOT_API twa_graph_ptr gf_guarantee_to_ba_maybe(formula gf, const bdd_dict_ptr& dict, bool deterministic = true, bool state_based = false); From bed87c60a4f283ebe10a9a387098eb107d7b9759 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 Nov 2023 18:14:12 +0100 Subject: [PATCH 360/606] parseaut: update highlight-edges when edges are dropped/added MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes #548, reported by Dávid Smolka. * spot/parseaut/parseaut.yy: Update the edge numbers in the highlight-edges property. * tests/core/highlightstate.test: Add test case. * NEWS: Mention the bug. 
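[For illustration only — not part of the patch: a small Python sketch of the
bug being fixed, assuming a build that includes this commit.  The automaton
is made up; its second edge is labeled "f" and is therefore dropped by the
parser, so the highlight numbers have to be remapped.]

    import spot

    aut = spot.automaton("""HOA: v1
    States: 1
    Start: 0
    AP: 1 "a"
    Acceptance: 0 t
    spot.highlight.edges: 1 1 2 2
    --BODY--
    State: 0
    [0] 0
    [f] 0
    --END--""")
    # With this fix, the highlight of the dropped edge disappears instead of
    # being attached to a wrong (or nonexistent) edge number.
    print(aut.to_str('hoa', '1.1'))
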
--- NEWS | 2 + spot/parseaut/parseaut.yy | 75 ++++++++++++++++++++++++---------- tests/core/highlightstate.test | 26 +++++++++++- 3 files changed, 80 insertions(+), 23 deletions(-) diff --git a/NEWS b/NEWS index 928adf584..d5bc2371d 100644 --- a/NEWS +++ b/NEWS @@ -127,6 +127,8 @@ New in spot 2.11.6.dev (not yet released) (This cheap check will not catch all automata incorrectly labeled by !weak, but helps detects some issues nonetheless.) + - The automaton parser forgot to update the list of highlighted + edges while dropping edges labeled by bddfalse. (issue #548.) New in spot 2.11.6 (2023-08-01) diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index c52beb1e3..05e404474 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -118,6 +118,17 @@ extern "C" int strverscmp(const char *s1, const char *s2); bool in_alias = false; map_t dest_map; std::vector info_states; // States declared and used. + // Mapping of edges in the HOA file to edges in the automaton. + // Edges are counted from 0 in the HOA file and from 1 in the + // automaton. Given edge #i in the HOA file, edge_map[i] gives + // corresponding edge in the automaton, or 0 if that edge was + // removed (because labeled by bddfalse). This map is used to + // update properties such as highlight_edges after the automaton + // has been read. Note that the parser may also introduce + // unlisted edges, e.g., a bddfalse self-loop to hold the + // acceptance of a state without declared outgoing edge. Those + // added edges are not a concern for this edge_map. + std::vector edge_map; std::vector>> start; // Initial states; std::unordered_map alias; @@ -1431,10 +1442,8 @@ states: %empty if (res.acc_state && !res.opts.want_kripke && res.h->aut->get_graph().state_storage(res.cur_state).succ == 0) - { - res.h->aut->new_edge(res.cur_state, res.cur_state, - bddfalse, res.acc_state); - } + res.h->aut->new_edge(res.cur_state, res.cur_state, + bddfalse, res.acc_state); } state: state-name labeled-edges | state-name unlabeled-edges @@ -1642,16 +1651,20 @@ incorrectly-unlabeled-edge: checked-state-num trans-acc_opt cond = res.state_label; if (cond != bddfalse) { + unsigned e; if (res.opts.want_kripke) - res.h->ks->new_edge(res.cur_state, $1); + e = res.h->ks->new_edge(res.cur_state, $1); else - res.h->aut->new_edge(res.cur_state, $1, - cond, - $2 | res.acc_state); + e = res.h->aut->new_edge(res.cur_state, $1, + cond, + $2 | + res.acc_state); + res.edge_map.push_back(e); } } labeled-edge: trans-label checked-state-num trans-acc_opt { + unsigned e = 0; if (res.cur_label != bddfalse || // As a hack to allow states to be accepting // even if they do not have transitions, we @@ -1660,22 +1673,26 @@ labeled-edge: trans-label checked-state-num trans-acc_opt ($2 == res.cur_state && !!($3 | res.acc_state))) { if (res.opts.want_kripke) - res.h->ks->new_edge(res.cur_state, $2); + e = res.h->ks->new_edge(res.cur_state, $2); else - res.h->aut->new_edge(res.cur_state, $2, - res.cur_label, $3 | res.acc_state); + e = res.h->aut->new_edge(res.cur_state, $2, + res.cur_label, + $3 | res.acc_state); } + res.edge_map.push_back(e); } | trans-label state-conj-checked trans-acc_opt { + unsigned e = 0; if (res.cur_label != bddfalse) { assert(!res.opts.want_kripke); - res.h->aut->new_univ_edge(res.cur_state, - $2->begin(), $2->end(), - res.cur_label, - $3 | res.acc_state); + e = res.h->aut->new_univ_edge(res.cur_state, + $2->begin(), $2->end(), + res.cur_label, + $3 | res.acc_state); } + res.edge_map.push_back(e); delete $2; } @@ -1719,14 
+1736,16 @@ unlabeled-edge: checked-state-num trans-acc_opt cond = *res.cur_guard++; } } + unsigned e = 0; if (cond != bddfalse) { if (res.opts.want_kripke) - res.h->ks->new_edge(res.cur_state, $1); + e = res.h->ks->new_edge(res.cur_state, $1); else - res.h->aut->new_edge(res.cur_state, $1, - cond, $2 | res.acc_state); + e = res.h->aut->new_edge(res.cur_state, $1, + cond, $2 | res.acc_state); } + res.edge_map.push_back(e); } | state-conj-checked trans-acc_opt { @@ -1750,13 +1769,15 @@ unlabeled-edge: checked-state-num trans-acc_opt cond = *res.cur_guard++; } } + unsigned e = 0; if (cond != bddfalse) { assert(!res.opts.want_kripke); - res.h->aut->new_univ_edge(res.cur_state, - $1->begin(), $1->end(), - cond, $2 | res.acc_state); + e = res.h->aut->new_univ_edge(res.cur_state, + $1->begin(), $1->end(), + cond, $2 | res.acc_state); } + res.edge_map.push_back(e); delete $1; } incorrectly-labeled-edge: trans-label unlabeled-edge @@ -2919,7 +2940,17 @@ namespace spot if (r.state_names) r.aut_or_ks->set_named_prop("state-names", r.state_names); if (r.highlight_edges) - r.aut_or_ks->set_named_prop("highlight-edges", r.highlight_edges); + { + // Update the highlight_edges map to deal with removed/added + // edges. + std::map remap; + for (auto [edgnum, color]: *r.highlight_edges) + if (edgnum > 0) /* not expected, but can't trust input data */ + if (unsigned newnum = r.edge_map[edgnum - 1]; newnum > 0) + remap[newnum] = color; + std::swap(remap, *r.highlight_edges); + r.aut_or_ks->set_named_prop("highlight-edges", r.highlight_edges); + } if (r.highlight_states) r.aut_or_ks->set_named_prop("highlight-states", r.highlight_states); if (r.state_player) diff --git a/tests/core/highlightstate.test b/tests/core/highlightstate.test index a42eb3b53..879ee2ce8 100755 --- a/tests/core/highlightstate.test +++ b/tests/core/highlightstate.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2019 Laboratoire de Recherche et Développement +# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -231,3 +231,27 @@ cat >expect.hoa <bug548.hoa < out.hoa +cat >expect.hoa < Date: Tue, 14 Nov 2023 22:21:52 +0100 Subject: [PATCH 361/606] parseaut: allow false edges to not be dropped This is a followup to issue #548, which was caused by edges being dropped. In that context dropping edge was not really desirable, so let's make this behavior configurable. * spot/parseaut/public.hh: Add a new option. * python/spot/__init__.py: Likewise. * spot/parseaut/parseaut.yy: Honor that option. * tests/python/parsetgba.py: Add a short test for it. * NEWS: Mention it. --- NEWS | 10 +++++++ python/spot/__init__.py | 6 +++- spot/parseaut/parseaut.yy | 13 +++++---- spot/parseaut/public.hh | 15 +++++----- tests/python/parsetgba.py | 59 +++++++++++++++++++++++++++++++++++---- 5 files changed, 84 insertions(+), 19 deletions(-) diff --git a/NEWS b/NEWS index d5bc2371d..021548c09 100644 --- a/NEWS +++ b/NEWS @@ -49,6 +49,10 @@ New in spot 2.11.6.dev (not yet released) supports only one): it now reuse the edges leaving initial states without incoming transitions. + - The automaton parser has a new option "drop_false_edges" to + specify where edges labeled by "false" should be ignored during + parsing. It is enabled by default for backward compatibility. + - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() that converts a BDD into a CNF instead of a DNF. 
@@ -104,6 +108,12 @@ New in spot 2.11.6.dev (not yet released) removal of superfluous APs that is now performed by ltlsynt (search for --polarity and --global-equivalence above). + Python: + + - The spot.automata() and spot.automaton() functions now accept a + drop_false_edges=False argument to disable the historical behavior + of ignoring edges labeled by False. + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 02bdcb1f6..cefb59b77 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -564,7 +564,7 @@ function acd{num}_node(node, acc){{ def automata(*sources, timeout=None, ignore_abort=True, trust_hoa=True, no_sid=False, debug=False, - want_kripke=False): + want_kripke=False, drop_false_edges=True): """Read automata from a list of sources. Parameters @@ -587,6 +587,9 @@ def automata(*sources, timeout=None, ignore_abort=True, If True, the input is expected to discribe Kripke structures, in the HOA format, and the returned type will be of type kripke_graph_ptr. + drop_false_edges : bool, optional + If True (the default), edges labeled by false will + be ignored during parsing. no_sid : bool, optional When an automaton is obtained from a subprocess, this subprocess is started from a shell with its own session @@ -647,6 +650,7 @@ def automata(*sources, timeout=None, ignore_abort=True, o.trust_hoa = trust_hoa o.raise_errors = True o.want_kripke = want_kripke + o.drop_false_edges = drop_false_edges for filename in sources: try: diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 05e404474..8de9d9fab 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1649,7 +1649,7 @@ incorrectly-unlabeled-edge: checked-state-num trans-acc_opt "(previous edge is labeled)"); else cond = res.state_label; - if (cond != bddfalse) + if (cond != bddfalse || !res.opts.drop_false_edges) { unsigned e; if (res.opts.want_kripke) @@ -1665,12 +1665,13 @@ incorrectly-unlabeled-edge: checked-state-num trans-acc_opt labeled-edge: trans-label checked-state-num trans-acc_opt { unsigned e = 0; - if (res.cur_label != bddfalse || + if (res.cur_label != bddfalse + || !res.opts.drop_false_edges // As a hack to allow states to be accepting // even if they do not have transitions, we // do not ignore false-labeled self-loops if they // have some colors) - ($2 == res.cur_state && !!($3 | res.acc_state))) + || ($2 == res.cur_state && !!($3 | res.acc_state))) { if (res.opts.want_kripke) e = res.h->ks->new_edge(res.cur_state, $2); @@ -1684,7 +1685,7 @@ labeled-edge: trans-label checked-state-num trans-acc_opt | trans-label state-conj-checked trans-acc_opt { unsigned e = 0; - if (res.cur_label != bddfalse) + if (res.cur_label != bddfalse || !res.opts.drop_false_edges) { assert(!res.opts.want_kripke); e = res.h->aut->new_univ_edge(res.cur_state, @@ -1737,7 +1738,7 @@ unlabeled-edge: checked-state-num trans-acc_opt } } unsigned e = 0; - if (cond != bddfalse) + if (cond != bddfalse || !res.opts.drop_false_edges) { if (res.opts.want_kripke) e = res.h->ks->new_edge(res.cur_state, $1); @@ -1770,7 +1771,7 @@ unlabeled-edge: checked-state-num trans-acc_opt } } unsigned e = 0; - if (cond != bddfalse) + if (cond != bddfalse || !res.opts.drop_false_edges) { assert(!res.opts.want_kripke); e = res.h->aut->new_univ_edge(res.cur_state, diff --git a/spot/parseaut/public.hh b/spot/parseaut/public.hh index ec16b3ad7..2a5cfff76 100644 --- a/spot/parseaut/public.hh +++ b/spot/parseaut/public.hh @@ -1,5 +1,5 
@@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2016, 2017, 2022 Laboratoire de Recherche et +// Copyright (C) 2013-2017, 2022-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -103,6 +103,7 @@ namespace spot bool trust_hoa = true; ///< Trust properties in HOA files bool raise_errors = false; ///< Raise errors as exceptions. bool want_kripke = false; ///< Parse as a Kripke structure. + bool drop_false_edges = true; ///< Drop edges with false labels. }; /// \brief Parse a stream of automata @@ -193,12 +194,12 @@ namespace spot /// \param opts Additional options to pass to the parser. /// \return A pointer to a \c parsed_aut structure. /// - /// This is a wrapper around spot::automaton_stream_parser that returns - /// the first automaton of the file. Empty inputs are reported as - /// syntax errors, so the \c aut field of the result is guaranteed not - /// to be null if \c errors is empty. (This is unlike - /// automaton_stream_parser::parse() where a null \c aut denots the - /// end of a stream.) + /// This is a wrapper around spot::automaton_stream_parser that + /// returns the first automaton of the file. Empty inputs are + /// reported as syntax errors, so the \c aut field of the result is + /// guaranteed not to be null if \c errors is empty. (This is + /// unlike automaton_stream_parser::parse() where a null \c aut + /// denotes the end of a stream.) /// /// \warning This function is not reentrant. SPOT_API parsed_aut_ptr diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index 038b33a19..a91b702fb 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2022 Laboratoire de Recherche et +# Copyright (C) 2012, 2014, 2015, 2022, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -35,11 +35,60 @@ out.write(contents) out.close() a = spot.parse_aut(filename, spot.make_bdd_dict()) - tc.assertFalse(a.errors) - spot.print_dot(spot.get_cout(), a.aut) - del a - os.unlink(filename) + + +autstr = """ +HOA: v1 +States: 2 +Start: 0 +AP: 0 +Acceptance: 0 t +spot.highlight.edges: 1 1 2 2 3 3 4 4 +--BODY-- +State: 0 +[t] 1 +[f] 0 +State: 1 +[f] 0 +[t] 0 +--END-- +""" + +a1 = spot.automaton(autstr) +tc.assertEqual(a1.to_str("hoa", "1.1"), """HOA: v1.1 +States: 2 +Start: 0 +AP: 0 +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc complete +properties: deterministic weak +spot.highlight.edges: 1 1 2 4 +--BODY-- +State: 0 +[t] 1 +State: 1 +[t] 0 +--END--""") +a2 = spot.automaton(autstr, drop_false_edges=False) +tc.assertEqual(a2.to_str("hoa", "1.1"), """HOA: v1.1 +States: 2 +Start: 0 +AP: 0 +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc complete +properties: deterministic weak +spot.highlight.edges: 1 1 2 2 3 3 4 4 +--BODY-- +State: 0 +[t] 1 +[f] 0 +State: 1 +[f] 0 +[t] 0 +--END--""") From b7995fcc5dd7f36d621168cdc980795daaa23d85 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 15 Nov 2023 15:06:30 +0100 Subject: [PATCH 362/606] * .gitlab-ci.yml (rpm-pkg): Start from the make-dist tarball. 
--- .gitlab-ci.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c9ed140ff..805f34db1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -376,7 +376,12 @@ debpkg-unstable-i386: - _build_unstable/ rpm-pkg: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + GIT_STRATEGY: none only: - /-rpm$/ - master @@ -384,12 +389,10 @@ rpm-pkg: - stable image: gitlab-registry.lre.epita.fr/spot/buildenv/fedora script: - - autoreconf -vfi - - ./configure - - make - - make dist - - cp spot-*.tar.gz ~/rpmbuild/SOURCES/ - - cp spot.spec ~/rpmbuild/SPECS/ + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz spot-$VERSION/spot.spec + - cp spot-$VERSION.tar.gz ~/rpmbuild/SOURCES/ + - cp spot-$VERSION/spot.spec ~/rpmbuild/SPECS/ - rpmbuild -bb ~/rpmbuild/SPECS/spot.spec - mv ~/rpmbuild/RPMS/x86_64/*.rpm . allow_failure: true From 5ed56c054b6124459797762778f82864f445de81 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 15 Nov 2023 17:00:19 +0100 Subject: [PATCH 363/606] ltsmin: make it easier to find the README Fixes #550, reported by Daniel Stan. * tests/ltsmin/README: Move... * README.ltsmin: ... here. * Makefile.am (EXTRA_DIST): Add README.ltsmin. * README: Mention README.ltsmin. * spot/ltsmin/spins_interface.cc: Mention README.ltsmin in the error message. * tests/ltsmin/check.test, tests/ltsmin/check3.test: Adjust reference to README. * NEWS: Mention this fix. * THANKS: Add Danial. --- Makefile.am | 6 +++--- NEWS | 3 +++ README | 4 ++++ tests/ltsmin/README => README.ltsmin | 0 THANKS | 1 + spot/ltsmin/spins_interface.cc | 26 +++++++++++++++++++------- tests/ltsmin/check.test | 6 +++--- tests/ltsmin/check3.test | 6 +----- 8 files changed, 34 insertions(+), 18 deletions(-) rename tests/ltsmin/README => README.ltsmin (100%) diff --git a/Makefile.am b/Makefile.am index db7a60d9b..5cd8257d2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,6 +1,6 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2017, 2020, 2022 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) 2011-2017, 2020, 2022-2023 Laboratoire de Recherche +## et Développement de l'Epita (LRDE). ## Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), ## département Systèmes Répartis Coopératifs (SRC), Université Pierre ## et Marie Curie. @@ -69,7 +69,7 @@ EXTRA_DIST = HACKING ChangeLog.1 tools/gitlog-to-changelog \ tools/help2man tools/man2html.pl \ tools/test-driver-teamcity $(UTF8) $(DEBIAN) \ m4/gnulib-cache.m4 .dir-locals.el \ - spot.spec spot.spec.in + spot.spec spot.spec.in README.ltsmin dist-hook: gen-ChangeLog diff --git a/NEWS b/NEWS index 021548c09..72e24d612 100644 --- a/NEWS +++ b/NEWS @@ -108,6 +108,9 @@ New in spot 2.11.6.dev (not yet released) removal of superfluous APs that is now performed by ltlsynt (search for --polarity and --global-equivalence above). + - ltsmin's interface will now point to README.ltsmin in case an + error is found while running divine or spins. + Python: - The spot.automata() and spot.automaton() functions now accept a diff --git a/README b/README index 458da2d99..a19fc8473 100644 --- a/README +++ b/README @@ -93,6 +93,10 @@ automata. If the SAT-solver glucose is found on your system, it will be used by our test suite to test our SAT-based minimization algorithm. 
+If you want to use Spot with DiVinE2 (for model checking DVE models) +or with SpinS (for model checking Promela models), please read the +file named "README.ltsmin" for installation instructions. + Spot used to distribute a modified version of LBTT (an LTL to Büchi test bench), mostly fixing errors reported by recent compilers. However Spot now distributes its own reimplementation of LBTT, called diff --git a/tests/ltsmin/README b/README.ltsmin similarity index 100% rename from tests/ltsmin/README rename to README.ltsmin diff --git a/THANKS b/THANKS index ae3a37f8c..46e747d4e 100644 --- a/THANKS +++ b/THANKS @@ -10,6 +10,7 @@ Caroline Lemieux Christian Dax Christopher Ziegler Clément Tamines +Daniel Stan David Dokoupil David Müller Dávid Smolka diff --git a/spot/ltsmin/spins_interface.cc b/spot/ltsmin/spins_interface.cc index bd968b208..f4e1823f2 100644 --- a/spot/ltsmin/spins_interface.cc +++ b/spot/ltsmin/spins_interface.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2019, 2020 Laboratoire de Recherche et Développement +// Copyright (C) 2019, 2020, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE) // // This file is part of Spot, a model checking library. @@ -45,22 +45,26 @@ namespace spot static void compile_model(std::string& filename, const std::string& ext) { + const char* cmd; std::string command; std::string compiled_ext; if (ext == ".gal") { - command = "gal2c " + filename; + cmd = "gal2c "; + command = cmd + filename; compiled_ext = "2C"; } else if (ext == ".prom" || ext == ".pm" || ext == ".pml") { - command = "spins " + filename; + cmd = "spins "; + command = cmd + filename; compiled_ext = ".spins"; } else if (ext == ".dve") { - command = "divine compile --ltsmin " + filename; + cmd = "divine "; + command = cmd + "compile --ltsmin "s + filename; command += " 2> /dev/null"; // FIXME needed for Clang on MacOSX compiled_ext = "2C"; } @@ -92,9 +96,17 @@ namespace spot int res = system(command.c_str()); if (res) - throw std::runtime_error("Execution of '"s - + command.c_str() + "' returned exit code " - + std::to_string(WEXITSTATUS(res))); + { + std::ostringstream os; + int status = WEXITSTATUS(res); + os << "Execution of '" << command << "' returned exit code " + << status << '.'; + if (status == 127) + os << "\nIs " << cmd << "installed and on your $PATH?\n" + "Read README.ltsmin in Spot's sources for " + "installation instructions."; + throw std::runtime_error(os.str()); + } } } diff --git a/tests/ltsmin/check.test b/tests/ltsmin/check.test index 2beb82cc3..c4ce7d49b 100755 --- a/tests/ltsmin/check.test +++ b/tests/ltsmin/check.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2014, 2015, 2016, 2017, 2019, 2020 Laboratoire -# de Recherche et Développement de l'Epita (LRDE). +# Copyright (C) 2011-2012, 2014-2017, 2019-2020, 2023 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -48,7 +48,7 @@ fi # dve2 for opt in '' '--compress 1'; do - # The three examples from the README. + # The three examples from README.ltsmin. # (Don't run the first one using "run 0" because it would take too much # time with valgrind.). 
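(To make the new error message concrete: assuming a model file named model.dve and no divine binary on the $PATH, system() reports exit status 127, so the runtime_error built by compile_model() above would read roughly as follows; the file name is hypothetical.)

  Execution of 'divine compile --ltsmin model.dve 2> /dev/null' returned exit code 127.
  Is divine installed and on your $PATH?
  Read README.ltsmin in Spot's sources for installation instructions.
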
diff --git a/tests/ltsmin/check3.test b/tests/ltsmin/check3.test index 5d98cf83f..47df0e4d3 100755 --- a/tests/ltsmin/check3.test +++ b/tests/ltsmin/check3.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017 Laboratoire de Recherche +# Copyright (C) 2016-2017, 2023 Laboratoire de Recherche # et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -28,10 +28,6 @@ fi set -e for opt in '' '--compress 1'; do - # The three examples from the README. - # (Don't run the first one using "run 0" because it would take too much - # time with valgrind.). - run 1 ../modelcheck $opt --is-empty --model $srcdir/beem-peterson.4.gal \ --formula '!G("P_0.state==2" -> F "P_0.state==1")' run 1 ../modelcheck $opt --is-empty --model $srcdir/beem-peterson.4.gal \ From 13377542cdee2434e4de224a7ee719af18b70fd8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 Nov 2023 10:52:55 +0100 Subject: [PATCH 364/606] autfilt: fix a typo in the --help output * bin/common_aoutput.cc: Here. * doc/org/autfilt.org: Adjust the documentation. --- bin/common_aoutput.cc | 2 +- doc/org/autfilt.org | 57 +++++++++++++++++++++++-------------------- 2 files changed, 31 insertions(+), 28 deletions(-) diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 1311c7c28..0b84e60e0 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -213,7 +213,7 @@ static const argp_option io_options[] = 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of edges (add one LETTER to select (r) reachable [default], " "(u) unreachable, (a) all).", 0 }, - { "%T, %t, %[LETTER]E, %[LETTER]e", + { "%T, %t, %[LETTER]T, %[LETTER]t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of transitions (add one LETTER to select (r) reachable " "[default], (u) unreachable, (a) all).", 0 }, diff --git a/doc/org/autfilt.org b/doc/org/autfilt.org index 5c8a8f1e5..bcbe8e4dd 100644 --- a/doc/org/autfilt.org +++ b/doc/org/autfilt.org @@ -128,28 +128,30 @@ autfilt --stats='%s states, %e edges, %a acc-sets, %c SCCs, det=%d' The following =%= sequences are available: #+BEGIN_SRC sh :exports results -ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' +autfilt --help | sed -n '/ for output):/,/^$/p' | sed '1d;$d' #+END_SRC #+RESULTS: #+begin_example - %< the part of the line before the formula if it - comes from a column extracted from a CSV file - %> the part of the line after the formula if it comes - from a column extracted from a CSV file %% a single % - %a number of acceptance sets - %c, %[LETTERS]c number of SCCs; you may filter the SCCs to count + %< the part of the line before the automaton if it + comes from a column extracted from a CSV file + %> the part of the line after the automaton if it + comes from a column extracted from a CSV file + %A, %a number of acceptance sets + %C, %c, %[LETTERS]C, %[LETTERS]c + number of SCCs; you may filter the SCCs to count using the following LETTERS, possibly concatenated: (a) accepting, (r) rejecting, (c) complete, (v) trivial, (t) terminal, (w) weak, (iw) inherently weak. Use uppercase letters to negate them. - %d 1 if the output is deterministic, 0 otherwise - %e, %[LETTER]e number of edges (add one LETTER to select (r) - reachable [default], (u) unreachable, (a) all). - %f the formula, in Spot's syntax + %D, %d 1 if the automaton is deterministic, 0 otherwise + %E, %e, %[LETTER]E, %[LETTER]e number of edges (add one LETTER to select + (r) reachable [default], (u) unreachable, (a) + all). 
%F name of the input file - %g, %[LETTERS]g acceptance condition (in HOA syntax); add brackets + %G, %g, %[LETTERS]G, %[LETTERS]g + acceptance condition (in HOA syntax); add brackets to print an acceptance name instead and LETTERS to tweak the format: (0) no parameters, (a) accentuated, (b) abbreviated, (d) style used in @@ -158,31 +160,32 @@ ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' parameter, (p) no parity parameter, (o) name unknown acceptance as 'other', (s) shorthand for 'lo0'. - %h the automaton in HOA format on a single line (use - %[opt]h to specify additional options as in - --hoa=opt) + %H, %h the automaton in HOA format on a single line (use + %[opt]H or %[opt]h to specify additional options + as in --hoa=opt) %L location in the input file - %m name of the automaton - %n number of nondeterministic states in output - %p 1 if the output is complete, 0 otherwise + %l serial number of the output automaton (0-based) + %M, %m name of the automaton + %N, %n number of nondeterministic states + %P, %p 1 if the automaton is complete, 0 otherwise %r wall-clock time elapsed in seconds (excluding parsing) %R, %[LETTERS]R CPU time (excluding parsing), in seconds; Add - LETTERS to restrict to(u) user time, (s) system + LETTERS to restrict to (u) user time, (s) system time, (p) parent process, or (c) children processes. - %s, %[LETTER]s number of states (add one LETTER to select (r) - reachable [default], (u) unreachable, (a) all). - %t, %[LETTER]t number of transitions (add one LETTER to select + %S, %s, %[LETTER]S, %[LETTER]s number of states (add one LETTER to select (r) reachable [default], (u) unreachable, (a) all). - %u, %[e]u number of states (or [e]dges) with universal - branching - %u, %[LETTER]u 1 if the automaton contains some universal + %T, %t, %[LETTER]T, %[LETTER]t number of transitions (add one LETTER to + select (r) reachable [default], (u) unreachable, + (a) all). + %U, %u, %[LETTER]U, %[LETTER]u 1 if the automaton contains some universal branching (or a number of [s]tates or [e]dges with universal branching) - %w one word accepted by the output automaton - %x, %[LETTERS]x number of atomic propositions declared in the + %W, %w one word accepted by the automaton + %X, %x, %[LETTERS]X, %[LETTERS]x + number of atomic propositions declared in the automaton; add LETTERS to list atomic propositions with (n) no quoting, (s) occasional double-quotes with C-style escape, (d) From 0e71dd70c157d008b6bfa9299bcf1efe77be21fd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 17 Nov 2023 13:41:19 +0100 Subject: [PATCH 365/606] sccfilter: some inherently-weak automata should have t acceptance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * spot/twaalgos/sccfilter.cc: If an inherently-weak automaton has no rejecting cycle, reduce its acceptance to t instead of Büchi. * spot/twa/acc.hh (operator==, operator<): Fix comparisons of true acceptances. * NEWS: Mention these two changes. * spot/twaalgos/sccfilter.hh: Update documentation. * spot/twaalgos/determinize.cc (tgba_determinize): The call to scc_filter assume that the input BA is never reduced to t acceptance. Call scc_filter with an extra option to ensure that. * spot/twaalgos/postproc.cc (do_scc_filter): Adjust to add the extra option when we want to build Büchi or coBuchi. (ensure_ba): Don't mark trivial SCCs as accepting. 
* tests/core/complement.test, tests/core/dstar.test, tests/core/ltlsynt.test, tests/core/readsave.test, tests/core/wdba2.test, tests/python/_product_susp.ipynb, tests/python/automata-io.ipynb, tests/python/dualize.py, tests/python/highlighting.ipynb, tests/python/intrun.py, tests/python/setacc.py, tests/python/simstate.py, tests/python/stutter-inv.ipynb, tests/python/zlktree.py: Adjust test cases. --- NEWS | 13 + spot/twa/acc.hh | 12 +- spot/twaalgos/determinize.cc | 2 +- spot/twaalgos/postproc.cc | 27 +- spot/twaalgos/sccfilter.cc | 40 ++- spot/twaalgos/sccfilter.hh | 17 +- tests/core/complement.test | 4 +- tests/core/dstar.test | 6 +- tests/core/ltlsynt.test | 14 +- tests/core/readsave.test | 29 +- tests/core/wdba2.test | 8 +- tests/python/_product_susp.ipynb | 548 +++++++++++++++---------------- tests/python/automata-io.ipynb | 498 ++++++++++++++-------------- tests/python/dualize.py | 4 +- tests/python/highlighting.ipynb | 327 +++++++++--------- tests/python/intrun.py | 16 +- tests/python/setacc.py | 6 +- tests/python/simstate.py | 3 +- tests/python/stutter-inv.ipynb | 124 +++---- tests/python/zlktree.py | 1 + 20 files changed, 857 insertions(+), 842 deletions(-) diff --git a/NEWS b/NEWS index 72e24d612..d89e92050 100644 --- a/NEWS +++ b/NEWS @@ -108,6 +108,15 @@ New in spot 2.11.6.dev (not yet released) removal of superfluous APs that is now performed by ltlsynt (search for --polarity and --global-equivalence above). + - scc_filter used to reduce automata tagged with the inherently-weak + property to weak Büchi automata (unless the acceptance was already + t or co-Büchi). In case where the input automaton had no + rejecting cycle, the Büchi acceptance was overkill: scc_filter + will now use "t" acceptance. This change may have unexpected + conseqences in code paths that assume running scc_filter on a + Büchi automaton will always return a Büchi automaton. For those, + a "keep_one_color" option has been added to scc_filter. + - ltsmin's interface will now point to README.ltsmin in case an error is found while running divine or spins. @@ -143,6 +152,10 @@ New in spot 2.11.6.dev (not yet released) - The automaton parser forgot to update the list of highlighted edges while dropping edges labeled by bddfalse. (issue #548.) + - The comparison operators for acceptance condition (==, !=) + could fail to equate two "t" condition, because we have two ways + to represent "t": the empty condition, or the empty "Inf({})". + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 766dd5224..b7817aa0b 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -482,6 +482,9 @@ namespace spot bool operator==(const acc_code& other) const { + // We have two ways to represent t, unfortunately. + if (is_t() && other.is_t()) + return true; unsigned pos = size(); if (other.size() != pos) return false; @@ -513,6 +516,9 @@ namespace spot bool operator<(const acc_code& other) const { + // We have two ways to represent t, unfortunately. + if (is_t() && other.is_t()) + return false; unsigned pos = size(); auto osize = other.size(); if (pos < osize) @@ -1560,7 +1566,11 @@ namespace spot bool operator==(const acc_cond& other) const { - return other.num_sets() == num_ && other.get_acceptance() == code_; + if (other.num_sets() != num_) + return false; + const acc_code& ocode = other.get_acceptance(); + // We have two ways to represent t, unfortunately. 
+ return (ocode == code_ || (ocode.is_t() && code_.is_t())); } bool operator!=(const acc_cond& other) const diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index 2bc84cd6a..c87d992dd 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -881,7 +881,7 @@ namespace spot aut_tmp->copy_state_names_from(a); if (use_simulation) { - aut_tmp = spot::scc_filter(aut_tmp); + aut_tmp = spot::scc_filter(aut_tmp, true, nullptr, true); auto aut2 = simulation(aut_tmp, &implications, trans_pruning); if (pretty_print) aut2->copy_state_names_from(aut_tmp); diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 1a2915dda..cbf677414 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -47,13 +47,28 @@ namespace spot namespace { static twa_graph_ptr - ensure_ba(twa_graph_ptr& a) + ensure_ba(twa_graph_ptr& a, bool no_trivial) { if (a->acc().is_t()) { auto m = a->set_buchi(); - for (auto& t: a->edges()) - t.acc = m; + if (!no_trivial) + { + for (auto& t: a->edges()) + t.acc = m; + } + else + { + scc_info si(a); + unsigned nc = si.scc_count(); + for (unsigned i = 0; i < nc; ++i) + // Cannot use "is_accepting_scc" because the + // acceptance condition was already changed. + if (!si.is_trivial(i)) + for (auto& e: si.edges_of(i)) + const_cast(e.acc) = m; + } + a->prop_state_acc(true); } return a; } @@ -219,7 +234,7 @@ namespace spot if (state_based_ && a->prop_state_acc().is_true()) return scc_filter_states(a, arg); else - return scc_filter(a, arg); + return scc_filter(a, arg, nullptr, type_ == CoBuchi || type_ == Buchi); } twa_graph_ptr @@ -251,7 +266,7 @@ namespace spot if (state_based_) tmp = sbacc(tmp); if (type_ == Buchi) - tmp = ensure_ba(tmp); + tmp = ensure_ba(tmp, level_ == High); if (want_parity) { if (!acd_was_used_ || (COMP_ && !was_complete)) @@ -480,7 +495,7 @@ namespace spot // We just need to add an acceptance set if there is none. dba_is_minimal = dba_is_wdba = true; if (type_ == Buchi) - ensure_ba(dba); + ensure_ba(dba, level_ == High); } else { diff --git a/spot/twaalgos/sccfilter.cc b/spot/twaalgos/sccfilter.cc index 20dea3f7e..9d8f2cfca 100644 --- a/spot/twaalgos/sccfilter.cc +++ b/spot/twaalgos/sccfilter.cc @@ -124,12 +124,14 @@ namespace spot } }; - // Transform inherently weak automata into weak Büchi automata. - template + // Transform inherently weak automata into weak Büchi automata, or + // t automata. + template struct weak_filter: next_filter { acc_cond::mark_t acc_m = {0}; acc_cond::mark_t rej_m = {}; + bool true_acc = false; template weak_filter(scc_info* si, Args&&... 
args) @@ -141,6 +143,23 @@ namespace spot if (si->get_aut()->acc().is_co_buchi()) rej_m = {0}; } + if (!keep_one_color) + { + unsigned ns = si->scc_count(); + bool may_reject = false; + for (unsigned i = 0; i < ns; ++i) + if (!si->is_trivial(i) && !si->is_accepting_scc(i)) + { + may_reject = true; + break; + } + if (!may_reject) + { + true_acc = true; + acc_m = {}; + rej_m = {}; + } + } } filtered_trans trans(unsigned src, unsigned dst, @@ -164,7 +183,9 @@ namespace spot void fix_acceptance(const twa_graph_ptr& out) { - if (buchi) + if (true_acc) + out->set_generalized_buchi(0); + else if (buchi) out->set_buchi(); else out->copy_acceptance_of(this->si->get_aut()); @@ -216,8 +237,8 @@ namespace spot // // The above rules are made more complex with two flags: // - // - If PreserveSBA is set, we have to tree a transition - // leaving an SCC as other transitions inside the SCC, + // - If PreserveSBA is set, we have to treat a transition + // leaving an SCC like other transitions inside the SCC, // otherwise we will break the property that all // transitions leaving the same state have identical set // membership. @@ -442,7 +463,7 @@ namespace spot twa_graph_ptr scc_filter(const const_twa_graph_ptr& aut, bool remove_all_useless, - scc_info* given_si) + scc_info* given_si, bool keep_one_color) { twa_graph_ptr res; scc_info* si = given_si; @@ -455,10 +476,13 @@ namespace spot | scc_info_options::TRACK_STATES_IF_FIN_USED); if (aut->acc().is_t() || aut->acc().is_co_buchi()) res = - scc_filter_apply>>(aut, si); + scc_filter_apply>>(aut, si); + else if (keep_one_color) + res = + scc_filter_apply>>(aut, si); else res = - scc_filter_apply>>(aut, si); + scc_filter_apply>>(aut, si); } else if (aut->acc().is_generalized_buchi()) { diff --git a/spot/twaalgos/sccfilter.hh b/spot/twaalgos/sccfilter.hh index 6c1451912..4d34ca5d4 100644 --- a/spot/twaalgos/sccfilter.hh +++ b/spot/twaalgos/sccfilter.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2018 Laboratoire de +// Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2018, 2023 Laboratoire de // Recherche et Developpement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -51,20 +51,25 @@ namespace spot /// accepting SCC are accepting. /// /// If the input is inherently weak, the output will be a weak - /// automaton with state-based acceptance. The acceptance condition - /// is set to Büchi unless the input was co-Büchi or t (in which - /// case we keep this acceptance). + /// automaton with state-based acceptance. If the automaton had no + /// rejecting SCC, the acceptance condition is set to "t". + /// Otherwise, the acceptance condition is set to Büchi unless the + /// input was co-Büchi (in which case we keep this acceptance). /// - /// If \a given_sm is supplied, the function will use its result + /// If \a given_si is supplied, the function will use its result /// without computing a map of its own. /// + /// If \a keep_one_color is set, the output will keep at least color + /// if the input had colors. Normally scc_filter removes as many + /// colors as possible. + /// /// \warning Calling scc_filter on a TωA that is not inherently weak /// and has the SBA property (i.e., transitions leaving accepting /// states are all marked as accepting) may destroy this property. /// Use scc_filter_states() instead. 
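  /// For instance (an illustrative call, not part of this patch; \a aut
  /// stands for any twa_graph_ptr), requesting that at least one color
  /// survives, as tgba_determinize() now does, can be written
  ///   scc_filter(aut, true, nullptr, true);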
SPOT_API twa_graph_ptr scc_filter(const const_twa_graph_ptr& aut, bool remove_all_useless = false, - scc_info* given_si = nullptr); + scc_info* given_si = nullptr, bool keep_one_color = false); /// \brief Prune unaccepting SCCs. /// diff --git a/tests/core/complement.test b/tests/core/complement.test index ebaafa1c2..d6d0eebd5 100755 --- a/tests/core/complement.test +++ b/tests/core/complement.test @@ -76,10 +76,10 @@ Acceptance: 1 Fin(0) properties: trans-labels explicit-labels state-acc complete properties: deterministic very-weak --BODY-- -State: 0 +State: 0 {0} [0] 2 [!0] 3 -State: 1 +State: 1 {0} [t] 0 State: 2 {0} [t] 2 diff --git a/tests/core/dstar.test b/tests/core/dstar.test index 80ad5ac37..c745ced4e 100755 --- a/tests/core/dstar.test +++ b/tests/core/dstar.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2016, 2018, 2020, 2022 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) 2013-2016, 2018, 2020, 2022, 2023 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -298,7 +298,7 @@ digraph "aut.dsa" { I [label="", style=invis, width=0] I -> 0 0 [label="0"] - 0 -> 0 [label="1\n{0}"] + 0 -> 0 [label="1"] } EOF diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index f11e1dbd1..d1a7a9dee 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -736,7 +736,7 @@ there are 2 subformulas trying to create strategy directly for (b & (b | y)) -> y direct strategy might exist but was not found. translating formula done in X seconds -automaton has 2 states and 1 colors +automaton has 2 states and 0 colors LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds @@ -747,7 +747,7 @@ simplification took X seconds trying to create strategy directly for (a | x) -> x direct strategy might exist but was not found. translating formula done in X seconds -automaton has 2 states and 1 colors +automaton has 2 states and 0 colors LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds @@ -850,7 +850,7 @@ there are 3 subformulas trying to create strategy directly for a -> b direct strategy might exist but was not found. translating formula done in X seconds -automaton has 2 states and 1 colors +automaton has 2 states and 0 colors LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds @@ -861,7 +861,7 @@ simplification took X seconds trying to create strategy directly for a -> c direct strategy might exist but was not found. translating formula done in X seconds -automaton has 2 states and 1 colors +automaton has 2 states and 0 colors LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds @@ -872,7 +872,7 @@ simplification took X seconds trying to create strategy directly for a -> d direct strategy might exist but was not found. 
translating formula done in X seconds -automaton has 2 states and 1 colors +automaton has 2 states and 0 colors LAR construction done in X seconds DPA has 2 states, 0 colors split inputs and outputs done in X seconds @@ -934,7 +934,7 @@ ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes --pol=no \ cat >exp <expected <, 5, 1 , 5, 1 , 4, 1 -, 4, 1 , 4, 1 , 6, 1 <(a & !b & (b | (!b M F!a))) | (!a & (b | (!b & (b W Ga)))), 3 states>, 5, 1 <(a & (a U !b)) | (!a & (!a R b)), 3 states>, 5, 1 -, 4, 1 -, 3, 1 +, 4, 1 +<((a & F!b) | (!a & Gb)) U (Fa & G!b), 3 states>, 6, 1 +, 4, 1 EOF diff output expected @@ -580,12 +580,12 @@ digraph "" { rankdir=LR node [shape="ellipse",width="0.5",height="0.5"] I [label="", style=invis, width=0] - 0 [label="6", peripheries=2] + 0 [label="6"] u0 [label="...", shape=none, width=0, height=0, tooltip="hidden successors"] - 1 [label="0", peripheries=2] - 2 [label="1", peripheries=2] - 3 [label="2", peripheries=2] - 4 [label="3", peripheries=2] + 1 [label="0"] + 2 [label="1"] + 3 [label="2"] + 4 [label="3"] } EOF @@ -806,8 +806,8 @@ HOA: v1 States: 3 Start: 1 AP: 2 "a" "b" -acc-name: Buchi -Acceptance: 1 Inf(0) +acc-name: all +Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic properties: very-weak --BODY-- @@ -815,7 +815,7 @@ State: 0 [1] 2 State: 1 [0] 0 -State: 2 {0} +State: 2 [0] 2 --END-- EOF @@ -824,14 +824,15 @@ diff output4 expect4 diff output4b expect4 diff output4c expect4 -autfilt -Hv --small input4 >output5 test `autfilt --is-weak -c output4` = 1 +test `autfilt -B --small output4d | autfilt --is-terminal -c` = 0 test `autfilt --is-terminal -c output4` = 0 sed 's/\[0\]/[t]/g' expect4 > output4d -test `autfilt --is-terminal -c output4d` = 1 - +test `autfilt -B --small output4d | autfilt --is-terminal -c` = 1 +test `autfilt --is-terminal -c output4d` = 0 # FIXME: Issue #553 +autfilt -B -Hv --small input4 >output5 cat >expect5<\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "6\n", - "\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "I->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "a & b & c\n", + "\n", + "\n", + "a & b & c\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "!a & !b & c\n", + "\n", + "\n", + "!a & !b & c\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "6->1\n", - "\n", - "\n", - "!a & b & c\n", + "\n", + "\n", + "!a & b & c\n", "\n", "\n", "\n", "2\n", - "\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!a & b & !c\n", + "\n", + "\n", + "!a & b & !c\n", "\n", "\n", "\n", "3\n", - "\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "4\n", - "\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "a & !b & c\n", + "\n", + "\n", + "a & !b & c\n", "\n", "\n", "\n", "5\n", - "\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "a & b & !c\n", + "\n", + "\n", + "a & b & !c\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c\n", + "\n", + "\n", + "c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", + "\n", + "\n", + "!b & c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & c\n", + "\n", + "\n", + "b & c\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "b & !c\n", + "\n", + 
"\n", + "b & !c\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!a & c\n", + "\n", + "\n", + "!a & c\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "a & !c\n", + "\n", + "\n", + "a & !c\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "a & c\n", + "\n", + "\n", + "a & c\n", "\n", "\n", "\n", "5->2\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "5->3\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", @@ -1131,199 +1125,193 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "6\n", - "\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "I->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "a & b & c\n", + "\n", + "\n", + "a & b & c\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "!a & !b & c\n", + "\n", + "\n", + "!a & !b & c\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "6->1\n", - "\n", - "\n", - "!a & b & c\n", + "\n", + "\n", + "!a & b & c\n", "\n", "\n", "\n", "2\n", - "\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!a & b & !c\n", + "\n", + "\n", + "!a & b & !c\n", "\n", "\n", "\n", "3\n", - "\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "4\n", - "\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "a & !b & c\n", + "\n", + "\n", + "a & !b & c\n", "\n", "\n", "\n", "5\n", - "\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "a & b & !c\n", + "\n", + "\n", + "a & b & !c\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c\n", + "\n", + "\n", + "c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", + "\n", + "\n", + "!b & c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & c\n", + "\n", + "\n", + "b & c\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "b & !c\n", + "\n", + "\n", + "b & !c\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!a & c\n", + "\n", + "\n", + "!a & c\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "a & !c\n", + "\n", + "\n", + "a & !c\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "a & c\n", + "\n", + "\n", + "a & c\n", "\n", "\n", "\n", "5->2\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "5->3\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", @@ -1719,65 +1707,64 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2\n", - "\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", 
"0->2\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", @@ -1934,65 +1921,64 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2\n", - "\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", @@ -2067,126 +2053,128 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", diff --git a/tests/python/automata-io.ipynb b/tests/python/automata-io.ipynb index 092fbbff2..6de56f9f5 100644 --- a/tests/python/automata-io.ipynb +++ b/tests/python/automata-io.ipynb @@ -121,60 +121,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5770723f00> >" + " *' at 0x7fc24c50bf00> >" ] }, "execution_count": 3, @@ -250,60 +250,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", 
- "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706f5690> >" + " *' at 0x7fc24c50b780> >" ] }, "metadata": {}, @@ -315,60 +315,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706daa80> >" + " *' at 0x7fc24c50bb70> >" ] }, "metadata": {}, @@ -437,60 +437,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706f5840> >" + " *' at 0x7fc24c50be10> >" ] }, "metadata": {}, @@ -527,61 +527,61 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Hello world\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "0\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57707760f0> >" + " *' at 0x7fc24c50b750> >" ] }, "metadata": {}, @@ -593,55 +593,55 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Hello world 2\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5770685930> >" + " *' at 0x7fc24c50bde0> >" ] }, "metadata": {}, @@ -698,60 +698,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706f5720> >" + " *' at 0x7fc24c50b690> >" ] }, "metadata": {}, @@ -763,53 +763,53 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706f5900> >" + " *' at 0x7fc24c50bd20> >" ] }, "metadata": {}, @@ -821,51 +821,51 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "GFa\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5770798e70> >" + " *' at 0x7fc24c50b690> >" ] }, "metadata": {}, @@ -877,64 +877,64 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "a & GFb\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f57706f5720> >" + " *' at 0x7fc24c50bae0> >" ] }, "metadata": {}, @@ -964,60 +964,60 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "1->1\n", - "\n", 
- "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f577072d060> >" + " *' at 0x7fc24c50b6c0> >" ] }, "execution_count": 10, @@ -1041,7 +1041,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1055,7 +1055,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.5" + "version": "3.11.6" } }, "nbformat": 4, diff --git a/tests/python/dualize.py b/tests/python/dualize.py index b870e1e5e..b4e459a18 100755 --- a/tests/python/dualize.py +++ b/tests/python/dualize.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et +# Copyright (C) 2017-2019, 2021-2023 Laboratoire de Recherche et # Développement de l'EPITA. # # This file is part of Spot, a model checking library. @@ -114,7 +114,7 @@ properties: deterministic stutter-invariant very-weak --BODY-- State: 0 {0} [t] 0 -State: 1 +State: 1 {0} [0] 0 [!0] 2 State: 2 diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index 6d9ac1ed8..a35f88334 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -54,11 +54,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -170,11 +170,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -247,7 +247,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20027d80> >" + " *' at 0x7fb7ec3bb720> >" ] }, "execution_count": 4, @@ -282,11 +282,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -359,7 +359,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e2a0> >" + " *' at 0x7fb7ec3bbb40> >" ] }, "execution_count": 5, @@ -392,11 +392,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -469,7 +469,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20027d80> >" + " *' at 0x7fb7ec3bb720> >" ] }, "execution_count": 6, @@ -702,7 +702,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e360> >" + " *' at 0x7fb7ec3bba50> >" ] }, "execution_count": 8, @@ -897,7 +897,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e360> >" + " *' at 0x7fb7ec3bba50> >" ] }, "execution_count": 11, @@ -1235,7 +1235,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002edb0> >" + " *' at 0x7fb7ec3dc8d0> >" ] }, "metadata": {}, @@ -1496,7 +1496,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20027de0> >" + " *' at 0x7fb7ef60adc0> >" ] }, "metadata": {}, @@ -1679,7 +1679,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e570> >" + " *' at 0x7fb7ef60a7f0> >" ] }, "metadata": {}, @@ -1746,11 +1746,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1796,7 +1796,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e480> >" + " *' at 0x7fb7ec3bbdb0> >" ] }, "metadata": {}, @@ -1851,7 +1851,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e6c0> >" + " *' at 0x7fb7ef60b810> >" ] }, "metadata": {}, @@ -1945,7 +1945,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002ec60> >" + " *' at 0x7fb7ec3bbc60> >" ] }, "execution_count": 14, @@ -2074,7 +2074,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002ec60> >" + " *' at 0x7fb7ec3bbc60> >" ] }, "metadata": {}, @@ -2089,11 +2089,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + 
"\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2139,7 +2139,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e480> >" + " *' at 0x7fb7ec3bbdb0> >" ] }, "metadata": {}, @@ -2194,7 +2194,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc2002e6c0> >" + " *' at 0x7fb7ef60b810> >" ] }, "metadata": {}, @@ -2232,7 +2232,7 @@ " | a & b\t{0}\n", "Cycle:\n", " 1 * 4\n", - " | a\t{0,1}\n", + " | a\t{0}\n", "\n" ] }, @@ -2245,143 +2245,141 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0 * 3\n", + "\n", + "0 * 3\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1 * 2\n", + "\n", + "1 * 2\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "2\n", - "\n", - "2 * 2\n", + "\n", + "2 * 2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "3\n", - "\n", - "1 * 1\n", + "\n", + "1 * 1\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "4\n", - "\n", - "2 * 1\n", + "\n", + "2 * 1\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "1 * 0\n", + "\n", + "1 * 0\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "6\n", - "\n", - "2 * 0\n", + "\n", + "2 * 0\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "7\n", - "\n", - "1 * 4\n", + "\n", + "1 * 4\n", "\n", "\n", "\n", "5->7\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", "6->7\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", @@ -2392,40 +2390,37 @@ "\n", "\n", "6->8\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "7->7\n", - "\n", - "\n", - "a\n", - "\n", - "\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n", "8->7\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "8->8\n", "\n", "\n", - "1\n", - "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7efc20044780> >" + " *' at 0x7fb7ef60a7f0> >" ] }, "metadata": {}, @@ -2440,11 +2435,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2510,7 +2505,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc200449c0> >" + " *' at 0x7fb7ef60af10> >" ] }, "metadata": {}, @@ -2525,89 +2520,89 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "1->0\n", - "\n", - 
"\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7efc200448d0> >" + " *' at 0x7fb7ef60b840> >" ] }, "metadata": {}, @@ -2776,7 +2771,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20044f30> >" + " *' at 0x7fb7ec3dd860> >" ] }, "execution_count": 19, @@ -2944,7 +2939,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20044f30> >" + " *' at 0x7fb7ec3dd860> >" ] }, "execution_count": 20, @@ -3107,7 +3102,7 @@ "\n" ], "text/plain": [ - " *' at 0x7efc20044f30> >" + " *' at 0x7fb7ec3dd860> >" ] }, "metadata": {}, @@ -3601,7 +3596,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -3615,7 +3610,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1+" + "version": "3.11.6" } }, "nbformat": 4, diff --git a/tests/python/intrun.py b/tests/python/intrun.py index 02a7aedd6..1494d4665 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -52,27 +52,27 @@ tc.assertEqual(str(r), """Prefix: 1 | a 0 - | 1 {0} + | 1 0 - | a {0} + | a Cycle: 0 - | 1 {0} + | 1 """) tc.assertEqual(r.as_twa().to_str(), """HOA: v1 States: 4 Start: 0 AP: 1 "a" -acc-name: Buchi -Acceptance: 1 Inf(0) +acc-name: all +Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic --BODY-- State: 0 [0] 1 -State: 1 {0} +State: 1 [t] 2 -State: 2 {0} +State: 2 [0] 3 -State: 3 {0} +State: 3 [t] 3 --END--""") diff --git a/tests/python/setacc.py b/tests/python/setacc.py index 7246bf5cc..c61b3262a 100644 --- a/tests/python/setacc.py +++ b/tests/python/setacc.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2021, 2022 Laboratoire de Recherche et +# Copyright (C) 2016, 2018, 2021, 2022, 2023 Laboratoire de Recherche et # Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -104,10 +104,10 @@ except RuntimeError as e: from gc import collect acc = spot.translate('a').acc() collect() -tc.assertEqual(acc, spot.acc_cond('Inf(0)')) +tc.assertEqual(acc, spot.acc_cond('t')) acc = spot.translate('b').get_acceptance() collect() -tc.assertEqual(acc, spot.acc_code('Inf(0)')) +tc.assertEqual(acc, spot.acc_code('t')) c = spot.acc_cond('Fin(0)&Fin(1)&(Inf(2)|Fin(3))') diff --git a/tests/python/simstate.py b/tests/python/simstate.py index 4874fd478..6a52124f9 100644 --- a/tests/python/simstate.py +++ b/tests/python/simstate.py @@ -538,7 +538,8 @@ tc.assertEqual(spot.reduce_iterated_sba(aut).to_str(), '''HOA: v1 States: 1 Start: 0 AP: 1 "a" -Acceptance: 1 t +acc-name: all +Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic properties: very-weak --BODY-- diff --git a/tests/python/stutter-inv.ipynb b/tests/python/stutter-inv.ipynb index 4b02a9c0f..627a6a826 100644 --- a/tests/python/stutter-inv.ipynb +++ b/tests/python/stutter-inv.ipynb @@ -12,7 +12,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -24,7 +23,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -70,7 +68,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -98,7 +95,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -106,7 +102,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -137,7 +132,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -145,7 +139,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -171,7 +164,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -196,7 +188,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -221,7 +212,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -249,7 +239,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -313,7 +302,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad816be40> >" + " *' at 0x7ff008667960> >" ] }, "metadata": {}, @@ -342,7 +331,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -350,7 +338,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -407,7 +394,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -415,7 +401,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -425,7 +410,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -619,7 +603,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad816be70> >" + " *' at 0x7ff0086679f0> >" ] }, "metadata": {}, @@ -632,7 +616,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -662,7 +645,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -830,7 +812,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad816be70> >" + " *' at 0x7ff0086679f0> >" ] }, "metadata": {}, @@ -843,7 +825,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -851,7 +832,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -859,7 +839,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -899,7 +878,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", 
"metadata": {}, "source": [ @@ -986,7 +964,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad8057690> >" + " *' at 0x7ff008667cc0> >" ] }, "metadata": {}, @@ -1084,7 +1062,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad8253210> >" + " *' at 0x7ff008667cf0> >" ] }, "metadata": {}, @@ -1104,7 +1082,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1295,7 +1272,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad8057ea0> >" + " *' at 0x7ff0086677b0> >" ] }, "metadata": {}, @@ -1314,7 +1291,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1425,7 +1401,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad8247a20> >" + " *' at 0x7ff0086677e0> >" ] }, "metadata": {}, @@ -1439,7 +1415,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1447,7 +1422,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1478,87 +1452,83 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!b\n", + "\n", + "\n", + "!b\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", - "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", - "\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7faad8057450> >" + " *' at 0x7ff008667ed0> >" ] }, "metadata": {}, @@ -1573,7 +1543,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1605,7 +1574,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1820,7 +1788,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad816b390> >" + " *' at 0x7ff0086676c0> >" ] }, "metadata": {}, @@ -1845,7 +1813,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1874,7 +1841,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2136,7 +2102,7 @@ "\n" ], "text/plain": [ - " *' at 0x7faad816b390> >" + " *' at 0x7ff0086676c0> >" ] }, "metadata": {}, @@ -2158,7 +2124,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2186,7 +2151,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2309,7 +2273,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2337,7 +2300,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2367,7 +2329,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -2381,7 +2343,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.6rc1" + "version": "3.11.6" } }, "nbformat": 4, diff --git a/tests/python/zlktree.py b/tests/python/zlktree.py index c3cb262f2..c86c4a8b0 100644 --- a/tests/python/zlktree.py +++ b/tests/python/zlktree.py @@ -27,6 +27,7 @@ trans-acc --BODY-- State: 0 [!0&!1] 3 [!0&!1] 4 State: 1 
[!0&!1] 4 {3} [0&!1] 0 {2} [!0&1] 1 {2} State: 2 [!0&1] 0 {0 2} [!0&!1] 1 State: 3 [!0&1] 2 State: 4 [0&!1] 3 --END--""") b = spot.zielonka_tree_transform(a) +spot.is_weak_automaton(b) tc.assertTrue(spot.are_equivalent(a, b)) tc.assertTrue(b.acc().is_buchi()) From f0928f2b5249aa63a317591e94e0d42d1fb8dd50 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 17 Nov 2023 13:48:17 +0100 Subject: [PATCH 366/606] translate: fix relabel-overlap setting * spot/twaalgos/translate.cc: Adjust the default relabel-overlap value to match the doc. --- spot/twaalgos/translate.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 7ad57347f..1db1f1aa4 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -48,7 +48,7 @@ namespace spot return; relabel_bool_ = opt->get("relabel-bool", 4); - relabel_overlap_ = opt->get("relabel-overlap", 6); + relabel_overlap_ = opt->get("relabel-overlap", 8); comp_susp_ = opt->get("comp-susp", 0); if (comp_susp_ == 1) { From 313e43c84bc2b238b54dd4bd5fed8d773fd6ed9e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 17 Nov 2023 14:04:22 +0100 Subject: [PATCH 367/606] translate: fix #551 Reported by Yann Thierry-Mieg. * spot/twaalgos/translate.cc: Run scc_filter if relabel_here reduced the number of edges, because maybe we have more to remove. * tests/core/ltl2tgba2.test: Add test case. --- spot/twaalgos/translate.cc | 7 ++++++- tests/core/ltl2tgba2.test | 13 +++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 1db1f1aa4..9bc6690d6 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -576,7 +576,12 @@ namespace spot auto aut = run_aux(r); if (!m.empty()) - relabel_here(aut, &m); + { + unsigned ne = aut->num_edges(); + relabel_here(aut, &m); + if (aut->num_edges() < ne) + return finalize(do_scc_filter(aut)); + } return aut; } diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 6e64e7081..370e744f9 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -527,3 +527,16 @@ v29)))& (G F (v11 -> (v46 & v29)))& (G F (v13 -> (v47 & v29)))& (v11 | (v13 | (v15 | (v17 | v19))))))))))))' ltl2tgba -p'min even' -D -C "$f" --stats='%s %e'>out test '22 288' = "`cat out`" + +# Make sure relabel-overlap still reduces empty automata. Issue #551. +f='(G(!(G(p16) U G(p5))))&&((p5&&!p3&&p4&&!p2)) && +((p16&&!p14&&p5&&p15)) && (p5) && (p16) && (X(p5)) && +(X(!(p2&&!p3&&p4&&!p5))) && (X(!(p3&&p4&&!p5))) && (X(X(!(p3&&!p5)))) +&& (X(X(!(p2&&!p3&&!p5)))) && (X(X(p5))) && (X((!p14&&p5))) && +(X(X((!p14&&p5)))) && (F(G(p5))) && (F(G(!p16))) && (G((!p2||p3||p5))) && +(G((!p3||p5))) && (G((!p2||p3||!p4||p5))) && (G((!p3||!p4||p5)))' + +s8=`ltl2tgba -x relabel-overlap=8 -f "$f" --stats=%g,%s,%e` +test "$s8" = "t,1,0" +s7=`ltl2tgba -x relabel-overlap=7 -f "$f" --stats=%g,%s,%e` +test "$s7" = "t,1,0" From 205df0139046d71a17cc2982eb4b67864db5dfbb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 17 Nov 2023 22:07:40 +0100 Subject: [PATCH 368/606] never iterate on the edge_vector() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes #552, reported by Rüdiger and Ayrat. * tests/sanity/style.test: Warn against iterations on edge_vector. * spot/parseaut/parseaut.yy, spot/twaalgos/complete.cc, spot/twaalgos/parity.cc: Iterate over edges(), not edge_vector().
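Illustration (a sketch, not code from this patch): in Spot's twa_graph API, edge_vector() exposes the raw edge storage, whose element #0 is a dummy edge that does not belong to the automaton, whereas edges() enumerates only the real edges.  Assuming the usual C++ interface, the difference looks like this:

  #include <spot/twa/twagraph.hh>

  // Add the colors in `m' to every edge of `aut'.
  void mark_all_edges(const spot::twa_graph_ptr& aut,
                      spot::acc_cond::mark_t m)
  {
    // edges() skips the unused edge #0, so only real edges are touched.
    for (auto& e: aut->edges())
      e.acc |= m;
    // Iterating on aut->edge_vector() instead would also write to the
    // dummy edge #0, which is the kind of mistake fixed here.
  }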
* tests/core/ltlcross.test: Add a test case for #552. * NEWS: Mention the bug. --- NEWS | 10 ++++++++++ spot/parseaut/parseaut.yy | 4 ++-- spot/twaalgos/complete.cc | 2 +- spot/twaalgos/parity.cc | 2 +- tests/core/ltlcross.test | 11 ++++++++--- tests/sanity/style.test | 5 +++++ 6 files changed, 27 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index d89e92050..7ea126243 100644 --- a/NEWS +++ b/NEWS @@ -156,6 +156,16 @@ New in spot 2.11.6.dev (not yet released) could fail to equate two "t" condition, because we have two ways to represent "t": the empty condition, or the empty "Inf({})". + - Functions complement() and change_parity() could incorrectly read + or write the unused edge #0. In the case of complement(), writing + that edge was usually harmless. However in some scenario, + complement could need to stick a ⓪ acceptance mark on edge #0, + then the acceptance condition could be simplified to "t", and + finally change_parity could be confused to find such an accepting + mark in an automaton that declares no colors, and perform some + computation on that color that caused it to crash with a "Too many + acceptance sets used" message. (issue #552) + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 8de9d9fab..328977f2b 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -2556,7 +2556,7 @@ static void fix_acceptance(result_& r) auto onlyneg = r.neg_acc_sets - r.pos_acc_sets; if (onlyneg) { - for (auto& t: r.h->aut->edge_vector()) + for (auto& t: r.h->aut->edges()) t.acc ^= onlyneg; } @@ -2570,7 +2570,7 @@ static void fix_acceptance(result_& r) if (both) { base = acc.add_sets(both.count()); - for (auto& t: r.h->aut->edge_vector()) + for (auto& t: r.h->aut->edges()) { unsigned i = 0; if ((t.acc & both) != both) diff --git a/spot/twaalgos/complete.cc b/spot/twaalgos/complete.cc index 803b3f440..b6ace400e 100644 --- a/spot/twaalgos/complete.cc +++ b/spot/twaalgos/complete.cc @@ -144,7 +144,7 @@ namespace spot if (need_acc_fix) { auto a = aut->set_buchi(); - for (auto& t: aut->edge_vector()) + for (auto& t: aut->edges()) t.acc = a; if (aut->num_edges()) acc = a; diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 98ac010fe..4da129bfb 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -50,7 +50,7 @@ namespace spot change_acc(twa_graph_ptr& aut, unsigned num_sets, bool change_kind, bool change_style, bool output_max, bool input_max) { - for (auto& e: aut->edge_vector()) + for (auto& e: aut->edges()) if (e.acc) { unsigned msb = (input_max ? e.acc.max_set() : e.acc.min_set()) - 1; diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index 381cf7a66..885269685 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -69,7 +69,12 @@ ltlcross --verbose ltl2tgba ltl2tgba \ # Issue #524. ltlcross ltl2tgba -f '!(X(v3 | G!v5) | ((Xv5 & !(v5 & !X!v3)) U !v5))' -# Issue #546. -ltlcross 'ltl2tgba --medium -p' 'ltl2tgba -p' 'ltl2tgba --medium -D -p' \ +# Issue #546, Issue #552. 
+ltlcross 'ltl2tgba --medium -p' \ + 'ltl2tgba -p' \ + 'ltl2tgba --medium -D -p' \ + 'ltl2tgba --medium --colored-parity="min even" -C -D' \ + 'ltl2tgba --colored-parity="min even" -C -D' \ -f 'a | FGa | GF(!b | Gb)' \ - -f '(~ v1 U ~ v5) | G(F v9 & F G v1) | G F(~ v7 | G v7 | G v3)' + -f '(~ v1 U ~ v5) | G(F v9 & F G v1) | G F(~ v7 | G v7 | G v3)' \ + -f 'FG((a | Fb | FG!b) & !G(c & d))' diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 064079f37..372bbeba2 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -298,6 +298,11 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do $GREP 'catch *([^.]' $tmp | $GREP -v 'const.*&' && diag 'Always capture exceptions by const reference.' + # iterating over edge_vector() is suspicious, because the + # first edge should not be used. + $GREP 'for (.*:.*edge_vector()' $tmp && + diag 'Did you mean to iterate over edges()?' + case $file in *.hh | *.hxx) if $GREP -E '(<<|>>)' $tmp >/dev/null; then From 3be62a907c53f41519f5867ea6a9ecff890cacbf Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 17 Nov 2023 23:06:09 +0100 Subject: [PATCH 369/606] parity: don't change_parity to the same acceptance Follow up to #552. * spot/twaalgos/parity.cc (change_parity): Do not work if the output is supposed to have the same acceptance as the input and 0 or 1 colors. --- spot/twaalgos/parity.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 4da129bfb..46eed1ff9 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -150,8 +150,20 @@ namespace spot { auto new_acc = acc_cond::acc_code::parity(output_max, output_odd, num_sets); + // Even if change_kind || change_style, it may be + // the case that the new acceptance is the same as the old. + // For instance "t" can be both "min even" or "max odd". + if (num_sets <= 1 && new_acc == aut->get_acceptance()) + return aut; aut->set_acceptance(num_sets, new_acc); } + else + { + if (num_sets <= 1) + return aut; // Nothing to do + } + // even if the acceptance is not changed, this code will remove + // superfluous colors in case a transition has more than one. change_acc(aut, old_num_sets, change_kind, change_style, output_max, current_max); return aut; From 63362d535fd9c6d7bf839ad9d18831401e56413e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 18 Nov 2023 21:17:05 +0100 Subject: [PATCH 370/606] Upgrade the Copyright strings to point to AUTHORS and drop years Fixes #539. * AUTHORS: Update by indicating the status of each contributor. 
* Makefile.am, bench/Makefile.am, bench/dtgbasat/Makefile.am, bench/dtgbasat/gen.py, bench/emptchk/Makefile.am, bench/emptchk/defs.in, bench/ltl2tgba/Makefile.am, bench/ltl2tgba/defs.in, bench/ltl2tgba/sum.py, bench/ltlclasses/Makefile.am, bench/ltlcounter/Makefile.am, bench/spin13/Makefile.am, bench/stutter/Makefile.am, bench/stutter/stutter_invariance_formulas.cc, bench/stutter/stutter_invariance_randomgraph.cc, bench/wdba/Makefile.am, bin/Makefile.am, bin/autcross.cc, bin/autfilt.cc, bin/common_aoutput.cc, bin/common_aoutput.hh, bin/common_color.cc, bin/common_color.hh, bin/common_conv.cc, bin/common_conv.hh, bin/common_cout.cc, bin/common_cout.hh, bin/common_file.cc, bin/common_file.hh, bin/common_finput.cc, bin/common_finput.hh, bin/common_hoaread.cc, bin/common_hoaread.hh, bin/common_output.cc, bin/common_output.hh, bin/common_post.cc, bin/common_post.hh, bin/common_r.cc, bin/common_r.hh, bin/common_range.cc, bin/common_range.hh, bin/common_setup.cc, bin/common_setup.hh, bin/common_sys.hh, bin/common_trans.cc, bin/common_trans.hh, bin/dstar2tgba.cc, bin/genaut.cc, bin/genltl.cc, bin/ltl2tgba.cc, bin/ltl2tgta.cc, bin/ltlcross.cc, bin/ltldo.cc, bin/ltlfilt.cc, bin/ltlgrind.cc, bin/ltlsynt.cc, bin/man/Makefile.am, bin/options.py, bin/randaut.cc, bin/randltl.cc, bin/spot-x.cc, bin/spot.cc, configure.ac, debian/copyright, doc/Makefile.am, doc/tl/Makefile.am, elisp/Makefile.am, python/Makefile.am, python/buddy.i, python/spot/__init__.py, python/spot/aux_.py, python/spot/gen.i, python/spot/impl.i, python/spot/jupyter.py, python/spot/ltsmin.i, spot/Makefile.am, spot/gen/Makefile.am, spot/gen/automata.cc, spot/gen/automata.hh, spot/gen/formulas.cc, spot/gen/formulas.hh, spot/graph/Makefile.am, spot/graph/graph.hh, spot/graph/ngraph.hh, spot/kripke/Makefile.am, spot/kripke/fairkripke.cc, spot/kripke/fairkripke.hh, spot/kripke/fwd.hh, spot/kripke/kripke.cc, spot/kripke/kripke.hh, spot/kripke/kripkegraph.hh, spot/ltsmin/Makefile.am, spot/ltsmin/ltsmin.cc, spot/ltsmin/ltsmin.hh, spot/ltsmin/spins_interface.cc, spot/ltsmin/spins_interface.hh, spot/ltsmin/spins_kripke.hh, spot/ltsmin/spins_kripke.hxx, spot/mc/Makefile.am, spot/mc/bloemen.hh, spot/mc/bloemen_ec.hh, spot/mc/cndfs.hh, spot/mc/deadlock.hh, spot/mc/intersect.hh, spot/mc/lpar13.hh, spot/mc/mc.hh, spot/mc/mc_instanciator.hh, spot/mc/unionfind.cc, spot/mc/unionfind.hh, spot/mc/utils.hh, spot/misc/Makefile.am, spot/misc/bareword.cc, spot/misc/bareword.hh, spot/misc/bddlt.hh, spot/misc/bitset.cc, spot/misc/bitset.hh, spot/misc/bitvect.cc, spot/misc/bitvect.hh, spot/misc/casts.hh, spot/misc/clz.hh, spot/misc/common.hh, spot/misc/escape.cc, spot/misc/escape.hh, spot/misc/fixpool.hh, spot/misc/formater.cc, spot/misc/formater.hh, spot/misc/hash.hh, spot/misc/hashfunc.hh, spot/misc/intvcmp2.cc, spot/misc/intvcmp2.hh, spot/misc/intvcomp.cc, spot/misc/intvcomp.hh, spot/misc/ltstr.hh, spot/misc/memusage.cc, spot/misc/memusage.hh, spot/misc/minato.cc, spot/misc/minato.hh, spot/misc/mspool.hh, spot/misc/optionmap.cc, spot/misc/optionmap.hh, spot/misc/random.cc, spot/misc/random.hh, spot/misc/satsolver.cc, spot/misc/satsolver.hh, spot/misc/timer.cc, spot/misc/timer.hh, spot/misc/tmpfile.cc, spot/misc/tmpfile.hh, spot/misc/trival.hh, spot/misc/version.cc, spot/misc/version.hh, spot/parseaut/Makefile.am, spot/parseaut/fmterror.cc, spot/parseaut/parseaut.yy, spot/parseaut/parsedecl.hh, spot/parseaut/public.hh, spot/parseaut/scanaut.ll, spot/parsetl/Makefile.am, spot/parsetl/fmterror.cc, spot/parsetl/parsedecl.hh, spot/parsetl/parsetl.yy, 
spot/parsetl/scantl.ll, spot/priv/Makefile.am, spot/priv/accmap.hh, spot/priv/bddalloc.cc, spot/priv/bddalloc.hh, spot/priv/freelist.cc, spot/priv/freelist.hh, spot/priv/partitioned_relabel.cc, spot/priv/partitioned_relabel.hh, spot/priv/satcommon.cc, spot/priv/satcommon.hh, spot/priv/trim.cc, spot/priv/trim.hh, spot/priv/weight.cc, spot/priv/weight.hh, spot/ta/Makefile.am, spot/ta/ta.cc, spot/ta/ta.hh, spot/ta/taexplicit.cc, spot/ta/taexplicit.hh, spot/ta/taproduct.cc, spot/ta/taproduct.hh, spot/ta/tgta.hh, spot/ta/tgtaexplicit.cc, spot/ta/tgtaexplicit.hh, spot/ta/tgtaproduct.cc, spot/ta/tgtaproduct.hh, spot/taalgos/Makefile.am, spot/taalgos/dot.cc, spot/taalgos/dot.hh, spot/taalgos/emptinessta.cc, spot/taalgos/emptinessta.hh, spot/taalgos/minimize.cc, spot/taalgos/minimize.hh, spot/taalgos/reachiter.cc, spot/taalgos/reachiter.hh, spot/taalgos/statessetbuilder.cc, spot/taalgos/statessetbuilder.hh, spot/taalgos/stats.cc, spot/taalgos/stats.hh, spot/taalgos/tgba2ta.cc, spot/taalgos/tgba2ta.hh, spot/tl/Makefile.am, spot/tl/apcollect.cc, spot/tl/apcollect.hh, spot/tl/contain.cc, spot/tl/contain.hh, spot/tl/declenv.cc, spot/tl/declenv.hh, spot/tl/defaultenv.cc, spot/tl/defaultenv.hh, spot/tl/dot.cc, spot/tl/dot.hh, spot/tl/environment.hh, spot/tl/exclusive.cc, spot/tl/exclusive.hh, spot/tl/formula.cc, spot/tl/formula.hh, spot/tl/hierarchy.cc, spot/tl/hierarchy.hh, spot/tl/length.cc, spot/tl/length.hh, spot/tl/ltlf.cc, spot/tl/ltlf.hh, spot/tl/mark.cc, spot/tl/mark.hh, spot/tl/mutation.cc, spot/tl/mutation.hh, spot/tl/nenoform.cc, spot/tl/nenoform.hh, spot/tl/parse.hh, spot/tl/print.cc, spot/tl/print.hh, spot/tl/randomltl.cc, spot/tl/randomltl.hh, spot/tl/relabel.cc, spot/tl/relabel.hh, spot/tl/remove_x.cc, spot/tl/remove_x.hh, spot/tl/simplify.cc, spot/tl/simplify.hh, spot/tl/snf.cc, spot/tl/snf.hh, spot/tl/sonf.cc, spot/tl/sonf.hh, spot/tl/unabbrev.cc, spot/tl/unabbrev.hh, spot/twa/Makefile.am, spot/twa/acc.cc, spot/twa/acc.hh, spot/twa/bdddict.cc, spot/twa/bdddict.hh, spot/twa/bddprint.cc, spot/twa/bddprint.hh, spot/twa/formula2bdd.cc, spot/twa/formula2bdd.hh, spot/twa/fwd.hh, spot/twa/taatgba.cc, spot/twa/taatgba.hh, spot/twa/twa.cc, spot/twa/twa.hh, spot/twa/twagraph.cc, spot/twa/twagraph.hh, spot/twa/twaproduct.cc, spot/twa/twaproduct.hh, spot/twaalgos/Makefile.am, spot/twaalgos/aiger.cc, spot/twaalgos/aiger.hh, spot/twaalgos/alternation.cc, spot/twaalgos/alternation.hh, spot/twaalgos/are_isomorphic.cc, spot/twaalgos/are_isomorphic.hh, spot/twaalgos/bfssteps.cc, spot/twaalgos/bfssteps.hh, spot/twaalgos/canonicalize.cc, spot/twaalgos/canonicalize.hh, spot/twaalgos/cleanacc.cc, spot/twaalgos/cleanacc.hh, spot/twaalgos/cobuchi.cc, spot/twaalgos/cobuchi.hh, spot/twaalgos/complement.cc, spot/twaalgos/complement.hh, spot/twaalgos/complete.cc, spot/twaalgos/complete.hh, spot/twaalgos/compsusp.cc, spot/twaalgos/compsusp.hh, spot/twaalgos/contains.cc, spot/twaalgos/contains.hh, spot/twaalgos/copy.hh, spot/twaalgos/couvreurnew.cc, spot/twaalgos/couvreurnew.hh, spot/twaalgos/cycles.cc, spot/twaalgos/cycles.hh, spot/twaalgos/dbranch.cc, spot/twaalgos/dbranch.hh, spot/twaalgos/degen.cc, spot/twaalgos/degen.hh, spot/twaalgos/determinize.cc, spot/twaalgos/determinize.hh, spot/twaalgos/dot.cc, spot/twaalgos/dot.hh, spot/twaalgos/dtbasat.cc, spot/twaalgos/dtbasat.hh, spot/twaalgos/dtwasat.cc, spot/twaalgos/dtwasat.hh, spot/twaalgos/dualize.cc, spot/twaalgos/dualize.hh, spot/twaalgos/emptiness.cc, spot/twaalgos/emptiness.hh, spot/twaalgos/emptiness_stats.hh, spot/twaalgos/forq_contains.cc, 
spot/twaalgos/forq_contains.hh, spot/twaalgos/game.cc, spot/twaalgos/game.hh, spot/twaalgos/genem.cc, spot/twaalgos/genem.hh, spot/twaalgos/gfguarantee.cc, spot/twaalgos/gfguarantee.hh, spot/twaalgos/gtec/Makefile.am, spot/twaalgos/gtec/ce.cc, spot/twaalgos/gtec/ce.hh, spot/twaalgos/gtec/gtec.cc, spot/twaalgos/gtec/gtec.hh, spot/twaalgos/gtec/sccstack.cc, spot/twaalgos/gtec/sccstack.hh, spot/twaalgos/gtec/status.cc, spot/twaalgos/gtec/status.hh, spot/twaalgos/gv04.cc, spot/twaalgos/gv04.hh, spot/twaalgos/hoa.cc, spot/twaalgos/hoa.hh, spot/twaalgos/iscolored.cc, spot/twaalgos/iscolored.hh, spot/twaalgos/isdet.cc, spot/twaalgos/isdet.hh, spot/twaalgos/isunamb.cc, spot/twaalgos/isunamb.hh, spot/twaalgos/isweakscc.cc, spot/twaalgos/isweakscc.hh, spot/twaalgos/langmap.cc, spot/twaalgos/langmap.hh, spot/twaalgos/lbtt.cc, spot/twaalgos/lbtt.hh, spot/twaalgos/ltl2taa.cc, spot/twaalgos/ltl2taa.hh, spot/twaalgos/ltl2tgba_fm.cc, spot/twaalgos/ltl2tgba_fm.hh, spot/twaalgos/magic.cc, spot/twaalgos/magic.hh, spot/twaalgos/mask.cc, spot/twaalgos/mask.hh, spot/twaalgos/mealy_machine.cc, spot/twaalgos/mealy_machine.hh, spot/twaalgos/minimize.cc, spot/twaalgos/minimize.hh, spot/twaalgos/ndfs_result.hxx, spot/twaalgos/neverclaim.cc, spot/twaalgos/neverclaim.hh, spot/twaalgos/parity.cc, spot/twaalgos/parity.hh, spot/twaalgos/postproc.cc, spot/twaalgos/postproc.hh, spot/twaalgos/powerset.cc, spot/twaalgos/powerset.hh, spot/twaalgos/product.cc, spot/twaalgos/product.hh, spot/twaalgos/randomgraph.cc, spot/twaalgos/randomgraph.hh, spot/twaalgos/randomize.cc, spot/twaalgos/randomize.hh, spot/twaalgos/reachiter.cc, spot/twaalgos/reachiter.hh, spot/twaalgos/relabel.cc, spot/twaalgos/relabel.hh, spot/twaalgos/remfin.cc, spot/twaalgos/remfin.hh, spot/twaalgos/remprop.cc, spot/twaalgos/remprop.hh, spot/twaalgos/sbacc.cc, spot/twaalgos/sbacc.hh, spot/twaalgos/sccfilter.cc, spot/twaalgos/sccfilter.hh, spot/twaalgos/sccinfo.cc, spot/twaalgos/sccinfo.hh, spot/twaalgos/se05.cc, spot/twaalgos/se05.hh, spot/twaalgos/sepsets.cc, spot/twaalgos/sepsets.hh, spot/twaalgos/simulation.cc, spot/twaalgos/simulation.hh, spot/twaalgos/split.cc, spot/twaalgos/split.hh, spot/twaalgos/stats.cc, spot/twaalgos/stats.hh, spot/twaalgos/strength.cc, spot/twaalgos/strength.hh, spot/twaalgos/stripacc.cc, spot/twaalgos/stripacc.hh, spot/twaalgos/stutter.cc, spot/twaalgos/stutter.hh, spot/twaalgos/sum.cc, spot/twaalgos/sum.hh, spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh, spot/twaalgos/tau03.cc, spot/twaalgos/tau03.hh, spot/twaalgos/tau03opt.cc, spot/twaalgos/tau03opt.hh, spot/twaalgos/toparity.cc, spot/twaalgos/toparity.hh, spot/twaalgos/totgba.cc, spot/twaalgos/totgba.hh, spot/twaalgos/toweak.cc, spot/twaalgos/toweak.hh, spot/twaalgos/translate.cc, spot/twaalgos/translate.hh, spot/twaalgos/word.cc, spot/twaalgos/word.hh, spot/twaalgos/zlktree.cc, spot/twaalgos/zlktree.hh, spot/twacube/Makefile.am, spot/twacube/cube.cc, spot/twacube/cube.hh, spot/twacube/fwd.hh, spot/twacube/twacube.cc, spot/twacube/twacube.hh, spot/twacube_algos/Makefile.am, spot/twacube_algos/convert.cc, spot/twacube_algos/convert.hh, tests/Makefile.am, tests/core/385.test, tests/core/500.test, tests/core/521.test, tests/core/522.test, tests/core/acc.cc, tests/core/acc.test, tests/core/acc2.test, tests/core/acc_word.test, tests/core/accsimpl.test, tests/core/alternating.test, tests/core/autcross.test, tests/core/autcross2.test, tests/core/autcross3.test, tests/core/autcross4.test, tests/core/autcross5.test, tests/core/babiak.test, tests/core/bare.test, 
tests/core/basimul.test, tests/core/bdd.test, tests/core/bdddict.cc, tests/core/bdddict.test, tests/core/bitvect.cc, tests/core/bitvect.test, tests/core/bricks.cc, tests/core/bricks.test, tests/core/checkpsl.cc, tests/core/checkta.cc, tests/core/complement.test, tests/core/complementation.test, tests/core/complete.test, tests/core/consterm.cc, tests/core/consterm.test, tests/core/cube.cc, tests/core/cube.test, tests/core/cycles.test, tests/core/dbacomp.test, tests/core/dca.test, tests/core/dca2.test, tests/core/defs.in, tests/core/degendet.test, tests/core/degenid.test, tests/core/degenlskip.test, tests/core/degenscc.test, tests/core/det.test, tests/core/dfs.test, tests/core/dnfstreett.test, tests/core/dot2tex.test, tests/core/dra2dba.test, tests/core/dstar.test, tests/core/dualize.test, tests/core/dupexp.test, tests/core/emptchk.cc, tests/core/emptchk.test, tests/core/emptchke.test, tests/core/emptchkr.test, tests/core/equals.test, tests/core/equalsf.cc, tests/core/eventuniv.test, tests/core/exclusive-ltl.test, tests/core/exclusive-tgba.test, tests/core/explpro2.test, tests/core/explpro3.test, tests/core/explpro4.test, tests/core/explprod.test, tests/core/explsum.test, tests/core/format.test, tests/core/full.test, tests/core/gamehoa.test, tests/core/genaut.test, tests/core/genltl.test, tests/core/gragsa.test, tests/core/graph.cc, tests/core/graph.test, tests/core/hierarchy.test, tests/core/highlightstate.test, tests/core/ikwiad.cc, tests/core/included.test, tests/core/intvcmp2.cc, tests/core/intvcomp.cc, tests/core/intvcomp.test, tests/core/isomorph.test, tests/core/isop.test, tests/core/kind.cc, tests/core/kind.test, tests/core/kripke.test, tests/core/kripkecat.cc, tests/core/latex.test, tests/core/lbt.test, tests/core/lbttparse.test, tests/core/length.cc, tests/core/length.test, tests/core/lenient.test, tests/core/ltl2dstar.test, tests/core/ltl2dstar2.test, tests/core/ltl2dstar3.test, tests/core/ltl2dstar4.test, tests/core/ltl2neverclaim-lbtt.test, tests/core/ltl2neverclaim.test, tests/core/ltl2ta.test, tests/core/ltl2ta2.test, tests/core/ltl2tgba.test, tests/core/ltl2tgba2.test, tests/core/ltl3ba.test, tests/core/ltl3dra.test, tests/core/ltlcounter.test, tests/core/ltlcross.test, tests/core/ltlcross2.test, tests/core/ltlcross3.test, tests/core/ltlcross4.test, tests/core/ltlcross5.test, tests/core/ltlcross6.test, tests/core/ltlcrossce.test, tests/core/ltlcrossce2.test, tests/core/ltlcrossgrind.test, tests/core/ltldo.test, tests/core/ltldo2.test, tests/core/ltlf.test, tests/core/ltlfilt.test, tests/core/ltlgrind.test, tests/core/ltlrel.cc, tests/core/ltlrel.test, tests/core/ltlsynt-pgame.test, tests/core/ltlsynt.test, tests/core/ltlsynt2.test, tests/core/lunabbrev.test, tests/core/maskacc.test, tests/core/maskkeep.test, tests/core/mempool.cc, tests/core/mempool.test, tests/core/minterm.cc, tests/core/minterm.test, tests/core/minusx.test, tests/core/monitor.test, tests/core/nenoform.test, tests/core/neverclaimread.test, tests/core/ngraph.cc, tests/core/ngraph.test, tests/core/nondet.test, tests/core/obligation.test, tests/core/optba.test, tests/core/parity.cc, tests/core/parity.test, tests/core/parity2.test, tests/core/parse.test, tests/core/parseaut.test, tests/core/parseerr.test, tests/core/pdegen.test, tests/core/pgsolver.test, tests/core/prodchain.test, tests/core/prodor.test, tests/core/rabin2parity.test, tests/core/rand.test, tests/core/randaut.test, tests/core/randomize.test, tests/core/randpsl.test, tests/core/randtgba.cc, tests/core/randtgba.test, tests/core/readltl.cc, 
tests/core/readsave.test, tests/core/reduc.cc, tests/core/reduc.test, tests/core/reduc0.test, tests/core/reduccmp.test, tests/core/reducpsl.test, tests/core/remfin.test, tests/core/remove_x.test, tests/core/remprop.test, tests/core/renault.test, tests/core/safra.cc, tests/core/safra.test, tests/core/satmin.test, tests/core/satmin2.test, tests/core/satmin3.test, tests/core/sbacc.test, tests/core/scc.test, tests/core/sccdot.test, tests/core/sccif.cc, tests/core/sccif.test, tests/core/sccsimpl.test, tests/core/semidet.test, tests/core/sepsets.test, tests/core/serial.test, tests/core/sim2.test, tests/core/sim3.test, tests/core/sonf.test, tests/core/split.test, tests/core/spotlbtt.test, tests/core/spotlbtt2.test, tests/core/streett.test, tests/core/strength.test, tests/core/stutter-ltl.test, tests/core/stutter-tgba.test, tests/core/sugar.test, tests/core/syfco.test, tests/core/syntimpl.cc, tests/core/syntimpl.test, tests/core/taatgba.cc, tests/core/taatgba.test, tests/core/tgbagraph.test, tests/core/tostring.cc, tests/core/tostring.test, tests/core/tripprod.test, tests/core/trival.cc, tests/core/trival.test, tests/core/tunabbrev.test, tests/core/tunenoform.test, tests/core/twacube.cc, tests/core/twacube.test, tests/core/twagraph.cc, tests/core/unabbrevwm.test, tests/core/unambig.test, tests/core/unambig2.test, tests/core/uniq.test, tests/core/utf8.test, tests/core/uwrm.test, tests/core/wdba.test, tests/core/wdba2.test, tests/ltsmin/check.test, tests/ltsmin/check2.test, tests/ltsmin/check3.test, tests/ltsmin/finite.test, tests/ltsmin/finite2.test, tests/ltsmin/finite3.test, tests/ltsmin/kripke.test, tests/ltsmin/modelcheck.cc, tests/ltsmin/testconvert.cc, tests/ltsmin/testconvert.test, tests/python/298.py, tests/python/341.py, tests/python/471.py, tests/python/acc.py, tests/python/accparse2.py, tests/python/aiger.py, tests/python/alarm.py, tests/python/aliases.py, tests/python/alternating.py, tests/python/bdddict.py, tests/python/bdditer.py, tests/python/bddnqueen.py, tests/python/bugdet.py, tests/python/complement_semidet.py, tests/python/dbranch.py, tests/python/declenv.py, tests/python/decompose_scc.py, tests/python/det.py, tests/python/dualize.py, tests/python/ecfalse.py, tests/python/except.py, tests/python/forq_contains.py, tests/python/game.py, tests/python/gen.py, tests/python/genem.py, tests/python/implies.py, tests/python/interdep.py, tests/python/intrun.py, tests/python/kripke.py, tests/python/langmap.py, tests/python/ltl2tgba.py, tests/python/ltl2tgba.test, tests/python/ltlf.py, tests/python/ltlparse.py, tests/python/ltlsimple.py, tests/python/mealy.py, tests/python/merge.py, tests/python/mergedge.py, tests/python/minato.py, tests/python/misc-ec.py, tests/python/optionmap.py, tests/python/origstate.py, tests/python/otfcrash.py, tests/python/parity.py, tests/python/parsetgba.py, tests/python/pdegen.py, tests/python/powerset.py, tests/python/prodexpt.py, tests/python/randgen.py, tests/python/relabel.py, tests/python/remfin.py, tests/python/removeap.py, tests/python/rs_like.py, tests/python/satmin.py, tests/python/sbacc.py, tests/python/sccfilter.py, tests/python/sccinfo.py, tests/python/sccsplit.py, tests/python/semidet.py, tests/python/setacc.py, tests/python/setxor.py, tests/python/simplacc.py, tests/python/simstate.py, tests/python/sonf.py, tests/python/split.py, tests/python/splitedge.py, tests/python/streett_totgba.py, tests/python/streett_totgba2.py, tests/python/stutter.py, tests/python/sum.py, tests/python/synthesis.py, tests/python/toparity.py, tests/python/toweak.py, 
tests/python/tra2tba.py, tests/python/trival.py, tests/python/twagraph.py, tests/python/zlktree.py, tests/run.in, tests/sanity/80columns.test, tests/sanity/bin.test, tests/sanity/getenv.test, tests/sanity/includes.test, tests/sanity/ipynb.pl, tests/sanity/namedprop.test, tests/sanity/private.test, tests/sanity/readme.pl, tests/sanity/style.test, tools/man2html.pl: Update all copyright headers. --- AUTHORS | 130 +++++++++++++----- Makefile.am | 6 +- bench/Makefile.am | 6 +- bench/dtgbasat/Makefile.am | 3 +- bench/dtgbasat/gen.py | 3 +- bench/emptchk/Makefile.am | 4 +- bench/emptchk/defs.in | 4 +- bench/ltl2tgba/Makefile.am | 3 +- bench/ltl2tgba/defs.in | 6 +- bench/ltl2tgba/sum.py | 3 +- bench/ltlclasses/Makefile.am | 3 +- bench/ltlcounter/Makefile.am | 3 +- bench/spin13/Makefile.am | 3 +- bench/stutter/Makefile.am | 3 +- bench/stutter/stutter_invariance_formulas.cc | 3 +- .../stutter/stutter_invariance_randomgraph.cc | 3 +- bench/wdba/Makefile.am | 3 +- bin/Makefile.am | 3 +- bin/autcross.cc | 3 +- bin/autfilt.cc | 3 +- bin/common_aoutput.cc | 3 +- bin/common_aoutput.hh | 3 +- bin/common_color.cc | 3 +- bin/common_color.hh | 3 +- bin/common_conv.cc | 3 +- bin/common_conv.hh | 3 +- bin/common_cout.cc | 3 +- bin/common_cout.hh | 3 +- bin/common_file.cc | 3 +- bin/common_file.hh | 3 +- bin/common_finput.cc | 3 +- bin/common_finput.hh | 3 +- bin/common_hoaread.cc | 3 +- bin/common_hoaread.hh | 3 +- bin/common_output.cc | 3 +- bin/common_output.hh | 3 +- bin/common_post.cc | 3 +- bin/common_post.hh | 3 +- bin/common_r.cc | 3 +- bin/common_r.hh | 3 +- bin/common_range.cc | 3 +- bin/common_range.hh | 3 +- bin/common_setup.cc | 5 +- bin/common_setup.hh | 3 +- bin/common_sys.hh | 3 +- bin/common_trans.cc | 3 +- bin/common_trans.hh | 3 +- bin/dstar2tgba.cc | 3 +- bin/genaut.cc | 3 +- bin/genltl.cc | 3 +- bin/ltl2tgba.cc | 3 +- bin/ltl2tgta.cc | 3 +- bin/ltlcross.cc | 3 +- bin/ltldo.cc | 3 +- bin/ltlfilt.cc | 3 +- bin/ltlgrind.cc | 3 +- bin/ltlsynt.cc | 3 +- bin/man/Makefile.am | 3 +- bin/options.py | 3 +- bin/randaut.cc | 3 +- bin/randltl.cc | 3 +- bin/spot-x.cc | 3 +- bin/spot.cc | 3 +- configure.ac | 6 +- debian/copyright | 5 +- doc/Makefile.am | 6 +- doc/tl/Makefile.am | 3 +- elisp/Makefile.am | 3 +- python/Makefile.am | 6 +- python/buddy.i | 6 +- python/spot/__init__.py | 3 +- python/spot/aux_.py | 3 +- python/spot/gen.i | 3 +- python/spot/impl.i | 6 +- python/spot/jupyter.py | 3 +- python/spot/ltsmin.i | 3 +- spot/Makefile.am | 6 +- spot/gen/Makefile.am | 3 +- spot/gen/automata.cc | 3 +- spot/gen/automata.hh | 3 +- spot/gen/formulas.cc | 3 +- spot/gen/formulas.hh | 3 +- spot/graph/Makefile.am | 3 +- spot/graph/graph.hh | 3 +- spot/graph/ngraph.hh | 3 +- spot/kripke/Makefile.am | 3 +- spot/kripke/fairkripke.cc | 3 +- spot/kripke/fairkripke.hh | 3 +- spot/kripke/fwd.hh | 3 +- spot/kripke/kripke.cc | 3 +- spot/kripke/kripke.hh | 3 +- spot/kripke/kripkegraph.hh | 3 +- spot/ltsmin/Makefile.am | 3 +- spot/ltsmin/ltsmin.cc | 3 +- spot/ltsmin/ltsmin.hh | 3 +- spot/ltsmin/spins_interface.cc | 3 +- spot/ltsmin/spins_interface.hh | 3 +- spot/ltsmin/spins_kripke.hh | 3 +- spot/ltsmin/spins_kripke.hxx | 3 +- spot/mc/Makefile.am | 3 +- spot/mc/bloemen.hh | 3 +- spot/mc/bloemen_ec.hh | 3 +- spot/mc/cndfs.hh | 3 +- spot/mc/deadlock.hh | 3 +- spot/mc/intersect.hh | 3 +- spot/mc/lpar13.hh | 3 +- spot/mc/mc.hh | 3 +- spot/mc/mc_instanciator.hh | 3 +- spot/mc/unionfind.cc | 3 +- spot/mc/unionfind.hh | 3 +- spot/mc/utils.hh | 3 +- spot/misc/Makefile.am | 6 +- spot/misc/bareword.cc | 6 +- spot/misc/bareword.hh | 6 
+- spot/misc/bddlt.hh | 6 +- spot/misc/bitset.cc | 3 +- spot/misc/bitset.hh | 3 +- spot/misc/bitvect.cc | 3 +- spot/misc/bitvect.hh | 3 +- spot/misc/casts.hh | 3 +- spot/misc/clz.hh | 3 +- spot/misc/common.hh | 3 +- spot/misc/escape.cc | 6 +- spot/misc/escape.hh | 6 +- spot/misc/fixpool.hh | 3 +- spot/misc/formater.cc | 3 +- spot/misc/formater.hh | 3 +- spot/misc/hash.hh | 6 +- spot/misc/hashfunc.hh | 6 +- spot/misc/intvcmp2.cc | 3 +- spot/misc/intvcmp2.hh | 3 +- spot/misc/intvcomp.cc | 3 +- spot/misc/intvcomp.hh | 3 +- spot/misc/ltstr.hh | 6 +- spot/misc/memusage.cc | 6 +- spot/misc/memusage.hh | 6 +- spot/misc/minato.cc | 6 +- spot/misc/minato.hh | 6 +- spot/misc/mspool.hh | 3 +- spot/misc/optionmap.cc | 6 +- spot/misc/optionmap.hh | 6 +- spot/misc/random.cc | 6 +- spot/misc/random.hh | 6 +- spot/misc/satsolver.cc | 3 +- spot/misc/satsolver.hh | 3 +- spot/misc/timer.cc | 6 +- spot/misc/timer.hh | 6 +- spot/misc/tmpfile.cc | 3 +- spot/misc/tmpfile.hh | 3 +- spot/misc/trival.hh | 3 +- spot/misc/version.cc | 6 +- spot/misc/version.hh | 6 +- spot/parseaut/Makefile.am | 3 +- spot/parseaut/fmterror.cc | 3 +- spot/parseaut/parseaut.yy | 3 +- spot/parseaut/parsedecl.hh | 3 +- spot/parseaut/public.hh | 3 +- spot/parseaut/scanaut.ll | 3 +- spot/parsetl/Makefile.am | 6 +- spot/parsetl/fmterror.cc | 6 +- spot/parsetl/parsedecl.hh | 6 +- spot/parsetl/parsetl.yy | 6 +- spot/parsetl/scantl.ll | 6 +- spot/priv/Makefile.am | 3 +- spot/priv/accmap.hh | 3 +- spot/priv/bddalloc.cc | 6 +- spot/priv/bddalloc.hh | 6 +- spot/priv/freelist.cc | 6 +- spot/priv/freelist.hh | 6 +- spot/priv/partitioned_relabel.cc | 3 +- spot/priv/partitioned_relabel.hh | 3 +- spot/priv/satcommon.cc | 3 +- spot/priv/satcommon.hh | 3 +- spot/priv/trim.cc | 3 +- spot/priv/trim.hh | 3 +- spot/priv/weight.cc | 6 +- spot/priv/weight.hh | 6 +- spot/ta/Makefile.am | 3 +- spot/ta/ta.cc | 3 +- spot/ta/ta.hh | 3 +- spot/ta/taexplicit.cc | 3 +- spot/ta/taexplicit.hh | 3 +- spot/ta/taproduct.cc | 3 +- spot/ta/taproduct.hh | 3 +- spot/ta/tgta.hh | 3 +- spot/ta/tgtaexplicit.cc | 3 +- spot/ta/tgtaexplicit.hh | 3 +- spot/ta/tgtaproduct.cc | 3 +- spot/ta/tgtaproduct.hh | 3 +- spot/taalgos/Makefile.am | 3 +- spot/taalgos/dot.cc | 3 +- spot/taalgos/dot.hh | 3 +- spot/taalgos/emptinessta.cc | 3 +- spot/taalgos/emptinessta.hh | 3 +- spot/taalgos/minimize.cc | 3 +- spot/taalgos/minimize.hh | 3 +- spot/taalgos/reachiter.cc | 3 +- spot/taalgos/reachiter.hh | 3 +- spot/taalgos/statessetbuilder.cc | 3 +- spot/taalgos/statessetbuilder.hh | 3 +- spot/taalgos/stats.cc | 6 +- spot/taalgos/stats.hh | 3 +- spot/taalgos/tgba2ta.cc | 3 +- spot/taalgos/tgba2ta.hh | 3 +- spot/tl/Makefile.am | 3 +- spot/tl/apcollect.cc | 6 +- spot/tl/apcollect.hh | 6 +- spot/tl/contain.cc | 6 +- spot/tl/contain.hh | 6 +- spot/tl/declenv.cc | 6 +- spot/tl/declenv.hh | 6 +- spot/tl/defaultenv.cc | 6 +- spot/tl/defaultenv.hh | 6 +- spot/tl/dot.cc | 6 +- spot/tl/dot.hh | 6 +- spot/tl/environment.hh | 6 +- spot/tl/exclusive.cc | 3 +- spot/tl/exclusive.hh | 3 +- spot/tl/formula.cc | 3 +- spot/tl/formula.hh | 3 +- spot/tl/hierarchy.cc | 3 +- spot/tl/hierarchy.hh | 3 +- spot/tl/length.cc | 6 +- spot/tl/length.hh | 6 +- spot/tl/ltlf.cc | 3 +- spot/tl/ltlf.hh | 3 +- spot/tl/mark.cc | 3 +- spot/tl/mark.hh | 3 +- spot/tl/mutation.cc | 3 +- spot/tl/mutation.hh | 3 +- spot/tl/nenoform.cc | 6 +- spot/tl/nenoform.hh | 6 +- spot/tl/parse.hh | 6 +- spot/tl/print.cc | 6 +- spot/tl/print.hh | 6 +- spot/tl/randomltl.cc | 6 +- spot/tl/randomltl.hh | 6 +- spot/tl/relabel.cc | 3 +- spot/tl/relabel.hh | 3 +- 
spot/tl/remove_x.cc | 3 +- spot/tl/remove_x.hh | 3 +- spot/tl/simplify.cc | 3 +- spot/tl/simplify.hh | 3 +- spot/tl/snf.cc | 3 +- spot/tl/snf.hh | 3 +- spot/tl/sonf.cc | 3 +- spot/tl/sonf.hh | 3 +- spot/tl/unabbrev.cc | 3 +- spot/tl/unabbrev.hh | 3 +- spot/twa/Makefile.am | 6 +- spot/twa/acc.cc | 3 +- spot/twa/acc.hh | 3 +- spot/twa/bdddict.cc | 6 +- spot/twa/bdddict.hh | 6 +- spot/twa/bddprint.cc | 6 +- spot/twa/bddprint.hh | 6 +- spot/twa/formula2bdd.cc | 6 +- spot/twa/formula2bdd.hh | 6 +- spot/twa/fwd.hh | 3 +- spot/twa/taatgba.cc | 3 +- spot/twa/taatgba.hh | 3 +- spot/twa/twa.cc | 6 +- spot/twa/twa.hh | 6 +- spot/twa/twagraph.cc | 3 +- spot/twa/twagraph.hh | 3 +- spot/twa/twaproduct.cc | 6 +- spot/twa/twaproduct.hh | 6 +- spot/twaalgos/Makefile.am | 6 +- spot/twaalgos/aiger.cc | 3 +- spot/twaalgos/aiger.hh | 3 +- spot/twaalgos/alternation.cc | 3 +- spot/twaalgos/alternation.hh | 3 +- spot/twaalgos/are_isomorphic.cc | 6 +- spot/twaalgos/are_isomorphic.hh | 3 +- spot/twaalgos/bfssteps.cc | 6 +- spot/twaalgos/bfssteps.hh | 6 +- spot/twaalgos/canonicalize.cc | 3 +- spot/twaalgos/canonicalize.hh | 3 +- spot/twaalgos/cleanacc.cc | 3 +- spot/twaalgos/cleanacc.hh | 3 +- spot/twaalgos/cobuchi.cc | 3 +- spot/twaalgos/cobuchi.hh | 3 +- spot/twaalgos/complement.cc | 3 +- spot/twaalgos/complement.hh | 3 +- spot/twaalgos/complete.cc | 3 +- spot/twaalgos/complete.hh | 3 +- spot/twaalgos/compsusp.cc | 3 +- spot/twaalgos/compsusp.hh | 3 +- spot/twaalgos/contains.cc | 3 +- spot/twaalgos/contains.hh | 3 +- spot/twaalgos/copy.hh | 6 +- spot/twaalgos/couvreurnew.cc | 3 +- spot/twaalgos/couvreurnew.hh | 2 +- spot/twaalgos/cycles.cc | 3 +- spot/twaalgos/cycles.hh | 3 +- spot/twaalgos/dbranch.cc | 3 +- spot/twaalgos/dbranch.hh | 3 +- spot/twaalgos/degen.cc | 3 +- spot/twaalgos/degen.hh | 3 +- spot/twaalgos/determinize.cc | 3 +- spot/twaalgos/determinize.hh | 3 +- spot/twaalgos/dot.cc | 6 +- spot/twaalgos/dot.hh | 6 +- spot/twaalgos/dtbasat.cc | 3 +- spot/twaalgos/dtbasat.hh | 3 +- spot/twaalgos/dtwasat.cc | 3 +- spot/twaalgos/dtwasat.hh | 3 +- spot/twaalgos/dualize.cc | 3 +- spot/twaalgos/dualize.hh | 3 +- spot/twaalgos/emptiness.cc | 6 +- spot/twaalgos/emptiness.hh | 6 +- spot/twaalgos/emptiness_stats.hh | 6 +- spot/twaalgos/forq_contains.cc | 3 +- spot/twaalgos/forq_contains.hh | 3 +- spot/twaalgos/game.cc | 3 +- spot/twaalgos/game.hh | 3 +- spot/twaalgos/genem.cc | 3 +- spot/twaalgos/genem.hh | 3 +- spot/twaalgos/gfguarantee.cc | 3 +- spot/twaalgos/gfguarantee.hh | 3 +- spot/twaalgos/gtec/Makefile.am | 6 +- spot/twaalgos/gtec/ce.cc | 6 +- spot/twaalgos/gtec/ce.hh | 6 +- spot/twaalgos/gtec/gtec.cc | 6 +- spot/twaalgos/gtec/gtec.hh | 6 +- spot/twaalgos/gtec/sccstack.cc | 6 +- spot/twaalgos/gtec/sccstack.hh | 6 +- spot/twaalgos/gtec/status.cc | 6 +- spot/twaalgos/gtec/status.hh | 6 +- spot/twaalgos/gv04.cc | 6 +- spot/twaalgos/gv04.hh | 6 +- spot/twaalgos/hoa.cc | 3 +- spot/twaalgos/hoa.hh | 3 +- spot/twaalgos/iscolored.cc | 3 +- spot/twaalgos/iscolored.hh | 3 +- spot/twaalgos/isdet.cc | 3 +- spot/twaalgos/isdet.hh | 3 +- spot/twaalgos/isunamb.cc | 3 +- spot/twaalgos/isunamb.hh | 3 +- spot/twaalgos/isweakscc.cc | 3 +- spot/twaalgos/isweakscc.hh | 3 +- spot/twaalgos/langmap.cc | 3 +- spot/twaalgos/langmap.hh | 3 +- spot/twaalgos/lbtt.cc | 6 +- spot/twaalgos/lbtt.hh | 6 +- spot/twaalgos/ltl2taa.cc | 3 +- spot/twaalgos/ltl2taa.hh | 3 +- spot/twaalgos/ltl2tgba_fm.cc | 6 +- spot/twaalgos/ltl2tgba_fm.hh | 6 +- spot/twaalgos/magic.cc | 6 +- spot/twaalgos/magic.hh | 6 +- spot/twaalgos/mask.cc | 3 +- 
spot/twaalgos/mask.hh | 3 +- spot/twaalgos/mealy_machine.cc | 3 +- spot/twaalgos/mealy_machine.hh | 3 +- spot/twaalgos/minimize.cc | 3 +- spot/twaalgos/minimize.hh | 3 +- spot/twaalgos/ndfs_result.hxx | 6 +- spot/twaalgos/neverclaim.cc | 6 +- spot/twaalgos/neverclaim.hh | 6 +- spot/twaalgos/parity.cc | 3 +- spot/twaalgos/parity.hh | 3 +- spot/twaalgos/postproc.cc | 3 +- spot/twaalgos/postproc.hh | 3 +- spot/twaalgos/powerset.cc | 6 +- spot/twaalgos/powerset.hh | 6 +- spot/twaalgos/product.cc | 3 +- spot/twaalgos/product.hh | 3 +- spot/twaalgos/randomgraph.cc | 6 +- spot/twaalgos/randomgraph.hh | 6 +- spot/twaalgos/randomize.cc | 3 +- spot/twaalgos/randomize.hh | 3 +- spot/twaalgos/reachiter.cc | 6 +- spot/twaalgos/reachiter.hh | 6 +- spot/twaalgos/relabel.cc | 3 +- spot/twaalgos/relabel.hh | 3 +- spot/twaalgos/remfin.cc | 3 +- spot/twaalgos/remfin.hh | 3 +- spot/twaalgos/remprop.cc | 3 +- spot/twaalgos/remprop.hh | 3 +- spot/twaalgos/sbacc.cc | 3 +- spot/twaalgos/sbacc.hh | 3 +- spot/twaalgos/sccfilter.cc | 3 +- spot/twaalgos/sccfilter.hh | 3 +- spot/twaalgos/sccinfo.cc | 3 +- spot/twaalgos/sccinfo.hh | 3 +- spot/twaalgos/se05.cc | 6 +- spot/twaalgos/se05.hh | 6 +- spot/twaalgos/sepsets.cc | 3 +- spot/twaalgos/sepsets.hh | 3 +- spot/twaalgos/simulation.cc | 3 +- spot/twaalgos/simulation.hh | 3 +- spot/twaalgos/split.cc | 3 +- spot/twaalgos/split.hh | 3 +- spot/twaalgos/stats.cc | 6 +- spot/twaalgos/stats.hh | 6 +- spot/twaalgos/strength.cc | 3 +- spot/twaalgos/strength.hh | 3 +- spot/twaalgos/stripacc.cc | 3 +- spot/twaalgos/stripacc.hh | 3 +- spot/twaalgos/stutter.cc | 3 +- spot/twaalgos/stutter.hh | 3 +- spot/twaalgos/sum.cc | 3 +- spot/twaalgos/sum.hh | 3 +- spot/twaalgos/synthesis.cc | 3 +- spot/twaalgos/synthesis.hh | 3 +- spot/twaalgos/tau03.cc | 6 +- spot/twaalgos/tau03.hh | 6 +- spot/twaalgos/tau03opt.cc | 6 +- spot/twaalgos/tau03opt.hh | 6 +- spot/twaalgos/toparity.cc | 3 +- spot/twaalgos/toparity.hh | 3 +- spot/twaalgos/totgba.cc | 3 +- spot/twaalgos/totgba.hh | 3 +- spot/twaalgos/toweak.cc | 3 +- spot/twaalgos/toweak.hh | 3 +- spot/twaalgos/translate.cc | 3 +- spot/twaalgos/translate.hh | 3 +- spot/twaalgos/word.cc | 3 +- spot/twaalgos/word.hh | 3 +- spot/twaalgos/zlktree.cc | 3 +- spot/twaalgos/zlktree.hh | 3 +- spot/twacube/Makefile.am | 6 +- spot/twacube/cube.cc | 3 +- spot/twacube/cube.hh | 3 +- spot/twacube/fwd.hh | 3 +- spot/twacube/twacube.cc | 3 +- spot/twacube/twacube.hh | 3 +- spot/twacube_algos/Makefile.am | 6 +- spot/twacube_algos/convert.cc | 3 +- spot/twacube_algos/convert.hh | 3 +- tests/Makefile.am | 6 +- tests/core/385.test | 3 +- tests/core/500.test | 3 +- tests/core/521.test | 3 +- tests/core/522.test | 3 +- tests/core/acc.cc | 3 +- tests/core/acc.test | 3 +- tests/core/acc2.test | 3 +- tests/core/acc_word.test | 3 +- tests/core/accsimpl.test | 3 +- tests/core/alternating.test | 3 +- tests/core/autcross.test | 3 +- tests/core/autcross2.test | 3 +- tests/core/autcross3.test | 3 +- tests/core/autcross4.test | 3 +- tests/core/autcross5.test | 3 +- tests/core/babiak.test | 3 +- tests/core/bare.test | 3 +- tests/core/basimul.test | 3 +- tests/core/bdd.test | 3 +- tests/core/bdddict.cc | 3 +- tests/core/bdddict.test | 3 +- tests/core/bitvect.cc | 3 +- tests/core/bitvect.test | 3 +- tests/core/bricks.cc | 3 +- tests/core/bricks.test | 3 +- tests/core/checkpsl.cc | 3 +- tests/core/checkta.cc | 3 +- tests/core/complement.test | 3 +- tests/core/complementation.test | 3 +- tests/core/complete.test | 3 +- tests/core/consterm.cc | 3 +- tests/core/consterm.test | 3 +- 
tests/core/cube.cc | 3 +- tests/core/cube.test | 3 +- tests/core/cycles.test | 3 +- tests/core/dbacomp.test | 3 +- tests/core/dca.test | 3 +- tests/core/dca2.test | 3 +- tests/core/defs.in | 6 +- tests/core/degendet.test | 3 +- tests/core/degenid.test | 3 +- tests/core/degenlskip.test | 3 +- tests/core/degenscc.test | 3 +- tests/core/det.test | 3 +- tests/core/dfs.test | 6 +- tests/core/dnfstreett.test | 3 +- tests/core/dot2tex.test | 3 +- tests/core/dra2dba.test | 3 +- tests/core/dstar.test | 3 +- tests/core/dualize.test | 3 +- tests/core/dupexp.test | 6 +- tests/core/emptchk.cc | 3 +- tests/core/emptchk.test | 6 +- tests/core/emptchke.test | 6 +- tests/core/emptchkr.test | 6 +- tests/core/equals.test | 6 +- tests/core/equalsf.cc | 6 +- tests/core/eventuniv.test | 3 +- tests/core/exclusive-ltl.test | 3 +- tests/core/exclusive-tgba.test | 3 +- tests/core/explpro2.test | 6 +- tests/core/explpro3.test | 6 +- tests/core/explpro4.test | 6 +- tests/core/explprod.test | 6 +- tests/core/explsum.test | 3 +- tests/core/format.test | 3 +- tests/core/full.test | 3 +- tests/core/gamehoa.test | 3 +- tests/core/genaut.test | 3 +- tests/core/genltl.test | 3 +- tests/core/gragsa.test | 3 +- tests/core/graph.cc | 3 +- tests/core/graph.test | 3 +- tests/core/hierarchy.test | 3 +- tests/core/highlightstate.test | 3 +- tests/core/ikwiad.cc | 6 +- tests/core/included.test | 3 +- tests/core/intvcmp2.cc | 3 +- tests/core/intvcomp.cc | 3 +- tests/core/intvcomp.test | 3 +- tests/core/isomorph.test | 3 +- tests/core/isop.test | 3 +- tests/core/kind.cc | 3 +- tests/core/kind.test | 3 +- tests/core/kripke.test | 3 +- tests/core/kripkecat.cc | 3 +- tests/core/latex.test | 3 +- tests/core/lbt.test | 3 +- tests/core/lbttparse.test | 3 +- tests/core/length.cc | 3 +- tests/core/length.test | 3 +- tests/core/lenient.test | 3 +- tests/core/ltl2dstar.test | 3 +- tests/core/ltl2dstar2.test | 3 +- tests/core/ltl2dstar3.test | 3 +- tests/core/ltl2dstar4.test | 3 +- tests/core/ltl2neverclaim-lbtt.test | 3 +- tests/core/ltl2neverclaim.test | 3 +- tests/core/ltl2ta.test | 3 +- tests/core/ltl2ta2.test | 3 +- tests/core/ltl2tgba.test | 6 +- tests/core/ltl2tgba2.test | 6 +- tests/core/ltl3ba.test | 3 +- tests/core/ltl3dra.test | 3 +- tests/core/ltlcounter.test | 3 +- tests/core/ltlcross.test | 3 +- tests/core/ltlcross2.test | 3 +- tests/core/ltlcross3.test | 3 +- tests/core/ltlcross4.test | 3 +- tests/core/ltlcross5.test | 3 +- tests/core/ltlcross6.test | 3 +- tests/core/ltlcrossce.test | 3 +- tests/core/ltlcrossce2.test | 3 +- tests/core/ltlcrossgrind.test | 3 +- tests/core/ltldo.test | 3 +- tests/core/ltldo2.test | 3 +- tests/core/ltlf.test | 3 +- tests/core/ltlfilt.test | 3 +- tests/core/ltlgrind.test | 3 +- tests/core/ltlrel.cc | 3 +- tests/core/ltlrel.test | 3 +- tests/core/ltlsynt-pgame.test | 3 +- tests/core/ltlsynt.test | 3 +- tests/core/ltlsynt2.test | 3 +- tests/core/lunabbrev.test | 6 +- tests/core/maskacc.test | 3 +- tests/core/maskkeep.test | 3 +- tests/core/mempool.cc | 3 +- tests/core/mempool.test | 2 +- tests/core/minterm.cc | 3 +- tests/core/minterm.test | 2 +- tests/core/minusx.test | 3 +- tests/core/monitor.test | 3 +- tests/core/nenoform.test | 6 +- tests/core/neverclaimread.test | 3 +- tests/core/ngraph.cc | 3 +- tests/core/ngraph.test | 3 +- tests/core/nondet.test | 3 +- tests/core/obligation.test | 3 +- tests/core/optba.test | 3 +- tests/core/parity.cc | 3 +- tests/core/parity.test | 3 +- tests/core/parity2.test | 3 +- tests/core/parse.test | 6 +- tests/core/parseaut.test | 3 +- tests/core/parseerr.test | 6 +- 
tests/core/pdegen.test | 3 +- tests/core/pgsolver.test | 3 +- tests/core/prodchain.test | 3 +- tests/core/prodor.test | 3 +- tests/core/rabin2parity.test | 3 +- tests/core/rand.test | 3 +- tests/core/randaut.test | 3 +- tests/core/randomize.test | 3 +- tests/core/randpsl.test | 3 +- tests/core/randtgba.cc | 6 +- tests/core/randtgba.test | 3 +- tests/core/readltl.cc | 6 +- tests/core/readsave.test | 6 +- tests/core/reduc.cc | 6 +- tests/core/reduc.test | 6 +- tests/core/reduc0.test | 3 +- tests/core/reduccmp.test | 6 +- tests/core/reducpsl.test | 3 +- tests/core/remfin.test | 3 +- tests/core/remove_x.test | 3 +- tests/core/remprop.test | 3 +- tests/core/renault.test | 3 +- tests/core/safra.cc | 3 +- tests/core/safra.test | 3 +- tests/core/satmin.test | 3 +- tests/core/satmin2.test | 3 +- tests/core/satmin3.test | 3 +- tests/core/sbacc.test | 3 +- tests/core/scc.test | 3 +- tests/core/sccdot.test | 3 +- tests/core/sccif.cc | 3 +- tests/core/sccif.test | 3 +- tests/core/sccsimpl.test | 3 +- tests/core/semidet.test | 3 +- tests/core/sepsets.test | 3 +- tests/core/serial.test | 3 +- tests/core/sim2.test | 3 +- tests/core/sim3.test | 3 +- tests/core/sonf.test | 3 +- tests/core/split.test | 3 +- tests/core/spotlbtt.test | 6 +- tests/core/spotlbtt2.test | 3 +- tests/core/streett.test | 3 +- tests/core/strength.test | 3 +- tests/core/stutter-ltl.test | 3 +- tests/core/stutter-tgba.test | 3 +- tests/core/sugar.test | 3 +- tests/core/syfco.test | 3 +- tests/core/syntimpl.cc | 6 +- tests/core/syntimpl.test | 6 +- tests/core/taatgba.cc | 3 +- tests/core/taatgba.test | 3 +- tests/core/tgbagraph.test | 3 +- tests/core/tostring.cc | 6 +- tests/core/tostring.test | 6 +- tests/core/tripprod.test | 6 +- tests/core/trival.cc | 3 +- tests/core/trival.test | 3 +- tests/core/tunabbrev.test | 6 +- tests/core/tunenoform.test | 6 +- tests/core/twacube.cc | 3 +- tests/core/twacube.test | 3 +- tests/core/twagraph.cc | 3 +- tests/core/unabbrevwm.test | 3 +- tests/core/unambig.test | 3 +- tests/core/unambig2.test | 3 +- tests/core/uniq.test | 3 +- tests/core/utf8.test | 3 +- tests/core/uwrm.test | 3 +- tests/core/wdba.test | 3 +- tests/core/wdba2.test | 3 +- tests/ltsmin/check.test | 3 +- tests/ltsmin/check2.test | 3 +- tests/ltsmin/check3.test | 3 +- tests/ltsmin/finite.test | 3 +- tests/ltsmin/finite2.test | 3 +- tests/ltsmin/finite3.test | 3 +- tests/ltsmin/kripke.test | 3 +- tests/ltsmin/modelcheck.cc | 3 +- tests/ltsmin/testconvert.cc | 2 +- tests/ltsmin/testconvert.test | 3 +- tests/python/298.py | 3 +- tests/python/341.py | 3 +- tests/python/471.py | 3 +- tests/python/acc.py | 3 +- tests/python/accparse2.py | 3 +- tests/python/aiger.py | 3 +- tests/python/alarm.py | 3 +- tests/python/aliases.py | 3 +- tests/python/alternating.py | 3 +- tests/python/bdddict.py | 3 +- tests/python/bdditer.py | 3 +- tests/python/bddnqueen.py | 6 +- tests/python/bugdet.py | 3 +- tests/python/complement_semidet.py | 3 +- tests/python/dbranch.py | 3 +- tests/python/declenv.py | 3 +- tests/python/decompose_scc.py | 3 +- tests/python/det.py | 3 +- tests/python/dualize.py | 3 +- tests/python/ecfalse.py | 3 +- tests/python/except.py | 3 +- tests/python/forq_contains.py | 3 +- tests/python/game.py | 3 +- tests/python/gen.py | 3 +- tests/python/genem.py | 3 +- tests/python/implies.py | 3 +- tests/python/interdep.py | 6 +- tests/python/intrun.py | 3 +- tests/python/kripke.py | 3 +- tests/python/langmap.py | 3 +- tests/python/ltl2tgba.py | 6 +- tests/python/ltl2tgba.test | 6 +- tests/python/ltlf.py | 3 +- tests/python/ltlparse.py | 6 +- 
tests/python/ltlsimple.py | 6 +- tests/python/mealy.py | 3 +- tests/python/merge.py | 3 +- tests/python/mergedge.py | 3 +- tests/python/minato.py | 6 +- tests/python/misc-ec.py | 3 +- tests/python/optionmap.py | 6 +- tests/python/origstate.py | 3 +- tests/python/otfcrash.py | 3 +- tests/python/parity.py | 3 +- tests/python/parsetgba.py | 3 +- tests/python/pdegen.py | 3 +- tests/python/powerset.py | 3 +- tests/python/prodexpt.py | 3 +- tests/python/randgen.py | 3 +- tests/python/relabel.py | 3 +- tests/python/remfin.py | 3 +- tests/python/removeap.py | 3 +- tests/python/rs_like.py | 3 +- tests/python/satmin.py | 3 +- tests/python/sbacc.py | 3 +- tests/python/sccfilter.py | 3 +- tests/python/sccinfo.py | 3 +- tests/python/sccsplit.py | 3 +- tests/python/semidet.py | 3 +- tests/python/setacc.py | 3 +- tests/python/setxor.py | 3 +- tests/python/simplacc.py | 3 +- tests/python/simstate.py | 3 +- tests/python/sonf.py | 3 +- tests/python/split.py | 3 +- tests/python/splitedge.py | 3 +- tests/python/streett_totgba.py | 3 +- tests/python/streett_totgba2.py | 3 +- tests/python/stutter.py | 3 +- tests/python/sum.py | 3 +- tests/python/synthesis.py | 3 +- tests/python/toparity.py | 3 +- tests/python/toweak.py | 3 +- tests/python/tra2tba.py | 3 +- tests/python/trival.py | 3 +- tests/python/twagraph.py | 3 +- tests/python/zlktree.py | 3 +- tests/run.in | 6 +- tests/sanity/80columns.test | 6 +- tests/sanity/bin.test | 3 +- tests/sanity/getenv.test | 3 +- tests/sanity/includes.test | 6 +- tests/sanity/ipynb.pl | 3 +- tests/sanity/namedprop.test | 3 +- tests/sanity/private.test | 3 +- tests/sanity/readme.pl | 3 +- tests/sanity/style.test | 6 +- tools/man2html.pl | 3 +- 747 files changed, 847 insertions(+), 2018 deletions(-) diff --git a/AUTHORS b/AUTHORS index d33ca89e1..d997df26d 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,33 +1,101 @@ The following people have contributed code to Spot: -Ala-Eddine Ben-Salem -Alexandre Duret-Lutz -Alexandre Gbaguidi Aïsse -Alexandre Lewkowicz -Amaury Fauchille -Antoine Martin -Arthur Remaud -Clément Gillard -Damien Lefortier -Denis Poitrenaud -Elie Abi Saad -Étienne Renault -Félix Abecassis -Florian Renkin -Guillaume Sadegh -Heikki Tauriainen -Henrich Lauko -Jérôme Dubois -Jonah Romero -Laurent Xu -Maximilien Colange -Philipp Schlehuber -Pierre Parutto -Rachid Rebiha -Souheib Baarir -Thibaud Michaud -Thomas Badie -Thomas Martinez -Thomas Medioni -Tomáš Babiak -Vincent Tourneur +- Ala-Eddine Ben-Salem (2010-2012) + During is PhD thesis at EPITA. + +- Alexandre Duret-Lutz (2002-) + During his master and then PhD at Université Pierre et Marie Curie. + Then as an employee of EPITA. + +- Alexandre Gbaguidi Aïsse (2016-2017) + While a student at EPITA. + +- Alexandre Lewkowicz (2014-2016) + While a student at EPITA. + +- Amaury Fauchille (2016) + While a student at EPITA. + +- Antoine Martin (2018-) + While a student at EPITA, and then during his PhD at EPITA. + +- Arthur Remaud (2017) + While a student at EPITA. + +- Clément Gillard (2017-2019) + While a student at EPITA. + +- Damien Lefortier (2008-2010) + While a student at EPITA. + +- Denis Poitrenaud (2004-2005) + As an employee of Univesité Paris V. + +- Elie Abi Saad (2013) + While a student at EPITA. + +- Étienne Renault (2013-2020) + During his PhD at EPITA & Université Pierre et Marie Curie. + Then as an employee of EPITA. + +- Félix Abecassis (2009-2010) + While a student at EPITA. + +- Florian Perlié-Long (2017) + While a student at EPITA. + +- Florian Renkin (2020-) + During his PhD at EPITA. 
+ Then as an employee of Université Paris Cité. + +- Guillaume Sadegh (2008-2010) + While a student at EPITA. + +- Heikki Tauriainen (2004-2005) + During his PhD at Helsinki University of Technology. + +- Henrich Lauko (2017) + As an ERASMUS student from Masaryk University (Brno) visiting EPITA. + +- Jérôme Dubois (2020-2021) + While a student at EPITA. + +- Jonah Romero (2023) + During an internship at IMDEA Software Institute, supervised by Pierre Ganty. + +- Laurent Xu (2016-2017) + While a student at EPITA. + +- Maximilien Colange (2016-2018) + As an employee of EPITA. + +- Philipp Schlehuber (2020-) + As an employee of EPITA. + +- Pierre Parutto (2012) + While a student at EPITA. + +- Rachid Rebiha (2003) + During his master's internship at Université Pierre et Marie Curie. + +- Souheib Baarir (2004, 2008, 2013) + During his PhD at Université Pierre et Marie Curie. + Then during a sabbatical visit at EPITA. + +- Thibaud Michaud (2014, 2017) + While a student at EPITA. + +- Thomas Badie (2011-2013) + While a student at EPITA. + +- Thomas Martinez (2004) + While a student at EPITA. + +- Thomas Medioni (2017) + During a master's internship at EPITA. + +- Tomáš Babiak (2012) + During his PhD at Masaryk University (Brno). + +- Vincent Tourneur (2017) + While a student at EPITA. diff --git a/Makefile.am b/Makefile.am index 5cd8257d2..e198a977c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2017, 2020, 2022-2023 Laboratoire de Recherche -## et Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/Makefile.am b/bench/Makefile.am index 406c4511a..b5f0cd98d 100644 --- a/bench/Makefile.am +++ b/bench/Makefile.am @@ -1,8 +1,4 @@ -## Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Laboratoire de Recherche -## et Dveloppement de l'Epita (LRDE). -## Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -## dpartement Systmes Rpartis Coopratifs (SRC), Universit Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/dtgbasat/Makefile.am b/bench/dtgbasat/Makefile.am index 75ce40b37..48f2438c7 100644 --- a/bench/dtgbasat/Makefile.am +++ b/bench/dtgbasat/Makefile.am @@ -1,5 +1,4 @@ -## Copyright (C) 2013 Laboratoire de Recherche et Développement de -## l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/dtgbasat/gen.py b/bench/dtgbasat/gen.py index dabf77971..60a707e91 100755 --- a/bench/dtgbasat/gen.py +++ b/bench/dtgbasat/gen.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -# Copyright (C) 2016-2018, 2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library.
# diff --git a/bench/emptchk/Makefile.am b/bench/emptchk/Makefile.am index ea5c10125..cbf3a03e9 100644 --- a/bench/emptchk/Makefile.am +++ b/bench/emptchk/Makefile.am @@ -1,6 +1,4 @@ -## Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -## dpartement Systmes Rpartis Coopratifs (SRC), Universit Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/emptchk/defs.in b/bench/emptchk/defs.in index 6616cbbd2..b6b4b41ba 100644 --- a/bench/emptchk/defs.in +++ b/bench/emptchk/defs.in @@ -1,7 +1,5 @@ # -*- shell-script -*- -# Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -# dpartement Systmes Rpartis Coopratifs (SRC), Universit Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bench/ltl2tgba/Makefile.am b/bench/ltl2tgba/Makefile.am index 7be974138..e0ec7b0dc 100644 --- a/bench/ltl2tgba/Makefile.am +++ b/bench/ltl2tgba/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013, 2016 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/ltl2tgba/defs.in b/bench/ltl2tgba/defs.in index a27953353..d673c858a 100644 --- a/bench/ltl2tgba/defs.in +++ b/bench/ltl2tgba/defs.in @@ -1,9 +1,5 @@ # -*- mode: shell-script; coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2016, 2018 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bench/ltl2tgba/sum.py b/bench/ltl2tgba/sum.py index 17b81cc91..be2eb136c 100755 --- a/bench/ltl2tgba/sum.py +++ b/bench/ltl2tgba/sum.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 ## -*- coding: utf-8 -*- -## Copyright (C) 2013 Laboratoire de Recherche et Développement de -## l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/ltlclasses/Makefile.am b/bench/ltlclasses/Makefile.am index 97f42d5d4..5d1383506 100644 --- a/bench/ltlclasses/Makefile.am +++ b/bench/ltlclasses/Makefile.am @@ -1,5 +1,4 @@ -# Copyright (C) 2009, 2010 Laboratoire de Recherche et Dveloppement de -# l'EPITA (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bench/ltlcounter/Makefile.am b/bench/ltlcounter/Makefile.am index a18041d68..07789ee70 100644 --- a/bench/ltlcounter/Makefile.am +++ b/bench/ltlcounter/Makefile.am @@ -1,5 +1,4 @@ -# Copyright (C) 2009, 2014 Laboratoire de Recherche et Dveloppement -# de l'EPITA (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bench/spin13/Makefile.am b/bench/spin13/Makefile.am index bccddb174..16d940c4e 100644 --- a/bench/spin13/Makefile.am +++ b/bench/spin13/Makefile.am @@ -1,5 +1,4 @@ -## Copyright (C) 2013 Laboratoire de Recherche et Développement de -## l'Epita (LRDE). 
+## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/stutter/Makefile.am b/bench/stutter/Makefile.am index 5f7a85c8d..acaf1e342 100644 --- a/bench/stutter/Makefile.am +++ b/bench/stutter/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2014, 2015, 2017 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bench/stutter/stutter_invariance_formulas.cc b/bench/stutter/stutter_invariance_formulas.cc index 32bc45083..a2ad684ca 100644 --- a/bench/stutter/stutter_invariance_formulas.cc +++ b/bench/stutter/stutter_invariance_formulas.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016, 2017, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bench/stutter/stutter_invariance_randomgraph.cc b/bench/stutter/stutter_invariance_randomgraph.cc index 69c007bbb..39e32543e 100644 --- a/bench/stutter/stutter_invariance_randomgraph.cc +++ b/bench/stutter/stutter_invariance_randomgraph.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bench/wdba/Makefile.am b/bench/wdba/Makefile.am index 6cff187c3..c30b6a500 100644 --- a/bench/wdba/Makefile.am +++ b/bench/wdba/Makefile.am @@ -1,5 +1,4 @@ -# Copyright (C) 2010 Laboratoire de Recherche et Dveloppement de -# l'EPITA (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bin/Makefile.am b/bin/Makefile.am index 14be7285a..78d189dc9 100644 --- a/bin/Makefile.am +++ b/bin/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2012-2017 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bin/autcross.cc b/bin/autcross.cc index b3e504bb3..6ada83f0b 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 820b37f49..87677e253 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 0b84e60e0..9726659c9 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index 051212b3d..24066699a 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_color.cc b/bin/common_color.cc index 5c49e2021..3c63cae91 100644 --- a/bin/common_color.cc +++ b/bin/common_color.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_color.hh b/bin/common_color.hh index 657d9647d..82dab4de8 100644 --- a/bin/common_color.hh +++ b/bin/common_color.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_conv.cc b/bin/common_conv.cc index b23a67c51..1c9f16104 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_conv.hh b/bin/common_conv.hh index 617e97e23..a3a43f8eb 100644 --- a/bin/common_conv.hh +++ b/bin/common_conv.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_cout.cc b/bin/common_cout.cc index 2f5ada858..53e4ff10c 100644 --- a/bin/common_cout.cc +++ b/bin/common_cout.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_cout.hh b/bin/common_cout.hh index 7b55ad802..1fc52979f 100644 --- a/bin/common_cout.hh +++ b/bin/common_cout.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_file.cc b/bin/common_file.cc index 68a69a2af..88f54eddd 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/bin/common_file.hh b/bin/common_file.hh index 51000d18c..b8b3b9b19 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_finput.cc b/bin/common_finput.cc index dbcdb3849..df0343dd1 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2019, 2021-2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 9ecb5b025..30b7f333c 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_hoaread.cc b/bin/common_hoaread.cc index d77c10771..c3029f0f0 100644 --- a/bin/common_hoaread.cc +++ b/bin/common_hoaread.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_hoaread.hh b/bin/common_hoaread.hh index e66967393..0acc840b1 100644 --- a/bin/common_hoaread.hh +++ b/bin/common_hoaread.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017, 2018, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_output.cc b/bin/common_output.cc index 13988688a..3e44c9ab1 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_output.hh b/bin/common_output.hh index 30fe9e7d1..9d117bc39 100644 --- a/bin/common_output.hh +++ b/bin/common_output.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_post.cc b/bin/common_post.cc index 089473210..fdb1dc903 100644 --- a/bin/common_post.cc +++ b/bin/common_post.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/bin/common_post.hh b/bin/common_post.hh index fd75e3c98..332b65252 100644 --- a/bin/common_post.hh +++ b/bin/common_post.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_r.cc b/bin/common_r.cc index 599aaace6..da1354745 100644 --- a/bin/common_r.cc +++ b/bin/common_r.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_r.hh b/bin/common_r.hh index add0ddb91..e3c1b5dc7 100644 --- a/bin/common_r.hh +++ b/bin/common_r.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_range.cc b/bin/common_range.cc index 98e568b41..bcfe40336 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2016, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_range.hh b/bin/common_range.hh index 2727ef934..3e8ed5f32 100644 --- a/bin/common_range.hh +++ b/bin/common_range.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2015, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_setup.cc b/bin/common_setup.cc index c59ec0695..e0636b802 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // @@ -36,7 +35,7 @@ display_version(FILE *stream, struct argp_state*) fputs(program_name, stream); fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\ \n\ -Copyright (C) 2023 Laboratoire de Recherche de l'Epita (LRE)\n\ +Copyright (C) 2023 by the Spot authors, see the AUTHORS File for details.\n\ License GPLv3+: \ GNU GPL version 3 or later .\n\ This is free software: you are free to change and redistribute it.\n\ diff --git a/bin/common_setup.hh b/bin/common_setup.hh index 94cd16f4f..e48073920 100644 --- a/bin/common_setup.hh +++ b/bin/common_setup.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2013, 2018-2019, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/bin/common_sys.hh b/bin/common_sys.hh index dbcc8fdbf..2c2392b52 100644 --- a/bin/common_sys.hh +++ b/bin/common_sys.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_trans.cc b/bin/common_trans.cc index b93535173..dd7ccc0ba 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/common_trans.hh b/bin/common_trans.hh index 0ebe59e8c..7a1f5f227 100644 --- a/bin/common_trans.hh +++ b/bin/common_trans.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 6d7414587..65a634e2c 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019, 2022, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/genaut.cc b/bin/genaut.cc index fbcffb48d..e873a263c 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/genltl.cc b/bin/genltl.cc index 79b71b699..3a3cb169e 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015-2019, 2022-2023 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index 1229cd422..88f41a234 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index 60afcf9e8..e5a4a7340 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index 3219beb75..2a24732ac 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltldo.cc b/bin/ltldo.cc index c695631df..015276264 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 5ed7a2fe1..689c26e53 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index 5e56f7d2c..626211adc 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2019, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index fb24a4e6b..78dfb8829 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/man/Makefile.am b/bin/man/Makefile.am index 64f1809df..1b6319766 100644 --- a/bin/man/Makefile.am +++ b/bin/man/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2012-2017 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/bin/options.py b/bin/options.py index 0d41562d1..c2ef4de72 100755 --- a/bin/options.py +++ b/bin/options.py @@ -1,7 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright (C) 2014 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/bin/randaut.cc b/bin/randaut.cc index ec1a06a88..0df0509c1 100644 --- a/bin/randaut.cc +++ b/bin/randaut.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020, 2022, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/randltl.cc b/bin/randltl.cc index 2a95def20..23adacc33 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2019, 2022, 2023 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 19721daeb..4f077c60d 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/bin/spot.cc b/bin/spot.cc index c6bad3c70..75401ddbc 100644 --- a/bin/spot.cc +++ b/bin/spot.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/configure.ac b/configure.ac index 41261d84c..209153007 100644 --- a/configure.ac +++ b/configure.ac @@ -1,9 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2008-2023, Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003-2007 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/debian/copyright b/debian/copyright index 792afcec1..ae9290987 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,8 +3,7 @@ Upstream-Name: spot Source: http://www.lrde.epita.fr/dload/spot/ Files: * -Copyright: 2003-2007 Laboratoire d'Informatique de Paris 6 (LIP6) - 2007-2022 Laboratoire de Recherche et Développement de l'Epita (LRDE) +Copyright: 2003-2023 the Spot authors License: GPL-3+ Spot is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -161,7 +160,7 @@ License: GPL-3+ Public License version 3 can be found in "/usr/share/common-licenses/GPL-3". Files: debian/* -Copyright: 2015-2018 Laboratoire de Recherche et Développement de l'Epita (LRDE) +Copyright: 2015-2023 the Spot authors License: GPL-3+ This package is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/doc/Makefile.am b/doc/Makefile.am index fed301d2a..30c9cfa69 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2010-2011, 2013-2021 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). -## Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 -## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -## Pierre et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/doc/tl/Makefile.am b/doc/tl/Makefile.am index e4a840aee..febeac1f8 100644 --- a/doc/tl/Makefile.am +++ b/doc/tl/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011, 2012, 2013, 2015, 2019 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/elisp/Makefile.am b/elisp/Makefile.am index c67a969e8..74800d125 100644 --- a/elisp/Makefile.am +++ b/elisp/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2015-2018, 2022 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). 
+## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/python/Makefile.am b/python/Makefile.am index bab25349c..ae51bd2b3 100644 --- a/python/Makefile.am +++ b/python/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2010, 2011, 2013-2021 Laboratoire de Recherche -## et Development de l'Epita (LRDE). -## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/python/buddy.i b/python/buddy.i index 6d5c11f1c..99eb1cbb1 100644 --- a/python/buddy.i +++ b/python/buddy.i @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2014, 2016, 2021 Laboratoire de -// Recherche et Développement de l'EPITA. -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/python/spot/__init__.py b/python/spot/__init__.py index cefb59b77..da366b0ee 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2014-2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/python/spot/aux_.py b/python/spot/aux_.py index 3c6435296..211a988bd 100644 --- a/python/spot/aux_.py +++ b/python/spot/aux_.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2019-2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/python/spot/gen.i b/python/spot/gen.i index 190e00b8f..acd970dbc 100644 --- a/python/spot/gen.i +++ b/python/spot/gen.i @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/python/spot/impl.i b/python/spot/impl.i index 668ccff89..e8160c362 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/python/spot/jupyter.py b/python/spot/jupyter.py index ea16c865d..136db6e5d 100644 --- a/python/spot/jupyter.py +++ b/python/spot/jupyter.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/python/spot/ltsmin.i b/python/spot/ltsmin.i index bd49c5a3f..94b4ca93b 100644 --- a/python/spot/ltsmin.i +++ b/python/spot/ltsmin.i @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2017, 2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/Makefile.am b/spot/Makefile.am index 806b299ad..9e431a6ca 100644 --- a/spot/Makefile.am +++ b/spot/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2016, 2017, 2020, 2022 -## Laboratoire de Recherche et Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/gen/Makefile.am b/spot/gen/Makefile.am index b8249f1e5..b4aa91253 100644 --- a/spot/gen/Makefile.am +++ b/spot/gen/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/gen/automata.cc b/spot/gen/automata.cc index 73c057a00..2e6c49458 100644 --- a/spot/gen/automata.cc +++ b/spot/gen/automata.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et -// Developpement de l'EPITA (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/gen/automata.hh b/spot/gen/automata.hh index a54f75ac1..fdaa0a6d5 100644 --- a/spot/gen/automata.hh +++ b/spot/gen/automata.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2019, 2022 Laboratoire de Recherche et Developpement de -// l'EPITA (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc index 10841e820..d888f4053 100644 --- a/spot/gen/formulas.cc +++ b/spot/gen/formulas.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et Developpement -// de l'EPITA (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/gen/formulas.hh b/spot/gen/formulas.hh index ef5a0d850..131b234b5 100644 --- a/spot/gen/formulas.hh +++ b/spot/gen/formulas.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et -// Developpement de l'EPITA (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/graph/Makefile.am b/spot/graph/Makefile.am index 87723057a..fcb498600 100644 --- a/spot/graph/Makefile.am +++ b/spot/graph/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2014, 2018 Laboratoire de Recherche et Développement de -## l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. 
## ## This file is part of Spot, a model checking library. ## diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index d2a97d1c5..dc3c221c1 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020-2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/graph/ngraph.hh b/spot/graph/ngraph.hh index 0e883ad2b..a6f25d099 100644 --- a/spot/graph/ngraph.hh +++ b/spot/graph/ngraph.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/kripke/Makefile.am b/spot/kripke/Makefile.am index 90161378e..7bb302e75 100644 --- a/spot/kripke/Makefile.am +++ b/spot/kripke/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2011, 2013-2015, 2018 Laboratoire de Recherche -## et Developpement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/kripke/fairkripke.cc b/spot/kripke/fairkripke.cc index 70d44a2b4..814dc6134 100644 --- a/spot/kripke/fairkripke.cc +++ b/spot/kripke/fairkripke.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2014, 2016, 2018 Laboratoire de Recherche -// et Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/kripke/fairkripke.hh b/spot/kripke/fairkripke.hh index 41639a86f..9e4038425 100644 --- a/spot/kripke/fairkripke.hh +++ b/spot/kripke/fairkripke.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2013, 2014, 2015, 2016 Laboratoire de -// Recherche et Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/kripke/fwd.hh b/spot/kripke/fwd.hh index 2b4e81af3..f758cd536 100644 --- a/spot/kripke/fwd.hh +++ b/spot/kripke/fwd.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/kripke/kripke.cc b/spot/kripke/kripke.cc index 56b2addbd..2feb03874 100644 --- a/spot/kripke/kripke.cc +++ b/spot/kripke/kripke.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2014, 2016, 2018 Laboratoire de Recherche -// et Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/kripke/kripke.hh b/spot/kripke/kripke.hh index ed540ac41..8dd0b5da5 100644 --- a/spot/kripke/kripke.hh +++ b/spot/kripke/kripke.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2013, 2014, 2016, 2017, 2019, 2020, 2023 -// Laboratoire de Recherche et Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/kripke/kripkegraph.hh b/spot/kripke/kripkegraph.hh index d6292c070..48a6caeb9 100644 --- a/spot/kripke/kripkegraph.hh +++ b/spot/kripke/kripkegraph.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/Makefile.am b/spot/ltsmin/Makefile.am index e42bc730e..de85941ce 100644 --- a/spot/ltsmin/Makefile.am +++ b/spot/ltsmin/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Laboratoire de -## Recherche et Developpement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/ltsmin/ltsmin.cc b/spot/ltsmin/ltsmin.cc index 46104a8d8..27e869f46 100644 --- a/spot/ltsmin/ltsmin.cc +++ b/spot/ltsmin/ltsmin.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2014-2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/ltsmin.hh b/spot/ltsmin/ltsmin.hh index 130188a62..1611375fe 100644 --- a/spot/ltsmin/ltsmin.hh +++ b/spot/ltsmin/ltsmin.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2016, 2017, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/spins_interface.cc b/spot/ltsmin/spins_interface.cc index f4e1823f2..edf115ed3 100644 --- a/spot/ltsmin/spins_interface.cc +++ b/spot/ltsmin/spins_interface.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2019, 2020, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/spins_interface.hh b/spot/ltsmin/spins_interface.hh index c427ed3c2..de64a5c99 100644 --- a/spot/ltsmin/spins_interface.hh +++ b/spot/ltsmin/spins_interface.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/spins_kripke.hh b/spot/ltsmin/spins_kripke.hh index 8f5d66913..c122913ee 100644 --- a/spot/ltsmin/spins_kripke.hh +++ b/spot/ltsmin/spins_kripke.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2019, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ltsmin/spins_kripke.hxx b/spot/ltsmin/spins_kripke.hxx index bdf47fbb6..5ef77244c 100644 --- a/spot/ltsmin/spins_kripke.hxx +++ b/spot/ltsmin/spins_kripke.hxx @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/mc/Makefile.am b/spot/mc/Makefile.am index e84815433..191af1a99 100644 --- a/spot/mc/Makefile.am +++ b/spot/mc/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2015, 2016, 2017, 2019, 2020 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/mc/bloemen.hh b/spot/mc/bloemen.hh index 432badb76..995f5a00b 100644 --- a/spot/mc/bloemen.hh +++ b/spot/mc/bloemen.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/bloemen_ec.hh b/spot/mc/bloemen_ec.hh index 6e581a0ac..b91e0bbf8 100644 --- a/spot/mc/bloemen_ec.hh +++ b/spot/mc/bloemen_ec.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/cndfs.hh b/spot/mc/cndfs.hh index 02768144b..5cec44471 100644 --- a/spot/mc/cndfs.hh +++ b/spot/mc/cndfs.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/deadlock.hh b/spot/mc/deadlock.hh index 90c6c0556..3ce4d0ade 100644 --- a/spot/mc/deadlock.hh +++ b/spot/mc/deadlock.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/intersect.hh b/spot/mc/intersect.hh index 378bcf638..e34c8dc2d 100644 --- a/spot/mc/intersect.hh +++ b/spot/mc/intersect.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2019, 2020 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/lpar13.hh b/spot/mc/lpar13.hh index 77396fb9d..1abbd9faf 100644 --- a/spot/mc/lpar13.hh +++ b/spot/mc/lpar13.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018-, 20222022 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/mc.hh b/spot/mc/mc.hh index a9ab6fc20..383e6e1fa 100644 --- a/spot/mc/mc.hh +++ b/spot/mc/mc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2019, 2020 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/mc/mc_instanciator.hh b/spot/mc/mc_instanciator.hh index db7b35373..aef392738 100644 --- a/spot/mc/mc_instanciator.hh +++ b/spot/mc/mc_instanciator.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2019-2021 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/unionfind.cc b/spot/mc/unionfind.cc index 2193d2080..7fe2baf5c 100644 --- a/spot/mc/unionfind.cc +++ b/spot/mc/unionfind.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2021 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/unionfind.hh b/spot/mc/unionfind.hh index 39f2d5e24..37c0fd7e4 100644 --- a/spot/mc/unionfind.hh +++ b/spot/mc/unionfind.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/mc/utils.hh b/spot/mc/utils.hh index 9f80c8764..0fc39cd53 100644 --- a/spot/mc/utils.hh +++ b/spot/mc/utils.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2020 Laboratoire de Recherche et -// Developpement de l'Epita +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/Makefile.am b/spot/misc/Makefile.am index 6b771dbb5..747153500 100644 --- a/spot/misc/Makefile.am +++ b/spot/misc/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2014, 2016-2018, 2020-2022 Laboratoire de -## Recherche et Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de -## Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -## Université Pierre et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/misc/bareword.cc b/spot/misc/bareword.cc index 91a259a2f..a64d11511 100644 --- a/spot/misc/bareword.cc +++ b/spot/misc/bareword.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/bareword.hh b/spot/misc/bareword.hh index 5d3b56902..4b5d96794 100644 --- a/spot/misc/bareword.hh +++ b/spot/misc/bareword.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/misc/bddlt.hh b/spot/misc/bddlt.hh index 46e24ed33..34a9ea372 100644 --- a/spot/misc/bddlt.hh +++ b/spot/misc/bddlt.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2017, 2021, 2022 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/bitset.cc b/spot/misc/bitset.cc index 632b0ded1..9ab2a2e6c 100644 --- a/spot/misc/bitset.cc +++ b/spot/misc/bitset.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/bitset.hh b/spot/misc/bitset.hh index d17d26409..6b53ff712 100644 --- a/spot/misc/bitset.hh +++ b/spot/misc/bitset.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/bitvect.cc b/spot/misc/bitvect.cc index c39ff2ed1..90eb2a55f 100644 --- a/spot/misc/bitvect.cc +++ b/spot/misc/bitvect.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2017, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/bitvect.hh b/spot/misc/bitvect.hh index 74ab2bf3f..f20c24324 100644 --- a/spot/misc/bitvect.hh +++ b/spot/misc/bitvect.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/casts.hh b/spot/misc/casts.hh index 0b0b9720c..3776a2a5f 100644 --- a/spot/misc/casts.hh +++ b/spot/misc/casts.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2015-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/clz.hh b/spot/misc/clz.hh index 648b6615c..50345595a 100644 --- a/spot/misc/clz.hh +++ b/spot/misc/clz.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/common.hh b/spot/misc/common.hh index 8b066b0a5..4d3a12766 100644 --- a/spot/misc/common.hh +++ b/spot/misc/common.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/misc/escape.cc b/spot/misc/escape.cc index 2943ee503..73d232fe0 100644 --- a/spot/misc/escape.cc +++ b/spot/misc/escape.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015, 2016, 2018 Laboratoire de Recherche -// et Developpement de l'Epita (LRDE) -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/escape.hh b/spot/misc/escape.hh index 8779dd2e2..e025cfa6d 100644 --- a/spot/misc/escape.hh +++ b/spot/misc/escape.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2013, 2015, 2018, 2020 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/fixpool.hh b/spot/misc/fixpool.hh index 0b7c1e92f..30b8a9b3e 100644 --- a/spot/misc/fixpool.hh +++ b/spot/misc/fixpool.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2015-2018, 2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/formater.cc b/spot/misc/formater.cc index 38896a420..8864f0bae 100644 --- a/spot/misc/formater.cc +++ b/spot/misc/formater.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/formater.hh b/spot/misc/formater.hh index 753ca0a8b..2e387fe55 100644 --- a/spot/misc/formater.hh +++ b/spot/misc/formater.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/hash.hh b/spot/misc/hash.hh index cad845a68..31031f61e 100644 --- a/spot/misc/hash.hh +++ b/spot/misc/hash.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011, 2014, 2015-2018, 2021, 2022 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/hashfunc.hh b/spot/misc/hashfunc.hh index 0ab4fba0c..8c032ff9d 100644 --- a/spot/misc/hashfunc.hh +++ b/spot/misc/hashfunc.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE) -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/intvcmp2.cc b/spot/misc/intvcmp2.cc index b8b412236..4dad936cf 100644 --- a/spot/misc/intvcmp2.cc +++ b/spot/misc/intvcmp2.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2015, 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/intvcmp2.hh b/spot/misc/intvcmp2.hh index 95da48be6..f679aa5af 100644 --- a/spot/misc/intvcmp2.hh +++ b/spot/misc/intvcmp2.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2015 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/intvcomp.cc b/spot/misc/intvcomp.cc index 65059f040..b400d230c 100644 --- a/spot/misc/intvcomp.cc +++ b/spot/misc/intvcomp.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/intvcomp.hh b/spot/misc/intvcomp.hh index 8bafc1527..64af1a6c5 100644 --- a/spot/misc/intvcomp.hh +++ b/spot/misc/intvcomp.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2015 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/ltstr.hh b/spot/misc/ltstr.hh index 15f7d11ef..9ecdbb875 100644 --- a/spot/misc/ltstr.hh +++ b/spot/misc/ltstr.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE) -// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/memusage.cc b/spot/misc/memusage.cc index 968096d59..cec5eda1d 100644 --- a/spot/misc/memusage.cc +++ b/spot/misc/memusage.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). -// Copyright (C) 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/memusage.hh b/spot/misc/memusage.hh index 0185941dc..37603944b 100644 --- a/spot/misc/memusage.hh +++ b/spot/misc/memusage.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). -// Copyright (C) 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/misc/minato.cc b/spot/misc/minato.cc index 331eeaa1e..53e23690e 100644 --- a/spot/misc/minato.cc +++ b/spot/misc/minato.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2013-2015, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/minato.hh b/spot/misc/minato.hh index ff1e1f8b1..26bb631c8 100644 --- a/spot/misc/minato.hh +++ b/spot/misc/minato.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2013-2015, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/mspool.hh b/spot/misc/mspool.hh index c2eb0428b..fd58669c2 100644 --- a/spot/misc/mspool.hh +++ b/spot/misc/mspool.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2015-2016, 2018, 2022 Laboratoire de -// Recherche et Developpement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/optionmap.cc b/spot/misc/optionmap.cc index 8be8d1adc..e4d56c4c1 100644 --- a/spot/misc/optionmap.cc +++ b/spot/misc/optionmap.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2013-2016, 2018, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/optionmap.hh b/spot/misc/optionmap.hh index 11ec8c456..6eb106071 100644 --- a/spot/misc/optionmap.hh +++ b/spot/misc/optionmap.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2016-2017, 2022 Laboratoire de Recherche -// et Developpement de l'Epita (LRDE) -// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/random.cc b/spot/misc/random.cc index 8bbe1a67f..da48f4dc6 100644 --- a/spot/misc/random.cc +++ b/spot/misc/random.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2015, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/misc/random.hh b/spot/misc/random.hh index be0db2c1d..0eda68bdb 100644 --- a/spot/misc/random.hh +++ b/spot/misc/random.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/satsolver.cc b/spot/misc/satsolver.cc index 3ef31dbb8..9180b7f7c 100644 --- a/spot/misc/satsolver.cc +++ b/spot/misc/satsolver.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/satsolver.hh b/spot/misc/satsolver.hh index 3b5bedccd..83d24d86c 100644 --- a/spot/misc/satsolver.hh +++ b/spot/misc/satsolver.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2017-2018, 2020, 2022 Laboratoire de Recherche -// et Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/timer.cc b/spot/misc/timer.cc index 1004aa19b..51541a8fd 100644 --- a/spot/misc/timer.cc +++ b/spot/misc/timer.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2013, 2014 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/timer.hh b/spot/misc/timer.hh index 9a7d754c3..e2a607376 100644 --- a/spot/misc/timer.hh +++ b/spot/misc/timer.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2012, 2013, 2014, 2015, 2016, 2022 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/tmpfile.cc b/spot/misc/tmpfile.cc index b068d2abe..154ee603c 100644 --- a/spot/misc/tmpfile.cc +++ b/spot/misc/tmpfile.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2017-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/tmpfile.hh b/spot/misc/tmpfile.hh index 8ae6452ac..125b4c18f 100644 --- a/spot/misc/tmpfile.hh +++ b/spot/misc/tmpfile.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/misc/trival.hh b/spot/misc/trival.hh index 472717f1a..c7675aa29 100644 --- a/spot/misc/trival.hh +++ b/spot/misc/trival.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018-2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/version.cc b/spot/misc/version.cc index 8daa1d2ad..de74474bd 100644 --- a/spot/misc/version.cc +++ b/spot/misc/version.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/misc/version.hh b/spot/misc/version.hh index f5d67b8e5..41b8f6962 100644 --- a/spot/misc/version.hh +++ b/spot/misc/version.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/parseaut/Makefile.am b/spot/parseaut/Makefile.am index 8d34c5605..9c08cd619 100644 --- a/spot/parseaut/Makefile.am +++ b/spot/parseaut/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013, 2014, 2018 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/parseaut/fmterror.cc b/spot/parseaut/fmterror.cc index 49db70b41..babd775e6 100644 --- a/spot/parseaut/fmterror.cc +++ b/spot/parseaut/fmterror.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 328977f2b..865bf6afd 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1,6 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2014-2023 Laboratoire de Recherche et Développement -** de l'Epita (LRDE). +** Copyright (C) by the Spot authors, see the AUTHORS file for details. ** ** This file is part of Spot, a model checking library. ** diff --git a/spot/parseaut/parsedecl.hh b/spot/parseaut/parsedecl.hh index 14fcc9958..28ce2b8fa 100644 --- a/spot/parseaut/parsedecl.hh +++ b/spot/parseaut/parsedecl.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2017, 2021 Laboratoire de Recherche et -// Développement de l'EPITA. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/parseaut/public.hh b/spot/parseaut/public.hh index 2a5cfff76..ee9a0e671 100644 --- a/spot/parseaut/public.hh +++ b/spot/parseaut/public.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2017, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index c04834975..3b3d9ab4c 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -1,6 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2014-2018, 2020, 2021, 2022 Laboratoire de Recherche et Développement -** de l'Epita (LRDE). +** Copyright (C) by the Spot authors, see the AUTHORS file for details. ** ** This file is part of Spot, a model checking library. ** diff --git a/spot/parsetl/Makefile.am b/spot/parsetl/Makefile.am index f218ca067..d67ac88ed 100644 --- a/spot/parsetl/Makefile.am +++ b/spot/parsetl/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2015, 2018, 2022 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris -## 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -## Université Pierre et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/parsetl/fmterror.cc b/spot/parsetl/fmterror.cc index eb13c9459..9cc6e55e7 100644 --- a/spot/parsetl/fmterror.cc +++ b/spot/parsetl/fmterror.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012-2013, 2015-2016, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/parsetl/parsedecl.hh b/spot/parsetl/parsedecl.hh index a8e764ec4..0a4728329 100644 --- a/spot/parsetl/parsedecl.hh +++ b/spot/parsetl/parsedecl.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2013, 2014, 2015, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) -// Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/parsetl/parsetl.yy b/spot/parsetl/parsetl.yy index 117695404..bbef42df5 100644 --- a/spot/parsetl/parsetl.yy +++ b/spot/parsetl/parsetl.yy @@ -1,9 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2009-2019, 2021, 2022 Laboratoire de Recherche et -** Développement de l'Epita (LRDE). -** Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 -** (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -** Pierre et Marie Curie. +** Copyright (C) by the Spot authors, see the AUTHORS file for details. ** ** This file is part of Spot, a model checking library. 
** diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index 33667a849..68c436d60 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -1,9 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2010-2015, 2017-2019, 2021-2022, Laboratoire de -** Recherche et Développement de l'Epita (LRDE). -** Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -** (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -** Pierre et Marie Curie. +** Copyright (C) by the Spot authors, see the AUTHORS file for details. ** ** This file is part of Spot, a model checking library. ** diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index 9a23caaa3..2dd29f541 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013-2019, 2021-2023 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/priv/accmap.hh b/spot/priv/accmap.hh index f83a58f15..b5df7fd5f 100644 --- a/spot/priv/accmap.hh +++ b/spot/priv/accmap.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2018 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/bddalloc.cc b/spot/priv/bddalloc.cc index 3ce691c0f..8d6a0f768 100644 --- a/spot/priv/bddalloc.cc +++ b/spot/priv/bddalloc.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2007, 2011, 2014, 2015, 2017, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2006, 2007 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/bddalloc.hh b/spot/priv/bddalloc.hh index 590c40d54..5634e5906 100644 --- a/spot/priv/bddalloc.hh +++ b/spot/priv/bddalloc.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2016, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/freelist.cc b/spot/priv/freelist.cc index f93c9428f..c9db1f6ae 100644 --- a/spot/priv/freelist.cc +++ b/spot/priv/freelist.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2018 Laboratoire de Recherche et Développement de -// l'Epita. -// Copyright (C) 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/freelist.hh b/spot/priv/freelist.hh index 9dee2d28d..7f6395640 100644 --- a/spot/priv/freelist.hh +++ b/spot/priv/freelist.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008,2013,2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). 
-// Copyright (C) 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/partitioned_relabel.cc b/spot/priv/partitioned_relabel.cc index f28ea5554..0e415d944 100644 --- a/spot/priv/partitioned_relabel.cc +++ b/spot/priv/partitioned_relabel.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche -// de l'Epita (LRE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/partitioned_relabel.hh b/spot/priv/partitioned_relabel.hh index cd19ffaea..27d82f406 100644 --- a/spot/priv/partitioned_relabel.hh +++ b/spot/priv/partitioned_relabel.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022 Laboratoire de Recherche -// de l'Epita (LRE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/satcommon.cc b/spot/priv/satcommon.cc index aec73d104..b79c9cbea 100644 --- a/spot/priv/satcommon.cc +++ b/spot/priv/satcommon.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/satcommon.hh b/spot/priv/satcommon.hh index 653212a92..ce3fcffde 100644 --- a/spot/priv/satcommon.hh +++ b/spot/priv/satcommon.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/trim.cc b/spot/priv/trim.cc index a5f6c9c50..3fb449b86 100644 --- a/spot/priv/trim.cc +++ b/spot/priv/trim.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018, 2019 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/trim.hh b/spot/priv/trim.hh index 8073e16e8..0f606f7d6 100644 --- a/spot/priv/trim.hh +++ b/spot/priv/trim.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/priv/weight.cc b/spot/priv/weight.cc index ec3e02baf..7a6164204 100644 --- a/spot/priv/weight.cc +++ b/spot/priv/weight.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2016, 2017, 2018 Laboratoire de Recherche -// et Developpement de l'Epita. -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library.
// diff --git a/spot/priv/weight.hh b/spot/priv/weight.hh index fb3063755..b49dee43c 100644 --- a/spot/priv/weight.hh +++ b/spot/priv/weight.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2014 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/Makefile.am b/spot/ta/Makefile.am index 12f7196b1..a5e9124da 100644 --- a/spot/ta/Makefile.am +++ b/spot/ta/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2010, 2012, 2013, 2015, 2018 Laboratoire de Recherche -## et Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/ta/ta.cc b/spot/ta/ta.cc index f69df726c..deaf5233f 100644 --- a/spot/ta/ta.cc +++ b/spot/ta/ta.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2014, 2018 Laboratoire de Recherche et -// Developpement de l Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/ta.hh b/spot/ta/ta.hh index 72b481894..cd3024ff6 100644 --- a/spot/ta/ta.hh +++ b/spot/ta/ta.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012-2017 Laboratoire de Recherche et -// Developpement de l Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/taexplicit.cc b/spot/ta/taexplicit.cc index 3c8a42cf7..17a7b6038 100644 --- a/spot/ta/taexplicit.cc +++ b/spot/ta/taexplicit.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2018 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/taexplicit.hh b/spot/ta/taexplicit.hh index 4f833329a..05906d05c 100644 --- a/spot/ta/taexplicit.hh +++ b/spot/ta/taexplicit.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2018 Laboratoire -// de Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/taproduct.cc b/spot/ta/taproduct.cc index 244ce9467..3b5a06527 100644 --- a/spot/ta/taproduct.cc +++ b/spot/ta/taproduct.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2014-2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // // This file is part of Spot, a model checking library. diff --git a/spot/ta/taproduct.hh b/spot/ta/taproduct.hh index 051e9bc79..ba0cf4cf3 100644 --- a/spot/ta/taproduct.hh +++ b/spot/ta/taproduct.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2013, 2014, 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/ta/tgta.hh b/spot/ta/tgta.hh index b997eeb3b..bed332805 100644 --- a/spot/ta/tgta.hh +++ b/spot/ta/tgta.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire -// de Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/tgtaexplicit.cc b/spot/ta/tgtaexplicit.cc index 694636583..14c8f0b14 100644 --- a/spot/ta/tgtaexplicit.cc +++ b/spot/ta/tgtaexplicit.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2012, 2014-2016, 2018 Laboratoire de -// Recherche et Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/tgtaexplicit.hh b/spot/ta/tgtaexplicit.hh index 551f0551e..f0214d606 100644 --- a/spot/ta/tgtaexplicit.hh +++ b/spot/ta/tgtaexplicit.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire -// de Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/tgtaproduct.cc b/spot/ta/tgtaproduct.cc index 17b240bc6..81fbef2db 100644 --- a/spot/ta/tgtaproduct.cc +++ b/spot/ta/tgtaproduct.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014-2018 Laboratoire de Recherche et Développement de -// l Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/ta/tgtaproduct.hh b/spot/ta/tgtaproduct.hh index 46c201476..8b6da0088 100644 --- a/spot/ta/tgtaproduct.hh +++ b/spot/ta/tgtaproduct.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2013, 2014, 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/Makefile.am b/spot/taalgos/Makefile.am index 958900947..45f35448c 100644 --- a/spot/taalgos/Makefile.am +++ b/spot/taalgos/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2010, 2012, 2013, 2015, 2018 Laboratoire de Recherche -## et Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/taalgos/dot.cc b/spot/taalgos/dot.cc index ba0f3d517..d82e34270 100644 --- a/spot/taalgos/dot.cc +++ b/spot/taalgos/dot.cc @@ -1,7 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2014-2016, 2018-2019 Laboratoire de -// Recherche et Developpement de l Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/dot.hh b/spot/taalgos/dot.hh index 76e6d453f..f9792496a 100644 --- a/spot/taalgos/dot.hh +++ b/spot/taalgos/dot.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2013, 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/taalgos/emptinessta.cc b/spot/taalgos/emptinessta.cc index b5a9ec93d..e91852388 100644 --- a/spot/taalgos/emptinessta.cc +++ b/spot/taalgos/emptinessta.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/emptinessta.hh b/spot/taalgos/emptinessta.hh index 46a5fc64e..934b16f13 100644 --- a/spot/taalgos/emptinessta.hh +++ b/spot/taalgos/emptinessta.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2014, 2016, 2018, 2019 Laboratoire de Recherche -// et Dévelopment de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/minimize.cc b/spot/taalgos/minimize.cc index b2b12cb12..28749ecdb 100644 --- a/spot/taalgos/minimize.cc +++ b/spot/taalgos/minimize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/minimize.hh b/spot/taalgos/minimize.hh index 2d7af4741..855b426a9 100644 --- a/spot/taalgos/minimize.hh +++ b/spot/taalgos/minimize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/reachiter.cc b/spot/taalgos/reachiter.cc index 44f52e4d0..b57f99a0d 100644 --- a/spot/taalgos/reachiter.cc +++ b/spot/taalgos/reachiter.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2014-2016, 2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/reachiter.hh b/spot/taalgos/reachiter.hh index 448aac65d..733bc7b68 100644 --- a/spot/taalgos/reachiter.hh +++ b/spot/taalgos/reachiter.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2016 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/statessetbuilder.cc b/spot/taalgos/statessetbuilder.cc index 68c4046fa..563a96a92 100644 --- a/spot/taalgos/statessetbuilder.cc +++ b/spot/taalgos/statessetbuilder.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2014, 2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/statessetbuilder.hh b/spot/taalgos/statessetbuilder.hh index 245e244cb..dada7323c 100644 --- a/spot/taalgos/statessetbuilder.hh +++ b/spot/taalgos/statessetbuilder.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2013, 2014 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/stats.cc b/spot/taalgos/stats.cc index 1787f6a27..37a64ca79 100644 --- a/spot/taalgos/stats.cc +++ b/spot/taalgos/stats.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2014, 2015, 2016, 2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/stats.hh b/spot/taalgos/stats.hh index e6f9553bc..0583829be 100644 --- a/spot/taalgos/stats.hh +++ b/spot/taalgos/stats.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/tgba2ta.cc b/spot/taalgos/tgba2ta.cc index fc9073aa5..096080d6f 100644 --- a/spot/taalgos/tgba2ta.cc +++ b/spot/taalgos/tgba2ta.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2018, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/taalgos/tgba2ta.hh b/spot/taalgos/tgba2ta.hh index 50b06ff8c..99240893b 100644 --- a/spot/taalgos/tgba2ta.hh +++ b/spot/taalgos/tgba2ta.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012-2015, 2017, 2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index cdedddffd..a1f0ce104 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -1,6 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2015-2018 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 0a2c2d259..8e6cdefea 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2015, 2018, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index 42424b4ac..a4ccfdaa6 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/contain.cc b/spot/tl/contain.cc index 858afc52d..6782f6211 100644 --- a/spot/tl/contain.cc +++ b/spot/tl/contain.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2012, 2014-2016, 2018-2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2006, 2007 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/contain.hh b/spot/tl/contain.hh index 28d83172f..5fadbf1f1 100644 --- a/spot/tl/contain.hh +++ b/spot/tl/contain.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2016, 2019, 2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/declenv.cc b/spot/tl/declenv.cc index cad6466cd..4b24fdcb1 100644 --- a/spot/tl/declenv.cc +++ b/spot/tl/declenv.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2012, 2014, 2015, 2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/declenv.hh b/spot/tl/declenv.hh index 863391aa2..6d561a332 100644 --- a/spot/tl/declenv.hh +++ b/spot/tl/declenv.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2012, 2013, 2014, 2015, 2016 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/defaultenv.cc b/spot/tl/defaultenv.cc index ae3c22643..533a4dea8 100644 --- a/spot/tl/defaultenv.cc +++ b/spot/tl/defaultenv.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/defaultenv.hh b/spot/tl/defaultenv.hh index 6bd4468cb..455641b12 100644 --- a/spot/tl/defaultenv.hh +++ b/spot/tl/defaultenv.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015, 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/dot.cc b/spot/tl/dot.cc index 2bfbb43da..7d8e936f7 100644 --- a/spot/tl/dot.cc +++ b/spot/tl/dot.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2015, 2018-2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/dot.hh b/spot/tl/dot.hh index 1c1543432..efa0c0024 100644 --- a/spot/tl/dot.hh +++ b/spot/tl/dot.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/environment.hh b/spot/tl/environment.hh index a44b3582b..29d7e515a 100644 --- a/spot/tl/environment.hh +++ b/spot/tl/environment.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2012, 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/exclusive.cc b/spot/tl/exclusive.cc index 4d863eaf0..2cc22af3e 100644 --- a/spot/tl/exclusive.cc +++ b/spot/tl/exclusive.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/exclusive.hh b/spot/tl/exclusive.hh index b0310c34d..b3c1808ef 100644 --- a/spot/tl/exclusive.hh +++ b/spot/tl/exclusive.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 294c8cb5b..a1e67b475 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2021, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 7b7a5c174..3ab6dd100 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/tl/hierarchy.cc b/spot/tl/hierarchy.cc index d069fd65b..5d29202b5 100644 --- a/spot/tl/hierarchy.cc +++ b/spot/tl/hierarchy.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/hierarchy.hh b/spot/tl/hierarchy.hh index 347d175f8..ae7d5c343 100644 --- a/spot/tl/hierarchy.hh +++ b/spot/tl/hierarchy.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/length.cc b/spot/tl/length.cc index 2793a6af5..02a4c242c 100644 --- a/spot/tl/length.cc +++ b/spot/tl/length.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/length.hh b/spot/tl/length.hh index 83f34117e..cbf828c6f 100644 --- a/spot/tl/length.hh +++ b/spot/tl/length.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/ltlf.cc b/spot/tl/ltlf.cc index 8e9d3d64c..8a3e80e55 100644 --- a/spot/tl/ltlf.cc +++ b/spot/tl/ltlf.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/ltlf.hh b/spot/tl/ltlf.hh index 1042e2f65..b74b6ed7c 100644 --- a/spot/tl/ltlf.hh +++ b/spot/tl/ltlf.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/mark.cc b/spot/tl/mark.cc index fd022fb7e..b20d367b9 100644 --- a/spot/tl/mark.cc +++ b/spot/tl/mark.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2014-2015, 2018-2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/mark.hh b/spot/tl/mark.hh index ee917a440..74e34571e 100644 --- a/spot/tl/mark.hh +++ b/spot/tl/mark.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2015, 2016 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/mutation.cc b/spot/tl/mutation.cc index bf447fc92..e378adade 100644 --- a/spot/tl/mutation.cc +++ b/spot/tl/mutation.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2016, 2018-2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/mutation.hh b/spot/tl/mutation.hh index bef46345f..1d12a0476 100644 --- a/spot/tl/mutation.hh +++ b/spot/tl/mutation.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/nenoform.cc b/spot/tl/nenoform.cc index 8705960df..3779048a5 100644 --- a/spot/tl/nenoform.cc +++ b/spot/tl/nenoform.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2013, 2015, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/nenoform.hh b/spot/tl/nenoform.hh index 3522e4ca1..c05d77f63 100644 --- a/spot/tl/nenoform.hh +++ b/spot/tl/nenoform.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2013, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/parse.hh b/spot/tl/parse.hh index f42aa8f6f..5907d2756 100644 --- a/spot/tl/parse.hh +++ b/spot/tl/parse.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Laboratoire -// de Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/print.cc b/spot/tl/print.cc index 57c7f6a5f..044f83d94 100644 --- a/spot/tl/print.cc +++ b/spot/tl/print.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2010, 2012-2016, 2018, 2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE) -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/tl/print.hh b/spot/tl/print.hh index 07d475be7..ce3e50879 100644 --- a/spot/tl/print.hh +++ b/spot/tl/print.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index 3157f6fe8..21e6b61b3 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2016, 2018-2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index 2811c18ac..99b664b00 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2016, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index 7fe94842c..1a376f11b 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/relabel.hh b/spot/tl/relabel.hh index 5d076f10c..1f8e32f22 100644 --- a/spot/tl/relabel.hh +++ b/spot/tl/relabel.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015, 2019, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/remove_x.cc b/spot/tl/remove_x.cc index 4ab27d1d7..06f48179c 100644 --- a/spot/tl/remove_x.cc +++ b/spot/tl/remove_x.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/remove_x.hh b/spot/tl/remove_x.hh index 2171077a7..fc6b1bad4 100644 --- a/spot/tl/remove_x.hh +++ b/spot/tl/remove_x.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/tl/simplify.cc b/spot/tl/simplify.cc index 4eac97282..c670fc730 100644 --- a/spot/tl/simplify.cc +++ b/spot/tl/simplify.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/simplify.hh b/spot/tl/simplify.hh index ec102a205..72a3664a4 100644 --- a/spot/tl/simplify.hh +++ b/spot/tl/simplify.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/snf.cc b/spot/tl/snf.cc index 941ac1b10..220a62e98 100644 --- a/spot/tl/snf.cc +++ b/spot/tl/snf.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2015, 2016, 2018, 2019 Laboratoire de Recherche -// et Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/snf.hh b/spot/tl/snf.hh index 839b0496b..38e055e0a 100644 --- a/spot/tl/snf.hh +++ b/spot/tl/snf.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/sonf.cc b/spot/tl/sonf.cc index 29a319039..29b613eaa 100644 --- a/spot/tl/sonf.cc +++ b/spot/tl/sonf.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/sonf.hh b/spot/tl/sonf.hh index 37ef5d05d..4c3595321 100644 --- a/spot/tl/sonf.hh +++ b/spot/tl/sonf.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/unabbrev.cc b/spot/tl/unabbrev.cc index 1df1334a3..61391c3a7 100644 --- a/spot/tl/unabbrev.cc +++ b/spot/tl/unabbrev.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/tl/unabbrev.hh b/spot/tl/unabbrev.hh index f691e58ed..96eac566f 100644 --- a/spot/tl/unabbrev.hh +++ b/spot/tl/unabbrev.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/Makefile.am b/spot/twa/Makefile.am index f99598a8f..08bf32e72 100644 --- a/spot/twa/Makefile.am +++ b/spot/twa/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009-2016, 2018 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). 
-## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 0549ce462..e7b2a563e 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index b7817aa0b..069b9de07 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/bdddict.cc b/spot/twa/bdddict.cc index f1dc88450..e849ede7a 100644 --- a/spot/twa/bdddict.cc +++ b/spot/twa/bdddict.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2012-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/bdddict.hh b/spot/twa/bdddict.hh index f9c2ed6df..504e4b089 100644 --- a/spot/twa/bdddict.hh +++ b/spot/twa/bdddict.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/bddprint.cc b/spot/twa/bddprint.cc index ea0d84c2c..5313bfb33 100644 --- a/spot/twa/bddprint.cc +++ b/spot/twa/bddprint.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2012, 2014, 2015, 2018, 2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/bddprint.hh b/spot/twa/bddprint.hh index e748079ba..a85acb765 100644 --- a/spot/twa/bddprint.hh +++ b/spot/twa/bddprint.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twa/formula2bdd.cc b/spot/twa/formula2bdd.cc index 15434395f..540426d85 100644 --- a/spot/twa/formula2bdd.cc +++ b/spot/twa/formula2bdd.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2019, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/formula2bdd.hh b/spot/twa/formula2bdd.hh index a84d27996..b9005295c 100644 --- a/spot/twa/formula2bdd.hh +++ b/spot/twa/formula2bdd.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/fwd.hh b/spot/twa/fwd.hh index 4563f8766..839844875 100644 --- a/spot/twa/fwd.hh +++ b/spot/twa/fwd.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/taatgba.cc b/spot/twa/taatgba.cc index 1ae35ff1b..95ddc85ee 100644 --- a/spot/twa/taatgba.cc +++ b/spot/twa/taatgba.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2018 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/taatgba.hh b/spot/twa/taatgba.hh index 6a5b1c470..7a84e3884 100644 --- a/spot/twa/taatgba.hh +++ b/spot/twa/taatgba.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index 6ada4b6f4..ab4c12ef6 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014-2019, 2021, 2022, 2023 Laboratoire de Recherche et -// Developpement de l'EPITA (LRDE). -// Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index 819a90962..85ce382e3 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2013-2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index e7a875680..1b9e2f484 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index 1540692c6..30a023c68 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twaproduct.cc b/spot/twa/twaproduct.cc index caa0ad3ef..a9b324835 100644 --- a/spot/twa/twaproduct.cc +++ b/spot/twa/twaproduct.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2012, 2014-2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twa/twaproduct.hh b/spot/twa/twaproduct.hh index ec998122a..7720672e8 100644 --- a/spot/twa/twaproduct.hh +++ b/spot/twa/twaproduct.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2015, 2016, 2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index 7a09928f1..80884cdbb 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2018, 2020-2022 Laboratoire de Recherche et -## Développement de l'Epita (LRDE). -## Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 -## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -## Pierre et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index df3d05bdb..d41e2ae8a 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index 4737a80be..1fe2ffddf 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2021, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index bdbe07982..b247a4e57 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019, 2021, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index a4665aacf..e782856af 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/are_isomorphic.cc b/spot/twaalgos/are_isomorphic.cc index 77fb95741..75bf6733b 100644 --- a/spot/twaalgos/are_isomorphic.cc +++ b/spot/twaalgos/are_isomorphic.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/are_isomorphic.hh b/spot/twaalgos/are_isomorphic.hh index 5d199b430..9c00e920e 100644 --- a/spot/twaalgos/are_isomorphic.hh +++ b/spot/twaalgos/are_isomorphic.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/bfssteps.cc b/spot/twaalgos/bfssteps.cc index af722d5a6..28ab1c3e5 100644 --- a/spot/twaalgos/bfssteps.cc +++ b/spot/twaalgos/bfssteps.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/bfssteps.hh b/spot/twaalgos/bfssteps.hh index 015a41328..3c834b396 100644 --- a/spot/twaalgos/bfssteps.hh +++ b/spot/twaalgos/bfssteps.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013, 2014, 2018 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/canonicalize.cc b/spot/twaalgos/canonicalize.cc index 1feca0113..3053445a4 100644 --- a/spot/twaalgos/canonicalize.cc +++ b/spot/twaalgos/canonicalize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/canonicalize.hh b/spot/twaalgos/canonicalize.hh index f302fe4d7..079fd44ee 100644 --- a/spot/twaalgos/canonicalize.hh +++ b/spot/twaalgos/canonicalize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014 Laboratoire de Recherche et -// Developpement de l Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cleanacc.cc b/spot/twaalgos/cleanacc.cc index 913c2f10b..454b531c1 100644 --- a/spot/twaalgos/cleanacc.cc +++ b/spot/twaalgos/cleanacc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017-2020 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cleanacc.hh b/spot/twaalgos/cleanacc.hh index de637c63b..3c2c38070 100644 --- a/spot/twaalgos/cleanacc.hh +++ b/spot/twaalgos/cleanacc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017-2020 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cobuchi.cc b/spot/twaalgos/cobuchi.cc index 23d4871a0..6eacdb489 100644 --- a/spot/twaalgos/cobuchi.cc +++ b/spot/twaalgos/cobuchi.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cobuchi.hh b/spot/twaalgos/cobuchi.hh index b02c0535d..f2b5e4900 100644 --- a/spot/twaalgos/cobuchi.hh +++ b/spot/twaalgos/cobuchi.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 8db33225a..728b764d5 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2015, 2017-2021 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/complement.hh b/spot/twaalgos/complement.hh index 902f11363..4b74f27b8 100644 --- a/spot/twaalgos/complement.hh +++ b/spot/twaalgos/complement.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2015, 2017, 2019, 2022 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/complete.cc b/spot/twaalgos/complete.cc index b6ace400e..9218140d2 100644 --- a/spot/twaalgos/complete.cc +++ b/spot/twaalgos/complete.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/complete.hh b/spot/twaalgos/complete.hh index 3525904be..318f143e9 100644 --- a/spot/twaalgos/complete.hh +++ b/spot/twaalgos/complete.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2015, 2017, 2022 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/compsusp.cc b/spot/twaalgos/compsusp.cc index 0d7b624ac..859602b3f 100644 --- a/spot/twaalgos/compsusp.cc +++ b/spot/twaalgos/compsusp.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/compsusp.hh b/spot/twaalgos/compsusp.hh index 4da48f0e8..a660e81d6 100644 --- a/spot/twaalgos/compsusp.hh +++ b/spot/twaalgos/compsusp.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index 7170fbedf..3df305379 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2019, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/contains.hh b/spot/twaalgos/contains.hh index 5c30a66b9..ac3be3f1e 100644 --- a/spot/twaalgos/contains.hh +++ b/spot/twaalgos/contains.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/copy.hh b/spot/twaalgos/copy.hh index 47c762d5a..f08b94804 100644 --- a/spot/twaalgos/copy.hh +++ b/spot/twaalgos/copy.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/couvreurnew.cc b/spot/twaalgos/couvreurnew.cc index ee6130226..25797d27d 100644 --- a/spot/twaalgos/couvreurnew.cc +++ b/spot/twaalgos/couvreurnew.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/couvreurnew.hh b/spot/twaalgos/couvreurnew.hh index 18b451bff..b60bc6bca 100644 --- a/spot/twaalgos/couvreurnew.hh +++ b/spot/twaalgos/couvreurnew.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016 Laboratoire de Recherche et Developpement de l'EPITA. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cycles.cc b/spot/twaalgos/cycles.cc index 51b528584..716252cb1 100644 --- a/spot/twaalgos/cycles.cc +++ b/spot/twaalgos/cycles.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014-2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/cycles.hh b/spot/twaalgos/cycles.hh index 47c8d5ad4..bd613a1c2 100644 --- a/spot/twaalgos/cycles.hh +++ b/spot/twaalgos/cycles.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2018-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index 7cf1b262e..d4ba24db0 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dbranch.hh b/spot/twaalgos/dbranch.hh index 022c1a75b..f0f080b34 100644 --- a/spot/twaalgos/dbranch.hh +++ b/spot/twaalgos/dbranch.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index d79844b84..6e718459b 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/degen.hh b/spot/twaalgos/degen.hh index e9ae13021..b04ae7bb8 100644 --- a/spot/twaalgos/degen.hh +++ b/spot/twaalgos/degen.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2017-2020, 2022 Laboratoire de -// Recherche et Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index c87d992dd..c99933467 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/determinize.hh b/spot/twaalgos/determinize.hh index aa8196eaa..b7dbb8137 100644 --- a/spot/twaalgos/determinize.hh +++ b/spot/twaalgos/determinize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2019-2021, 2023 Laboratoire de Recherche -// et Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dot.cc b/spot/twaalgos/dot.cc index 19a638b9e..fd76e173f 100644 --- a/spot/twaalgos/dot.cc +++ b/spot/twaalgos/dot.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2014-2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dot.hh b/spot/twaalgos/dot.hh index 1ef8c3e27..fbf63e641 100644 --- a/spot/twaalgos/dot.hh +++ b/spot/twaalgos/dot.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire de Recherche -// et Developpement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index c4bf3d1bc..bc556d264 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2021-2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dtbasat.hh b/spot/twaalgos/dtbasat.hh index 79f096a1f..f4fa1e20f 100644 --- a/spot/twaalgos/dtbasat.hh +++ b/spot/twaalgos/dtbasat.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2015, 2021 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 2ecf38fd1..9aee79d57 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2023 Laboratoire de Recherche -// et Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dtwasat.hh b/spot/twaalgos/dtwasat.hh index ac5837741..f51ca1405 100644 --- a/spot/twaalgos/dtwasat.hh +++ b/spot/twaalgos/dtwasat.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index 1b60a0d17..4e2261e12 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/dualize.hh b/spot/twaalgos/dualize.hh index 53c600dab..d84be8173 100644 --- a/spot/twaalgos/dualize.hh +++ b/spot/twaalgos/dualize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/emptiness.cc b/spot/twaalgos/emptiness.cc index ef8890f95..d3f05ca38 100644 --- a/spot/twaalgos/emptiness.cc +++ b/spot/twaalgos/emptiness.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019, 2021, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/emptiness.hh b/spot/twaalgos/emptiness.hh index 66bf8ca56..cd9b40c3f 100644 --- a/spot/twaalgos/emptiness.hh +++ b/spot/twaalgos/emptiness.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2018, 2020-2021, 2023 Laboratoire de -// Recherche et Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/emptiness_stats.hh b/spot/twaalgos/emptiness_stats.hh index c56915efa..936929bc3 100644 --- a/spot/twaalgos/emptiness_stats.hh +++ b/spot/twaalgos/emptiness_stats.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2017 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 1e397106f..1b6b54f01 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2023 Laboratoire de Recherche et Développement -// de l'Epita. IMDEA Software Institute. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/forq_contains.hh b/spot/twaalgos/forq_contains.hh index 9e7c792af..f880aeb3c 100644 --- a/spot/twaalgos/forq_contains.hh +++ b/spot/twaalgos/forq_contains.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2023 Laboratoire de Recherche et Développement -// de l'Epita. 
IMDEA Software Institute. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index add0926fe..4850e07b2 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index d4937e46c..c376304be 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index 77597db9f..63ee7ce24 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2023 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/genem.hh b/spot/twaalgos/genem.hh index 3fefcdc77..0a6de1040 100644 --- a/spot/twaalgos/genem.hh +++ b/spot/twaalgos/genem.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2022 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gfguarantee.cc b/spot/twaalgos/gfguarantee.cc index f25c378c4..8a4477a7a 100644 --- a/spot/twaalgos/gfguarantee.cc +++ b/spot/twaalgos/gfguarantee.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2021, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gfguarantee.hh b/spot/twaalgos/gfguarantee.hh index 32edae439..204eca98d 100644 --- a/spot/twaalgos/gfguarantee.hh +++ b/spot/twaalgos/gfguarantee.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/Makefile.am b/spot/twaalgos/gtec/Makefile.am index 98198e6ef..9463d592c 100644 --- a/spot/twaalgos/gtec/Makefile.am +++ b/spot/twaalgos/gtec/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011, 2013, 2014, 2016, 2018 Laboratoire de Recherche -## et Developpement de l'Epita (LRDE). -## Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. 
## diff --git a/spot/twaalgos/gtec/ce.cc b/spot/twaalgos/gtec/ce.cc index 9b167d00c..846d55532 100644 --- a/spot/twaalgos/gtec/ce.cc +++ b/spot/twaalgos/gtec/ce.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2011, 2013-2016, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/ce.hh b/spot/twaalgos/gtec/ce.hh index 4509f0bb6..adccb08ab 100644 --- a/spot/twaalgos/gtec/ce.hh +++ b/spot/twaalgos/gtec/ce.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/gtec.cc b/spot/twaalgos/gtec/gtec.cc index 12eba32c8..e50e0b7ad 100644 --- a/spot/twaalgos/gtec/gtec.cc +++ b/spot/twaalgos/gtec/gtec.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011, 2014-2016, 2018-2020 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/gtec.hh b/spot/twaalgos/gtec/gtec.hh index a9e3e8292..e2e3f92c7 100644 --- a/spot/twaalgos/gtec/gtec.hh +++ b/spot/twaalgos/gtec/gtec.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2013-2016, 2018-2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/sccstack.cc b/spot/twaalgos/gtec/sccstack.cc index eb2c5e73e..84b2a9dab 100644 --- a/spot/twaalgos/gtec/sccstack.cc +++ b/spot/twaalgos/gtec/sccstack.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2018 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/sccstack.hh b/spot/twaalgos/gtec/sccstack.hh index 7285cafab..d4ae65b37 100644 --- a/spot/twaalgos/gtec/sccstack.hh +++ b/spot/twaalgos/gtec/sccstack.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). 
-// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/status.cc b/spot/twaalgos/gtec/status.cc index d9b6d8fc8..5ac07365d 100644 --- a/spot/twaalgos/gtec/status.cc +++ b/spot/twaalgos/gtec/status.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gtec/status.hh b/spot/twaalgos/gtec/status.hh index 708bf8631..970059fcc 100644 --- a/spot/twaalgos/gtec/status.hh +++ b/spot/twaalgos/gtec/status.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gv04.cc b/spot/twaalgos/gv04.cc index fceec436a..cdaebcc4a 100644 --- a/spot/twaalgos/gv04.cc +++ b/spot/twaalgos/gv04.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2010, 2011, 2013-2020 Laboratoire de -// recherche et développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/gv04.hh b/spot/twaalgos/gv04.hh index 4bef9bff1..695c0e983 100644 --- a/spot/twaalgos/gv04.hh +++ b/spot/twaalgos/gv04.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index c8e7b13b9..0f2a8e8b1 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2023 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 441b9ed16..70e2c98c6 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/iscolored.cc b/spot/twaalgos/iscolored.cc index 73a0dad56..f6b45b554 100644 --- a/spot/twaalgos/iscolored.cc +++ b/spot/twaalgos/iscolored.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/iscolored.hh b/spot/twaalgos/iscolored.hh index 7f53d7990..826afed78 100644 --- a/spot/twaalgos/iscolored.hh +++ b/spot/twaalgos/iscolored.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017 Laboratoire de Recherche et Développement de l'Epita -// (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isdet.cc b/spot/twaalgos/isdet.cc index 864a9315c..485998efe 100644 --- a/spot/twaalgos/isdet.cc +++ b/spot/twaalgos/isdet.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isdet.hh b/spot/twaalgos/isdet.hh index 2717c877f..7015391e6 100644 --- a/spot/twaalgos/isdet.hh +++ b/spot/twaalgos/isdet.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isunamb.cc b/spot/twaalgos/isunamb.cc index 01497edcc..22fc0cdd8 100644 --- a/spot/twaalgos/isunamb.cc +++ b/spot/twaalgos/isunamb.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015-2018 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isunamb.hh b/spot/twaalgos/isunamb.hh index 4ec9c48f8..c63d9b3d2 100644 --- a/spot/twaalgos/isunamb.hh +++ b/spot/twaalgos/isunamb.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2018 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isweakscc.cc b/spot/twaalgos/isweakscc.cc index ff2254302..6f4502e30 100644 --- a/spot/twaalgos/isweakscc.cc +++ b/spot/twaalgos/isweakscc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/isweakscc.hh b/spot/twaalgos/isweakscc.hh index bc296519f..e1258c0f2 100644 --- a/spot/twaalgos/isweakscc.hh +++ b/spot/twaalgos/isweakscc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/langmap.cc b/spot/twaalgos/langmap.cc index a3a77715d..50f70d037 100644 --- a/spot/twaalgos/langmap.cc +++ b/spot/twaalgos/langmap.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/langmap.hh b/spot/twaalgos/langmap.hh index afebe811c..98e783f41 100644 --- a/spot/twaalgos/langmap.hh +++ b/spot/twaalgos/langmap.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/lbtt.cc b/spot/twaalgos/lbtt.cc index 70750eaff..cb0d1abe5 100644 --- a/spot/twaalgos/lbtt.cc +++ b/spot/twaalgos/lbtt.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/lbtt.hh b/spot/twaalgos/lbtt.hh index 5f26ecc81..5dbc39f2b 100644 --- a/spot/twaalgos/lbtt.hh +++ b/spot/twaalgos/lbtt.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/ltl2taa.cc b/spot/twaalgos/ltl2taa.cc index eaba49e92..e9405da5e 100644 --- a/spot/twaalgos/ltl2taa.cc +++ b/spot/twaalgos/ltl2taa.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2010, 2012-2016, 2018-2019, 2021 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/ltl2taa.hh b/spot/twaalgos/ltl2taa.hh index 755490334..b819b7678 100644 --- a/spot/twaalgos/ltl2taa.hh +++ b/spot/twaalgos/ltl2taa.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2013-2015, 2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 9768dfbfd..3ce8f86db 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2019, 2021-2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 8c1827490..554b8019d 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2015, 2017, 2019-2020 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/magic.cc b/spot/twaalgos/magic.cc index b8b472af7..4fb3d5b07 100644 --- a/spot/twaalgos/magic.cc +++ b/spot/twaalgos/magic.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2020 Laboratoire de recherche et -// développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/magic.hh b/spot/twaalgos/magic.hh index 744c81669..c8e3e4261 100644 --- a/spot/twaalgos/magic.hh +++ b/spot/twaalgos/magic.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/mask.cc b/spot/twaalgos/mask.cc index 5a6e6a6aa..69c27eecd 100644 --- a/spot/twaalgos/mask.cc +++ b/spot/twaalgos/mask.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/mask.hh b/spot/twaalgos/mask.hh index 8d21022d1..564622bed 100644 --- a/spot/twaalgos/mask.hh +++ b/spot/twaalgos/mask.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index fc36a42f1..89f44cc6a 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 3bdb71b73..9fd3c084e 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021-2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). 
+// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 889028e57..4628df900 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2020, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/minimize.hh b/spot/twaalgos/minimize.hh index 0b454654a..e9390c6c1 100644 --- a/spot/twaalgos/minimize.hh +++ b/spot/twaalgos/minimize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2016, 2018-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/ndfs_result.hxx b/spot/twaalgos/ndfs_result.hxx index 2e17fc3fa..c2c584e2b 100644 --- a/spot/twaalgos/ndfs_result.hxx +++ b/spot/twaalgos/ndfs_result.hxx @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2016, 2018, 2021 Laboratoire de recherche -// et développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005, 2006 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/neverclaim.cc b/spot/twaalgos/neverclaim.cc index 674cfe49a..59cb152a0 100644 --- a/spot/twaalgos/neverclaim.cc +++ b/spot/twaalgos/neverclaim.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2012, 2014-2016, 2018 Laboratoire -// de Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/neverclaim.hh b/spot/twaalgos/neverclaim.hh index a3d0e9a3c..7186db0e8 100644 --- a/spot/twaalgos/neverclaim.hh +++ b/spot/twaalgos/neverclaim.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2012, 2013, 2014, 2015 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 46eed1ff9..a54942400 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2019, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/parity.hh b/spot/twaalgos/parity.hh index 188e92483..4043affc2 100644 --- a/spot/twaalgos/parity.hh +++ b/spot/twaalgos/parity.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index cbf677414..1a1ceb9cc 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index f470dcf5b..2a162501d 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index c69ffd75d..c192d72fb 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2011, 2013-2019, 2021, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/powerset.hh b/spot/twaalgos/powerset.hh index 00f3efc51..59a798ab3 100644 --- a/spot/twaalgos/powerset.hh +++ b/spot/twaalgos/powerset.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2015, 2019 Laboratoire de Recherche et -// Développement de l'Epita. -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index dc12b34f8..e494da1bb 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2020, 2022, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/product.hh b/spot/twaalgos/product.hh index 784a3cb49..796e4c23c 100644 --- a/spot/twaalgos/product.hh +++ b/spot/twaalgos/product.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2015, 2018-2020, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/randomgraph.cc b/spot/twaalgos/randomgraph.cc index 32e624f81..b66b6df01 100644 --- a/spot/twaalgos/randomgraph.cc +++ b/spot/twaalgos/randomgraph.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2010, 2012-2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005, 2007 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/randomgraph.hh b/spot/twaalgos/randomgraph.hh index 8e8a5dd08..174f89ca8 100644 --- a/spot/twaalgos/randomgraph.hh +++ b/spot/twaalgos/randomgraph.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2015, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/randomize.cc b/spot/twaalgos/randomize.cc index c3b09a004..32765ecbe 100644 --- a/spot/twaalgos/randomize.cc +++ b/spot/twaalgos/randomize.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/randomize.hh b/spot/twaalgos/randomize.hh index faf020c27..3eb6dcd7c 100644 --- a/spot/twaalgos/randomize.hh +++ b/spot/twaalgos/randomize.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/reachiter.cc b/spot/twaalgos/reachiter.cc index a85c20005..528d62f84 100644 --- a/spot/twaalgos/reachiter.cc +++ b/spot/twaalgos/reachiter.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2013-2016, 2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/reachiter.hh b/spot/twaalgos/reachiter.hh index 044794642..f1faf8b99 100644 --- a/spot/twaalgos/reachiter.hh +++ b/spot/twaalgos/reachiter.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2009, 2011, 2013, 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index 2ba9e7d52..66c566846 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020, 2022, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/relabel.hh b/spot/twaalgos/relabel.hh index 34f7a0a41..3e963b42a 100644 --- a/spot/twaalgos/relabel.hh +++ b/spot/twaalgos/relabel.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/remfin.cc b/spot/twaalgos/remfin.cc index 0b9bb3cc9..6274c8255 100644 --- a/spot/twaalgos/remfin.cc +++ b/spot/twaalgos/remfin.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/remfin.hh b/spot/twaalgos/remfin.hh index 5b71e5f29..08cb786a4 100644 --- a/spot/twaalgos/remfin.hh +++ b/spot/twaalgos/remfin.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017, 2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 8d4be8fbc..380ba1a71 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2022, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/remprop.hh b/spot/twaalgos/remprop.hh index 09d75ffac..ab234fed9 100644 --- a/spot/twaalgos/remprop.hh +++ b/spot/twaalgos/remprop.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sbacc.cc b/spot/twaalgos/sbacc.cc index b23c95b6d..b67dc53b8 100644 --- a/spot/twaalgos/sbacc.cc +++ b/spot/twaalgos/sbacc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2021, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sbacc.hh b/spot/twaalgos/sbacc.hh index 4d91ce75a..5169b8e22 100644 --- a/spot/twaalgos/sbacc.hh +++ b/spot/twaalgos/sbacc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2021 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/sccfilter.cc b/spot/twaalgos/sccfilter.cc index 9d8f2cfca..9ec3f1c0e 100644 --- a/spot/twaalgos/sccfilter.cc +++ b/spot/twaalgos/sccfilter.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2018, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sccfilter.hh b/spot/twaalgos/sccfilter.hh index 4d34ca5d4..c3d264c59 100644 --- a/spot/twaalgos/sccfilter.hh +++ b/spot/twaalgos/sccfilter.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2018, 2023 Laboratoire de -// Recherche et Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sccinfo.cc b/spot/twaalgos/sccinfo.cc index 3abc6fcbd..947c7cd94 100644 --- a/spot/twaalgos/sccinfo.cc +++ b/spot/twaalgos/sccinfo.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sccinfo.hh b/spot/twaalgos/sccinfo.hh index dc275c517..d7aee5000 100644 --- a/spot/twaalgos/sccinfo.hh +++ b/spot/twaalgos/sccinfo.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021, 2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/se05.cc b/spot/twaalgos/se05.cc index 508a1dca1..73558fcf7 100644 --- a/spot/twaalgos/se05.cc +++ b/spot/twaalgos/se05.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/se05.hh b/spot/twaalgos/se05.hh index 268762f38..640d37f0a 100644 --- a/spot/twaalgos/se05.hh +++ b/spot/twaalgos/se05.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2019 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sepsets.cc b/spot/twaalgos/sepsets.cc index cce0c935e..264a9c128 100644 --- a/spot/twaalgos/sepsets.cc +++ b/spot/twaalgos/sepsets.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/sepsets.hh b/spot/twaalgos/sepsets.hh index 5fd7c895e..cf699776e 100644 --- a/spot/twaalgos/sepsets.hh +++ b/spot/twaalgos/sepsets.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index aa3ed2a15..bdcb21846 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/simulation.hh b/spot/twaalgos/simulation.hh index 3965f10d4..07f5d8832 100644 --- a/spot/twaalgos/simulation.hh +++ b/spot/twaalgos/simulation.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2017, 2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index 09a1a2eb0..8ac5682f3 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement -// de l'Epita. IMDEA Software Institute. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index 54490ab8b..63304db1e 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2020, 2023 Laboratoire de Recherche -// et Développement de l'Epita. IMDEA Software Institute. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stats.cc b/spot/twaalgos/stats.cc index 4a905e542..4b3b6185c 100644 --- a/spot/twaalgos/stats.cc +++ b/spot/twaalgos/stats.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2018, 2020, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stats.hh b/spot/twaalgos/stats.hh index 24353fc31..63aa3c4e9 100644 --- a/spot/twaalgos/stats.hh +++ b/spot/twaalgos/stats.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2017, 2020, 2022 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/strength.cc b/spot/twaalgos/strength.cc index 8140b1b0a..230063c38 100644 --- a/spot/twaalgos/strength.cc +++ b/spot/twaalgos/strength.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2011, 2013-2018, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/strength.hh b/spot/twaalgos/strength.hh index 809fdc1d9..c979feebe 100644 --- a/spot/twaalgos/strength.hh +++ b/spot/twaalgos/strength.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2011, 2013-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stripacc.cc b/spot/twaalgos/stripacc.cc index c85af14dc..b3825efb7 100644 --- a/spot/twaalgos/stripacc.cc +++ b/spot/twaalgos/stripacc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014-2015, 2017-2018 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stripacc.hh b/spot/twaalgos/stripacc.hh index df673d7ff..31aa254b5 100644 --- a/spot/twaalgos/stripacc.hh +++ b/spot/twaalgos/stripacc.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2014, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stutter.cc b/spot/twaalgos/stutter.cc index f9108048f..37fb0690f 100644 --- a/spot/twaalgos/stutter.cc +++ b/spot/twaalgos/stutter.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/stutter.hh b/spot/twaalgos/stutter.hh index 908f47e3c..a0a54d412 100644 --- a/spot/twaalgos/stutter.hh +++ b/spot/twaalgos/stutter.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2017, 2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sum.cc b/spot/twaalgos/sum.cc index 93476228e..51dcaf995 100644 --- a/spot/twaalgos/sum.cc +++ b/spot/twaalgos/sum.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/sum.hh b/spot/twaalgos/sum.hh index 2ad5a2e1b..1acbfe45b 100644 --- a/spot/twaalgos/sum.hh +++ b/spot/twaalgos/sum.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 11a154da3..3f5bcf505 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 2d9c0600a..d3d5d3271 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/tau03.cc b/spot/twaalgos/tau03.cc index a67001874..ec75d7bb6 100644 --- a/spot/twaalgos/tau03.cc +++ b/spot/twaalgos/tau03.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2021 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/tau03.hh b/spot/twaalgos/tau03.hh index 324379af0..a4c7ad1e6 100644 --- a/spot/twaalgos/tau03.hh +++ b/spot/twaalgos/tau03.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/tau03opt.cc b/spot/twaalgos/tau03opt.cc index 831211764..a518160fb 100644 --- a/spot/twaalgos/tau03opt.cc +++ b/spot/twaalgos/tau03opt.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/tau03opt.hh b/spot/twaalgos/tau03opt.hh index 120bc7323..2d6f529b2 100644 --- a/spot/twaalgos/tau03opt.hh +++ b/spot/twaalgos/tau03opt.hh @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index 7abec3e15..960f4daaa 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018-2020, 2022-2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/toparity.hh b/spot/twaalgos/toparity.hh index d82403aa5..5283b637a 100644 --- a/spot/twaalgos/toparity.hh +++ b/spot/twaalgos/toparity.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/totgba.cc b/spot/twaalgos/totgba.cc index 4e2c6c802..35759d577 100644 --- a/spot/twaalgos/totgba.cc +++ b/spot/twaalgos/totgba.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/totgba.hh b/spot/twaalgos/totgba.hh index a011b920a..51af4d9f6 100644 --- a/spot/twaalgos/totgba.hh +++ b/spot/twaalgos/totgba.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018-2019 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/toweak.cc b/spot/twaalgos/toweak.cc index ae7a0f58a..e3a0264be 100644 --- a/spot/twaalgos/toweak.cc +++ b/spot/twaalgos/toweak.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2021, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/toweak.hh b/spot/twaalgos/toweak.hh index 27661cb56..7b4612432 100644 --- a/spot/twaalgos/toweak.hh +++ b/spot/twaalgos/toweak.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2019 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 9bc6690d6..99c15ca46 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020-2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index 4e534b1f7..d1dbe5e27 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020, 2022, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/spot/twaalgos/word.cc b/spot/twaalgos/word.cc index 1f24f9537..bc9f5a52b 100644 --- a/spot/twaalgos/word.cc +++ b/spot/twaalgos/word.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index 979a4070b..68538d2d3 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2016, 2018-2019, 2023 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/zlktree.cc b/spot/twaalgos/zlktree.cc index da6a5e208..521d9630a 100644 --- a/spot/twaalgos/zlktree.cc +++ b/spot/twaalgos/zlktree.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index 6d8b3270c..e0ec2c3e3 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube/Makefile.am b/spot/twacube/Makefile.am index 1db234c96..27704fc57 100644 --- a/spot/twacube/Makefile.am +++ b/spot/twacube/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire -## de Recherche et Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/twacube/cube.cc b/spot/twacube/cube.cc index 0293288dd..b901c94a5 100644 --- a/spot/twacube/cube.cc +++ b/spot/twacube/cube.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2020, 2021 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube/cube.hh b/spot/twacube/cube.hh index dcdecf6ab..fc46249f0 100644 --- a/spot/twacube/cube.hh +++ b/spot/twacube/cube.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube/fwd.hh b/spot/twacube/fwd.hh index 3ea15ceea..893d80993 100644 --- a/spot/twacube/fwd.hh +++ b/spot/twacube/fwd.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/spot/twacube/twacube.cc b/spot/twacube/twacube.cc index 39cf7a17c..3d03039be 100644 --- a/spot/twacube/twacube.cc +++ b/spot/twacube/twacube.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2020 Laboratoire de Recherche et -// Developpement de l'EPITA (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube/twacube.hh b/spot/twacube/twacube.hh index cc55aac95..37ab9abcf 100644 --- a/spot/twacube/twacube.hh +++ b/spot/twacube/twacube.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube_algos/Makefile.am b/spot/twacube_algos/Makefile.am index 310ad3307..8cf06f2ee 100644 --- a/spot/twacube_algos/Makefile.am +++ b/spot/twacube_algos/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire -## de Recherche et Développement de l'Epita (LRDE). -## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -## département Systèmes Répartis Coopératifs (SRC), Université Pierre -## et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/spot/twacube_algos/convert.cc b/spot/twacube_algos/convert.cc index bdbca1ca9..95ebd0021 100644 --- a/spot/twacube_algos/convert.cc +++ b/spot/twacube_algos/convert.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018, 2020-2021 Laboratoire de Recherche -// et Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/spot/twacube_algos/convert.hh b/spot/twacube_algos/convert.hh index ba739f470..6f120eb63 100644 --- a/spot/twacube_algos/convert.hh +++ b/spot/twacube_algos/convert.hh @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2020, 2022 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/Makefile.am b/tests/Makefile.am index b26f128ed..f6c303dd5 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,9 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009-2023 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). -## Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 -## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -## Pierre et Marie Curie. +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. ## diff --git a/tests/core/385.test b/tests/core/385.test index b680c071f..fc3506bdf 100755 --- a/tests/core/385.test +++ b/tests/core/385.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/500.test b/tests/core/500.test index 60d5c6365..b86d41bc2 100755 --- a/tests/core/500.test +++ b/tests/core/500.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/521.test b/tests/core/521.test index 002ab1ca2..11189bdc2 100755 --- a/tests/core/521.test +++ b/tests/core/521.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/522.test b/tests/core/522.test index 3f1596514..9d5ccfec6 100755 --- a/tests/core/522.test +++ b/tests/core/522.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/acc.cc b/tests/core/acc.cc index eb5fe20d1..d1107f5ca 100644 --- a/tests/core/acc.cc +++ b/tests/core/acc.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*-x -// Copyright (C) 2014, 2015, 2017, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/acc.test b/tests/core/acc.test index aac7874ab..aed8ab09e 100755 --- a/tests/core/acc.test +++ b/tests/core/acc.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2017, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/acc2.test b/tests/core/acc2.test index 2d885749e..a56851a93 100755 --- a/tests/core/acc2.test +++ b/tests/core/acc2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/acc_word.test b/tests/core/acc_word.test index 4fc5a5602..767be5834 100644 --- a/tests/core/acc_word.test +++ b/tests/core/acc_word.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/accsimpl.test b/tests/core/accsimpl.test index 1dbf06095..6844676ad 100755 --- a/tests/core/accsimpl.test +++ b/tests/core/accsimpl.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018, 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/alternating.test b/tests/core/alternating.test index 6706eddc8..152e09c40 100755 --- a/tests/core/alternating.test +++ b/tests/core/alternating.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/autcross.test b/tests/core/autcross.test index b3d27ec0a..8b720a085 100755 --- a/tests/core/autcross.test +++ b/tests/core/autcross.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/autcross2.test b/tests/core/autcross2.test index 942aea94a..f0055487d 100755 --- a/tests/core/autcross2.test +++ b/tests/core/autcross2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2019 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/autcross3.test b/tests/core/autcross3.test index 2062d7faf..63f943729 100755 --- a/tests/core/autcross3.test +++ b/tests/core/autcross3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/autcross4.test b/tests/core/autcross4.test index 13f770d1c..fa7240002 100755 --- a/tests/core/autcross4.test +++ b/tests/core/autcross4.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/autcross5.test b/tests/core/autcross5.test index 1dc4a29d5..25a05b0ef 100755 --- a/tests/core/autcross5.test +++ b/tests/core/autcross5.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/babiak.test b/tests/core/babiak.test index 95b8affae..26b171f54 100755 --- a/tests/core/babiak.test +++ b/tests/core/babiak.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2013 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/bare.test b/tests/core/bare.test index 719fdd663..73fec8a72 100755 --- a/tests/core/bare.test +++ b/tests/core/bare.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/basimul.test b/tests/core/basimul.test index 5b423d67a..d33eb8f09 100755 --- a/tests/core/basimul.test +++ b/tests/core/basimul.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/bdd.test b/tests/core/bdd.test index 85d410f8d..bddce80e7 100755 --- a/tests/core/bdd.test +++ b/tests/core/bdd.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/bdddict.cc b/tests/core/bdddict.cc index 7763ff0d7..9638d1fd2 100644 --- a/tests/core/bdddict.cc +++ b/tests/core/bdddict.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*-x -// Copyright (C) 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/bdddict.test b/tests/core/bdddict.test index 3dd380af1..47245ba87 100755 --- a/tests/core/bdddict.test +++ b/tests/core/bdddict.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/bitvect.cc b/tests/core/bitvect.cc index cf74dcddf..1fbe00baf 100644 --- a/tests/core/bitvect.cc +++ b/tests/core/bitvect.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2016, 2018, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/bitvect.test b/tests/core/bitvect.test index c76b3808f..ae9af36b4 100755 --- a/tests/core/bitvect.test +++ b/tests/core/bitvect.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015, 2016, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/bricks.cc b/tests/core/bricks.cc index 615bd0acc..1c173783c 100644 --- a/tests/core/bricks.cc +++ b/tests/core/bricks.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2020 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/bricks.test b/tests/core/bricks.test index 37ff57cb0..a87c1c6c6 100644 --- a/tests/core/bricks.test +++ b/tests/core/bricks.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/checkpsl.cc b/tests/core/checkpsl.cc index 620001255..2b4f05dc1 100644 --- a/tests/core/checkpsl.cc +++ b/tests/core/checkpsl.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2016, 2018-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/checkta.cc b/tests/core/checkta.cc index 111aa4d06..88cdd9edf 100644 --- a/tests/core/checkta.cc +++ b/tests/core/checkta.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/complement.test b/tests/core/complement.test index d6d0eebd5..4c6fe37fc 100755 --- a/tests/core/complement.test +++ b/tests/core/complement.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2019, 2021, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/complementation.test b/tests/core/complementation.test index c233a5140..7c738025e 100755 --- a/tests/core/complementation.test +++ b/tests/core/complementation.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2011, 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/complete.test b/tests/core/complete.test index 1d1be12bc..f64443ec4 100755 --- a/tests/core/complete.test +++ b/tests/core/complete.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017, 2022, 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/consterm.cc b/tests/core/consterm.cc index 67d563b8e..4ce350f6d 100644 --- a/tests/core/consterm.cc +++ b/tests/core/consterm.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2012, 2015-2016, 2018-2019 Laboratoire de Recherche -// et Dévelopement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/consterm.test b/tests/core/consterm.test index f95a1710f..5fdba2a55 100755 --- a/tests/core/consterm.test +++ b/tests/core/consterm.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2015 Laboratoire de Recherche et Devéloppement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/cube.cc b/tests/core/cube.cc index 7a93c3c87..bcd5e8c4c 100644 --- a/tests/core/cube.cc +++ b/tests/core/cube.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018, 2020-2021 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. 
// diff --git a/tests/core/cube.test b/tests/core/cube.test index 5e38d30c3..9298f1103 100755 --- a/tests/core/cube.test +++ b/tests/core/cube.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2016, 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/cycles.test b/tests/core/cycles.test index dbaee144e..2c26b50fb 100755 --- a/tests/core/cycles.test +++ b/tests/core/cycles.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dbacomp.test b/tests/core/dbacomp.test index 43d59d53b..7ebb1e022 100755 --- a/tests/core/dbacomp.test +++ b/tests/core/dbacomp.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dca.test b/tests/core/dca.test index 8d9f3fcd6..d0e0c09bb 100644 --- a/tests/core/dca.test +++ b/tests/core/dca.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dca2.test b/tests/core/dca2.test index 74a24b2e7..6d5edfc8d 100755 --- a/tests/core/dca2.test +++ b/tests/core/dca2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/defs.in b/tests/core/defs.in index d06a3b67d..cf1d82ac1 100644 --- a/tests/core/defs.in +++ b/tests/core/defs.in @@ -1,9 +1,5 @@ # -*- mode: shell-script; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2013, 2015, 2022 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/degendet.test b/tests/core/degendet.test index bb8c2d8c4..cd07eac4b 100755 --- a/tests/core/degendet.test +++ b/tests/core/degendet.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2015-2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/degenid.test b/tests/core/degenid.test index b13162685..b71527fed 100755 --- a/tests/core/degenid.test +++ b/tests/core/degenid.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2014, 2015, 2017, 2018 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). 
+# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/degenlskip.test b/tests/core/degenlskip.test index bbdf0410f..0eedb6df8 100755 --- a/tests/core/degenlskip.test +++ b/tests/core/degenlskip.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/degenscc.test b/tests/core/degenscc.test index b2bd74fc5..ce08952b8 100644 --- a/tests/core/degenscc.test +++ b/tests/core/degenscc.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/det.test b/tests/core/det.test index d6f76226e..f3249ca27 100755 --- a/tests/core/det.test +++ b/tests/core/det.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dfs.test b/tests/core/dfs.test index 5a9d58022..85cd1dd6e 100755 --- a/tests/core/dfs.test +++ b/tests/core/dfs.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -# Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -# Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dnfstreett.test b/tests/core/dnfstreett.test index ad289cc1e..1b3e1fe81 100644 --- a/tests/core/dnfstreett.test +++ b/tests/core/dnfstreett.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dot2tex.test b/tests/core/dot2tex.test index e7a957166..14ea2b78a 100755 --- a/tests/core/dot2tex.test +++ b/tests/core/dot2tex.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dra2dba.test b/tests/core/dra2dba.test index b2bf0faf4..5a194de1b 100755 --- a/tests/core/dra2dba.test +++ b/tests/core/dra2dba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2017, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/dstar.test b/tests/core/dstar.test index c745ced4e..fdc31ce38 100755 --- a/tests/core/dstar.test +++ b/tests/core/dstar.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2016, 2018, 2020, 2022, 2023 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dualize.test b/tests/core/dualize.test index da5a65747..c0ba567d4 100755 --- a/tests/core/dualize.test +++ b/tests/core/dualize.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/dupexp.test b/tests/core/dupexp.test index e3730631c..46e44a40a 100755 --- a/tests/core/dupexp.test +++ b/tests/core/dupexp.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/emptchk.cc b/tests/core/emptchk.cc index 1a7e2519c..9d5a55a38 100644 --- a/tests/core/emptchk.cc +++ b/tests/core/emptchk.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2016, 2018-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/emptchk.test b/tests/core/emptchk.test index 1c0c5a705..0050f7805 100755 --- a/tests/core/emptchk.test +++ b/tests/core/emptchk.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2008, 2009, 2010, 2014 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -# Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -# Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/emptchke.test b/tests/core/emptchke.test index 954245834..02826a7e8 100755 --- a/tests/core/emptchke.test +++ b/tests/core/emptchke.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -# Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -# Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/emptchkr.test b/tests/core/emptchkr.test index b3604646e..1eed0f6c5 100755 --- a/tests/core/emptchkr.test +++ b/tests/core/emptchkr.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2015 Laboratoire de Recherche de -# Développement de l'Epita (LRDE). 
-# Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/equals.test b/tests/core/equals.test index a67c4b1ef..b36f9edc3 100755 --- a/tests/core/equals.test +++ b/tests/core/equals.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2015, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/equalsf.cc b/tests/core/equalsf.cc index 54bfe5f68..0d0070a18 100644 --- a/tests/core/equalsf.cc +++ b/tests/core/equalsf.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2016, 2018-2019 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de -// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/eventuniv.test b/tests/core/eventuniv.test index 223b811bc..da1b1308e 100755 --- a/tests/core/eventuniv.test +++ b/tests/core/eventuniv.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2014, 2017 Laboratoire de Recherche et -# Developpement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/exclusive-ltl.test b/tests/core/exclusive-ltl.test index 019533bb7..ddfc24ae5 100755 --- a/tests/core/exclusive-ltl.test +++ b/tests/core/exclusive-ltl.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/exclusive-tgba.test b/tests/core/exclusive-tgba.test index 87f57d78d..354a21799 100755 --- a/tests/core/exclusive-tgba.test +++ b/tests/core/exclusive-tgba.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2016, 2018-2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/explpro2.test b/tests/core/explpro2.test index c57826585..ee6c531e2 100755 --- a/tests/core/explpro2.test +++ b/tests/core/explpro2.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005, 2006, 2008, 2009 Laboratoire -# d'Informatique de Paris 6 (LIP6), département Systèmes Répartis -# Coopératifs (SRC), Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/explpro3.test b/tests/core/explpro3.test index e682e5d3b..b23ee7fba 100755 --- a/tests/core/explpro3.test +++ b/tests/core/explpro3.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2014 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/explpro4.test b/tests/core/explpro4.test index a62444aa8..242d8fcfc 100755 --- a/tests/core/explpro4.test +++ b/tests/core/explpro4.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/explprod.test b/tests/core/explprod.test index e994e2f1c..2b7720b68 100755 --- a/tests/core/explprod.test +++ b/tests/core/explprod.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2008, 2009, 2013, 2014, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -# Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -# Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/explsum.test b/tests/core/explsum.test index e589e1dc5..54e3a7140 100755 --- a/tests/core/explsum.test +++ b/tests/core/explsum.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/format.test b/tests/core/format.test index da78e3e7e..f59c2bf92 100644 --- a/tests/core/format.test +++ b/tests/core/format.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/full.test b/tests/core/full.test index a1df7b83d..17cddf539 100644 --- a/tests/core/full.test +++ b/tests/core/full.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/gamehoa.test b/tests/core/gamehoa.test index 0bdb81a40..f50602e34 100755 --- a/tests/core/gamehoa.test +++ b/tests/core/gamehoa.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/genaut.test b/tests/core/genaut.test index f364569e1..c69b87f2a 100644 --- a/tests/core/genaut.test +++ b/tests/core/genaut.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017-2020, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/genltl.test b/tests/core/genltl.test index ce5584a21..add4b1d99 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/gragsa.test b/tests/core/gragsa.test index 45425897c..9bdcf28ef 100755 --- a/tests/core/gragsa.test +++ b/tests/core/gragsa.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2018 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/graph.cc b/tests/core/graph.cc index caaf073c0..3a89c672b 100644 --- a/tests/core/graph.cc +++ b/tests/core/graph.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/graph.test b/tests/core/graph.test index 425b47a09..4ef85deb0 100755 --- a/tests/core/graph.test +++ b/tests/core/graph.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2016 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/hierarchy.test b/tests/core/hierarchy.test index 0c962d211..330b49070 100755 --- a/tests/core/hierarchy.test +++ b/tests/core/hierarchy.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/highlightstate.test b/tests/core/highlightstate.test index 879ee2ce8..967b4781b 100755 --- a/tests/core/highlightstate.test +++ b/tests/core/highlightstate.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ikwiad.cc b/tests/core/ikwiad.cc index 026706aaf..a46168651 100644 --- a/tests/core/ikwiad.cc +++ b/tests/core/ikwiad.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2007-2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2003-2007 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/tests/core/included.test b/tests/core/included.test index e9f2cea08..922f42836 100755 --- a/tests/core/included.test +++ b/tests/core/included.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2022, 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/intvcmp2.cc b/tests/core/intvcmp2.cc index 8024673af..12335a7bf 100644 --- a/tests/core/intvcmp2.cc +++ b/tests/core/intvcmp2.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2015, 2018 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/intvcomp.cc b/tests/core/intvcomp.cc index b4d61de76..93aa5f382 100644 --- a/tests/core/intvcomp.cc +++ b/tests/core/intvcomp.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2015, 2018 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/intvcomp.test b/tests/core/intvcomp.test index 97e3cc36b..dc5b638f5 100755 --- a/tests/core/intvcomp.test +++ b/tests/core/intvcomp.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/isomorph.test b/tests/core/isomorph.test index a331f7399..8536f2ec0 100755 --- a/tests/core/isomorph.test +++ b/tests/core/isomorph.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/isop.test b/tests/core/isop.test index cdd35d003..515f45252 100755 --- a/tests/core/isop.test +++ b/tests/core/isop.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/kind.cc b/tests/core/kind.cc index 1dfa7830b..00ca22b05 100644 --- a/tests/core/kind.cc +++ b/tests/core/kind.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010, 2012, 2015, 2016, 2018, 2019 Laboratoire de -// Recherche et Developement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/kind.test b/tests/core/kind.test index d6413211a..93c7dcef4 100755 --- a/tests/core/kind.test +++ b/tests/core/kind.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010-2012, 2015, 2017, 2019, 2021 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/kripke.test b/tests/core/kripke.test index 958386e5c..93991529f 100755 --- a/tests/core/kripke.test +++ b/tests/core/kripke.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/kripkecat.cc b/tests/core/kripkecat.cc index d3ea0fd86..41874f5ad 100644 --- a/tests/core/kripkecat.cc +++ b/tests/core/kripkecat.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2014, 2015, 2018 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/latex.test b/tests/core/latex.test index bd27964c5..4101e25bb 100755 --- a/tests/core/latex.test +++ b/tests/core/latex.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/lbt.test b/tests/core/lbt.test index 73ecaa54c..9199ba30f 100755 --- a/tests/core/lbt.test +++ b/tests/core/lbt.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2016, 2017, 2019, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/lbttparse.test b/tests/core/lbttparse.test index 8bcf380fe..f186ad1fc 100755 --- a/tests/core/lbttparse.test +++ b/tests/core/lbttparse.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/length.cc b/tests/core/length.cc index f49de9358..665bfa50c 100644 --- a/tests/core/length.cc +++ b/tests/core/length.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2015-2016, 2018-2019 Laboratoire de Recherche -// et Developement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/length.test b/tests/core/length.test index 443a11910..b60a475c0 100755 --- a/tests/core/length.test +++ b/tests/core/length.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/lenient.test b/tests/core/lenient.test index 9097be40f..0c64b4cb5 100755 --- a/tests/core/lenient.test +++ b/tests/core/lenient.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/ltl2dstar.test b/tests/core/ltl2dstar.test index cf8458f7c..0e9dcae47 100755 --- a/tests/core/ltl2dstar.test +++ b/tests/core/ltl2dstar.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2018 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2dstar2.test b/tests/core/ltl2dstar2.test index 25f5f31a1..955ee3370 100755 --- a/tests/core/ltl2dstar2.test +++ b/tests/core/ltl2dstar2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015-2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2dstar3.test b/tests/core/ltl2dstar3.test index 592bd8bef..7c076fa1f 100755 --- a/tests/core/ltl2dstar3.test +++ b/tests/core/ltl2dstar3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2dstar4.test b/tests/core/ltl2dstar4.test index 55eaf6dbb..88392053f 100755 --- a/tests/core/ltl2dstar4.test +++ b/tests/core/ltl2dstar4.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2015, 2017, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2neverclaim-lbtt.test b/tests/core/ltl2neverclaim-lbtt.test index 7b773e5af..ba88e1b0a 100755 --- a/tests/core/ltl2neverclaim-lbtt.test +++ b/tests/core/ltl2neverclaim-lbtt.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2013 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2neverclaim.test b/tests/core/ltl2neverclaim.test index bb41c4e7f..4a603b509 100755 --- a/tests/core/ltl2neverclaim.test +++ b/tests/core/ltl2neverclaim.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2013 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2ta.test b/tests/core/ltl2ta.test index ec5298a8c..e132f8849 100755 --- a/tests/core/ltl2ta.test +++ b/tests/core/ltl2ta.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015, 2016 Laboratoire -# de Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2ta2.test b/tests/core/ltl2ta2.test index c73013efe..9c0104480 100755 --- a/tests/core/ltl2ta2.test +++ b/tests/core/ltl2ta2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). 
+# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2tgba.test b/tests/core/ltl2tgba.test index c3491c412..89e741e9f 100755 --- a/tests/core/ltl2tgba.test +++ b/tests/core/ltl2tgba.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2019, 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). -# Copyright (C) 2003-2004 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 370e744f9..8c0ca1dc0 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl3ba.test b/tests/core/ltl3ba.test index acc68a2c5..291502334 100755 --- a/tests/core/ltl3ba.test +++ b/tests/core/ltl3ba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2021, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltl3dra.test b/tests/core/ltl3dra.test index 25fed502d..033de4747 100755 --- a/tests/core/ltl3dra.test +++ b/tests/core/ltl3dra.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2016, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcounter.test b/tests/core/ltlcounter.test index 9d08b6c66..7d6b900cb 100755 --- a/tests/core/ltlcounter.test +++ b/tests/core/ltlcounter.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2012, 2014 Laboratoire de Recherche -# et Développement de l'EPITA (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross.test b/tests/core/ltlcross.test index 885269685..e3e6e664b 100755 --- a/tests/core/ltlcross.test +++ b/tests/core/ltlcross.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2016, 2019, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross2.test b/tests/core/ltlcross2.test index 31c4528a7..a8c76c3ac 100755 --- a/tests/core/ltlcross2.test +++ b/tests/core/ltlcross2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2017, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. 
# # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross3.test b/tests/core/ltlcross3.test index 6e19b445d..5178bf918 100755 --- a/tests/core/ltlcross3.test +++ b/tests/core/ltlcross3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross4.test b/tests/core/ltlcross4.test index f171876a9..22442b5e1 100755 --- a/tests/core/ltlcross4.test +++ b/tests/core/ltlcross4.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012-2014, 2017, 2020, 2023 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross5.test b/tests/core/ltlcross5.test index c89a7bd0b..36f48b73a 100644 --- a/tests/core/ltlcross5.test +++ b/tests/core/ltlcross5.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2019, 2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcross6.test b/tests/core/ltlcross6.test index 4b0a4b188..64c64d3bb 100755 --- a/tests/core/ltlcross6.test +++ b/tests/core/ltlcross6.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcrossce.test b/tests/core/ltlcrossce.test index 1316c4330..9d18b85ea 100755 --- a/tests/core/ltlcrossce.test +++ b/tests/core/ltlcrossce.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2016, 2019, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcrossce2.test b/tests/core/ltlcrossce2.test index 60233fa59..72ec5db77 100755 --- a/tests/core/ltlcrossce2.test +++ b/tests/core/ltlcrossce2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlcrossgrind.test b/tests/core/ltlcrossgrind.test index 1814bcd15..8e0a035e9 100755 --- a/tests/core/ltlcrossgrind.test +++ b/tests/core/ltlcrossgrind.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltldo.test b/tests/core/ltldo.test index 1504a24c0..de8d2cb4b 100755 --- a/tests/core/ltldo.test +++ b/tests/core/ltldo.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. 
# # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltldo2.test b/tests/core/ltldo2.test index 4998ce98d..bef75dbb3 100755 --- a/tests/core/ltldo2.test +++ b/tests/core/ltldo2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index 74a2da79e..b8691adc8 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index f28ee445d..426734851 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013-2020, 2022, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlgrind.test b/tests/core/ltlgrind.test index 09e75ee4e..f508c4826 100755 --- a/tests/core/ltlgrind.test +++ b/tests/core/ltlgrind.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2019, 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlrel.cc b/tests/core/ltlrel.cc index 88735a1e9..14d138ab1 100644 --- a/tests/core/ltlrel.cc +++ b/tests/core/ltlrel.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2016, 2018-2019 Laboratoire de Recherche et -// Developement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/ltlrel.test b/tests/core/ltlrel.test index 1b14534cb..7a7e3f08d 100755 --- a/tests/core/ltlrel.test +++ b/tests/core/ltlrel.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2016, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlsynt-pgame.test b/tests/core/ltlsynt-pgame.test index b4bada798..900e90120 100755 --- a/tests/core/ltlsynt-pgame.test +++ b/tests/core/ltlsynt-pgame.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index d1a7a9dee..3944c2076 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019-2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index f6c7787fe..5e26b28d3 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/lunabbrev.test b/tests/core/lunabbrev.test index 0b4911e24..bfa9cdf44 100755 --- a/tests/core/lunabbrev.test +++ b/tests/core/lunabbrev.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2014 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/maskacc.test b/tests/core/maskacc.test index c5eda7fb8..419d872e3 100755 --- a/tests/core/maskacc.test +++ b/tests/core/maskacc.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/maskkeep.test b/tests/core/maskkeep.test index 56a6c4ac0..678b8ca01 100755 --- a/tests/core/maskkeep.test +++ b/tests/core/maskkeep.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2016, 2021 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/mempool.cc b/tests/core/mempool.cc index 1431a24b2..1df9f85fc 100644 --- a/tests/core/mempool.cc +++ b/tests/core/mempool.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/mempool.test b/tests/core/mempool.test index 59b3e61cc..0b23039b6 100644 --- a/tests/core/mempool.test +++ b/tests/core/mempool.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de l'Epita. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/minterm.cc b/tests/core/minterm.cc index fe849aa61..1ca03e9a8 100644 --- a/tests/core/minterm.cc +++ b/tests/core/minterm.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/minterm.test b/tests/core/minterm.test index 893813b6e..5469e7c14 100644 --- a/tests/core/minterm.test +++ b/tests/core/minterm.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de l'Epita. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. 
# # This file is part of Spot, a model checking library. # diff --git a/tests/core/minusx.test b/tests/core/minusx.test index 238eeaf89..555209c35 100755 --- a/tests/core/minusx.test +++ b/tests/core/minusx.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2020-2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/monitor.test b/tests/core/monitor.test index 26f5c5667..416d655fb 100755 --- a/tests/core/monitor.test +++ b/tests/core/monitor.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/nenoform.test b/tests/core/nenoform.test index 737ab16ac..5f03df788 100755 --- a/tests/core/nenoform.test +++ b/tests/core/nenoform.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2014 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/neverclaimread.test b/tests/core/neverclaimread.test index 09af8af58..abfe2725a 100755 --- a/tests/core/neverclaimread.test +++ b/tests/core/neverclaimread.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010-2015, 2017-2018, 2020, 2022 Laboratoire -# de Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/ngraph.cc b/tests/core/ngraph.cc index 0a64f5f73..d31a42673 100644 --- a/tests/core/ngraph.cc +++ b/tests/core/ngraph.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018 Laboratoire de Recherche et Développement de -// l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/ngraph.test b/tests/core/ngraph.test index 900534820..6090fe7ae 100755 --- a/tests/core/ngraph.test +++ b/tests/core/ngraph.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/nondet.test b/tests/core/nondet.test index 4e635efc8..6f4766c03 100755 --- a/tests/core/nondet.test +++ b/tests/core/nondet.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/obligation.test b/tests/core/obligation.test index 1e3c9f30a..ea9162c5c 100755 --- a/tests/core/obligation.test +++ b/tests/core/obligation.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2011, 2014, 2015, 2017 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/optba.test b/tests/core/optba.test index 52b52e725..916794e66 100755 --- a/tests/core/optba.test +++ b/tests/core/optba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017, 2019 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/parity.cc b/tests/core/parity.cc index 7ff391745..2e3949ae9 100644 --- a/tests/core/parity.cc +++ b/tests/core/parity.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018-2019, 2023 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/parity.test b/tests/core/parity.test index be220e723..a3ff85eef 100755 --- a/tests/core/parity.test +++ b/tests/core/parity.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/parity2.test b/tests/core/parity2.test index 56efe94f9..8fb35e365 100755 --- a/tests/core/parity2.test +++ b/tests/core/parity2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018-2019, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/parse.test b/tests/core/parse.test index 233d25d0f..bbcccd8a2 100755 --- a/tests/core/parse.test +++ b/tests/core/parse.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2016 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/parseaut.test b/tests/core/parseaut.test index 52748b07c..d440a686f 100755 --- a/tests/core/parseaut.test +++ b/tests/core/parseaut.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020-2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/parseerr.test b/tests/core/parseerr.test index f9e1b79b7..16dca142c 100755 --- a/tests/core/parseerr.test +++ b/tests/core/parseerr.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2016, 2020, 2021 Laboratoire -# de Recherche et Développement de l'Epita (LRDE). 
-# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/pdegen.test b/tests/core/pdegen.test index 875e78946..2c222d8c7 100755 --- a/tests/core/pdegen.test +++ b/tests/core/pdegen.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/pgsolver.test b/tests/core/pgsolver.test index e767e1953..415341fb4 100755 --- a/tests/core/pgsolver.test +++ b/tests/core/pgsolver.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index c2d6091c7..889ac435d 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/prodor.test b/tests/core/prodor.test index 03d8cd458..9dd69716e 100755 --- a/tests/core/prodor.test +++ b/tests/core/prodor.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2021-2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/rabin2parity.test b/tests/core/rabin2parity.test index 09a4854f5..1be674e92 100644 --- a/tests/core/rabin2parity.test +++ b/tests/core/rabin2parity.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017-2018 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/rand.test b/tests/core/rand.test index fd901a737..db01ab4dc 100755 --- a/tests/core/rand.test +++ b/tests/core/rand.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2017, 2018, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/randaut.test b/tests/core/randaut.test index 50558e790..e652c0719 100755 --- a/tests/core/randaut.test +++ b/tests/core/randaut.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/randomize.test b/tests/core/randomize.test index d6ab5aee9..d7d3c0b88 100755 --- a/tests/core/randomize.test +++ b/tests/core/randomize.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2017-2018, 2023 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/randpsl.test b/tests/core/randpsl.test index 293226027..5e7192894 100755 --- a/tests/core/randpsl.test +++ b/tests/core/randpsl.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2014, 2015, 2016, 2019 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/randtgba.cc b/tests/core/randtgba.cc index 460bf9cd9..c7cbcc074 100644 --- a/tests/core/randtgba.cc +++ b/tests/core/randtgba.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2019, 2022 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). -// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris -// 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -// Université Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/randtgba.test b/tests/core/randtgba.test index 6ba012cc9..e22f59189 100755 --- a/tests/core/randtgba.test +++ b/tests/core/randtgba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2014, 2015 Laboratoire de Recherche et Development de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/readltl.cc b/tests/core/readltl.cc index 6e445bd1a..85fd98bcc 100644 --- a/tests/core/readltl.cc +++ b/tests/core/readltl.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2009, 2012, 2015, 2016, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/readsave.test b/tests/core/readsave.test index f27c8aaf0..9be1ee4c2 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2023 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/reduc.cc b/tests/core/reduc.cc index 5e820fe61..664b34765 100644 --- a/tests/core/reduc.cc +++ b/tests/core/reduc.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*_ -// Copyright (C) 2008-2012, 2014-2016, 2018-2019 Laboratoire -// de Recherche et Dveloppement de l'Epita (LRDE). 
-// Copyright (C) 2004, 2006, 2007 Laboratoire d'Informatique de Paris -// 6 (LIP6), dpartement Systmes Rpartis Coopratifs (SRC), -// Universit Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/reduc.test b/tests/core/reduc.test index ed10c1a6b..ab12a6a52 100755 --- a/tests/core/reduc.test +++ b/tests/core/reduc.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2004, 2005, 2006 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/reduc0.test b/tests/core/reduc0.test index 99a89195c..d6e37e25f 100755 --- a/tests/core/reduc0.test +++ b/tests/core/reduc0.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/reduccmp.test b/tests/core/reduccmp.test index 67ef7daaf..42505ed46 100755 --- a/tests/core/reduccmp.test +++ b/tests/core/reduccmp.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2014, 2016-2019 Laboratoire de Recherche et -# Developpement de l'Epita (LRDE). -# Copyright (C) 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/reducpsl.test b/tests/core/reducpsl.test index 317cbf4b5..edc928ccc 100755 --- a/tests/core/reducpsl.test +++ b/tests/core/reducpsl.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/remfin.test b/tests/core/remfin.test index 83936d536..493de1e7d 100755 --- a/tests/core/remfin.test +++ b/tests/core/remfin.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2018, 2020, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/remove_x.test b/tests/core/remove_x.test index e72202e45..df57448ca 100755 --- a/tests/core/remove_x.test +++ b/tests/core/remove_x.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/remprop.test b/tests/core/remprop.test index 19c7aa6b5..5045b64f0 100755 --- a/tests/core/remprop.test +++ b/tests/core/remprop.test @@ -1,7 +1,6 @@ #! 
/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/renault.test b/tests/core/renault.test index b43755290..484ba2200 100755 --- a/tests/core/renault.test +++ b/tests/core/renault.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/safra.cc b/tests/core/safra.cc index ca4591559..d01613d02 100644 --- a/tests/core/safra.cc +++ b/tests/core/safra.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/safra.test b/tests/core/safra.test index c5b2277ff..4e7571b59 100755 --- a/tests/core/safra.test +++ b/tests/core/safra.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/satmin.test b/tests/core/satmin.test index 325d2ef63..12feb1411 100755 --- a/tests/core/satmin.test +++ b/tests/core/satmin.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2017-2019, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/satmin2.test b/tests/core/satmin2.test index 113e23d86..53e769faf 100755 --- a/tests/core/satmin2.test +++ b/tests/core/satmin2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015, 2017, 2018 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/satmin3.test b/tests/core/satmin3.test index f9fcb4403..cac22dc9d 100755 --- a/tests/core/satmin3.test +++ b/tests/core/satmin3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2019, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sbacc.test b/tests/core/sbacc.test index 715001b7b..6533cfb34 100755 --- a/tests/core/sbacc.test +++ b/tests/core/sbacc.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015-2017, 2020-2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/scc.test b/tests/core/scc.test index 77e058e40..d372defbc 100755 --- a/tests/core/scc.test +++ b/tests/core/scc.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2015, 2017, 2018 Laboratoire de Recherche et -# Developpement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sccdot.test b/tests/core/sccdot.test index 68a4aedee..fb0552736 100755 --- a/tests/core/sccdot.test +++ b/tests/core/sccdot.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017, 2018, 2020, 2021 Laboratoire de Recherche -# et Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sccif.cc b/tests/core/sccif.cc index 96f289936..f55ef8935 100644 --- a/tests/core/sccif.cc +++ b/tests/core/sccif.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/sccif.test b/tests/core/sccif.test index 2f3441f66..d29986a2e 100755 --- a/tests/core/sccif.test +++ b/tests/core/sccif.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sccsimpl.test b/tests/core/sccsimpl.test index 591b3ec02..238ad98ae 100755 --- a/tests/core/sccsimpl.test +++ b/tests/core/sccsimpl.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2015, 2018, 2019, 2023 Laboratoire de Recherche -# et Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/semidet.test b/tests/core/semidet.test index be15515dd..0c001417c 100755 --- a/tests/core/semidet.test +++ b/tests/core/semidet.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sepsets.test b/tests/core/sepsets.test index 9957da874..7d002711b 100755 --- a/tests/core/sepsets.test +++ b/tests/core/sepsets.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2019 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/serial.test b/tests/core/serial.test index 389335eff..ff1e84be1 100755 --- a/tests/core/serial.test +++ b/tests/core/serial.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/sim2.test b/tests/core/sim2.test index 88cc6dcb6..d448b8f06 100755 --- a/tests/core/sim2.test +++ b/tests/core/sim2.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sim3.test b/tests/core/sim3.test index c6f6cd638..fc2f5abd1 100755 --- a/tests/core/sim3.test +++ b/tests/core/sim3.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2018-2021 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sonf.test b/tests/core/sonf.test index 0febfc342..846ea5cee 100644 --- a/tests/core/sonf.test +++ b/tests/core/sonf.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/split.test b/tests/core/split.test index 97b2c7075..720133e2e 100755 --- a/tests/core/split.test +++ b/tests/core/split.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/spotlbtt.test b/tests/core/spotlbtt.test index a3603cf05..40bcbdbce 100755 --- a/tests/core/spotlbtt.test +++ b/tests/core/spotlbtt.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2012, 2014, 2015 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005, 2006, 2007 Laboratoire -# d'Informatique de Paris 6 (LIP6), département Systèmes Répartis -# Coopératifs (SRC), Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/spotlbtt2.test b/tests/core/spotlbtt2.test index 7c5092c62..cfe75169a 100755 --- a/tests/core/spotlbtt2.test +++ b/tests/core/spotlbtt2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/streett.test b/tests/core/streett.test index 7793fe813..885f93e4c 100755 --- a/tests/core/streett.test +++ b/tests/core/streett.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/strength.test b/tests/core/strength.test index 095922895..c533dde0e 100755 --- a/tests/core/strength.test +++ b/tests/core/strength.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2016, 2017, 2023 Laboratoire de Recherche et Developpement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/stutter-ltl.test b/tests/core/stutter-ltl.test index 663f9fdbf..346bd3740 100755 --- a/tests/core/stutter-ltl.test +++ b/tests/core/stutter-ltl.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2014, 2015, 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/stutter-tgba.test b/tests/core/stutter-tgba.test index 6344068c0..a3050977c 100755 --- a/tests/core/stutter-tgba.test +++ b/tests/core/stutter-tgba.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/sugar.test b/tests/core/sugar.test index 92c2095e0..386793595 100755 --- a/tests/core/sugar.test +++ b/tests/core/sugar.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018-2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/syfco.test b/tests/core/syfco.test index 453aa19bb..0df7f0571 100755 --- a/tests/core/syfco.test +++ b/tests/core/syfco.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/syntimpl.cc b/tests/core/syntimpl.cc index 2a2a8dc22..65995898a 100644 --- a/tests/core/syntimpl.cc +++ b/tests/core/syntimpl.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2016, 2018-2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). -// Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 -// (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -// Pierre et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/syntimpl.test b/tests/core/syntimpl.test index aa981e7e0..663672c4c 100755 --- a/tests/core/syntimpl.test +++ b/tests/core/syntimpl.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/taatgba.cc b/tests/core/taatgba.cc index beba2d3de..745168b6d 100644 --- a/tests/core/taatgba.cc +++ b/tests/core/taatgba.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2014, 2015, 2018 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/taatgba.test b/tests/core/taatgba.test index d9b029f70..25cc9bd74 100755 --- a/tests/core/taatgba.test +++ b/tests/core/taatgba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/tgbagraph.test b/tests/core/tgbagraph.test index 737fa9638..8c326d5af 100755 --- a/tests/core/tgbagraph.test +++ b/tests/core/tgbagraph.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014-2018, 2020, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/tostring.cc b/tests/core/tostring.cc index 895c1a490..d32386fd1 100644 --- a/tests/core/tostring.cc +++ b/tests/core/tostring.cc @@ -1,9 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2009, 2012, 2015, 2016, 2018 Laboratoire de -// Recherche et Développement de l'Epita (LRDE). -// Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), -// département Systèmes Répartis Coopératifs (SRC), Université Pierre -// et Marie Curie. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/tostring.test b/tests/core/tostring.test index 7067a8b2c..bdf436375 100755 --- a/tests/core/tostring.test +++ b/tests/core/tostring.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2011, 2013, 2016, 2022 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/tripprod.test b/tests/core/tripprod.test index 4650eb010..fe44d8c90 100755 --- a/tests/core/tripprod.test +++ b/tests/core/tripprod.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2008, 2009, 2013, 2014, 2015 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004, 2005 Laboratoire d'Informatique de -# Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), -# Université Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/trival.cc b/tests/core/trival.cc index 203bf8177..7c6706116 100644 --- a/tests/core/trival.cc +++ b/tests/core/trival.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018 Laboratoire de Recherche et Developpement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/tests/core/trival.test b/tests/core/trival.test index c808f87ae..ddf9126b1 100755 --- a/tests/core/trival.test +++ b/tests/core/trival.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/tunabbrev.test b/tests/core/tunabbrev.test index dfb733a9f..d6f9d32dc 100755 --- a/tests/core/tunabbrev.test +++ b/tests/core/tunabbrev.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8- -# Copyright (C) 2009, 2010, 2014 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/tunenoform.test b/tests/core/tunenoform.test index b900ed447..4d691dfd8 100755 --- a/tests/core/tunenoform.test +++ b/tests/core/tunenoform.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2014 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). -# Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/twacube.cc b/tests/core/twacube.cc index 43118d6a5..2a47f3bc6 100644 --- a/tests/core/twacube.cc +++ b/tests/core/twacube.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2018, 2020, 2023 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/twacube.test b/tests/core/twacube.test index 3d906afd2..fa3f0536c 100755 --- a/tests/core/twacube.test +++ b/tests/core/twacube.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014, 2015, 2016, 2018, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/twagraph.cc b/tests/core/twagraph.cc index 7b4515268..7e33d6748 100644 --- a/tests/core/twagraph.cc +++ b/tests/core/twagraph.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2019, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/core/unabbrevwm.test b/tests/core/unabbrevwm.test index 622b5f7c2..6848e1213 100755 --- a/tests/core/unabbrevwm.test +++ b/tests/core/unabbrevwm.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2015, 2017, 2018 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/core/unambig.test b/tests/core/unambig.test index dc9467979..3d51bde8c 100755 --- a/tests/core/unambig.test +++ b/tests/core/unambig.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2015-2019 Laboratoire de Recherche et -# Developpement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/unambig2.test b/tests/core/unambig2.test index c5b428fe3..7fd30bf4b 100755 --- a/tests/core/unambig2.test +++ b/tests/core/unambig2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et -# Developpement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/uniq.test b/tests/core/uniq.test index a86afed8e..10e4fdde0 100755 --- a/tests/core/uniq.test +++ b/tests/core/uniq.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/utf8.test b/tests/core/utf8.test index b0bfef043..741ecf5ee 100755 --- a/tests/core/utf8.test +++ b/tests/core/utf8.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2013, 2015, 2016, 2019, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/uwrm.test b/tests/core/uwrm.test index 28a8e2764..c3d1c3e50 100755 --- a/tests/core/uwrm.test +++ b/tests/core/uwrm.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/wdba.test b/tests/core/wdba.test index e3e5175f0..a2d580d24 100755 --- a/tests/core/wdba.test +++ b/tests/core/wdba.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010, 2014, 2015, 2018 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/core/wdba2.test b/tests/core/wdba2.test index 40b1a52c2..97bc30774 100755 --- a/tests/core/wdba2.test +++ b/tests/core/wdba2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014-2015, 2018-2019, 2023 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/check.test b/tests/ltsmin/check.test index c4ce7d49b..b84bee6aa 100755 --- a/tests/ltsmin/check.test +++ b/tests/ltsmin/check.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011-2012, 2014-2017, 2019-2020, 2023 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/ltsmin/check2.test b/tests/ltsmin/check2.test index 2fa9a5dae..f482c9809 100755 --- a/tests/ltsmin/check2.test +++ b/tests/ltsmin/check2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2012, 2014, 2015, 2016, 2017 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/check3.test b/tests/ltsmin/check3.test index 47df0e4d3..be05b84b3 100755 --- a/tests/ltsmin/check3.test +++ b/tests/ltsmin/check3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2017, 2023 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/finite.test b/tests/ltsmin/finite.test index ad1497035..67cbe5e70 100755 --- a/tests/ltsmin/finite.test +++ b/tests/ltsmin/finite.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2014, 2016, 2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/finite2.test b/tests/ltsmin/finite2.test index ef35e113c..f44b3b612 100755 --- a/tests/ltsmin/finite2.test +++ b/tests/ltsmin/finite2.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2014, 2016, 2017 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/finite3.test b/tests/ltsmin/finite3.test index 752ffe2b2..50b65e815 100755 --- a/tests/ltsmin/finite3.test +++ b/tests/ltsmin/finite3.test @@ -1,7 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2013, 2014, 2016, 2017 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/kripke.test b/tests/ltsmin/kripke.test index a9dbe7dd7..0768b48f0 100755 --- a/tests/ltsmin/kripke.test +++ b/tests/ltsmin/kripke.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2011, 2014-2016, 2023 Laboratoire de Recherche et -# Developpement de l'Epita (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/ltsmin/modelcheck.cc b/tests/ltsmin/modelcheck.cc index 9c529f0e8..f73e2158d 100644 --- a/tests/ltsmin/modelcheck.cc +++ b/tests/ltsmin/modelcheck.cc @@ -1,6 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2020, 2022 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE) +// Copyright (C) by the Spot authors, see the AUTHORS file for details. // // This file is part of Spot, a model checking library. // diff --git a/tests/ltsmin/testconvert.cc b/tests/ltsmin/testconvert.cc index cfd84333c..20c43fd4c 100644 --- a/tests/ltsmin/testconvert.cc +++ b/tests/ltsmin/testconvert.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita. +// Copyright (C) by the Spot authors, see the AUTHORS file for details. 
// // This file is part of Spot, a model checking library. // diff --git a/tests/ltsmin/testconvert.test b/tests/ltsmin/testconvert.test index 52ce1bc10..dfe4076b3 100644 --- a/tests/ltsmin/testconvert.test +++ b/tests/ltsmin/testconvert.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Developpement -# de l'Epita (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/298.py b/tests/python/298.py index 89ddbdb0c..6f71a989f 100644 --- a/tests/python/298.py +++ b/tests/python/298.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/341.py b/tests/python/341.py index e828ab07c..67b58fa3c 100644 --- a/tests/python/341.py +++ b/tests/python/341.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/471.py b/tests/python/471.py index 0fe180554..561be5a0a 100644 --- a/tests/python/471.py +++ b/tests/python/471.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/acc.py b/tests/python/acc.py index 8a23dcd46..b215428b1 100644 --- a/tests/python/acc.py +++ b/tests/python/acc.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/accparse2.py b/tests/python/accparse2.py index d9c7274a0..c2cddfd07 100644 --- a/tests/python/accparse2.py +++ b/tests/python/accparse2.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/aiger.py b/tests/python/aiger.py index f490465b0..d29e87019 100644 --- a/tests/python/aiger.py +++ b/tests/python/aiger.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/alarm.py b/tests/python/alarm.py index 679dc1867..28b5c32ad 100755 --- a/tests/python/alarm.py +++ b/tests/python/alarm.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/aliases.py b/tests/python/aliases.py index 40dd4d0ec..dcf360d45 100644 --- a/tests/python/aliases.py +++ b/tests/python/aliases.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/alternating.py b/tests/python/alternating.py index 5b38ca378..44a207f78 100755 --- a/tests/python/alternating.py +++ b/tests/python/alternating.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2017, 2021-2022 Laboratoire de Recherche -# et Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/bdddict.py b/tests/python/bdddict.py index b7b442b1f..91d55e1f8 100644 --- a/tests/python/bdddict.py +++ b/tests/python/bdddict.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/bdditer.py b/tests/python/bdditer.py index 4a2afeea1..2cee87a4e 100644 --- a/tests/python/bdditer.py +++ b/tests/python/bdditer.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021, 2022, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/bddnqueen.py b/tests/python/bddnqueen.py index 95809979e..a2a4985fb 100755 --- a/tests/python/bddnqueen.py +++ b/tests/python/bddnqueen.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2011, 2012, 2014, 2019 Laboratoire de Recherche et -# Développement de l'EPITA. -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/bugdet.py b/tests/python/bugdet.py index 19434c967..370a14f1f 100644 --- a/tests/python/bugdet.py +++ b/tests/python/bugdet.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/complement_semidet.py b/tests/python/complement_semidet.py index da06749a3..1aa3bcf5d 100644 --- a/tests/python/complement_semidet.py +++ b/tests/python/complement_semidet.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/dbranch.py b/tests/python/dbranch.py index 99c81bdf6..f4bcdaee7 100644 --- a/tests/python/dbranch.py +++ b/tests/python/dbranch.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2022, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/declenv.py b/tests/python/declenv.py index 3ab47736b..7baad256b 100644 --- a/tests/python/declenv.py +++ b/tests/python/declenv.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/decompose_scc.py b/tests/python/decompose_scc.py index 47741fb72..80b6dc746 100644 --- a/tests/python/decompose_scc.py +++ b/tests/python/decompose_scc.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/det.py b/tests/python/det.py index 36fa31ff3..b89a55b85 100644 --- a/tests/python/det.py +++ b/tests/python/det.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/dualize.py b/tests/python/dualize.py index b4e459a18..bfeb20b38 100755 --- a/tests/python/dualize.py +++ b/tests/python/dualize.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019, 2021-2023 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ecfalse.py b/tests/python/ecfalse.py index ccbaa2693..b112db061 100644 --- a/tests/python/ecfalse.py +++ b/tests/python/ecfalse.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/except.py b/tests/python/except.py index 03076c01b..cb8b39c3c 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py index 5c94c3946..165f9030f 100644 --- a/tests/python/forq_contains.py +++ b/tests/python/forq_contains.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/game.py b/tests/python/game.py index 857390335..e0d880647 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/gen.py b/tests/python/gen.py index a9fed6890..a80196012 100644 --- a/tests/python/gen.py +++ b/tests/python/gen.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/genem.py b/tests/python/genem.py index 970fe705b..0b1826a72 100644 --- a/tests/python/genem.py +++ b/tests/python/genem.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/implies.py b/tests/python/implies.py index 24d74b720..3b6fe367b 100755 --- a/tests/python/implies.py +++ b/tests/python/implies.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2022 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/interdep.py b/tests/python/interdep.py index efe17c4cb..d1c19b932 100755 --- a/tests/python/interdep.py +++ b/tests/python/interdep.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2014, 2015, 2016 Laboratoire de Recherche et -# Développement de l'EPITA. -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/intrun.py b/tests/python/intrun.py index 1494d4665..033b9cdb4 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022, 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/kripke.py b/tests/python/kripke.py index 2d65ebc5d..4e6cd05ef 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2022, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/langmap.py b/tests/python/langmap.py index 723a5c0d5..00217d0cc 100644 --- a/tests/python/langmap.py +++ b/tests/python/langmap.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE) +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ltl2tgba.py b/tests/python/ltl2tgba.py index 913c557be..7296450c2 100755 --- a/tests/python/ltl2tgba.py +++ b/tests/python/ltl2tgba.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2016, 2021-2022 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ltl2tgba.test b/tests/python/ltl2tgba.test index a0a19096a..d6aa4d2a7 100755 --- a/tests/python/ltl2tgba.test +++ b/tests/python/ltl2tgba.test @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2014 Laboratoire de Recherche et -# Développement de l'EPITA. -# Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ltlf.py b/tests/python/ltlf.py index b13432d3e..afd114855 100644 --- a/tests/python/ltlf.py +++ b/tests/python/ltlf.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de -# l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ltlparse.py b/tests/python/ltlparse.py index 07753060a..994a36dec 100755 --- a/tests/python/ltlparse.py +++ b/tests/python/ltlparse.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2017, 2019, 2021-2023 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/ltlsimple.py b/tests/python/ltlsimple.py index c21c3b7f1..0d257d711 100755 --- a/tests/python/ltlsimple.py +++ b/tests/python/ltlsimple.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2015, 2018, 2021-2022 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systemes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/mealy.py b/tests/python/mealy.py index ef61daca9..38f942a50 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021-2023 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/merge.py b/tests/python/merge.py index 893916953..e40f4e836 100644 --- a/tests/python/merge.py +++ b/tests/python/merge.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index e2c88874e..e995bca8b 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020-2023 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/minato.py b/tests/python/minato.py index ead000fcc..7f343d6a8 100755 --- a/tests/python/minato.py +++ b/tests/python/minato.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2013, 2014 Laboratoire de Recherche et -# Développement de l'Epita -# Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/misc-ec.py b/tests/python/misc-ec.py index 85d4aaa47..283103d2c 100644 --- a/tests/python/misc-ec.py +++ b/tests/python/misc-ec.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/optionmap.py b/tests/python/optionmap.py index ad526f510..ba73282b3 100755 --- a/tests/python/optionmap.py +++ b/tests/python/optionmap.py @@ -1,9 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2018, 2022 Laboratoire de Recherche et -# Développement de l'EPITA. -# Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), -# département Systèmes Répartis Coopératifs (SRC), Université Pierre -# et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/origstate.py b/tests/python/origstate.py index 15a7ab0ad..594eafd52 100644 --- a/tests/python/origstate.py +++ b/tests/python/origstate.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/otfcrash.py b/tests/python/otfcrash.py index 8e30cb501..6afc4f139 100644 --- a/tests/python/otfcrash.py +++ b/tests/python/otfcrash.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/parity.py b/tests/python/parity.py index 6ced51c40..e507cb3a7 100644 --- a/tests/python/parity.py +++ b/tests/python/parity.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index a91b702fb..d6f665ae5 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2022, 2023 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 00f3df7e0..1e886f280 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2020, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/powerset.py b/tests/python/powerset.py index 91575c263..e9f49bea2 100644 --- a/tests/python/powerset.py +++ b/tests/python/powerset.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2023 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/prodexpt.py b/tests/python/prodexpt.py index 4d00b4dae..c58b6b8c8 100644 --- a/tests/python/prodexpt.py +++ b/tests/python/prodexpt.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2017, 2019-2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/randgen.py b/tests/python/randgen.py index 32762d02e..3d13f7c3d 100755 --- a/tests/python/randgen.py +++ b/tests/python/randgen.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/relabel.py b/tests/python/relabel.py index b32ebd752..3b68c2bc8 100644 --- a/tests/python/relabel.py +++ b/tests/python/relabel.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2019, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/remfin.py b/tests/python/remfin.py index ffff3e22a..c477d8975 100644 --- a/tests/python/remfin.py +++ b/tests/python/remfin.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/removeap.py b/tests/python/removeap.py index ba656ac89..3e92b45f6 100644 --- a/tests/python/removeap.py +++ b/tests/python/removeap.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/rs_like.py b/tests/python/rs_like.py index 669af5885..9638373ef 100644 --- a/tests/python/rs_like.py +++ b/tests/python/rs_like.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/satmin.py b/tests/python/satmin.py index f9fa466f8..2fa9ace02 100644 --- a/tests/python/satmin.py +++ b/tests/python/satmin.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2020, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sbacc.py b/tests/python/sbacc.py index 22d937014..a7d76d2a5 100644 --- a/tests/python/sbacc.py +++ b/tests/python/sbacc.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sccfilter.py b/tests/python/sccfilter.py index 7728b70a6..954ca5180 100644 --- a/tests/python/sccfilter.py +++ b/tests/python/sccfilter.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sccinfo.py b/tests/python/sccinfo.py index 197dd7254..43dfd3c3d 100644 --- a/tests/python/sccinfo.py +++ b/tests/python/sccinfo.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021, 2022, 2023 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sccsplit.py b/tests/python/sccsplit.py index 4a1781475..2e076365e 100644 --- a/tests/python/sccsplit.py +++ b/tests/python/sccsplit.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/semidet.py b/tests/python/semidet.py index 9072f5917..485c24665 100644 --- a/tests/python/semidet.py +++ b/tests/python/semidet.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/setacc.py b/tests/python/setacc.py index c61b3262a..ffae43a6b 100644 --- a/tests/python/setacc.py +++ b/tests/python/setacc.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2021, 2022, 2023 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/setxor.py b/tests/python/setxor.py index 2fe69cd99..728258096 100755 --- a/tests/python/setxor.py +++ b/tests/python/setxor.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2011, 2022 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/simplacc.py b/tests/python/simplacc.py index 50dc2d74a..2c20d70c4 100644 --- a/tests/python/simplacc.py +++ b/tests/python/simplacc.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/simstate.py b/tests/python/simstate.py index 6a52124f9..10bbd1d3b 100644 --- a/tests/python/simstate.py +++ b/tests/python/simstate.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2020-2023 Laboratoire de Recherche -# et Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sonf.py b/tests/python/sonf.py index 40af758b0..47f682a67 100644 --- a/tests/python/sonf.py +++ b/tests/python/sonf.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/split.py b/tests/python/split.py index b916f494f..a953b82e2 100644 --- a/tests/python/split.py +++ b/tests/python/split.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/splitedge.py b/tests/python/splitedge.py index fa9ff4358..4911b10c2 100644 --- a/tests/python/splitedge.py +++ b/tests/python/splitedge.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020-2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/python/streett_totgba.py b/tests/python/streett_totgba.py index 8a18defbc..fba65bbe5 100644 --- a/tests/python/streett_totgba.py +++ b/tests/python/streett_totgba.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2018, 2021-2022 Laboratoire de Recherche et -# Développement de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/streett_totgba2.py b/tests/python/streett_totgba2.py index 5ff97a369..69e6b2b6e 100644 --- a/tests/python/streett_totgba2.py +++ b/tests/python/streett_totgba2.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/stutter.py b/tests/python/stutter.py index 05c28fda9..0a96cd591 100644 --- a/tests/python/stutter.py +++ b/tests/python/stutter.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019-2022 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/sum.py b/tests/python/sum.py index 1f7c6e0a1..72814a2b6 100644 --- a/tests/python/sum.py +++ b/tests/python/sum.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 98ac889d8..991e1cbf4 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/toparity.py b/tests/python/toparity.py index 80c2c19ef..d737bacae 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -1,7 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/toweak.py b/tests/python/toweak.py index 23dcf66fa..3f88f5953 100644 --- a/tests/python/toweak.py +++ b/tests/python/toweak.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et -# Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/tra2tba.py b/tests/python/tra2tba.py index 354ced630..659736516 100644 --- a/tests/python/tra2tba.py +++ b/tests/python/tra2tba.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche -# et Développement de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. 
# # This file is part of Spot, a model checking library. # diff --git a/tests/python/trival.py b/tests/python/trival.py index ea844e29c..44b3343a7 100644 --- a/tests/python/trival.py +++ b/tests/python/trival.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/twagraph.py b/tests/python/twagraph.py index 1ebcb8ac5..9e9d89bb3 100644 --- a/tests/python/twagraph.py +++ b/tests/python/twagraph.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021-2022 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/python/zlktree.py b/tests/python/zlktree.py index c86c4a8b0..2abd4d4ef 100644 --- a/tests/python/zlktree.py +++ b/tests/python/zlktree.py @@ -1,6 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/run.in b/tests/run.in index 7502b88f8..bdd2d0a45 100755 --- a/tests/run.in +++ b/tests/run.in @@ -1,10 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010-2011, 2014-2016, 2018-2022 Laboratoire de Recherche -# et Developpement de l'EPITA (LRDE). -# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/80columns.test b/tests/sanity/80columns.test index 9942946d8..a298cb77e 100755 --- a/tests/sanity/80columns.test +++ b/tests/sanity/80columns.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2016-2017, 2019-2020 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). -# Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/bin.test b/tests/sanity/bin.test index 3c331020c..6e9e5eafb 100644 --- a/tests/sanity/bin.test +++ b/tests/sanity/bin.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/getenv.test b/tests/sanity/getenv.test index f26d405e6..a0359e820 100644 --- a/tests/sanity/getenv.test +++ b/tests/sanity/getenv.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. 
# diff --git a/tests/sanity/includes.test b/tests/sanity/includes.test index 0692ee42b..7d14be87d 100755 --- a/tests/sanity/includes.test +++ b/tests/sanity/includes.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2008, 2011-2012, 2016-2019 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). -# Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/ipynb.pl b/tests/sanity/ipynb.pl index dfebecf51..4f29d35b7 100755 --- a/tests/sanity/ipynb.pl +++ b/tests/sanity/ipynb.pl @@ -1,8 +1,7 @@ #! /usr/bin/perl -w # -*- cperl; coding: utf-8 -*- # -# Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/namedprop.test b/tests/sanity/namedprop.test index 46bbc9fd1..4f4b1347c 100755 --- a/tests/sanity/namedprop.test +++ b/tests/sanity/namedprop.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/private.test b/tests/sanity/private.test index 8e10605de..10f2b0e83 100755 --- a/tests/sanity/private.test +++ b/tests/sanity/private.test @@ -1,7 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2013, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/readme.pl b/tests/sanity/readme.pl index bfbc85c64..2bc6d70ef 100755 --- a/tests/sanity/readme.pl +++ b/tests/sanity/readme.pl @@ -1,8 +1,7 @@ #! /usr/bin/perl -w # -*- cperl; coding: utf-8 -*- # -# Copyright (C) 2010, 2015, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 372bbeba2..52ce2a2f2 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -1,10 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2023 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). -# Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 -# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université -# Pierre et Marie Curie. +# Copyright (C) by the Spot authors, see the AUTHORS file for details. # # This file is part of Spot, a model checking library. # diff --git a/tools/man2html.pl b/tools/man2html.pl index 84a0cc654..34e43eaba 100755 --- a/tools/man2html.pl +++ b/tools/man2html.pl @@ -1,7 +1,6 @@ #!/usr/bin/perl ## -*- coding: utf-8 -*- -## Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de -## l'Epita (LRDE). +## Copyright (C) by the Spot authors, see the AUTHORS file for details. ## ## This file is part of Spot, a model checking library. 
## From 62fb0c354e6f839be1b7c131925235d73c2d888c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 21 Nov 2023 16:40:33 +0100 Subject: [PATCH 371/606] strength: fix detection of terminal automata Fixes issue #553. * spot/twaalgos/strength.cc (is_type_automaton): Make sure an accepting SCC is not followed by a rejecting one. (is_terminal_automaton): Mark the third-argument version deprecated. * spot/twaalgos/strength.hh: Adjust. * spot/twaalgos/couvreurnew.cc: Remove the inappropriate terminal optimization. * bin/ltlfilt.cc, spot/tl/hierarchy.cc, spot/twaalgos/gfguarantee.cc, tests/core/ikwiad.cc: Remove usage of the third argument of is_terminal_automaton. * tests/core/readsave.test, tests/core/strength.test: Adjust test cases. * NEWS: Mention the bug. --- NEWS | 16 +++++++++++ bin/ltlfilt.cc | 2 +- spot/tl/hierarchy.cc | 4 +-- spot/twaalgos/couvreurnew.cc | 55 +++++++++++------------------------- spot/twaalgos/gfguarantee.cc | 6 ++-- spot/twaalgos/strength.cc | 48 +++++++++++++++---------------- spot/twaalgos/strength.hh | 19 ++++++++----- tests/core/ikwiad.cc | 3 +- tests/core/readsave.test | 2 +- tests/core/strength.test | 2 +- 10 files changed, 75 insertions(+), 82 deletions(-) diff --git a/NEWS b/NEWS index 7ea126243..23204222e 100644 --- a/NEWS +++ b/NEWS @@ -166,6 +166,22 @@ New in spot 2.11.6.dev (not yet released) computation on that color that caused it to crash with a "Too many acceptance sets used" message. (issue #552) + - The detection of terminal automata did not exactly + match the definition used in the HOA format. The definition + of a terminal automaton is supposed to be: + 1. the automaton is weak + 2. its accepting SCCs are complete + 3. no accepting cycle can reach a rejecting cycle + However, the implementation actually replaced the last point + by the following variant: + 3'. no accepting edge can reach a rejecting cycle + This caused issues in automata with t acceptance. Fixing the code + by replacing 3' with 3 also made the third argument of + is_terminal_automaton(), an optional boolean indicating whether + transitions between SCCs should be ignored when computing 3', + completely obsolete. This third argument has been marked as + deprecated. (issue #553) + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 689c26e53..0403ea76c 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -843,7 +843,7 @@ namespace { spot::scc_info si(min); matched &= !guarantee - || is_terminal_automaton(min, &si, true); + || is_terminal_automaton(min, &si); matched &= !safety || is_safety_automaton(min, &si); } } diff --git a/spot/tl/hierarchy.cc b/spot/tl/hierarchy.cc index 5d29202b5..a94938009 100644 --- a/spot/tl/hierarchy.cc +++ b/spot/tl/hierarchy.cc @@ -286,9 +286,7 @@ namespace spot if (aut != min) // An obligation. { scc_info si(min); - // The minimba WDBA can have some trivial accepting SCCs - // that we should ignore in is_terminal_automaton(). - bool g = is_terminal_automaton(min, &si, true); + bool g = is_terminal_automaton(min, &si); bool s = is_safety_automaton(min, &si); if (g) return s ? 'B' : 'G'; diff --git a/spot/twaalgos/couvreurnew.cc b/spot/twaalgos/couvreurnew.cc index 25797d27d..62cbb7aa1 100644 --- a/spot/twaalgos/couvreurnew.cc +++ b/spot/twaalgos/couvreurnew.cc @@ -549,10 +549,7 @@ namespace spot return run_; } - // A simple enum for the different automata strengths.
- enum twa_strength { STRONG, WEAK, TERMINAL }; - - template + template class couvreur99_new : public emptiness_check, public ec_statistics { using T = twa_iteration; @@ -696,7 +693,7 @@ namespace spot state_t init = T::initial_state(ecs_->aut); ecs_->h[init] = 1; ecs_->root.push(1); - if (strength == STRONG) + if (is_strong) arc.push({}); auto iter = T::succ(ecs_->aut, init); todo.emplace(init, iter); @@ -706,7 +703,7 @@ namespace spot while (!todo.empty()) { - if (strength == STRONG) + if (is_strong) assert(ecs_->root.size() == arc.size()); // We are looking at the next successor in SUCC. @@ -727,7 +724,7 @@ namespace spot assert(!ecs_->root.empty()); if (ecs_->root.top().index == ecs_->h[curr]) { - if (strength == STRONG) + if (is_strong) { assert(!arc.empty()); arc.pop(); @@ -759,21 +756,7 @@ namespace spot } // Fetch the values we are interested in... - auto acc = succ->acc(); - if (!need_accepting_run) - if (strength == TERMINAL && ecs_->aut->acc().accepting(acc)) - { - // We have found an accepting SCC. - // Release all iterators in todo. - while (!todo.empty()) - { - T::it_destroy(ecs_->aut, todo.top().second); - todo.pop(); - dec_depth(); - } - // We do not need an accepting run. - return true; - } + acc_cond::mark_t acc = succ->acc(); state_t dest = succ->dst(); // ... and point the iterator to the next successor, for // the next iteration. @@ -788,7 +771,7 @@ namespace spot // Yes. Bump number, stack the stack, and register its // successors for later processing. ecs_->root.push(++num); - if (strength == STRONG) + if (is_strong) arc.push(acc); iterator_t iter = T::succ(ecs_->aut, dest); todo.emplace(dest, iter); @@ -818,7 +801,7 @@ namespace spot while (threshold < ecs_->root.top().index) { assert(!ecs_->root.empty()); - if (strength == STRONG) + if (is_strong) { assert(!arc.empty()); acc |= ecs_->root.top().condition; @@ -864,20 +847,18 @@ namespace spot } // anonymous namespace - template - using cna = couvreur99_new; - template - using cne = couvreur99_new; + template + using cna = couvreur99_new; + template + using cne = couvreur99_new; emptiness_check_ptr get_couvreur99_new_abstract(const const_twa_ptr& a, option_map o) { - // NB: The order of the if's matter. - if (a->prop_terminal()) - return SPOT_make_shared_enabled__(cna, a, o); if (a->prop_weak()) - return SPOT_make_shared_enabled__(cna, a, o); - return SPOT_make_shared_enabled__(cna, a, o); + return SPOT_make_shared_enabled__(cna, a, o); + else + return SPOT_make_shared_enabled__(cna, a, o); } emptiness_check_ptr @@ -886,12 +867,10 @@ namespace spot const_twa_graph_ptr ag = std::dynamic_pointer_cast(a); if (ag) // the automaton is explicit { - // NB: The order of the if's matter. 
- if (a->prop_terminal()) - return SPOT_make_shared_enabled__(cne, ag, o); if (a->prop_weak()) - return SPOT_make_shared_enabled__(cne, ag, o); - return SPOT_make_shared_enabled__(cne, ag, o); + return SPOT_make_shared_enabled__(cne, ag, o); + else + return SPOT_make_shared_enabled__(cne, ag, o); } else // the automaton is abstract { diff --git a/spot/twaalgos/gfguarantee.cc b/spot/twaalgos/gfguarantee.cc index 8a4477a7a..606e7f77b 100644 --- a/spot/twaalgos/gfguarantee.cc +++ b/spot/twaalgos/gfguarantee.cc @@ -57,7 +57,7 @@ namespace spot bool want_merge_edges = false; twa_graph_ptr aut = std::const_pointer_cast(si.get_aut()); - if (!is_terminal_automaton(aut, &si, true)) + if (!is_terminal_automaton(aut, &si)) throw std::runtime_error("g_f_terminal() expects a terminal automaton"); // If a terminal automaton has only one SCC, it is either @@ -490,13 +490,13 @@ namespace spot return nullptr; scc_info si(reduced); - if (!is_terminal_automaton(reduced, &si, true)) + if (!is_terminal_automaton(reduced, &si)) return nullptr; do_g_f_terminal_inplace(si, state_based); if (!deterministic) { scc_info si2(aut); - if (!is_terminal_automaton(aut, &si2, true)) + if (!is_terminal_automaton(aut, &si2)) return reduced; do_g_f_terminal_inplace(si2, state_based); if (aut->num_states() < reduced->num_states()) diff --git a/spot/twaalgos/strength.cc b/spot/twaalgos/strength.cc index 230063c38..cd89c6892 100644 --- a/spot/twaalgos/strength.cc +++ b/spot/twaalgos/strength.cc @@ -32,8 +32,7 @@ namespace spot namespace { template - bool is_type_automaton(const twa_graph_ptr& aut, scc_info* si, - bool ignore_trivial_term = false) + bool is_type_automaton(const twa_graph_ptr& aut, scc_info* si) { // Create an scc_info if the user did not give one to us. bool need_si = !si; @@ -85,36 +84,29 @@ namespace spot break; } } - if (terminal && si->is_accepting_scc(i) && !is_complete_scc(*si, i)) + if (terminal && is_term && si->is_accepting_scc(i)) { - is_term = false; - if (!set) + is_term = is_complete_scc(*si, i); + if (is_term) + { + for (unsigned j: si->succ(i)) + if (si->is_rejecting_scc(j)) + { + is_term = false; + break; + } + } + if (!is_term && !set) break; } } - // A terminal automaton should accept any word that has a prefix - // leading to an accepting edge. In other words, we cannot have - // an accepting edge that goes into a rejecting SCC. 
- if (terminal && is_term && !ignore_trivial_term) - for (auto& e: aut->edges()) - if (si->is_rejecting_scc(si->scc_of(e.dst)) - && aut->acc().accepting(e.acc)) - { - is_term = false; - break; - } exit: if (need_si) delete si; if (set) { if (terminal) - { - if (!ignore_trivial_term) - aut->prop_terminal(is_term && is_weak); - else if (is_term && is_weak) - aut->prop_terminal(true); - } + aut->prop_terminal(is_term && is_weak); aut->prop_weak(is_weak); aut->prop_very_weak(is_single_state_scc && is_weak); if (inweak) @@ -127,19 +119,23 @@ namespace spot } bool - is_terminal_automaton(const const_twa_graph_ptr& aut, scc_info* si, - bool ignore_trivial_term) + is_terminal_automaton(const const_twa_graph_ptr& aut, scc_info* si) { trival v = aut->prop_terminal(); if (v.is_known()) return v.is_true(); bool res = - is_type_automaton(std::const_pointer_cast(aut), si, - ignore_trivial_term); + is_type_automaton(std::const_pointer_cast(aut), si); std::const_pointer_cast(aut)->prop_terminal(res); return res; } + bool + is_terminal_automaton(const const_twa_graph_ptr& aut, scc_info* si, bool) + { + return is_terminal_automaton(aut, si); + } + bool is_weak_automaton(const const_twa_graph_ptr& aut, scc_info* si) { diff --git a/spot/twaalgos/strength.hh b/spot/twaalgos/strength.hh index c979feebe..15122518a 100644 --- a/spot/twaalgos/strength.hh +++ b/spot/twaalgos/strength.hh @@ -25,11 +25,8 @@ namespace spot /// \brief Check whether an automaton is terminal. /// /// An automaton is terminal if it is weak, all its accepting SCCs - /// are complete, and no accepting transitions lead to a - /// non-accepting SCC. - /// - /// If ignore_trivial_scc is set, accepting transitions from trivial - /// SCCs are ignored. + /// are complete, and no accepting SCC may lead to a non-accepting + /// SCC. /// /// This property guarantees that a word is accepted if it has some /// prefix that reaches an accepting transition. @@ -43,8 +40,16 @@ namespace spot /// the prop_terminal() property of the automaton as a side-effect, /// so further calls will return in constant-time. SPOT_API bool - is_terminal_automaton(const const_twa_graph_ptr& aut, scc_info* sm = nullptr, - bool ignore_trivial_scc = false); + is_terminal_automaton(const const_twa_graph_ptr& aut, + scc_info* sm = nullptr); + + + // 3-arg form was deprecated in Spot 2.12 + SPOT_DEPRECATED("is third argument of is_terminal_automaton()" + " is now ignored") + SPOT_API bool + is_terminal_automaton(const const_twa_graph_ptr& aut, + scc_info* sm, bool); /// \brief Check whether an automaton is weak. 
/// diff --git a/tests/core/ikwiad.cc b/tests/core/ikwiad.cc index a46168651..ff97ea78f 100644 --- a/tests/core/ikwiad.cc +++ b/tests/core/ikwiad.cc @@ -1368,8 +1368,7 @@ checked_main(int argc, char** argv) } else { - bool g = is_terminal_automaton(ensure_digraph(a), - nullptr, true); + bool g = is_terminal_automaton(ensure_digraph(a)); bool s = is_safety_automaton(ensure_digraph(a)); if (g && !s) { diff --git a/tests/core/readsave.test b/tests/core/readsave.test index 9be1ee4c2..a8c301f70 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -826,7 +826,7 @@ test `autfilt --is-terminal -c output4` = 0 sed 's/\[0\]/[t]/g' expect4 > output4d test `autfilt -B --small output4d | autfilt --is-terminal -c` = 1 -test `autfilt --is-terminal -c output4d` = 0 # FIXME: Issue #553 +test `autfilt --is-terminal -c output4d` = 1 # Issue #553 autfilt -B -Hv --small input4 >output5 cat >expect5< Date: Wed, 22 Nov 2023 15:45:10 +0100 Subject: [PATCH 372/606] acc: improve the "too many acceptance sets used" message * spot/twa/acc.cc (report_too_many_sets): Mention --enable-max-sets and the email address for reporting problems. --- spot/twa/acc.cc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index e7b2a563e..a06e784a2 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -39,7 +39,14 @@ namespace spot #define STR(x) #x #define VALUESTR(x) STR(x) throw std::runtime_error("Too many acceptance sets used. " - "The limit is " VALUESTR(SPOT_MAX_ACCSETS) "."); + "The limit is " VALUESTR(SPOT_MAX_ACCSETS) + ".\n\nIf you do not mind a slowdown, you could" + " try recompiling Spot with a larger limit.\n" + "See the --enable-max-accsets=N " + "configure option.\n\nIf you believe Spot could" + " be improved to solve your problem\n" + "using the current limit, please email" + " ."); } std::ostream& operator<<(std::ostream& os, spot::acc_cond::mark_t m) From 0dd623b3580b4a09b165d18671aae87f90d04d44 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 22 Nov 2023 16:15:00 +0100 Subject: [PATCH 373/606] hoa: improve the diagnostic for unregistered propositions * spot/twaalgos/hoa.cc: Here. --- spot/twaalgos/hoa.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 0f2a8e8b1..7ad37db28 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -411,9 +411,19 @@ namespace spot for (auto& i: sup) all &= bdd_support(i.first); all_ap = aut->ap_vars(); + // Complains if the transitions use variables that hasn't been + // declared in the AP set of the automaton. The error seems + // to be frequent enough to warrant a longer message. We only + // do this diagnostic in the HOA printer and not in the dot + // printer so that we still have one way to print the + // automaton for debugging. if (bdd_exist(all, all_ap) != bddtrue) - throw std::runtime_error("print_hoa(): automaton uses " - "unregistered atomic propositions"); + throw std::runtime_error + ("print_hoa(): automaton uses unregistered atomic propositions\n\n" + "This error usually occurs if the algorithm that created " + "this automaton is\nmissing a call to copy_ap_of() or " + "register_ap(). 
If you believe this is\na bug in Spot itself, " + "please email "); all = all_ap; while (all != bddtrue) From 193fdd6f95beee5012968bb5ddd6074f2774437d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Nov 2023 17:08:31 +0100 Subject: [PATCH 374/606] python: add easy ways to remove highlights MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #554, reported by Dávid Smolka. * python/spot/impl.i (highlight_edge, highlight_state): Add versions where the color is nullptr and map that to None. (remove_highlight_states, remove_highlight_edges): New function. * tests/python/highlighting.ipynb: Demonstrate those new methods. --- NEWS | 5 + python/spot/impl.i | 40 ++++ tests/python/highlighting.ipynb | 311 +++++++++++++++++++++++++++----- 3 files changed, 315 insertions(+), 41 deletions(-) diff --git a/NEWS b/NEWS index 23204222e..fb8177c7a 100644 --- a/NEWS +++ b/NEWS @@ -126,6 +126,11 @@ New in spot 2.11.6.dev (not yet released) drop_false_edges=False argument to disable the historical behavior of ignoring edges labeled by False. + - Calling aut.highlight_state(s, None) or aut.highlight_edge(e, + None) may now be used to remove the highlighting color of some + given state or edge. Use aut.remove_highlight_states() or + aut.remove_highlight_edges() to remove all colors. (Issue #554.) + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/python/spot/impl.i b/python/spot/impl.i index e8160c362..e1dd7f296 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -407,6 +407,16 @@ namespace swig $result = SWIG_FromCharPtr($1->c_str()); } +%typemap(typecheck, precedence=2000) std::nullptr_t { + $1 = $input == Py_None; +} + +%typemap(in) std::nullptr_t { + if ($input != Py_None) + %argument_fail(SWIG_TypeError, "std::nullptr_t", $symname, $argnum); + $1 = nullptr; +} + // For some reason, Swig can convert [aut1,aut2,...] into // std::vector, but not into // std::vector. 
Let's fix that by using @@ -1037,6 +1047,21 @@ static void* ptr_for_bdddict(PyObject* obj) return self; } + twa* highlight_state(unsigned state, std::nullptr_t color) // color=None + { + (void) color; + if (std::map* hs = + self->get_named_prop>("highlight-states")) + hs->erase(state); + return self; + } + + twa* remove_highlight_states() + { + self->set_named_prop("highlight-states", nullptr); + return self; + } + twa* highlight_edge(unsigned edge, unsigned color) { auto ht = @@ -1049,6 +1074,21 @@ static void* ptr_for_bdddict(PyObject* obj) (*ht)[edge] = color; return self; } + + twa* highlight_edge(unsigned edge, std::nullptr_t color) // color=None + { + (void) color; + if (std::map* hs = + self->get_named_prop>("highlight-edges")) + hs->erase(edge); + return self; + } + + twa* remove_highlight_edges() + { + self->set_named_prop("highlight-edges", nullptr); + return self; + } } %extend spot::internal::state_out> { diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index a35f88334..6fe0856a1 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -247,7 +247,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bb720> >" + " *' at 0x7fe86e410bd0> >" ] }, "execution_count": 4, @@ -359,7 +359,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bbb40> >" + " *' at 0x7fe86e4110e0> >" ] }, "execution_count": 5, @@ -469,7 +469,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bb720> >" + " *' at 0x7fe86e410bd0> >" ] }, "execution_count": 6, @@ -549,6 +549,235 @@ "print(a.to_str('HOA', '1.1'))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Removing highlights" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use `None` as the color to remove some specific highlights." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fe86e411e30> >" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a.highlight_state(0, None).highlight_edges([4, 2], None)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or use `remove_highlight_states()` or `remove_highlight_edges()` to remove all highlights." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fe86e411c50> >" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a.remove_highlight_states().remove_highlight_edges()" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -562,7 +791,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -702,10 +931,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bba50> >" + " *' at 0x7fe86e4123a0> >" ] }, - "execution_count": 8, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -716,7 +945,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -741,7 +970,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -757,7 +986,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -897,10 +1126,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bba50> >" + " *' at 0x7fe86e4123a0> >" ] }, - "execution_count": 11, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -920,7 +1149,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -1235,7 +1464,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3dc8d0> >" + " *' at 0x7fe86e4121f0> >" ] }, "metadata": {}, @@ -1496,7 +1725,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60adc0> >" + " *' at 0x7fe86e410f90> >" ] }, "metadata": {}, @@ -1679,7 +1908,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60a7f0> >" + " *' at 0x7fe86e410ff0> >" ] }, "metadata": {}, @@ -1734,7 +1963,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -1796,7 +2025,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bbdb0> >" + " *' at 0x7fe86e413030> >" ] }, "metadata": {}, @@ -1851,7 +2080,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60b810> >" + " *' at 0x7fe86e412280> >" ] }, "metadata": {}, @@ -1866,7 +2095,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -1945,10 +2174,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bbc60> >" + " *' at 0x7fe86e412310> >" ] }, - "execution_count": 14, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -1959,7 +2188,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -1982,7 +2211,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -1995,7 
+2224,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -2074,7 +2303,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bbc60> >" + " *' at 0x7fe86e412310> >" ] }, "metadata": {}, @@ -2139,7 +2368,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3bbdb0> >" + " *' at 0x7fe86e413030> >" ] }, "metadata": {}, @@ -2194,7 +2423,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60b810> >" + " *' at 0x7fe86e412280> >" ] }, "metadata": {}, @@ -2214,7 +2443,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -2420,7 +2649,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60a7f0> >" + " *' at 0x7fe86e413c00> >" ] }, "metadata": {}, @@ -2505,7 +2734,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60af10> >" + " *' at 0x7fe86e4109c0> >" ] }, "metadata": {}, @@ -2602,7 +2831,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ef60b840> >" + " *' at 0x7fe86e413bd0> >" ] }, "metadata": {}, @@ -2631,7 +2860,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -2771,10 +3000,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3dd860> >" + " *' at 0x7fe86e412b50> >" ] }, - "execution_count": 19, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -2799,7 +3028,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -2939,10 +3168,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3dd860> >" + " *' at 0x7fe86e412b50> >" ] }, - "execution_count": 20, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -2962,7 +3191,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -3102,7 +3331,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7ec3dd860> >" + " *' at 0x7fe86e412b50> >" ] }, "metadata": {}, @@ -3336,7 +3565,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -3582,7 +3811,7 @@ "" ] }, - "execution_count": 22, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } From 55575a11e31c687f3ad7596e75ecbb7d8fb8fbd6 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Nov 2023 20:27:05 +0100 Subject: [PATCH 375/606] twagraph: fix highlight-edges in defrag_states MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #555 reported by Dávid Smolka. * spot/twa/twagraph.cc (defrag_states): Update highlight-edge. * spot/graph/graph.hh (defrag_states): Just point to twa_graph::defrag_states in a comment, because the latter relies on the implementation detail of graph::defrag_state. * tests/python/parsetgba.py: Add test case. * NEWS: Mention the bug. --- NEWS | 4 ++++ spot/graph/graph.hh | 5 +++++ spot/twa/twagraph.cc | 25 +++++++++++++++++++++++++ tests/python/parsetgba.py | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 66 insertions(+) diff --git a/NEWS b/NEWS index fb8177c7a..a9cb0a830 100644 --- a/NEWS +++ b/NEWS @@ -187,6 +187,10 @@ New in spot 2.11.6.dev (not yet released) completely obsolete. This third argument has been marked as depreated. (Issue #553) + - twa::defrag_states(), which is called for instance by + purge_dead_state(), did not update the highlight-edges property. + (Issue #555.) 
+ New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index dc3c221c1..4f62f3dcd 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1485,6 +1485,11 @@ namespace spot // Shift all edges in edges_. The algorithm is // similar to remove_if, but it also keeps the correspondence // between the old and new index as newidx[old] = new. + // + // If you change anything to this logic, you might want to + // double check twa_graph::defrag_states where we need to + // predict the new edges indices in order to update + // highlight-edges. unsigned tend = edges_.size(); std::vector newidx(tend); unsigned dest = 1; diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 1b9e2f484..2bcce5fed 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1283,6 +1283,31 @@ namespace spot } std::swap(*hs, hs2); } + if (auto he = get_named_prop> + ("highlight-edges")) + { + // Unfortunately, the underlying graph, who might remove some + // edges, know nothing about named properties. So we have to + // predict the indices of the edges after + // graph::defrag_states() will run. This might break if + // graph::defrag_states() is changed. + auto& ev = edge_vector(); + unsigned es = ev.size(); + std::vector newedges(es, -1U); + unsigned edgeidx = 1; + for (unsigned e = 1; e < es; ++e) + { + if (is_dead_edge(e) || newst[ev[e].dst] == -1U) + newedges[e] = -1U; + else + newedges[e] = edgeidx++; + } + std::map he2; + for (auto [e, c]: *he) + if (newedges[e] != -1U) + he2.emplace(newedges[e], c); + std::swap(*he, he2); + } for (const char* prop: {"original-classes", "original-states", "degen-levels"}) diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index d6f665ae5..ad657dfcc 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -91,3 +91,35 @@ State: 1 [f] 0 [t] 0 --END--""") + +# Issue #555. +a3 = spot.automaton("""HOA: v1.1 States: 6 Start: 0 AP: 3 "a" "b" "c" +acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels +explicit-labels state-acc !complete properties: !deterministic +exist-branch spot.highlight.edges: 5 3 6 1 7 3 8 2 --BODY-- State: 0 [0] 1 [0] 2 +State: 1 [0] 3 State: 2 [0] 5 State: 3 {0} [0] 3 State: 4 {0} [0] 4 +[1] 4 State: 5 {0} [1] 5 [2] 5 --END--""") +a3.purge_dead_states() +tc.assertEqual(a3.to_str("hoa", "1.1"), """HOA: v1.1 +States: 5 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc !complete +properties: !deterministic exist-branch +spot.highlight.edges: 5 3 6 2 +--BODY-- +State: 0 +[0] 1 +[0] 2 +State: 1 +[0] 3 +State: 2 +[0] 4 +State: 3 {0} +[0] 3 +State: 4 {0} +[1] 4 +[2] 4 +--END--""") From 738d62e0b94ba928f92df246eca1627ecabbcac2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 23 Nov 2023 20:37:25 +0100 Subject: [PATCH 376/606] hoa: do not output empty highlighting maps * spot/twaalgos/hoa.cc: Here. * tests/python/parsetgba.py: Test it. 
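For illustration only (not part of this patch), a minimal Python sketch of the behavior this change targets, assuming a Spot build that already includes the highlight_edge(e, None) API introduced earlier in this series; the formula, edge number, and color below are arbitrary placeholders:

    import spot

    aut = spot.translate('a U b')   # any automaton with at least one edge
    aut.highlight_edge(1, 3)        # give edge #1 some color
    aut.highlight_edge(1, None)     # remove that highlight, leaving an empty map
    # With this change, the now-empty highlighting map is no longer printed,
    # so the HOA 1.1 output should contain no "spot.highlight.edges:" line.
    assert 'spot.highlight' not in aut.to_str('hoa', '1.1')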
--- spot/twaalgos/hoa.cc | 6 ++++-- tests/python/parsetgba.py | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 7ad37db28..902af0007 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -762,7 +762,8 @@ namespace spot if (v1_1) { if (auto hstates = aut->get_named_prop - >("highlight-states")) + >("highlight-states"); + hstates && !hstates->empty()) { os << "spot.highlight.states:"; for (auto& p: *hstates) @@ -770,7 +771,8 @@ namespace spot os << nl; } if (auto hedges = aut->get_named_prop - >("highlight-edges")) + >("highlight-edges"); + hedges && !hedges->empty()) { // Numbering edges is a delicate process. The // "highlight-edges" property uses edges numbers that are diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index ad657dfcc..3964ed28d 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -123,3 +123,26 @@ State: 4 {0} [1] 4 [2] 4 --END--""") +a3.highlight_edges([5, 6], None) +tc.assertEqual(a3.to_str("hoa", "1.1"), """HOA: v1.1 +States: 5 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc !complete +properties: !deterministic exist-branch +--BODY-- +State: 0 +[0] 1 +[0] 2 +State: 1 +[0] 3 +State: 2 +[0] 4 +State: 3 {0} +[0] 3 +State: 4 {0} +[1] 4 +[2] 4 +--END--""") From 234ba2bb841dade5a8f81b1139bc58afc20fc7da Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 28 Nov 2023 17:50:28 +0100 Subject: [PATCH 377/606] work around some gcc 9.4 warnings * spot/twa/twagraph.cc, spot/twaalgos/split.cc: Here. --- spot/twa/twagraph.cc | 3 ++- spot/twaalgos/split.cc | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 2bcce5fed..0fc9aa292 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -368,11 +368,12 @@ namespace spot unsigned n_edges = e_vec.size(); if (n_edges <= 1) { - if (n_states == 1) + if (n_states <= 1) return 0; // We don't have a very convenient way to resize the state // vector. std::vector remap(n_states, -1U); + SPOT_ASSUME(remap.data() != nullptr); // help GCC 9.4 remap[0] = 0; get_graph().defrag_states(remap, 1); SPOT_ASSERT(num_states() == 1); diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index 8ac5682f3..11146837d 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -232,7 +232,11 @@ namespace spot // number of a BDD that as been (or will be) split, and begin/end // denotes a range of existing transition numbers that cover the // split. - typedef std::pair cached_t; + // + // std::pair causes some noexcept warnings when used in + // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair + // instead. + typedef robin_hood::pair cached_t; robin_hood::unordered_map split_cond; bdd all = aut->ap_vars(); From cee2819a4565671e6bee7fe6dafe192b73d4abe7 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Wed, 29 Nov 2023 09:16:01 +0000 Subject: [PATCH 378/606] auts_to_aiger: Fix output name index * spot/twaalgos/aiger.cc: Correct the position of an output in a realizability_simplifier. * tests/core/ltlsynt.test: Add test. 
--- spot/twaalgos/aiger.cc | 10 +++++----- tests/core/ltlsynt.test | 4 ++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index d41e2ae8a..8f3bfdcc7 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -2042,9 +2042,10 @@ namespace repr = repr[0]; } // is repr an input? + auto ap_name = repr.ap_name(); if (auto it2 = std::find(input_names_all.begin(), input_names_all.end(), - repr.ap_name()); + ap_name); it2 != input_names_all.end()) { unsigned ivar = @@ -2055,10 +2056,9 @@ namespace // is repr an output? else { - assert(std::find(output_names_all.begin(), - output_names_all.end(), - repr.ap_name()) == - output_names_all.end()); + it2 = std::find(output_names_all.begin(), + output_names_all.end(), ap_name); + assert(it2 != output_names_all.end()); unsigned outnum = it2 - output_names_all.begin(); unsigned outvar = circuit.output(outnum); circuit.set_output(i, outvar + neg_repr); diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 3944c2076..537872c23 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1106,3 +1106,7 @@ REALIZABLE REALIZABLE EOF diff out expected + +f1="((G ((p0) <-> (! (p1)))) && (((((F ((b) && (G (F (a))))) ||\ + (F ((c) && (G (F (! (a))))))) && (F (b))) && (F (c))) <-> (G (F (p0)))))" +ltlsynt -f "$f1" --outs="p1, p0" --aiger > /dev/null \ No newline at end of file From c9d9c10cb2bbadcc717a964452f7df6135f49966 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 29 Nov 2023 16:58:00 +0100 Subject: [PATCH 379/606] * HACKING: Mention bear, to build compile_commands.json. --- HACKING | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/HACKING b/HACKING index 8029fe6db..59a0fc401 100644 --- a/HACKING +++ b/HACKING @@ -290,6 +290,26 @@ would understand with: make check LOG_DRIVER=$PWD/tools/test-driver-teamcity +Generating compile_commands.json +-------------------------------- + +The file compile_commands.json is used by many clang tools in order to +know how a single file is compiled (in particular, which include paths +should be used). Autotools-based build systems do not support the +generation of this file, but there is a tool called "bear" (for "Build +EAR") that is packaged with most distribution that can be used here. +Simply run a full build through "bear" using something like this: + + % ./configure CC=clang-17 CXX=clang++-17 + % make -j8 clean # make sure your will rebuild everything + % bear -- make -j8 + +This will simply intercept all command executions are record them in +the compile_commands.json database. Depending on the tools you plan to +use, you probably want to compile everything with clang, as shown above. + + + C++ Coding conventions ====================== From 3d05ecb4ac35c182018b70190431dab87f9a0b68 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 29 Nov 2023 16:59:45 +0100 Subject: [PATCH 380/606] remove many useless includes Removal suggestions from clang-include-cleaner-17 applied manually. 
* spot/gen/automata.cc, spot/ltsmin/ltsmin.cc, spot/misc/bitvect.cc, spot/misc/intvcomp.cc, spot/misc/satsolver.cc, spot/misc/tmpfile.cc, spot/priv/trim.cc, spot/priv/weight.cc, spot/ta/taexplicit.cc, spot/ta/tgtaexplicit.cc, spot/ta/tgtaproduct.cc, spot/taalgos/emptinessta.cc, spot/taalgos/minimize.cc, spot/taalgos/reachiter.cc, spot/taalgos/statessetbuilder.cc, spot/taalgos/tgba2ta.cc, spot/tl/apcollect.cc, spot/tl/contain.cc, spot/tl/exclusive.cc, spot/tl/formula.cc, spot/tl/mark.cc, spot/tl/randomltl.cc, spot/tl/relabel.cc, spot/tl/remove_x.cc, spot/twa/acc.cc, spot/twa/bdddict.cc, spot/twa/taatgba.cc, spot/twa/twagraph.cc, spot/twaalgos/aiger.cc, spot/twaalgos/alternation.cc, spot/twaalgos/canonicalize.cc, spot/twaalgos/cobuchi.cc, spot/twaalgos/complement.cc, spot/twaalgos/compsusp.cc, spot/twaalgos/dbranch.cc, spot/twaalgos/degen.cc, spot/twaalgos/determinize.cc, spot/twaalgos/dot.cc, spot/twaalgos/dtbasat.cc, spot/twaalgos/dtwasat.cc, spot/twaalgos/emptiness.cc, spot/twaalgos/forq_contains.cc, spot/twaalgos/game.cc, spot/twaalgos/genem.cc, spot/twaalgos/gv04.cc, spot/twaalgos/hoa.cc, spot/twaalgos/isunamb.cc, spot/twaalgos/isweakscc.cc, spot/twaalgos/lbtt.cc, spot/twaalgos/ltl2tgba_fm.cc, spot/twaalgos/magic.cc, spot/twaalgos/mealy_machine.cc, spot/twaalgos/minimize.cc, spot/twaalgos/neverclaim.cc, spot/twaalgos/parity.cc, spot/twaalgos/powerset.cc, spot/twaalgos/product.cc, spot/twaalgos/randomgraph.cc, spot/twaalgos/randomize.cc, spot/twaalgos/relabel.cc, spot/twaalgos/remfin.cc, spot/twaalgos/remprop.cc, spot/twaalgos/sccinfo.cc, spot/twaalgos/se05.cc, spot/twaalgos/sepsets.cc, spot/twaalgos/simulation.cc, spot/twaalgos/split.cc, spot/twaalgos/strength.cc, spot/twaalgos/stutter.cc, spot/twaalgos/synthesis.cc, spot/twaalgos/tau03.cc, spot/twaalgos/tau03opt.cc, spot/twaalgos/translate.cc, spot/twacube/cube.cc: Remove useless includes. 
--- spot/gen/automata.cc | 1 - spot/ltsmin/ltsmin.cc | 5 ----- spot/misc/bitvect.cc | 1 - spot/misc/intvcomp.cc | 1 - spot/misc/satsolver.cc | 2 +- spot/misc/tmpfile.cc | 2 +- spot/priv/trim.cc | 1 - spot/priv/weight.cc | 1 - spot/ta/taexplicit.cc | 1 - spot/ta/tgtaexplicit.cc | 2 -- spot/ta/tgtaproduct.cc | 3 --- spot/taalgos/emptinessta.cc | 2 -- spot/taalgos/minimize.cc | 1 - spot/taalgos/reachiter.cc | 1 - spot/taalgos/statessetbuilder.cc | 1 - spot/taalgos/tgba2ta.cc | 3 --- spot/tl/apcollect.cc | 4 +--- spot/tl/contain.cc | 2 -- spot/tl/exclusive.cc | 1 - spot/tl/formula.cc | 1 - spot/tl/mark.cc | 2 -- spot/tl/randomltl.cc | 3 --- spot/tl/relabel.cc | 1 - spot/tl/remove_x.cc | 1 - spot/twa/acc.cc | 1 - spot/twa/bdddict.cc | 2 -- spot/twa/taatgba.cc | 2 -- spot/twa/twagraph.cc | 1 - spot/twaalgos/aiger.cc | 1 - spot/twaalgos/alternation.cc | 2 +- spot/twaalgos/canonicalize.cc | 1 - spot/twaalgos/cobuchi.cc | 2 +- spot/twaalgos/complement.cc | 1 - spot/twaalgos/compsusp.cc | 4 +--- spot/twaalgos/dbranch.cc | 2 -- spot/twaalgos/degen.cc | 6 +----- spot/twaalgos/determinize.cc | 2 -- spot/twaalgos/dot.cc | 2 -- spot/twaalgos/dtbasat.cc | 2 -- spot/twaalgos/dtwasat.cc | 2 -- spot/twaalgos/emptiness.cc | 1 - spot/twaalgos/forq_contains.cc | 4 ---- spot/twaalgos/game.cc | 1 - spot/twaalgos/genem.cc | 1 - spot/twaalgos/gv04.cc | 1 - spot/twaalgos/hoa.cc | 1 - spot/twaalgos/isunamb.cc | 2 -- spot/twaalgos/isweakscc.cc | 1 - spot/twaalgos/lbtt.cc | 12 ++++-------- spot/twaalgos/ltl2tgba_fm.cc | 2 -- spot/twaalgos/magic.cc | 1 - spot/twaalgos/mealy_machine.cc | 3 --- spot/twaalgos/minimize.cc | 6 +----- spot/twaalgos/neverclaim.cc | 2 -- spot/twaalgos/parity.cc | 4 ---- spot/twaalgos/powerset.cc | 4 ---- spot/twaalgos/product.cc | 1 - spot/twaalgos/randomgraph.cc | 3 --- spot/twaalgos/randomize.cc | 1 - spot/twaalgos/relabel.cc | 3 --- spot/twaalgos/remfin.cc | 2 -- spot/twaalgos/remprop.cc | 1 - spot/twaalgos/sccinfo.cc | 3 --- spot/twaalgos/se05.cc | 1 - spot/twaalgos/sepsets.cc | 1 - spot/twaalgos/simulation.cc | 2 -- spot/twaalgos/split.cc | 4 ---- spot/twaalgos/strength.cc | 1 - spot/twaalgos/stutter.cc | 5 ----- spot/twaalgos/synthesis.cc | 5 +---- spot/twaalgos/tau03.cc | 2 -- spot/twaalgos/tau03opt.cc | 1 - spot/twaalgos/translate.cc | 1 - spot/twacube/cube.cc | 1 - 74 files changed, 13 insertions(+), 147 deletions(-) diff --git a/spot/gen/automata.cc b/spot/gen/automata.cc index 2e6c49458..a7f858a64 100644 --- a/spot/gen/automata.cc +++ b/spot/gen/automata.cc @@ -19,7 +19,6 @@ #include "config.h" #include #include -#include #include namespace spot diff --git a/spot/ltsmin/ltsmin.cc b/spot/ltsmin/ltsmin.cc index 27e869f46..145d91845 100644 --- a/spot/ltsmin/ltsmin.cc +++ b/spot/ltsmin/ltsmin.cc @@ -17,13 +17,10 @@ // along with this program. If not, see . 
#include "config.h" -#include #include #include #include #include -#include -#include #include #include @@ -32,9 +29,7 @@ #include #include -#include #include -#include using namespace std::string_literals; diff --git a/spot/misc/bitvect.cc b/spot/misc/bitvect.cc index 90eb2a55f..69de2d76f 100644 --- a/spot/misc/bitvect.cc +++ b/spot/misc/bitvect.cc @@ -25,7 +25,6 @@ #include #include #include -#include namespace spot { diff --git a/spot/misc/intvcomp.cc b/spot/misc/intvcomp.cc index b400d230c..c4d60619a 100644 --- a/spot/misc/intvcomp.cc +++ b/spot/misc/intvcomp.cc @@ -21,7 +21,6 @@ #include #include #include -#include namespace spot { diff --git a/spot/misc/satsolver.cc b/spot/misc/satsolver.cc index 9180b7f7c..fa5075f4f 100644 --- a/spot/misc/satsolver.cc +++ b/spot/misc/satsolver.cc @@ -26,7 +26,7 @@ #include #include #include -#include +#include // could be our replacement WIFEXITED from lib/ namespace spot { diff --git a/spot/misc/tmpfile.cc b/spot/misc/tmpfile.cc index 154ee603c..e68cde7da 100644 --- a/spot/misc/tmpfile.cc +++ b/spot/misc/tmpfile.cc @@ -20,7 +20,7 @@ #include #include #include -#include +#include // unlink #include using namespace std::string_literals; diff --git a/spot/priv/trim.cc b/spot/priv/trim.cc index 3fb449b86..db162dde2 100644 --- a/spot/priv/trim.cc +++ b/spot/priv/trim.cc @@ -20,7 +20,6 @@ #include #include #include -#include namespace spot { diff --git a/spot/priv/weight.cc b/spot/priv/weight.cc index 7a6164204..786ecd5c8 100644 --- a/spot/priv/weight.cc +++ b/spot/priv/weight.cc @@ -17,7 +17,6 @@ // along with this program. If not, see . #include "config.h" -#include #include #include diff --git a/spot/ta/taexplicit.cc b/spot/ta/taexplicit.cc index 17a7b6038..6a842ad1f 100644 --- a/spot/ta/taexplicit.cc +++ b/spot/ta/taexplicit.cc @@ -27,7 +27,6 @@ #endif #include -#include #include #include diff --git a/spot/ta/tgtaexplicit.cc b/spot/ta/tgtaexplicit.cc index 14c8f0b14..74d0ab107 100644 --- a/spot/ta/tgtaexplicit.cc +++ b/spot/ta/tgtaexplicit.cc @@ -18,8 +18,6 @@ #include "config.h" #include -#include -#include namespace spot { diff --git a/spot/ta/tgtaproduct.cc b/spot/ta/tgtaproduct.cc index 81fbef2db..591a1c97b 100644 --- a/spot/ta/tgtaproduct.cc +++ b/spot/ta/tgtaproduct.cc @@ -28,9 +28,6 @@ #endif #include -#include -#include -#include #include namespace spot diff --git a/spot/taalgos/emptinessta.cc b/spot/taalgos/emptinessta.cc index e91852388..ba89b3e15 100644 --- a/spot/taalgos/emptinessta.cc +++ b/spot/taalgos/emptinessta.cc @@ -27,9 +27,7 @@ #endif #include -#include #include -#include namespace spot { diff --git a/spot/taalgos/minimize.cc b/spot/taalgos/minimize.cc index 28749ecdb..bda3a60f5 100644 --- a/spot/taalgos/minimize.cc +++ b/spot/taalgos/minimize.cc @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/taalgos/reachiter.cc b/spot/taalgos/reachiter.cc index b57f99a0d..b21d7b0ad 100644 --- a/spot/taalgos/reachiter.cc +++ b/spot/taalgos/reachiter.cc @@ -20,7 +20,6 @@ #include #include -#include using namespace std; namespace spot diff --git a/spot/taalgos/statessetbuilder.cc b/spot/taalgos/statessetbuilder.cc index 563a96a92..399f6af41 100644 --- a/spot/taalgos/statessetbuilder.cc +++ b/spot/taalgos/statessetbuilder.cc @@ -17,7 +17,6 @@ // along with this program. If not, see . 
#include "config.h" -#include #include #include #include diff --git a/spot/taalgos/tgba2ta.cc b/spot/taalgos/tgba2ta.cc index 096080d6f..40e4b92cc 100644 --- a/spot/taalgos/tgba2ta.cc +++ b/spot/taalgos/tgba2ta.cc @@ -26,12 +26,9 @@ #define trace while (0) std::clog #endif -#include #include -#include #include #include -#include #include using namespace std; diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 8e6cdefea..61a570f3d 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -19,12 +19,10 @@ #include "config.h" #include #include -#include #include #include -#include +#include #include -#include #include #include diff --git a/spot/tl/contain.cc b/spot/tl/contain.cc index 6782f6211..3df0134db 100644 --- a/spot/tl/contain.cc +++ b/spot/tl/contain.cc @@ -18,11 +18,9 @@ #include "config.h" #include -#include #include #include #include -#include #include #include diff --git a/spot/tl/exclusive.cc b/spot/tl/exclusive.cc index 2cc22af3e..4a284d139 100644 --- a/spot/tl/exclusive.cc +++ b/spot/tl/exclusive.cc @@ -19,7 +19,6 @@ #include "config.h" #include #include -#include #include #include diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index a1e67b475..3c5afc8d1 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/tl/mark.cc b/spot/tl/mark.cc index b20d367b9..7bff532c7 100644 --- a/spot/tl/mark.cc +++ b/spot/tl/mark.cc @@ -18,8 +18,6 @@ #include "config.h" #include -#include -#include #include #include diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index 21e6b61b3..9aa604ee2 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -18,14 +18,11 @@ #include "config.h" #include -#include #include #include #include #include #include -#include -#include namespace spot { diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index 1a376f11b..385445ab7 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/tl/remove_x.cc b/spot/tl/remove_x.cc index 06f48179c..9a9c71127 100644 --- a/spot/tl/remove_x.cc +++ b/spot/tl/remove_x.cc @@ -17,7 +17,6 @@ // along with this program. If not, see . #include "config.h" -#include #include #include diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index a06e784a2..fb7373ead 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -20,7 +20,6 @@ #include "config.h" #include #include -#include #include #include #include diff --git a/spot/twa/bdddict.cc b/spot/twa/bdddict.cc index e849ede7a..abb0704a5 100644 --- a/spot/twa/bdddict.cc +++ b/spot/twa/bdddict.cc @@ -18,11 +18,9 @@ #include "config.h" #include -#include #include #include #include -#include #include "spot/priv/bddalloc.hh" #include diff --git a/spot/twa/taatgba.cc b/spot/twa/taatgba.cc index 95ddc85ee..ccd28ede9 100644 --- a/spot/twa/taatgba.cc +++ b/spot/twa/taatgba.cc @@ -17,10 +17,8 @@ // along with this program. If not, see . 
#include "config.h" -#include #include #include -#include #include #include #include diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 0fc9aa292..9cf0a00ff 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 8f3bfdcc7..c5a2f8961 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -29,7 +29,6 @@ #include #include #include -#include #include #include diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index b247a4e57..88ca240eb 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -20,8 +20,8 @@ #include #include #include +#include #include -#include namespace spot { diff --git a/spot/twaalgos/canonicalize.cc b/spot/twaalgos/canonicalize.cc index 3053445a4..27a5134d8 100644 --- a/spot/twaalgos/canonicalize.cc +++ b/spot/twaalgos/canonicalize.cc @@ -19,7 +19,6 @@ #include "config.h" #include #include -#include #include namespace diff --git a/spot/twaalgos/cobuchi.cc b/spot/twaalgos/cobuchi.cc index 6eacdb489..986c982fb 100644 --- a/spot/twaalgos/cobuchi.cc +++ b/spot/twaalgos/cobuchi.cc @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include #include diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 728b764d5..0a7f0cc16 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -18,7 +18,6 @@ #include "config.h" #include -#include #include #include #include diff --git a/spot/twaalgos/compsusp.cc b/spot/twaalgos/compsusp.cc index 859602b3f..e738b7060 100644 --- a/spot/twaalgos/compsusp.cc +++ b/spot/twaalgos/compsusp.cc @@ -24,11 +24,9 @@ #include #include #include -#include #include -#include +#include #include -#include namespace spot { diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc index d4ba24db0..3e46c56c3 100644 --- a/spot/twaalgos/dbranch.cc +++ b/spot/twaalgos/dbranch.cc @@ -19,9 +19,7 @@ #include "config.h" #include -#include #include -#include #include #include diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 6e718459b..fc737e3f7 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -19,16 +19,12 @@ #include "config.h" #include #include -#include #include +#include #include #include #include -#include #include -#include -#include -#include //#define DEGEN_DEBUG diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index c99933467..a364ffa48 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -21,10 +21,8 @@ #include #include #include -#include #include #include - #include #include #include diff --git a/spot/twaalgos/dot.cc b/spot/twaalgos/dot.cc index fd76e173f..2b5e68d01 100644 --- a/spot/twaalgos/dot.cc +++ b/spot/twaalgos/dot.cc @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -32,7 +31,6 @@ #include #include #include -#include #include using namespace std::string_literals; diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index bc556d264..90319bd20 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -17,8 +17,6 @@ // along with this program. If not, see . 
#include "config.h" -#include -#include #include #include #include diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 9aee79d57..64dfbd5f6 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -17,8 +17,6 @@ // along with this program. If not, see . #include "config.h" -#include -#include #include #include #include diff --git a/spot/twaalgos/emptiness.cc b/spot/twaalgos/emptiness.cc index d3f05ca38..10cc89553 100644 --- a/spot/twaalgos/emptiness.cc +++ b/spot/twaalgos/emptiness.cc @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 1b6b54f01..106553160 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -19,9 +19,6 @@ #include "config.h" #include #include - -#include -#include #include #include #include @@ -34,7 +31,6 @@ #include #include #include -#include #include #include diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 4850e07b2..3c7758161 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -20,7 +20,6 @@ #include -#include #include #include #include diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index 63ee7ce24..6e26d43fe 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -18,7 +18,6 @@ #include "config.h" #include -#include namespace spot { diff --git a/spot/twaalgos/gv04.cc b/spot/twaalgos/gv04.cc index cdaebcc4a..4440437c3 100644 --- a/spot/twaalgos/gv04.cc +++ b/spot/twaalgos/gv04.cc @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 902af0007..644d8f166 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/twaalgos/isunamb.cc b/spot/twaalgos/isunamb.cc index 22fc0cdd8..4a9fc45d0 100644 --- a/spot/twaalgos/isunamb.cc +++ b/spot/twaalgos/isunamb.cc @@ -21,8 +21,6 @@ #include #include #include -#include -#include namespace spot { diff --git a/spot/twaalgos/isweakscc.cc b/spot/twaalgos/isweakscc.cc index 6f4502e30..80fc09c26 100644 --- a/spot/twaalgos/isweakscc.cc +++ b/spot/twaalgos/isweakscc.cc @@ -17,7 +17,6 @@ // along with this program. If not, see . #include "config.h" -#include #include #include diff --git a/spot/twaalgos/lbtt.cc b/spot/twaalgos/lbtt.cc index cb0d1abe5..9d8e58670 100644 --- a/spot/twaalgos/lbtt.cc +++ b/spot/twaalgos/lbtt.cc @@ -17,15 +17,11 @@ // along with this program. If not, see . #include "config.h" -#include -#include -#include -#include -#include -#include -#include -#include "spot/priv/accmap.hh" #include +#include +#include +#include +#include using namespace std::string_literals; diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 3ce8f86db..24c797f89 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -17,10 +17,8 @@ // along with this program. If not, see . 
#include "config.h" -#include #include #include -#include #include #include #include diff --git a/spot/twaalgos/magic.cc b/spot/twaalgos/magic.cc index 4fb3d5b07..635bba286 100644 --- a/spot/twaalgos/magic.cc +++ b/spot/twaalgos/magic.cc @@ -28,7 +28,6 @@ #include #include -#include #include #include #include diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 89f44cc6a..512848edb 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -33,9 +32,7 @@ #include #include #include -#include #include -#include #include #include diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 4628df900..faaca1cae 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -25,17 +25,14 @@ #endif #include "config.h" -#include -#include #include #include #include +#include #include #include -#include #include #include -#include #include #include #include @@ -44,7 +41,6 @@ #include #include #include -#include namespace spot { diff --git a/spot/twaalgos/neverclaim.cc b/spot/twaalgos/neverclaim.cc index 59cb152a0..5afb0a0d4 100644 --- a/spot/twaalgos/neverclaim.cc +++ b/spot/twaalgos/neverclaim.cc @@ -18,9 +18,7 @@ #include "config.h" #include -#include #include -#include #include #include #include diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index a54942400..f3765305f 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -19,14 +19,10 @@ #include "config.h" #include #include -#include -#include #include #include #include #include -#include -#include namespace spot { diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index c192d72fb..044ea21b9 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -21,16 +21,12 @@ #include #include #include -#include #include #include -#include #include -#include #include #include #include -#include #include #include diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index e494da1bb..ccb6085d5 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -24,7 +24,6 @@ #include #include #include -#include using namespace std::string_literals; diff --git a/spot/twaalgos/randomgraph.cc b/spot/twaalgos/randomgraph.cc index b66b6df01..fe0f1edb9 100644 --- a/spot/twaalgos/randomgraph.cc +++ b/spot/twaalgos/randomgraph.cc @@ -20,9 +20,6 @@ #include #include #include -#include -#include -#include #include #include #include diff --git a/spot/twaalgos/randomize.cc b/spot/twaalgos/randomize.cc index 32765ecbe..19c32bea0 100644 --- a/spot/twaalgos/randomize.cc +++ b/spot/twaalgos/randomize.cc @@ -19,7 +19,6 @@ #include "config.h" #include #include -#include #include #include diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index 66c566846..594b8bdeb 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -20,12 +20,9 @@ #include #include #include - #include - #include #include -#include namespace spot diff --git a/spot/twaalgos/remfin.cc b/spot/twaalgos/remfin.cc index 6274c8255..435fdfa6c 100644 --- a/spot/twaalgos/remfin.cc +++ b/spot/twaalgos/remfin.cc @@ -22,8 +22,6 @@ #include #include #include -#include -#include #include #include diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 380ba1a71..14570f148 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -19,7 +19,6 @@ #include "config.h" #include #include -#include #include #include diff --git 
a/spot/twaalgos/sccinfo.cc b/spot/twaalgos/sccinfo.cc index 947c7cd94..e8d2258b3 100644 --- a/spot/twaalgos/sccinfo.cc +++ b/spot/twaalgos/sccinfo.cc @@ -21,11 +21,8 @@ #include #include #include -#include -#include #include #include -#include namespace spot { diff --git a/spot/twaalgos/se05.cc b/spot/twaalgos/se05.cc index 73558fcf7..a4eb21af5 100644 --- a/spot/twaalgos/se05.cc +++ b/spot/twaalgos/se05.cc @@ -28,7 +28,6 @@ #include #include -#include #include #include #include diff --git a/spot/twaalgos/sepsets.cc b/spot/twaalgos/sepsets.cc index 264a9c128..8aa54d42b 100644 --- a/spot/twaalgos/sepsets.cc +++ b/spot/twaalgos/sepsets.cc @@ -18,7 +18,6 @@ #include "config.h" #include -#include namespace spot diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index bdcb21846..1beb41b75 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -20,8 +20,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index 11146837d..d26877373 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -18,12 +18,8 @@ #include "config.h" #include -#include #include #include - -#include -#include #include namespace std diff --git a/spot/twaalgos/strength.cc b/spot/twaalgos/strength.cc index cd89c6892..0ba9d0637 100644 --- a/spot/twaalgos/strength.cc +++ b/spot/twaalgos/strength.cc @@ -18,7 +18,6 @@ #include "config.h" #include -#include #include #include #include diff --git a/spot/twaalgos/stutter.cc b/spot/twaalgos/stutter.cc index 37fb0690f..622c8edeb 100644 --- a/spot/twaalgos/stutter.cc +++ b/spot/twaalgos/stutter.cc @@ -19,25 +19,20 @@ #include "config.h" #include #include -#include #include -#include #include #include #include #include #include #include -#include #include #include #include #include #include #include -#include #include -#include namespace spot { diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 3f5bcf505..fef412e09 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -29,14 +29,11 @@ #include #include #include -#include -#include #include #include - - #include #include +#include // Helper function/structures for split_2step namespace{ diff --git a/spot/twaalgos/tau03.cc b/spot/twaalgos/tau03.cc index ec75d7bb6..307370dd0 100644 --- a/spot/twaalgos/tau03.cc +++ b/spot/twaalgos/tau03.cc @@ -30,8 +30,6 @@ #endif #include -#include -#include #include #include #include diff --git a/spot/twaalgos/tau03opt.cc b/spot/twaalgos/tau03opt.cc index a518160fb..0e7b0f1a4 100644 --- a/spot/twaalgos/tau03opt.cc +++ b/spot/twaalgos/tau03opt.cc @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 99c15ca46..1a5125f91 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -27,7 +27,6 @@ #include #include #include -#include #include namespace spot diff --git a/spot/twacube/cube.cc b/spot/twacube/cube.cc index b901c94a5..0f3531fb7 100644 --- a/spot/twacube/cube.cc +++ b/spot/twacube/cube.cc @@ -20,7 +20,6 @@ #include #include #include -#include namespace spot { From 444d4f773d25bef3738adda8a56ec66759580efe Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 3 Dec 2023 22:26:12 +0100 Subject: [PATCH 381/606] twa: fix issue #555 better MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by Dávid Smolka. 
* spot/twa/twagraph.cc (defrag_states): Also ignore edges with erased source when updating highlight-edges. * tests/python/parsetgba.py: Add Dávid's test case. --- spot/twa/twagraph.cc | 6 ++++-- tests/python/parsetgba.py | 43 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 9cf0a00ff..b83263931 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1287,7 +1287,7 @@ namespace spot ("highlight-edges")) { // Unfortunately, the underlying graph, who might remove some - // edges, know nothing about named properties. So we have to + // edges, knows nothing about named properties. So we have to // predict the indices of the edges after // graph::defrag_states() will run. This might break if // graph::defrag_states() is changed. @@ -1297,7 +1297,9 @@ namespace spot unsigned edgeidx = 1; for (unsigned e = 1; e < es; ++e) { - if (is_dead_edge(e) || newst[ev[e].dst] == -1U) + if (is_dead_edge(e) + || newst[ev[e].dst] == -1U + || newst[ev[e].src] == -1U) newedges[e] = -1U; else newedges[e] = edgeidx++; diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index 3964ed28d..feeaad0dd 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -146,3 +146,46 @@ State: 4 {0} [1] 4 [2] 4 --END--""") + +# Issue #555 again. +a4 = spot.automaton("""HOA: v1.1 States: 5 Start: 2 AP: 3 "p36" "p38" +"p37" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels +explicit-labels state-acc !complete properties: !deterministic +exist-branch spot.highlight.edges: 1 1 3 1 8 1 12 1 --BODY-- State: 0 +[t] 1 State: 1 {0} [!0] 1 State: 2 [!0 | !1 | 2] 0 [t] 3 State: 3 [2] +1 [!2] 4 [2] 1 [!2] 4 State: 4 [!0&2] 1 [!0&!2] 4 [!0&2] 1 [!0&!2] 4 +--END--""") +oi = a4.out_iteraser(2) +while oi: + n = a4.edge_number(oi.current()) + if n == 3: + oi.erase() + else: + oi.advance() +a4.purge_dead_states() +tc.assertEqual(a4.to_str("hoa", "1.1"), +"""HOA: v1.1 +States: 4 +Start: 1 +AP: 3 "p36" "p38" "p37" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc !complete +properties: !deterministic exist-branch +spot.highlight.edges: 6 1 10 1 +--BODY-- +State: 0 {0} +[!0] 0 +State: 1 +[t] 2 +State: 2 +[2] 0 +[!2] 3 +[2] 0 +[!2] 3 +State: 3 +[!0&2] 0 +[!0&!2] 3 +[!0&2] 0 +[!0&!2] 3 +--END--""") From ba86dc6b180b1876b6decd5f5fd0c1c2b46e2fe9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 4 Dec 2023 17:59:09 +0100 Subject: [PATCH 382/606] twa: guard against highlighting of non-existing edges and states MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For issue #556, reported by Dávid Smolka. Not sure if this is a complete fix yet, but that's at least part of the issue. * spot/twa/twagraph.cc (defrag_states): Skip those unexisting edges and states while remaping highlight-states and highlight-edges. * spot/parseaut/parseaut.yy: Likewise for dropped edges. * tests/python/parsetgba.py: Augment test case. --- spot/parseaut/parseaut.yy | 5 ++++- spot/twa/twagraph.cc | 12 +++++++++++- tests/python/parsetgba.py | 12 +++++++----- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 865bf6afd..6410d00de 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -2943,9 +2943,12 @@ namespace spot { // Update the highlight_edges map to deal with removed/added // edges. 
+ unsigned ems = r.edge_map.size(); std::map remap; for (auto [edgnum, color]: *r.highlight_edges) - if (edgnum > 0) /* not expected, but can't trust input data */ + /* edge numbers outside of the actual number of edges read are + not expected, but we can't trust input data */ + if (SPOT_LIKELY(edgnum > 0 && edgnum <= ems)) if (unsigned newnum = r.edge_map[edgnum - 1]; newnum > 0) remap[newnum] = color; std::swap(remap, *r.highlight_edges); diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index b83263931..eb74dd496 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1274,9 +1274,15 @@ namespace spot if (auto hs = get_named_prop> ("highlight-states")) { + unsigned ns = newst.size(); std::map hs2; for (auto p: *hs) { + // Let's just ignore unexisting states. Raising an + // exception here would leave the automaton in a strange + // state. + if (SPOT_UNLIKELY(p.first >= ns)) + continue; unsigned dst = newst[p.first]; if (dst != -1U) hs2[dst] = p.second; @@ -1306,7 +1312,11 @@ namespace spot } std::map he2; for (auto [e, c]: *he) - if (newedges[e] != -1U) + // Let's just ignore unexisting edges. Raising an exception + // here would leave the automaton in a strange state. + if (SPOT_UNLIKELY(e > es)) + continue; + else if (newedges[e] != -1U) he2.emplace(newedges[e], c); std::swap(*he, he2); } diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index feeaad0dd..493d2341f 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -147,15 +147,17 @@ State: 4 {0} [2] 4 --END--""") -# Issue #555 again. +# Issue #555 again. The pairs 30 1 for states and edges are for issue #556 a4 = spot.automaton("""HOA: v1.1 States: 5 Start: 2 AP: 3 "p36" "p38" "p37" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc !complete properties: !deterministic -exist-branch spot.highlight.edges: 1 1 3 1 8 1 12 1 --BODY-- State: 0 -[t] 1 State: 1 {0} [!0] 1 State: 2 [!0 | !1 | 2] 0 [t] 3 State: 3 [2] -1 [!2] 4 [2] 1 [!2] 4 State: 4 [!0&2] 1 [!0&!2] 4 [!0&2] 1 [!0&!2] 4 ---END--""") +exist-branch spot.highlight.edges: 1 1 3 1 8 1 12 1 30 1 +spot.highlight.states: 30 1 --BODY-- State: 0 [t] 1 State: 1 {0} [!0] +1 State: 2 [!0 | !1 | 2] 0 [t] 3 State: 3 [2] 1 [!2] 4 [2] 1 [!2] 4 +State: 4 [!0&2] 1 [!0&!2] 4 [!0&2] 1 [!0&!2] 4 --END--""") oi = a4.out_iteraser(2) +a4.highlight_edge(40, 10); +a4.highlight_state(40, 10); while oi: n = a4.edge_number(oi.current()) if n == 3: From 3c638f2a88cf7b57a8eb7a17587ea6b749b3b798 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Dec 2023 16:38:56 +0100 Subject: [PATCH 383/606] python: add get_highlight_state and get_highlight_edge * python/spot/impl.i: Here. * tests/python/highlighting.ipynb: Test it. * NEWS: Mention it. --- NEWS | 3 + python/spot/impl.i | 32 ++++++++ tests/python/highlighting.ipynb | 131 ++++++++++++++++++++------------ 3 files changed, 119 insertions(+), 47 deletions(-) diff --git a/NEWS b/NEWS index a9cb0a830..e3b2a9329 100644 --- a/NEWS +++ b/NEWS @@ -131,6 +131,9 @@ New in spot 2.11.6.dev (not yet released) given state or edge. Use aut.remove_highlight_states() or aut.remove_highlight_edges() to remove all colors. (Issue #554.) + - Calling aut.get_hight_state(s) or get.highlight_edge(e) will + return the highlight color of that state/edge or None. 
+ Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/python/spot/impl.i b/python/spot/impl.i index e1dd7f296..680e90149 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -594,6 +594,14 @@ namespace std { %} // Must occur before the twa declaration +%typemap(out) unsigned* spot::twa::get_highlight_state, + unsigned* spot::twa::get_highlight_edge %{ + if (!$1) + $result = SWIG_Py_Void(); + else + $result = swig::from(*$1); +%} + %typemap(out) SWIGTYPE* spot::twa::get_product_states %{ if (!$1) $result = SWIG_Py_Void(); @@ -1056,6 +1064,18 @@ static void* ptr_for_bdddict(PyObject* obj) return self; } + unsigned* get_highlight_state(unsigned state) + { + std::map* hs = + self->get_named_prop>("highlight-states"); + if (!hs) + return nullptr; + auto it = hs->find(state); + if (it == hs->end()) + return nullptr; + return &it->second; + } + twa* remove_highlight_states() { self->set_named_prop("highlight-states", nullptr); @@ -1084,6 +1104,18 @@ static void* ptr_for_bdddict(PyObject* obj) return self; } + unsigned* get_highlight_edge(unsigned edge) + { + std::map* he = + self->get_named_prop>("highlight-edges"); + if (!he) + return nullptr; + auto it = he->find(edge); + if (it == he->end()) + return nullptr; + return &it->second; + } + twa* remove_highlight_edges() { self->set_named_prop("highlight-edges", nullptr); diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index 6fe0856a1..13e29bb75 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -247,7 +247,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e410bd0> >" + " *' at 0x7fe2982e4de0> >" ] }, "execution_count": 4, @@ -359,7 +359,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e4110e0> >" + " *' at 0x7fe2982e57d0> >" ] }, "execution_count": 5, @@ -469,7 +469,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e410bd0> >" + " *' at 0x7fe2982e4de0> >" ] }, "execution_count": 6, @@ -549,6 +549,43 @@ "print(a.to_str('HOA', '1.1'))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Querying highlights\n", + "\n", + "Use `get_highlight_state(s)` or `get_highlight_edge(e)` to retrieve the highlighting color of some state/edge. If no highlighting is present for this state/edge, `None` is returned." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "state 0: 0\n", + "state 1: 5\n", + "state 2: 5\n", + "edge 1: None\n", + "edge 2: 1\n", + "edge 3: None\n", + "edge 4: 1\n", + "edge 5: 1\n", + "edge 6: 2\n" + ] + } + ], + "source": [ + "for i in range(0, a.num_states()):\n", + " print(\"state {}: {}\".format(i, a.get_highlight_state(i)))\n", + "for i in range(1, a.num_edges() + 1):\n", + " print(\"edge {}: {}\".format(i, a.get_highlight_edge(i)))" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -565,7 +602,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -654,10 +691,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e411e30> >" + " *' at 0x7fe2982e6a30> >" ] }, - "execution_count": 8, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -675,7 +712,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": { "scrolled": true }, @@ -766,10 +803,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e411c50> >" + " *' at 0x7fe2982e71e0> >" ] }, - "execution_count": 9, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -791,7 +828,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -931,10 +968,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e4123a0> >" + " *' at 0x7fe2982e6f10> >" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -945,7 +982,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -970,7 +1007,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -986,7 +1023,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -1126,10 +1163,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e4123a0> >" + " *' at 0x7fe2982e6f10> >" ] }, - "execution_count": 13, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -1149,7 +1186,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -1464,7 +1501,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e4121f0> >" + " *' at 0x7fe2982e6be0> >" ] }, "metadata": {}, @@ -1725,7 +1762,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e410f90> >" + " *' at 0x7fe2982e67c0> >" ] }, "metadata": {}, @@ -1908,7 +1945,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e410ff0> >" + " *' at 0x7fe2982e6610> >" ] }, "metadata": {}, @@ -1963,7 +2000,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -2025,7 +2062,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e413030> >" + " *' at 0x7fe2982e63a0> >" ] }, "metadata": {}, @@ -2080,7 +2117,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412280> >" + " *' at 0x7fe2982e52c0> >" ] }, "metadata": {}, @@ -2095,7 +2132,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -2174,10 +2211,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412310> >" + " *' at 0x7fe2982e66d0> >" ] }, - "execution_count": 16, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -2188,7 +2225,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 18, 
"metadata": {}, "outputs": [ { @@ -2211,7 +2248,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ @@ -2224,7 +2261,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -2303,7 +2340,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412310> >" + " *' at 0x7fe2982e66d0> >" ] }, "metadata": {}, @@ -2368,7 +2405,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e413030> >" + " *' at 0x7fe2982e63a0> >" ] }, "metadata": {}, @@ -2423,7 +2460,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412280> >" + " *' at 0x7fe2982e52c0> >" ] }, "metadata": {}, @@ -2443,7 +2480,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -2649,7 +2686,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e413c00> >" + " *' at 0x7fe2982e61c0> >" ] }, "metadata": {}, @@ -2734,7 +2771,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e4109c0> >" + " *' at 0x7fe2982e5fe0> >" ] }, "metadata": {}, @@ -2831,7 +2868,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e413bd0> >" + " *' at 0x7fe2982e78a0> >" ] }, "metadata": {}, @@ -2860,7 +2897,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -3000,10 +3037,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412b50> >" + " *' at 0x7fe2982e7030> >" ] }, - "execution_count": 21, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -3028,7 +3065,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -3168,10 +3205,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412b50> >" + " *' at 0x7fe2982e7030> >" ] }, - "execution_count": 22, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -3191,7 +3228,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -3331,7 +3368,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe86e412b50> >" + " *' at 0x7fe2982e7030> >" ] }, "metadata": {}, @@ -3565,7 +3602,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -3811,7 +3848,7 @@ "" ] }, - "execution_count": 24, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } From e8c2b27ad20342fcff89dc7c33ce18378be5b092 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 15 Dec 2023 11:31:55 +0100 Subject: [PATCH 384/606] * spot/tl/hierarchy.cc: Typo in comment. --- spot/tl/hierarchy.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/tl/hierarchy.cc b/spot/tl/hierarchy.cc index a94938009..3ab6f5930 100644 --- a/spot/tl/hierarchy.cc +++ b/spot/tl/hierarchy.cc @@ -293,7 +293,7 @@ namespace spot else return s ? 'S' : 'O'; } - // Not an obligation. Could by 'P', 'R', or 'T'. + // Not an obligation. Could be 'P', 'R', or 'T'. if (is_recurrence(f, aut)) return 'R'; if (is_persistence(f, aut)) From 983964d0376130f544d8566995724beda4692661 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 16 Dec 2023 00:09:24 +0100 Subject: [PATCH 385/606] strength: generalize is_safety_automaton to any type of automata Reported by Samuel Judson. * spot/twaalgos/strength.cc (is_safety_automaton): Reimplement it. * spot/twaalgos/strength.hh (is_safety_automaton): Update documentation. * tests/python/safety.py: New file. * tests/Makefile.am: Add it. * NEWS: Mention this change. * THANKS: Add Samuel. 
--- NEWS | 6 +++++ THANKS | 1 + spot/twaalgos/strength.cc | 56 +++++++++++++++++++++++++++++++-------- spot/twaalgos/strength.hh | 22 +++++++++++---- tests/Makefile.am | 1 + tests/python/safety.py | 52 ++++++++++++++++++++++++++++++++++++ 6 files changed, 122 insertions(+), 16 deletions(-) create mode 100644 tests/python/safety.py diff --git a/NEWS b/NEWS index e3b2a9329..958ea1645 100644 --- a/NEWS +++ b/NEWS @@ -120,6 +120,12 @@ New in spot 2.11.6.dev (not yet released) - ltsmin's interface will now point to README.ltsmin in case an error is found while running divine or spins. + - spot::is_safety_automaton() was generalized to detect any + automaton for which the acceptance could be changed to "t" without + changing the language. In previous versions this function assumed + weak automata as input, but the documentation did not reflect + this. + Python: - The spot.automata() and spot.automaton() functions now accept a diff --git a/THANKS b/THANKS index 46e747d4e..4eb4a598c 100644 --- a/THANKS +++ b/THANKS @@ -56,6 +56,7 @@ Raven Beutner Reuben Rowe Roei Nahum Rüdiger Ehlers +Samuel Judson Shachar Itzhaky Shengping Shaw Shufang Zhu diff --git a/spot/twaalgos/strength.cc b/spot/twaalgos/strength.cc index 0ba9d0637..ff7a69266 100644 --- a/spot/twaalgos/strength.cc +++ b/spot/twaalgos/strength.cc @@ -23,6 +23,8 @@ #include #include #include +#include +#include using namespace std::string_literals; @@ -182,23 +184,55 @@ namespace spot { if (aut->acc().is_t()) return true; + if (!aut->is_existential()) + throw std::runtime_error + ("is_safety_automaton() does not support alternation"); - bool need_si = !si; - if (need_si) - si = new scc_info(aut); + std::unique_ptr localsi; + if (!si) + { + localsi = std::make_unique(aut); + si = localsi.get(); + } + si->determine_unknown_acceptance(); - bool res = true; - unsigned scount = si->scc_count(); - for (unsigned scc = 0; scc < scount; ++scc) - if (!si->is_trivial(scc) && si->is_rejecting_scc(scc)) + // a trim automaton without rejecting cycle is a safety automaton + bool has_rejecting_cycle = false; + + // first, look for rejecting SCCs. + unsigned scccount = si->scc_count(); + for (unsigned scc = 0; scc < scccount; ++scc) + if (si->is_useful_scc(scc) + && !si->is_trivial(scc) + && si->is_rejecting_scc(scc)) { - res = false; + has_rejecting_cycle = true; break; } + if (!has_rejecting_cycle && !aut->prop_inherently_weak().is_true()) + { + // maybe we have rejecting cycles inside accepting SCCs? + for (unsigned scc = 0; scc < scccount; ++scc) + if (si->is_useful_scc(scc) + && !si->is_trivial(scc) + && si->is_accepting_scc(scc) + && scc_has_rejecting_cycle(*si, scc)) + { + has_rejecting_cycle = true; + break; + } + } + if (!has_rejecting_cycle) + return true; - if (need_si) - delete si; - return res; + // If the automaton has a rejecting loop and is deterministic, it + // cannot be a safety automaton. + if (is_universal(aut)) + return false; + + twa_graph_ptr b = make_twa_graph(aut, twa::prop_set::all()); + strip_acceptance_here(b); + return spot::contains(aut, b); } diff --git a/spot/twaalgos/strength.hh b/spot/twaalgos/strength.hh index 15122518a..baaaefe99 100644 --- a/spot/twaalgos/strength.hh +++ b/spot/twaalgos/strength.hh @@ -104,12 +104,24 @@ namespace spot /// \brief Check whether an automaton is a safety automaton. /// - /// A safety automaton has only accepting SCCs (or trivial - /// SCCs). + /// An automaton is a safety automaton if its acceptance condition + /// can be changed to "true" without changing its language. 
/// - /// A minimized WDBA (as returned by a successful run of - /// minimize_obligation()) represents safety property if it is a - /// safety automaton. + /// The test performed by this function differs depending on + /// the nature of the input \a aut. + /// + /// If \a aut is an automaton with `t` acceptance, it is necessarily + /// a safety automaton. + /// + /// Else we check for the absence of rejecting cycle in the + /// useful part of the automaton. This absence is only a sufficient + /// condition in the non-deterministic case, because a rejecting + /// run might correspond to a word that is accepted by another run. + /// + /// If the previous test could not conclude, we build the automaton + /// B that is a copy of \a aut with acceptance set to true, and we + /// check that \a aut contains all words of B. This last test + /// requires complementing \a aut. /// /// \param aut the automaton to check /// diff --git a/tests/Makefile.am b/tests/Makefile.am index f6c303dd5..3609ec03b 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -446,6 +446,7 @@ TESTS_python = \ python/relabel.py \ python/remfin.py \ python/removeap.py \ + python/safety.py \ python/satmin.py \ python/sbacc.py \ python/sccfilter.py \ diff --git a/tests/python/safety.py b/tests/python/safety.py new file mode 100644 index 000000000..e6376ef4f --- /dev/null +++ b/tests/python/safety.py @@ -0,0 +1,52 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + + +for f in ['Fb', 'GFa & GFb', + '(p0 W Fp0) R ((Gp1 & Fp1) | (F!p1 & G!p1)) & GFp0', + 'GFp0 xor GFp1']: + aut = spot.translate(f, 'BA') + tc.assertFalse(spot.is_safety_automaton(aut)) + + aut = spot.translate(f, 'BA') + tc.assertFalse(spot.is_safety_automaton(aut)) + + aut = spot.translate(f, 'deterministic', 'complete') + tc.assertFalse(spot.is_safety_automaton(aut)) + + aut = spot.translate(f, 'generic', 'sbacc') + tc.assertFalse(spot.is_safety_automaton(aut)) + +for f in ['Gb', 'Ga|Gb|Gc', 'Fr->(!p U r)', 'p1 M F(p1 U (Gp0 U X(0)))', + '((p1 U !p0) M !FXp1) W p0', 'p0 & ((!p1 | (p1 W X!p1)) M p1)', + '(p0 W Fp0) R ((Gp1 & Fp1) | (F!p1 & G!p1))']: + aut = spot.translate(f, 'BA') + tc.assertTrue(spot.is_safety_automaton(aut)) + + ba = spot.translate(f, 'BA', 'complete') + tc.assertTrue(spot.is_safety_automaton(aut)) + + ba = spot.translate(f, 'deterministic', 'complete') + tc.assertTrue(spot.is_safety_automaton(aut)) + + ba = spot.translate(f, 'generic', 'sbacc') + tc.assertTrue(spot.is_safety_automaton(aut)) From 55992b1ca2eb3e00ac498c9293d168a34ef34082 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 17 Dec 2023 21:34:51 +0100 Subject: [PATCH 386/606] * spot/bricks/brick-assert: include . 
--- spot/bricks/brick-assert | 1 + 1 file changed, 1 insertion(+) diff --git a/spot/bricks/brick-assert b/spot/bricks/brick-assert index 72515de00..7a180ce27 100644 --- a/spot/bricks/brick-assert +++ b/spot/bricks/brick-assert @@ -23,6 +23,7 @@ #include #include #include +#include #ifndef TEST #define TEST(n) void n() From 69c8187330ce74ab2634c5d83692d3753636b308 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Dec 2023 10:04:15 +0100 Subject: [PATCH 387/606] * spot/twaalgos/aiger.cc: Work around gcc snapshot diagnostics. --- spot/twaalgos/aiger.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index c5a2f8961..659dd281d 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1331,6 +1331,7 @@ namespace spot std::deque todo; todo.push_back(0); + assert(n_max_states > 0); std::vector seen(n_max_states, false); seen[0] = true; From 9957aa1a3a43c0382ddff566683d7b8c87b1713f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 13 Jan 2024 12:53:54 +0100 Subject: [PATCH 388/606] bump copyright to 2024 * bin/common_setup.cc, debian/copyright: Here. --- bin/common_setup.cc | 2 +- debian/copyright | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/common_setup.cc b/bin/common_setup.cc index e0636b802..1b23833df 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -35,7 +35,7 @@ display_version(FILE *stream, struct argp_state*) fputs(program_name, stream); fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\ \n\ -Copyright (C) 2023 by the Spot authors, see the AUTHORS File for details.\n\ +Copyright (C) 2024 by the Spot authors, see the AUTHORS File for details.\n\ License GPLv3+: \ GNU GPL version 3 or later .\n\ This is free software: you are free to change and redistribute it.\n\ diff --git a/debian/copyright b/debian/copyright index ae9290987..66fdb75c2 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,7 +3,7 @@ Upstream-Name: spot Source: http://www.lrde.epita.fr/dload/spot/ Files: * -Copyright: 2003-2023 the Spot authors +Copyright: 2003-2024 the Spot authors License: GPL-3+ Spot is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by From 690e5a213d5f29a505ffbc2594fdcd91daf5c32f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 24 Jan 2024 16:11:25 +0100 Subject: [PATCH 389/606] remove_alternation: option to return nullptr if too many sets needed * spot/twaalgos/alternation.hh, spot/twaalgos/alternation.cc: Add the new options. * spot/twaalgos/complement.cc, spot/twaalgos/minimize.cc: Use it. * tests/core/optba.test: Add a test case from Yann. * NEWS: Mention those changes. --- NEWS | 7 + spot/twaalgos/alternation.cc | 13 +- spot/twaalgos/alternation.hh | 9 +- spot/twaalgos/complement.cc | 6 +- spot/twaalgos/minimize.cc | 5 +- tests/core/optba.test | 824 +++++++++++++++++++++++++++++++++++ 6 files changed, 857 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 958ea1645..7a15bd1ac 100644 --- a/NEWS +++ b/NEWS @@ -126,6 +126,10 @@ New in spot 2.11.6.dev (not yet released) weak automata as input, but the documentation did not reflect this. + - spot::remove_alternation() has a new argument to decide whether it + should raise an exception of return nullptr if it requires more + acceptance sets than supported. 
+ Python: - The spot.automata() and spot.automaton() functions now accept a @@ -200,6 +204,9 @@ New in spot 2.11.6.dev (not yet released) purge_dead_state(), did not update the highlight-edges property. (Issue #555.) + - spot::minimize_obligation will skip attempts to complement very + weak automata when those would require too many acceptance sets. + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 88ca240eb..1de366b66 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -347,7 +347,8 @@ namespace spot } - twa_graph_ptr run(bool named_states, const output_aborter* aborter) + twa_graph_ptr run(bool named_states, const output_aborter* aborter, + bool raise_if_too_many_sets) { // First, we classify each SCC into three possible classes: // @@ -356,6 +357,10 @@ namespace spot // 3) rejecting of size >1 classify_each_scc(); + if (!raise_if_too_many_sets && + (has_reject_more_ + reject_1_count_) > SPOT_MAX_ACCSETS) + return nullptr; + // Rejecting SCCs of size 1 can be handled using genralized // Büchi acceptance, using one set per SCC, as in Gastin & // Oddoux CAV'01. See also Boker & et al. ICALP'10. Larger @@ -367,6 +372,7 @@ namespace spot // We preserve deterministic-like properties, and // stutter-invariance. res->prop_copy(aut_, {false, false, false, true, true, true}); + // This will raise an exception if we request too many sets. res->set_generalized_buchi(has_reject_more_ + reject_1_count_); // We for easier computation of outgoing sets, we will @@ -502,14 +508,15 @@ namespace spot twa_graph_ptr remove_alternation(const const_twa_graph_ptr& aut, bool named_states, - const output_aborter* aborter) + const output_aborter* aborter, + bool raise_if_too_many_sets) { if (aut->is_existential()) // Nothing to do, why was this function called at all? return std::const_pointer_cast(aut); alternation_remover ar(aut); - return ar.run(named_states, aborter); + return ar.run(named_states, aborter, raise_if_too_many_sets); } diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index e782856af..1e0ba87ed 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -100,12 +100,17 @@ namespace spot /// \param named_states name each state for easier debugging /// /// \param aborter Return nullptr if the built automaton would - /// be larger than the size specified by the \a aborter. + /// be larger than the size specified by the \a aborter, or + /// if it would require too many acceptance sets. + /// + /// \param raise_if_too_many_sets when set to false, return + /// nullptr in cases where we would need too many colors /// @} SPOT_API twa_graph_ptr remove_alternation(const const_twa_graph_ptr& aut, bool named_states = false, - const output_aborter* aborter = nullptr); + const output_aborter* aborter = nullptr, + bool raise_if_too_many_sets = true); // Remove universal edges on the fly. diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 0a7f0cc16..7e38e519a 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -512,7 +512,11 @@ namespace spot if (!aut->is_existential() || is_universal(aut)) return dualize(aut); if (is_very_weak_automaton(aut)) - return remove_alternation(dualize(aut), aborter); + // removing alternation may need more acceptance sets than we support. + // in this case res==nullptr and we try the other determinization. 
+ if (twa_graph_ptr res = remove_alternation(dualize(aut), false, + aborter, false)) + return res; // Determinize spot::option_map m; if (aborter) diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index faaca1cae..40e176d2e 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -681,7 +681,10 @@ namespace spot else if (is_very_weak_automaton(aut_f)) { // Very weak automata are easy to complement. - aut_neg_f = remove_alternation(dualize(aut_f)); + aut_neg_f = remove_alternation(dualize(aut_f), false, + nullptr, false); + if (!aut_neg_f) // this required too many colors + return nullptr; } else { diff --git a/tests/core/optba.test b/tests/core/optba.test index 916794e66..c31d7081f 100755 --- a/tests/core/optba.test +++ b/tests/core/optba.test @@ -170,3 +170,827 @@ State: 1 "T2" [0&1] 1 [0&1] 2 [!0&!1] 2 [0&1] 3 [!0&!1] 1 State: 2 "T1" {0} [0&1] 2 State: 3 "all" {0} [t] 3 --END-- EOF test '3,6' = `autfilt --small in --stats=%s,%e` + + + +# The following TGBA was supplied by Yann Thierry-Mieg +# and caused minimize_obligation to call remove_alternation +# for complementation, but it required too many colors. + +cat >in.hoa < Date: Fri, 26 Jan 2024 22:31:17 +0100 Subject: [PATCH 390/606] ltlsynt: fix --global-equiv Fixes #557. * spot/tl/apcollect.cc (realizability_simplifier): When detecting global equivalence such as o1 := i2, the left is always an output, so it should never be marked as input. * tests/core/ltlsynt.test: Add test case. --- spot/tl/apcollect.cc | 4 ++-- tests/core/ltlsynt.test | 12 +++++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 61a570f3d..72e4335f9 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -411,9 +411,9 @@ namespace spot continue; SPOT_ASSUME(lit != nullptr); if (lit.is(spot::op::Not)) - add_to_mapping(lit[0], repr_is_input, not_repr); + add_to_mapping(lit[0], false, not_repr); else - add_to_mapping(lit, repr_is_input, repr); + add_to_mapping(lit, false, repr); rm_has_new_terms = true; } } diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 537872c23..7165f00c5 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1109,4 +1109,14 @@ diff out expected f1="((G ((p0) <-> (! (p1)))) && (((((F ((b) && (G (F (a))))) ||\ (F ((c) && (G (F (! (a))))))) && (F (b))) && (F (c))) <-> (G (F (p0)))))" -ltlsynt -f "$f1" --outs="p1, p0" --aiger > /dev/null \ No newline at end of file +ltlsynt -f "$f1" --outs="p1, p0" --aiger > /dev/null + +# issue #557 +ltlsynt -f 'G(in1 <-> out0) & G(in0 <-> out1)' --ins=in1,in0 --verb 2>err >out +grep := err > err2 +cat >err2.ex < Date: Tue, 6 Feb 2024 14:09:42 +0100 Subject: [PATCH 391/606] postproc: fix default for acd and interaction with colored * spot/twaalgos/postproc.hh (postprocess::acd_): Default to true. * spot/twaalgos/postproc.cc (postprocess::run): When acd is used to color an automaton, do not run scc_filter to remove color from transiant edges. * tests/python/acd.py: New file. * tests/Makefile.am: Add it. 
--- NEWS | 12 +++++++ spot/twaalgos/postproc.cc | 22 +++--------- spot/twaalgos/postproc.hh | 2 +- tests/Makefile.am | 1 + tests/python/acd.py | 71 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 18 deletions(-) create mode 100644 tests/python/acd.py diff --git a/NEWS b/NEWS index 7a15bd1ac..5246909ea 100644 --- a/NEWS +++ b/NEWS @@ -207,6 +207,18 @@ New in spot 2.11.6.dev (not yet released) - spot::minimize_obligation will skip attempts to complement very weak automata when those would require too many acceptance sets. + - acd_transform() was not used by spot::postprocessor unless an + option_map was passed. This was due to some bad default for the + "acd" option: it defaulted to true when an option_map was given, + and to false otherwise. This had no consequences on + ltl2tgba/autfilt were some option_map is always passed, but for + instance the parity automata generated by spot.postprocessor in + Python were not using ACD by default. + + - Using spot::postprocessor to produce colored parity automata could + fail to color some transiant edges when the "acd" option was + activated. + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 1a1ceb9cc..0a5979064 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -407,7 +407,7 @@ namespace spot // ignored. a = scc_filter_states(a); else - a = do_scc_filter(a, (PREF_ == Any)); + a = do_scc_filter(a, (PREF_ == Any) && !COLORED_); if (type_ == Monitor) { @@ -721,23 +721,11 @@ namespace spot sim = nullptr; } - if (level_ == High && scc_filter_ != 0) - { - if (dba) - { - // Do that even for WDBA, to remove marks from transitions - // leaving trivial SCCs. - dba = do_scc_filter(dba, true); - assert(!sim); - } - else if (sim) - { - sim = do_scc_filter(sim, true); - assert(!dba); - } - } - sim = dba ? dba : sim; + if (level_ == High && scc_filter_ != 0 && !(acd_was_used_ && COLORED_)) + // Do that even for WDBA, to remove marks from transitions + // leaving trivial SCCs. + sim = do_scc_filter(sim, true); if (type_ == CoBuchi) { diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 2a162501d..73da4baab 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -269,7 +269,7 @@ namespace spot int simul_max_ = 4096; int merge_states_min_ = 128; int wdba_det_max_ = 4096; - bool acd_ = false; + bool acd_ = true; bool acd_was_used_; }; /// @} diff --git a/tests/Makefile.am b/tests/Makefile.am index 3609ec03b..67e5bb4d6 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -395,6 +395,7 @@ TESTS_python = \ python/_autparserr.ipynb \ python/_aux.ipynb \ python/acc.py \ + python/acd.py \ python/accparse2.py \ python/alarm.py \ python/aliases.py \ diff --git a/tests/python/acd.py b/tests/python/acd.py new file mode 100644 index 000000000..6e393a781 --- /dev/null +++ b/tests/python/acd.py @@ -0,0 +1,71 @@ +#!/usr/bin/python3 +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + + +a = spot.automaton(""" +HOA: v1 +States: 3 +Start: 0 +AP: 3 "p0" "p1" "p2" +Acceptance: 3 Fin(0) & Inf(1) & Fin(2) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[!0] 1 +[0&!1&2] 0 +[0&!1&!2] 0 {2} +[0&1&2] 0 {1} +[0&1&!2] 0 {1 2} +State: 1 +[!1&2] 2 +[!1&!2] 2 {2} +[1&2] 2 {1} +[1&!2] 2 {1 2} +State: 2 +[0&!1&2] 2 +[0&!1&!2] 2 {2} +[0&1&2] 2 {1} +[0&1&!2] 2 {1 2} +--END--""") +res = a.postprocess("small", "high", "parity min odd", "colored") +tc.assertEqual(res.to_str(), """HOA: v1 +States: 3 +Start: 0 +AP: 3 "p0" "p1" "p2" +acc-name: parity min odd 3 +Acceptance: 3 Fin(0) & (Inf(1) | Fin(2)) +properties: trans-labels explicit-labels trans-acc colored +properties: deterministic +--BODY-- +State: 0 +[0&!2] 0 {0} +[0&1&2] 0 {1} +[0&!1&2] 0 {2} +[!0] 1 {0} +State: 1 +[t] 2 {0} +State: 2 +[0&!2] 2 {0} +[0&1&2] 2 {1} +[0&!1&2] 2 {2} +--END--""") From 342360f8caab004d8138e43764509ff1ccced1d1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Feb 2024 14:11:42 +0100 Subject: [PATCH 392/606] fix some preprocessor directive Apparently using `#if defined(X) or defined(Y)` did not trouve the compilers, but Swig was confused by the "or". * spot/misc/common.hh, spot/tl/formula.hh: Use || instead. --- spot/misc/common.hh | 2 +- spot/tl/formula.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/misc/common.hh b/spot/misc/common.hh index 4d3a12766..a7fb1e076 100644 --- a/spot/misc/common.hh +++ b/spot/misc/common.hh @@ -67,7 +67,7 @@ // Else use SPOT_ASSERT so the assert() are removed from user's // builds. #define spot_assert__ assert -#if defined(SPOT_BUILD) or defined(SPOT_DEBUG) +#if defined(SPOT_BUILD) || defined(SPOT_DEBUG) #define SPOT_ASSERT(x) spot_assert__(x) #else // Do not replace by SPOT_ASSUME(x), as x can have some costly diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 3ab6dd100..4f56c38bc 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -59,7 +59,7 @@ // The strong_X operator was introduced in Spot 2.8.2 to fix an issue // with from_ltlf(). As adding a new operator is a backward // incompatibility, causing new warnings from the compiler. -#if defined(SPOT_BUILD) or defined(SPOT_USES_STRONG_X) +#if defined(SPOT_BUILD) || defined(SPOT_USES_STRONG_X) // Use #if SPOT_HAS_STRONG_X in code that need to be backward // compatible with older Spot versions. # define SPOT_HAS_STRONG_X 1 From 94ab42612ad96a27bd1ea4890e570adc1f1649ad Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Feb 2024 17:28:48 +0100 Subject: [PATCH 393/606] work around some Swig 4.2 change Several test cases started failing after updating to Swig 4.2 because the spot.ltsmin module and the spot.impl module were using different names for atomic_prop_set. This seems to work around it. * python/spot/impl.i: Specify the full type for atomic_prop_set. 
--- python/spot/impl.i | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/python/spot/impl.i b/python/spot/impl.i index 680e90149..778983aac 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -529,7 +529,13 @@ namespace std { %template(vectorstring) vector; %template(vectorint) vector; %template(pair_formula_vectorstring) pair>; - %template(atomic_prop_set) set; + // We specify the optional std::less/std::allocator arguments of set + // to work around an issue that appeared with Swig 4.2. Without those + // arguments the type of set used in module ltlsmin.i + // does not match the type of set used here! + %template(atomic_prop_set) set, + std::allocator>; %template(vectorofvectorofformulas) vector>; %template(setunsigned) set; %template(relabeling_map) map; @@ -813,7 +819,9 @@ def state_is_accepting(self, src) -> "bool": %include %include -%extend std::set { +%extend std::set, + std::allocator> { std::string __str__() { std::ostringstream os; From db168f97e696fdd0c276c47d8fa60c0e54b1d663 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Feb 2024 22:37:34 +0100 Subject: [PATCH 394/606] tl: fix detection of goto Fixes #559. * spot/tl/print.cc (strip_star_not): Only match a full star. * tests/core/sugar.test: Add test case. --- NEWS | 4 ++++ spot/tl/print.cc | 2 ++ tests/core/sugar.test | 8 ++++++++ 3 files changed, 14 insertions(+) diff --git a/NEWS b/NEWS index 5246909ea..d27c76f98 100644 --- a/NEWS +++ b/NEWS @@ -219,6 +219,10 @@ New in spot 2.11.6.dev (not yet released) fail to color some transiant edges when the "acd" option was activated. + - The formula printer incorrectly replaced a SERE like "(!a)[*3];a" + by "a[->]". The latter should only replace "(!a)[*];a". + (Issue #559.) + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/spot/tl/print.cc b/spot/tl/print.cc index 044f83d94..ef998ba86 100644 --- a/spot/tl/print.cc +++ b/spot/tl/print.cc @@ -348,6 +348,8 @@ namespace spot static formula strip_star_not(formula f) { + if (!f.is_Kleene_star()) + return nullptr; return f.get_child_of({op::Star, op::Not}); } diff --git a/tests/core/sugar.test b/tests/core/sugar.test index 386793595..344a403df 100755 --- a/tests/core/sugar.test +++ b/tests/core/sugar.test @@ -59,6 +59,10 @@ F[]a|G[]b|X[]c {##[1..2] b*}|->e {a ##[+] b}|->e {##[*] b}|->e +{(!a)[*];a;b}! +{(!a)[+];a;b}! +{(!a)[*3];a;b}! +{((!a)[*];a)[*2..3];b}! EOF ltlfilt -F ok.in > ok.out @@ -99,6 +103,10 @@ FGa | Gb | XGc {[*1..2];b[*]}[]-> e {a;[*];b}[]-> e {[*];b}[]-> e +{a[->];b}! +{{!a}[+];a;b}! +{{!a}[*3];a;b}! +{a[->2..3];b}! EOF diff ok.out expect From ca739ce8166920dd8337b310a242a38deadbc4eb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Feb 2024 22:39:07 +0100 Subject: [PATCH 395/606] * NEWS: Fix some typos. --- NEWS | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/NEWS b/NEWS index d27c76f98..c19ab9634 100644 --- a/NEWS +++ b/NEWS @@ -7,8 +7,8 @@ New in spot 2.11.6.dev (not yet released) to produce the 0-based serial number of the produced object. This differs from the existing '%L' that is usually related to the line number of the input (when that makes sense). 
For instance to - split a file that contains many automaton into several files, one - per automata, do + split a file that contains many automata into several files, one + per automaton, do autfilt input.hoa -o output-%l.hoa @@ -46,11 +46,11 @@ New in spot 2.11.6.dev (not yet released) - The HOA parser is a bit smarter when merging multiple initial states into a single initial state (Spot's automaton class - supports only one): it now reuse the edges leaving initial states + supports only one): it now reuses the edges leaving initial states without incoming transitions. - The automaton parser has a new option "drop_false_edges" to - specify where edges labeled by "false" should be ignored during + specify whether edges labeled by "false" should be ignored during parsing. It is enabled by default for backward compatibility. - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() @@ -85,14 +85,14 @@ New in spot 2.11.6.dev (not yet released) For instance on Alexandre's laptop, running 'ltlsynt --tlsf SPIReadManag.tlsf --aiger' with Spot 2.11.6 used to produce an AIG circuit with 48 nodes in - 36 seconds; it now produce an AIG circuit with 53 nodes in only + 36 seconds; it now produces an AIG circuit with 53 nodes in only 0.1 second. - spot::contains_forq() is a implementation of the paper "FORQ-Based Language Inclusion Formal Testing" (Doveri, Ganty, Mazzocchi; CAV'22) contributed by Jonah Romero. - - spot::contains() still default to the complementation-based + - spot::contains() still defaults to the complementation-based algorithm, however by calling spot::containment_select_version("forq") or by setting SPOT_CONTAINMENT_CHECK=forq in the environment, the @@ -102,7 +102,7 @@ New in spot 2.11.6.dev (not yet released) The above also impacts autfilt --included-in option. - spot::scc_info has a new option PROCESS_UNREACHABLE_STATES that - causes it to enumerated even unreachable SCCs. + causes it to enumerate even unreachable SCCs. - spot::realizability_simplifier is a new class that performs the removal of superfluous APs that is now performed by ltlsynt @@ -149,7 +149,7 @@ New in spot 2.11.6.dev (not yet released) - tgba_determinize()'s use_simulation option would cause it to segfault on automata with more than 2^16 SCCs, due to overflows in computations of indices in the reachability matrix for SCCs. - (Issue #541.) This has been fixed by disabled the use_simulation + (Issue #541.) This has been fixed by disabling the use_simulation optimization in this case. - product_or_susp() and product_susp() would behave incorrectly in From a735c2b72dc494ad70d19fcc131f0ded54a650d1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 6 Feb 2024 23:12:55 +0100 Subject: [PATCH 396/606] tl_simplifier: add more test cases Fixes #558 by just adding test cases showing there is no issue. * tests/core/reduccmp.test: Add test cases. 
--- tests/core/reduccmp.test | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/core/reduccmp.test b/tests/core/reduccmp.test index 42505ed46..27580cb34 100755 --- a/tests/core/reduccmp.test +++ b/tests/core/reduccmp.test @@ -487,6 +487,11 @@ GF(a && GF(b) && c), G(F(a & c) & Fb) {first_match(1:e[*0..3])[*]}[]-> c, c W !e {first_match(first_match(a*;e);b)}[]->a, {first_match(a[*];e)}[]-> X(a | !b) {first_match(first_match(a*;e):b*)}[]->a, {first_match(a[*];e)}[]-> (a | !b) + +# issue 558 (was a false alarm, but still good to test) +{(!b)[*3];b}!, !b & X(!b & X(!b & Xb)) +{(!b)[+];b}!, !b & XFb +{(!b)[*];b}!, Fb EOF run 0 ../reduccmp nottau.txt From a6f79c6211020d074fecb7871613ca597a5aef02 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 8 Feb 2024 22:43:44 +0100 Subject: [PATCH 397/606] more doc handling of prop_universal for fused initial states Fixes #560. * spot/parseaut/parseaut.yy: Add more comments about handling of prop_universal in present of multiple initial states. It took me time to figure out that it was done correctly. Also only reset prop_complete() in the case an initial state is reused. * tests/core/det.test: Add a test case for the deterministic property. * tests/python/parsetgba.py: Add tests for complete. * doc/org/hoa.org: Add more text about the effect of fusing initial states. * doc/org/concepts.org (properties): Replace "deterministic" by "universal". The former was obsoleted in Spot 2.4. --- doc/org/concepts.org | 6 +-- doc/org/hoa.org | 108 ++++++++++++++++++++++++++------------ spot/parseaut/parseaut.yy | 40 ++++++++------ tests/core/det.test | 28 ++++++++++ tests/python/parsetgba.py | 31 +++++++++++ 5 files changed, 159 insertions(+), 54 deletions(-) diff --git a/doc/org/concepts.org b/doc/org/concepts.org index 64f982eb8..7dca74b54 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -1034,7 +1034,7 @@ layers. distributed with the rest of Spot, their source-code is publicly available (in case you want to contribute or run a local version). The [[https://spot-sandbox.lrde.epita.fr/][=spot-sandbox=]] website runs from a Docker container whose - configuration can be found in [[https://gitlab.lre.epita.fr/spot/sandbox/tree/master=][this repository]]. The client and + configuration can be found in [[https://gitlab.lre.epita.fr/spot/sandbox/tree/master][this repository]]. The client and server parts of the [[https://spot.lrde.epita.fr/app/][online LTL translator]] can be found in [[https://gitlab.lre.epita.fr/spot/spot-web-app/][this repository]]. 
@@ -1065,9 +1065,9 @@ automaton, and that can be queried or set by algorithms: | =very_weak= | weak automaton where all SCCs have size 1 | | =terminal= | automaton is weak, accepting SCCs are complete, accepting edges may not go to rejecting SCCs | | =complete= | for any letter ℓ, each state has is at least one outgoing transition compatible with ℓ | -| =deterministic= | there is at most one run *recognizing* a word, but not necessarily accepting it | +| =universal= | there is at most one run *recognizing* each word, but not necessarily accepting it | | =semi_deterministic= | any nondeterminism occurs before entering an accepting SCC | -| =unambiguous= | there is at most one run *accepting* a word (but it might be recognized several time) | +| =unambiguous= | there is at most one run *accepting* each word (but it might be recognized several time) | | =stutter_invariant= | the property recognized by the automaton is [[https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf][stutter-invariant]] | For each flag =flagname=, the =twa= class has a method diff --git a/doc/org/hoa.org b/doc/org/hoa.org index 6994abdc5..2efb72537 100644 --- a/doc/org/hoa.org +++ b/doc/org/hoa.org @@ -8,7 +8,7 @@ The [[http://adl.github.io/hoaf/][Hanoi Omega-Automata format]] is a textual representation of ω-automata labeled by Boolean formulas over a set of atomic propositions, and using an arbitrary acceptance condition. The -typical acceptances conditions like Büchi, generalized-Büchi, +typical acceptance conditions like Büchi, generalized-Büchi, co-Büchi, Rabin, Streett, parity, ... are all supported, but the main advantage of this format is that any arbitrary acceptance condition can be defined. The HOA format has support for many features such as @@ -20,7 +20,7 @@ automaton. The HOA format is already supported in [[http://adl.github.io/hoaf/support.html][several tools]]. The goal of this page is to detail the support of this format in Spot. It contains some information that are useful to better understand the behavior -of the tools distributed by Spot, and it also look at some lower-level, +of the tools distributed by Spot, and it also looks at some lower-level, discussing details that are interesting when programming with Spot. Spot can read files written using either version 1 or version 1.1 of @@ -69,21 +69,6 @@ the HOA format, the output may not be exactly the same as the input. using option =--enable-max-accsets=N=, but doing so will consume more memory and time. -- Multiple (or missing) initial states are emulated. - - The internal TωA representation used by Spot supports only a single - initial state. When an HOA with multiple initial states is read, it - is transformed into an equivalent TωA by merging the initial states - into a single one. The merged state can either be one of the - original initial states (if one of those has no incoming edge) or a - new state introduced for that purpose. This "conversion" may change - the completeness property of the automaton. - - Similarly, when an automaton with no initial state is loaded (this - includes the case where the automaton has no state), a disconnected - initial state is added. As a consequence, Spot's HOA output always - contains at least one state, even when the input had no state. - - =Fin(!x)= and =Inf(!x)= are rewritten away. Internally Spot only deals with acceptance conditions involving the @@ -100,6 +85,58 @@ the HOA format, the output may not be exactly the same as the input. 
=1=, that stores the complement of set =0=, and using =Inf(0)&Inf(1)=. +- Multiple (or missing) initial states are emulated. + + The internal TωA representation used by Spot supports only a single + initial state. To make it possible to still process HOA files + that declare multiple initial states, Spot's automaton parser + will automatically transform the parsed automaton into an equivalent + TωA by merging the initial states into a single initial state that + has no incoming edge. This can be achieved in two ways: + + - If one of the original initial states has no incoming edge, it can + serve as the new initial state. The parse simply has to duplicate + the edges leaving the other initial states onto this one. (As an + optimization, if the other initial state alsos do not have any + incoming edge, the source of its outgoing edges are changed to + avoid copying them.) + + - If all of the original initial states has incoming edges, a new + state is created to serve as the new initial states, with a copy + of all the outgoing edges of the original initial states. + + This process is done in such a way that the states are not + renumbered: states are kept even if they become useless after the + above change, and if a new state is added, it is added after all + declared states. + + This fusing of initial states may have puzzling consequences if you + are unaware of it: + + - An automaton with multiple initial states is considered + non-deterministic and can be declared as so in the HOA 1.1 format. + However, fusing its initial states could potentially turn in into a + deterministic automaton if the only nondeterminism was in the + choice of the initial state. For this reason Spot's parser will + keep the [[file:concepts.org::#property-flags][prop_universal()]] set to =maybe()= when multiple initial + states are declared, regardless of any determinism property + specified in the HOA file. + + - Similarly, fusing all initial states into an existing one + can change the [[file:concepts.org::#property-flags][prop_complete()]] property of the automaton. + + - Finally, if an automaton that declares state-based acceptance, and + uses different acceptance marks on their initial states. Only one + of these acceptance marks will be kept on the single initial + state. Remember that the new initial state has no incoming edge, + so it cannot be part of any loop and its acceptance mark is + actually irrelevant. + + Similarly, when an automaton with no initial state is loaded (this + includes the case where the automaton has no state), a disconnected + initial state is added. As a consequence, Spot's HOA output always + contains at least one state, even when the input had no state. + * Internal representations of some features In this section we discuss features of the format that are fully @@ -194,7 +231,7 @@ State: 2 #+END_SRC Even if an input HOA file uses only state-based acceptance, Spot -internally stores it using transition-based acceptance. However in +internally stores it using transition-based acceptance. However, in that case the TωA will have a property flag indicating that it actually represents an automaton with the "state-based acceptance" property: this implies that transitions leaving one state all belong to the same @@ -207,7 +244,7 @@ the automaton satisfies this property. In that case, it outputs the automaton with state-based acceptance. 
For instance in the following automaton, the outgoing transitions of -each states belong to the same sets: +each state belong to the same sets: #+NAME: state-based-example #+BEGIN_SRC sh :wrap SRC hoa @@ -258,7 +295,7 @@ State: 2 {0 1} --END-- #+END_SRC -The rational for this automatic switch to state-based acceptance is as follows: +The rationale for this automatic switch to state-based acceptance is as follows: - Tools that support transition-based acceptance can easily see state-based acceptance as syntactic sugar, so they should be able to process state-based or transition-based acceptance @@ -302,7 +339,7 @@ State: 2 #+END_SRC By default, the output uses either state-based acceptance, or -transition-based acceptance. However there is no restriction in the +transition-based acceptance. However, there is no restriction in the format to prevents mixing the two: if you use =-Hm=, the decision of using state or transition-based acceptance will be made for each state separately. For instance: @@ -391,7 +428,7 @@ Whenever an HOA file is output, Spot attempts to recognize the acceptance condition to give it a suitable =acc-name:= (even if Spot does not use this line, it is useful to tools that only deal with one specific acceptance condition and that do not want to parse the -=Acceptance:= line). However the HOA output routine has no idea of +=Acceptance:= line). However, the HOA printer has no idea of what type of automata you are trying to output: it is only looking at the acceptance condition and trying to name it as precisely as possible. This could be a problem when a given condition accepts @@ -402,7 +439,7 @@ format]] the condition =Inf(0)= could be called =Buchi=, or =generalized-Buchi 1=, or (why not?) =parity min even 1= or =parity max even 1=. Spot will always call this acceptance condition =Buchi=. -Similarly the acceptance condition =t= is always called =all= (not +Similarly, the acceptance condition =t= is always called =all= (not =generalized-Buchi 0= or =Rabin 0=, etc.), and while =f= is always named =none=. @@ -595,7 +632,7 @@ instance it is easier to complement a deterministic automaton that is known to be inherently weak. Spot stores the properties that matters to its algorithms as -[[file:concepts.org::#property-flags][additional bits attached to each automaton]]. Currently the HOA parser +[[file:concepts.org::#property-flags][additional bits attached to each automaton]]. Currently, the HOA parser ignores all the properties that are unused by Spot. Some of the supported properties are double-checked when the automaton @@ -636,7 +673,7 @@ redundant and useless properties. For instance =deterministic= automata are necessarily =unambiguous=, and people interested in unambiguous automata know that, so Spot only outputs the =unambiguous= property if an unambiguous automaton is non-deterministic. Similarly, -while Spot may output alternating automata, it does not output +while Spot may produce alternating automata, it does not output the =no-univ-branch= property because we cannot think of a situation where this would be useful. This decision can be overridden by passing the =-Hv= (or =--hoa=v=) option to the command-line tools: @@ -754,13 +791,13 @@ State: 2 "so am I" {0 1} --END-- #+end_SRC -However when Spot performs some transformation, and actually has to +However, when Spot performs some transformation, and actually has to construct a new automaton, those properties will not be quarried over to the new automaton. 
First because it is not obvious that the new automaton should have the same name, and second because if a new automaton is created, there might not be clear correspondence between the old states and the new ones. =autfilt= tries to preserve aliases -by reintroducing them to the automaton before it is outputs it (unless +by reintroducing them to the automaton before it prints it (unless option =--aliases=drop= is used). Here is for instance the result when =autfilt= is instructed to @@ -938,7 +975,7 @@ by tools that produce automata in a stream to cancel the current one. This makes sense for instance when the automaton is constructed on-the-fly, while it is being output. This scenario does not occur in Spot (automata are constructed before they are output), so it does not -emit =--ABORT--=. However the input parser is fully aware of this +emit ~--ABORT--~. However, the input parser is fully aware of this token. Tools like =autfilt= will diagnose aborted automata in the input, and continue processing with the next automaton. The Python bindings for the HOA parser can be configured in two modes: skip @@ -1013,7 +1050,7 @@ autfilt decorate.hoa -d'.#' On the above example, we call =autfilt= with option =-d#= to display -edges numbers, which helps identifying the edges to highlight. The +edges numbers, which helps identify the edges to highlight. The headers ~spot.highlight.states:~ and ~spot.highlight.edges:~ are both followed by a list of alternating state/edges numbers and color numbers. @@ -1040,7 +1077,7 @@ acceptance sets. This might change in the future. The automaton parser will not complain if these headers are used in some =HOA: v1= file, even if =v1= disallows dots in header names. -However [[https://en.wikipedia.org/wiki/Robustness_principle][the automaton printer is more rigorous]] and will only output +However, [[https://en.wikipedia.org/wiki/Robustness_principle][the automaton printer is more rigorous]] and will only output these lines when version 1.1 is selected. 
Compare: @@ -1155,11 +1192,6 @@ State: 2 {0} rm -f stvstracc.hoa sba.hoa stvstrlab.hoa decorate.hoa #+END_SRC -# LocalWords: html args Büchi accsets BDD SRC stvstracc EOF sed sba -# LocalWords: acc Buchi Hm tgba GFa Fb encodings parametered ary Hk -# LocalWords: bitsets randaut stvstrlab aut Hv hw bigwedge mathsf -# LocalWords: genltl gf GFp Fp parser's rankdir br labelloc ffffa -# LocalWords: fillcolor fontname svg txt Xa ** Arenas for two-player games An automaton can be seen as a two-player game by simply annotating @@ -1240,3 +1272,9 @@ $txt #+RESULTS: [[file:exgame.svg]] + +# LocalWords: html args Büchi accsets BDD SRC stvstracc EOF sed sba +# LocalWords: acc Buchi Hm tgba GFa Fb encodings parametered ary Hk +# LocalWords: bitsets randaut stvstrlab aut Hv hw bigwedge mathsf +# LocalWords: genltl gf GFp Fp parser's rankdir br labelloc ffffa +# LocalWords: fillcolor fontname svg txt Xa diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 6410d00de..acfc92276 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -559,19 +559,18 @@ header: format-version header-items if (ss > 1) { if (det) - { - error(det.loc, - "deterministic automata should have at most " - "one initial state"); - res.universal = spot::trival::maybe(); - } + error(det.loc, + "deterministic automata should have at most " + "one initial state"); else if (no_exist) - { - error(no_exist.loc, - "universal automata should have at most " - "one initial state"); - res.universal = spot::trival::maybe(); - } + error(no_exist.loc, + "universal automata should have at most " + "one initial state"); + // res.universal defaults to maybe() and this is what + // we need here. In presence of multiple initial + // state, fix_initial_state() will have to fuse them, + // and this could turn a non-deterministic automaton + // into a deterministic one. } else { @@ -2662,10 +2661,6 @@ static void fix_initial_state(result_& r) return; } auto& aut = r.h->aut; - // Fiddling with initial state may turn an incomplete automaton - // into a complete one. - if (aut->prop_complete().is_false()) - aut->prop_complete(spot::trival::maybe()); // Multiple initial states. We might need to add a fake one, // unless one of the actual initial state has no incoming edge. std::vector has_incoming(aut->num_states(), 0); @@ -2695,6 +2690,19 @@ static void fix_initial_state(result_& r) if (!found || init_alternation) // We do need a fake initial state init = aut->new_state(); + else + // Modifying one existing initial state may turn an incomplete + // automaton into a complete one. For instance if the state + // that we elected as the future initial state was the only + // incomplete state of the automaton. Similarly this could + // also turn a non-deterministic automaton into a + // deterministic one, but we don't have to deal with this are + // automata with multiple initial states have prop_universal() + // set to maybe() already in prevision of what this function + // will do. 
+ if (aut->prop_complete().is_false()) + aut->prop_complete(spot::trival::maybe()); + aut->set_init_state(init); // The non-alternating case is the easiest, we simply declare diff --git a/tests/core/det.test b/tests/core/det.test index f3249ca27..76125ae7e 100755 --- a/tests/core/det.test +++ b/tests/core/det.test @@ -293,3 +293,31 @@ State: 1 EOF autfilt -q --is-deterministic in.hoa && exit 1 autfilt --merge-transitions in.hoa | autfilt --is-deterministic + + + +# This is a peculiarity of Spot: because it supports only one initial +# state, but the HOA format allows for more, Spot's automaton parser +# will fuse multiple initial states to fit into the Spot definition of +# an automaton. While this is preserving the original language, this +# fusing of initial states may turn a non-deterministic automaton +# (because of the multiple declared initial states) into a +# deterministic one. (Issue #560.) +cat >560.hoa < Date: Fri, 9 Feb 2024 12:16:52 +0100 Subject: [PATCH 398/606] org: fix many errors Most of those errors were pointed out by the language-check tool. However while fixing those I found a few other issues that I fixed. In particular I updated the bibliographic reference for ltlsynt, added some DOI links for some cited papers that had no link, and fixed the broken introduction of ltlgrind. * doc/org/autcross.org, doc/org/autfilt.org, doc/org/citing.org, doc/org/compile.org, doc/org/concepts.org, doc/org/csv.org, doc/org/dstar2tgba.org, doc/org/genaut.org, doc/org/hierarchy.org, doc/org/install.org, doc/org/ioltl.org, doc/org/ltl2tgba.org, doc/org/ltl2tgta.org, doc/org/ltlcross.org, doc/org/ltldo.org, doc/org/ltlfilt.org, doc/org/ltlgrind.org, doc/org/ltlsynt.org, doc/org/oaut.org, doc/org/randaut.org, doc/org/randltl.org, doc/org/satmin.org, doc/org/tut01.org, doc/org/tut02.org, doc/org/tut03.org, doc/org/tut10.org, doc/org/tut11.org, doc/org/tut12.org, doc/org/tut20.org, doc/org/tut22.org, doc/org/tut24.org, doc/org/tut30.org, doc/org/tut40.org, doc/org/tut50.org, doc/org/tut51.org, doc/org/tut52.org, doc/org/tut90.org, doc/org/upgrade2.org: Fix errors. * bin/autfilt.cc, bin/common_aoutput.cc, bin/genaut.cc: Fix some typos in --help text that appeared in the above org files. 
--- bin/autfilt.cc | 2 +- bin/common_aoutput.cc | 4 +-- bin/genaut.cc | 2 +- doc/org/autcross.org | 30 ++++++++-------- doc/org/autfilt.org | 36 ++++++++++++------- doc/org/citing.org | 23 ++++++++---- doc/org/compile.org | 16 ++++----- doc/org/concepts.org | 31 ++++++++--------- doc/org/csv.org | 6 ++-- doc/org/dstar2tgba.org | 74 ++++++++++++++++++++++----------------- doc/org/genaut.org | 6 +++- doc/org/hierarchy.org | 26 +++++++------- doc/org/install.org | 6 ++-- doc/org/ioltl.org | 8 ++--- doc/org/ltl2tgba.org | 79 ++++++++++++++++++++++++------------------ doc/org/ltl2tgta.org | 2 +- doc/org/ltlcross.org | 48 ++++++++++++------------- doc/org/ltldo.org | 45 ++++++++++++------------ doc/org/ltlfilt.org | 33 +++++++++++++----- doc/org/ltlgrind.org | 7 ++-- doc/org/ltlsynt.org | 13 +++++-- doc/org/oaut.org | 14 ++++---- doc/org/randaut.org | 6 ++-- doc/org/randltl.org | 14 ++++---- doc/org/satmin.org | 48 ++++++++++++------------- doc/org/tut01.org | 4 +-- doc/org/tut02.org | 2 +- doc/org/tut03.org | 16 ++++----- doc/org/tut10.org | 2 +- doc/org/tut11.org | 10 +++--- doc/org/tut12.org | 6 ++-- doc/org/tut20.org | 8 ++--- doc/org/tut22.org | 2 +- doc/org/tut24.org | 2 +- doc/org/tut30.org | 6 ++-- doc/org/tut40.org | 12 +++---- doc/org/tut50.org | 14 ++++---- doc/org/tut51.org | 6 ++-- doc/org/tut52.org | 7 ++-- doc/org/tut90.org | 28 +++++++-------- doc/org/upgrade2.org | 14 ++++---- 41 files changed, 393 insertions(+), 325 deletions(-) diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 87677e253..39a8f46b8 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -230,7 +230,7 @@ static const argp_option options[] = { "is-alternating", OPT_IS_ALTERNATING, nullptr, 0, "keep only automata using universal branching", 0 }, { "intersect", OPT_INTERSECT, "FILENAME", 0, - "keep automata whose languages have an non-empty intersection with" + "keep automata whose languages have a non-empty intersection with" " the automaton from FILENAME", 0 }, { "included-in", OPT_INCLUDED_IN, "FILENAME", 0, "keep automata whose languages are included in that of the " diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 9726659c9..ad221812e 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -228,7 +228,7 @@ static const argp_option io_options[] = "(iw) inherently weak. Use uppercase letters to negate them.", 0 }, { "%R, %[LETTERS]R", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to " + "CPU time (excluding parsing), in seconds; add LETTERS to restrict to " "(u) user time, (s) system time, (p) parent process, " "or (c) children processes.", 0 }, { "%N, %n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, @@ -297,7 +297,7 @@ static const argp_option o_options[] = "(iw) inherently weak. Use uppercase letters to negate them.", 0 }, { "%R, %[LETTERS]R", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to" + "CPU time (excluding parsing), in seconds; add LETTERS to restrict to" "(u) user time, (s) system time, (p) parent process, " "or (c) children processes.", 0 }, { "%n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, diff --git a/bin/genaut.cc b/bin/genaut.cc index e873a263c..7c3bbf70b 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -61,7 +61,7 @@ static const argp_option options[] = "equivalent deterministic Rabin automaton of less than N! states.", 0}, { "m-nba", gen::AUT_M_NBA, "RANGE", 0, "An NBA with N+1 states whose determinization needs at least " - "N! 
states", 0}, + "N! states.", 0}, { "cyclist-trace-nba", gen::AUT_CYCLIST_TRACE_NBA, "RANGE", 0, "An NBA with N+2 states that should include cyclist-proof-dba=B.", 0}, { "cyclist-proof-dba", gen::AUT_CYCLIST_PROOF_DBA, "RANGE", 0, diff --git a/doc/org/autcross.org b/doc/org/autcross.org index 9e4972cf6..5f04375c9 100644 --- a/doc/org/autcross.org +++ b/doc/org/autcross.org @@ -16,7 +16,7 @@ The core of =autcross= is a loop that does the following steps: will be named =A0=, =A1=, and =A2=. - Ensure that all produced automata are equivalent. -Statistics about the results of each tools can optionally be saved in +Statistics about the results of each tool can optionally be saved in a CSV file. And in case only those statistics matters, it is also possible to disable the equivalence checks. @@ -46,7 +46,7 @@ following character sequences: : %O filename for the automaton output in HOA, never : claim, LBTT, or ltl2dstar's format -For instance we can use =autfilt --complement %H >%O= to indicate that +For instance, we can use =autfilt --complement %H >%O= to indicate that =autfilt= reads one file (=%H=) in the HOA format, and to redirect the output in file so that =autcross= can find it. The output format is automatically detected, so a generic =%O= is used for the output file @@ -203,7 +203,7 @@ time) will appear with empty columns at the end of the CSV line. Those lines with missing data can be omitted with the =--omit-missing= option. -However data for bogus automata are still included: as shown below +However, data for bogus automata are still included: as shown below =autcross= will report inconsistencies between automata as errors, but it does not try to guess who is incorrect. @@ -251,9 +251,9 @@ EOF * Transformation that preserve or complement languages -By default =autcross= assumes that for a given input the automata -produced by all tools should be equivalent. However it does not -assume that those language should be equivalent to the input (it is +By default, =autcross= assumes that for a given input the automata +produced by all tools should be equivalent. However, it does not +assume that those languages should be equivalent to the input (it is clearly not the case in our complementation test above). If the transformation being tested does preserve the language of an @@ -277,19 +277,19 @@ If a translator exits with a non-zero status code, or fails to output an automaton =autcross= can read, and error will be displayed and the result of the tool will be discarded. -Otherwise =autcross= performs equivalence checks between each pair of +Otherwise, =autcross= performs equivalence checks between each pair of automata. This is done in two steps. First, all produced automata =A0=, =A1=, etc. are complemented: the complement automata are named =Comp(A0)=, =Comp(A1)= etc. Second, =autcross= ensures that =Ai*Comp(Aj)= is empty for all =i= and =j=. If the =--language-preserved= option is passed, the =input= automaton -also participate to these equivalence checks. +also participates to these equivalence checks. -To simulate a problem, let's compare pretend we want verify that -=autfilt --complement= preserves the input language (clearly it does -not, since it actually complement the language of the automaton). +To simulate a problem, let's pretend we want to verify that =autfilt +--complement= preserves the input language (clearly it does not, since +it actually complements the language of the automaton). 
#+BEGIN_SRC sh :prologue "exec 2>&1" :epilogue true randaut -B -n 3 a b --name="automaton %L" | @@ -334,15 +334,15 @@ examples would not exit if the language was really preserved by the tool. Incoherence between the output of several tools (even with -=--language-preserved=) are reported in a similar way. +=--language-preserved=) are reported similarly. * Miscellaneous options ** =--stop-on-error= The =--stop-on-error= option will cause =autcross= to abort on the -first detected error. This include failure to start some tool, -read its output, or failure to passe the sanity checks. Timeouts are +first detected error. This includes failure to start some tool, +read its output, or failure to pass the sanity checks. Timeouts are allowed unless =--fail-on-timeout= is also given. One use for this option is when =autcross= is used in combination with @@ -472,7 +472,7 @@ Performing sanity checks and gathering statistics... No problem detected. #+end_example -However in practice you could also use the =name:= field of the input +However, in practice you could also use the =name:= field of the input automaton, combined with =%M= in the tool specification, to designate an alternate filename to load, or some key to look up somewhere. diff --git a/doc/org/autfilt.org b/doc/org/autfilt.org index bcbe8e4dd..829fac62c 100644 --- a/doc/org/autfilt.org +++ b/doc/org/autfilt.org @@ -29,7 +29,7 @@ process them in batch. (The only restriction is that inside a file an automaton in LBTT's format may not follow an automaton in =ltl2dstar='s format.) -By default the output uses the HOA format. This can be changed using +By default, the output uses the HOA format. This can be changed using [[file:oaut.org][the common output options]] like =--spin=, =--lbtt=, =--dot=, =--stats=... @@ -196,7 +196,7 @@ autfilt --help | sed -n '/ for output):/,/^$/p' | sed '1d;$d' #+end_example When a letter is available both as uppercase and lowercase, the -uppercase version refer to the input automaton, while the lowercase +uppercase version refers to the input automaton, while the lowercase refer to the output automaton. Of course this distinction makes sense only if =autfilt= was instructed to perform an operation on the input automaton. @@ -237,7 +237,7 @@ autfilt --help | sed -n '/Filtering options.*:/,/^$/p' | sed '1d;$d' inherently-weak SCCs is in RANGE. An accepting SCC is inherently weak if it does not have a rejecting cycle. - --intersect=FILENAME keep automata whose languages have an non-empty + --intersect=FILENAME keep automata whose languages have a non-empty intersection with the automaton from FILENAME --is-alternating keep only automata using universal branching --is-colored keep colored automata (i.e., exactly one @@ -349,7 +349,7 @@ autfilt --help | sed -n '/Simplification level:/,/^$/p' | sed '1d;$d' By default, =--any --low= is used, which cause all simplifications to -be skipped. However if any goal is given, than the simplification level +be skipped. However, if any goal is given, then the simplification level defaults to =--high= (unless specified otherwise). If a simplification level is given without specifying any goal, then the goal default to =--small=. @@ -370,7 +370,7 @@ depending on the constraints on the acceptance conditions: in the output, but it may not always succeed and may output non-deterministic automata. 
Note that if =autfilt --deterministic --tgba= fails to output a deterministic automaton, it does not - necessarily implies that a deterministic TGBA does not exist: it + necessarily imply that a deterministic TGBA does not exist: it just implies that =autfilt= could not find it. @@ -442,19 +442,27 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d' generalized Rabin definition from the HOA format; the "share-inf" option allows clauses to share Inf sets, therefore reducing the number of sets - --generalized-streett[=unique-fin|share-fin], --gsa[=unique-fin|share-fin] rewrite the - acceptance condition as generalized Streett; the - "share-fin" option allows clauses to share Fin - sets, therefore reducing the number of sets; the - default "unique-fin" does not + --generalized-streett[=unique-fin|share-fin], --gsa[=unique-fin|share-fin] + rewrite the acceptance condition as generalized + Streett; the "share-fin" option allows clauses to + share Fin sets, therefore reducing the number of + sets; the default "unique-fin" does not --instut[=1|2] allow more stuttering (two possible algorithms) --keep-states=NUM[,NUM...] only keep specified states. The first state will be the new initial state. Implies --remove-unreachable-states. + --kill-states=NUM[,NUM...] mark the specified states as dead (no + successor), and remove them. Implies + --remove-dead-states. --mask-acc=NUM[,NUM...] remove all transitions in specified acceptance sets --merge-transitions merge transitions with same destination and acceptance + --partial-degeneralize[=NUM1,NUM2,...] + Degeneralize automata according to sets + NUM1,NUM2,... If no sets are given, partial + degeneralization is performed for all conjunctions + of Inf and disjunctions of Fin. --product=FILENAME, --product-and=FILENAME build the product with the automaton in FILENAME to intersect languages @@ -467,8 +475,8 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d' quantification, or by assigning them 0 or 1 --remove-dead-states remove states that are unreachable, or that cannot belong to an infinite path - --remove-fin rewrite the automaton without using Fin acceptance - + --remove-fin rewrite the automaton without using Fin + acceptance --remove-unreachable-states remove states that are unreachable from the initial state @@ -505,6 +513,10 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d' sum languages --sum-and=FILENAME build the sum with the automaton in FILENAME to intersect languages + --to-finite[=alive] Convert an automaton with "alive" and "!alive" + propositions into a Büchi automaton interpretable + as a finite automaton. States with a outgoing + "!alive" edge are marked as accepting. #+end_example * Decorations diff --git a/doc/org/citing.org b/doc/org/citing.org index 8d669ae69..bc17e0d6c 100644 --- a/doc/org/citing.org +++ b/doc/org/citing.org @@ -71,12 +71,6 @@ be more specific about a particular aspect of Spot. Presents the automaton format [[file:hoa.org][supported by Spot]] and [[http://adl.github.io/hoaf/support.html][several other tools]]. -- *Reactive Synthesis from LTL Specification with Spot*, - /Thibaud Michaud/, /Maximilien Colange/. - In Proc. of SYNT@CAV'18. ([[https://www.lrde.epita.fr/~max/bibtexbrowser.php?key=michaud.18.synt&bib=perso.bib][bib]] | [[https://www.lrde.epita.fr/dload/papers/michaud.18.synt.pdf][pdf]]) - - Presents the tool [[file:ltlsynt.org][=ltlsynt=]]. 
- - *Generic Emptiness Check for Fun and Profit*, /Christel Baier/, /František Blahoudek/, /Alexandre Duret-Lutz/, /Joachim Klein/, /David Müller/, and /Jan Strejček/. @@ -91,6 +85,15 @@ be more specific about a particular aspect of Spot. In. Proc. of TACAS'22, LNCS 13244, pp. 99--117, Apr 2022. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#casares.22.tacas][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides2.pdf][slides2]]) + Shows applications of the ACD implemented in Spot. + +- *Dissecting ltlsynt*, + /Florian Renkin/, /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, + and Adrien Pommellet. + In Formal Methods in System Design, 2023. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.23.scp][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/renkin.23.scp.pdf][pdf]]) + + Discuss the implementation of [[file:ltlsynt.org][=ltlsynt=]]. + * Obsolete references - *Spot 2.0 — a framework for LTL and ω-automata manipulation*, @@ -111,6 +114,14 @@ be more specific about a particular aspect of Spot. For a while, this used to be the only paper presenting Spot as a model-checking library. +- *Reactive Synthesis from LTL Specification with Spot*, + /Thibaud Michaud/, /Maximilien Colange/. + In Proc. of SYNT@CAV'18. ([[https://www.lrde.epita.fr/~max/bibtexbrowser.php?key=michaud.18.synt&bib=perso.bib][bib]] | [[https://www.lrde.epita.fr/dload/papers/michaud.18.synt.pdf][pdf]]) + + Was the first presentation of the tool [[file:ltlsynt.org][=ltlsynt=]]. The paper + *Dissecting ltlsynt*, mentioned earlier, is more up-to-date. + + # LocalWords: utf html Alexandre Duret Lutz Lewkowicz Amaury Xu pdf # LocalWords: Fauchille Thibaud Michaud Etienne Proc ATVA LNCS TGBA # LocalWords: ltlfilt randltl ltlcross tgba Eddine Fabrice Kordon diff --git a/doc/org/compile.org b/doc/org/compile.org index 6c2f8e6c6..69bab0a72 100644 --- a/doc/org/compile.org +++ b/doc/org/compile.org @@ -49,7 +49,7 @@ might need to tell the compiler several things: search path with =-I location=. 3. The linker should be able to find the Spot library (on Linux it would be called =libspot.so=, unless you forced a static compilation, in which - case it would be =libspot.a=). This might require appending another + case it would be =libspot.a=). This might require appending another directory to the library search path with =-L location= in addition to passing the =-lspot= option. @@ -120,7 +120,7 @@ to tell the dynamic loader about this location. * Case 3: You compiled Spot yourself, and installed it in a custom directory -For instance you might have used +For instance, you might have used #+BEGIN_SRC sh ./configure --prefix ~/usr make @@ -147,7 +147,7 @@ it every time you want to run a binary that depends on Spot. * Case 4: You compiled Spot yourself, but did not install it We do not recommend this, but it is possible to compile programs -that uses an uninstalled version of Spot. +that use an uninstalled version of Spot. So you would just compile Spot in some directory (let's call it =/dir/spot-X.Y/=) with @@ -164,7 +164,7 @@ There are at least two traps with this scenario: =/usr/local/include/spot/= using the same layout, but it also includes some private, internal headers. These headers are normally not installed, so in the other scenarios you cannot use - them. In this setup however, you might use them by mistake. Also + them. 
In this setup however, you might use them by mistake. Also, that directory contains =*.cc= files implementing all the features of the library. Clearly those file should be considered private as well. @@ -192,7 +192,7 @@ Using =libtool link g++= instead of =g++= will cause =libtool= to edit the =g++= command line, and replace =/dir/spot-X.Y/spot/libspot.la= by whatever options are needed to link against the library represented by this /Libtool -archive/. Furthermore the resulting =hello= executable will not be a +archive/. Furthermore, the resulting =hello= executable will not be a binary, but a shell script that defines some necessary environment variables (like =LD_LIBRARY_PATH= to make sure the Spot library is found) before running the actual binary. @@ -215,7 +215,7 @@ will need to add =-pthread= to the compiler flags. In the fourth case where =libtool= is used to link against =libspot.la= linking against =libbddx.la= should not be necessary because -Libtool already handles such dependencies. However the version of =libtool= +Libtool already handles such dependencies. However, the version of =libtool= distributed with Debian is patched to ignore those dependencies, so in this case you have to list all dependencies. @@ -237,8 +237,8 @@ will turn on assertions, and debugging options, while #+END_SRC will disable assertions and enable more optimizations. -If you are writing programs against Spot, we recommend to compile Spot -with =--enable-devel= while your are developing your programs (the +If you are writing programs against Spot, we recommend compiling Spot +with =--enable-devel= while you are developing your programs (the assertions in Spot can be useful to diagnose problems in your program, or in Spot), and then use =--disable-devel= once you are confident and desire speed. diff --git a/doc/org/concepts.org b/doc/org/concepts.org index 7dca74b54..c4de7a324 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -93,7 +93,7 @@ to write that word using [[https://en.wikipedia.org/wiki/Canonical_normal_form#M An ω-automaton is used to represent sets of ω-word. Those look like the classical [[https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton][Nondeterministic Finite Automata]] in the -sense that they also have states and transitions. However ω-automata +sense that they also have states and transitions. However, ω-automata recognize [[#word][ω-words]] instead of finite words. In this context, the notion of /final state/ makes no sense, and is replaced by the notion of [[#acceptance-condition][acceptance condition]]: a run of the automaton (i.e., an infinite @@ -108,7 +108,7 @@ are compatible with the minterms used as letters in the word. The /language/ of an ω-automaton is the set of ω-words it accepts. -There are many kinds of ω-Automata and they mostly differ by their +There are many kinds of ω-Automata, and they mostly differ by their [[#acceptance-condition][acceptance condition]]. The different types of acceptance condition, and whether the automata are deterministic or not can affect their expressive power. @@ -144,7 +144,6 @@ $txt The above automaton would accept the [[#word][ω-word we used previously as an example]]. - As a more concrete example, here is a (complete) Büchi automaton for the [[#ltl][LTL formula]] =G(door_open -> light_on)= that specifies that =light_on= should be true whenever =door_open= is true. @@ -260,15 +259,15 @@ and as many transitions. Spot has some function to merge those "parallel transitions" into larger edges. 
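To make the edge/transition distinction concrete, here is a minimal Python
sketch. It assumes that =split_edges()= and =merge_edges()= are exposed by the
Python bindings as shown; =num_edges()= counts edges (what =%e= reports), and
after =split_edges()= every edge carries a single letter, so the same count
corresponds to transitions (what =%t= reports).

#+BEGIN_SRC python
import spot

aut = spot.translate('GFa & GFb')
print("edges:", aut.num_edges())           # edges, as counted by %e

split = spot.split_edges(aut)              # one letter per edge
print("transitions:", split.num_edges())   # transitions, as counted by %t

split.merge_edges()                        # fuse parallel transitions back
print("edges again:", split.num_edges())
#+END_SRC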
Limiting the number of edges helps most of the -algorithms that have to explore automata, since they have less +algorithms that have to explore automata, since they have fewer successors to consider. The distinction between *edge* and *transition* is something we try -maintain in the various interfaces of Spot. For instance the +to maintain in the various interfaces of Spot. For instance the [[file:oaut.org::#stats][=--stats= option]] has =%e= or =%t= to count either edges or transitions. The method used to add new edge into an automaton is called =new_edge(...)=, not =new_transition(...)=, because it takes a -[[#bdd][BDD]] (representing a Boolean formula) as label. However that naming +[[#bdd][BDD]] (representing a Boolean formula) as label. However, that naming convention is recent in the history of Spot. Spot versions up to 1.2.6 used to call everything /transition/ (and what we now call /transition/ was sometime called /sub-transition/), and traces of this @@ -396,7 +395,7 @@ $txt #+RESULTS: [[file:concept-tgba1.svg]] -This automaton accept all ω-words that infinitely often match the +This automaton accepts all ω-words that infinitely often match the pattern $a^+;b$ (that is: a positive number of letters where $a$ is true are followed by one letter where $b$ is true). @@ -477,8 +476,8 @@ following formula: =(Fin(0)&Inf(1)) | (Fin(2)&Inf(3)) | The following table gives an overview of how some classical acceptance condition are encoded. The first column gives a name that is more -human readable (those names are defined in the [[#hoa][HOA]] format and are also -recognized by Spot). The second column give the encoding as a +human-readable (those names are defined in the [[#hoa][HOA]] format and are also +recognized by Spot). The second column gives the encoding as a formula. Everything here is case-sensitive. #+BEGIN_SRC python :results verbatim raw :exports results @@ -880,9 +879,9 @@ $txt Since this file format is the only one able to represent the range of -ω-automata supported by Spot, and it its default output format. +ω-automata supported by Spot, and it is its default output format. -However note that Spot does not support all automata that can be +However, note that Spot does not support all automata that can be expressed using the HOA format. The present support for the HOA format in Spot, is discussed on [[file:hoa.org][a separate page]], with a section dedicated to the [[file:hoa.org::#restrictions][restrictions]]. @@ -961,7 +960,7 @@ For example the formula ={(1;1)[*]}[]->a= can be interpreted as follows: - the part =...[]->a= requests that =a= should be true at the end of each matched prefix. -Therefore this formula ensures that =a= is true at every even instant +Therefore, this formula ensures that =a= is true at every even instant (if we consider the first instant to be odd). This is the canonical example of formula that can be expressed in PSL but not in LTL. @@ -1018,7 +1017,7 @@ layers. generate families of automata, useful for benchmarking and testing - all the supplied [[file:tools.org][command-line tools]] distributed with Spot are built upon the =libspot= or =libspotgen= libraries - - =libspotltsmin= is a library that helps interfacing Spot with + - =libspotltsmin= is a library that helps to interface Spot with dynamic libraries that [[http://fmt.cs.utwente.nl/tools/ltsmin/][LTSmin]] uses to represent state-spaces. 
It currently supports libraries generated from Promela models using SpinS or a patched version of DiVinE, but you have to install @@ -1067,7 +1066,7 @@ automaton, and that can be queried or set by algorithms: | =complete= | for any letter ℓ, each state has is at least one outgoing transition compatible with ℓ | | =universal= | there is at most one run *recognizing* each word, but not necessarily accepting it | | =semi_deterministic= | any nondeterminism occurs before entering an accepting SCC | -| =unambiguous= | there is at most one run *accepting* each word (but it might be recognized several time) | +| =unambiguous= | there is at most one run *accepting* each word (but it might be recognized several times) | | =stutter_invariant= | the property recognized by the automaton is [[https://www.lrde.epita.fr/~adl/dl/adl/michaud.15.spin.pdf][stutter-invariant]] | For each flag =flagname=, the =twa= class has a method @@ -1097,7 +1096,7 @@ existential. These automata properties are encoded into the [[file:hoa.org::#property-bits][HOA format]], so they can be preserved when building a processing pipeline using the shell. -However the HOA format has support for more properties that do not +However, the HOA format has support for more properties that do not correspond to any =twa= flag. * Named properties for automata @@ -1125,7 +1124,7 @@ Here is a list of named properties currently used inside Spot: | ~degen-levels~ | ~std::vector~ | level associated to each state by the degeneralization algorithm | | ~highlight-edges~ | ~std::map~ | map of (edge number, color number) for highlighting the output | | ~highlight-states~ | ~std::map~ | map of (state number, color number) for highlighting the output | -| ~incomplete-states~ | ~std::set~ | set of states numbers that should be displayed as incomplete (used internally by ~print_dot()~ when truncating large automata) | +| ~incomplete-states~ | ~std::set~ | set of states numbers that should be displayed as incomplete (used internally by ~print_dot()~ when truncating large automata) | | ~original-classes~ | ~std::vector~ | class number associated to each state of a construction (used by some algorithms like =tgba_deternize()=) | | ~original-clauses~ | ~std::vector~ | original DNF clause associated to each state in automata created by =dnf_to_streett()= | | ~original-states~ | ~std::vector~ | original state number before transformation (used by some algorithms like =degeneralize()=) | diff --git a/doc/org/csv.org b/doc/org/csv.org index 33830f563..ffe8ab8ce 100644 --- a/doc/org/csv.org +++ b/doc/org/csv.org @@ -6,7 +6,7 @@ #+PROPERTY: header-args:sh :results verbatim :exports both This page discusses features available in Spot's command-line -tools to produce an consume CSV files. +tools to produce and consume CSV files. * Producing CSV files @@ -176,7 +176,7 @@ Note that if the =--format= option is not specified, the default format is one of: =%f=, =%<,%f=, =%f,%>=, or =%<,%f,%>= depending on whether the input CSV had column before and after the selected one. Furthermore, the formula field is automatically double-quoted if the -formula actually use double quotes, and the input CSV file had more +formula actually uses double quotes, and the input CSV file had more than one column. Typical uses of =ltlfilt= on CSV file include: @@ -251,7 +251,7 @@ cat csv-aut.csv Note that when producing CSV files, it is important to surround =%h= with double quotes to indicate that double quotes from the HOA format -(output by =%h=) should be escaped. 
Otherwise the result would not be +(output by =%h=) should be escaped. Otherwise, the result would not be a valid CSV file. [[file:autfilt.org][=autfilt=]] can process a column of such a CSV file using the same diff --git a/doc/org/dstar2tgba.org b/doc/org/dstar2tgba.org index 00fae5f20..5233d68c3 100644 --- a/doc/org/dstar2tgba.org +++ b/doc/org/dstar2tgba.org @@ -7,19 +7,18 @@ This tool converts automata into transition-based generalized Büchi automata, a.k.a., TGBA. It can also produce Büchi automata on request -(=-B=). It's usage is almost similar to [[file:ltl2tgba.org][=ltl2tgba=]] except that +(=-B=). Its usage is almost similar to [[file:ltl2tgba.org][=ltl2tgba=]] except that instead of supplying a formula to translate, you should specify a filename containing the automaton to convert. In earlier version (before Spot 1.99.4) =dstar2tgba= was only able to -read automata written in [[http://www.ltl2dstar.de/docs/ltl2dstar.html][the format output by =ltl2dstar=]]. However -nowadays it can read automata in any of the supported formats ([[file:hoa.org][HOA]], -LBTT's format, ltl2dstar's format, and never claims). Also -=dstar2tgba= used to be the only tool being able to read ltl2dstar's -format, but today this format can also be read by any of the tool that -read automata. So in practice, running =dstar2tgba some files...= -produces the same result as running =autfilt --tgba --high --small -some files...=. +read automata written in [[http://www.ltl2dstar.de/docs/ltl2dstar.html][the format output by =ltl2dstar=]]. Nowadays, +it can read automata in any of the supported formats ([[file:hoa.org][HOA]], LBTT's +format, ltl2dstar's format, and never claims). Also, =dstar2tgba= used +to be the only tool being able to read ltl2dstar's format, but today +this format can also be read by any of the tool that read automata. +So in practice, running =dstar2tgba some files...= produces the same +result as running =autfilt --tgba --high --small some files...=. * Two quick examples @@ -33,7 +32,7 @@ The following command instructs =ltl2dstar= to: 1. run =ltl2tgba -Ds= to build a Büchi automaton for =(a U b) & GFb=, and then 2. convert that Büchi automaton into a deterministic Rabin automaton (DRA) stored in =fagfb=. -Additionally we use =ltlfilt= to convert our formula to the +Additionally, we use =ltlfilt= to convert our formula to the prefix format used by =ltl2dstar=. #+BEGIN_SRC sh :results silent @@ -247,23 +246,33 @@ dstar2tgba --help | sed -n '/Output format:/,/^$/p' | sed '1d;$d' --lbtt or --spin) --check[=PROP] test for the additional property PROP and output the result in the HOA format (implies -H). PROP - may be any prefix of 'all' (default), - 'unambiguous', 'stutter-invariant', or 'strength'. - - -d, --dot[=1|a|b|B|c|e|f(FONT)|h|n|N|o|r|R|s|t|v|+INT] + may be some prefix of 'all' (default), + 'unambiguous', 'stutter-invariant', + 'stutter-sensitive-example', 'semi-determinism', + or 'strength'. + -d, --dot[=1|a|A|b|B|c|C(COLOR)|e|E|f(FONT)|h|i(ID)|k|K|n|N|o|r|R|s|t|u|v|y|+INT|BA @@ -346,7 +356,7 @@ An important point you should be aware of when comparing these numbers of states is that the deterministic automata produced by =ltl2dstar= are complete, while the automata produced by =dstar2tgba= (deterministic or not) are not complete by default. This can explain -a difference of one state (the so called "sink" state). +a difference of one state (the so-called "sink" state). You can instruct =dstar2tgba= to output a complete automaton using the =--complete= option (or =-C= for short). 
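As a small illustration of the off-by-one state counts mentioned above, the
following Python sketch (assuming =spot.complete()= and =spot.is_complete()=
are exposed as shown) completes a Büchi automaton, which adds a rejecting
"sink" state when one is missing.

#+BEGIN_SRC python
import spot

a = spot.translate('a U b', 'BA')   # not complete by default
print(spot.is_complete(a), a.num_states())

c = spot.complete(a)                # copy with a rejecting sink state added
print(spot.is_complete(c), c.num_states())
#+END_SRC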
@@ -380,7 +390,7 @@ create one new Fin-accepting set for each conjunct of the CNF. The combination of these two algorithms is implemented by the =to_generalized_buchi()= function in Spot. -Finally a TGBA can easily be converted into a BA with classical +Finally, a TGBA can easily be converted into a BA with classical degeneralization algorithms (our version of that includes several SCC-based optimizations described in our [[https://www.lrde.epita.fr/~adl/dl/adl/babiak.13.spin.pdf][SPIN'13 paper]]). @@ -396,7 +406,7 @@ to Rabin by adding some extra Fin or Inf terms to the acceptance conditions and ensuring that those terms are always true. The conversion implemented is a variation of Krishnan et al.'s -"Deterministic ω-Automata vis-a-vis Deterministic Büchi Automata" +[[https://doi.org/10.1007/3-540-58325-4_202]["Deterministic ω-Automata vis-a-vis Deterministic Büchi Automata"]] (ISAAC'94) paper. They explain how to convert a deterministic Rabin automaton (DRA) into a deterministic Büchi automaton (DBA) when such an automaton exist. The surprising result is that when a DRA is @@ -408,8 +418,8 @@ SCC-wise: any DRA will be converted into a BA, and the determinism will be conserved only for strongly connected components where determinism can be conserved. (If some SCC is not DBA-realizable, it will be cloned into several deterministic SCC, but the jumps between -these SCCs will be nondeterministic.) Our implementation also work on -automata with transition-based acceptance. +these SCCs will be nondeterministic.) Our implementation also works +on automata with transition-based acceptance. This specialized conversion is built in the =remove_fin()= procedure described above. diff --git a/doc/org/genaut.org b/doc/org/genaut.org index 2acc22ecb..7de898d5c 100644 --- a/doc/org/genaut.org +++ b/doc/org/genaut.org @@ -16,6 +16,10 @@ genaut --help | sed -n '/Pattern selection:/,/^$/p' | sed '1d;$d' #+RESULTS: #+begin_example + --cyclist-proof-dba=RANGE A DBA with N+2 states that should be included + in cyclist-trace-nba=B. + --cyclist-trace-nba=RANGE An NBA with N+2 states that should include + cyclist-proof-dba=B. --ks-nca=RANGE A co-Büchi automaton with 2N+1 states for which any equivalent deterministic co-Büchi automaton has at least 2^N/(2N+1) states. @@ -26,7 +30,7 @@ genaut --help | sed -n '/Pattern selection:/,/^$/p' | sed '1d;$d' complementary Streett automaton needs at least N! states. --m-nba=RANGE An NBA with N+1 states whose determinization needs - at least N! states + at least N! states. #+end_example diff --git a/doc/org/hierarchy.org b/doc/org/hierarchy.org index 3b079c4b4..f8975cb5d 100644 --- a/doc/org/hierarchy.org +++ b/doc/org/hierarchy.org @@ -73,7 +73,7 @@ makes it possible to express constraints on finite prefixes. and /guarantee/ properties, while /reactivity/ properties are Boolean combinations of /recurrence/ and /persistence/ properties. The negation of a /safety/ property is a /guarantee/ property (and -vice-versa), and the same duality hold for /persistence/ and +vice versa), and the same duality hold for /persistence/ and /recurrence/ properties. The red letters in each of these seven classes are keys used in @@ -176,13 +176,13 @@ G({[*]}[]-> Fa) #+end_example Note that the order of the =ltlfilt= filters could be changed provided -the =-n10= stays at the end. For instance we could first keep all +the =-n10= stays at the end. For instance, we could first keep all recurrence before removing obligations and then removing LTL formulas. 
The order given above actually starts with the easier checks first and -keep the most complex tests at the end of the pipeline so they are +keep the most complex tests at the end of the pipeline, so they are applied to fewer formulas. Testing whether a formula is an LTL formula is very cheap, testing if a formula is an obligation is harder -(it may involves a translation to automata and a powerset +(it may involve a translation to automata and a powerset construction), and testing whether a formula is a recurrence is the most costly procedure (it involves a translation as well, plus conversion to deterministic Rabin automata, and an attempt to convert @@ -225,10 +225,10 @@ is not in class syntactic-C (we just know that some equivalent formula is in class syntactic-C). =ltlfilt= has options like =--syntactic-guarantee=, -=--syntactic-persistence=, etc. to match formulas from this classes. +=--syntactic-persistence=, etc. to match formulas from these classes. Here is how to generate 10 random LTL formulas that describe safety -properties but that are not in the syntactic-safety class: +properties, but that are not in the syntactic-safety class: #+BEGIN_SRC sh randltl -n-1 a b | @@ -262,7 +262,7 @@ ltlfilt --simplify -f 'b M Gb' #+RESULTS: : Gb -However in the general case Spot is not able to provide the equivalent +However, in the general case Spot is not able to provide the equivalent formula from the appropriate syntactic class. * What to do with each class? @@ -318,9 +318,9 @@ $txt Note that the default translation used by =ltl2tgba= will turn any syntactic persistence formulas (this includes obligations formulas) into a weak automaton. In a weak automaton, the acceptance condition -could be defined in term of SCCs, i.e., the cycles of some SCCs are -either all accepting, or all rejecting. As a consequence, it there is -no incentive to use transition-based acceptance; instead, state-based +could be defined in terms of SCCs, i.e., the cycles of some SCCs are +either all accepting, or all rejecting. As a consequence, if there is +no incentive to use transition-based acceptance, state-based acceptance is output by default. With =ltl2tgba -D= we get a (minimal) deterministic weak Büchi @@ -437,7 +437,7 @@ already be =t= (meaning that all runs are accepting). However since the translator does not do anything particular about safety formulas, it is possible to find some pathological formulas for which the translator outputs a non-deterministic Büchi automaton where not all -run are accepting. +runs are accepting. Here is an example: @@ -622,7 +622,7 @@ requirement. ** Persistence -Since /persistence/ properties are outside of the /recurrence/ class, +Since /persistence/ properties are outside the /recurrence/ class, they cannot be represented by deterministic Büchi automata. The typical persistence formula is =FGa=, and using =-D= on this is hopeless. @@ -665,7 +665,7 @@ return an automaton whose acceptance is one of =Fin(0)=, =t=, or =f=. The translator is aware of that, so when it detects that the input formula is a syntactic-persistence, it simplifies its translation slightly to ensure that the output will use at most one acceptance -set. (It is possible to define a persistence properties using an LTL +set. (It is possible to define a persistence property using an LTL formula that is not a syntactic-persistence, in that case this optimization is simply not applied.) 
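The following Python sketch illustrates this point (assuming =mp_class()=
accepts a ='v'= option for a verbose class name): a syntactic-persistence
formula like =FGa= is translated with at most one acceptance set, while a
recurrence formula like =GFa & GFb= may need more.

#+BEGIN_SRC python
import spot

for s in ('Ga', 'FGa', 'GFa & GFb'):
    f = spot.formula(s)
    aut = spot.translate(f)
    # class in the temporal hierarchy, and acceptance condition used
    print(s, spot.mp_class(f, 'v'), aut.get_acceptance())
#+END_SRC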
diff --git a/doc/org/install.org b/doc/org/install.org index b65c02074..0a08677e1 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -133,14 +133,14 @@ sudo dnf install spot python3-spot spot-devel spot-doc or a subset of those packages. The package =spot= contains the command-line tools, =python3-spot= contains the Python bindings, =spot-devel= contains the C++ header files, and =spot-doc= the -documentation that you can also find online. Those packages depends +documentation that you can also find online. Those packages depend on =libspot= that contains the shared libraries. * Installing as a Conda package Spot is available as a [[https://anaconda.org/conda-forge/spot][Conda-forge package]] for Linux and OS X. - A typical installation would go as follow: + A typical installation would go as follows: #+BEGIN_SRC sh conda create --name myenv python=3.8 # adjust as desired @@ -150,7 +150,7 @@ on =libspot= that contains the shared libraries. Note that this package is built automatically by the conda-forge infrastructure, but this requires some manual trigger after each - release. Therefore there might be a delay between the moment a + release. Therefore, there might be a delay between the moment a release of Spot is announced, and the availability of the Conda package. diff --git a/doc/org/ioltl.org b/doc/org/ioltl.org index 0f6193a3f..24ef72549 100644 --- a/doc/org/ioltl.org +++ b/doc/org/ioltl.org @@ -165,7 +165,7 @@ The following operators are supported: As an extension to LBT's syntax, alphanumeric atomic propositions that follow the "=p= + number" rule will be accepted if they do not conflict with one of the operators (e.g., =i=, the /implies/ operator, -cannot be used as an atomic proposition). Also any atomic proposition +cannot be used as an atomic proposition). Also, any atomic proposition may be double-quoted. These extensions are compatible with the syntax used by [[http://www.ltl2dstar.de][ltl2dstar]]. @@ -224,7 +224,7 @@ that case discussing associativity and parentheses makes no sense. The =--csv= causes the formulas to be double-quoted (with inner double-quotes doubled, as per RFC 4180), regardless of the selected format. This is needed if the formula should appear in a CSV file, -and you want to be robust to formulas that contains commas or +and you want to be robust to formulas that contain commas or double-quotes. We have [[file:csv.org][examples of reading or writing CSV files on a separate page]]. @@ -244,7 +244,7 @@ Other =%=-sequences are supported by these tools, and documented in the output of =--help=. For instance =%s= can be used to compute the size of a formula. -By default everything is output to standard output, so that you can +By default, everything is output to standard output, so that you can redirect the output to a file, and pipe it to another tool. The =--output= (or =-o=) allows you to construct a filename using some of the above =%=-sequences. @@ -267,7 +267,7 @@ wc -l example-*.ltl Option =-0= is useful if the list of formulas is passed to =xargs=. =xargs= normally splits its input on white space (which are frequent in LTL formulas), but you can use =xargs -0= to split the input on -null characters. So for instance the following two invocations have +null characters. So for instance the following two invocations have nearly the same output: #+BEGIN_SRC sh diff --git a/doc/org/ltl2tgba.org b/doc/org/ltl2tgba.org index f0c7e4231..d35ef98ff 100644 --- a/doc/org/ltl2tgba.org +++ b/doc/org/ltl2tgba.org @@ -32,7 +32,7 @@ less frequent. 
Except for =-B= which forces state-based acceptance, these options build transition-based automata by default. Internally, Spot only supports transition-based automata. However, while transition-based -automata can be smaller then their state-based counterpart, there are +automata can be smaller than their state-based counterpart, there are many cases where transition-based acceptance does not bring any benefits. In case where it is detected that the transition-based automaton looks @@ -105,7 +105,7 @@ $txt Characters like ⓿, ❶, etc. denotes the acceptance sets a transition belongs to. In this case, there is only one acceptance set, called -=0=, containing a single transition. An acceptance set can contains +=0=, containing a single transition. An acceptance set can contain multiple transitions, and a transition may also belong to multiple acceptance sets. An infinite path through this automaton is accepting iff it visits each acceptance set infinitely often. Therefore, in the @@ -238,7 +238,7 @@ ltl2tgba --help | sed -n '/Output format:/,/^$/p' | sed '1d;$d' 'unambiguous', 'stutter-invariant', 'stutter-sensitive-example', 'semi-determinism', or 'strength'. - -d, --dot[=1|a|A|b|B|c|C(COLOR)|e|E|f(FONT)|h|k|K|n|N|o|r|R|s|t|u|v|y|+INT|%O=' will produce deterministic automata for all obligation properties and many recurrence properties. Using @@ -312,7 +312,7 @@ positive and negative formulas by the ith translator). - Cross-comparison checks: for some state-space $S$, all $P_i\otimes S$ are either all empty, or all non-empty. - Similarly all $N_i\otimes S$ are either all empty, or all non-empty. + Similarly, all $N_i\otimes S$ are either all empty, or all non-empty. A cross-comparison failure could be displayed as: @@ -328,7 +328,7 @@ positive and negative formulas by the ith translator). These products tests may sometime catch errors that were not captured by the first two tests if one non-deterministic automaton - recognize less words than what it should. If the input automata + recognize fewer words than what it should. If the input automata are all deterministic or the =--determinize= option is used, this test is redundant and can be disabled. (In fact, the =--determinize= option implies option =--product=0= to do so.) @@ -349,7 +349,7 @@ positive and negative formulas by the ith translator). printed. This test may catch errors that were not captured by the first two - tests if one non-deterministic automaton recognize less words than + tests if one non-deterministic automaton recognize fewer words than what it should. If the input automata are deterministic or the =--determinize= option is used, this test is redundant and can be disabled. (In fact, the =--determinize= option implies option @@ -569,7 +569,7 @@ since most statistics cannot be computed without an automaton... Those lines with missing data can be omitted with the =--omit-missing= option (this used to be the default up to Spot 1.2). -However data for bogus automata are still included: as shown below +However, data for bogus automata are still included: as shown below =ltlcross= will report inconsistencies between automata as errors, but it does not try to guess who is incorrect. @@ -579,9 +579,9 @@ The number of column output in the CSV or JSON outputs depend on the options passed to =ltlcross=. Additional columns will be output if =--strength=, =--ambiguous=, =--automata=, or =--product=+N= are used. 
-Columns =formula= and =tool= contain the formula translated and the +Columns =formula= and =tool= contain the formula translated, and the command run to translate it. In the CSV, these columns contain the -actual text. In the JSON output, these column contains an index into +actual text. In the JSON output, these columns contain an index into the =formula= and =tool= table declared separately. =exit_status= and =exit_code= are used to indicate if the translator @@ -634,7 +634,7 @@ These SCC strengths can be used to compute the strength of the automaton as a whole: - an automaton is terminal if it contains only non-accepting or terminal SCCs, -- an automaton is weak if it it contains only non-accepting, +- an automaton is weak if it contains only non-accepting, terminal, or weak SCCs, - an automaton is strong if it contains at least one strong SCC. @@ -645,7 +645,7 @@ usually prefer terminal automata over weak automata, and weak automata over strong automata, because the emptiness check of terminal (and weak) automata is easier. When working with alternating automata, all those strength-related columns will be empty, because the routines -used to compute those statistic do not yet support universal edges. +used to compute those statistics do not yet support universal edges. =nondetstates= counts the number of non-deterministic states in the automaton. =nondeterministic= is a Boolean value indicating if the @@ -669,7 +669,7 @@ count the number of state, transitions and strongly-connect components in the product that has been built between the translated automaton and a random model. For a given formula, the same random model is of course used against the automata translated by all tools. Comparing -the size of these product might give another indication of the +the size of these products might give another indication of the "conciseness" of a translated automaton. There is of course a certain "luck factor" in the size of the product. @@ -868,7 +868,7 @@ Classes ‘data.table’ and 'data.frame': 20 obs. of 16 variables: - attr(*, ".internal.selfref")= #+end_example -Currently the data frame shows one line per couple (formula, tool). +Currently, the data frame shows one line per couple (formula, tool). This makes comparing tools quite difficult, as their results are on different lines. @@ -952,8 +952,8 @@ ggplot(dt2, aes(x=states.small, y=states.deter)) + ** =--stop-on-error= The =--stop-on-error= option will cause =ltlcross= to abort on the -first detected error. This include failure to start some translator, -read its output, or failure to passe the sanity checks. Timeouts are +first detected error. This includes failure to start some translator, +read its output, or failure to pass the sanity checks. Timeouts are allowed unless =--fail-on-time= is also given. One use for this option is when =ltlcross= is used in combination with @@ -1003,7 +1003,7 @@ Here is the procedure used: them by length (as [[file:ltlgrind.org][=ltlgrind --sort=]] would do) - process every mutation until one is found that exhibit the bug - repeat the process with this new formula, and again until a formula - is found for which no mutation exhibit the bug + is found for which no mutation exhibits the bug - output that last formula in =FILENAME= If =--save-bogus=OTHERFILENAME= is provided, every bogus formula found @@ -1172,7 +1172,7 @@ The =--no-check= option disables all sanity checks, and only use the supplied formulas in their positive form. 
When checks are enabled, the negated formulas are intermixed with the -positives ones in the results. Therefore the =--no-check= option can +positives ones in the results. Therefore, the =--no-check= option can be used to gather statistics about a specific set of formulas. ** =--verbose= @@ -1183,7 +1183,7 @@ be used to gather statistics about a specific set of formulas. The verbose option can be useful to troubleshoot problems or simply follow the list of transformations and tests performed by =ltlcross=. -For instance here is what happens if we try to cross check =ltl2tgba= +For instance here is what happens if we try to cross-check =ltl2tgba= and =ltl3ba -H1= on the formula =FGa=. Note that =ltl2tgba= will produce transition-based generalized Büchi automata, while =ltl3ba -H1= produces co-Büchi alternating automata. @@ -1231,8 +1231,8 @@ First =FGa= and its negations =!FGa= are translated with the two tools, resulting in four automata: two positive automata =P0= and =P1= for =FGa=, and two negative automata =N0= and =N1=. -Some basic information about the collected automata are displayed. -For instance we can see that although =ltl3ba -H1= outputs co-Büchi +Some basic information about the collected automata is displayed. +For instance, we can see that although =ltl3ba -H1= outputs co-Büchi alternating automata, only automaton =N1= uses universal edges: the automaton =P1= can be used like a non-alternating co-Büchi automaton. @@ -1250,9 +1250,9 @@ rewriting them to get rid of any =Fin= acceptance. After this preparatory work, it is time to actually compare these automata. Together, the tests =P0*N0= and =Comp(N0)*Comp(P0)= ensure -that the automaton =N0= is really the complement of =P0=. Similarly +that the automaton =N0= is really the complement of =P0=. Similarly, =P1*N1= and =Comp(N1)*Comp(P1)= ensure that =N1= is the complement of -=P1=. Finally =P0*N1= and =P1*N0= ensure that =P1= is equivalent to +=P1=. Finally, =P0*N1= and =P1*N0= ensure that =P1= is equivalent to =P0= and =N1= is equivalent to =N0=. Note that if we reduce =ltlcross='s ability to determinize @@ -1377,7 +1377,7 @@ No problem detected. :END: The =ltlcross= command itself has no built-in support for - parallelization (patches welcome). However its interface makes it + parallelization (patches welcome). However, its interface makes it rather easy to parallelize =ltlcross= runs with third-party tools such as: @@ -1405,7 +1405,7 @@ No problem detected. with 8 processes in parallel. Here =ltlcross= is called with option =-q= to silence most its regular output as the 8 instances of =ltlcross= would be otherwise writing to the same terminal. - With =-q=, only errors are displayed. Additionally =--save-bogus= + With =-q=, only errors are displayed. Additionally, =--save-bogus= is used to keep track of all formulas causing errors. The =>>bugs.ltl= syntax means to open =bugs.ltl= in append mode, so that =bugs.ltl= does not get overwritten each time a new =ltlcross= instance finds a bug. diff --git a/doc/org/ltldo.org b/doc/org/ltldo.org index b46932c8b..71a39c994 100644 --- a/doc/org/ltldo.org +++ b/doc/org/ltldo.org @@ -22,7 +22,7 @@ any other tool. As a motivating example, consider a scenario where we want to run [[https://sourceforge.net/projects/ltl3ba/][=ltl3ba=]] on a set of 10 formulas stored in a file. For each formula -we would like to compute compute the number of states and edges in the +we would like to compute the number of states and edges in the Büchi automaton produced by =ltl3ba=. 
Here is the input file: @@ -192,7 +192,7 @@ ltldo --help | sed -n '/character sequences:/,/^$/p' | sed '1d;$d' : ltl2dstar's format Contrarily to =ltlcross=, it this not mandatory to specify an output -filename using one of the sequence for that last line. For instance +filename using one of the sequence for that last line. For instance, we could simply run a formula though =echo= to compare different output syntaxes: @@ -328,7 +328,7 @@ will be changed into '{DRA} ~/mytools/ltl2dstar-0.5.2 --output-format=hoa %[MW]L %O' #+end_example -Therefore you can type the following to obtain a Dot output (as +Therefore, you can type the following to obtain a Dot output (as requested with =-d=) for the neverclaim produced by =ltl2ba -f a=. #+BEGIN_SRC sh :prologue export SPOT_DOTEXTRA= SPOT_DOTDEFAULT= @@ -354,7 +354,7 @@ The =ltl2ba= argument passed to =ltldo= was interpreted as if you had typed ={ltl2ba}ltl2ba -f %s>%O=. Those shorthand patterns are only tested if the command string does -not contains any =%= character. They should always patch a prefix of +not contain any =%= character. They should always patch a prefix of the command, ignoring any leading directory. This makes it possible to add options: @@ -415,7 +415,7 @@ syntax, but cannot cope with double-quoted atomic propositions). There are some cases where the renaming is not completely transparent. For instance if a translator tool outputs some HOA file named after the formula translated, the name will be output unmodified (since this -can be any text string, there is not way for =ltldo= to assume it is +can be any text string, there is no way for =ltldo= to assume it is an LTL formula). In the following example, you can see that the automaton uses the atomic proposition =Error=, but its name contains a reference to =p0=. @@ -518,9 +518,9 @@ The sorting criterion can be specified using =--smallest= or =--greatest=, optionally followed by a format string with =%=-sequences. The default criterion is =%s,%e=, so the number of states will be compared first, and in case of equality the number of -edges. If we desire the automaton that has the fewest states, and in -case of equality the smallest number of non-deterministic states, we -can use the following command instead. +edges. If we desire the automaton that has the fewest states (=%s=), +and in case of equality the smallest number of non-deterministic +states (=%n=), we can use the following command instead. #+BEGIN_SRC sh ltldo ltl2ba ltl3ba 'ltl2tgba -s' -f 'F(a & Xa | FGa)' --smallest=%s,%n @@ -549,20 +549,19 @@ State: 2 {0} --END-- #+end_example -We can of course apply this on a large number of formulas. For -instance here is a more complex pipeline, where we take 11 patterns -from Dwyer et al. (FMSP'98), and print which translator among -=ltl2ba=, =ltl3ba=, and =ltl2tgba -s= would produce the smallest -automaton. +We can of course apply this to a stream of formulas. For instance +here is a more complex pipeline, where we take 11 patterns from [[https://doi.org/10.1145/302405.302672][Dwyer +et al. (FMSP'98)]], and print which translator among =ltl2ba=, +=ltl3ba=, and =ltl2tgba -s= would produce the smallest automaton. 
#+BEGIN_SRC sh -genltl --dac=10..20 --format=%F:%L,%f | + genltl --dac=10..20 --format=%F:%L,%f | ltldo -F-/2 ltl2ba ltl3ba 'ltl2tgba -s' --smallest --stats='%<,%T' #+END_SRC #+RESULTS: #+begin_example dac-patterns:10,ltl2ba -dac-patterns:11,ltl3ba +dac-patterns:11,ltl2ba dac-patterns:12,ltl2tgba -s dac-patterns:13,ltl2tgba -s dac-patterns:14,ltl2tgba -s @@ -603,22 +602,22 @@ dac-patterns:20,G((p0 & !p1) -> (p2 W p1)) This is a two-column CSV file where each line is a description of the origin of the formula (=%F:%L=), followed by the formula itself (=%f=). The =ltldo= from the previous pipeline simply takes its input -from the second column of its standard input (=-F-/2=), run that -formula through the three translator, pick the smallest automaton -(=--smallest=), and for this automaton, it display the translator that +from the second column of its standard input (=-F-/2=), runs that +formula through the three translators, picks the smallest automaton +(=--smallest=), and for this automaton, it displays the translator that was used (=%T=) along with the portion of the CSV file that was before the input column (=%<=). -If you are curious about the actually size of the automata produced by +If you are curious about the actual size of the automata produced by =ltl2ba=, =ltl3ba=, and =ltl2tgba -s= in the above example, you can quickly build a CSV file using the following pipeline where each -command append a new column. We wrap =ltl2ba= and =ltl3ba= with +command appends a new column. We wrap =ltl2ba= and =ltl3ba= with =ltldo= so that they can process one column of the CSV that is input, and output statistics in CSV as output. =ltl2tgba= does not need that, as it already supports those features. In the resulting CSV file, displayed as a table below, entries like =2s 4e 0d= represent an -automaton with 2 states, 4 edges, and that is not deterministic. . +automaton with 2 states, 4 edges, and that is not deterministic. (We have a [[file:csv.org][separate page]] with more examples of reading and writing CSV files.) @@ -679,8 +678,8 @@ When a timeout occurs a warning is printed on stderr, and no automaton command/formula. The processing then continue with other formulas and tools. Timeouts are not considered as errors, so they have no effect on the exit status of =ltldo=. This behavior can be changed with -option =--fail-on-timeout=, in which case timeouts are considered -as errors. +option =--fail-on-timeout=, in which case timeouts are considered as +errors. For each command (that does not terminate with a timeout) the runtime can be printed using the =%r= escape sequence. 
This makes =ltldo= an diff --git a/doc/org/ltlfilt.org b/doc/org/ltlfilt.org index 0a60479ca..6609afb4e 100644 --- a/doc/org/ltlfilt.org +++ b/doc/org/ltlfilt.org @@ -79,14 +79,22 @@ ltlfilt --help | sed -n '/Transformation options.*:/,/^$/p' | sed '1d;$d' --nnf rewrite formulas in negative normal form --relabel[=abc|pnn] relabel all atomic propositions, alphabetically unless specified otherwise - --relabel-bool[=abc|pnn] relabel Boolean subexpressions, alphabetically + --relabel-bool[=abc|pnn] relabel Boolean subexpressions that do not + share atomic propositions, relabel alphabetically unless specified otherwise + --relabel-overlapping-bool[=abc|pnn] + relabel Boolean subexpressions even if they share + atomic propositions, relabel alphabetically unless + specified otherwise --remove-wm rewrite operators W and M using U and R (this is an alias for --unabbreviate=WM) --remove-x remove X operators (valid only for stutter-insensitive properties) -r, --simplify[=LEVEL] simplify formulas according to LEVEL (see below); LEVEL is set to 3 if omitted + --sonf[=PREFIX] rewrite formulas in suffix operator normal form + --sonf-aps[=FILENAME] when used with --sonf, output the newly introduced + atomic propositions --unabbreviate[=STR] remove all occurrences of the operators specified by STR, which must be a substring of "eFGiMRW^", where 'e', 'i', and '^' stand respectively for @@ -110,7 +118,7 @@ be reordered by =ltlfilt= even when the formula is not changed otherwise. This is because Spot internally order all operands of commutative and associative operators, and that this order depends on the order in which the subformulas are first encountered. Adding -transformation options such as =-r= may alter this order. However +transformation options such as =-r= may alter this order. However, this difference is semantically insignificant. Formulas can be easily negated using the =-n= option, rewritten into @@ -161,14 +169,14 @@ ltlfilt -f '(a & !b) & GF(a & !b) & FG(!c & a)' --relabel-bool=pnn In the first formula, the independent =a & !b= and =!c= subformulas were respectively renamed =p0= and =p1=. In the second formula, =a & -!b= and =!c & a= are dependent so they could not be renamed; instead +!b= and =!c & a= are dependent, so they could not be renamed; instead =a=, =!b= and =c= were renamed as =p0=, =p1= and =p2=. This option was originally developed to remove superfluous formulas from benchmarks of LTL translators. For instance the automata generated for =GF(a|b)= and =GF(p0)= should be structurally equivalent: replacing =p0= by =a|b= in the second automaton should -turn in into the first automaton, and vice-versa. (However algorithms +turn in into the first automaton, and vice versa. (However algorithms dealing with =GF(a|b)= might be slower because they have to deal with more atomic propositions.) So given a long list of LTL formulas, we can combine =--relabel-bool= and =-u= to keep only one instance of @@ -284,7 +292,7 @@ ltldo ltl3ba -f '"proc@loc1" U "proc@loc2"' --spin : } This case also relabels the formula before calling =ltl3ba=, and it -then rename all the atomic propositions in the output. +then renames all the atomic propositions in the output. An example showing how to use the =--from-ltlf= option is on [[file:tut12.org][a separate page]]. 
@@ -308,13 +316,19 @@ ltlfilt --help | sed -n '/Filtering options.*:/,/^$/p' | sed '1d;$d' --guarantee match guarantee formulas (even pathological) --implied-by=FORMULA match formulas implied by FORMULA --imply=FORMULA match formulas implying FORMULA + --liveness match liveness properties --ltl match only LTL formulas (no PSL operator) + -N, --nth=RANGE assuming input formulas are numbered from 1, keep + only those in RANGE --obligation match obligation formulas (even pathological) + --persistence match persistence formulas (even pathological) + --recurrence match recurrence formulas (even pathological) --reject-word=WORD keep formulas that reject WORD --safety match safety formulas (even pathological) --size=RANGE match formulas with size in RANGE --stutter-insensitive, --stutter-invariant match stutter-insensitive LTL formulas + --suspendable synonym for --universal --eventual --syntactic-guarantee match syntactic-guarantee formulas --syntactic-obligation match syntactic-obligation formulas --syntactic-persistence match syntactic-persistence formulas @@ -471,8 +485,8 @@ size. So =F(a & b & c)= would have Boolean-size 2. This type of size is probably a better way to classify formulas that are going to be translated as automata, since transitions are labeled by Boolean formulas: the complexity of the Boolean subformulas has little -influence on the overall translation. Here are 10 random formula with -Boolean-size 5: +influence on the overall translation. Here are 10 random formulas +with Boolean-size 5: #+BEGIN_SRC sh randltl -n -1 --tree-size=12 a b | ltlfilt --bsize=5 -n 10 @@ -513,6 +527,7 @@ ltlfilt --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' %h, %[vw]h the class of the formula is the Manna-Pnueli hierarchy ([v] replaces abbreviations by class names, [w] for all compatible classes) + %l the serial number of the output formula %L the original line number in the input file %[OP]n the nesting depth of operator OP. OP should be a single letter denoting the operator to count, or @@ -522,7 +537,7 @@ ltlfilt --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' %r wall-clock time elapsed in seconds (excluding parsing) %R, %[LETTERS]R CPU time (excluding parsing), in seconds; Add - LETTERS to restrict to(u) user time, (s) system + LETTERS to restrict to (u) user time, (s) system time, (p) parent process, or (c) children processes. %s the length (or size) of the formula @@ -540,7 +555,7 @@ As a trivial example, use #+HTML: --latex --format='$%f$' to enclose formula in LaTeX format with =$...$=. -But =--format= can be useful in more complex scenarios. For instance +But =--format= can be useful in more complex scenarios. For instance, you could print only the line numbers containing formulas matching some criterion. In the following, we print only the numbers of the lines of =scheck.ltl= that contain guarantee formulas: diff --git a/doc/org/ltlgrind.org b/doc/org/ltlgrind.org index 424fb2f59..77c724e3e 100644 --- a/doc/org/ltlgrind.org +++ b/doc/org/ltlgrind.org @@ -5,9 +5,8 @@ #+HTML_LINK_UP: tools.html #+PROPERTY: header-args:sh :results verbatim :exports both -:results scalar: Is the same as :results verbatim. - -:results table: Interprets the results as an Org This tool lists +This tool lists formulas that are similar to but simpler than a given +formula by applying simple mutations to it, like removing operands or formulas that are similar to but simpler than a given formula by applying simple mutations to it, like removing operands or operators. 
This is meant to be used with ltlcross to simplify a @@ -66,7 +65,7 @@ The idea behind this tool is that when a bogus algorithm is found with =ltlcross=, you probably want to debug it using a smaller formula than the one found by =ltlcross=. So you would give the formula found by =ltlcross= as an argument to =ltlgrind= and then use the resulting -mutations as an new input for =ltlcross=. It might report an error on +mutations as a new input for =ltlcross=. It might report an error on one of the mutation, which is guaranteed to be simpler than the initial formula. The process can then be repeated until no error is reported by =ltlcross=. diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index e4fbc66e4..cd3c23d62 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -27,7 +27,7 @@ controller, the acceptance condition is irrelevant and trivially true. - =--formula= or =--file=: a specification in LTL or PSL. One of =--ins= or =--outs= may be omitted, as any atomic proposition not listed -as input can be assumed to be output and vice-versa. +as input can be assumed to be output and vice versa. The following example illustrates the synthesis of a controller ensuring that input =i1= and =i2= are both true initially if and only @@ -272,11 +272,20 @@ Further improvements are described in the following paper: /Alexandre Duret-Lutz/, and /Adrien Pommellet/. Presented at the SYNT'21 workshop. ([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.21.synt.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.21.synt][bib]]) -Simplification of Mealy machines is discussed in: +Simplification of Mealy machines is discussed in the following papers: - *Effective reductions of Mealy machines*, /Florian Renkin/, /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, and /Adrien Pommellet/. Presented at FORTE'22. ([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.22.forte.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.22.forte][bib]]) +- *The Mealy-machine reduction functions of Spot*, /Florian Renkin/, + /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, and /Adrien Pommellet/. + Science of Computer Programming, 230(102995), August 2023. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.23.fmsd][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/renkin.23.fmsd.pdf][pdf]]) + +A more recent paper covering many aspects of =ltlsynt= is the following + +- *Dissecting ltlsynt*, /Florian Renkin/, /Philipp + Schlehuber-Caissier/, /Alexandre Duret-Lutz/, and Adrien Pommellet. + In Formal Methods in System Design, 2023. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.23.scp][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/renkin.23.scp.pdf][pdf]]) # LocalWords: utf ltlsynt AIGER html args mapsto SRC acc aiger TLSF # LocalWords: UNREALIZABLE unrealizable SYNTCOMP realizability Proc diff --git a/doc/org/oaut.org b/doc/org/oaut.org index 1d5d8a7cb..a33f4c638 100644 --- a/doc/org/oaut.org +++ b/doc/org/oaut.org @@ -198,7 +198,7 @@ State: 1 #+END_SRC Option =m= uses mixed acceptance, i.e, some states might use -state-based acceptance while other will not: +state-based acceptance while others will not: #+BEGIN_SRC sh :wrap SRC hoa ltl2tgba -Hm '(Ga -> Gb) W c' @@ -569,16 +569,16 @@ The dot output can also be customized via two environment variables: - =SPOT_DOTDEFAULT= contains default arguments for the =--dot= option (for when it is used implicitly, or used as just =--dot= without argument). 
For instance after =export SPOT_DOTDEFAULT=vcsn=, using - =--dot= is equivalent to =--dot=vcsn=. However using =--dot=xyz= + =--dot= is equivalent to =--dot=vcsn=. However, using =--dot=xyz= (for any value of =xyz=, even empty) will ignore the =SPOT_DOTDEFAULT= variable. If the argument of =--dot= contains a dot character, then this dot is replaced by the contents of =SPOT_DOTDEFAULT=. So ~--dot=.A~ would be equivalent to =--dot=vcsnA= with our example definition of =SPOT_DOTDEFAULT=. -- =SPOT_DOTEXTRA= may contains an arbitrary string that will be emitted +- =SPOT_DOTEXTRA= may contain an arbitrary string that will be emitted in the dot output before the first state. This can be used to modify any attribute. For instance (except for this page, where we had - do demonstrate the various options of =--dot=, and a few pages where + to demonstrate the various options of =--dot=, and a few pages where we show the =--dot= output verbatim) all the automata displayed in this documentation are generated with the following environment variables set: @@ -598,7 +598,7 @@ passing option =i= to the dot printer, this unique number will be used to form a unique =id= attribute for these elements: a prefix =S= (for state), =E= (for edge), or "SCC=" is simply added to the unique number. Additionally, using =i(graphid)= will define =graphid= as -that =id= of the automaton. GraphViz will keep these identifier in +that =id= of the automaton. GraphViz will keep these identifiers in the generated SVG, so this makes it possible to modify rendering of the automaton using CSS or javascript. @@ -683,7 +683,7 @@ ltl2tgba -G -D '!a & FGa' --dot=sbarx | dot2tex --autosize --nominsize > out.tex Caveats: - =dot2tex= should be called with option =--autosize= in order to - compute the size of each label before calling GraphViz to layout the + compute the size of each label before calling GraphViz to lay out the graph. This is because GraphViz cannot compute the correct size of mathematical formulas. Make sure you use =dot2tex= version 2.11 or later, as earlier releases had a bug where sizes were interpreted @@ -836,7 +836,7 @@ current process and any of its children: adding =p= (parent) and =c= (children) will show only the selected time. Note that few tools actually execute other processes: [[file:autfilt.org][=autfilt=]] and [[file:ltl2tgba.org][=ltl2tgba=]] can do so when calling a SAT solver for [[file:satmin.org][SAT-based minimization]], and [[file:ltldo.org][=ltldo=]] will -obviously call any listed tool. However in the case of =ltldo= the +obviously call any listed tool. However, in the case of =ltldo= the measured time is that of executing the other tools, so the result of =%[p]R= is likely to be always 0. diff --git a/doc/org/randaut.org b/doc/org/randaut.org index ec4cd44b8..ed2dd560d 100644 --- a/doc/org/randaut.org +++ b/doc/org/randaut.org @@ -8,7 +8,7 @@ The =randaut= tool generates random (connected) automata. By default, it will generate a random automaton with 10 states, no -acceptance sets, and using a set of atomic propositions you have to +acceptance set, and using a set of atomic propositions you have to supply. #+NAME: randaut1 @@ -37,7 +37,7 @@ In an automaton with $Q$ states and density $e$, the degree of each state will follow a normal distribution with mean $1+(Q-1)d$ and variance $(Q-1)e(1-e)$. -In particular =-e0= will cause all states to have 1 successors, and +In particular =-e0= will cause all states to have 1 successor, and =-e1= will cause all states to be interconnected. 
#+NAME: randaut2 @@ -212,7 +212,7 @@ The output format can be controlled using [[file:oaut.org][the common output opt like =--hoaf=, =--dot==, =--lbtt=, and =--spin=. Note that =--spin= automatically implies =--ba=. -Automata are send to standard output by default, by you can use =-o= +Automata are sent to standard output by default, by you can use =-o= to give a filename, or even a pattern for filenames. For instance the following generates 20 automata, but store them in different files according to the acceptance condition. The format =%g= represent the diff --git a/doc/org/randltl.org b/doc/org/randltl.org index 989f56987..7a3d5a97f 100644 --- a/doc/org/randltl.org +++ b/doc/org/randltl.org @@ -108,13 +108,13 @@ them. Rather than running =randltl= several times with different seeds, we can use the =-n= option to specify a number of formulas to produce as seen in the very first example of this page. -By default =randltl= will never output the same formula twice (this +By default, =randltl= will never output the same formula twice (this can be changed with the =--allow-dups= option), so it may generate more formulas internally than it eventually prints. To ensure termination, for each output formula the number of ignored (because duplicated) random formulas that are generated is limited to 100000. -Therefore in some situations, most likely when generating small -formulas, with few atomic proposition, you may see =randltl= stop +Therefore, in some situations, most likely when generating small +formulas with few atomic propositions, you may see =randltl= stop before the requested number of formulas has been output with an error message. @@ -142,7 +142,7 @@ randltl -n 5 a b c --tree-size=22..30 : 1 The tree size is just the number of nodes in the syntax tree of the -formula during its construction. However because Spot automatically +formula during its construction. However, because Spot automatically applies some /trivial simplifications/ during the construction of its formulas (e.g., =F(F(a)= is reduced to =F(a)=, =a&0= to =0=, etc.), the actual size of the formula output may be smaller than the @@ -150,7 +150,7 @@ tree size specified. It is pretty common to obtain the formulas =0= or =1= among the first formulas output, since many random formulas trivially simplify to -these. However because duplicate formulas are suppressed by default, +these. However, because duplicate formulas are suppressed by default, they shall only occur once. Stronger simplifications may be requested using the =-r= option, that @@ -257,7 +257,7 @@ randltl -B -n 5 a b c In that case, priorities should be set with =--boolean-priorities=. -Finally, PSL formulas may be output using the =-P= option. However +Finally, PSL formulas may be output using the =-P= option. However, keep in mind that since LTL formulas are PSL formulas, generating random PSL formula may produce many LTL formulas that do not use any PSL operator (this is even more so the case when simplifications are @@ -329,7 +329,7 @@ or 1 The =--ltl-priorities= option we have seen previously now recognize some new PSL-specific operators: =Closure= is the ={sere}= operator, =EConcat= is the ={sere}<>->f= operator, and =UConcat= is the -={sere}[]->f= operator. When these operator are selected, they +={sere}[]->f= operator. 
When these operators are selected, they require a SERE argument which is generated according to the priorities set by =--sere-priorities=: =eword= is the empty word, =boolform= is a Boolean formula (generated using the priorities set by diff --git a/doc/org/satmin.org b/doc/org/satmin.org index 4ea80c8a0..055f82814 100644 --- a/doc/org/satmin.org +++ b/doc/org/satmin.org @@ -51,7 +51,7 @@ Let us first state a few facts about this minimization procedure. * How to change the SAT solver used -By default Spot uses PicoSAT call_version()[:results raw], this SAT-solver +By default, Spot uses PicoSAT call_version()[:results raw], this SAT-solver is built into the Spot library, so that no temporary files are used to store the problem. @@ -79,10 +79,10 @@ post-processing routine used by both tools to prefer a deterministic automaton over a smaller equivalent nondeterministic automaton. -However =-D= is not a guarantee to obtain a deterministic automaton, +However, =-D= is not a guarantee to obtain a deterministic automaton, even if one exists. For instance, =-D= fails to produce a -deterministic automaton for =a U X(b | GF!b)=. Instead we get a 4-state -non-deterministic automaton. +deterministic automaton for =a U X(b | GF!b)=. Instead, we get a +4-state non-deterministic automaton. #+BEGIN_SRC sh ltl2tgba -D 'a U X(b | GF!b)' --stats='states=%s, det=%d' @@ -161,7 +161,7 @@ some help from [[http://www.ltl2dstar.de/][=ltl2dstar=]]. The first is purely syntactic. If a formula belongs to the class of "syntactic recurrence formulas", it expresses a syntactic property. -(Of course there are formulas that expresses a syntactic properties +(Of course there are formulas that expresses syntactic properties without being syntactic recurrences.) [[file:ltlfilt.org][=ltlfilt=]] can be instructed to print only formulas that are syntactic recurrences: @@ -220,9 +220,9 @@ minimized into an even smaller automaton if we use multiple acceptance sets. Unfortunately because =dstar2tgba= does not know the formula being -translated, and it always convert a DRA into a DBA (with a single +translated, and it always converts a DRA into a DBA (with a single acceptance set) before further processing, it does not know if using -more acceptance sets could be useful to further minimize it. This +more acceptance sets could be useful to further minimize it. This number of acceptance sets can however be specified on the command-line with option =-x sat-acc=M=. For instance: @@ -257,10 +257,10 @@ options). The picture is slightly inaccurate in the sense that both =ltl2tgba= and =dstar2tgba= are actually using the same post-processing chain: only the initial translation to TGBA or conversion to DBA differs, the -rest is the same. However in the case of =dstar2tgba=, no +rest is the same. However, in the case of =dstar2tgba=, no degeneration or determinization are needed. -Also the picture does not show what happens when =-B= is used: any +Also, the picture does not show what happens when =-B= is used: any DTBA is degeneralized into a DBA, before being sent to "DTBA SAT minimization", with a special option to request state-based output. @@ -285,7 +285,7 @@ The following options can be used to fine-tune this procedure: resulting DTBA is equivalent to the input. - =-x sat-minimize= :: enable SAT-based minimization. It is the same as =-x sat-minimize=1= (which is the default value). It performs a dichotomy - to find the correct automaton size.This option implies =-x tba-det=. + to find the correct automaton size. 
This option implies =-x tba-det=. - =-x sat-minimize=[2|3]= :: enable SAT-based minimization. Let us consider each intermediate automaton as a =step= towards the minimal automaton and assume =N= as the size of the starting @@ -311,7 +311,7 @@ The following options can be used to fine-tune this procedure: - =-x sat-incr-steps=N= :: set the value of =sat-incr-steps= to N. It does not make sense to use it without =-x sat-minimize=2= or =-x sat-minimize=3=. - =-x sat-acc=$m$= :: attempt to build a minimal DTGBA with $m$ acceptance sets. - This options implies =-x sat-minimize=. + This option implies =-x sat-minimize=. - =-x sat-states=$n$= :: attempt to build an equivalent DTGBA with $n$ states. This also implies =-x sat-minimize= but won't perform any loop to lower the number of states. Note that $n$ should be @@ -319,7 +319,7 @@ The following options can be used to fine-tune this procedure: and =dstar2tgba= both remove sink states in their output by default (use option =--complete= to output a complete automaton). Also note that even with the =--complete= option, the output - automaton may have appear to have less states because the other + automaton may appear to have fewer states because the other are unreachable. - =-x state-based= :: for all outgoing transition of each state to belong to the same acceptance sets. @@ -332,7 +332,7 @@ is implied. * Using =autfilt --sat-minimize= to minimize any deterministic ω-automaton -This interface is new in Spot 1.99 and allows to minimize any +This interface is new in Spot 1.99 and allows minimizing any deterministic ω-automaton, regardless of the acceptance condition used. By default, the procedure will try to use the same acceptance condition (or any inferior one) and produce transition-based @@ -389,7 +389,7 @@ $txt This is clearly smaller than the input automaton. In this example the acceptance condition did not change. The SAT-based minimization only -tries to minimize the number of states, but sometime the +tries to minimize the number of states, but sometimes the simplifications algorithms that are run before we attempt SAT-solving will simplify the acceptance, because even removing a single acceptance set can halve the run time. @@ -411,7 +411,7 @@ $txt Note that instead of naming the acceptance condition, you can actually -give an acceptance formula in the [[http://adl.github.io/hoaf/#acceptance][HOA syntax]]. For example we can +give an acceptance formula in the [[http://adl.github.io/hoaf/#acceptance][HOA syntax]]. For example, we can attempt to create a co-Büchi automaton with #+NAME: autfiltsm5 @@ -444,7 +444,7 @@ obtain an upper bound on the number of states if you haven't specified specify by hand. Here is an example demonstrating the case where the input automaton is -smaller than the output. Let's take this small TGBA as input: +smaller than the output. Let's take this small TGBA as input: #+NAME: autfiltsm6 #+BEGIN_SRC sh :exports code @@ -472,7 +472,7 @@ echo $? #+RESULTS: autfiltsm7 : 1 -However if we allow more states, it will work: +However, if we allow more states, it will work: #+NAME: autfiltsm8 #+BEGIN_SRC sh :exports code @@ -491,7 +491,7 @@ By default, the SAT-based minimization tries to find a smaller automaton by performing a binary search starting from =N/2= (N being the size of the starting automaton). After various benchmarks, this algorithm proves to be the best. However, in some cases, other rather similar methods might be better. 
The -algorithm to execute and some other parameters can be set thanks to the +algorithm to execute, and some other parameters can be set thanks to the =--sat-minimize= option. The =--sat-minimize= option takes a comma separated list of arguments @@ -530,9 +530,9 @@ that can be any of the following: - =sat-naive= :: use the =naive= algorithm to find a smaller automaton. It starts from =N= and then checks =N-1=, =N-2=, etc. until the last successful check. -- =sat-langmap= :: Find the lower bound of default sat-minimize procedure. This +- =sat-langmap= :: Find the lower bound of default sat-minimize procedure. This relies on the fact that the size of the minimal automaton is at least equal - to the total number of different languages recognized by the automaton's + to the total number of different languages recognized by the automaton's states. - =colored= :: force all transitions (or all states if =-S= is used) to belong to exactly one acceptance condition. @@ -559,7 +559,7 @@ $txt [[file:autfiltsm9.svg]] ... to the following, where the automaton is colored, i.e., each state -belong to exactly one acceptance set: +belongs to exactly one acceptance set: #+NAME: autfiltsm10 #+BEGIN_SRC sh :exports code @@ -589,7 +589,7 @@ dstar2tgba -D -x sat-minimize,sat-acc=2 --stats='input(states=%S) output(states= #+RESULTS: : input(states=11) output(states=5, acc-sets=2, det=1) -Here is the contents of the =stats.csv= file: +Here are the contents of the =stats.csv= file: #+begin_src sh :exports results :results output raw sed '1a\ |-| @@ -623,8 +623,8 @@ file follows RFC4180 in escaping double-quote by doubling them. In the above example, the DRA produced by =ltl2dstar= had 11 states. In the first line of the =stats.csv= file, you can see the -minimization function had a 8-state input, which means that -=dstar2tgba= first reduced the 11-state (complete) DRA into a 8-state +minimization function had an 8-state input, which means that +=dstar2tgba= first reduced the 11-state (complete) DRA into an 8-state (complete) DBA before calling the SAT-based minimization (the fact that the input was reduced to a *DBA* is not very obvious from this trace), This first line shows the SAT-based minimization for a diff --git a/doc/org/tut01.org b/doc/org/tut01.org index 91154944c..9d446e3cc 100644 --- a/doc/org/tut01.org +++ b/doc/org/tut01.org @@ -12,7 +12,7 @@ Our first task is to read formulas and print them in another syntax. * Shell command Using =ltlfilt=, you can easily read an LTL formula in one syntax, and -output it in another syntax. By default the parser will accept a +output it in another syntax. By default, the parser will accept a formula in [[file:ioltl.org][any infix syntax]], but if the input is in the prefix syntax of LBT, you should use [[file:ioltl.org][=--lbt-input=]]. The output syntax is controlled using different options such as (=--spin=, =--lbt=, =--latex=, etc.). @@ -364,7 +364,7 @@ atomically output in a way that Spin can parse. This Spin syntax is not accepted by default by the infix parser, but it has an option for that. This is called /lenient parsing/: when the parser finds a parenthetical block it does not understand, it simply -assume that this block represents an atomic proposition. +assumes that this block represents an atomic proposition. 
#+BEGIN_SRC sh ltlfilt --lenient -f '(a > 4) U (b < 5)' diff --git a/doc/org/tut02.org b/doc/org/tut02.org index 0aaddb59a..5d63b35c9 100644 --- a/doc/org/tut02.org +++ b/doc/org/tut02.org @@ -141,7 +141,7 @@ ltlfilt -ps --relabel-bool=pnn --define -f '"Proc@Here" U ("var > 10" | "var < 4 For instance =a U (a & b)= will not be relabeled into =(p0) U (p1)= because that would hide the fact that both =p0= and =p1= check for - =a=. Instead we get this: + =a=. Instead, we get this: #+BEGIN_SRC sh ltlfilt -ps --relabel-bool=pnn --define -f 'a U (a & b)' diff --git a/doc/org/tut03.org b/doc/org/tut03.org index c70a3dab3..40b59d82b 100644 --- a/doc/org/tut03.org +++ b/doc/org/tut03.org @@ -10,7 +10,7 @@ This page explains how to build formulas and how to iterate over their syntax trees. We will first describe how to build a formula from scratch, by using -the constructors associated to each operators, and show the basic +the constructors associated to each operator, and show the basic accessor methods for formulas. We will do that for C++ first, and then Python. Once these basics are covered, we will show examples for traversing and transforming formulas (again in C++ then Python). @@ -166,7 +166,7 @@ The Python equivalent is similar: for child in f: print(" *", child) # the type of the operator can be accessed with kind(), which returns - # an op_XXX constant (corresponding the the spot::op enum of C++) + # an op_XXX constant (corresponding to the spot::op enum of C++) print(f[1][0], "is F" if f[1][0].kind() == spot.op_F else "is not F") # "is" is keyword in Python, the so shortcut is called _is: print(f[1][1], "is G" if f[1][1]._is(spot.op_G) else "is not G") @@ -191,8 +191,8 @@ formulas (for instance the [[file:tut02.org][relabeling function]]) actually rec traverse the input formula to construct the output formula. Using the operators described in the previous section is enough to -write algorithms on formulas. However there are two special methods -that makes it a lot easier: =traverse= and =map=. +write algorithms on formulas. However, there are two special methods +that make it a lot easier: =traverse= and =map=. =traverse= takes a function =fun=, and applies it to each subformulas of a given formula, including that starting formula itself. The @@ -206,7 +206,7 @@ in the formula. We also print each subformula to show the recursion, and stop the recursion as soon as we encounter a subformula without sugar (the =is_sugar_free_ltl()= method is a constant-time operation that tells whether a formula contains a =F= or =G= operator) to save -time time by not exploring further. +time by not exploring further. #+NAME: gcount_cpp #+BEGIN_SRC C++ @@ -375,11 +375,11 @@ without lambda: : exchanges: 6 Now let's pretend that we want to define =xchg_fg= as a lambda, and -=count= to by captured by reference. In order to call pass the lambda -recursively to =map=, the lambda needs to know its address. +that we want =count= to be captured by reference. In order to pass +the lambda recursively to =map=, the lambda needs to know its address. Unfortunately, if the lambda is stored with type =auto=, it cannot capture itself. A solution is to use =std::function= but that has a -large penalty cost. We can work around that by assuming that that +large penalty cost. 
We can work around that by assuming that the address will be passed as an argument (=self=) to the lambda: #+BEGIN_SRC C++ diff --git a/doc/org/tut10.org b/doc/org/tut10.org index d4c45708a..1071b74cd 100644 --- a/doc/org/tut10.org +++ b/doc/org/tut10.org @@ -124,7 +124,7 @@ All the translation pipeline (this includes simplifying the formula, translating the simplified formula into an automaton, and simplifying the resulting automaton) is handled by the =spot::translator= class. An instance of this class can configured by calling =set_type()= to -chose the type of automaton to output, =set_level()= to set the level +choose the type of automaton to output, =set_level()= to set the level of optimization (it's high by default), and =set_pref()= to set various preferences (like small or deterministic) or characteristic (complete, unambiguous, state-based acceptance) for the resulting diff --git a/doc/org/tut11.org b/doc/org/tut11.org index 8cb77fe16..a6026de2a 100644 --- a/doc/org/tut11.org +++ b/doc/org/tut11.org @@ -145,9 +145,9 @@ State: 1 If you drop the =-D= option from =ltl2tgba=, or the =det= argument from =spot.translate()=, or the =set_pref(spot::postprocessor::Deterministic)= in C++, then a -non-deterministic monitor can be output. By default Spot will build -both a deterministic and a non-deterministic monitor, it will output -the smallest one. +non-deterministic monitor can be output. By default, Spot will try to +build both a deterministic and a non-deterministic monitor, then it will +keep the smallest one. * Details @@ -198,7 +198,7 @@ ltl2tgba -D -M 'G(press -> red U green)' -d This monitor will report violations if both *red* and *green* are off when the button is pressed, and also if *red* goes off without *green* -going on. However note that in the original formula, =red U green= +going on. However, note that in the original formula, =red U green= implies that *green* will eventually become true, and the monitor cannot ensure that: a system where *red* is continuously on, and *green* is continuously off would not trigger any violation. The @@ -280,7 +280,7 @@ State: 1 * Further reading -If your application requires monitors and you plan to build them with +If your application requires monitors, and you plan to build them with Spot, it is very likely that you will want to convert the resulting automata to your own data structure. See [[file:tut21.org][how to print an automaton in a custom format]] to learn all you need to iterate over Spot's automata. diff --git a/doc/org/tut12.org b/doc/org/tut12.org index 57444e230..559a1dc63 100644 --- a/doc/org/tut12.org +++ b/doc/org/tut12.org @@ -16,7 +16,7 @@ automata over infinite words. ltlfilt --from-ltlf -f "$f" #+end_src -However there is a trick we can use in case we want to use Spot to +However, there is a trick we can use in case we want to use Spot to build a finite automaton that recognize some LTLf (i.e. LTL with finite semantics) property. The plan is as follows: @@ -233,8 +233,8 @@ you could replace =alive= by =!dead= by using ~ltlfilt When working with LTLf, there are two different semantics for the next operator: -- The weak next: =X a= is true if =a= hold in the next step or if - there are no next step. In particular, =X(0)= is true iff there are +- The weak next: =X a= is true if =a= hold in the next step, or if + there is no next step. In particular, =X(0)= is true iff there is no successor. (By the way, you cannot write =X0= because that is an atomic proposition: use =X(0)= or =X 0=.) - The strong next: =X[!] 
a= is true if =a= hold in the next step *and* diff --git a/doc/org/tut20.org b/doc/org/tut20.org index 57a939a7d..9820a06e7 100644 --- a/doc/org/tut20.org +++ b/doc/org/tut20.org @@ -7,7 +7,8 @@ #+PROPERTY: header-args:python :results output :exports both #+PROPERTY: header-args:C+++ :results verbatim :exports both -The goal is to start from a never claim, as produced by Spin, e.g.: +Our goal convert never claim produced by Spin into an automaton in [[file:hoa.org][the +HOA format]]. We will use the following never claim as input: #+BEGIN_SRC sh spin -f '[]<>foo U bar' > tut20.never @@ -42,11 +43,10 @@ accept_all: } #+end_example -and convert this into an automaton in [[file:hoa.org][the HOA format]]. Note that the automaton parser of Spot can read automata written either as never claims, in LBTT's format, in ltl2dstar's format or in -the HOA format, and there is no need to specify which format you +the HOA format, and there is no need to specify which format it should expect. Even if our example uses a never claim as input, the code we write will read any of those formats. @@ -203,7 +203,7 @@ existing atomic propositions will reuse the existing variable. In the example for [[file:tut10.org][translating LTL into BA]], we did not specify any =bdd_dict=, because the =translator= object will create a new one by -default. However it is possible to supply such a =bdd_dict= to the +default. However, it is possible to supply such a =bdd_dict= to the translator as well. Similarly, in the Python bindings, there is a global =bdd_dict= that is implicitly used for all operations, but it can be specified if needed. diff --git a/doc/org/tut22.org b/doc/org/tut22.org index 2c14eebcb..8538754ce 100644 --- a/doc/org/tut22.org +++ b/doc/org/tut22.org @@ -158,7 +158,7 @@ faked as follows: - additionally, we set =prop_state_acc(true)= to indicate that the automaton should output as if it were state-based. -Some algorithm recognize the =prop_state_acc()= properties and trigger +Some algorithms recognize the =prop_state_acc()= properties and trigger some special handling of the automaton, maybe to preserve its "fake state-based nature". diff --git a/doc/org/tut24.org b/doc/org/tut24.org index efbdaa5f0..fd561eec8 100644 --- a/doc/org/tut24.org +++ b/doc/org/tut24.org @@ -123,7 +123,7 @@ The "universality" of an edge can be tested using the =twa_graph::is_univ_dest()= method: it takes a destination state as input, as in =aut->is_univ_dest(t.dst)= or =aut->is_univ_dest(aut->get_init_state_number())=. For convenience -this method can also be called on a edge, as in =aut->is_univ_dest(t)=. +this method can also be called on an edge, as in =aut->is_univ_dest(t)=. The set of destination states of a universal edge can be iterated over via the =twa_graph::univ_dests()= method. This takes either a diff --git a/doc/org/tut30.org b/doc/org/tut30.org index 164d868f9..1da9302ae 100644 --- a/doc/org/tut30.org +++ b/doc/org/tut30.org @@ -26,7 +26,7 @@ $txt [[file:tut30in.svg]] Our goal is to generate an equivalent Büchi automaton, preserving -determinism if possible. However nothing of what we will write is +determinism if possible. However, nothing of what we will write is specific to Rabin acceptance: the same code will convert automata with any acceptance to Büchi acceptance. 
@@ -82,9 +82,9 @@ $txt #+RESULTS: [[file:tut30out.svg]] -In the general case transforming an automaton with a complex +In the general case, transforming an automaton with a complex acceptance condition into a Büchi automaton can make the output -bigger. However the post-processing routines may manage to simplify +bigger. However, the post-processing routines may manage to simplify the result further. diff --git a/doc/org/tut40.org b/doc/org/tut40.org index 8d9b004da..eb96e4e3c 100644 --- a/doc/org/tut40.org +++ b/doc/org/tut40.org @@ -71,7 +71,7 @@ autfilt --dot='.#' tut40.hoa Whether two states are in simulation can be decided as a game between two players. If the game is in state $(q,q')$, spoiler (player 0) first selects a transition from state $q$, and duplicator (player 1) -then has to chose a compatible transition from state $q'$. Duplicator +then has to choose a compatible transition from state $q'$. Duplicator of course wins if it always manages to select compatibles transitions, otherwise spoiler wins. @@ -136,7 +136,7 @@ $txt Since player 1 is winning from state $(4,0)$, we know that state 4 simulates state 0. Also since player 1 would also win from state $(5,1)$, we can tell that state 5 simulates state 1. We also learn -that state 5 does not simulates states 2 and 3. We could build other +that state 5 does not simulate states 2 and 3. We could build other games, or add more state to this game, to learn about other pairs of states. @@ -225,13 +225,13 @@ To solve a safety game =g= that has been created by the above method, it is enough to just call =solve_safety_game(g)=. The function =solve_game(g)= used below is a more generic interface that looks at the acceptance condition of the game to dispatch to the more specific -game solver. These functions returns the player winning in the -initial state. However, as a side-effect they define additional +game solver. These functions return the player winning in the +initial state. However, as a side effect they define additional automaton properties that indicate the winner of each state, and the associated strategy. -Therefore to list all simulation pairs we learned from a game starting -in state $(i,j)$, we could proceed as follow: +Therefore, to list all simulation pairs we learned from a game starting +in state $(i,j)$, we could proceed as follows: #+NAME: computesim_tut40 #+BEGIN_SRC python :exports code diff --git a/doc/org/tut50.org b/doc/org/tut50.org index 789e3e26a..2c6c97a6f 100644 --- a/doc/org/tut50.org +++ b/doc/org/tut50.org @@ -12,7 +12,7 @@ there are two different interfaces that can be used: 2. the *explicit* =twa_graph= interface. To demonstrate the difference between the two interfaces, we will -write an small depth-first search that prints all states accessible +write a small depth-first search that prints all states accessible from the initial state of an automaton. * The explicit interface @@ -543,12 +543,12 @@ which returns a pointer to a =state=. Then, calling that allows iterating over all successors. Different subclasses of =twa= will instantiate different subclasses of -=state= and =twa_succ_iterator= . In the case of =twa_graph=, the +=state= and ~twa_succ_iterator~. In the case of =twa_graph=, the subclasses used are =twa_graph_succ_iterator= and =twa_graph_state=, but you can ignore that until you have to write your own =twa= subclass. 
-The interface puts few requirement on memory management: we want to be +The interface puts few requirements on memory management: we want to be able to write automata that can forget about their states (and recompute them), so there is no guarantee that reaching the same state twice will return the same pointer twice. Even calling @@ -625,7 +625,7 @@ are $n$ successors, there will be $1$ call to =first()=, $n$ calls to =next()=, and $n+1$ calls to =done()=, so a total of $2n+2$ virtual method calls. -However =first()= and =next()= also return a Boolean stating whether +However, =first()= and =next()= also return a Boolean stating whether the loop could continue. This allows rewriting the above code as follows: @@ -688,13 +688,13 @@ following equivalent code: : 0->1 : 0->2 -This works in a similar way as =out(s)= in the explicit interface. +This works similarly to =out(s)= in the explicit interface. Calling =aut->succ(s)= creates a fake container (=internal::succ_iterable=) with =begin()= and =end()= methods that return STL-like iterators (=internal::succ_iterator=). Incrementing the =internal::succ_iterator= will actually increment the =twa_succ_iterator= they hold. Upon completion of the loop, the -temporary =internal::succ_iterable= is destroyed and its destructor +temporary =internal::succ_iterable= is destroyed, and its destructor passes the iterator back to =aut->release_iter()= for recycling. ** Recursive DFS (v1) @@ -823,7 +823,7 @@ They are performed in =state_unicity_table::is_new()= and in ** Iterative DFS For a non-recursive version, let us use a stack of -=twa_succ_iterator=. However these iterators do not know their +=twa_succ_iterator=. However, these iterators do not know their source, so we better store that in the stack as well if we want to print it. diff --git a/doc/org/tut51.org b/doc/org/tut51.org index c9fbf3d11..4f2519aee 100644 --- a/doc/org/tut51.org +++ b/doc/org/tut51.org @@ -37,7 +37,7 @@ often holds as well. ** What needs to be done In Spot, Kripke structures are implemented as subclass of =twa=, but -some operations have specialized versions that takes advantages of the +some operations have specialized versions that take advantage of the state-labeled nature of Kripke structure. For instance the on-the-fly product of a Kripke structure with a =twa= is slightly more efficient than the on-the-fly product of two =twa=. @@ -202,7 +202,7 @@ implement =compare()= using =hash()=. #+RESULTS: demo-state Note that a state does not know how to print itself, this -a job for the automaton. +is a job for the automaton. ** Implementing the =kripke_succ_iterator= subclass @@ -575,7 +575,7 @@ It is perfectly possible to write a =kripke= (or even =twa=) subclass that returns pointers to preallocated states. In that case =state::destroy()= would have to be overridden with an empty body so that no deallocation occurs, and the automaton would have to get rid -of the allocated states in its destructor. Also the =state::clone()= +of the allocated states in its destructor. Also, the =state::clone()= methods is overridden by a function that returns the identity. An example of class following this convention is =twa_graph=, were states returned by the on-the-fly interface are just pointers into the actual diff --git a/doc/org/tut52.org b/doc/org/tut52.org index d68254ceb..bc1a324f5 100644 --- a/doc/org/tut52.org +++ b/doc/org/tut52.org @@ -18,8 +18,8 @@ put for a toy example. This document shows how to create a Kripke structure that is stored as an explicit graph. 
The class for those is =spot::kripke_graph= and -works in a similar way as the class =spot::twa_graph= used for -automata. The main difference between those two classes is that +works similarly to the class =spot::twa_graph= (used for +automata). The main difference between those two classes is that Kripke structures labels the states instead of the transitions. Using =spot::kripke_graph= instead of =spot::twa_graph= saves a bit of memory. @@ -188,8 +188,7 @@ int main() Note that this main function is similar to the main function we used for [[file:tut51.org::#check-prop][the on-the-fly version]] except for [[(ck)][the line that creates the Kripke -structure]]. You can modify it to display the counterexample in a -similar way. +structure]]. You can modify it to display the counterexample similarly. * Python implementation ** Building the state space diff --git a/doc/org/tut90.org b/doc/org/tut90.org index 18536e94f..c352c356d 100644 --- a/doc/org/tut90.org +++ b/doc/org/tut90.org @@ -19,7 +19,7 @@ There are other algorithms where BDDs are used from different tasks. For instance, our simulation-based reduction function computes a *signature* of each state as a BDD that is essentially the disjunction of all outgoing edges, represented by their guard, their acceptance -sets, and their destination *classes*. Also the translation of LTL +sets, and their destination *classes*. Also, the translation of LTL formulas to transition-based generalized Büchi automata is using an intermediate representation of states that is similar to the aforementioned signatures, excepts that classes are replaced by @@ -125,7 +125,7 @@ is implicitly used in both cases. Similarly, when we call =spot.translate()= the same global =bdd_dict= is used by default. What really confuses people, is that the association between an atomic -proposition (=a=, =b=, ...) and a BDD variable (=0=, =1=, ...) will +proposition (=a=, =b=, ...) and a BDD variable (=0=, =1=, ...) will only be held by the =bdd_dict= for the lifetime of the objects (here the automata) that registered this association to the =bdd_dict=. @@ -232,9 +232,9 @@ interface: const void* for_me); #+END_SRC -The last function may be bit tricky to use, because we need to be sure -that another object has registered some variables. You can rely on -the fact that each =twa= automaton register its variables this way. +The last function may be a bit tricky to use, because we need to be +sure that another object has registered some variables. You can rely +on the fact that each =twa= automaton register its variables this way. Now, in most cases, there is no need to worry about the =bdd_dict=. Automata will register and unregister variables as needed. Other @@ -290,7 +290,7 @@ The above code has two definitions. 2. The =accepting_set= function iterates over an automaton, and saves all transitions that belong to a given acceptance set number. -For instance we can now translate an automaton, compute its acceptance +For instance, we can now translate an automaton, compute its acceptance set 0, and print it as follows: #+begin_src python :noweb strip-export @@ -325,15 +325,15 @@ In this case, the temporary automaton constructed by =spot.translate()= and passed to the =accepting_set()= function is destroyed right after the =ts= object has been constructed. When the automaton is destroyed, it removes all its associations from the -=bdd_dict=. 
This means that before the =print(ts)= the dictionary -that was used by the automaton, and that is still stored in the =ts= -objects is now empty: calling =bdd_format_formula()= raises an -exception. +=bdd_dict=. This means that before the =print(ts)=, the dictionary +that was used by the automaton and that is still stored in the =ts= +objects is now empty. Consequently, calling =bdd_format_formula()= +raises an exception. This can be fixed in a couple of ways. The easy way is to store the automaton inside the =trans_set= object, to ensure that it will live at least as long as the =trans_set= object. But maybe the automaton -is too big and we really want to get rid of it? In this case +is too big, and we really want to get rid of it? In this case =trans_set= should tell the =bdd_dict= that it want to retain the associations. The easiest way in this case is to call the =register_all_variables_of()= method, because we know that each @@ -398,15 +398,15 @@ int bdd_dict::register_anonymous_variables(int n, const void* for_me); A range of =n= variables will be allocated starting at the returned index. -For instance, let's say the our =trans_set= should now store a -symbolic representation of a transition relation. For simplicity we +For instance, let's say that our =trans_set= should now store a +symbolic representation of a transition relation. For simplicity, we assume we just want to store set of pairs =(src,dst)=: each pair will be a conjunction $v_{src}\land v'_{dst}$ between two BDD variables taken from two ranges ($v_i$ representing a source state $i$ and $v'i$ representing a destination state $i$), and the entire set will be a disjunction of all these pairs. If the automaton has $n$ states, we want to allocate $2n$ BDD variables for this purpose. We call these -variables *anonymous* because their meaning is unknown the the +variables *anonymous* because their meaning is unknown to the =bdd_dict=. #+begin_src python diff --git a/doc/org/upgrade2.org b/doc/org/upgrade2.org index 2406af991..a27fc0518 100644 --- a/doc/org/upgrade2.org +++ b/doc/org/upgrade2.org @@ -35,7 +35,7 @@ experience of updating a couple of projects that are using Spot. 4. [[#formulas][The implementation of LTL formulas has been rewritten]]. - They are no longer pointers but plain objects that performs their + They are no longer pointers but plain objects that perform their own reference counting, freeing the programmer from this tedious and error-prone task. They could be handled as if they were shared pointer, with the small difference that they are not using @@ -72,7 +72,7 @@ experience of updating a couple of projects that are using Spot. using preconditions: the acceptance condition does not appear in the type of the C++ object representing the automaton. - 7. [[*Various renamings][Several class, functions, and methods, have been renamed]]. Some + 7. [[*Various renamings][Several classes, functions, and methods, have been renamed]]. Some have been completely reimplemented, with different interfaces. In particular the =tgba_explicit_*= family of classes (=tgba_explicit_formula=, =tgba_explicit_number=, @@ -110,7 +110,7 @@ experience of updating a couple of projects that are using Spot. If Spot 1.2.6 was installed in =/usr/local=, its headers are -in =/usr/local/include/spot=. One would to write include statements +in =/usr/local/include/spot=. 
One would to write include statements such as #+BEGIN_SRC C++ #include @@ -370,7 +370,7 @@ removed, ~8200 lines added), and brings some nice benefits: friendly, and several algorithms that spanned a few pages have been reduced to a few lines. [[file:tut03.org][This page]] illustrates the new interface. -Also the =spot::ltl= namespace has been removed: everything is +Also, the =spot::ltl= namespace has been removed: everything is directly in =spot= now. In code where formulas are just parsed from input string, and then @@ -455,7 +455,7 @@ name: #+BEGIN_SRC C++ if (!input->acc().is_generalized_buchi()) throw std::runtime_error - ("myalgorithm() can only works with generalized Büchi acceptance"); + ("myalgorithm() can only work with generalized Büchi acceptance"); #+END_SRC - Some methods of the =tgba= class have been removed, include some @@ -518,7 +518,7 @@ name: So not only do we save the calls to =new= and =delete=, but we also save the time it takes to construct the objects (including setting up the virtual table), and via a =recycle()= method that has to be - added to the iterator, we update only the attributes that needs to + added to the iterator, we update only the attributes that need to be updated (for instance if the iterator contains a pointer back to the automaton, this pointer requires no update when the iterator is recycled). @@ -586,7 +586,7 @@ for (auto i: aut->succ(s)) - Each =twa= now has a BDD dictionary, so the =get_dict()= method is implemented once for all in =twa=, and should not be implemented - anymore in sub-classes. + anymore in subclasses. - There should now be very few cases where it is necessary to call methods of the BDD dictionary attached to a =twa=. Registering From 3034e8fcc3ef03b1c13e0e51a8c9c1b7ecb748fe Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 9 Feb 2024 15:06:07 +0100 Subject: [PATCH 399/606] python: render via _repr_html_ Work around a recent decision in Jupyter Lab and Notebook to render is inline , breaking tooltips or text selection. (Rerendering all notebooks was painful.) * NEWS: Mention the change. * python/spot/__init__.py: Add a _repr_html_ method to all classes that had a _repr_svg_. It seems Jupyter will use _repr_html_ by default. * python/spot/jupyter.py: SVG replace the _repr_svg_ method by a _repr_html. * tests/python/_altscc.ipynb, tests/python/_autparserr.ipynb, tests/python/_aux.ipynb, tests/python/_mealy.ipynb, tests/python/_partitioned_relabel.ipynb, tests/python/_product_susp.ipynb, tests/python/_product_weak.ipynb, tests/python/_synthesis.ipynb, tests/python/aliases.ipynb, tests/python/alternation.ipynb, tests/python/atva16-fig2a.ipynb, tests/python/atva16-fig2b.ipynb, tests/python/automata-io.ipynb, tests/python/automata.ipynb, tests/python/cav22-figs.ipynb, tests/python/contains.ipynb, tests/python/decompose.ipynb, tests/python/formulas.ipynb, tests/python/games.ipynb, tests/python/gen.ipynb, tests/python/highlighting.ipynb, tests/python/ltsmin-dve.ipynb, tests/python/ltsmin-pml.ipynb, tests/python/parity.ipynb, tests/python/product.ipynb, tests/python/randaut.ipynb, tests/python/satmin.ipynb, tests/python/stutter-inv.ipynb, tests/python/synthesis.ipynb, tests/python/testingaut.ipynb, tests/python/twagraph-internals.ipynb, tests/python/word.ipynb, tests/python/zlktree.ipynb: Update all notebooks. 
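For readers skimming this patch, the whole change boils down to one pattern: keep producing the SVG text exactly as before, but hand it to Jupyter through _repr_html_ so the <svg> element stays inline instead of being wrapped in an <img>. A minimal standalone sketch of that pattern (the MySVG name and the example payload are illustrative only, not Spot's actual classes):

    from IPython.display import DisplayObject

    class MySVG(DisplayObject):
        # DisplayObject stores its constructor argument in self.data.
        # Returning that string from _repr_html_ makes Jupyter insert the
        # SVG markup directly into the cell output, so tooltips and text
        # selection inside the drawing keep working.
        def _repr_html_(self):
            return self.data

    # In a notebook cell, displaying MySVG('<svg ...>...</svg>') renders the
    # drawing inline; with only _repr_svg_, recent Jupyter versions would
    # wrap the same markup in an <img> tag instead.

The classes touched below (aig, twa, zielonka_tree, and spot.jupyter.SVG) follow the same idea: they simply delegate _repr_html_ to the SVG string they already compute.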
---
 NEWS | 11 +
 python/spot/__init__.py | 12 +
 python/spot/jupyter.py | 25 +-
 tests/python/_altscc.ipynb | 895 ++-
 tests/python/_autparserr.ipynb | 238 +-
 tests/python/_aux.ipynb | 15 +-
 tests/python/_mealy.ipynb | 790 ++-
 tests/python/_partitioned_relabel.ipynb | 2992 ++++--
 tests/python/_product_susp.ipynb | 2 +-
 tests/python/_product_weak.ipynb | 754 +-
 tests/python/_synthesis.ipynb | 3300 ++++-
 tests/python/aliases.ipynb | 12 +-
 tests/python/alternation.ipynb | 4 +-
 tests/python/atva16-fig2a.ipynb | 113 +-
 tests/python/atva16-fig2b.ipynb | 247 +-
 tests/python/automata-io.ipynb | 624 +-
 tests/python/automata.ipynb | 1874 ++++-
 tests/python/cav22-figs.ipynb | 378 +-
 tests/python/contains.ipynb | 84 +-
 tests/python/decompose.ipynb | 5245 +++++++++++++-
 tests/python/formulas.ipynb | 30 +-
 tests/python/games.ipynb | 1481 +++-
 tests/python/gen.ipynb | 38 +-
 tests/python/highlighting.ipynb | 2569 ++++++-
 tests/python/ltsmin-dve.ipynb | 855 ++-
 tests/python/ltsmin-pml.ipynb | 288 +-
 tests/python/parity.ipynb | 124 +-
 tests/python/product.ipynb | 1634 ++---
 tests/python/randaut.ipynb | 2310 +++---
 tests/python/satmin.ipynb | 3190 ++++-
 tests/python/stutter-inv.ipynb | 1328 +++-
 tests/python/synthesis.ipynb | 5610 ++++++++++-----
 tests/python/testingaut.ipynb | 78 +-
 tests/python/twagraph-internals.ipynb | 8579 +++++++++++++----
 tests/python/word.ipynb | 234 +-
 tests/python/zlktree.ipynb | 5871 +++++++++++++++-
 36 files changed, 43249 insertions(+), 8585 deletions(-)

diff --git a/NEWS b/NEWS
index c19ab9634..0da1a2847 100644
--- a/NEWS
+++ b/NEWS
@@ -144,6 +144,17 @@ New in spot 2.11.6.dev (not yet released)

   - Calling aut.get_hight_state(s) or get.highlight_edge(e) will
     return the highlight color of that state/edge or None.

+  - Recent versions of Jupyter Notebook and Jupyter Lab started to render
+    SVG elements using <img> tags to make it easier to copy/paste
+    those images.  This breaks several usages, including the
+    possibility to have informative tooltips on states and edges (used
+    in Spot).  See the following issues for more details.
+    https://github.com/jupyter/notebook/issues/7114
+    https://github.com/jupyterlab/jupyterlab/issues/10464
+
+    This version of Spot now declares its SVG outputs as HTML to
+    prevent Jupyter from wrapping them in <img> elements.
+ Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/python/spot/__init__.py b/python/spot/__init__.py index da366b0ee..c44ce9555 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -145,6 +145,10 @@ class aig: print_dot(ostr, self, opt) return _ostream_to_svg(ostr) + # see spot.jupyter.SVG for why we need _repr_html_ instead of _repr_svg_ + def _repr_html_(self): + return self._repr_svg_() + def show(self, opt=None): from spot.jupyter import SVG return SVG(self._repr_svg_(opt)) @@ -210,6 +214,10 @@ class twa: print_dot(ostr, self, opt) return _ostream_to_svg(ostr) + # see spot.jupyter.SVG for why we need _repr_html_ instead of _repr_svg_ + def _repr_html_(self): + return self._repr_svg_() + def show(self, opt=None): """Display the automaton as SVG, in the IPython/Jupyter notebook""" if opt is None: @@ -479,6 +487,10 @@ class zielonka_tree: self.dot(ostr) return _ostream_to_svg(ostr) + # see spot.jupyter.SVG for why we need _repr_html_ instead of _repr_svg_ + def _repr_html_(self): + return self._repr_svg_() + _acdnum = 0 @_extend(acd) diff --git a/python/spot/jupyter.py b/python/spot/jupyter.py index 136db6e5d..0bfea81a0 100644 --- a/python/spot/jupyter.py +++ b/python/spot/jupyter.py @@ -23,19 +23,24 @@ Auxiliary functions for Spot's Python bindings. from IPython.display import display, HTML, DisplayObject class SVG(DisplayObject): - """ - Replacement for IPython.display.SVG that does not use + """Replacement for IPython.display.SVG that does not use minidom to extract the element. - We need that because prior to Python 3.8, minidom used - sort all attributes, and in Python 3.8 this was changed - to keep the same order, causing test failures in our - diff-based test suite. + We need that because prior to Python 3.8, minidom used sort all + attributes, and in Python 3.8 this was changed to keep the same + order, causing test failures in our diff-based test suite. We do not need the extraction when processing GraphViz output. + + Also nowadays Jupyter Notebook 7 started to render as inline + instead of inlining the directly, breaking many useful + usages of SVG in the name of easier copy/paste. 
+ + https://github.com/jupyter/notebook/issues/7114 + https://github.com/jupyterlab/jupyterlab/issues/10464 """ - def _repr_svg_(self): + def _repr_html_(self): return self.data def display_inline(*args, per_row=None, show=None): @@ -52,11 +57,11 @@ def display_inline(*args, per_row=None, show=None): for arg in args: dpy = 'inline-block' if show is not None and hasattr(arg, 'show'): - rep = arg.show(show)._repr_svg_() + arg = arg.show(show) + if hasattr(arg, '_repr_html_'): + rep = arg._repr_html_() elif hasattr(arg, '_repr_svg_'): rep = arg._repr_svg_() - elif hasattr(arg, '_repr_html_'): - rep = arg._repr_html_() elif hasattr(arg, '_repr_latex_'): rep = arg._repr_latex_() if not per_row: diff --git a/tests/python/_altscc.ipynb b/tests/python/_altscc.ipynb index f23d48dfc..d7d32bb42 100644 --- a/tests/python/_altscc.ipynb +++ b/tests/python/_altscc.ipynb @@ -36,10 +36,10 @@ " viewBox=\"0.00 0.00 221.60 212.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "cluster_0\n", "\n", @@ -89,7 +89,91 @@ "1->1\n", "\n", "\n", - "b\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "I->-1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", @@ -110,7 +194,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243a6e10> >" + " *' at 0x7fdd081ee370> >" ] }, "execution_count": 2, @@ -161,10 +245,10 @@ " viewBox=\"0.00 0.00 153.60 224.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", "\n", @@ -214,7 +298,84 @@ "1->1\n", "\n", "\n", - "b\n", + "b\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "I->-1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", @@ -228,7 +389,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243a63c0> >" + " *' at 0x7fdd081edef0> >" ] }, "execution_count": 3, @@ -272,10 +433,10 @@ " viewBox=\"0.00 0.00 221.60 231.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", "\n", @@ -336,13 +497,93 @@ "1->-1\n", "\n", "\n", - "b\n", + 
"b\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "I->-1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fa0243a6900> >" + " *' at 0x7fdd081ee700> >" ] }, "execution_count": 4, @@ -369,7 +610,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -381,17 +622,17 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", - "\n", + "\n", "\n", "\n", "\n", @@ -430,35 +671,115 @@ "\n", "\n", "-1\n", - "\n", + "\n", "\n", "\n", "\n", "1->-1\n", - "\n", - "\n", + "\n", + "\n", "b\n", "\n", "\n", "\n", "-1->0\n", - "\n", + "\n", "\n", "\n", "\n", "\n", "-1->1\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fa0243b3090> >" + " *' at 0x7fdd081ee070> >" ] }, - "execution_count": 8, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -482,7 +803,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -498,10 +819,10 @@ " viewBox=\"0.00 0.00 236.60 160.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", "\n", @@ -550,7 +871,88 @@ "1->-1\n", "\n", "\n", - "b\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", @@ -569,10 +971,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243b31b0> >" + " *' at 0x7fdd081eeb50> >" 
] }, - "execution_count": 9, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -603,7 +1005,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -619,10 +1021,10 @@ " viewBox=\"0.00 0.00 226.60 191.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", "\n", @@ -671,7 +1073,7 @@ "2->2\n", "\n", "\n", - "b\n", + "b\n", "\n", "\n", "\n", @@ -684,7 +1086,101 @@ "1->-1\n", "\n", "\n", - "b\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "-1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", @@ -703,7 +1199,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243b3390> >" + " *' at 0x7fdd081eef10> >" ] }, "metadata": {}, @@ -722,10 +1218,10 @@ " viewBox=\"0.00 0.00 217.60 253.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", "\n", @@ -774,7 +1270,7 @@ "1->1\n", "\n", "\n", - "b\n", + "b\n", "\n", "\n", "\n", @@ -788,7 +1284,101 @@ "2->2\n", "\n", "\n", - "b\n", + "b\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", @@ -806,7 +1396,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243a6ba0> >" + " *' at 0x7fdd081eeee0> >" ] }, "metadata": {}, @@ -821,17 +1411,17 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", - "\n", + "\n", "\n", "\n", "cluster_1\n", @@ -852,14 +1442,14 @@ "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "-4->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -889,7 +1479,7 @@ "1->-1\n", "\n", "\n", - "b\n", + "b\n", "\n", "\n", "\n", @@ -901,14 +1491,121 @@ "\n", "\n", "-1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", 
"\n", "2->-1\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "I->-4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "-4->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->-1\n", + "\n", + "\n", "b\n", "\n", "\n", @@ -922,7 +1619,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa0243b3270> >" + " *' at 0x7fdd081eef40> >" ] }, "metadata": {}, @@ -930,24 +1627,24 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "cluster_0\n", - "\n", + "\n", "\n", "\n", "cluster_1\n", @@ -1005,7 +1702,7 @@ "1->-1.2\n", "\n", "\n", - "b\n", + "b\n", "\n", "\n", "\n", @@ -1023,33 +1720,33 @@ "\n", "\n", "-1\n", - "\n", + "\n", "\n", "\n", "\n", "2->-1\n", - "\n", - "\n", + "\n", + "\n", "b\n", "\n", "\n", "\n", "-1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-1->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->-1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n" @@ -1122,7 +1819,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1136,7 +1833,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_autparserr.ipynb b/tests/python/_autparserr.ipynb index 232135130..6b86ceeac 100644 --- a/tests/python/_autparserr.ipynb +++ b/tests/python/_autparserr.ipynb @@ -74,11 +74,60 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -117,7 +166,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb740213180> >" + " *' at 0x7f695c599f50> >" ] }, "metadata": {}, @@ -129,10 +178,10 @@ "output_type": "error", "traceback": [ "Traceback \u001b[0;36m(most recent call last)\u001b[0m:\n", - " File \u001b[1;32m\"/usr/lib/python3/dist-packages/IPython/core/interactiveshell.py\"\u001b[0m, line \u001b[1;32m3437\u001b[0m, in \u001b[1;35mrun_code\u001b[0m\n exec(code_obj, self.user_global_ns, self.user_ns)\n", - " File \u001b[1;32m\"\"\u001b[0m, line \u001b[1;32m1\u001b[0m, in \u001b[1;35m\u001b[0m\n for a in spot.automata('_example.aut'):\n", - "\u001b[0;36m File 
\u001b[0;32m\"/home/adl/git/spot/python/spot/__init__.py\"\u001b[0;36m, line \u001b[0;32m586\u001b[0;36m, in \u001b[0;35mautomata\u001b[0;36m\u001b[0m\n\u001b[0;31m res = p.parse(_bdd_dict)\u001b[0m\n", - "\u001b[0;36m File \u001b[0;32m\"\"\u001b[0;36m, line \u001b[0;32munknown\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m \n_example.aut:20.2: syntax error, unexpected identifier\n_example.aut:20.1-3: ignoring this invalid label\n_example.aut:20.5: state number is larger than state count...\n_example.aut:14.1-9: ... declared here.\n\n" + "\u001b[0m File \u001b[1;32m/usr/lib/python3/dist-packages/IPython/core/interactiveshell.py:3553\u001b[0m in \u001b[1;35mrun_code\u001b[0m\n exec(code_obj, self.user_global_ns, self.user_ns)\u001b[0m\n", + "\u001b[0m Cell \u001b[1;32mIn[3], line 1\u001b[0m\n for a in spot.automata('_example.aut'):\u001b[0m\n", + "\u001b[0;36m File \u001b[0;32m~/git/spot/python/spot/__init__.py:718\u001b[0;36m in \u001b[0;35mautomata\u001b[0;36m\n\u001b[0;31m res = p.parse(_bdd_dict)\u001b[0;36m\n", + "\u001b[0;36m File \u001b[0;32m\u001b[0;36m\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m \n_example.aut:20.2: syntax error, unexpected identifier\n_example.aut:20.1-3: ignoring this invalid label\n_example.aut:20.5: state number is larger than state count...\n_example.aut:14.1-9: ... declared here.\n\n" ] } ], @@ -155,11 +204,60 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -198,7 +296,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb740213bd0> >" + " *' at 0x7f695c59a370> >" ] }, "execution_count": 4, @@ -231,9 +329,9 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mspot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautomaton\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'non-existing-cmd 2>/dev/null |'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 613\u001b[0m See `spot.automata` for a list of supported formats.\"\"\"\n\u001b[1;32m 614\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 615\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mautomata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 616\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 617\u001b[0m \u001b[0;32mraise\u001b[0m 
\u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Failed to read automaton from {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, *sources)\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;31m# an exception.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 599\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexc_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 600\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCalledProcessError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilename\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 601\u001b[0m \u001b[0;31m# deleting o explicitly now prevents Python 3.5 from\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 602\u001b[0m \u001b[0;31m# reporting the following error: \" 1\u001b[0m \u001b[43mspot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautomaton\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mnon-existing-cmd 2>/dev/null |\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:747\u001b[0m, in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 743\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Read a single automaton from a file.\u001b[39;00m\n\u001b[1;32m 744\u001b[0m \n\u001b[1;32m 745\u001b[0m \u001b[38;5;124;03mSee `spot.automata` for a list of supported formats.\"\"\"\u001b[39;00m\n\u001b[1;32m 746\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 747\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mautomata\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 748\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m:\n\u001b[1;32m 749\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to read automaton from \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(filename))\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:732\u001b[0m, in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, drop_false_edges, *sources)\u001b[0m\n\u001b[1;32m 729\u001b[0m \u001b[38;5;66;03m# Do not complain about the exit code if we are already 
raising\u001b[39;00m\n\u001b[1;32m 730\u001b[0m \u001b[38;5;66;03m# an exception.\u001b[39;00m\n\u001b[1;32m 731\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ret \u001b[38;5;129;01mand\u001b[39;00m sys\u001b[38;5;241m.\u001b[39mexc_info()[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 732\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m subprocess\u001b[38;5;241m.\u001b[39mCalledProcessError(ret, filename[:\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[1;32m 733\u001b[0m \u001b[38;5;66;03m# deleting o explicitly now prevents Python 3.5 from\u001b[39;00m\n\u001b[1;32m 734\u001b[0m \u001b[38;5;66;03m# reporting the following error: \" returned a result with\u001b[39;00m\n\u001b[1;32m 736\u001b[0m \u001b[38;5;66;03m# an error set\". It's not clear to me if the bug is in Python\u001b[39;00m\n\u001b[1;32m 737\u001b[0m \u001b[38;5;66;03m# or Swig. At least it's related to the use of generators.\u001b[39;00m\n\u001b[1;32m 738\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m o\n", "\u001b[0;31mCalledProcessError\u001b[0m: Command 'non-existing-cmd 2>/dev/null ' returned non-zero exit status 127." ] } @@ -254,12 +352,12 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mTimeoutExpired\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mspot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautomaton\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'sleep 3; cat _example.aut |'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 613\u001b[0m See `spot.automata` for a list of supported formats.\"\"\"\n\u001b[1;32m 614\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 615\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mautomata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 616\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 617\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Failed to read automaton from {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, *sources)\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 554\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 555\u001b[0;31m \u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0mproc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcommunicate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 556\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTimeoutExpired\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 557\u001b[0m \u001b[0;31m# Using subprocess.check_output() with timeout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/lib/python3.9/subprocess.py\u001b[0m in \u001b[0;36mcommunicate\u001b[0;34m(self, input, timeout)\u001b[0m\n\u001b[1;32m 1132\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1133\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1134\u001b[0;31m \u001b[0mstdout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstderr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_communicate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mendtime\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1135\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyboardInterrupt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1136\u001b[0m \u001b[0;31m# https://bugs.python.org/issue25942\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/lib/python3.9/subprocess.py\u001b[0m in \u001b[0;36m_communicate\u001b[0;34m(self, input, endtime, orig_timeout)\u001b[0m\n\u001b[1;32m 1980\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1981\u001b[0m \u001b[0mready\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mselector\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mselect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1982\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_timeout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mendtime\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0morig_timeout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstdout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstderr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1983\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1984\u001b[0m \u001b[0;31m# XXX Rewrite these to use non-blocking I/O on the file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/lib/python3.9/subprocess.py\u001b[0m in \u001b[0;36m_check_timeout\u001b[0;34m(self, endtime, orig_timeout, stdout_seq, stderr_seq, skip_check_and_raise)\u001b[0m\n\u001b[1;32m 1176\u001b[0m \u001b[0;32mreturn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1177\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mskip_check_and_raise\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_time\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0mendtime\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1178\u001b[0;31m raise TimeoutExpired(\n\u001b[0m\u001b[1;32m 1179\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0morig_timeout\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1180\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34mb''\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstdout_seq\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstdout_seq\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mspot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautomaton\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43msleep 3; cat _example.aut |\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:747\u001b[0m, in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 743\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Read a single automaton from a file.\u001b[39;00m\n\u001b[1;32m 744\u001b[0m \n\u001b[1;32m 745\u001b[0m \u001b[38;5;124;03mSee `spot.automata` for a list of supported formats.\"\"\"\u001b[39;00m\n\u001b[1;32m 746\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 747\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mautomata\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 748\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m:\n\u001b[1;32m 749\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to read automaton from \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(filename))\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:687\u001b[0m, in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, drop_false_edges, *sources)\u001b[0m\n\u001b[1;32m 685\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 686\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 687\u001b[0m out, err \u001b[38;5;241m=\u001b[39m \u001b[43mproc\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcommunicate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 688\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m subprocess\u001b[38;5;241m.\u001b[39mTimeoutExpired:\n\u001b[1;32m 689\u001b[0m \u001b[38;5;66;03m# Using subprocess.check_output() with timeout\u001b[39;00m\n\u001b[1;32m 690\u001b[0m \u001b[38;5;66;03m# would just kill the shell, not its children.\u001b[39;00m\n\u001b[1;32m 691\u001b[0m os\u001b[38;5;241m.\u001b[39mkillpg(proc\u001b[38;5;241m.\u001b[39mpid, signal\u001b[38;5;241m.\u001b[39mSIGKILL)\n", + "File \u001b[0;32m/usr/lib/python3.11/subprocess.py:1209\u001b[0m, in \u001b[0;36mPopen.communicate\u001b[0;34m(self, input, 
timeout)\u001b[0m\n\u001b[1;32m 1206\u001b[0m endtime \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 1208\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1209\u001b[0m stdout, stderr \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_communicate\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mendtime\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1210\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m:\n\u001b[1;32m 1211\u001b[0m \u001b[38;5;66;03m# https://bugs.python.org/issue25942\u001b[39;00m\n\u001b[1;32m 1212\u001b[0m \u001b[38;5;66;03m# See the detailed comment in .wait().\u001b[39;00m\n\u001b[1;32m 1213\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m timeout \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", + "File \u001b[0;32m/usr/lib/python3.11/subprocess.py:2109\u001b[0m, in \u001b[0;36mPopen._communicate\u001b[0;34m(self, input, endtime, orig_timeout)\u001b[0m\n\u001b[1;32m 2104\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m( \u001b[38;5;66;03m# Impossible :)\u001b[39;00m\n\u001b[1;32m 2105\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m_check_timeout(..., skip_check_and_raise=True) \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 2106\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfailed to raise TimeoutExpired.\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 2108\u001b[0m ready \u001b[38;5;241m=\u001b[39m selector\u001b[38;5;241m.\u001b[39mselect(timeout)\n\u001b[0;32m-> 2109\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_check_timeout\u001b[49m\u001b[43m(\u001b[49m\u001b[43mendtime\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43morig_timeout\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstdout\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstderr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2111\u001b[0m \u001b[38;5;66;03m# XXX Rewrite these to use non-blocking I/O on the file\u001b[39;00m\n\u001b[1;32m 2112\u001b[0m \u001b[38;5;66;03m# objects; they are no longer using C stdio!\u001b[39;00m\n\u001b[1;32m 2114\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m key, events \u001b[38;5;129;01min\u001b[39;00m ready:\n", + "File \u001b[0;32m/usr/lib/python3.11/subprocess.py:1253\u001b[0m, in \u001b[0;36mPopen._check_timeout\u001b[0;34m(self, endtime, orig_timeout, stdout_seq, stderr_seq, skip_check_and_raise)\u001b[0m\n\u001b[1;32m 1251\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m 1252\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m skip_check_and_raise \u001b[38;5;129;01mor\u001b[39;00m _time() \u001b[38;5;241m>\u001b[39m endtime:\n\u001b[0;32m-> 1253\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m TimeoutExpired(\n\u001b[1;32m 1254\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs, orig_timeout,\n\u001b[1;32m 1255\u001b[0m output\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mb\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(stdout_seq) \u001b[38;5;28;01mif\u001b[39;00m stdout_seq \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 1256\u001b[0m 
stderr\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mb\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(stderr_seq) \u001b[38;5;28;01mif\u001b[39;00m stderr_seq \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m)\n", "\u001b[0;31mTimeoutExpired\u001b[0m: Command 'sleep 3; cat _example.aut ' timed out after 1 seconds" ] } @@ -282,12 +380,69 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "a U b\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a U b\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -333,7 +488,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb740040270> >" + " *' at 0x7f6945207210> >" ] }, "metadata": {}, @@ -346,8 +501,8 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mspot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautomata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"ltl2tgba 'a U b'|\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ltl2tgba \"syntax U U error\" 2>/dev/null |'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, *sources)\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;31m# an exception.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 599\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexc_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 600\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCalledProcessError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilename\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 601\u001b[0m \u001b[0;31m# deleting o explicitly now prevents Python 3.5 from\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 602\u001b[0m \u001b[0;31m# reporting the following error: \" 1\u001b[0m \u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43ma\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mspot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautomata\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mltl2tgba \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ma U b\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m|\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mltl2tgba \u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msyntax U U error\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m 2>/dev/null |\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43mdisplay\u001b[49m\u001b[43m(\u001b[49m\u001b[43ma\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:732\u001b[0m, in \u001b[0;36mautomata\u001b[0;34m(timeout, ignore_abort, trust_hoa, no_sid, debug, want_kripke, drop_false_edges, *sources)\u001b[0m\n\u001b[1;32m 729\u001b[0m \u001b[38;5;66;03m# Do not complain about the exit code if we are already raising\u001b[39;00m\n\u001b[1;32m 730\u001b[0m \u001b[38;5;66;03m# an exception.\u001b[39;00m\n\u001b[1;32m 731\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ret \u001b[38;5;129;01mand\u001b[39;00m sys\u001b[38;5;241m.\u001b[39mexc_info()[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 732\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m subprocess\u001b[38;5;241m.\u001b[39mCalledProcessError(ret, filename[:\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[1;32m 733\u001b[0m \u001b[38;5;66;03m# deleting o explicitly now prevents Python 3.5 from\u001b[39;00m\n\u001b[1;32m 734\u001b[0m \u001b[38;5;66;03m# reporting the following error: \" returned a result with\u001b[39;00m\n\u001b[1;32m 736\u001b[0m \u001b[38;5;66;03m# an error set\". It's not clear to me if the bug is in Python\u001b[39;00m\n\u001b[1;32m 737\u001b[0m \u001b[38;5;66;03m# or Swig. At least it's related to the use of generators.\u001b[39;00m\n\u001b[1;32m 738\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m o\n", "\u001b[0;31mCalledProcessError\u001b[0m: Command 'ltl2tgba \"syntax U U error\" 2>/dev/null ' returned non-zero exit status 2." 
] } @@ -376,12 +531,12 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mStopIteration\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 614\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 615\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mautomata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 616\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:747\u001b[0m, in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 746\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 747\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mautomata\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 748\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m:\n", "\u001b[0;31mStopIteration\u001b[0m: ", "\nDuring handling of the above exception, another exception occurred:\n", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mspot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautomaton\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'true|'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/git/spot/python/spot/__init__.py\u001b[0m in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 615\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mautomata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 616\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 617\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Failed to read automaton from {}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 618\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 619\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "Cell \u001b[0;32mIn[8], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m 
\u001b[43mspot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautomaton\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtrue|\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/git/spot/python/spot/__init__.py:749\u001b[0m, in \u001b[0;36mautomaton\u001b[0;34m(filename, **kwargs)\u001b[0m\n\u001b[1;32m 747\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mnext\u001b[39m(automata(filename, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs))\n\u001b[1;32m 748\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m:\n\u001b[0;32m--> 749\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to read automaton from \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(filename))\n", "\u001b[0;31mRuntimeError\u001b[0m: Failed to read automaton from true|" ] } @@ -398,11 +553,18 @@ "source": [ "!rm _example.aut" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -416,7 +578,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_aux.ipynb b/tests/python/_aux.ipynb index c90d7995c..e82a15edf 100644 --- a/tests/python/_aux.ipynb +++ b/tests/python/_aux.ipynb @@ -37,8 +37,8 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mspot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maux\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstr_to_svg\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mb'syntax error'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m/home/adl/git/spot/python/spot/aux.py\u001b[0m in \u001b[0;36mstr_to_svg\u001b[0;34m(str)\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0msubprocess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCalledProcessError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'dot'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 65\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mstdout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'utf-8'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "Cell \u001b[0;32mIn[2], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m 
\u001b[43mspot\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maux\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstr_to_svg\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mb\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43msyntax error\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/git/spot/python/spot/aux_.py:90\u001b[0m, in \u001b[0;36mstr_to_svg\u001b[0;34m(str)\u001b[0m\n\u001b[1;32m 88\u001b[0m ret \u001b[38;5;241m=\u001b[39m dot\u001b[38;5;241m.\u001b[39mwait()\n\u001b[1;32m 89\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ret:\n\u001b[0;32m---> 90\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m subprocess\u001b[38;5;241m.\u001b[39mCalledProcessError(ret, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mdot\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 91\u001b[0m out \u001b[38;5;241m=\u001b[39m stdout\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 92\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m svgscale_regex\u001b[38;5;241m.\u001b[39msub(_gvfix, out)\n", "\u001b[0;31mCalledProcessError\u001b[0m: Command 'dot' returned non-zero exit status 1." ] } @@ -46,11 +46,18 @@ "source": [ "spot.aux.str_to_svg(b'syntax error')" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -64,7 +71,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index 20ea5fd4e..0270317dd 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -128,8 +128,77 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a | c\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "b | d\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877796550> >" + " *' at 0x7f27698b4ff0> >" ] }, "execution_count": 4, @@ -208,8 +277,57 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a & !c\n", + "/\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "a | c\n", + "/\n", + "\n", + "b | d\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877796820> >" + " *' at 0x7f27698b51d0> >" ] }, "execution_count": 6, @@ -282,8 +400,51 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a & !c\n", + "/\n", + "\n", + "!b & !d\n", + "\n", + "a | c\n", + "/\n", + "\n", + "b | d\n", + "\n", + "\n", + "\n" + ], 
"text/plain": [ - " *' at 0x7f8877796820> >" + " *' at 0x7f27698b51d0> >" ] }, "execution_count": 8, @@ -386,8 +547,91 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877797b40> >" + " *' at 0x7f27698b6c70> >" ] }, "execution_count": 9, @@ -531,8 +775,116 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f88777970f0> >" + " *' at 0x7f27698b6970> >" ] }, "execution_count": 10, @@ -600,15 +952,15 @@ " \n", " 0\n", " presat\n", - " 7.7176e-05\n", - " 2.863e-06\n", - " 1.6553e-05\n", - " 0.000186061\n", - " 7.753e-06\n", - " 1.0616e-05\n", - " 1.1804e-05\n", - " 8.101e-06\n", - " 6.7328e-05\n", + " 4.8381e-05\n", + " 7.11e-07\n", + " 7.143e-06\n", + " 7.917e-05\n", + " 5.22e-06\n", + " 4.519e-06\n", + " 4.539e-06\n", + " 2.965e-06\n", + " 3.167e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -634,7 +986,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.000496302\n", + " 0.000223141\n", " 2\n", " 0\n", " 7\n", @@ -652,16 +1004,16 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time incomp_time \\\n", - "0 presat 7.7176e-05 2.863e-06 1.6553e-05 0.000186061 7.753e-06 \n", + "0 presat 4.8381e-05 7.11e-07 7.143e-06 7.917e-05 5.22e-06 \n", "1 sat NaN NaN NaN NaN NaN \n", "\n", " split_all_let_time split_min_let_time split_cstr_time prob_init_build_time \\\n", - "0 1.0616e-05 1.1804e-05 8.101e-06 6.7328e-05 \n", + "0 4.519e-06 4.539e-06 2.965e-06 3.167e-05 \n", "1 NaN NaN NaN NaN \n", "\n", " ... total_time n_classes n_refinement n_lit n_clauses n_iteration \\\n", "0 ... NaN NaN NaN NaN NaN NaN \n", - "1 ... 0.000496302 2 0 7 12 0 \n", + "1 ... 
0.000223141 2 0 7 12 0 \n", "\n", " n_letters_part n_bisim_let n_min_states done \n", "0 3 2 NaN NaN \n", @@ -757,8 +1109,90 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877796eb0> >" + " *' at 0x7f27698b6850> >" ] }, "execution_count": 11, @@ -860,8 +1294,80 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "(o0 & !o1) | (!o0 & o1)\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877797120> >" + " *' at 0x7f27698b7c30> >" ] }, "execution_count": 12, @@ -999,8 +1505,109 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "(o0 & !o1) | (!o0 & o1)\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877797990> >" + " *' at 0x7f27698b6130> >" ] }, "execution_count": 13, @@ -1067,15 +1674,15 @@ " \n", " 0\n", " presat\n", - " 3.282e-06\n", - " 3.702e-06\n", - " 1.4248e-05\n", - " 0.000109094\n", - " 6.705e-06\n", - " 9.219e-06\n", - " 8.52e-06\n", - " 1.0407e-05\n", - " 3.2896e-05\n", + " 6.011e-06\n", + " 1.473e-06\n", + " 7.765e-06\n", + " 5.8501e-05\n", + " 4.298e-06\n", + " 4.589e-06\n", + " 5.119e-06\n", + " 3.496e-06\n", + " 2.0078e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1149,7 +1756,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.00041242\n", + " 0.000184208\n", " 2\n", " 0\n", " 17\n", @@ -1167,22 +1774,22 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 3.282e-06 3.702e-06 1.4248e-05 0.000109094 \n", + "0 presat 6.011e-06 1.473e-06 7.765e-06 5.8501e-05 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time 
split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 6.705e-06 9.219e-06 8.52e-06 1.0407e-05 \n", + "0 4.298e-06 4.589e-06 5.119e-06 3.496e-06 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", - " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", - "0 3.2896e-05 ... NaN NaN NaN NaN \n", - "1 NaN ... NaN 1 0 3 \n", - "2 NaN ... NaN 1 1 10 \n", - "3 NaN ... 0.00041242 2 0 17 \n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 2.0078e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 0.000184208 2 0 17 \n", "\n", " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", "0 NaN NaN 1 1 NaN NaN \n", @@ -1285,8 +1892,83 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0 & o1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f8877797cc0> >" + " *' at 0x7f27fb647c90> >" ] }, "execution_count": 14, @@ -1365,15 +2047,15 @@ " \n", " 0\n", " presat\n", - " 1.956e-06\n", - " 2.445e-06\n", - " 8.171e-06\n", - " 5.0007e-05\n", - " 4.819e-06\n", - " 6.077e-06\n", - " 5.797e-06\n", - " 4.33e-06\n", - " 2.242e-05\n", + " 6.713e-06\n", + " 1.964e-06\n", + " 1.2063e-05\n", + " 6.2799e-05\n", + " 6.502e-06\n", + " 8.035e-06\n", + " 8.616e-06\n", + " 7.534e-06\n", + " 2.7923e-05\n", " ...\n", " NaN\n", " NaN\n", @@ -1447,7 +2129,7 @@ " NaN\n", " NaN\n", " ...\n", - " 0.000252132\n", + " 0.000316247\n", " 2\n", " 0\n", " 17\n", @@ -1465,22 +2147,22 @@ ], "text/plain": [ " task premin_time reorg_time partsol_time player_incomp_time \\\n", - "0 presat 1.956e-06 2.445e-06 8.171e-06 5.0007e-05 \n", + "0 presat 6.713e-06 1.964e-06 1.2063e-05 6.2799e-05 \n", "1 sat NaN NaN NaN NaN \n", "2 refinement NaN NaN NaN NaN \n", "3 sat NaN NaN NaN NaN \n", "\n", " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", - "0 4.819e-06 6.077e-06 5.797e-06 4.33e-06 \n", + "0 6.502e-06 8.035e-06 8.616e-06 7.534e-06 \n", "1 NaN NaN NaN NaN \n", "2 NaN NaN NaN NaN \n", "3 NaN NaN NaN NaN \n", "\n", " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", - "0 2.242e-05 ... NaN NaN NaN NaN \n", + "0 2.7923e-05 ... NaN NaN NaN NaN \n", "1 NaN ... NaN 1 0 3 \n", "2 NaN ... NaN 1 1 10 \n", - "3 NaN ... 0.000252132 2 0 17 \n", + "3 NaN ... 
0.000316247 2 0 17 \n", "\n", " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", "0 NaN NaN 1 1 NaN NaN \n", @@ -1705,7 +2387,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.11.7" }, "vscode": { "interpreter": { diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb index 549eb04be..446e18a68 100644 --- a/tests/python/_partitioned_relabel.ipynb +++ b/tests/python/_partitioned_relabel.ipynb @@ -7,7 +7,8 @@ "metadata": {}, "outputs": [], "source": [ - "import spot, buddy" + "import spot, buddy\n", + "spot.setup()" ] }, { @@ -46,82 +47,163 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415fbd0> >" + " *' at 0x7f483c46fc00> >" ] }, "execution_count": 2, @@ -173,82 +255,163 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "__nv0 | __nv1\n", + "\n", + "\n", + "__nv0 | __nv1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!__nv0 & __nv1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "__nv0 & __nv1\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + 
"2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 | __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415fbd0> >" + " *' at 0x7f483c46fc00> >" ] }, "execution_count": 3, @@ -278,82 +441,163 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415fbd0> >" + " *' at 0x7f483c46fc00> >" ] }, "execution_count": 4, @@ -382,82 +626,163 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & 
b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415bf30> >" + " *' at 0x7f483c46dfb0> >" ] }, "metadata": {}, @@ -501,117 +826,233 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!__nv0 & !__nv1\n", + "\n", + "\n", + "!__nv0 & !__nv1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "__nv0 & !__nv1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "__nv0 & __nv1\n", + "\n", + "\n", + "__nv0 & __nv1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!__nv0 & __nv1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "__nv0 & !__nv1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "__nv0 & __nv1\n", + "\n", + "\n", + "__nv0 & __nv1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!__nv0 & __nv1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!__nv0 & __nv1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "__nv0 & __nv1\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415bf30> >" + " *' at 0x7f483c46dfb0> >" ] }, "execution_count": 5, @@ -659,117 +1100,233 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + 
"\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b0 & b1 & b2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415bf30> >" + " *' at 0x7f483c46dfb0> >" ] }, "execution_count": 6, @@ -798,95 +1355,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "c\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + 
"\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936c3c6090> >" + " *' at 0x7f483c46e9d0> >" ] }, "metadata": {}, @@ -927,95 +1578,189 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!__nv0 | !__nv1\n", + "\n", + "\n", + "!__nv0 | !__nv1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(__nv0 & !__nv1) | (!__nv0 & __nv1)\n", + "\n", + "\n", + "(__nv0 & !__nv1) | (!__nv0 & __nv1)\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!__nv0 & __nv1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "__nv0 & !__nv1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "c\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 | !__nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(__nv0 & !__nv1) | (!__nv0 & __nv1)\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936c3c6090> >" + " *' at 0x7f483c46e9d0> >" ] }, "execution_count": 7, @@ -1096,95 +1841,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "c\n", 
+ "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936c3c6090> >" + " *' at 0x7f483c46e9d0> >" ] }, "execution_count": 8, @@ -1229,90 +2068,243 @@ "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & 
u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b / (label too long)\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "/\n", + "\n", + "(label too long)\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1 / (label too long)\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & 
!p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "/\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "/\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "(label too long)\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415f510> >" + " *' at 0x7f483c46f4b0> >" ] }, "metadata": {}, @@ -1368,183 +2360,365 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & 
!p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "0->7\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "0->8\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & 
u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "5->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "7->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "8->1\n", - "\n", - "\n", - "(label too long)\n", + "\n", + "\n", + "(label too long)\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + 
"\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415f990> >" + " *' at 0x7f483c46eca0> >" ] }, "metadata": {}, @@ -1612,267 +2786,533 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "3\n", - 
"\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "0->7\n", - "\n", - "\n", - "__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "0->7\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "0->8\n", - "\n", - "\n", - "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "(!__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "(__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "(!__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "(!__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", "\n", "\n", "\n", "5->0\n", - "\n", - "\n", - "(__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "(__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "(!__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", "\n", "\n", "\n", "7->0\n", - "\n", - 
"\n", - "(__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "8->1\n", - "\n", - "\n", - "(!__nv_out0 & !__nv_out1 & __nv_out3) | (__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out3) | (__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out2 & __nv_out3)\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + 
"0->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "(!__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "(__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out3) | (__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415f990> >" + " *' at 0x7f483c46eca0> >" ] }, "metadata": {}, @@ -1985,267 +3425,533 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & 
!p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "0->7\n", - "\n", - "\n", - "0\n", + "\n", + "\n", + "0\n", "\n", "\n", "\n", "0->7\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "0->8\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & 
!u0steer0steer\n", "\n", "\n", "\n", "5->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "7->0\n", - "\n", - "\n", - "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "8->1\n", - "\n", - "\n", - "(label too long)\n", + "\n", + "\n", + "(label too long)\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", 
"\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", "\n", "\n", "\n", "1->8\n", - "\n", - "\n", - "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b 
& !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f936415f990> >" + " *' at 0x7f483c46eca0> >" ] }, "metadata": {}, @@ -2345,7 +4051,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_product_susp.ipynb b/tests/python/_product_susp.ipynb index a19c8e975..cae2781cf 100644 --- a/tests/python/_product_susp.ipynb +++ b/tests/python/_product_susp.ipynb @@ -4674,7 +4674,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_product_weak.ipynb b/tests/python/_product_weak.ipynb index 745688fa1..64fbc4cfd 100644 --- a/tests/python/_product_weak.ipynb +++ b/tests/python/_product_weak.ipynb @@ -202,8 +202,58 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + 
"\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3720> >" + " *' at 0x7faa1c7666a0> >" ] }, "metadata": {}, @@ -1607,7 +1657,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -1720,7 +1770,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -1893,8 +1943,65 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d36c0> >" + " *' at 0x7faa1c766670> >" ] }, "metadata": {}, @@ -3177,7 +3284,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -3302,7 +3409,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -3451,8 +3558,65 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d37e0> >" + " *' at 0x7faa1c766700> >" ] }, "metadata": {}, @@ -4736,7 +4900,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -4861,7 +5025,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -5000,8 +5164,55 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3870> >" + " *' at 0x7faa1c7663d0> >" ] }, "metadata": {}, @@ -6053,7 +6264,7 @@ ")) & Fin(\n", "\n", ")\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -6193,7 +6404,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -6241,7 +6452,7 @@ ") | Fin(\n", "\n", "))\n", - "[Streett-like 2]\n", + "[Streett-like 2]\n", "\n", "\n", "\n", @@ -6427,8 +6638,55 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3900> >" + " *' at 0x7faa1c766400> >" ] }, "metadata": {}, @@ -7223,7 +7481,7 @@ ") | Inf(\n", "\n", ")\n", - "[Streett 1]\n", + "[Streett 
1]\n", "\n", "\n", "\n", @@ -7478,7 +7736,7 @@ ") & Fin(\n", "\n", ")\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -7618,7 +7876,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -7666,7 +7924,7 @@ ") | Fin(\n", "\n", "))\n", - "[Streett-like 2]\n", + "[Streett-like 2]\n", "\n", "\n", "\n", @@ -7853,8 +8111,56 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3990> >" + " *' at 0x7faa1c766550> >" ] }, "metadata": {}, @@ -8607,7 +8913,7 @@ ")) & Fin(\n", "\n", ")\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -8794,7 +9100,7 @@ ") & Fin(\n", "\n", ")\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -8984,7 +9290,7 @@ ") & Fin(\n", "\n", ")\n", - "[Streett-like 4]\n", + "[Streett-like 4]\n", "\n", "\n", "\n", @@ -9094,7 +9400,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -9144,7 +9450,7 @@ ") | Fin(\n", "\n", "))\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -9303,8 +9609,58 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3720> >" + " *' at 0x7faa1c7666a0> >" ] }, "metadata": {}, @@ -10971,7 +11327,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -11287,8 +11643,65 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d36c0> >" + " *' at 0x7faa1c766670> >" ] }, "metadata": {}, @@ -12649,7 +13062,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -12923,8 +13336,65 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d37e0> >" + " *' at 0x7faa1c766700> >" ] }, "metadata": {}, @@ -14293,7 +14763,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + 
"[Streett-like 1]\n", "\n", "\n", "\n", @@ -14557,8 +15027,55 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3870> >" + " *' at 0x7faa1c7663d0> >" ] }, "metadata": {}, @@ -15869,7 +16386,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -16113,8 +16630,55 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3900> >" + " *' at 0x7faa1c766400> >" ] }, "metadata": {}, @@ -17425,7 +17989,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -17670,8 +18234,56 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f26743d3990> >" + " *' at 0x7faa1c766550> >" ] }, "metadata": {}, @@ -18979,7 +19591,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -19168,7 +19780,55 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -19201,7 +19861,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f26743d3a20> >" + " *' at 0x7faa1c766730> >" ] }, "metadata": {}, @@ -19340,7 +20000,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -19586,7 +20246,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -19813,7 +20473,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -19953,7 +20613,7 @@ ")) & Inf(\n", "\n", ")\n", - "[Streett-like 2]\n", + "[Streett-like 2]\n", "\n", "\n", "\n", @@ -20140,7 +20800,7 @@ ")) & Fin(\n", "\n", ")\n", - "[Streett-like 2]\n", + "[Streett-like 2]\n", "\n", "\n", "\n", @@ -20330,7 +20990,7 @@ ") & Fin(\n", "\n", ")\n", - "[Streett-like 3]\n", + "[Streett-like 3]\n", "\n", "\n", "\n", @@ -20440,7 +21100,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", "\n", "\n", "\n", @@ -20490,7 +21150,7 @@ ") | Fin(\n", "\n", "))\n", - "[Streett-like 2]\n", + "[Streett-like 
2]\n", "\n", "\n", "\n", @@ -20908,7 +21568,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 2d92236b7..1520b0c2f 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -737,8 +737,57 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "(!i & !o) | (i & o)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc33a0f0> >" + " *' at 0x7f08a8777750> >" ] }, "execution_count": 8, @@ -820,8 +869,68 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!i\n", + "/\n", + "\n", + "!o\n", + "\n", + "i\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc33a0f0> >" + " *' at 0x7f08a8777750> >" ] }, "execution_count": 9, @@ -944,8 +1053,103 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc345660> >" + " *' at 0x7f08a8777660> >" ] }, "execution_count": 10, @@ -1042,8 +1246,83 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3486c0> >" + " *' at 0x7f08a8774840> >" ] }, "execution_count": 11, @@ -1210,8 +1489,136 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + 
"0->3\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc345ae0> >" + " *' at 0x7f08a8774930> >" ] }, "execution_count": 12, @@ -1419,8 +1826,187 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc345e40> >" + " *' at 0x7f08a8776f10> >" ] }, "execution_count": 13, @@ -1577,8 +2163,133 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", 
+ "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc364a20> >" + " *' at 0x7f08a9ff0db0> >" ] }, "execution_count": 14, @@ -1721,8 +2432,106 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc364a20> >" + " *' at 0x7f08a9ff0db0> >" ] }, "execution_count": 15, @@ -1868,8 +2677,133 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35a2d0> >" + " *' at 0x7f08a8775800> >" ] }, "execution_count": 16, @@ -2013,8 +2947,106 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35a2d0> >" + " *' at 0x7f08a8775800> >" ] }, "execution_count": 17, @@ -2035,7 +3067,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -2560,8 +3592,172 @@ "\n", "\n" ], + "text/html": [ + "\n", + 
"\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35af00> >" + " *' at 0x7f08a876e310> >" ] }, "execution_count": 18, @@ -2714,8 +3910,112 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35af00> >" + " *' at 0x7f08a876e310> >" ] }, "execution_count": 19, @@ -2872,8 +4172,133 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + 
"\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc36d240> >" + " *' at 0x7f08a8776850> >" ] }, "metadata": {}, @@ -2881,7 +4306,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -4214,8 +5639,434 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + 
"\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3910f0> >" + " *' at 0x7f08a9ff1350> >" ] }, "execution_count": 20, @@ -4697,8 +6548,434 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + 
"\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3910f0> >" + " *' at 0x7f08a9ff1350> >" ] }, "execution_count": 21, @@ -4830,8 +7107,119 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc364570> >" + " *' at 0x7f08a8776970> >" ] }, "metadata": {}, @@ -4839,7 +7227,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -5508,8 +7896,218 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + 
"\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "3->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35a9c0> >" + " *' at 0x7f08a8775620> >" ] }, "execution_count": 22, @@ -5747,8 +8345,191 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc35a9c0> >" + " *' at 0x7f08a8775620> >" ] }, "execution_count": 23, @@ -5837,8 +8618,68 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3646c0> >" + " *' at 0x7f08a876e100> >" ] }, "metadata": {}, @@ -5931,8 +8772,93 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", 
+ "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3646c0> >" + " *' at 0x7f08a876e100> >" ] }, "metadata": {}, @@ -6053,8 +8979,90 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbccc3911e0> >" + " *' at 0x7f08a876f570> >" ] }, "execution_count": 25, @@ -6177,8 +9185,90 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fbcd407ca20> >" + " *' at 0x7f08a876fae0> >" ] }, "execution_count": 27, @@ -6241,7 +9331,7 @@ "14\n", "\n", "\n", - "\n", + "\n", "12->14\n", "\n", "\n", @@ -6253,7 +9343,7 @@ "16\n", "\n", "\n", - "\n", + "\n", "14->16\n", "\n", "\n", @@ -6289,7 +9379,7 @@ "a\n", "\n", "\n", - "\n", + "\n", "2->16\n", "\n", "\n", @@ -6301,7 +9391,7 @@ "b\n", "\n", "\n", - "\n", + "\n", "4->14\n", "\n", "\n", @@ -6313,7 +9403,7 @@ "c\n", "\n", "\n", - "\n", + "\n", "6->12\n", "\n", "\n", @@ -6325,7 +9415,153 @@ "d\n", "\n", "\n", + "\n", + "8->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "False\n", + "\n", + "\n", + "\n", + "0->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "10->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "12->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "18->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "c\n", + "\n", + "\n", "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + 
"\n", + "8\n", + "\n", + "d\n", + "\n", + "\n", + "\n", "8->12\n", "\n", "\n", @@ -6346,7 +9582,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fbccc33adb0> >" + " *' at 0x7f08a876f2a0> >" ] }, "execution_count": 28, @@ -6384,7 +9620,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/aliases.ipynb b/tests/python/aliases.ipynb index 7b3194335..a87887d33 100644 --- a/tests/python/aliases.ipynb +++ b/tests/python/aliases.ipynb @@ -351,7 +351,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -445,7 +445,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -559,7 +559,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -697,7 +697,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -830,7 +830,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -984,7 +984,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/alternation.ipynb b/tests/python/alternation.ipynb index 3d3cdae51..c16e5cab1 100644 --- a/tests/python/alternation.ipynb +++ b/tests/python/alternation.ipynb @@ -4282,7 +4282,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -4296,7 +4296,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/atva16-fig2a.ipynb b/tests/python/atva16-fig2a.ipynb index 1a62abff7..f4b1cc251 100644 --- a/tests/python/atva16-fig2a.ipynb +++ b/tests/python/atva16-fig2a.ipynb @@ -154,8 +154,115 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f12cf944300> >" + " *' at 0x7fdd801ca760> >" ] }, "execution_count": 3, @@ -224,7 +331,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -238,7 +345,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1+" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/atva16-fig2b.ipynb b/tests/python/atva16-fig2b.ipynb index 72a83dc99..2686dd4e5 100644 --- a/tests/python/atva16-fig2b.ipynb +++ b/tests/python/atva16-fig2b.ipynb @@ -82,9 +82,244 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "c=1, x1=0, x2=0, a1=0, a2=0\n", + "a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "c=1, x1=1, x2=0, a1=1, a2=0\n", + "!a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "c=1, x1=0, x2=1, a1=0, a2=1\n", + "a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "c=1, x1=2, x2=0, a1=2, a2=0\n", + "!a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "c=1, x1=1, x2=1, a1=1, a2=1\n", + "!a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "c=1, x1=0, x2=2, a1=0, a2=2\n", + "a1.Q & !"c==17" & !dead\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "c=2, x1=2, x2=0, a1=0, a2=0\n", + "...\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "c=1, x1=2, x2=1, a1=2, a2=1\n", + "...\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "c=1, x1=1, x2=2, a1=1, a2=2\n", + "...\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "c=2, x1=0, x2=2, a1=0, a2=0\n", + "...\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u6\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->u6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u7\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->u7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u8\n", + "\n", + "\n", + "...\n", + "\n", 
+ "\n", + "\n", + "\n", + "\n", + "8->u8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u9\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->u9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -311,7 +546,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f9a40554b10> >" + " *' at 0x7f43c06f2ac0> >" ] }, "execution_count": 4, @@ -358,7 +593,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -372,7 +607,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/automata-io.ipynb b/tests/python/automata-io.ipynb index 6de56f9f5..a461d8d27 100644 --- a/tests/python/automata-io.ipynb +++ b/tests/python/automata-io.ipynb @@ -173,8 +173,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50bf00> >" + " *' at 0x7f89ac1d28e0> >" ] }, "execution_count": 3, @@ -302,8 +358,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50b780> >" + " *' at 0x7f89ac1d3060> >" ] }, "metadata": {}, @@ -367,8 +479,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50bb70> >" + " *' at 0x7f89ac1d2850> >" ] }, "metadata": {}, @@ -489,8 +657,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50be10> >" + " *' at 0x7f89ac1d2af0> >" ] }, "metadata": {}, @@ -580,8 +804,65 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Hello world\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + 
"\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50b750> >" + " *' at 0x7f89ac1d2550> >" ] }, "metadata": {}, @@ -640,8 +921,59 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Hello world 2\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50bde0> >" + " *' at 0x7f89ac1d2610> >" ] }, "metadata": {}, @@ -750,8 +1082,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50b690> >" + " *' at 0x7f89ac1d32a0> >" ] }, "metadata": {}, @@ -808,8 +1196,57 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50bd20> >" + " *' at 0x7f89ac1d3330> >" ] }, "metadata": {}, @@ -864,8 +1301,55 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFa\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50b690> >" + " *' at 0x7f89ac1d32a0> >" ] }, "metadata": {}, @@ -933,8 +1417,68 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a & GFb\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50bae0> >" + " *' at 0x7f89ac1d3360> >" ] }, "metadata": {}, @@ -1016,8 +1560,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", 
+ "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fc24c50b6c0> >" + " *' at 0x7f89ae256040> >" ] }, "execution_count": 10, @@ -1055,7 +1655,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/automata.ipynb b/tests/python/automata.ipynb index 3f98bf3aa..d2dee06df 100644 --- a/tests/python/automata.ipynb +++ b/tests/python/automata.ipynb @@ -177,8 +177,159 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c & d\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "b & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "b & !c & d\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f773078f870> >" + " *' at 0x7f504c18e310> >" ] }, "execution_count": 2, @@ -204,7 +355,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -382,7 +533,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -656,8 +807,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ac6c0> >" + " *' at 0x7f504c18ef70> >" ] }, "execution_count": 6, @@ -732,8 +939,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ac840> >" + " *' at 0x7f504c18f2d0> >" ] }, "execution_count": 7, @@ -815,8 
+1078,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ac930> >" + " *' at 0x7f504c18f3c0> >" ] }, "execution_count": 8, @@ -865,7 +1184,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -976,7 +1295,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -1348,8 +1667,149 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ad320> >" + " *' at 0x7f504c1ac1e0> >" ] }, "execution_count": 12, @@ -1462,8 +1922,95 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ad500> >" + " *' at 0x7f504c1ac2d0> >" ] }, "execution_count": 13, @@ -1593,8 +2140,112 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", 
+ "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307add70> >" + " *' at 0x7f504c1ac720> >" ] }, "execution_count": 14, @@ -1815,8 +2466,138 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ae280> >" + " *' at 0x7f504c1ace70> >" ] }, "metadata": {}, @@ -1973,8 +2754,157 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ae250> >" + " *' at 0x7f504c1ace40> >" ] }, "metadata": {}, @@ -2131,8 +3061,157 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", 
+ "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!a | !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307adda0> >" + " *' at 0x7f504c1ac5a0> >" ] }, "metadata": {}, @@ -2279,8 +3358,147 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307adbc0> >" + " *' at 0x7f504c18fc60> >" ] }, "metadata": {}, @@ -2468,8 +3686,165 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + 
"2->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307adad0> >" + " *' at 0x7f504c1ad470> >" ] }, "execution_count": 19, @@ -2544,8 +3919,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ad440> >" + " *' at 0x7f504c1ad2f0> >" ] }, "execution_count": 20, @@ -3086,8 +4517,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307af1e0> >" + " *' at 0x7f504c1ae250> >" ] }, "metadata": {}, @@ -3186,8 +4673,78 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307afa20> >" + " *' at 0x7f504c1aedf0> >" ] }, "execution_count": 24, @@ -3259,8 +4816,54 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307ac750> >" + " *' at 0x7f504c1ac570> >" ] }, "execution_count": 25, @@ -3286,7 +4889,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -3430,8 +5033,59 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307aef40> >" + " *' at 0x7f504c1aec70> >" ] }, "execution_count": 27, @@ -3513,8 +5167,64 @@ "\n", "\n" ], + 
"text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307af1e0> >" + " *' at 0x7f504c1ae250> >" ] }, "metadata": {}, @@ -3578,8 +5288,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307af1e0> >" + " *' at 0x7f504c1ae250> >" ] }, "metadata": {}, @@ -3665,8 +5431,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f77307af1e0> >" + " *' at 0x7f504c1ae250> >" ] }, "execution_count": 29, @@ -3706,7 +5528,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/cav22-figs.ipynb b/tests/python/cav22-figs.ipynb index d232bebe2..320cc5414 100644 --- a/tests/python/cav22-figs.ipynb +++ b/tests/python/cav22-figs.ipynb @@ -140,8 +140,109 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fb6a430f5a0> >" + " *' at 0x7fb3e849e6a0> >" ] }, "execution_count": 2, @@ -162,7 +263,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -717,7 +818,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -1273,8 +1374,275 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", 
+ "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "4->12\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "8->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "2->10\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "3->11\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "11->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "5->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fb6a430f300> >" + " *' at 0x7fb3e849e8b0> >" ] }, "execution_count": 7, @@ -1574,7 +1942,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/contains.ipynb b/tests/python/contains.ipynb index b16d896db..391cdf489 100644 --- a/tests/python/contains.ipynb +++ b/tests/python/contains.ipynb @@ -326,7 +326,7 @@ "$\\mathsf{cycle}\\{a; \\lnot a\\}$" ], "text/plain": [ - " *' at 0x7f7c7c425630> >" + " *' at 0x7f05002c3c90> >" ] }, "execution_count": 16, @@ -379,43 +379,43 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", @@ -423,54 +423,54 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", + "\n", + "\n", + "a\n", "\n", "\n", "\n", @@ -491,7 +491,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -505,7 +505,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/decompose.ipynb b/tests/python/decompose.ipynb index 898ed3f91..9d1728f21 100644 --- a/tests/python/decompose.ipynb +++ b/tests/python/decompose.ipynb @@ -12,7 +12,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -200,8 +199,167 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b14b0> >" + " *' at 0x7f328c1623d0> >" ] }, "execution_count": 2, @@ -215,7 +373,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -331,8 +488,102 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", 
+ "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b11b0> >" + " *' at 0x7f328c162340> >" ] }, "execution_count": 3, @@ -345,7 +596,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -491,8 +741,140 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1270> >" + " *' at 0x7f328c161fe0> >" ] }, "execution_count": 4, @@ -505,7 +887,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -590,8 +971,79 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b18d0> >" + " *' at 0x7f328c162070> >" ] }, "execution_count": 5, @@ -604,7 +1056,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -673,8 +1124,58 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1c00> >" + " *' at 0x7f328c162eb0> >" ] }, "execution_count": 6, @@ -687,7 +1188,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -801,8 +1301,106 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "option: sw\n", + "\n", + "option: sw\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + 
"\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1f60> >" + " *' at 0x7f328c163390> >" ] }, "metadata": {}, @@ -946,8 +1544,144 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "option: st\n", + "\n", + "option: st\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1d80> >" + " *' at 0x7f328c163360> >" ] }, "metadata": {}, @@ -1114,8 +1848,167 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "option: wt\n", + "\n", + "option: wt\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + 
], "text/plain": [ - " *' at 0x7f56404b1f90> >" + " *' at 0x7f328c163390> >" ] }, "metadata": {}, @@ -1130,7 +2023,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1518,8 +2410,372 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "cluster_5\n", + "\n", + "\n", + "\n", + "cluster_6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "\n", + "8->6\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "7->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + 
"4->7\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1ed0> >" + " *' at 0x7f328c162b50> >" ] }, "execution_count": 8, @@ -1543,7 +2799,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1905,8 +3160,356 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "terminal\n", + "\n", + "terminal\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "cluster_5\n", + "\n", + "\n", + "\n", + "cluster_6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + 
], "text/plain": [ - " *' at 0x7f56404b1b70> >" + " *' at 0x7f328c162c70> >" ] }, "metadata": {}, @@ -2137,8 +3740,231 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strictly weak\n", + "\n", + "strictly weak\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1f60> >" + " *' at 0x7f328c163390> >" ] }, "metadata": {}, @@ -2347,8 +4173,209 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strong\n", + "\n", + "strong\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a 
& !c\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1ab0> >" + " *' at 0x7f328c162c70> >" ] }, "metadata": {}, @@ -2363,7 +4390,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2736,8 +4762,365 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "cluster_5\n", + "\n", + "\n", + "\n", + "cluster_6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + 
"\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56404b1c90> >" + " *' at 0x7f328c163c00> >" ] }, "execution_count": 10, @@ -2750,7 +5133,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2898,8 +5280,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "inherently terminal\n", + "\n", + "inherently terminal\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640045330> >" + " *' at 0x7f328c163b40> >" ] }, "metadata": {}, @@ -3037,8 +5553,138 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strictly inherently weak\n", + "\n", + "strictly inherently weak\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + 
"\n" + ], "text/plain": [ - " *' at 0x7f5640d27930> >" + " *' at 0x7f328c163a80> >" ] }, "metadata": {}, @@ -3182,8 +5828,144 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strong\n", + "\n", + "strong\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[parity min even 3]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "(b & !c) | (!a & !c)\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56400456f0> >" + " *' at 0x7f328c163e10> >" ] }, "metadata": {}, @@ -3198,7 +5980,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3237,7 +6018,326 @@ ") | Inf(\n", "\n", "))\n", - "[Streett 2]\n", + "[Streett 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "I->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + 
"1\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))\n", + "[Streett 2]\n", "\n", "cluster_0\n", "\n", @@ -3537,7 +6637,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f5640045450> >" + " *' at 0x7f328c163150> >" ] }, "execution_count": 12, @@ -3879,8 +6979,321 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "inherently terminal\n", + "\n", + "inherently terminal\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + 
"\n", + "6->4\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640045540> >" + " *' at 0x7f328c162c10> >" ] }, "metadata": {}, @@ -4056,8 +7469,176 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strictly inherently weak\n", + "\n", + "strictly inherently weak\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640d3abd0> >" + " *' at 0x7f328c163e10> >" ] }, "metadata": {}, @@ -4087,7 +7668,156 @@ ") | Inf(\n", "\n", "))\n", - "[Streett 2]\n", + "[Streett 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + 
"a & b & !c\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strong\n", + "\n", + "strong\n", + "(Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))\n", + "[Streett 2]\n", "\n", "cluster_0\n", "\n", @@ -4215,7 +7945,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f5640d3a8a0> >" + " *' at 0x7f328c162c10> >" ] }, "metadata": {}, @@ -4230,7 +7960,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4266,7 +7995,325 @@ ") | Inf(\n", "\n", "))\n", - "[Streett 2]\n", + "[Streett 2]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "cluster_4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b & !c\n", 
+ "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))\n", + "[Streett 2]\n", "\n", "cluster_0\n", "\n", @@ -4565,7 +8612,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f5640d3ae10> >" + " *' at 0x7f328c163240> >" ] }, "execution_count": 14, @@ -4578,7 +8625,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4688,8 +8734,96 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640045930> >" + " *' at 0x7f328c163d50> >" ] }, "execution_count": 15, @@ -4786,8 +8920,76 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f56400452a0> >" + " *' at 0x7f328c17c870> >" ] }, "metadata": {}, @@ -4862,8 +9064,75 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640d3a930> >" + " *' at 0x7f328c1634b0> >" ] }, "metadata": {}, @@ -4876,7 +9145,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4957,8 +9225,75 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], 
"text/plain": [ - " *' at 0x7f5640045090> >" + " *' at 0x7f328c17c540> >" ] }, "execution_count": 18, @@ -4971,7 +9306,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4979,7 +9313,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5011,7 +9344,121 @@ ") | Fin(\n", "\n", ")\n", - "[Streett-like 1]\n", + "[Streett-like 1]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", "\n", "cluster_0\n", "\n", @@ -5110,7 +9557,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f5640045b70> >" + " *' at 0x7f328c17c570> >" ] }, "execution_count": 19, @@ -5143,7 +9590,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5270,8 +9716,121 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "terminal\n", + "\n", + "terminal\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640052300> >" + " *' at 0x7f328c17db90> >" ] }, "metadata": {}, @@ -5372,8 +9931,101 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "strictly weak\n", + "\n", + "strictly weak\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a\n", + "\n", + 
"\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640d34a80> >" + " *' at 0x7f328c162c10> >" ] }, "metadata": {}, @@ -5507,8 +10159,127 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "all strengths\n", + "\n", + "all strengths\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640052360> >" + " *' at 0x7f328c1626d0> >" ] }, "metadata": {}, @@ -5526,7 +10297,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5712,8 +10482,167 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "cluster_3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640045f00> >" + " *' at 0x7f328c17d410> >" ] }, "metadata": {}, @@ -5853,8 +10782,140 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "cluster_1\n", + "\n", + "\n", + "\n", + "cluster_2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + 
"0->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640d34ae0> >" + " *' at 0x7f328c17d3e0> >" ] }, "execution_count": 21, @@ -5872,7 +10933,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5957,8 +11017,79 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "cluster_0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f5640045e40> >" + " *' at 0x7f328c17e160> >" ] }, "execution_count": 22, @@ -5973,7 +11104,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -5987,7 +11118,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 714220c2a..f20769a72 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -502,7 +502,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -517,8 +517,8 @@ "\n", "\n", "0\n", - "\n", - "EConcat\n", + "\n", + "EConcat\n", "\n", "\n", "\n", @@ -529,9 +529,9 @@ "\n", "\n", "0->1\n", - "\n", - "\n", - "L\n", + "\n", + "\n", + "L\n", "\n", "\n", "\n", @@ -542,9 +542,9 @@ "\n", "\n", "0->11\n", - "\n", - "\n", - "R\n", + "\n", + "\n", + "R\n", "\n", "\n", "\n", @@ -555,14 +555,14 @@ "\n", "\n", "1->2\n", - "\n", + "\n", "\n", - "1\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", + "\n", "first_match\n", "\n", "\n", @@ -734,7 +734,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", @@ -1036,7 +1036,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1050,7 +1050,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index 863e7efdc..5127b0bbb 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -43,7 +43,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -281,7 +281,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -497,7 +497,7 @@ "outputs": [ { "data": { 
- "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -896,8 +896,222 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "9->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fcbe436f840> >" + " *' at 0x7f84987be250> >" ] }, "execution_count": 8, @@ -1223,8 +1437,222 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + 
"b\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "9->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fcbe436e9a0> >" + " *' at 0x7f84987bea90> >" ] }, "execution_count": 11, @@ -1254,7 +1682,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -1411,7 +1839,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -1427,7 +1855,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett 1]\n", + "[Streett 1]\n", "\n", "\n", "\n", @@ -2024,8 +2452,351 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + 
"\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fcbe4382370> >" + " *' at 0x7f84987be8e0> >" ] }, "execution_count": 14, @@ -2407,8 +3178,351 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fcbe4382370> >" + " *' at 0x7f84987be8e0> >" ] }, "execution_count": 15, @@ -2783,8 +3897,351 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + 
"\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fcbe4382370> >" + " *' at 0x7f84987be8e0> >" ] }, "execution_count": 16, @@ -2818,7 +4275,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.7" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/gen.ipynb b/tests/python/gen.ipynb index 07af746ba..c70aa5895 100644 --- a/tests/python/gen.ipynb +++ b/tests/python/gen.ipynb @@ -524,18 +524,18 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -738,16 +738,16 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", 
"\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "(Fin(\n", "\n", @@ -762,7 +762,7 @@ ") | Inf(\n", "\n", "))\n", - "[Streett 3]\n", + "[Streett 3]\n", "\n", "\n", "\n", @@ -986,18 +986,18 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1239,7 +1239,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1253,7 +1253,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index 13e29bb75..07145174d 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -47,7 +47,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -246,8 +246,91 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e4de0> >" + " *' at 0x7fa7f4156190> >" ] }, "execution_count": 4, @@ -358,8 +441,91 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e57d0> >" + " *' at 0x7fa7f4155f80> >" ] }, "execution_count": 5, @@ -468,8 +634,91 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e4de0> >" + " *' at 0x7fa7f4156190> >" ] }, "execution_count": 6, @@ -690,8 +939,91 @@ 
"\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e6a30> >" + " *' at 0x7fa7f4157900> >" ] }, "execution_count": 9, @@ -802,8 +1134,91 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e71e0> >" + " *' at 0x7fa7f4157780> >" ] }, "execution_count": 10, @@ -967,8 +1382,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e6f10> >" + " *' at 0x7fa7f41574b0> >" ] }, "execution_count": 11, @@ -1162,8 +1711,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", 
+ "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e6f10> >" + " *' at 0x7fa7f41574b0> >" ] }, "execution_count": 14, @@ -1500,8 +2183,317 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | Fin(\n", + "\n", + ")) & Fin(\n", + "\n", + ")\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "0->9\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "9->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "8->9\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + 
"\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e6be0> >" + " *' at 0x7fa7f41745d0> >" ] }, "metadata": {}, @@ -1761,8 +2753,260 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & ((Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Inf(\n", + "\n", + "))) | (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "9->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "7->5\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e67c0> >" + " *' at 0x7fa7f4156100> >" ] }, "metadata": {}, @@ -1944,8 +3188,182 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "i G F a G F b\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + 
"1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e6610> >" + " *' at 0x7fa7f4157480> >" ] }, "metadata": {}, @@ -2061,8 +3479,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e63a0> >" + " *' at 0x7fa7f4174d80> >" ] }, "metadata": {}, @@ -2116,8 +3590,54 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e52c0> >" + " *' at 0x7fa7f4174210> >" ] }, "metadata": {}, @@ -2210,8 +3730,81 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e66d0> >" + " *' at 0x7fa7f4174f30> >" ] }, "execution_count": 17, @@ -2339,8 +3932,81 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e66d0> >" + " *' at 0x7fa7f4174f30> >" ] }, "metadata": {}, @@ -2404,8 +4070,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + 
"\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e63a0> >" + " *' at 0x7fa7f4174d80> >" ] }, "metadata": {}, @@ -2459,8 +4181,54 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e52c0> >" + " *' at 0x7fa7f4174210> >" ] }, "metadata": {}, @@ -2685,8 +4453,189 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0 * 3\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1 * 2\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2 * 2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "1 * 1\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "2 * 1\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "1 * 0\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "2 * 0\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "1 * 4\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "2 * 4\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e61c0> >" + " *' at 0x7fa7f4176160> >" ] }, "metadata": {}, @@ -2770,8 +4719,84 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e5fe0> >" + " *' at 0x7fa7f4175320> >" ] }, "metadata": {}, @@ -2867,8 +4892,96 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + 
"\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e78a0> >" + " *' at 0x7fa7f4176130> >" ] }, "metadata": {}, @@ -3036,8 +5149,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e7030> >" + " *' at 0x7fa7f4175950> >" ] }, "execution_count": 22, @@ -3204,8 +5451,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e7030> >" + " *' at 0x7fa7f4175950> >" ] }, "execution_count": 23, @@ -3367,8 +5748,142 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + 
"\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7fe2982e7030> >" + " *' at 0x7fa7f4175950> >" ] }, "metadata": {}, @@ -3376,7 +5891,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -3509,7 +6024,7 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -3607,7 +6122,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -3876,7 +6391,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/ltsmin-dve.ipynb b/tests/python/ltsmin-dve.ipynb index 7b474ce30..ba34a6294 100644 --- a/tests/python/ltsmin-dve.ipynb +++ b/tests/python/ltsmin-dve.ipynb @@ -223,9 +223,252 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "a=0, b=0, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "a=1, b=0, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a=0, b=1, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "a=2, b=0, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "a=1, b=1, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "a=0, b=2, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "a=3, b=0, Q=0\n", + "!"a<1" & !"b>2" & dead\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "a=2, b=1, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "a=1, b=2, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u5\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->u5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "a=0, b=3, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u7\n", + "\n", + 
"\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->u7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u8\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8->u8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u9\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->u9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", "\n", "t\n", "[all]\n", @@ -460,7 +703,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe6bc6afba0> >" + " *' at 0x7fd3583bbfc0> >" ] }, "execution_count": 9, @@ -480,16 +723,16 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "t\n", "[all]\n", @@ -844,16 +1087,16 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "t\n", "[all]\n", @@ -1283,11 +1526,67 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + ""a<1" & !"b>2"\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + ""b>2"\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1333,7 +1632,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe6bc6bf660> >" + " *' at 0x7fd3583d4690> >" ] }, "execution_count": 12, @@ -1359,9 +1658,183 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "a=0, b=0, Q=0 * 1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "a=1, b=0, Q=0 * 1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a=0, b=1, Q=0 * 1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "a=1, b=1, Q=0 * 1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "a=0, b=2, Q=0 * 1\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "a=1, b=2, Q=0 * 1\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "a=0, b=3, Q=0 * 1\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "a=0, b=2, Q=1 * 1\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "a=0, b=3, Q=1 * 0\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + ""a<1" & "b>2" & !dead\n", + "\n", + "\n", + "\n", + "u7\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->u7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "a=1, b=2, Q=1 * 1\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + ""a<1" & "b>2" & dead\n", + "\n", + "\n", 
+ "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", "\n", "Inf(\n", "\n", @@ -1527,7 +2000,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe6bc6afc30> >" + " *' at 0x7fd3583d44b0> >" ] }, "execution_count": 13, @@ -1668,9 +2141,84 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "a=0, b=0, Q=0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "a=1, b=0, Q=0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + ""a<1" & !"b > 1" & !dead\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a=2, b=0, Q=0\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!"a<1" & !"b > 1" & !dead\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "a=3, b=0, Q=0\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!"a<1" & !"b > 1" & !dead\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!"a<1" & !"b > 1" & dead\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", "\n", "t\n", "[all]\n", @@ -1737,7 +2285,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe6bc6cbcc0> >" + " *' at 0x7fd3583d4360> >" ] }, "execution_count": 19, @@ -1856,9 +2404,252 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "a=0, b=0, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "a=1, b=0, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a=0, b=1, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "a=2, b=0, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "a=1, b=1, Q=0\n", + "!"a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "a=0, b=2, Q=0\n", + ""a<1" & !"b>2" & !dead\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "a=3, b=0, Q=0\n", + "!"a<1" & !"b>2" & dead\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "a=2, b=1, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "a=1, b=2, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u5\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->u5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "a=0, b=3, Q=0\n", + "...\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u7\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->u7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u8\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8->u8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "u9\n", + "\n", + "\n", + "...\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->u9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" 
+ ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", "\n", "t\n", "[all]\n", @@ -2093,7 +2884,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fe6bc6c4210> >" + " *' at 0x7fd3583d4bd0> >" ] }, "execution_count": 21, @@ -2110,7 +2901,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -2124,7 +2915,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/ltsmin-pml.ipynb b/tests/python/ltsmin-pml.ipynb index 5d25b207f..5787634c9 100644 --- a/tests/python/ltsmin-pml.ipynb +++ b/tests/python/ltsmin-pml.ipynb @@ -40,8 +40,8 @@ "SpinS Promela Compiler - version 1.1 (3-Feb-2015)\n", "(C) University of Twente, Formal Methods and Tools group\n", "\n", - "Parsing tmpwot5yb9c.pml...\n", - "Parsing tmpwot5yb9c.pml done (0.0 sec)\n", + "Parsing tmpl3q0izvh.pml...\n", + "Parsing tmpl3q0izvh.pml done (0.0 sec)\n", "\n", "Optimizing graphs...\n", " StateMerging changed 0 states/transitions.\n", @@ -84,8 +84,8 @@ " Found 2 / 2 (100.0%) Commuting actions \n", "Generating guard dependency matrices done (0.0 sec)\n", "\n", - "Written C code to /home/adl/git/spot/tests/python/tmpwot5yb9c.pml.spins.c\n", - "Compiled C code to PINS library tmpwot5yb9c.pml.spins\n", + "Written C code to /home/adl/git/spot/tests/python/tmpl3q0izvh.pml.spins.c\n", + "Compiled C code to PINS library tmpl3q0izvh.pml.spins\n", "\n" ] } @@ -418,8 +418,280 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "P_0._pc=0, P_0.a=0, P_0.b=0\n", + ""P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "P_0._pc=0, P_0.a=1, P_0.b=0\n", + ""P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "P_0._pc=0, P_0.a=0, P_0.b=1\n", + ""P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "P_0._pc=0, P_0.a=2, P_0.b=0\n", + "!"P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "P_0._pc=0, P_0.a=1, P_0.b=1\n", + ""P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "P_0._pc=0, P_0.a=0, P_0.b=2\n", + ""P_0.a < 2" & "P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "P_0._pc=0, P_0.a=3, P_0.b=0\n", + "!"P_0.a < 2" & !"P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "P_0._pc=0, P_0.a=2, P_0.b=1\n", + "!"P_0.a < 2" & !"P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "P_0._pc=0, P_0.a=1, P_0.b=2\n", + ""P_0.a < 2" & "P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "P_0._pc=0, P_0.a=0, P_0.b=3\n", + ""P_0.a < 2" & "P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + 
"6->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "P_0._pc=0, P_0.a=3, P_0.b=1\n", + "!"P_0.a < 2" & !"P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "7->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "P_0._pc=0, P_0.a=2, P_0.b=2\n", + "!"P_0.a < 2" & "P_0.b > 1" & !dead\n", + "\n", + "\n", + "\n", + "7->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "P_0._pc=0, P_0.a=1, P_0.b=3\n", + ""P_0.a < 2" & "P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "8->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "P_0._pc=0, P_0.a=3, P_0.b=2\n", + "!"P_0.a < 2" & "P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "11->13\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "P_0._pc=0, P_0.a=2, P_0.b=3\n", + "!"P_0.a < 2" & "P_0.b > 1" & dead\n", + "\n", + "\n", + "\n", + "11->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13->13\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->14\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f7f9849ee20> >" + " *' at 0x7f44c85cf300> >" ] }, "execution_count": 4, @@ -452,7 +724,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -781,7 +1053,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -1313,7 +1585,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/parity.ipynb b/tests/python/parity.ipynb index 09d4b133e..a47864c92 100644 --- a/tests/python/parity.ipynb +++ b/tests/python/parity.ipynb @@ -3340,7 +3340,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett 1]\n", + "[Streett 1]\n", "\n", "\n", "\n", @@ -4105,7 +4105,7 @@ ") | Fin(\n", "\n", ")\n", - "[Streett 1]\n", + "[Streett 1]\n", "\n", "\n", "\n", @@ -4832,8 +4832,124 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")))\n", + "[parity max even 4]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f2c282eb960> >" + " *' at 0x7fc0d6bea0d0> >" ] }, "metadata": {}, @@ -5344,7 +5460,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.7" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/product.ipynb b/tests/python/product.ipynb index 90dde8243..e5434e0c7 100644 --- 
a/tests/python/product.ipynb +++ b/tests/python/product.ipynb @@ -39,13 +39,13 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")\n", + "\n", + "Inf(\n", + "\n", + ")\n", "[Büchi]\n", "\n", "\n", @@ -90,10 +90,10 @@ "\n", "\n", "1->1\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", @@ -103,90 +103,90 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", @@ -196,189 +196,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 3]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "0,1\n", + "\n", + "0,1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "1,0\n", + "\n", + "1,0\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "1,1\n", + "\n", + "1,1\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + 
"\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", "\n", "\n", "\n", @@ -424,13 +424,13 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")\n", + "\n", + "Inf(\n", + "\n", + ")\n", "[Büchi]\n", "\n", "\n", @@ -475,10 +475,10 @@ "\n", "\n", "1->1\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", @@ -488,90 +488,90 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", @@ -581,189 +581,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 3]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "0,1\n", + "\n", + "0,1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "1,0\n", + "\n", + "1,0\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "1,1\n", + "\n", + "1,1\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", "\n", "\n", "\n", @@ -817,13 +817,13 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")\n", + "\n", + "Inf(\n", + "\n", + ")\n", "[Büchi]\n", "\n", "\n", @@ -868,10 +868,10 @@ "\n", "\n", "1->1\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", @@ -881,90 +881,90 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", @@ -974,161 +974,161 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", + "\n", + "\n", + "!b & c\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", + "\n", + "\n", + "!b & c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", + "\n", + "\n", + "b & c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", + "\n", + "\n", + "b & !c\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", + "\n", + "\n", + "a & b & c\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", + "\n", + "\n", + "a & b & !c\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", + "\n", + "\n", + "a & !b & c\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", + "\n", + "\n", + "a & !b & c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", + "\n", + "\n", + "a & b & c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", + "\n", + "\n", + "a & b & !c\n", "\n", "\n", "\n", @@ -1253,13 +1253,13 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")\n", + "\n", + "Inf(\n", + "\n", + ")\n", "[Büchi]\n", "\n", "\n", @@ -1304,10 +1304,10 @@ "\n", "\n", "1->1\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", @@ -1317,90 +1317,90 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", @@ -1410,189 +1410,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 3]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", "\n", "\n", "\n", @@ -1785,13 +1785,13 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")\n", + "\n", + "Inf(\n", + "\n", + ")\n", "[Büchi]\n", 
"\n", "\n", @@ -1836,10 +1836,10 @@ "\n", "\n", "1->1\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", @@ -1849,90 +1849,90 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 2]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", @@ -1942,189 +1942,189 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 3]\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "0,1\n", + "\n", + "0,1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "1,0\n", + "\n", + "1,0\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "1,1\n", + "\n", + "1,1\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", "\n", "\n", 
"\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", "\n", "\n", "\n", @@ -2216,208 +2216,208 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")\n", - "[gen. Büchi 3]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 3]\n", "\n", "\n", "\n", "0\n", "\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", + "\n", + "\n", + "b\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", "\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "2\n", "\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & c\n", - "\n", + "\n", + "\n", + "!b & c\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "b & c\n", - "\n", - "\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!b & !c\n", + "\n", + "\n", + "!b & !c\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "b & !c\n", - "\n", + "\n", + "\n", + "b & !c\n", + "\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", "\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "a & b\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "2->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & !b & c\n", - "\n", + "\n", + "\n", + "a & !b & c\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "a & b & c\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & !b & !c\n", + "\n", + "\n", + "a & !b & !c\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "a & b & !c\n", - "\n", - "\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", "\n", "\n", "\n" @@ -2458,18 +2458,34 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "87.6 µs ± 982 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n" + ] + } + ], "source": [ "%timeit product3(a1, a2)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.14 µs ± 9.61 ns per loop (mean ± std. dev. 
of 7 runs, 100,000 loops each)\n" + ] + } + ], "source": [ "%timeit spot.product(a1, a2)" ] @@ -2495,7 +2511,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -2509,7 +2525,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/randaut.ipynb b/tests/python/randaut.ipynb index b6ba6d195..1ed69d812 100644 --- a/tests/python/randaut.ipynb +++ b/tests/python/randaut.ipynb @@ -34,123 +34,123 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & Fin(\n", - "\n", - ") & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!p0 & !p1\n", + "\n", + "\n", + "!p0 & !p1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "p0 & p1\n", + "\n", + "\n", + "p0 & p1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", - "\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!p0 & !p1\n", + "\n", + "\n", + "!p0 & !p1\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", "\n", "\n", "\n", @@ -160,111 +160,111 @@ "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!p0 & !p1\n", + "\n", + "\n", + "!p0 & !p1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "p0 & p1\n", + "\n", + "\n", + "p0 & p1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "p0 & !p1\n", + "\n", + "\n", + "p0 & !p1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!p0 & !p1\n", + "\n", + "\n", + "!p0 & !p1\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "p0 & !p1\n", + "\n", + "\n", + "p0 & !p1\n", "\n", "\n", "\n", @@ -274,125 +274,125 @@ "\n", "\n", - "\n", - "\n", - 
"\n", - "(Fin(\n", - "\n", - ") & Fin(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "p0 & p1\n", - "\n", + "\n", + "\n", + "p0 & p1\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "p0 & p1\n", - "\n", + "\n", + "\n", + "p0 & p1\n", + "\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "p0 & p1\n", + "\n", + "\n", + "p0 & p1\n", "\n", "\n", "\n", @@ -402,123 +402,123 @@ "\n", "\n", - "\n", - "\n", - "\n", - "(Fin(\n", - "\n", - ") & Fin(\n", - "\n", - ")) | Inf(\n", - "\n", - ")\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!p0 & p1\n", + "\n", + "\n", + "!p0 & p1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "p0 & p1\n", - "\n", + "\n", + "\n", + "p0 & p1\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "p0 & p1\n", - "\n", + "\n", + "\n", + "p0 & p1\n", + "\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", "\n", "\n", "\n", "3->2\n", - "\n", - "\n", - "p0 & p1\n", + "\n", + "\n", + "p0 & p1\n", "\n", "\n", "\n", @@ -528,19 +528,19 @@ "\n", "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")) | (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", "\n", "\n", "\n", @@ -557,109 +557,109 @@ "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", 
 [SVG output elided: the remaining hunks of this notebook's diff only rewrite the SVG
  renderings of the example automata (edge guards over p0/p1, acceptance labels such as
  "[Büchi]", "[Fin-less 3]", "[gen. Streett 3]", "[gen. Rabin 2]" and "[Rabin-like 3]"),
  then update the notebook metadata: "display_name" becomes "Python 3 (ipykernel)" and
  "version" goes from "3.7.5" to "3.11.7".]
diff --git a/tests/python/satmin.ipynb b/tests/python/satmin.ipynb
index 4442e5495..0c83d85f5 100644
--- a/tests/python/satmin.ipynb
+++ b/tests/python/satmin.ipynb
 [notebook output diff elided: the regenerated outputs add "text/html" SVG renderings
  of the displayed automata (Büchi, co-Büchi, Rabin, parity) next to the existing
  image/svg outputs, update the repr addresses of the spot.twa_graph objects
  (0x7fe1... becomes 0x7f61...), and refresh the enc.user, enc.sys, sat.user and
  sat.sys timing columns of the SAT-minimization statistics tables while the other
  columns keep their values.]
 [notebook output diff elided: continuation of the satmin.ipynb changes summarized
  above, ending with the same metadata update: "display_name" becomes
  "Python 3 (ipykernel)" and "version" goes from "3.9.2" to "3.11.7".]
diff --git a/tests/python/stutter-inv.ipynb b/tests/python/stutter-inv.ipynb
index 627a6a826..2cfc87ac7 100644
--- a/tests/python/stutter-inv.ipynb
+++ b/tests/python/stutter-inv.ipynb
@@ -301,8 +301,57 @@
 [notebook output diff elided: start of another regenerated cell output.]
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff008667960> >" + " *' at 0x7f47047afc00> >" ] }, "metadata": {}, @@ -602,8 +651,160 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086679f0> >" + " *' at 0x7f47047d0bd0> >" ] }, "metadata": {}, @@ -811,8 +1012,160 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086679f0> >" + " *' at 0x7f47047d0bd0> >" ] }, "metadata": {}, @@ -963,8 +1316,80 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "G(F(a & Xa) & F!a)\n", + "\n", + "G(F(a & Xa) & F!a)\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff008667cc0> >" + " *' at 0x7f47047d1590> >" ] }, "metadata": {}, @@ -1061,8 +1486,97 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FG(!a | X!a)\n", + "\n", + "FG(!a | X!a)\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff008667cf0> >" + " *' at 0x7f47047d0ab0> >" ] }, "metadata": {}, @@ -1271,8 +1785,184 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "[Fin-less 3]\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "I->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086677b0> >" + " *' at 0x7f47047d2640> >" ] }, "metadata": {}, @@ -1400,8 +2090,100 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086677e0> >" + " *' at 0x7f47047d1d10> >" ] }, "metadata": {}, @@ -1527,8 +2309,90 @@ "
\n", "
\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff008667ed0> >" + " *' at 0x7f47047d2370> >" ] }, "metadata": {}, @@ -1787,8 +2651,204 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "I->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "7->5\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086676c0> >" + " *' at 0x7f47047d13e0> >" ] }, "metadata": {}, @@ -2101,8 +3161,254 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. 
Büchi 2]\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "I->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "7->5\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "8->9\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "9->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7ff0086676c0> >" + " *' at 0x7f47047d13e0> >" ] }, "metadata": {}, @@ -2343,7 +3649,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index b7edb2752..de50aa57e 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -56,590 +56,1176 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "0->12\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "0->13\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "10->8\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", 
"\n", "11->7\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "12->9\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "13->5\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "1->14\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "1->16\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "16->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", - "i1\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "2->17\n", - "\n", - "\n", - "!i1\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->14\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "3->16\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "3->18\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "18->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "4->19\n", - "\n", - "\n", - "!i1\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "4->20\n", - "\n", - "\n", - "i1\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "19->4\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "20->5\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "5->21\n", - "\n", - "\n", - "!i1\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", "5->22\n", - "\n", - "\n", - "i1\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "22->5\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->19\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "6->23\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "6->24\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "23->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", 
"\n", "\n", "24->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "24->9\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7->20\n", - "\n", - "\n", - "i1\n", + "\n", + "\n", + "i1\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "7->25\n", - "\n", - "\n", - "!i1\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "25->2\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "25->7\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "8->20\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "8->25\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "8->26\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "26->8\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "9->21\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "9->22\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "27->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "28->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "28->9\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "0->10\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "0->11\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "0->12\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "0->13\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "10->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "12->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "1->14\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "1->16\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + 
"15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "16->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->14\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "2->17\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "3->16\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "3->17\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "3->18\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "4->19\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "4->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "19->4\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "20->5\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "21\n", + "\n", + "21\n", + "\n", + "\n", + "\n", + "5->21\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "22\n", + "\n", + "22\n", + "\n", + "\n", + "\n", + "5->22\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "21->4\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "22->5\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "6->19\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "6->20\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "23\n", + "\n", + "23\n", + "\n", + "\n", + "\n", + "6->23\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "24\n", + "\n", + "24\n", + "\n", + "\n", + "\n", + "6->24\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "23->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "23->6\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "24->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "24->9\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "7->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "25\n", + "\n", + "25\n", + "\n", + "\n", + "\n", + "7->25\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "25->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "25->7\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "8->20\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "8->24\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "8->25\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "26\n", + "\n", + "26\n", + "\n", + "\n", + "\n", + "8->26\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "26->3\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "26->8\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "9->21\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "9->22\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "27\n", + "\n", + "27\n", + "\n", + "\n", + "\n", + "9->27\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "28\n", + "\n", + "28\n", + "\n", + "\n", + "\n", + "9->28\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "27->1\n", + "\n", + "\n", + "!o0\n", + "\n", + 
"\n", + "\n", + "27->6\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "28->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "28->9\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "15->14\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584de570> >" + " *' at 0x7f654cfb32a0> >" ] }, "metadata": {}, @@ -679,533 +1265,533 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ")\n", - "[co-Büchi]\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", "\n", "\n", "0\n", "\n", - "0\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", "\n", "\n", "0->12\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", "\n", "\n", "0->13\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "10->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "11->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", "\n", "12->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "13->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", "\n", "\n", "1->14\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", "\n", "\n", "1->16\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "16->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->14\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", "\n", "\n", "2->17\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "3->14\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->16\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->17\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "18\n", - "\n", - "18\n", + "\n", + "18\n", "\n", "\n", "\n", "3->18\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "18->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", "\n", "\n", "4->19\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", "\n", "\n", "4->20\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "19->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "20->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", "\n", "\n", "5->21\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", "\n", "\n", 
"5->22\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "21->4\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "22->5\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "6->19\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6->20\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", "\n", "\n", "6->23\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", "\n", "\n", "\n", "6->24\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "23->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "24->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "7->20\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25\n", - "\n", - "25\n", + "\n", + "25\n", "\n", "\n", "\n", "7->25\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "25->7\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->20\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->24\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8->25\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "26\n", - "\n", - "26\n", + "\n", + "26\n", "\n", "\n", "\n", "8->26\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "26->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "26->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "9->21\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "9->22\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "27\n", - "\n", - "27\n", + "\n", + "27\n", "\n", "\n", "\n", "9->27\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "28\n", - "\n", - "28\n", + "\n", + "28\n", "\n", "\n", "\n", "9->28\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "27->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "27->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "28->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "28->9\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1252,309 +1838,614 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", 
"\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + 
"0->4\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855c9f0> >" + " *' at 0x7f654cfb2ca0> >" ] }, "metadata": {}, @@ -1573,175 +2464,346 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 
& i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855cb10> >" + " *' at 0x7f654cfb3b40> >" ] }, "metadata": {}, @@ -1760,125 +2822,246 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 
& i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855ccf0> >" + " *' at 0x7f654cfb3480> >" ] }, "metadata": {}, @@ -1897,81 +3080,158 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855cd80> >" + " *' at 0x7f654cfb2dc0> >" ] }, "metadata": {}, @@ -1990,81 +3250,158 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", 
"I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584defc0> >" + " *' at 0x7f654cfb3e40> >" ] }, "metadata": {}, @@ -2083,125 +3420,246 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + 
"\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855ca20> >" + " *' at 0x7f654cfb2ca0> >" ] }, "metadata": {}, @@ -2253,260 +3711,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2548,60 +4006,116 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "2->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584deae0> >" + " *' at 0x7f654cfb21f0> >" ] }, "metadata": {}, @@ -2629,58 +4143,58 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2762,151 +4276,298 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", 
+ "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855cb70> >" + " *' at 0x7f654cfc9920> >" ] }, "metadata": {}, @@ -2918,112 +4579,220 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "4->3\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5\n", - "\n", 
- "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855cc60> >" + " *' at 0x7f654cfb3480> >" ] }, "metadata": {}, @@ -3035,144 +4804,144 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", - "t\n", - "[all]\n", + " viewBox=\"0.00 0.00 282.00 148.80\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "3->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", @@ -3191,72 +4960,140 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 142.70 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "6->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855c9f0> >" + " *' at 0x7f654cfb2ca0> >" ] }, "metadata": {}, @@ -3296,96 +5133,188 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "8->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "8->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + 
"2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "False\n", + "\n", + "\n", + "\n", + "0->o1:s\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855c900> >" + " *' at 0x7f654cfb3bd0> >" ] }, "metadata": {}, @@ -3434,134 +5363,134 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", + "\n", + "\n", + "o1\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", + "\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3587,94 +5516,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3700,108 +5629,212 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "10->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "10->o1:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584def00> >" + " *' at 0x7f655e8c5200> >" ] }, "metadata": {}, @@ -3820,53 +5853,102 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0 & o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0 & !o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", + "\n", + "\n", + "\n", + 
"0->0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855cd20> >" + " *' at 0x7f654cfb2ca0> >" ] }, "metadata": {}, @@ -3878,108 +5960,212 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "10->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "10->o1:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e5855c930> >" + " *' at 0x7f654cfb2700> >" ] }, "metadata": {}, @@ -4058,108 +6244,212 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "6->o1:s\n", + "\n", + "\n", + 
"\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "10->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584def90> >" + " *' at 0x7f654cfc9230> >" ] }, "metadata": {}, @@ -4238,52 +6528,100 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f0e584ee4e0> >" + " *' at 0x7f654cfcb720> >" ] }, "execution_count": 16, @@ -4316,114 +6654,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4464,180 +6802,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4672,7 +7010,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/testingaut.ipynb b/tests/python/testingaut.ipynb index 1a9a111d8..6925062aa 100644 --- a/tests/python/testingaut.ipynb +++ b/tests/python/testingaut.ipynb @@ -34,11 +34,67 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -84,7 +140,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb57416b570> >" + " *' at 0x7fd6401ba1c0> >" ] }, "execution_count": 2, @@ -112,7 +168,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -418,7 +474,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -588,7 +644,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", @@ -727,7 +783,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -741,7 +797,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/twagraph-internals.ipynb b/tests/python/twagraph-internals.ipynb index 21b58cb8c..230d47d89 100644 --- a/tests/python/twagraph-internals.ipynb +++ b/tests/python/twagraph-internals.ipynb @@ -1,7 +1,6 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -21,7 +20,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -29,7 +27,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -59,102 +56,203 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & 
b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "execution_count": 3, @@ -167,7 +265,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -181,227 +278,227 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "5\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "5\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "a & b\n", - "\n", - "!a & b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "0\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - 
"\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "a & b\n", + "\n", + "!a & b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "0\n", + "\n", + "0\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "no\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "yes\n", - "prop_unambiguous:\n", - "yes\n", - "prop_semi_deterministic:\n", - "yes\n", - "prop_stutter_invariant:\n", - "maybe\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "no\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", "\n", "\n", "\n", @@ -421,7 +518,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -466,115 +562,229 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "I->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", 
"\n", "1->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -582,213 +792,213 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "9\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "9\n", + "\n", "succ_tail\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "a & b\n", - "\n", - "!a & b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "0\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - 
"2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "a & b\n", + "\n", + "!a & b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -806,7 +1016,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -838,7 +1047,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -861,116 +1069,231 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "I->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & b\n", - "\n", + "\n", + "\n", + "a & b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + 
"\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -978,213 +1301,213 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "9\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "9\n", + "\n", "succ_tail\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "a & b\n", - "\n", - "!a & b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "0\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "a & b\n", + "\n", + "!a & b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + 
"\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -1206,7 +1529,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1229,108 +1551,215 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "I->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -1338,197 +1767,197 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", - 
"\n", - "8\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -1547,7 +1976,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1555,7 +1983,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1580,117 +2007,233 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "I->2\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "b\n", - "\n", + "\n", + "\n", + "b\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", 
"0->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "a & !b\n", - "\n", + "\n", + "\n", + "a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -1698,251 +2241,251 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "9\n", - "\n", - "\n", - "8\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "{0,1}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - 
"\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0,1}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "2\n", + "\n", + "2\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "no\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "yes\n", - "prop_unambiguous:\n", - "yes\n", - "prop_semi_deterministic:\n", - "yes\n", - "prop_stutter_invariant:\n", - "maybe\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "no\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", "\n", "\n", "\n", @@ -1962,7 +2505,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1976,41 +2518,41 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "props\n", "prop_state_acc:\n", - "maybe\n", + "maybe\n", "prop_inherently_weak:\n", - "maybe\n", + "maybe\n", "prop_terminal:\n", - "maybe\n", + "maybe\n", "prop_weak:\n", - "maybe\n", + "maybe\n", "prop_very_weak:\n", - "maybe\n", + "maybe\n", "prop_complete:\n", - "maybe\n", + "maybe\n", "prop_universal:\n", - "maybe\n", + "maybe\n", "prop_unambiguous:\n", - "maybe\n", + "maybe\n", "prop_semi_deterministic:\n", - "maybe\n", + "maybe\n", "prop_stutter_invariant:\n", - "maybe\n", + "maybe\n", "\n", "\n", "\n" @@ -2030,7 +2572,6 @@ ] }, { - "attachments": {}, 
"cell_type": "markdown", "metadata": {}, "source": [ @@ -2077,213 +2618,213 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "9\n", - "\n", - "\n", - "8\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{}\n", - "\n", - "{0,1}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "next_succ\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "2\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0,1}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "1\n", 
"\n", "\n", "\n" @@ -2302,7 +2843,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2332,7 +2872,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2365,7 +2904,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2393,7 +2931,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2407,197 +2944,197 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "8\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "8\n", + "\n", "succ_tail\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "0\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + 
"1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -2617,7 +3154,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -2631,197 +3167,197 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "8\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "8\n", + "\n", "succ_tail\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "0\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "0\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "0\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -2892,181 +3428,181 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ 
"\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", + "\n", + "\n", "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", "succ_tail\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "0\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n" @@ -3086,7 +3622,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3111,16 +3646,16 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", @@ -3152,22 +3687,22 @@ "0->0\n", "\n", "\n", - "a & !b\n", + "a & !b\n", "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "!a & !b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -3185,35 +3720,35 @@ "\n", "\n", "1->0\n", - "\n", + "\n", "\n", "a & !b\n", - "\n", + "\n", 
"\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n", @@ -3224,45 +3759,202 @@ "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "-1->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-4\n", - "\n", + "\n", "\n", "\n", "\n", "3->-4\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "-4->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-4->1\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "I->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "-1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->1\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "execution_count": 22, @@ -3285,294 +3977,294 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", 
- "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "a\n", - "\n", - "!a\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "~3\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "!a\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "~3\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", "\n", "\n", "\n", "dests\n", - "\n", - "\n", - "dests\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "\n", - "#cnt/dst\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "#cnt/dst\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "2\n", + "\n", + "2\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "maybe\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "maybe\n", - "prop_unambiguous:\n", - "maybe\n", - "prop_semi_deterministic:\n", - "maybe\n", - "prop_stutter_invariant:\n", - "maybe\n", + 
"prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", "\n", "\n", "\n", @@ -3592,7 +4284,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3628,7 +4319,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3662,7 +4352,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3696,7 +4385,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -3726,16 +4414,16 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", @@ -3763,14 +4451,14 @@ "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "-7->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -3789,16 +4477,16 @@ "0->0\n", "\n", "\n", - "a & !b\n", + "a & !b\n", "\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "!a & !b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -3816,35 +4504,35 @@ "\n", "\n", "1->0\n", - "\n", + "\n", "\n", "a & !b\n", - "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n", @@ -3862,45 +4550,225 @@ "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "-1->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-4\n", - "\n", + "\n", "\n", "\n", "\n", "3->-4\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "-4->0\n", - "\n", + "\n", "\n", "\n", "\n", "\n", "-4->1\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "-7\n", + "\n", + "\n", + "\n", + "\n", + "I->-7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-7->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-7->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "-7->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "-1->3\n", + "\n", + 
"\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->1\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "execution_count": 28, @@ -3936,311 +4804,311 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "a\n", - "\n", - "!a\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "~3\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "!a\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "~3\n", 
+ "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", "\n", "\n", "\n", "dests\n", - "\n", - "\n", - "dests\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "\n", - "\n", - "~6\n", - "\n", - "\n", - "\n", - "\n", - "#cnt/dst\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "#3\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "\n", + "~6\n", + "\n", + "\n", + "\n", + "\n", + "#cnt/dst\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "#3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "~6\n", + "\n", + "~6\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "maybe\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "maybe\n", - "prop_unambiguous:\n", - "maybe\n", - "prop_semi_deterministic:\n", - "maybe\n", - "prop_stutter_invariant:\n", - "maybe\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", "\n", "\n", "\n", @@ -4260,7 +5128,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -4281,16 +5148,16 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", @@ -4318,14 +5185,14 @@ "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "-7->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4344,16 +5211,16 @@ "0->0\n", "\n", "\n", - "a & !b\n", + "a & !b\n", "\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "!a & !b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -4371,35 +5238,35 @@ "\n", "\n", "1->0\n", - "\n", + "\n", "\n", "a & !b\n", - "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4417,70 +5284,275 @@ "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", 
"-1->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-4\n", - "\n", + "\n", "\n", "\n", "\n", "3->-4\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "-11\n", - "\n", + "\n", "\n", "\n", "\n", "3->-11\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "-4->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-4->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-11->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "-11->3\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "-7\n", + "\n", + "\n", + "\n", + "\n", + "I->-7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-7->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-7->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "-7->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "-1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "-11\n", + "\n", + "\n", + "\n", + "\n", + "3->-11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-11->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-11->3\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -4488,315 +5560,315 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "9\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "10\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "9\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "10\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - 
"4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "\n", - "10\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "a\n", - "\n", - "!a\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "~10\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "10\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "3\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "!a\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "~10\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "10\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", "\n", "\n", "\n", "dests\n", - "\n", - "\n", - "dests\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "\n", - "\n", - "~6\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "dests\n", "\n", "\n", - "~10\n", - "\n", - "\n", - "\n", - "#cnt/dst\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "#3\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "\n", + "~6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "~10\n", + "\n", + "\n", + "\n", + 
"#cnt/dst\n", "\n", - "#2\n", - "\n", - "\n", + "#2\n", + "\n", + "\n", "0\n", - "\n", - "\n", - "3\n", + "\n", + "\n", + "3\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "#3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "~6\n", + "\n", + "~6\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", @@ -4818,16 +5890,16 @@ "\n", "\n", - "\n", + "\n", "\n", - "\n", - "Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")\n", - "[Rabin 1]\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", "\n", "\n", "\n", @@ -4855,14 +5927,14 @@ "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "-7->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4881,16 +5953,16 @@ "0->0\n", "\n", "\n", - "a & !b\n", + "a & !b\n", "\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "!a & !b\n", - "\n", + "\n", "\n", "\n", "\n", @@ -4908,35 +5980,35 @@ "\n", "\n", "1->0\n", - "\n", + "\n", "\n", "a & !b\n", - "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & !b\n", - "\n", + "\n", + "\n", + "!a & !b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a & b\n", - "\n", + "\n", + "\n", + "!a & b\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", "\n", "\n", @@ -4954,53 +6026,241 @@ "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "-1->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "3->-1\n", - "\n", - "\n", + "\n", + "\n", "1\n", "\n", "\n", "\n", "\n", "-4\n", - "\n", + "\n", "\n", "\n", "\n", "3->-4\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", + "\n", "\n", "\n", "\n", "-4->0\n", - "\n", + "\n", "\n", "\n", "\n", "\n", "-4->1\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "-7\n", + "\n", + "\n", + "\n", + "\n", + "I->-7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "-7->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-7->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "-7->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "0->-1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "-1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->-1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + 
"3->-4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->1\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c40c2b70> >" + " *' at 0x7fd9b477e340> >" ] }, "metadata": {}, @@ -5008,302 +6268,302 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "10\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "10\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "\n", - "8\n", - "\n", - "\n", - "9\n", - "\n", - "\n", - "10\n", - "\n", - "cond\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "a\n", - "\n", - "a & !b\n", - "\n", - "!a & !b\n", - "\n", - "!a & b\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "!a\n", - "\n", - "1\n", - "\n", - "acc\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "{1}\n", - "\n", - "{0,1}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "~0\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - "\n", - "6\n", - "\n", - "\n", - "7\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "10\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "3\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "a\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "!a\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{0,1}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "0\n", + "\n", + 
"\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "~0\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "10\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", "\n", "\n", "\n", "dests\n", - "\n", - "\n", - "dests\n", - "\n", - "\n", - "~0\n", - "\n", - "\n", - "\n", - "\n", - "~3\n", - "\n", - "\n", - "\n", - "\n", - "~6\n", - "\n", - "\n", - "\n", - "\n", - "#cnt/dst\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "3\n", - "\n", - "#2\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "#3\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "\n", + "~6\n", + "\n", + "\n", + "\n", + "\n", + "#cnt/dst\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "#3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", "\n", "\n", "\n", "meta\n", "init_state:\n", - "\n", - "~6\n", + "\n", + "~6\n", "num_sets:\n", - "2\n", + "2\n", "acceptance:\n", - "Fin(0) & Inf(1)\n", + "Fin(0) & Inf(1)\n", "ap_vars:\n", - "b a\n", + "b a\n", "\n", "\n", "\n", @@ -5325,7 +6585,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5333,7 +6592,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5356,76 +6614,151 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "aub * gfa\n", - "\n", - "aub * gfa\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "aub * gfa\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "1,0\n", + "\n", + "1,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "1\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "aub * gfa\n", + "\n", + "aub * gfa\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c4098c30> >" + " *' at 0x7fd9b4794630> >" 
] }, "metadata": {}, @@ -5433,187 +6766,187 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "5\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "5\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "cond\n", - "\n", - "!a & b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a\n", - "\n", - "a\n", - "\n", - "acc\n", - "\n", - "{}\n", - "\n", - "{}\n", - "\n", - "{}\n", - "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "cond\n", + "\n", + "!a & b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a\n", + "\n", + "a\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", "{}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "meta\n", - "init_state:\n", - "\n", - "0\n", - "num_sets:\n", - "1\n", - "acceptance:\n", - "Inf(0)\n", - "ap_vars:\n", - "b a\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "maybe\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "yes\n", - "prop_unambiguous:\n", - "yes\n", - "prop_semi_deterministic:\n", - "yes\n", - "prop_stutter_invariant:\n", - "yes\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "yes\n", "\n", "\n", "\n", "\n", "namedprops\n", - "named properties:\n", - "automaton-name\n", - "product-states\n", + "named properties:\n", + "automaton-name\n", + "product-states\n", "\n", "\n", "\n", @@ -5636,7 +6969,6 @@ ] }, { - 
"attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -5657,74 +6989,147 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "a & !b\n", + "\n", + "\n", + "a & !b\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & b\n", + "\n", + "\n", + "!a & b\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & b\n", + "\n", + "\n", + "a & b\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!a\n", + "\n", + "\n", + "!a\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "a\n", - "\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7fd3c4098c30> >" + " *' at 0x7fd9b4794630> >" ] }, "metadata": {}, @@ -5732,179 +7137,179 @@ }, { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "g\n", - "\n", + "\n", "\n", "\n", "states\n", - "\n", - "\n", - "states\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "succ\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "4\n", - "\n", - "succ_tail\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "5\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "5\n", "\n", "\n", "\n", "edges\n", - "\n", - "\n", - "edges\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "\n", - "4\n", - "\n", - "\n", - "5\n", - "\n", - "cond\n", - "\n", - "!a & b\n", - "\n", - "a & b\n", - "\n", - "a & !b\n", - "\n", - "!a\n", - "\n", - "a\n", - "\n", - "acc\n", - "\n", - "{}\n", - "\n", - "{}\n", - "\n", - "{}\n", - "\n", - "{}\n", - "\n", - "{0}\n", - "\n", - "dst\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "next_succ\n", - "\n", - "\n", - "2\n", - "\n", - "\n", - "3\n", - "\n", - "0\n", - "\n", - "\n", - "5\n", - "\n", - "0\n", - "\n", - "src\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "cond\n", + "\n", + "!a & b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a\n", + "\n", + "a\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + 
"1\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "0\n", + "\n", + "\n", + "5\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "meta\n", - "init_state:\n", - "\n", - "0\n", - "num_sets:\n", - "1\n", - "acceptance:\n", - "Inf(0)\n", - "ap_vars:\n", - "b a\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "b a\n", "\n", "\n", "\n", "\n", "props\n", - "prop_state_acc:\n", - "maybe\n", - "prop_inherently_weak:\n", - "maybe\n", - "prop_terminal:\n", - "maybe\n", - "prop_weak:\n", - "maybe\n", - "prop_very_weak:\n", - "maybe\n", - "prop_complete:\n", - "maybe\n", - "prop_universal:\n", - "yes\n", - "prop_unambiguous:\n", - "yes\n", - "prop_semi_deterministic:\n", - "yes\n", - "prop_stutter_invariant:\n", - "yes\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "yes\n", "\n", "\n", "\n", @@ -5926,7 +7331,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -5940,7 +7345,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/word.ipynb b/tests/python/word.ipynb index aacc1280a..dec900dd0 100644 --- a/tests/python/word.ipynb +++ b/tests/python/word.ipynb @@ -165,8 +165,149 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f14f57d93c0> >" + " *' at 0x7f8f5c19e5e0> >" ] }, "execution_count": 2, @@ -264,7 +405,7 @@ "$\\lnot a; \\lnot a; \\mathsf{cycle}\\{a \\land b; \\lnot a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f14f57d9ed0> >" + " *' at 0x7f8f5c19e040> >" ] }, "execution_count": 5, @@ -292,7 +433,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", @@ -389,7 +530,7 @@ 
"$\\lnot a; \\mathsf{cycle}\\{\\lnot a \\land b; a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f14f57d9660> >" + " *' at 0x7f8f5c19f8d0> >" ] }, "execution_count": 9, @@ -440,7 +581,7 @@ "$a; a \\land b; \\mathsf{cycle}\\{\\lnot a \\land \\lnot b; \\lnot a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f14f5799ea0> >" + " *' at 0x7f8f5c19e250> >" ] }, "execution_count": 11, @@ -460,7 +601,7 @@ "outputs": [ { "data": { - "image/svg+xml": [ + "text/html": [ "\n", "\n", "\n", @@ -573,8 +714,83 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f14f5799ed0> >" + " *' at 0x7f8f5c19fab0> >" ] }, "execution_count": 13, @@ -589,7 +805,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -603,7 +819,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.11.7" } }, "nbformat": 4, diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index 5424366be..fd0f33a07 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -12,7 +12,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "fb656100", "metadata": {}, @@ -26,7 +25,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "4e8b5d3f", "metadata": {}, @@ -217,8 +215,179 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,3}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{3}\n", + "<7>\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{1}\n", + "<8>\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{3}\n", + "<9>\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{1}\n", + "<10>\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "{3}\n", + "<11>\n", + "\n", + "\n", + "\n", + "5->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "{1}\n", + "<12>\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, 
"execution_count": 2, @@ -233,7 +402,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "d7629725", "metadata": {}, @@ -269,7 +437,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "15fbd4e6", "metadata": {}, @@ -299,7 +466,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "de4cdc45", "metadata": {}, @@ -334,7 +500,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "4c3bf70b", "metadata": {}, @@ -364,7 +529,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "0d865f30", "metadata": {}, @@ -394,7 +558,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "8b6b3928", "metadata": {}, @@ -438,7 +601,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "656e05f4", "metadata": {}, @@ -449,7 +611,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "5c7014e9", "metadata": {}, @@ -460,7 +621,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "2750cb1d", "metadata": {}, @@ -650,8 +810,182 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c009d7a0> >" + " *' at 0x7f92f048b0c0> >" ] }, "execution_count": 10, @@ -1073,8 +1407,409 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")))\n", + "[parity min odd 4]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#7\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#7\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "3#7\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "0#8\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + 
"\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "2#7\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "0#10\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "3#8\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "1#9\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "2#8\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "1#11\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->4\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "3#9\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->4\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->5\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c009c630> >" + " *' at 0x7f92f048b510> >" ] }, "execution_count": 11, @@ -1089,7 +1824,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "ea452913", "metadata": {}, @@ -1123,7 +1857,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "da8d9e97", "metadata": {}, @@ -1268,8 +2001,137 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ") 
| Fin(\n", + "\n", + ")) & Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c009c6c0> >" + " *' at 0x7f92f048b540> >" ] }, "execution_count": 13, @@ -1713,8 +2575,431 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))))\n", + "[parity min odd 5]\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "0#11\n", + "\n", + "\n", + "\n", + "I->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#10\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "2#11\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1#8\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "1#7\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "2#8\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "0#9\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "0#8\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "2#9\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "2#10\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "1#12\n", + "\n", + "\n", + "\n", + 
"5->9\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->4\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "2#12\n", + "\n", + "\n", + "\n", + "9->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "0#10\n", + "\n", + "\n", + "\n", + "9->12\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->9\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->10\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->9\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->11\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->12\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c009c480> >" + " *' at 0x7f92f04a4660> >" ] }, "execution_count": 14, @@ -1729,7 +3014,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9bf70138", "metadata": {}, @@ -1770,7 +3054,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "147a71a6", "metadata": {}, @@ -2110,8 +3393,323 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3,4,5}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3,4,5}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,3,4,5}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{0,1,2,3,5}\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{2,3,4,5}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{0,1,4,5}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{3,4,5}\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{2,3,5}\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{1,4,5}\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{0,1,5}\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "{1,2,3}\n", + "\n", + "\n", + "\n", + "6->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "{0,1,3}\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "{4,5}\n", + "\n", + "\n", + "\n", + "7->13\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "{2,3}\n", 
+ "\n", + "\n", + "\n", + "8->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "{4,5}\n", + "\n", + "\n", + "\n", + "9->15\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "10->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "11->17\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "12->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "{5}\n", + "<19>\n", + "\n", + "\n", + "\n", + "13->19\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "{3}\n", + "<20>\n", + "\n", + "\n", + "\n", + "14->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "21\n", + "\n", + "{5}\n", + "<21>\n", + "\n", + "\n", + "\n", + "15->21\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "22\n", + "\n", + "{1}\n", + "<22>\n", + "\n", + "\n", + "\n", + "16->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23\n", + "\n", + "{3}\n", + "<23>\n", + "\n", + "\n", + "\n", + "17->23\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24\n", + "\n", + "{1}\n", + "<24>\n", + "\n", + "\n", + "\n", + "18->24\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2441,8 +4039,323 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3,4,5}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{0,2,3,4,5}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,2,4,5}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{0,1,2,3,4}\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{2,3,4,5}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{0,1,4,5}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{2,4,5}\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{2,3,4}\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{0,4,5}\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{0,1,4}\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "{0,2,3}\n", + "\n", + "\n", + "\n", + "6->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "{0,1,2}\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "{4,5}\n", + "\n", + "\n", + "\n", + "7->13\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "8->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "{4,5}\n", + "\n", + "\n", + "\n", + "9->15\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "10->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "11->17\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "12->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "{4}\n", + "<19>\n", + "\n", + "\n", + "\n", + "13->19\n", + "\n", + 
"\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "{2}\n", + "<20>\n", + "\n", + "\n", + "\n", + "14->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "21\n", + "\n", + "{4}\n", + "<21>\n", + "\n", + "\n", + "\n", + "15->21\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "22\n", + "\n", + "{0}\n", + "<22>\n", + "\n", + "\n", + "\n", + "16->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23\n", + "\n", + "{2}\n", + "<23>\n", + "\n", + "\n", + "\n", + "17->23\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24\n", + "\n", + "{0}\n", + "<24>\n", + "\n", + "\n", + "\n", + "18->24\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2527,8 +4440,78 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3,4}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3,4}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{2,3,4}\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{3,4}\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{4}\n", + "<4>\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2638,8 +4621,103 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3,4}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{0,2,3,4}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,2}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,3,4}\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{2}\n", + "<5>\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0}\n", + "<6>\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2676,8 +4754,30 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{}\n", + "<0>\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2714,8 +4814,30 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{}\n", + "<0>\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2733,7 +4855,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "77db26c3", "metadata": {}, @@ -2771,7 +4892,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "4786f64c", "metadata": {}, @@ -2944,8 +5064,165 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,3}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + 
"\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{3}\n", + "<7>\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{1}\n", + "<8>\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{3}\n", + "<9>\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{1}\n", + "<10>\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " >" + " >" ] }, "execution_count": 18, @@ -2958,7 +5235,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9d7688b3", "metadata": {}, @@ -2967,7 +5243,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "75838579", "metadata": {}, @@ -3004,7 +5279,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "cb546bc2", "metadata": {}, @@ -4236,7 +6510,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 20, @@ -4249,7 +6523,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "0f2f00c4", "metadata": {}, @@ -4281,7 +6554,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "3a3db431", "metadata": {}, @@ -4292,7 +6564,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "6595333d", "metadata": {}, @@ -4350,7 +6621,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "1c6d4fe9", "metadata": {}, @@ -4445,7 +6715,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "ad201f45", "metadata": { @@ -4464,7 +6733,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "04d7cc51", "metadata": {}, @@ -4494,7 +6762,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "1015abb6", "metadata": {}, @@ -4503,7 +6770,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "b89a6186", "metadata": {}, @@ -5118,8 +7384,607 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity min odd 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#3\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#10\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2#4\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3#4\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4#2\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5#4\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + 
"2->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "2#8\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "3#12\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "6#7\n", + "\n", + "\n", + "\n", + "3->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "7#1\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "5#11\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "8#5\n", + "\n", + "\n", + "\n", + "5->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "5#13\n", + "\n", + "\n", + "\n", + "7->12\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "8->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "3#14\n", + "\n", + "\n", + "\n", + "8->13\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "9->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "9#9\n", + "\n", + "\n", + "\n", + "9->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->12\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "12->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->7\n", + 
"\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "12->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->12\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "13->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13->13\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "14->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "14->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "14->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00bc870> >" + " *' at 0x7f92f04a6d30> >" ] }, "execution_count": 29, @@ -5134,7 +7999,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "f039aeaa", "metadata": {}, @@ -5164,7 +8028,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "07aaab3a", "metadata": {}, @@ -5759,8 +8622,587 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#3\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#10\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2#4\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3#4\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4#2\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5#4\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "2#8\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "3#12\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "6#7\n", + "\n", + "\n", + "\n", + "3->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "7#1\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + 
"\n", + "5#11\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "8#5\n", + "\n", + "\n", + "\n", + "5->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "5#13\n", + "\n", + "\n", + "\n", + "7->12\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "8->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "8->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "3#14\n", + "\n", + "\n", + "\n", + "8->13\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "9->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "9#9\n", + "\n", + "\n", + "\n", + "9->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "10->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->12\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "12->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "12->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "12->12\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "13->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "13->13\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "14->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "14->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "14->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00bc060> >" + " *' at 0x7f92f04a6fa0> >" ] }, "execution_count": 31, @@ -5776,7 +9218,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "f14ee428", "metadata": {}, @@ -5827,7 +9268,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "ce62b966", "metadata": {}, @@ -5836,7 +9276,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "e3d0ff64", "metadata": {}, @@ -5875,7 
+9314,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "09ec9887", "metadata": {}, @@ -5884,7 +9322,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "816ee0eb", "metadata": {}, @@ -5939,7 +9376,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "f380ca5f", "metadata": {}, @@ -7188,7 +10624,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 40, @@ -7201,7 +10637,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "98a1474c", "metadata": {}, @@ -7295,7 +10730,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "c9725681", "metadata": {}, @@ -8102,8 +11536,799 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#3\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#10\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2#4\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3#4\n", + "\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4#2\n", + "\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5#4\n", + "\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "2#8\n", + "\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "3#5\n", + "\n", + "\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "6#7\n", + "\n", + "\n", + "\n", + "\n", + "3->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "7#1\n", + "\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "5#5\n", + "\n", + "\n", + "\n", + "\n", + "5->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "8#6\n", + "\n", + "\n", + "\n", + "\n", + "5->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "2#0\n", + "\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "3#0\n", + "\n", + "\n", + "\n", + "\n", + "6->13\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + 
"\n", + "6->13\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "7->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "3#11\n", + "\n", + "\n", + "\n", + "7->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "5#12\n", + "\n", + "\n", + "\n", + "7->15\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "8->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "8->13\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "3#14\n", + "\n", + "\n", + "\n", + "8->16\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "8#0\n", + "\n", + "\n", + "\n", + "\n", + "8->17\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "9->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "9#9\n", + "\n", + "\n", + "\n", + "9->18\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "10->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "10->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "10->15\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "11->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "11->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "5#13\n", + "\n", + "\n", + "\n", + "11->19\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "5#0\n", + "\n", + "\n", + "\n", + "\n", + "11->20\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "12->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "13->7\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "13->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "14->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "14->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "14->14\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "15->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "15->7\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "15->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "15->15\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "16->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "16->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "16->16\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "16->20\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "17->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "17->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "17->19\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "17->20\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "p0 & !p1\n", 
+ "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "18->9\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "18->18\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "19->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "19->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "19->19\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "21\n", + "\n", + "3#7\n", + "\n", + "\n", + "\n", + "\n", + "19->21\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "20->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "20->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "20->10\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "20->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "21->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "21->8\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "21->16\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "21->20\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00be460> >" + " *' at 0x7f92f0104390> >" ] }, "execution_count": 45, @@ -8119,7 +12344,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "5b770f23", "metadata": {}, @@ -8147,7 +12371,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9f39b61d", "metadata": {}, @@ -8464,7 +12687,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 47, @@ -8673,8 +12896,192 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0#2\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "1#1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "1#2\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "2#1\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "1#0\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "2#2\n", + "\n", + "\n", + "\n", + "3->6\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "2#0\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00bdd40> >" + " *' at 0x7f92f0104450> >" ] }, "execution_count": 48, @@ -8689,7 +13096,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "03f0eda7", "metadata": {}, @@ -8698,7 +13104,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "45596824", "metadata": {}, @@ -9013,7 +13418,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, 
"execution_count": 49, @@ -9196,8 +13601,171 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0#1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1#1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "1#2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "2#1\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "2#2\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "0#2\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "0#0\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!p\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "p\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00bf300> >" + " *' at 0x7f92f0104e40> >" ] }, "execution_count": 50, @@ -9212,7 +13780,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "15f094c0", "metadata": {}, @@ -9349,8 +13916,127 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")) & (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))) | Inf(\n", + "\n", + ")\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00be5b0> >" + " *' at 0x7f92f0104960> >" ] }, "execution_count": 51, @@ -9461,8 +14147,79 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3,4}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{0,1,2,4}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{1,2,4}\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,4}\n", + "<3>\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{1,4}\n", + "<4>\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + 
"\n" + ], "text/plain": [ - " >" + " >" ] }, "execution_count": 52, @@ -9627,8 +14384,153 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "[parity min even 4]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00bf5d0> >" + " *' at 0x7f92f0104ab0> >" ] }, "execution_count": 53, @@ -9948,7 +14850,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 55, @@ -10091,8 +14993,108 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f4240> >" + " *' at 0x7f92f0105710> >" ] }, "execution_count": 57, @@ -10238,8 +15240,134 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + 
"\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f4090> >" + " *' at 0x7f92f0104d20> >" ] }, "execution_count": 58, @@ -10263,7 +15391,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "36629c32", "metadata": {}, @@ -10611,7 +15738,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 60, @@ -10738,8 +15865,108 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f50b0> >" + " *' at 0x7f92f01059e0> >" ] }, "execution_count": 61, @@ -10868,8 +16095,117 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f52c0> >" + " *' at 0x7f92f01051d0> >" ] }, "execution_count": 62, @@ -11148,8 +16484,267 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[gen. 
Streett 1]\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "I->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "8->6\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "8->11\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "6->9\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "7->10\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "9->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "9->6\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "10->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "11->0\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f4960> >" + " *' at 0x7f92f01053b0> >" ] }, "execution_count": 63, @@ -11162,7 +16757,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "7d638d20", "metadata": {}, @@ -11243,8 +16837,71 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Inf(\n", + "\n", + ")\n", + "[Fin-less 2]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f5a10> >" + " *' at 0x7f92f0106430> >" ] }, "execution_count": 64, @@ -11354,8 +17011,64 @@ "\n", "\n" ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + 
"1->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], "text/plain": [ - " *' at 0x7f82c00f5ce0> >" + " *' at 0x7f92f01051a0> >" ] }, "execution_count": 66, @@ -11413,7 +17126,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.7" + "version": "3.11.7" } }, "nbformat": 4, From eff7966cef504921c93b9a3e619ee8c7b4482c30 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 11 Feb 2024 22:43:04 +0100 Subject: [PATCH 400/606] sccinfo: fix documentation for split_on_sets * spot/twaalgos/sccinfo.hh (split_on_sets): Fix the documentation to match what the code does. Reported by Pierre Ganty. --- spot/twaalgos/sccinfo.hh | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/sccinfo.hh b/spot/twaalgos/sccinfo.hh index d7aee5000..ea36e77e0 100644 --- a/spot/twaalgos/sccinfo.hh +++ b/spot/twaalgos/sccinfo.hh @@ -50,7 +50,7 @@ namespace spot /// edges that are kept. If some edges are ignored or cut, the /// SCC graph that you can explore with scc_info::initial() and /// scc_info::succ() will be restricted to the portion reachable - /// with "keep" edges. Additionally SCCs might be created when + /// with "keep" edges. Additionally, SCCs might be created when /// edges are cut, but those will not be reachable from /// scc_info::initial().. enum class edge_filter_choice { keep, ignore, cut }; @@ -389,7 +389,7 @@ namespace spot /// Using this option is a precondition for using succ(), /// is_useful_scc(), and is_useful_state(). TRACK_SUCCS = 4, - /// Conditionally track states if the acceptance conditions uses Fin. + /// Conditionally track states if the acceptance condition uses Fin. /// This is sufficiant for determine_unknown_acceptance(). TRACK_STATES_IF_FIN_USED = 8, /// Also compute SCCs for the unreachable states. When this is @@ -431,7 +431,7 @@ namespace spot /// \brief Compute an SCC map and gather assorted information. /// /// This takes twa_graph as input and compute its SCCs. This - /// class maps all input states to their SCCs, and vice-versa. + /// class maps all input states to their SCCs, and vice versa. /// It allows iterating over all SCCs of the automaton, and checks /// their acceptance or non-acceptance. /// @@ -546,7 +546,7 @@ namespace spot /// /// If an accepting SCC has been found, return its number. /// Otherwise return -1. Note that when the acceptance condition - /// contains Fin, -1 does not implies that all SCCs are rejecting: + /// contains Fin, -1 does not imply that all SCCs are rejecting: /// it just means that no accepting SCC is known currently. In /// that case, you might want to call /// determine_unknown_acceptance() first. @@ -758,10 +758,26 @@ namespace spot /// /// Pretend that the transitions of SCC \a scc that belong to any /// of the sets given in \a sets have been removed, and return a - /// set of automata necessary to cover all remaining states. + /// set of automata with disjoint sets of transitions that cover + /// all cycles that remain after the removal. Two cycles that + /// share a state are guaranteed to be in the same automaton. + /// State and transitions that do not belong to any cycle after + /// removal may or may not be covered by the returned automata. + /// All returned automata have at least one edge, but it is + /// also possible that they may not contain any cycle. /// /// Set \a preserve_names to True if you want to keep the original /// name of each states for display. (This is a bit slower.) 
+ /// + /// This method was originally used as a part of our generic + /// emptiness check \cite baier.19.atva . However, creating new + /// automata made it quite slow, so today our generic emptiness + /// check does not use split_on_sets(). Instead, it passes an + /// scc_and_mark_filter to scc_info in order to explore SCCs while + /// ignoring edges with some given colors and without any copy. + /// + /// \see scc_and_mark_filter + /// \see generic_emptiness_check_for_scc std::vector split_on_sets(unsigned scc, acc_cond::mark_t sets, bool preserve_names = false) const; From bcfcea62725f9740683faead2da8ad218db83919 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 16 Feb 2024 12:10:14 +0100 Subject: [PATCH 401/606] * bin/spot-x.cc: Fix some typos. --- bin/spot-x.cc | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/bin/spot-x.cc b/bin/spot-x.cc index 4f077c60d..eca2a945c 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -56,7 +56,7 @@ as product or sum of subformulas.") }, (done during translation, may create more states) and delayed-branching \ (almost similar, but done after translation to only remove states). \ Set to 1 to force branching-postponement, and to 2 \ -to force delayed-branching. By default delayed-branching is used.") }, +to force delayed-branching. By default, delayed-branching is used.") }, { DOC("comp-susp", "Set to 1 to enable compositional suspension, \ as described in our SPIN'13 paper (see Bibliography below). Set to 2, \ to build only the skeleton TGBA without composing it. Set to 0 (the \ @@ -87,7 +87,7 @@ the alternatinc cycle decomposition. Set to 0 to use paritization based \ on latest appearance record variants.") }, { DOC("scc-filter", "Set to 1 (the default) to enable \ SCC-pruning and acceptance simplification at the beginning of \ -post-processing. Transitions that are outside of accepting SCC are \ +post-processing. Transitions that are outside accepting SCC are \ removed from accepting sets, except those that enter into an accepting \ SCC. Set to 2 to remove even these entering transition from the \ accepting sets. Set to 0 to disable this SCC-pruning and acceptance \ @@ -106,7 +106,7 @@ uses the first level created, 2 uses the minimum level seen so far, and \ will compute an independent degeneralization order for each SCC it \ processes. This is currently disabled by default.") }, { DOC("degen-lskip", "If non-zero (the default), the degeneralization \ -algorithm will skip as much levels as possible for each transition. This \ +algorithm will skip as many levels as possible for each transition. This \ is enabled by default as it very often reduce the number of resulting \ states. A consequence of skipping levels is that the degeneralized \ automaton tends to have smaller cycles around the accepting states. \ @@ -176,27 +176,27 @@ The default is the value of parameter \"simul\" in --high mode, and 0 \ therwise.") }, { DOC("merge-states-min", "Number of states above which states are \ merged using a cheap approximation of a bisimulation quotient before \ -attempting simulation-based reductions. Defaults to 128. Set to 0 to \ +attempting simulation-based reductions. Defaults to 128. Set to 0 to \ never merge states.") }, { DOC("simul-max", "Number of states above which simulation-based \ reductions are skipped. Defaults to 4096. Set to 0 to disable. 
This \ applies to all simulation-based optimization, including thoses of the \ determinization algorithm.") }, { DOC("simul-trans-pruning", "Number of equivalence classes above which \ -simulation-based transition-pruning for non-deterministic automata is disabled \ -automata. Defaults to 512. Set to 0 to disable. This applies to all \ -simulation-based reduction, as well as to the simulation-based optimization of \ -the determinization algorithm. Simulation-based reduction perform a number of \ -BDD implication checks that is quadratic in the number of classes to implement \ -transition pruning. The equivalence classes is equal to the number \ -of output states of simulation-based reduction when transition-pruning is \ -disabled, it is just an upper bound otherwise.") }, +simulation-based transition-pruning for non-deterministic automata is \ +disabled. Defaults to 512. Set to 0 to disable. This applies to all \ +simulation-based reductions, as well as to the simulation-based optimization \ +of the determinization algorithm. Simulation-based reductions perform a \ +number of BDD implication checks that is quadratic in the number of classes to \ +implement transition pruning. The number of equivalence classes is equal to \ +the number of output states of the simulation-based reduction when \ +transition-pruning is disabled, it is just an upper bound otherwise.") }, { DOC("relabel-bool", "If set to a positive integer N, a formula \ with N atomic propositions or more will have its Boolean subformulas \ abstracted as atomic propositions during the translation to automaton. \ This relabeling can speeds the translation if a few Boolean subformulas \ -use a large number of atomic propositions. This relabeling make sure \ -the subexpression that are replaced do not share atomic propositions. \ +use many atomic propositions. This relabeling make sure \ +the subexpressions that are replaced do not share atomic propositions. \ By default N=4. Setting this value to 0 will disable the rewriting.") }, { DOC("relabel-overlap", "If set to a positive integer N, a formula \ with N atomic propositions or more will have its Boolean subformulas \ @@ -204,7 +204,7 @@ abstracted as atomic propositions during the translation to automaton. \ This version does not care about overlapping atomic propositions, so \ it can cause the created temporary automata to have incompatible \ combinations of atomic propositions that will be eventually be removed. \ -This relabeling is attempted after relabel-bool. By default N=8. Setting \ +This relabeling is attempted after relabel-bool. By default, N=8. Setting \ this value to 0 will disable the rewriting.") }, { DOC("wdba-minimize", "Set to 0 to disable WDBA-minimization, to 1 to \ always try it, or 2 to attempt it only on syntactic obligations or on automata \ @@ -220,7 +220,7 @@ or when det-max-states is set.") }, if the TGBA is not already deterministic. Doing so will degeneralize \ the automaton. This is disabled by default, unless sat-minimize is set.") }, { DOC("dba-simul", "Set to 1 to enable simulation-based reduction after \ -running the powerset determinization enabled by \"tba-det\". By default this \ +running the powerset determinization enabled by \"tba-det\". 
By default, this \ is disabled at low level or if parameter \"simul\" is set to 0.") }, { DOC("sat-minimize", "Set to a value between 1 and 4 to enable SAT-based minimization \ @@ -255,7 +255,7 @@ sets sat-minimize to 1 if not set differently.") }, { DOC("state-based", "Set to 1 to instruct the SAT-minimization procedure to produce \ an automaton where all outgoing transition of a state have the same acceptance \ -sets. By default this is only enabled when options -B or -S are used.") }, +sets. By default, this is only enabled when options -B or -S are used.") }, { DOC("simul-method", "Chose which simulation based reduction to use: 1 force the \ signature-based BDD implementation, 2 force matrix-based and 0, the default, \ From 31462d84bac89d0e86f06b1e3ebc11bb7e7990c1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 19 Feb 2024 11:41:33 +0100 Subject: [PATCH 402/606] style: relax the else's body check * tests/sanity/style.test: Skip the "else body should be on next line" check when else is followed by if. --- tests/sanity/style.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 52ce2a2f2..dfe263031 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -160,7 +160,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do $GREP '[ ]if ([^()]*([^()]*)[^()]*).*;' $tmp && diag 'if body should be on another line.' - $GREP -E '[ ]else.*;(|.*}.*)$' $tmp && + $GREP -E '[ ]else [^i][^f].*;(|.*}.*)$' $tmp && diag 'else body should be on another line.' $GREP '[ ]while(' $tmp && From 15b876d36842d7624fc918bdb1a63e918068021e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 17 Feb 2024 12:56:28 +0100 Subject: [PATCH 403/606] ltlsynt: allow regular expressions in --ins/--outs * bin/ltlsynt.cc: Implement this. * doc/org/ltlsynt.org, NEWS: Adjust documentation. * tests/core/ltlsynt.test: Add test cases. --- NEWS | 13 +++ bin/ltlsynt.cc | 214 ++++++++++++++++++++++++++-------------- doc/org/ltlsynt.org | 78 ++++++++------- tests/core/ltlsynt.test | 39 ++++++-- 4 files changed, 227 insertions(+), 117 deletions(-) diff --git a/NEWS b/NEWS index 0da1a2847..7508311ea 100644 --- a/NEWS +++ b/NEWS @@ -16,6 +16,19 @@ New in spot 2.11.6.dev (not yet released) will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. + - ltlsynt's --ins and --outs options will iterpret any atomic + proposition surrounded by '/' as a regular expressions. + For intance with + + ltlsynt --ins='/^in/,/env/' --outs=/^out/,/control/' ... + + any atomic proposition that start with 'in' or contains 'env' + will be considered as inputs, and those that start with 'out' + or contain 'control' will be considered output. + + By default, if neither --ins nor --outs is given, ltlsynt will + behave as if --ins='/^[iI]/' and --outs='/^[oO]/ were used. + - ltlsynt will now check for atomic propositions that always have the same polarity in the specification. When this happens, these output APs are replaced by true or false before running the diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 78dfb8829..2b0facc57 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -43,6 +43,7 @@ #include #include #include +#include enum { @@ -73,10 +74,10 @@ static const argp_option options[] = { nullptr, 0, nullptr, 0, "Input options:", 1 }, { "outs", OPT_OUTPUT, "PROPS", 0, "comma-separated list of controllable (a.k.a. 
output) atomic" - " propositions", 0 }, + " propositions, , interpreted as a regex if enclosed in slashes", 0 }, { "ins", OPT_INPUT, "PROPS", 0, "comma-separated list of uncontrollable (a.k.a. input) atomic" - " propositions", 0 }, + " propositions, interpreted as a regex if enclosed in slashes", 0 }, { "tlsf", OPT_TLSF, "FILENAME", 0, "Read a TLSF specification from FILENAME, and call syfco to " "convert it into LTL", 0 }, @@ -171,9 +172,17 @@ Exit status:\n\ 1 if at least one input problem was not realizable\n\ 2 if any error has been reported"; +// --ins and --outs, as supplied on the command-line static std::optional> all_output_aps; static std::optional> all_input_aps; +// first, separate the filters that are regular expressions from +// the others. Compile the regular expressions while we are at it. +static std::vector regex_in; +static std::vector regex_out; +// map identifier to input/output (false=input, true=output) +static std::unordered_map identifier_map; + static const char* opt_csv = nullptr; static bool opt_print_pg = false; static bool opt_print_hoa = false; @@ -690,78 +699,111 @@ namespace } } + static std::unordered_set + list_aps_in_formula(spot::formula f) + { + std::unordered_set aps; + f.traverse([&aps](spot::formula s) { + if (s.is(spot::op::ap)) + aps.emplace(s.ap_name()); + return false; + }); + return aps; + } + + // Takes a set of the atomic propositions appearing in the formula, + // and seperate them into two vectors: input APs and output APs. + static std::pair, std::vector> + filter_list_of_aps(const std::unordered_set& aps, + const char* filename, int linenum) + { + // now iterate over the list of atomic propositions to filter them + std::vector matched[2]; // 0 = input, 1 = output + for (const std::string& a: aps) + { + if (auto it = identifier_map.find(a); it != identifier_map.end()) + { + matched[it->second].push_back(a); + continue; + } + + bool found_in = false; + for (const std::regex& r: regex_in) + if (std::regex_search(a, r)) + { + found_in = true; + break; + } + bool found_out = false; + for (const std::regex& r: regex_out) + if (std::regex_search(a, r)) + { + found_out = true; + break; + } + if (all_input_aps.has_value() == all_output_aps.has_value()) + { + if (!all_input_aps.has_value()) + { + // If the atomic proposition hasn't been classified + // because neither --ins nor --out were specified, + // attempt to classify automatically using the first + // letter. + int fl = a[0]; + if (fl == 'i' || fl == 'I') + found_in = true; + else if (fl == 'o' || fl == 'O') + found_out = true; + } + if (found_in && found_out) + error_at_line(2, 0, filename, linenum, + "'%s' matches both --ins and --outs", + a.c_str()); + if (!found_in && !found_out) + { + if (all_input_aps.has_value() || all_output_aps.has_value()) + error_at_line(2, 0, filename, linenum, + "one of --ins or --outs should match '%s'", + a.c_str()); + else + error_at_line(2, 0, filename, linenum, + "since '%s' does not start with 'i' or 'o', " + "it is unclear if it is an input or " + "an output;\n use --ins or --outs", + a.c_str()); + } + } + else + { + // if we had only --ins or only --outs, anything not + // matching was was given is assumed to belong to the + // other one. 
+ if (!all_input_aps.has_value() && !found_out) + found_in = true; + else if (!all_output_aps.has_value() && !found_in) + found_out = true; + } + matched[found_out].push_back(a); + } + return {matched[0], matched[1]}; + } + + + class ltl_processor final : public job_processor { - private: - std::optional> input_aps_; - std::optional> output_aps_; - public: - ltl_processor(std::optional> input_aps_, - std::optional> output_aps_) - : input_aps_(std::move(input_aps_)), - output_aps_(std::move(output_aps_)) + ltl_processor() { } int process_formula(spot::formula f, const char* filename, int linenum) override { - auto unknown_aps = [](spot::formula f, - const std::optional>& known, - const std::optional>& known2 = {}) - { - std::vector unknown; - std::set seen; - // If we don't have --ins and --outs, we must not find an AP. - bool can_have_ap = known.has_value(); - f.traverse([&](const spot::formula& s) - { - if (s.is(spot::op::ap)) - { - if (!seen.insert(s).second) - return false; - const std::string& a = s.ap_name(); - if (!can_have_ap - || (std::find(known->begin(), known->end(), a) == known->end() - && (!known2.has_value() - || std::find(known2->begin(), - known2->end(), a) == known2->end()))) - unknown.push_back(a); - } - return false; - }); - return unknown; - }; - - // Decide which atomic propositions are input or output. - int res; - if (!input_aps_.has_value() && output_aps_.has_value()) - { - res = solve_formula(f, unknown_aps(f, output_aps_), *output_aps_); - } - else if (!output_aps_.has_value() && input_aps_.has_value()) - { - res = solve_formula(f, *input_aps_, unknown_aps(f, input_aps_)); - } - else if (!output_aps_.has_value() && !input_aps_.has_value()) - { - for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) - error_at_line(2, 0, filename, linenum, - "one of --ins or --outs should list '%s'", - ap.c_str()); - res = solve_formula(f, *input_aps_, *output_aps_); - } - else - { - for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) - error_at_line(2, 0, filename, linenum, - "both --ins and --outs are specified, " - "but '%s' is unlisted", - ap.c_str()); - res = solve_formula(f, *input_aps_, *output_aps_); - } - + std::unordered_set aps = list_aps_in_formula(f); + auto [input_aps, output_aps] = + filter_list_of_aps(aps, filename, linenum); + int res = solve_formula(f, input_aps, output_aps); if (opt_csv) print_csv(f); return res; @@ -782,7 +824,7 @@ namespace // The set of atomic proposition will be temporary set to those // given by syfco, unless they were forced from the command-line. 
bool reset_aps = false; - if (!input_aps_.has_value() && !output_aps_.has_value()) + if (!all_input_aps.has_value() && !all_output_aps.has_value()) { reset_aps = true; static char arg5[] = "--print-output-signals"; @@ -790,12 +832,17 @@ namespace const_cast(filename), nullptr }; std::string res = read_stdout_of_command(command); - output_aps_.emplace(std::vector{}); - split_aps(res, *output_aps_); + all_output_aps.emplace(std::vector{}); + split_aps(res, *all_output_aps); + for (const std::string& a: *all_output_aps) + identifier_map.emplace(a, true); } int res = process_string(tlsf_string, filename); if (reset_aps) - output_aps_.reset(); + { + all_output_aps.reset(); + identifier_map.clear(); + } return res; } @@ -1077,14 +1124,29 @@ main(int argc, char **argv) check_no_formula(); - // Check if inputs and outputs are distinct - if (all_input_aps.has_value() && all_output_aps.has_value()) - for (const std::string& ai : *all_input_aps) - if (std::find(all_output_aps->begin(), all_output_aps->end(), ai) - != all_output_aps->end()) - error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); + // Filter identifiers from regexes. + if (all_input_aps.has_value()) + for (const std::string& f: *all_input_aps) + { + unsigned sz = f.size(); + if (f[0] == '/' && f[sz - 1] == '/') + regex_in.push_back(std::regex(f.substr(1, sz - 2))); + else + identifier_map.emplace(f, false); + } + if (all_output_aps.has_value()) + for (const std::string& f: *all_output_aps) + { + unsigned sz = f.size(); + if (f[0] == '/' && f[sz - 1] == '/') + regex_out.push_back(std::regex(f.substr(1, sz - 2))); + else if (auto [it, is_new] = identifier_map.try_emplace(f, true); + !is_new && !it->second) + error(2, 0, "'%s' appears in both --ins and --outs", + f.c_str()); + } - ltl_processor processor(all_input_aps, all_output_aps); + ltl_processor processor; if (int res = processor.run(); res == 0 || res == 1) { // Diagnose unused -x options diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index cd3c23d62..be1c8d085 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -22,12 +22,15 @@ specifically as Mealy machines). In the automaton representing the controller, the acceptance condition is irrelevant and trivially true. =ltlsynt= has three mandatory options: -- =--ins=: a comma-separated list of input atomic propositions; -- =--outs=: a comma-separated list of output atomic propositions; +- =--ins=: a comma-separated list of input atomic propositions, or input regexes enclosed in slashes; +- =--outs=: a comma-separated list of output atomic propositions, or output regexes enclosed in slashes; - =--formula= or =--file=: a specification in LTL or PSL. -One of =--ins= or =--outs= may be omitted, as any atomic proposition not listed -as input can be assumed to be output and vice versa. +One of =--ins= or =--outs= may be omitted, as any atomic proposition +not listed as input can be assumed to be output and vice versa. If +both are omitted, =ltlsynts= will assume ~--ins=/^[iI]/~ and +~--outs=/^[oO]/~, i.e., atomic propositions will be classified as +input or output based on their first letter. The following example illustrates the synthesis of a controller ensuring that input =i1= and =i2= are both true initially if and only @@ -36,7 +39,7 @@ Note that this is an equivalence, not an implication. 
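Because the atomic propositions of this specification start with =i=
and =o=, the default classification applies and no =--ins= or =--outs=
option is needed: the invocation below behaves as if
~--ins='/^[iI]/' --outs='/^[oO]/'~ had been given explicitly.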
#+NAME: example #+BEGIN_SRC sh :exports both -ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' +ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' #+END_SRC #+RESULTS: example @@ -55,24 +58,27 @@ State: 0 [0&1&2] 1 [!0&2 | !1&2] 2 State: 1 -[!2] 0 +[!2] 1 State: 2 [2] 2 --END-- #+end_example The output is composed of two parts: -- The first one is a single line =REALIZABLE= or =UNREALIZABLE=; the presence of this - line, required by the [[http://http://www.syntcomp.org/][SyntComp competition]], can be disabled with option =--hide-status=. -- The second one, only present in the =REALIZABLE= case, is an automaton describing the controller. +- The first part is a single line stating =REALIZABLE= or + =UNREALIZABLE=; the presence of this line, required by the [[http://http://www.syntcomp.org/][SyntComp + competition]], can be disabled with option =--hide-status=. +- The second part, only present in the =REALIZABLE= case, is an + automaton describing the controller. -The controller contains the line =controllable-AP: 2=, which means that this automaton -should be interpreted as a Mealy machine where =o0= is part of the output. -Using the =--dot= option, makes it easier to visualize this machine. +The controller contains the line =controllable-AP: 2=, which means +that this automaton should be interpreted as a Mealy machine where +=o0= is part of the output. Using the =--dot= option, makes it easier +to visualize this machine. #+NAME: exampledot #+BEGIN_SRC sh :exports code -ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --dot +ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntex.svg :var txt=exampledot :exports results @@ -99,28 +105,32 @@ flag. This is the output format required for the [[http://syntcomp.org/][SYNTCOM #+NAME: exampleaig #+BEGIN_SRC sh :exports both -ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --aiger +ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --aiger #+END_SRC #+RESULTS: exampleaig #+begin_example REALIZABLE -aag 14 2 2 1 10 +aag 18 2 2 1 14 2 4 -6 14 -8 29 +6 23 +8 37 7 -10 7 9 -12 4 10 -14 2 12 -16 7 8 -18 4 16 -20 5 7 -22 21 19 -24 2 23 -26 3 7 -28 27 25 +10 6 9 +12 4 9 +14 5 10 +16 13 15 +18 2 17 +20 3 10 +22 19 21 +24 7 8 +26 4 24 +28 5 7 +30 27 29 +32 2 31 +34 3 7 +36 33 35 i0 i1 i1 i2 o0 o1 @@ -132,7 +142,7 @@ the controller: #+NAME: exampleaigdot #+BEGIN_SRC sh :exports code -ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --aiger --dot +ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --aiger --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntexaig.svg :var txt=exampleaigdot :exports results @@ -147,7 +157,7 @@ circles represent inversions (or negations), colored triangles are used to represent input signals (at the bottom) and output signals (at the top), and finally rectangles represent latches. A latch is a one bit register that delays the signal by one step. Initially, all -latches are assumed to contain =false=, and them emit their value from +latches are assumed to contain =false=, and they emit their value from the =L0_out= and =L1_out= rectangles at the bottom. Their input value, to be emitted at the next step, is received via the =L0_in= and =L1_in= boxes at the top. 
In =ltlsynt='s encoding, the set of latches is used @@ -172,8 +182,9 @@ be synthesized using =syfco= and =ltlsynt=: ltlsynt --tlsf FILE #+END_SRC -The above =--tlsf= option will call =syfco= to perform the conversion -and extract output signals, as if you had used: +The above =--tlsf= option will call =syfco= (which must be on your +=$PATH=) to perform the conversion and extract output signals, as if +you had used: #+BEGIN_SRC sh :export code LTL=$(syfco -f ltlxba -m fully FILE) @@ -181,6 +192,7 @@ OUT=$(syfco --print-output-signals FILE) ltlsynt --formula="$LTL" --outs="$OUT" #+END_SRC + * Internal details The tool reduces the synthesis problem to a parity game, and solves the parity @@ -237,13 +249,13 @@ be tried by separating them using commas. For instance You can also ask =ltlsynt= to print to obtained parity game into [[https://github.com/tcsprojects/pgsolver][PGSolver]] format, with the flag =--print-pg=, or in the HOA format, -using =--print-game-hoa=. These flag deactivate the resolution of the +using =--print-game-hoa=. These flags deactivate the resolution of the parity game. Note that if any of those flag is used with =--dot=, the game will be printed in the Dot format instead: #+NAME: examplegamedot #+BEGIN_SRC sh :exports code -ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot +ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntexgame.svg :var txt=examplegamedot :exports results $txt diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 7165f00c5..14a18b9c3 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -242,7 +242,7 @@ automaton has 6 states solving game with acceptance: co-Büchi game solved in X seconds EOF -ltlsynt -f "G(Fi0 && Fi1 && Fi2) -> G(i1 <-> o0)" --outs="o0" --algo=lar \ +ltlsynt -f "G(Fi0 && Fi1 && Fi2) -> G(i1 <-> o0)" --algo=lar \ --verbose --realizability 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp @@ -625,14 +625,14 @@ diff stdout expected ltlsynt --ins=a,b --outs=c,a -f 'GFa | FGc | GFb' 2>stderr && : test $? -eq 2 -grep "'a' appears both" stderr +grep "'a' appears in both" stderr ltlsynt --ins=a --outs=c -f 'GFa | FGb | GFc' 2>stderr && : test $? -eq 2 -grep "both.*but 'b' is unlisted" stderr +grep "one.*should match 'b'" stderr ltlsynt -f 'GFa | FGb | GFc' 2>stderr && : test $? -eq 2 -grep "one of --ins or --outs" stderr +grep "[-]-ins or --outs" stderr # Try to find a direct strategy for GFa <-> GFb and a direct strategy for # Gc @@ -903,7 +903,7 @@ ltlsynt --outs="" -f "GFb" | grep "UNREALIZABLE" ltlsynt --outs="" -f "1" ltlsynt --outs="" --ins="" -f "GFa" 2>&1 | \ - grep "both --ins and --outs are specified" + grep "one of --ins or --outs should match 'a'" LTL='(((((G (((((((g_0) && (G (! (r_0)))) -> (F (! (g_0)))) && (((g_0) && (X ((! (r_0)) && (! (g_0))))) -> (X ((r_0) R (! 
(g_0)))))) && (((g_1) && @@ -1099,17 +1099,23 @@ s7="G(o07 <-> (i7 & i8)) & G((i7 & i8) -> (o11 U i3)) & GFo12 & G(o04 <-> " s8="(i4 & i6)) & G(o05 <-> !(i4 & i6)) & G(o15 <-> (i7 & i8)) & G(i7 -> o02) & " s9="G((!i7 & !(i1 & i2 & !i5 & i6)) -> o03) & G(o01 <-> (i1 & i2 & !i5 & i6))))" s=$s1$s2$s3$s4$s5$s6$s7$s8$s9 -ltlsynt --decomp=yes -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >out -ltlsynt --decomp=no -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >>out +ltlsynt --decomp=yes -f "$s" --realizability >out +ltlsynt --decomp=no --outs='/^o[0-9]*$/' -f "$s" --realizability >>out +ltlsynt --decomp=no --outs='/^o[0-9]$/' -f "$s" --realizability >>out && : +ltlsynt -f "$s" --ins='/^i[0-9]*$/' --realizability >>out cat >expected < /dev/null +ltlsynt -f "$f1" --outs="p1, p0" --aiger > out1.hoa +ltlsynt -f "$f1" --outs="p1, /^p/" --aiger > out2.hoa +diff out1.hoa out2.hoa # issue #557 ltlsynt -f 'G(in1 <-> out0) & G(in0 <-> out1)' --ins=in1,in0 --verb 2>err >out @@ -1120,3 +1126,20 @@ cat >err2.ex < XXout2) && G((in1 | !in2) -> Fout2)' \ + --realizability >out && : +test $? -eq 1 +# specitying --outs=in2 should have priority over regular expressions +ltlsynt -f 'G((in1 && in2) <-> XXout2) && G((in1 | !in2) -> Fout2)' \ + --realizability --ins='/^i/' --outs='out2,in2' >>out +cat >expected < input)' 2>err && : +test $? -eq 2 +grep 'controlenv.*matches both' err From 82311c3e3b7ddc9d74a5642e10bddf74cbd6966b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 17 Feb 2024 16:57:31 +0100 Subject: [PATCH 404/606] ltlsynt: fix the case where AP removal is disabled and decomp fails * bin/ltlsynt.cc: Correctly update the output variables in the case decomposition failed and AP removal is disabled. * tests/core/ltlsynt.test: Add a test case. --- bin/ltlsynt.cc | 6 ++++++ tests/core/ltlsynt.test | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 2b0facc57..d2957855f 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -467,6 +467,12 @@ namespace sub_outs[0].insert(ap); } } + else + { + for (const std::string& apstr: output_aps) + sub_outs[0].insert(spot::formula::ap(apstr)); + } + } std::vector> sub_outs_str; std::transform(sub_outs.begin(), sub_outs.end(), diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 14a18b9c3..28b846abd 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1143,3 +1143,7 @@ ltlsynt --ins='/^in/,/env/' --outs='/^out/,/control/' \ -f 'G(controlenv <-> input)' 2>err && : test $? -eq 2 grep 'controlenv.*matches both' err + + +ltlsynt --polarity=1 --global-e=1 -f 'G(i -> Xo) & G(!i -> F!o)' --real +ltlsynt --polarity=0 --global-e=0 -f 'G(i -> Xo) & G(!i -> F!o)' --real From 83cabfa6f932ce9aba3cb0c82c03ced7c77c518a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 19 Feb 2024 11:11:17 +0100 Subject: [PATCH 405/606] ltlsynt: fix suggested references * bin/man/ltlsynt.x: Add the Dissecting ltlsynt paper. * doc/org/citing.org: Put Adrien in italics. 
--- bin/man/ltlsynt.x | 13 ++++++++++--- doc/org/citing.org | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/bin/man/ltlsynt.x b/bin/man/ltlsynt.x index 3f99d94f9..dea0f1bb7 100644 --- a/bin/man/ltlsynt.x +++ b/bin/man/ltlsynt.x @@ -4,8 +4,15 @@ ltlsynt \- reactive synthesis from LTL specifications [BIBLIOGRAPHY] If you would like to give a reference to this tool in an article, -we suggest you cite the following paper: +we suggest you cite the following papers: .TP \(bu -Thibaud Michaud, Maximilien Colange: Reactive Synthesis from LTL -Specification with Spot. Proceedings of SYNT@CAV'18. +Florian Renkin, Philipp Schlehuber-Caissier, Alexandre Duret-Lutz, +and Adrien Pommellet. +Dissecting ltlsynt. In Formal Methods in System Design, 2023. + +.TP +\(bu +Thibaud Michaud and Maximilien Colange. +Reactive Synthesis from LTL Specification with Spot. +In proceedings of SYNT@CAV'18. diff --git a/doc/org/citing.org b/doc/org/citing.org index bc17e0d6c..93ec52ca3 100644 --- a/doc/org/citing.org +++ b/doc/org/citing.org @@ -89,7 +89,7 @@ be more specific about a particular aspect of Spot. - *Dissecting ltlsynt*, /Florian Renkin/, /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, - and Adrien Pommellet. + and /Adrien Pommellet/. In Formal Methods in System Design, 2023. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.23.scp][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/renkin.23.scp.pdf][pdf]]) Discuss the implementation of [[file:ltlsynt.org][=ltlsynt=]]. From 60f046a574533bbdce3e154d7d7cd4de3d925a84 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 29 Feb 2024 16:54:14 +0100 Subject: [PATCH 406/606] add intersection checks between words and automata Several people have asked for a way to check whether a word is accepted by an automaton, including at least Jonah Romero and Scott Buckley. So it's time we have it. * spot/twa/twa.hh, spot/twa/twa.cc, spot/twaalgos/word.hh (intersects): Add the new variant. * spot/twa/fwd.hh: Forward declare twa_word, so that we can use it in twa.hh. * spot/twaalgos/forq_contains.cc: Use the new intersection check. * tests/python/word.ipynb, NEWS: Mention it. * THANKS: Add Scott Buckley. --- NEWS | 5 ++++ THANKS | 1 + spot/twa/fwd.hh | 4 +++ spot/twa/twa.cc | 6 +++++ spot/twa/twa.hh | 7 +++++ spot/twaalgos/forq_contains.cc | 6 ++--- spot/twaalgos/word.hh | 12 +++++++-- tests/python/word.ipynb | 49 +++++++++++++++++++++++++++++----- 8 files changed, 78 insertions(+), 12 deletions(-) diff --git a/NEWS b/NEWS index 7508311ea..81a5272a7 100644 --- a/NEWS +++ b/NEWS @@ -114,6 +114,11 @@ New in spot 2.11.6.dev (not yet released) The above also impacts autfilt --included-in option. + - Given a twa_word_ptr W and a twa_ptr A both sharing the same + alphabet, one can now write W->intersects(A) or A->intersects(W) + instead of the longuer W->as_automaton()->intersects(A) or + A->intersects(W->as_automaton()). + - spot::scc_info has a new option PROCESS_UNREACHABLE_STATES that causes it to enumerate even unreachable SCCs. 
diff --git a/THANKS b/THANKS index 4eb4a598c..cf923eaec 100644 --- a/THANKS +++ b/THANKS @@ -57,6 +57,7 @@ Reuben Rowe Roei Nahum Rüdiger Ehlers Samuel Judson +Scott Buckley Shachar Itzhaky Shengping Shaw Shufang Zhu diff --git a/spot/twa/fwd.hh b/spot/twa/fwd.hh index 839844875..59f6853fd 100644 --- a/spot/twa/fwd.hh +++ b/spot/twa/fwd.hh @@ -36,4 +36,8 @@ namespace spot class twa_product; typedef std::shared_ptr const_twa_product_ptr; typedef std::shared_ptr twa_product_ptr; + + struct twa_word; + typedef std::shared_ptr const_twa_word_ptr; + typedef std::shared_ptr twa_word_ptr; } diff --git a/spot/twa/twa.cc b/spot/twa/twa.cc index ab4c12ef6..66d518f65 100644 --- a/spot/twa/twa.cc +++ b/spot/twa/twa.cc @@ -155,6 +155,12 @@ namespace spot return !otf_product(self, other)->is_empty(); } + bool + twa::intersects(const_twa_word_ptr w) const + { + return intersects(w->as_automaton()); + } + twa_run_ptr twa::intersecting_run(const_twa_ptr other) const { diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index 85ce382e3..85a755873 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -865,6 +865,13 @@ namespace spot /// this case an explicit product is performed. virtual bool intersects(const_twa_ptr other) const; + /// \brief Check if this automaton _word intersects a word. + /// + /// If the twa_word actually represent a word (i.e., if each + /// Boolean formula that label its steps have a unique satisfying + /// valuation), this is equivalent to a membership test. + virtual bool intersects(const_twa_word_ptr w) const; + /// \brief Return an accepting run recognizing a word accepted by /// two automata. /// diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 106553160..4d4264ae2 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -578,10 +578,8 @@ namespace spot::forq auto shared_dict = setup.context.A.aut->get_dict(); auto current_word = util::as_twa_word_ptr(shared_dict, word_of_u, word_of_v); - if (!setup.context.B.aut->intersects(current_word->as_automaton())) - { - return current_word; - } + if (!current_word->intersects(setup.context.B.aut)) + return current_word; } } return nullptr; diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index 68538d2d3..171b14ce3 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -85,6 +85,16 @@ namespace spot /// This is useful to evaluate a word on an automaton. twa_graph_ptr as_automaton() const; + /// \brief Check if a the twa_word intersect another automaton. + /// + /// If the twa_word actually represent a word (i.e., if each + /// Boolean formula that label its steps have a unique satisfying + /// valuation), this is equivalent to a membership test. + bool intersects(const_twa_ptr aut) const + { + return as_automaton()->intersects(aut); + } + /// \brief Print a twa_word /// /// Words are printed as @@ -101,8 +111,6 @@ namespace spot bdd_dict_ptr dict_; }; - typedef std::shared_ptr twa_word_ptr; - /// \brief Create an empty twa_word /// /// Note that empty twa_word are invalid and cannot be printed. 
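The new member functions make this kind of membership test a one-liner
from C++ as well.  A minimal usage sketch, assuming the usual parsing
and translation helpers from <spot/tl/parse.hh>,
<spot/twaalgos/translate.hh>, and <spot/twaalgos/word.hh>:

  #include <spot/tl/parse.hh>
  #include <spot/twaalgos/translate.hh>
  #include <spot/twaalgos/word.hh>
  #include <iostream>

  int main()
  {
    spot::parsed_formula pf = spot::parse_infix_psl("GFa");
    spot::twa_graph_ptr aut = spot::translator().run(pf.f);
    // Parse a lasso-shaped word over the same BDD dictionary.
    spot::twa_word_ptr w = spot::parse_word("cycle{a}", aut->get_dict());
    std::cout << w->intersects(aut) << '\n';                 // new shortcut
    std::cout << w->as_automaton()->intersects(aut) << '\n'; // older form
  }

The notebook below demonstrates the same check from Python.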
diff --git a/tests/python/word.ipynb b/tests/python/word.ipynb index dec900dd0..e2e9441ca 100644 --- a/tests/python/word.ipynb +++ b/tests/python/word.ipynb @@ -307,7 +307,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f8f5c19e5e0> >" + " *' at 0x7f1f80568c60> >" ] }, "execution_count": 2, @@ -405,7 +405,7 @@ "$\\lnot a; \\lnot a; \\mathsf{cycle}\\{a \\land b; \\lnot a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f8f5c19e040> >" + " *' at 0x7f1f805686f0> >" ] }, "execution_count": 5, @@ -530,7 +530,7 @@ "$\\lnot a; \\mathsf{cycle}\\{\\lnot a \\land b; a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f8f5c19f8d0> >" + " *' at 0x7f1f8060d770> >" ] }, "execution_count": 9, @@ -581,7 +581,7 @@ "$a; a \\land b; \\mathsf{cycle}\\{\\lnot a \\land \\lnot b; \\lnot a \\land b\\}$" ], "text/plain": [ - " *' at 0x7f8f5c19e250> >" + " *' at 0x7f1f8060df80> >" ] }, "execution_count": 11, @@ -790,7 +790,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f8f5c19fab0> >" + " *' at 0x7f1f8060d800> >" ] }, "execution_count": 13, @@ -801,6 +801,43 @@ "source": [ "w.as_automaton()" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To check if a word is accepted by an automaton, you can use `intersects`. The name `intersects` actually makes more sense than `accepts` or `accepted`, because a word actually describes a set of words because of the don't care atomic propositions." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "False\n", + "False\n", + "True\n", + "True\n" + ] + } + ], + "source": [ + "print(w.intersects(aut))\n", + "print(aut.intersects(w))\n", + "print(word.intersects(aut))\n", + "print(aut.intersects(word))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -819,7 +856,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, From 1b81ecb80ce62781c7c9bccd7968e4470f45e418 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 6 Mar 2024 17:53:23 +0100 Subject: [PATCH 407/606] dualize: should not call cleanup_acceptance_here MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on a report by Emmanuel Filiot, who was surprized that dualizing Büchi did not always produce co-Büchi. * spot/twaalgos/dualize.cc: Remove the call to cleanup_acceptance_here. * spot/twaalgos/dualize.hh: Improve documentation. * NEWS: Mention the possible backward incompatible change. * tests/core/dualize.test, tests/python/dualize.py, tests/python/pdegen.py: Adjust test cases. * spot/twaalgos/complement.cc (complement): Call cleanup_acceptance_here when dualize() returns a smaller automaton. * THANKS: Add Emmanuel. --- NEWS | 7 ++++++ THANKS | 1 + spot/twaalgos/complement.cc | 15 ++++++++++++- spot/twaalgos/dualize.cc | 4 +--- spot/twaalgos/dualize.hh | 45 +++++++++++++++++++++++++++++-------- tests/core/dualize.test | 9 +++----- tests/python/dualize.py | 7 +++--- tests/python/pdegen.py | 2 ++ 8 files changed, 67 insertions(+), 23 deletions(-) diff --git a/NEWS b/NEWS index 81a5272a7..694d5bf6f 100644 --- a/NEWS +++ b/NEWS @@ -148,6 +148,13 @@ New in spot 2.11.6.dev (not yet released) should raise an exception of return nullptr if it requires more acceptance sets than supported. + - [Potential backward incompatibility] spot::dualize() does not call + cleanup_acceptance() anymore. 
This change ensures that the dual + of a Büchi automaton will always be a co-Büchi automaton. + Previously cleanup_acceptance(), which remove unused colors from + the acceptance, was sometimes able to simplify co-Büchi to "t", + causing surprizes. + Python: - The spot.automata() and spot.automaton() functions now accept a diff --git a/THANKS b/THANKS index cf923eaec..307d34999 100644 --- a/THANKS +++ b/THANKS @@ -15,6 +15,7 @@ David Dokoupil David Müller Dávid Smolka Edmond Irani Liu +Emmanuel Filiot Ernesto Posse Étienne Renault Fabrice Kordon diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 7e38e519a..00e9cb0ce 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -25,6 +25,7 @@ #include #include #include +#include namespace spot { @@ -510,7 +511,19 @@ namespace spot complement(const const_twa_graph_ptr& aut, const output_aborter* aborter) { if (!aut->is_existential() || is_universal(aut)) - return dualize(aut); + { + twa_graph_ptr res = dualize(aut); + // There are cases with "t" acceptance that get converted to + // Büchi during completion, then dualized to co-Büchi, but the + // acceptance is still not used. To try to clean it up in this + // case. + if (aut->num_sets() == 0 || + // Also dualize removes sink states, but doesn't simplify + // the acceptance condition. + res->num_states() < aut->num_states()) + cleanup_acceptance_here(res); + return res; + } if (is_very_weak_automaton(aut)) // removing alternation may need more acceptance sets than we support. // in this case res==nullptr and we try the other determinization. diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index 4e2261e12..fc60d78af 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -308,7 +308,7 @@ namespace spot } if (is_deterministic(aut_)) { - res = cleanup_acceptance_here(spot::complete(aut_)); + res = spot::complete(aut_); res->set_acceptance(res->num_sets(), res->get_acceptance().complement()); // Complementing the acceptance is likely to break the terminal @@ -368,8 +368,6 @@ namespace spot res->prop_terminal(trival::maybe()); if (!has_sink) res->prop_complete(true); - - cleanup_acceptance_here(res); return res; } }; diff --git a/spot/twaalgos/dualize.hh b/spot/twaalgos/dualize.hh index d84be8173..4d79df66b 100644 --- a/spot/twaalgos/dualize.hh +++ b/spot/twaalgos/dualize.hh @@ -26,23 +26,50 @@ namespace spot /// \ingroup twa_misc /// \brief Complement an automaton by dualizing it. /// - /// Given an automaton \a aut of any type, produces the dual as output. The - /// automaton will be completed if it isn't already. If it is deterministic - /// and complete, complementing the automaton can be done by just - /// complementing the acceptance condition. + /// Given an automaton \a aut of any type, produces the dual as + /// output. Before dualization, the automaton will be completed if + /// it isn't already, but any sink state in the output might then be + /// removed. /// - /// In particular, this implies that an input that use generalized Büchi will - /// be output as generalized co-Büchi. + /// Dualizing the automaton is done by interpreting the outgoing + /// transitions of a state as a Boolean function, and then swapping + /// operators ∧ and ̇∨. This first step does not have to be done on + /// deterministic automata. Additionally, the acceptance condition + /// is dualized by swapping operators ∧ and ̇∨, and swapping Inf and + /// Fin. 
/// - /// Functions like to_generalized_buchi() or remove_fin() are frequently - /// called on existential automata after dualize() to obtain an easier - /// acceptance condition, but maybe at the cost of losing determinism. + /// For instance, the dual of a generalized Büchi automaton will be + /// a generalized co-Büchi automaton. + /// + /// If the input acceptance condition accepts every infinite path + /// (such as "t" or "Inf(0)|Fin(0)") and the automaton is not + /// complete, then the input automaton will be assumed to have Büchi + /// acceptance in order to complete it, and the output will then + /// have co-Büchi acceptance. + /// + /// Due to a defect in the way transition-based alternating automata + /// are represented in Spot and in the HOA format, existential + /// automata with transition-based acceptance will be converted to + /// use state-based acceptance before dualization. See + /// https://github.com/adl/hoaf/issues/68 for more information. /// /// If the input automaton is deterministic, the output will be deterministic. /// If the input automaton is existential, the output will be universal. /// If the input automaton is universal, the output will be existential. /// Finally, if the input automaton is alternating, the result is alternating. /// More can be found on page 22 (Definition 1.6) of \cite loding.98.msc . + /// + /// Functions like to_generalized_buchi() or remove_fin() are frequently + /// called on existential automata after dualize() to obtain an easier + /// acceptance condition, but maybe at the cost of losing determinism. + /// + /// Up to version 2.11.6, this function used to call + /// cleanup_acceptance_here() to simplify the acceptance condition + /// after dualization. This caused some surprizes, users expected + /// the dual of a Büchi automaton to be a co-Büchi automaton, but + /// cleanup_acceptance_here() sometimes reduced the condition to `t` + /// when all states where accepting. This function is not called + /// anymore since version 2.12. 
SPOT_API twa_graph_ptr dualize(const const_twa_graph_ptr& aut); } diff --git a/tests/core/dualize.test b/tests/core/dualize.test index c0ba567d4..a1b443d58 100755 --- a/tests/core/dualize.test +++ b/tests/core/dualize.test @@ -49,8 +49,7 @@ HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" -acc-name: all -Acceptance: 0 t +Acceptance: 3 (Fin(0)|Fin(1)) | Inf(2) properties: trans-labels explicit-labels trans-acc complete properties: deterministic --BODY-- @@ -114,8 +113,7 @@ HOA: v1 States: 9 Start: 8 AP: 2 "p0" "p1" -acc-name: co-Buchi -Acceptance: 1 Fin(0) +Acceptance: 2 Fin(0) & Fin(1) properties: trans-labels explicit-labels state-acc univ-branch --BODY-- State: 0 @@ -148,8 +146,7 @@ HOA: v1 States: 9 Start: 8 AP: 2 "p0" "p1" -acc-name: co-Buchi -Acceptance: 1 Fin(0) +Acceptance: 2 Fin(0) & Fin(1) properties: trans-labels explicit-labels state-acc univ-branch Alias: @a 0&!1 Alias: @b !0&!1 diff --git a/tests/python/dualize.py b/tests/python/dualize.py index bfeb20b38..dd06be362 100755 --- a/tests/python/dualize.py +++ b/tests/python/dualize.py @@ -191,8 +191,8 @@ tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" -acc-name: all -Acceptance: 0 t +acc-name: co-Buchi +Acceptance: 1 Fin(0) properties: trans-labels explicit-labels state-acc deterministic --BODY-- State: 0 @@ -227,8 +227,7 @@ tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" -acc-name: all -Acceptance: 0 t +Acceptance: 2 Fin(0) & Fin(1) properties: trans-labels explicit-labels state-acc deterministic --BODY-- State: 0 diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 1e886f280..4f5796ff9 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -115,6 +115,8 @@ tc.assertEqual(dd.num_states(), 1) tc.assertEqual(str(dd.get_acceptance()), 'Inf(1) & Fin(0)') e = spot.dualize(b) +spot.cleanup_acceptance_here(e) +tc.assertEqual(str(e.get_acceptance()), 'Fin(0)|Fin(1)') de = spot.partial_degeneralize(e, [0, 1]) tc.assertTrue(de.equivalent_to(e)) tc.assertEqual(de.num_states(), 4) From 0d4e93a4ec9df9d9e3fc35ca4299dd44d21c486b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 11 Mar 2024 17:38:13 +0100 Subject: [PATCH 408/606] [buddy] add missing typedefs to minterm_iterator * src/bddx.h: Here. 
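For context, these typedefs (together with the new default
constructor) are what std::iterator_traits and the standard forward
iterator requirements expect, so minterm_iterator can now be treated
like an ordinary forward iterator.  A minimal sketch of what this
enables, assuming only BuDDy's C++ interface from bddx.h and a C++17
compiler (needed for the heterogeneous begin/end pair in the
range-for):

  #include <bddx.h>
  #include <iterator>
  #include <type_traits>
  #include <iostream>

  // The added typedefs make std::iterator_traits usable here.
  static_assert(std::is_same_v<
    std::iterator_traits<minterms_of::minterm_iterator>::iterator_category,
    std::forward_iterator_tag>);

  int main()
  {
    bdd_init(1000, 1000);
    bdd_setvarnum(2);
    bdd a = bdd_ithvar(0), b = bdd_ithvar(1);
    // Enumerate the minterms of a|b over the variable set {a,b}.
    for (bdd m: minterms_of(a | b, a & b))
      std::cout << m.id() << '\n';
    bdd_done();
  }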
--- buddy/src/bddx.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/buddy/src/bddx.h b/buddy/src/bddx.h index b3cb377a1..72c0fa863 100644 --- a/buddy/src/bddx.h +++ b/buddy/src/bddx.h @@ -1,5 +1,5 @@ /*======================================================================== - Copyright (C) 1996-2003, 2021 by Jorn Lind-Nielsen + Copyright (C) 1996-2003 by Jorn Lind-Nielsen All rights reserved Permission is hereby granted, without written agreement and without @@ -502,6 +502,7 @@ BUDDY_API_VAR const BDD bddtrue; #ifdef CPLUSPLUS #include #include +#include /*=== User BDD class ===================================================*/ @@ -1134,7 +1135,18 @@ public: class minterm_iterator { public: - minterm_iterator(minterms_of* me) + typedef bdd value_type; + typedef value_type& reference; + typedef value_type* pointer; + typedef std::ptrdiff_t difference_type; + typedef std::forward_iterator_tag iterator_category; + + explicit minterm_iterator() noexcept + : me_(nullptr) + { + } + + minterm_iterator(minterms_of* me) noexcept : me_(me) { } From 75e552fdac8dff6d24b71117de0ff63cc5a6ad01 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 11 Mar 2024 17:38:52 +0100 Subject: [PATCH 409/606] python: add bindings for BuDDy's minterms_of minterms_of was introduced in BuDDy with Spot 2.10, but wasn't properly binded in Python. * python/buddy.i: Add bindings. * tests/python/bdditer.py: Test them. --- python/buddy.i | 101 ++++++++++++++++++++++++++++++++++++++++ tests/python/bdditer.py | 6 +++ 2 files changed, 107 insertions(+) diff --git a/python/buddy.i b/python/buddy.i index 99eb1cbb1..63156aa3d 100644 --- a/python/buddy.i +++ b/python/buddy.i @@ -57,14 +57,82 @@ %module buddy %include "std_string.i" +%include "std_container.i" %{ #include +#include #include "bddx.h" #include "fddx.h" #include "bvecx.h" + %} + +// Swig come with iterators that implement a decrement method. This +// is not supported in our "successor" iterators. 
+%fragment("ForwardNullTerminatedIterator_T","header",fragment="SwigPyIterator_T") { +namespace swig +{ + template::value_type, + typename FromOper = from_oper > + class ForwardNullTerminatedIterator_T : public SwigPyIterator + { + public: + FromOper from; + typedef OutIterator out_iterator; + typedef ValueType value_type; + typedef SwigPyIterator base; + + ForwardNullTerminatedIterator_T(out_iterator curr, PyObject *seq) + : SwigPyIterator(seq), current(curr) + { + } + + PyObject *value() const { + if (current == nullptr) { + throw stop_iteration(); + } else { + return from(static_cast(*(current))); + } + } + + SwigPyIterator *copy() const + { + return new ForwardNullTerminatedIterator_T(*this); + } + + SwigPyIterator *incr(size_t n = 1) + { + while (n--) { + if (current == nullptr) { + throw stop_iteration(); + } else { + ++current; + } + } + return this; + } + + protected: + out_iterator current; + }; + + + template + inline SwigPyIterator* + make_forward_null_terminated_iterator(const OutIter& begin, PyObject *seq = 0) + { + return new ForwardNullTerminatedIterator_T(begin, seq); + } +}} +%fragment("ForwardNullTerminatedIterator_T"); + +%traits_swigtype(bdd); +%fragment(SWIG_Traits_frag(bdd)); + %typemap(in) (int* input_buf, int input_buf_size) { if (!PySequence_Check($input)) { @@ -362,3 +430,36 @@ class bvec return (*self)[i]; } } + + +class minterms_of +{ +public: + class minterm_iterator + { + public: + minterm_iterator(minterms_of* me); + minterm_iterator& operator++(); + void operator++(int); + bool operator==(std::nullptr_t) const; + bool operator!=(std::nullptr_t) const; + bdd operator*() const; + }; + + minterms_of(bdd fun, bdd vars); + ~minterms_of(); + minterm_iterator begin(); + std::nullptr_t end() const; + bool done() const; + bdd operator*() const; + void operator++(); +}; + +%extend minterms_of { + %newobject __iter__(PyObject **PYTHON_SELF); + swig::SwigPyIterator* __iter__(PyObject **PYTHON_SELF) + { + return swig::make_forward_null_terminated_iterator(self->begin(), + *PYTHON_SELF); + } +} diff --git a/tests/python/bdditer.py b/tests/python/bdditer.py index 2cee87a4e..a49ab8dde 100644 --- a/tests/python/bdditer.py +++ b/tests/python/bdditer.py @@ -65,6 +65,12 @@ del res2 del c gcollect() +res3 = [] +for i in buddy.minterms_of(buddy.bddtrue, run.aut.ap_vars()): + res3.append(str(spot.bdd_to_formula(i))) +tc.assertEqual(res3, ['!a & !b', '!a & b', 'a & !b', 'a & b']) +gcollect() + f = spot.bdd_to_formula(b) tc.assertTrue(f._is(spot.op_And)) tc.assertTrue(f[0]._is(spot.op_ap)) From 1e512d422b4063a9678ac88ad357b57ce516a775 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 17 Mar 2024 21:27:04 +0100 Subject: [PATCH 410/606] dualize: improve performance on small automata with large |AP| For issue #566. * spot/twaalgos/dualize.cc (dualizer::copy_edges): Implement another loop to be used when the number of outgoing edges of a state is smaller than the number of AP. * tests/core/566.test: New file. * tests/Makefile.am: Add it. * NEWS: Mention the improvement. --- NEWS | 4 ++ spot/twaalgos/dualize.cc | 79 +++++++++++++++++----- tests/Makefile.am | 1 + tests/core/566.test | 137 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 205 insertions(+), 16 deletions(-) create mode 100755 tests/core/566.test diff --git a/NEWS b/NEWS index 694d5bf6f..b875bfd6e 100644 --- a/NEWS +++ b/NEWS @@ -148,6 +148,10 @@ New in spot 2.11.6.dev (not yet released) should raise an exception of return nullptr if it requires more acceptance sets than supported. 
+ - spot::dualize() learned a trick to be faster on states that have + less outgoing edges than atomic proposition declared on the + automaton. (Issue #566.) + - [Potential backward incompatibility] spot::dualize() does not call cleanup_acceptance() anymore. This change ensures that the dual of a Büchi automaton will always be a co-Büchi automaton. diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index fc60d78af..cbef6d451 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -146,29 +146,74 @@ namespace spot { std::vector st; unsigned n = aut_->num_states(); + unsigned n_ap = res->ap().size(); + std::vector labels; + for (unsigned i = 0; i < n; ++i) { - bdd delta = dualized_transition_function(i); + unsigned n_succs; + bdd delta = dualized_transition_function(i, n_succs); + if (delta == bddfalse) + continue; bdd ap = bdd_exist(bdd_support(delta), all_vars_); bdd letters = bdd_exist(delta, all_vars_); - for (bdd oneletter: minterms_of(letters, ap)) - { - minato_isop isop(bdd_restrict(delta, oneletter)); - bdd dest; + // Create edges with label LABEL, and destinations states + // encoded in the Boolean function RESTRICTED_DELTA. + auto create_edges = [&](bdd label, bdd restricted_delta) { + minato_isop isop(restricted_delta); + bdd dest; + while ((dest = isop.next()) != bddfalse) + { + st.clear(); + acc_cond::mark_t m = bdd_to_state(dest, st); + if (st.empty()) + { + st.push_back(true_state_); + if (aut_->prop_state_acc()) + m = aut_->state_acc_sets(i); + } + res->new_univ_edge(i, st.begin(), st.end(), label, m); + } + }; - while ((dest = isop.next()) != bddfalse) + // Iterating over all mineterms can be very slow when |AP| + // is large (see issue #566) . The else branch implements + // another approach that should be exponential in the + // number of successors instead of in the number of atomic + // propositions. + if (n_succs > n_ap) + { + for (bdd oneletter: minterms_of(letters, ap)) + create_edges(oneletter, bdd_restrict(delta, oneletter)); + } + else + { + // gather all labels in the successors of state, + // and split those labels so they are all disjoint. + // + // LABELS may have 2^{n_succ} elements after this + // loop, but since n_succ <= n_ap, we expect this + // approach to be faster. + labels.clear(); + labels.reserve(n_succs); + labels.push_back(bddtrue); + for (auto& e: aut_->out(i)) { - st.clear(); - acc_cond::mark_t m = bdd_to_state(dest, st); - if (st.empty()) - { - st.push_back(true_state_); - if (aut_->prop_state_acc()) - m = aut_->state_acc_sets(i); - } - res->new_univ_edge(i, st.begin(), st.end(), oneletter, m); + // make sure we don't realloc during the loop + labels.reserve(labels.size() * 2); + // Do not use a range-based or iterator-based for + // loop here, as push_back invalidates the end + // iterator. + for (unsigned cur = 0, sz = labels.size(); cur < sz; ++cur) + if (bdd common = labels[cur] & e.cond; common != bddfalse) + { + labels[cur] -= e.cond; + labels.push_back(common); + } } + for (auto& cur: labels) + create_edges(cur, bdd_relprod(cur, delta, res->ap_vars())); } } } @@ -223,14 +268,16 @@ namespace spot } // Returns the dualized transition function of any input state as a bdd. 
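+    // (The new N_SUCC output parameter reports how many outgoing edges
+    // STATE_ID has; copy_edges() uses it to decide whether to iterate
+    // over minterms or over the disjoint labels gathered from those
+    // successors.)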
- bdd dualized_transition_function(unsigned state_id) + bdd dualized_transition_function(unsigned state_id, unsigned& n_succ) { + n_succ = 0; if (state_to_var_[state_id] == bddtrue) return bddfalse; bdd res = bddtrue; for (auto& e : aut_->out(state_id)) { + ++n_succ; bdd dest = bddfalse; for (unsigned d : aut_->univ_dests(e)) dest |= state_to_var_[d]; diff --git a/tests/Makefile.am b/tests/Makefile.am index 67e5bb4d6..4a36a725b 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -220,6 +220,7 @@ TESTS_twa = \ core/385.test \ core/521.test \ core/522.test \ + core/566.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ diff --git a/tests/core/566.test b/tests/core/566.test new file mode 100755 index 000000000..3adcc3bc6 --- /dev/null +++ b/tests/core/566.test @@ -0,0 +1,137 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +cat >21.hoa < Date: Sun, 17 Mar 2024 22:42:18 +0100 Subject: [PATCH 411/606] alternation: speed up remove_alternation when few labels are used Related to issue #566. * spot/twaalgos/alternation.cc (alternation_remover::run): Here. * tests/core/566.test: Augment test case. * NEWS: Mention the change. --- NEWS | 5 +- spot/twaalgos/alternation.cc | 132 +++++++++++++++++++++++++---------- tests/core/566.test | 3 + 3 files changed, 102 insertions(+), 38 deletions(-) diff --git a/NEWS b/NEWS index b875bfd6e..9da91b234 100644 --- a/NEWS +++ b/NEWS @@ -150,7 +150,10 @@ New in spot 2.11.6.dev (not yet released) - spot::dualize() learned a trick to be faster on states that have less outgoing edges than atomic proposition declared on the - automaton. (Issue #566.) + automaton. spot::remove_alternation() learned a similar trick, + except it isn't applied at the state level but of the entire + alternating use few distinct labels. These two changes speed up + the complementation of very weak automata. (Issue #566.) - [Potential backward incompatibility] spot::dualize() does not call cleanup_acceptance() anymore. This change ensures that the dual diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 1de366b66..dad0f307a 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -22,6 +22,7 @@ #include #include #include +#include namespace spot { @@ -361,7 +362,7 @@ namespace spot (has_reject_more_ + reject_1_count_) > SPOT_MAX_ACCSETS) return nullptr; - // Rejecting SCCs of size 1 can be handled using genralized + // Rejecting SCCs of size 1 can be handled using generalized // Büchi acceptance, using one set per SCC, as in Gastin & // Oddoux CAV'01. See also Boker & et al. ICALP'10. Larger // rejecting SCCs require a more expensive procedure known as @@ -375,6 +376,54 @@ namespace spot // This will raise an exception if we request too many sets. 
res->set_generalized_buchi(has_reject_more_ + reject_1_count_); + // Before we start, let's decide how we will iterate on the + // BDD that we use to encode the transition function. We have + // two way of doing so: iterating over 2^AP, or precomputing a + // set of "separated labels" that cover all labels. The + // latter is a good idea if that set is much smaller than + // 2^AP, but we cannot always know that before hand. + std::vector separated_labels; + unsigned n_ap = aut_->ap().size(); + // If |AP| is small, don't bother with the computation of + // separated labels. + bool will_use_labels = n_ap > 5; + if (will_use_labels) + { + std::set all_labels; + // Gather all labels, but stop if we see too many. + // The threshold below is arbitrary. + unsigned max_labels = 100 * n_ap; + for (auto& e: aut_->edges()) + { + if (all_labels.insert(e.cond).second) + if (all_labels.size() > max_labels) + { + will_use_labels = false; + break; + } + } + if (will_use_labels) + { + separated_labels.reserve(all_labels.size()); + separated_labels.push_back(bddtrue); + for (auto& lab: all_labels) + { + // make sure don't realloc during the loop + separated_labels.reserve(separated_labels.size() * 2); + // Do not use a range-based or iterator-based for loop + // here, as push_back invalidates the end iterator. + for (unsigned cur = 0, sz = separated_labels.size(); + cur < sz; ++cur) + if (bdd common = separated_labels[cur] & lab; + common != bddfalse) + { + separated_labels[cur] -= lab; + separated_labels.push_back(common); + } + } + } + } + // We for easier computation of outgoing sets, we will // represent states using BDD variables. allocate_state_vars(); @@ -459,44 +508,53 @@ namespace spot bdd ap = bdd_exist(bdd_support(bs), all_vars_); bdd all_letters = bdd_exist(bs, all_vars_); - // First loop over all possible valuations atomic properties. - for (bdd oneletter: minterms_of(all_letters, ap)) - { - minato_isop isop(bdd_restrict(bs, oneletter)); - bdd dest; - while ((dest = isop.next()) != bddfalse) - { - v.clear(); - acc_cond::mark_t m = bdd_to_state(dest, v); + // Given a label, and BDD expression representing + // the combination of destinations, create the edges. + auto create_edges = [&](bdd label, bdd dest_formula) + { + minato_isop isop(dest_formula); + bdd dest; + while ((dest = isop.next()) != bddfalse) + { + v.clear(); + acc_cond::mark_t m = bdd_to_state(dest, v); - // if there is no promise "f" between a state - // that does not have f, and a state that have - // "f", we can add one. Doing so will help later - // simplifications performed by postprocessor. An - // example where this is needed is the VWAA - // generated by ltl[23]ba for GFa. Without the - // next loop, the final TGBA has 2 states instead - // of 1. - for (unsigned m1: (all_marks - m).sets()) - { - if (has_reject_more_ && m1 == 0) - continue; - auto& sv = s_to_ss[s]; - unsigned ms = mark_to_state_[m1]; - if (std::find(v.begin(), v.end(), ms) != v.end()) - { - unsigned ms = mark_to_state_[m1]; - if (std::find(sv.begin(), sv.end(), ms) == sv.end()) - m.set(m1); - } - } + // if there is no promise "f" between a state + // that does not have f, and a state that have + // "f", we can add one. Doing so will help later + // simplifications performed by postprocessor. An + // example where this is needed is the VWAA + // generated by ltl[23]ba for GFa. Without the + // next loop, the final TGBA has 2 states instead + // of 1. 
+ for (unsigned m1: (all_marks - m).sets()) + { + if (has_reject_more_ && m1 == 0) + continue; + auto& sv = s_to_ss[s]; + unsigned ms = mark_to_state_[m1]; + if (std::find(v.begin(), v.end(), ms) != v.end()) + { + unsigned ms = mark_to_state_[m1]; + if (std::find(sv.begin(), sv.end(), ms) == sv.end()) + m.set(m1); + } + } - unsigned d = new_state(v, has_mark); - if (has_mark) - m.set(0); - res->new_edge(s, d, oneletter, all_marks - m); - } - } + unsigned d = new_state(v, has_mark); + if (has_mark) + m.set(0); + res->new_edge(s, d, label, all_marks - m); + } + }; + + if (!will_use_labels) + // Loop over all possible valuations atomic properties. + for (bdd oneletter: minterms_of(all_letters, ap)) + create_edges(oneletter, bdd_restrict(bs, oneletter)); + else + for (bdd label: separated_labels) + create_edges(label, bdd_relprod(label, bs, res->ap_vars())); } res->merge_edges(); return res; diff --git a/tests/core/566.test b/tests/core/566.test index 3adcc3bc6..2399576c4 100755 --- a/tests/core/566.test +++ b/tests/core/566.test @@ -135,3 +135,6 @@ EOF # but it is difficult to test so in the test suite. res=`autfilt --dualize 21.hoa --stats='%S %E %T %s %e %t'` test "$res" = "5 13 85 6 13 12582912" + +res=`autfilt --complement 21.hoa --stats='%S %E %T %s %e %t'` +test "$res" = "5 13 85 5 11 10485760" From ef10be047c6b68c95002cdc54b0b656d1cd67842 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Mar 2024 10:37:21 +0100 Subject: [PATCH 412/606] fix previous two patches make sure we don't split a label with a label that subsume it * spot/twaalgos/alternation.cc, spot/twaalgos/dualize.cc: Here. --- spot/twaalgos/alternation.cc | 4 +--- spot/twaalgos/dualize.cc | 5 ++--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index dad0f307a..b7e44a200 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -408,14 +408,12 @@ namespace spot separated_labels.push_back(bddtrue); for (auto& lab: all_labels) { - // make sure don't realloc during the loop - separated_labels.reserve(separated_labels.size() * 2); // Do not use a range-based or iterator-based for loop // here, as push_back invalidates the end iterator. for (unsigned cur = 0, sz = separated_labels.size(); cur < sz; ++cur) if (bdd common = separated_labels[cur] & lab; - common != bddfalse) + common != bddfalse && common != separated_labels[cur]) { separated_labels[cur] -= lab; separated_labels.push_back(common); diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index cbef6d451..bd0f4767d 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -200,13 +200,12 @@ namespace spot labels.push_back(bddtrue); for (auto& e: aut_->out(i)) { - // make sure we don't realloc during the loop - labels.reserve(labels.size() * 2); // Do not use a range-based or iterator-based for // loop here, as push_back invalidates the end // iterator. for (unsigned cur = 0, sz = labels.size(); cur < sz; ++cur) - if (bdd common = labels[cur] & e.cond; common != bddfalse) + if (bdd common = labels[cur] & e.cond; + common != bddfalse && common != labels[cur]) { labels[cur] -= e.cond; labels.push_back(common); From 0a045e5f769f4943f5871e09e82ee548dac0adf4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Mar 2024 11:01:24 +0100 Subject: [PATCH 413/606] split: factor the code common to both split_edges() versions * spot/twaalgos/split.cc: The two split_edges() versions only differ by the way they split a label. 
Let's define all the rest of the algorithm in split_edges_aux(). --- spot/twaalgos/split.cc | 449 ++++++++++++++++++----------------------- 1 file changed, 197 insertions(+), 252 deletions(-) diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index d26877373..8e40f79a5 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -51,276 +51,221 @@ namespace std namespace spot { - // We attempt to add a potentially new set of symbols defined as "value" to - // our current set of edge partitions, "current_set". We also specify a set - // of valid symbols considered - static void add_to_lower_bound_set_helper( - std::unordered_set& current_set, - bdd valid_symbol_set, - bdd value) + namespace { - // This function's correctness is defined by the invariant, that we never - // add a bdd to our current set unless the bdd is disjoint from every other - // element in the current_set. In other words, we will only reach the final - // set.insert(value), if we can iterate over the whole of current_set - // without finding some set intersections - if (value == bddfalse) // Don't add empty sets, as they subsume everything - { - return; - } - for (auto sym : current_set) - { - // If a sym is a subset of value, recursively add the set of symbols - // defined in value, but not in sym. This ensures the two elements - // are disjoint. - if (bdd_implies(sym, value)) - { - add_to_lower_bound_set_helper( - current_set, valid_symbol_set, (value - sym) & valid_symbol_set); - return; - } - // If a sym is a subset of the value we're trying to add, then we - // remove the symbol and add the two symbols created by partitioning - // the sym with value. - else if (bdd_implies(value, sym)) - { - current_set.erase(sym); - add_to_lower_bound_set_helper(current_set, - valid_symbol_set, - sym & value); - add_to_lower_bound_set_helper(current_set, - valid_symbol_set, - sym - value); - return; - } + // We attempt to add a potentially new set of symbols defined as "value" to + // our current set of edge partitions, "current_set". We also specify a set + // of valid symbols considered + static void + add_to_lower_bound_set_helper(std::unordered_set& current_set, + bdd valid_symbol_set, bdd value) + { + // This function's correctness is defined by the invariant, that + // we never add a bdd to our current set unless the bdd is + // disjoint from every other element in the current_set. In + // other words, we will only reach the final set.insert(value), + // if we can iterate over the whole of current_set without + // finding some set intersections + if (value == bddfalse) // Don't add empty sets, as they subsume everything + { + return; + } + for (auto sym : current_set) + { + // If a sym is a subset of value, recursively add the set of symbols + // defined in value, but not in sym. This ensures the two elements + // are disjoint. + if (bdd_implies(sym, value)) + { + add_to_lower_bound_set_helper(current_set, + valid_symbol_set, + (value - sym) & valid_symbol_set); + return; + } + // If a sym is a subset of the value we're trying to add, then we + // remove the symbol and add the two symbols created by partitioning + // the sym with value. 
+ else if (bdd_implies(value, sym)) + { + current_set.erase(sym); + add_to_lower_bound_set_helper(current_set, + valid_symbol_set, + sym & value); + add_to_lower_bound_set_helper(current_set, + valid_symbol_set, + sym - value); + return; + } + } + // This line is only reachable if value is not a subset and doesn't + // subsume any element currently in our set + current_set.insert(value); } - // This line is only reachable if value is not a subset and doesn't - // subsume any element currently in our set - current_set.insert(value); - } - static std::array create_possible_intersections( - bdd valid_symbol_set, - std::pair const& first, - std::pair const& second) - { - auto intermediate = second.first & valid_symbol_set; - auto intermediate2 = second.second & valid_symbol_set; - return { - first.first & intermediate, - first.second & intermediate, - first.first & intermediate2, - first.second & intermediate2, - }; - } + using bdd_set = std::unordered_set; + using bdd_pair_set = std::unordered_set>; - using bdd_set = std::unordered_set; - using bdd_pair_set = std::unordered_set>; + // Transforms each element of the basis into a complement pair, + // with a valid symbol set specified + static bdd_pair_set create_complement_pairs(std::vector const& basis, + bdd valid_symbol_set) + { + bdd_pair_set intersections; + for (bdd sym: basis) + { + bdd intersection = sym & valid_symbol_set; + if (intersection != bddfalse) + { + bdd negation = valid_symbol_set - intersection; + intersections.insert(std::make_pair(intersection, negation)); + } + } + return intersections; + } - // Transforms each element of the basis into a complement pair, - // with a valid symbol set specified - static bdd_pair_set create_complement_pairs(std::vector const& basis, - bdd valid_symbol_set) - { - bdd_pair_set intersections; - for (auto& sym : basis) - { - auto intersection = sym & valid_symbol_set; - if (intersection != bddfalse) - { - auto negation = valid_symbol_set - intersection; - intersections.insert(std::make_pair(intersection, negation)); - } - } - return intersections; - } - - template - void iterate_possible_intersections(bdd_pair_set const& complement_pairs, - bdd valid_symbol_set, - Callable callable) - { - for (auto it = complement_pairs.begin(); it != complement_pairs.end(); ++it) - { + template + void iterate_possible_intersections(bdd_pair_set const& complement_pairs, + bdd valid_symbol_set, + Callable callable) + { + for (auto it = complement_pairs.begin(); + it != complement_pairs.end(); ++it) for (auto it2 = std::next(it); it2 != complement_pairs.end(); ++it2) { - auto intersections = create_possible_intersections( - valid_symbol_set, *it, *it2); - for (auto& intersection : intersections) - { - callable(intersection); - } + auto intermediate = it2->first & valid_symbol_set; + auto intermediate2 = it2->second & valid_symbol_set; + callable(it->first & intermediate); + callable(it->second & intermediate); + callable(it->first & intermediate2); + callable(it->second & intermediate2); } - } - } - - // Compute the lower set bound of a set. 
A valid symbol set is also - // provided to make sure that no symbol exists in the output if it is - // not also included in the valid symbol set - static bdd_set lower_set_bound(std::vector const& basis, - bdd valid_symbol_set) - { - auto complement_pairs = create_complement_pairs(basis, valid_symbol_set); - if (complement_pairs.size() == 1) - { - bdd_set lower_bound; - auto& pair = *complement_pairs.begin(); - if (pair.first != bddfalse - && bdd_implies(pair.first, valid_symbol_set)) - { - lower_bound.insert(pair.first); - } - if (pair.second != bddfalse - && bdd_implies(pair.second, valid_symbol_set)) - { - lower_bound.insert(pair.second); - } - return lower_bound; - } - else - { - bdd_set lower_bound; - iterate_possible_intersections(complement_pairs, valid_symbol_set, - [&](auto intersection) - { - add_to_lower_bound_set_helper(lower_bound, - valid_symbol_set, - intersection); - }); - - return lower_bound; - } - } - - // Partitions a symbol based on a list of other bdds called the basis. - // The resulting partition will have the property that for any paritioned - // element and any element element in the basis, the partitioned element will - // either by completely contained by that element of the basis, or completely - // disjoint. - static bdd_set generate_contained_or_disjoint_symbols(bdd sym, - std::vector const& basis) - { - auto lower_bound = lower_set_bound(basis, sym); - // If the sym was disjoint from everything in the basis, we'll be left with - // an empty lower_bound. To fix this, we will simply return a singleton, - // with sym as the only element. Notice, this singleton will satisfy the - // requirements of a return value from this function. Additionally, if the - // sym is false, that means nothing can traverse it, so we simply are left - // with no edges. - if (lower_bound.empty() && sym != bddfalse) - { - lower_bound.insert(sym); } - return lower_bound; + + // Compute the lower set bound of a set. A valid symbol set is also + // provided to make sure that no symbol exists in the output if it is + // not also included in the valid symbol set + static bdd_set lower_set_bound(std::vector const& basis, + bdd valid_symbol_set) + { + auto complement_pairs = create_complement_pairs(basis, valid_symbol_set); + if (complement_pairs.size() == 1) + { + bdd_set lower_bound; + auto& pair = *complement_pairs.begin(); + if (pair.first != bddfalse + && bdd_implies(pair.first, valid_symbol_set)) + lower_bound.insert(pair.first); + if (pair.second != bddfalse + && bdd_implies(pair.second, valid_symbol_set)) + lower_bound.insert(pair.second); + return lower_bound; + } + else + { + bdd_set lower_bound; + iterate_possible_intersections(complement_pairs, valid_symbol_set, + [&](auto intersection) + { + add_to_lower_bound_set_helper(lower_bound, + valid_symbol_set, + intersection); + }); + return lower_bound; + } + } + + // Partitions a symbol based on a list of other bdds called the + // basis. The resulting partition will have the property that for + // any partitioned element and any element element in the basis, + // the partitioned element will either by completely contained by + // that element of the basis, or completely disjoint. + static bdd_set + generate_contained_or_disjoint_symbols(bdd sym, + std::vector const& basis) + { + auto lower_bound = lower_set_bound(basis, sym); + // If the sym was disjoint from everything in the basis, we'll + // be left with an empty lower_bound. To fix this, we will + // simply return a singleton, with sym as the only + // element. 
Notice, this singleton will satisfy the requirements + // of a return value from this function. Additionally, if the + // sym is false, that means nothing can traverse it, so we + // simply are left with no edges. + if (lower_bound.empty() && sym != bddfalse) + lower_bound.insert(sym); + return lower_bound; + } + + template + twa_graph_ptr split_edges_aux(const const_twa_graph_ptr& aut, + genlabels gen) + { + twa_graph_ptr out = make_twa_graph(aut->get_dict()); + out->copy_acceptance_of(aut); + out->copy_ap_of(aut); + out->prop_copy(aut, twa::prop_set::all()); + out->new_states(aut->num_states()); + out->set_init_state(aut->get_init_state_number()); + + // We use a cache to avoid the costly loop around minterms_of(). + // Cache entries have the form (id, [begin, end]) where id is the + // number of a BDD that as been (or will be) split, and begin/end + // denotes a range of existing transition numbers that cover the + // split. + // + // std::pair causes some noexcept warnings when used in + // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair + // instead. + typedef robin_hood::pair cached_t; + robin_hood::unordered_map split_cond; + + internal::univ_dest_mapper uniq(out->get_graph()); + + for (auto& e: aut->edges()) + { + bdd cond = e.cond; + if (cond == bddfalse) + continue; + unsigned dst = e.dst; + if (aut->is_univ_dest(dst)) + { + auto d = aut->univ_dests(dst); + dst = uniq.new_univ_dests(d.begin(), d.end()); + } + + auto& [begin, end] = split_cond[cond.id()]; + if (begin == end) + { + begin = out->num_edges() + 1; + for (bdd minterm: gen(cond)) + out->new_edge(e.src, dst, minterm, e.acc); + end = out->num_edges() + 1; + } + else + { + auto& g = out->get_graph(); + for (unsigned i = begin; i < end; ++i) + out->new_edge(e.src, dst, g.edge_storage(i).cond, e.acc); + } + } + return out; + } } twa_graph_ptr split_edges(const const_twa_graph_ptr& aut) { - twa_graph_ptr out = make_twa_graph(aut->get_dict()); - out->copy_acceptance_of(aut); - out->copy_ap_of(aut); - out->prop_copy(aut, twa::prop_set::all()); - out->new_states(aut->num_states()); - out->set_init_state(aut->get_init_state_number()); - - // We use a cache to avoid the costly loop around minterms_of(). - // Cache entries have the form (id, [begin, end]) where id is the - // number of a BDD that as been (or will be) split, and begin/end - // denotes a range of existing transition numbers that cover the - // split. - // - // std::pair causes some noexcept warnings when used in - // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair - // instead. 
- typedef robin_hood::pair cached_t; - robin_hood::unordered_map split_cond; - bdd all = aut->ap_vars(); - internal::univ_dest_mapper uniq(out->get_graph()); - - for (auto& e: aut->edges()) - { - bdd cond = e.cond; - if (cond == bddfalse) - continue; - unsigned dst = e.dst; - if (aut->is_univ_dest(dst)) - { - auto d = aut->univ_dests(dst); - dst = uniq.new_univ_dests(d.begin(), d.end()); - } - - auto& [begin, end] = split_cond[cond.id()]; - if (begin == end) - { - begin = out->num_edges() + 1; - for (bdd minterm: minterms_of(cond, all)) - out->new_edge(e.src, dst, minterm, e.acc); - end = out->num_edges() + 1; - } - else - { - auto& g = out->get_graph(); - for (unsigned i = begin; i < end; ++i) - out->new_edge(e.src, dst, g.edge_storage(i).cond, e.acc); - } - } - return out; + return split_edges_aux(aut, [&](bdd cond) { + return minterms_of(cond, all); + }); } twa_graph_ptr split_edges(const const_twa_graph_ptr& aut, std::vector const& basis) { - twa_graph_ptr out = make_twa_graph(aut->get_dict()); - out->copy_acceptance_of(aut); - out->copy_ap_of(aut); - out->prop_copy(aut, twa::prop_set::all()); - out->new_states(aut->num_states()); - out->set_init_state(aut->get_init_state_number()); - - // We use a cache to avoid the costly loop around minterms_of(). - // Cache entries have the form (id, [begin, end]) where id is the - // number of a BDD that as been (or will be) split, and begin/end - // denotes a range of existing transition numbers that cover the - // split. - using cached_t = std::pair; - std::unordered_map split_cond; - internal::univ_dest_mapper uniq(out->get_graph()); - - for (auto& e: aut->edges()) - { - bdd const& cond = e.cond; - unsigned dst = e.dst; - - if (cond == bddfalse) - continue; - if (aut->is_univ_dest(dst)) - { - auto d = aut->univ_dests(dst); - dst = uniq.new_univ_dests(d.begin(), d.end()); - } - - auto& [begin, end] = split_cond[cond.id()]; - if (begin == end) - { - begin = out->num_edges() + 1; - auto split = generate_contained_or_disjoint_symbols(cond, - basis); - for (bdd minterm : split) - { - out->new_edge(e.src, dst, minterm, e.acc); - } - end = out->num_edges() + 1; - } - else - { - auto& g = out->get_graph(); - for (unsigned i = begin; i < end; ++i) - { - out->new_edge(e.src, dst, g.edge_storage(i).cond, e.acc); - } - } - } - return out; + bdd all = aut->ap_vars(); + return split_edges_aux(aut, [&](bdd cond) { + return generate_contained_or_disjoint_symbols(cond, basis); + }); } } From 3bcffa2fcdb1578d96d78a45cd9329b649159d20 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Mar 2024 16:31:07 +0100 Subject: [PATCH 414/606] split: add separate_edges() and a edge_separator class This generalizes (and replaces) the two-argument split that was introduced in c2832cabfc. * spot/twaalgos/split.cc, spot/twaalgos/split.hh (edge_separator): New class. (separate_edges): New function. (split_edges): Remove the two argument version. * spot/twaalgos/forq_contains.cc: Adjust to use the edge_separator class. * tests/python/splitedge.py: Adjust test case. * tests/python/split.ipynb: New file. * tests/Makefile.am, doc/org/tut.org: Add it. * NEWS: Mention it. 
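For illustration, here is a minimal sketch of how the new class is
meant to be used (mirroring the new code in forq_contains.cc; a and b
stand for two arbitrary automata of type twa_graph_ptr):

    #include <spot/twaalgos/split.hh>

    spot::edge_separator es;
    es.add_to_basis(b);          // collect the labels used by b
    spot::twa_graph_ptr a2 = es.separate_compat(a);
    // Every label of a2 is now either included in, or disjoint
    // from, every label used by b.

separate_edges(aut) itself simply amounts to add_to_basis(aut)
followed by separate_implying(aut).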
--- NEWS | 3 + doc/org/tut.org | 1 + spot/twaalgos/forq_contains.cc | 17 +- spot/twaalgos/split.cc | 230 +--- spot/twaalgos/split.hh | 197 ++- tests/Makefile.am | 1 + tests/python/split.ipynb | 2139 ++++++++++++++++++++++++++++++++ tests/python/splitedge.py | 7 +- 8 files changed, 2395 insertions(+), 200 deletions(-) create mode 100644 tests/python/split.ipynb diff --git a/NEWS b/NEWS index 9da91b234..832e0fd61 100644 --- a/NEWS +++ b/NEWS @@ -135,6 +135,9 @@ New in spot 2.11.6.dev (not yet released) Büchi automaton will always return a Büchi automaton. For those, a "keep_one_color" option has been added to scc_filter. + - spot::separate_edges() and spot::edge_separator offers more ways + to split labels. See https://spot.lrde.epita.fr/ipynb/split.html + - ltsmin's interface will now point to README.ltsmin in case an error is found while running divine or spins. diff --git a/doc/org/tut.org b/doc/org/tut.org index 8ae701bc5..c3833ff5d 100644 --- a/doc/org/tut.org +++ b/doc/org/tut.org @@ -71,6 +71,7 @@ real notebooks instead. conditions - [[https://spot.lrde.epita.fr/ipynb/contains.html][=contains.ipynb=]] demonstrates containment checks between formulas or automata +- [[https://spot.lrde.epita.fr/ipynb/split.html][=split.ipynb=]] illustrates various ways to split labels - [[https://spot.lrde.epita.fr/ipynb/parity.html][=parity.ipynb=]] documents algorithms for manipulating parity automata in Python - [[https://spot.lrde.epita.fr/ipynb/games.html][=games.ipynb=]] illustrates support for games diff --git a/spot/twaalgos/forq_contains.cc b/spot/twaalgos/forq_contains.cc index 4d4264ae2..bc76146bd 100644 --- a/spot/twaalgos/forq_contains.cc +++ b/spot/twaalgos/forq_contains.cc @@ -695,19 +695,6 @@ namespace spot::forq::util return all_states; } - // Create a list of bdds, where each corresponds to an edge in B - static std::vector create_edge_splitting_basis(const_graph const& B) - { - auto edges = B->edges(); - std::unordered_set out; - std::transform(edges.begin(), edges.end(), std::inserter(out, out.begin()), - [](auto& edge) - { - return edge.cond; - }); - return std::vector(out.begin(), out.end()); - } - forq_context create_forq_context(const_graph const& A, const_graph const& B) { forq_context retval; @@ -715,7 +702,9 @@ namespace spot::forq::util retval.B.outgoing = util::generate_outgoing_states(B); retval.B.final_edges = get_final_edges(B); - retval.A.aut = split_edges(A, create_edge_splitting_basis(B)); + edge_separator es; + es.add_to_basis(B); + retval.A.aut = es.separate_compat(A); retval.A.outgoing = util::generate_outgoing_states(retval.A.aut); retval.A.final_edges = get_final_edges(retval.A.aut); retval.cache.precomputed_ipost.resize(B->num_states()); diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index 8e40f79a5..a4d973d58 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -22,180 +22,10 @@ #include #include -namespace std -{ - template<> - struct hash<::bdd> - { - size_t operator()(::bdd const& instance) const noexcept - { - return ::spot::bdd_hash{}(instance); - } - }; - - template<> - struct hash> - { - size_t operator()(pair const& x) const noexcept - { - size_t first_hash = std::hash()(x.first); - size_t second_hash = std::hash()(x.second); - size_t sum = second_hash - + 0x9e3779b9 - + (first_hash << 6) - + (first_hash >> 2); - return first_hash ^ sum; - } - }; -} - namespace spot { namespace { - // We attempt to add a potentially new set of symbols defined as "value" to - // our current set of edge partitions, "current_set". 
We also specify a set - // of valid symbols considered - static void - add_to_lower_bound_set_helper(std::unordered_set& current_set, - bdd valid_symbol_set, bdd value) - { - // This function's correctness is defined by the invariant, that - // we never add a bdd to our current set unless the bdd is - // disjoint from every other element in the current_set. In - // other words, we will only reach the final set.insert(value), - // if we can iterate over the whole of current_set without - // finding some set intersections - if (value == bddfalse) // Don't add empty sets, as they subsume everything - { - return; - } - for (auto sym : current_set) - { - // If a sym is a subset of value, recursively add the set of symbols - // defined in value, but not in sym. This ensures the two elements - // are disjoint. - if (bdd_implies(sym, value)) - { - add_to_lower_bound_set_helper(current_set, - valid_symbol_set, - (value - sym) & valid_symbol_set); - return; - } - // If a sym is a subset of the value we're trying to add, then we - // remove the symbol and add the two symbols created by partitioning - // the sym with value. - else if (bdd_implies(value, sym)) - { - current_set.erase(sym); - add_to_lower_bound_set_helper(current_set, - valid_symbol_set, - sym & value); - add_to_lower_bound_set_helper(current_set, - valid_symbol_set, - sym - value); - return; - } - } - // This line is only reachable if value is not a subset and doesn't - // subsume any element currently in our set - current_set.insert(value); - } - - using bdd_set = std::unordered_set; - using bdd_pair_set = std::unordered_set>; - - // Transforms each element of the basis into a complement pair, - // with a valid symbol set specified - static bdd_pair_set create_complement_pairs(std::vector const& basis, - bdd valid_symbol_set) - { - bdd_pair_set intersections; - for (bdd sym: basis) - { - bdd intersection = sym & valid_symbol_set; - if (intersection != bddfalse) - { - bdd negation = valid_symbol_set - intersection; - intersections.insert(std::make_pair(intersection, negation)); - } - } - return intersections; - } - - template - void iterate_possible_intersections(bdd_pair_set const& complement_pairs, - bdd valid_symbol_set, - Callable callable) - { - for (auto it = complement_pairs.begin(); - it != complement_pairs.end(); ++it) - for (auto it2 = std::next(it); it2 != complement_pairs.end(); ++it2) - { - auto intermediate = it2->first & valid_symbol_set; - auto intermediate2 = it2->second & valid_symbol_set; - callable(it->first & intermediate); - callable(it->second & intermediate); - callable(it->first & intermediate2); - callable(it->second & intermediate2); - } - } - - // Compute the lower set bound of a set. 
A valid symbol set is also - // provided to make sure that no symbol exists in the output if it is - // not also included in the valid symbol set - static bdd_set lower_set_bound(std::vector const& basis, - bdd valid_symbol_set) - { - auto complement_pairs = create_complement_pairs(basis, valid_symbol_set); - if (complement_pairs.size() == 1) - { - bdd_set lower_bound; - auto& pair = *complement_pairs.begin(); - if (pair.first != bddfalse - && bdd_implies(pair.first, valid_symbol_set)) - lower_bound.insert(pair.first); - if (pair.second != bddfalse - && bdd_implies(pair.second, valid_symbol_set)) - lower_bound.insert(pair.second); - return lower_bound; - } - else - { - bdd_set lower_bound; - iterate_possible_intersections(complement_pairs, valid_symbol_set, - [&](auto intersection) - { - add_to_lower_bound_set_helper(lower_bound, - valid_symbol_set, - intersection); - }); - return lower_bound; - } - } - - // Partitions a symbol based on a list of other bdds called the - // basis. The resulting partition will have the property that for - // any partitioned element and any element element in the basis, - // the partitioned element will either by completely contained by - // that element of the basis, or completely disjoint. - static bdd_set - generate_contained_or_disjoint_symbols(bdd sym, - std::vector const& basis) - { - auto lower_bound = lower_set_bound(basis, sym); - // If the sym was disjoint from everything in the basis, we'll - // be left with an empty lower_bound. To fix this, we will - // simply return a singleton, with sym as the only - // element. Notice, this singleton will satisfy the requirements - // of a return value from this function. Additionally, if the - // sym is false, that means nothing can traverse it, so we - // simply are left with no edges. - if (lower_bound.empty() && sym != bddfalse) - lower_bound.insert(sym); - return lower_bound; - } - template twa_graph_ptr split_edges_aux(const const_twa_graph_ptr& aut, genlabels gen) @@ -217,7 +47,7 @@ namespace spot // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair // instead. typedef robin_hood::pair cached_t; - robin_hood::unordered_map split_cond; + robin_hood::unordered_map split_cond; internal::univ_dest_mapper uniq(out->get_graph()); @@ -260,12 +90,60 @@ namespace spot }); } - twa_graph_ptr split_edges(const const_twa_graph_ptr& aut, - std::vector const& basis) + void edge_separator::add_to_basis(bdd label) { - bdd all = aut->ap_vars(); - return split_edges_aux(aut, [&](bdd cond) { - return generate_contained_or_disjoint_symbols(cond, basis); + if (label == bddfalse) + return; + // Split our current set of labels using this new one. + // + // Do not use a range-based or iterator-based for loop here, + // as push_back invalidates the end iterator. 
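+    // (Note: basis_ thereby stays a partition of bddtrue: each element
+    // is either left untouched, or replaced by its part outside LABEL
+    // while its part inside LABEL is appended at the end.)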
+ for (unsigned cur = 0, sz = basis_.size(); cur < sz; ++cur) + if (bdd common = basis_[cur] & label; + common != bddfalse && common != basis_[cur]) + { + basis_[cur] -= label; + basis_.push_back(common); + } + } + + void edge_separator::add_to_basis(const const_twa_graph_ptr& aut) + { + for (formula f: aut->ap()) + aps_.insert(f); + + robin_hood::unordered_set seen{bddtrue.id()}; + for (auto& e: aut->edges()) + if (bdd lab = e.cond; seen.insert(lab.id()).second) + add_to_basis(lab); + } + + twa_graph_ptr + edge_separator::separate_implying(const const_twa_graph_ptr& aut) + { + auto res = split_edges_aux(aut, [this](bdd cond) { + return this->separate_implying(cond); }); + for (formula f: aps_) + res->register_ap(f); + return res; + } + + twa_graph_ptr + edge_separator::separate_compat(const const_twa_graph_ptr& aut) + { + auto res = split_edges_aux(aut, [this](bdd cond) { + return this->separate_compat(cond); + }); + for (formula f: aps_) + res->register_ap(f); + return res; + } + + twa_graph_ptr separate_edges(const const_twa_graph_ptr& aut) + { + edge_separator es; + es.add_to_basis(aut); + return es.separate_implying(aut); } } diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index 63304db1e..a30d8645e 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -28,17 +28,198 @@ namespace spot /// /// Create a new version of the automaton where all edges are split /// so that they are all labeled by a conjunction of all atomic - /// propositions. After this we can consider that each edge of the - /// automate is a transition labeled by one letter. + /// propositions. + /// + /// So if an edge is labeled by "true", it will be split into + /// $2^{AP}$ distinct edges. + /// + /// After this we can consider that each edge of the automaton is a + /// transition labeled by one of $2^{AP}$ letters. + /// + /// \see separate_edges SPOT_API twa_graph_ptr split_edges(const const_twa_graph_ptr& aut); +#ifndef SWIG + // pseudo container that we use to iterate over + // the items of LABELS that are compatible with COND. + template + struct SPOT_API edge_separator_filter + { + edge_separator_filter(const std::vector& labels, bdd cond) + : labels_(labels), cond_(cond) + { + } + + class iterator + { + std::vector::const_iterator pos_; + std::vector const& labels_; + bdd cond_; + + public: + iterator(const std::vector& labels, bdd cond) + : labels_(labels), cond_(cond) + { + pos_ = labels_.begin(); + next(); + } + + iterator& operator++() + { + ++pos_; + next(); + return *this; + } + + void next() + { + // If subsumed is true, we want to match the labels + // that imply the current condition. Otherwise we + // want to match the labels that are compatible. 
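+          // Labels that fail the relevant test are skipped; iteration
+          // stops once labels_.end() is reached.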
+ while (pos_ != labels_.end() && + ((subsumed && !bdd_implies(*pos_, cond_)) + || (!subsumed && (*pos_ & cond_) == bddfalse))) + ++pos_; + } + + bdd operator*() const + { + if (subsumed) + return *pos_; + else + return *pos_ & cond_; + } + + bool operator==(const iterator& other) const + { + return pos_ == other.pos_; + } + + bool operator!=(const iterator& other) const + { + return pos_ != other.pos_; + } + + bool operator==(std::vector::const_iterator pos) const + { + return pos_ == pos; + } + + bool operator!=(std::vector::const_iterator pos) const + { + return pos_ != pos; + } + }; + + iterator begin() const + { + return iterator(labels_, cond_); + } + + std::vector::const_iterator end() const + { + return labels_.end(); + } + + private: + const std::vector& labels_; + bdd cond_; + }; +#endif + + /// \ingroup twa_misc - /// \brief transform edges into transitions based on set of bdds + /// \brief separate edges so that their labels are disjoint + /// + /// To use this class, first call add_to_basis() for each label that + /// you want to separate. Then call separate() to get a new + /// automaton. + /// + /// Note that all labels seen by separate() should have been + /// previously declared using add_to_basis(), but more can be + /// declared. + /// + /// For instance an automaton has labels in {a,b,!a&!b&c} and those + /// are used as basis, the separated automaton should have its + /// labels among {a&!b,a&b,!a&b,!a&!b&c}. + class SPOT_API edge_separator + { + public: + /// \brief add label(s) to a basis + /// + /// Add a single label, or all the labels of an automaton. + /// + /// The version that takes an automaton will also record the + /// atomic propositions used by the automaton. Those atomic + /// propositions will be registered by separate_implying() or + /// separate_compat(). If you call the BDD version of + /// add_to_basis() and add a new atomic proposition, you should + /// remember to register it in the result of separate_implying() + /// or separate_compat() yourself. + /// @{ + void add_to_basis(bdd label); + void add_to_basis(const const_twa_graph_ptr& aut); + /// @} + /// \brief Separate an automaton + /// + /// This variant replaces each edge labeled by L by an edge + /// for each label of the basis that is implies L. This + /// faster than separate_compat when all edges of aut have + /// been declared in the basis. + twa_graph_ptr separate_implying(const const_twa_graph_ptr& aut); + /// \brief Separate an automaton + /// + /// This variant replaces each edge labeled by L by an edge for + /// each label of the basis that compatible implies L. This + /// faster than separate_compat when all edges of aut have been + /// declared in the basis. + twa_graph_ptr separate_compat(const const_twa_graph_ptr& aut); +#ifndef SWIG + /// \brief Separate a label + /// + /// This returns a pseudo-container that can be used to iterate + /// over the elements of the basis that imply the current label. + /// + /// For instance if the basis was created from {a,b} (i.e., the + /// basis is actually {!a&!b,a&!b,!a&b,a&b}), and the label is + /// {a}, the result will be {a&!b,a&b}. + edge_separator_filter separate_implying(bdd label) + { + return {basis_, label}; + } + /// \brief Separate a label + /// + /// This returns a pseudo-container that can be used to iterate + /// over the elements of the basis compatible with the current labal. 
+ /// + /// For instance if the basis was created from {a,b} (i.e., the + /// basis is actually {!a&!b,a&!b,!a&b,a&b}), and the label is + /// {c&a}, the result will be {a&!b&c,a&b&c}. + edge_separator_filter separate_compat(bdd label) + { + return {basis_, label}; + } +#endif + unsigned basis_size() const + { + return basis_.size(); + } + private: + std::vector basis_{bddtrue}; + std::set aps_; + }; + + /// \ingroup twa_misc + /// \brief Make edge labels disjoints /// /// Create a new version of the automaton where all edges are split - /// such that, for any transformed edge and any set of symbols in - /// the basis, the transformed edge is either completely disjoint - /// from the set of symbols, or it is a subset of them. - SPOT_API twa_graph_ptr split_edges( - const const_twa_graph_ptr& aut, std::vector const& basis); + /// in such a way that two labels are either equal or disjoint. + /// + /// For instance if the automaton uses only {a,b,!a&!b&c} as labels, + /// the result should have label among {a&!b,a&b,!a&b,!a&!b&c}. + /// + /// Using split_edges() also creates an automaton with separated labels, + /// but the separation will be much finer since it will result in a much + /// involves all atomtic proposition. + SPOT_API twa_graph_ptr separate_edges(const const_twa_graph_ptr& aut); } diff --git a/tests/Makefile.am b/tests/Makefile.am index 4a36a725b..c6430b2ac 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -378,6 +378,7 @@ TESTS_ipython = \ python/randaut.ipynb \ python/randltl.ipynb \ python/satmin.ipynb \ + python/split.ipynb \ python/stutter-inv.ipynb \ python/synthesis.ipynb \ python/testingaut.ipynb \ diff --git a/tests/python/split.ipynb b/tests/python/split.ipynb new file mode 100644 index 000000000..3ef8bc9a5 --- /dev/null +++ b/tests/python/split.ipynb @@ -0,0 +1,2139 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "edc9ac7a", + "metadata": {}, + "source": [ + "In Spot, automata edges are labeled by Boolean functions over atomic propositions.\n", + "As a consequence, it is sometimes difficult to adapt algorithms that expect automata labeled by letters. This notebook presents methods that can be used to split those edge labels to make it easier to consider them as letters." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f9791763", + "metadata": {}, + "outputs": [], + "source": [ + "import spot\n", + "from spot.jupyter import display_inline\n", + "spot.setup(show_default=\".A\")" + ] + }, + { + "cell_type": "markdown", + "id": "81867c56", + "metadata": {}, + "source": [ + "Consider the labels appearing in the following automaton:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "28ab6c77", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fe0369d0> >" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.translate(\"a & X(a->b) & XX(!a&!b&c)\")\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "dcd554c8", + "metadata": {}, + "source": [ + "We try to use the word \"edge\" to refer to an edge of the automaton, labeled by a Boolean formula over AP. These edges can be seen as representing several \"transitions\", each labeled by a valuation of all atomic propositions. So the above automaton uses 4 edges to represent 19 transitions" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "3679412a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4 19\n" + ] + } + ], + "source": [ + "s = spot.sub_stats_reachable(aut)\n", + "print(s.edges, s.transitions)" + ] + }, + { + "cell_type": "markdown", + "id": "0804e219", + "metadata": {}, + "source": [ + "We can split the edges into the corresponding transitions using `split_edges()`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "6f373fde", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 
0x7f31fd7cfd20> >" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut_split = spot.split_edges(aut)\n", + "aut_split" + ] + }, + { + "cell_type": "markdown", + "id": "101a7100", + "metadata": {}, + "source": [ + "The opposite operation is `merge_edges()`, but it works in place:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "cf014f95", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fd7cfd20> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut_split.merge_edges()\n", + "aut_split" + ] + }, + { + "cell_type": "markdown", + "id": "2bc773cf", + "metadata": {}, + "source": [ + "Another way to split edges is `separate_edges()` this tweaks the labels so that any two labels can only be equal or disjoint. Note how this creates fewer edges." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "3a130b23", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fd7cff90> >" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spot.separate_edges(aut)" + ] + }, + { + "cell_type": "markdown", + "id": "3f523aba", + "metadata": {}, + "source": [ + "A slightly lower-level interface is the `edge_separator` class. 
This makes it possible to declare a \"basis\" (a set of labels) that will be used to separate the edge of an automaton.\n", + "\n", + "`separate_edges()` is actually implemented as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "2716cc20", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b) | (!a & !c)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fd7dcf00> >" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "es = spot.edge_separator()\n", + "es.add_to_basis(aut) # create a basis from the labels of aut\n", + "es.separate_implying(aut) # replace labels by all labels of the basis that imply them" + ] + }, + { + "cell_type": "markdown", + "id": "9a46e347", + "metadata": {}, + "source": [ + "The `edge_separator` can also be used to separate the edges of *another* automaton:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "25d779a9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "aut2 = spot.translate('a W Gd')\n", + "# replace labels based on \"compatibility\" with those from the basis\n", + "aut2sep = es.separate_compat(aut2)\n", + "display_inline(aut2, aut2sep)" + ] + }, + { + "cell_type": "markdown", + "id": "d448df40", + "metadata": {}, + "source": [ + "Now, if we take any label A in `aut2sep` and any label B in `aut`, we necessarily \n", + "have A∧B ∈ {A,0}. I.e., either A implies B, or A and B are incompatible. This is useful in certain algorithm that want to check that the inclusion of on automaton in another one, because they can arange to onlu check the inclusion (with `bdd_implies`) of the labels from the small automaton into the labels of the larger automaton." + ] + }, + { + "cell_type": "markdown", + "id": "db0b203b", + "metadata": {}, + "source": [ + "We could also use `edge_separator` to create a combined basis for two automata:" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "2de45a46", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & !d) | (!a & !c & !d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b & !d) | (!a & !c & !d)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + 
"\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & !d) | (!a & !c & !d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b & !d) | (!a & !c & !d)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c & !d\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fe037180> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & !d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b & !d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "(!a & b & d) | (!a & !c & d)\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b & d\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b & c & d\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f31fd7dd2c0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + 
"source": [ + "es2 = spot.edge_separator()\n", + "es2.add_to_basis(aut)\n", + "es2.add_to_basis(aut2)\n", + "display(es2.separate_implying(aut), es2.separate_implying(aut2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f5035f7", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/python/splitedge.py b/tests/python/splitedge.py index 4911b10c2..af5d9f1a1 100644 --- a/tests/python/splitedge.py +++ b/tests/python/splitedge.py @@ -27,8 +27,11 @@ def create_aps(aut): def do_edge_test(aut, aps, edges_before, edges_after): tc.assertEqual(aut.num_edges(), edges_before) - aut = spot.split_edges(aut, aps) - tc.assertEqual(aut.num_edges(), edges_after) + es = spot.edge_separator() + for ap in aps: + es.add_to_basis(ap) + res = es.separate_compat(aut) + tc.assertEqual(res.num_edges(), edges_after) aut = spot.automaton(""" HOA: v1 From c220107eb4d6cd37808fb0dc2d501a21e143c898 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Mar 2024 21:19:38 +0100 Subject: [PATCH 415/606] remove_alternation: use edge_separator * spot/twaalgos/split.cc, spot/twaalgos/split.hh (edge_separator::add_to_basis): Add a variant that is limited in the number of labels it adds. * spot/twaalgos/alternation.cc: Use it. Also add a cache of separated edges, as in the split. --- spot/twaalgos/alternation.cc | 85 ++++++++++++++++++++++-------------- spot/twaalgos/split.cc | 17 ++++++++ spot/twaalgos/split.hh | 13 ++++++ 3 files changed, 82 insertions(+), 33 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index b7e44a200..c7b2a17d5 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -389,37 +391,12 @@ namespace spot bool will_use_labels = n_ap > 5; if (will_use_labels) { - std::set all_labels; + edge_separator es; // Gather all labels, but stop if we see too many. // The threshold below is arbitrary. - unsigned max_labels = 100 * n_ap; - for (auto& e: aut_->edges()) - { - if (all_labels.insert(e.cond).second) - if (all_labels.size() > max_labels) - { - will_use_labels = false; - break; - } - } + will_use_labels = es.add_to_basis(aut_, 256 * n_ap); if (will_use_labels) - { - separated_labels.reserve(all_labels.size()); - separated_labels.push_back(bddtrue); - for (auto& lab: all_labels) - { - // Do not use a range-based or iterator-based for loop - // here, as push_back invalidates the end iterator. - for (unsigned cur = 0, sz = separated_labels.size(); - cur < sz; ++cur) - if (bdd common = separated_labels[cur] & lab; - common != bddfalse && common != separated_labels[cur]) - { - separated_labels[cur] -= lab; - separated_labels.push_back(common); - } - } - } + separated_labels = es.basis(); } // We for easier computation of outgoing sets, we will @@ -490,6 +467,19 @@ namespace spot acc_cond::mark_t all_marks = res->acc().all_sets(); + // We use a cache to avoid the costly loop around + // separated_labels. 
+ // + // Cache entries have the form (bdd, [begin, end]) where bdd + // what should be split, and begin/end denotes a range of + // existing transition numbers that cover the split. + // + // std::pair causes some noexcept warnings when used in + // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair + // instead. + typedef robin_hood::pair cached_t; + robin_hood::unordered_map split_cond; + state_set v; while (!todo.empty()) { @@ -547,12 +537,41 @@ namespace spot }; if (!will_use_labels) - // Loop over all possible valuations atomic properties. - for (bdd oneletter: minterms_of(all_letters, ap)) - create_edges(oneletter, bdd_restrict(bs, oneletter)); + { + // Loop over all possible valuations of atomic properties. + for (bdd oneletter: minterms_of(all_letters, ap)) + create_edges(oneletter, bdd_restrict(bs, oneletter)); + } else - for (bdd label: separated_labels) - create_edges(label, bdd_relprod(label, bs, res->ap_vars())); + { + auto& [begin, end] = split_cond[all_letters]; + if (begin == end) + { + begin = res->num_edges() + 1; + for (bdd label: separated_labels) + create_edges(label, bdd_relprod(label, bs, + res->ap_vars())); + end = res->num_edges() + 1; + } + else + { + // We have already split all_letters once, so we + // can simply reuse the set of labels we used + // then, avoiding the iteration on + // separated_labels. + auto& g = res->get_graph(); + bdd last = bddfalse; + for (unsigned i = begin; i < end; ++i) + { + bdd label = g.edge_storage(i).cond; + if (label == last) + continue; + last = label; + create_edges(label, bdd_relprod(label, bs, + res->ap_vars())); + } + } + } } res->merge_edges(); return res; diff --git a/spot/twaalgos/split.cc b/spot/twaalgos/split.cc index a4d973d58..93d08589d 100644 --- a/spot/twaalgos/split.cc +++ b/spot/twaalgos/split.cc @@ -118,6 +118,23 @@ namespace spot add_to_basis(lab); } + bool edge_separator::add_to_basis(const const_twa_graph_ptr& aut, + unsigned long max_label) + { + std::set all_labels; + for (auto& e: aut->edges()) + { + if (all_labels.insert(e.cond).second) + if (max_label-- == 0) + return false; + } + for (bdd lab: all_labels) + add_to_basis(lab); + for (formula f: aut->ap()) + aps_.insert(f); + return true; + } + twa_graph_ptr edge_separator::separate_implying(const const_twa_graph_ptr& aut) { diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index a30d8645e..cb66619f1 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -156,9 +156,15 @@ namespace spot /// add_to_basis() and add a new atomic proposition, you should /// remember to register it in the result of separate_implying() /// or separate_compat() yourself. + /// + /// If \a max_label is given, at most \a max_label unique labels + /// are added to the basis. False is returned iff the automaton + /// used more labels. /// @{ void add_to_basis(bdd label); void add_to_basis(const const_twa_graph_ptr& aut); + bool add_to_basis(const const_twa_graph_ptr& aut, + unsigned long max_label); /// @} /// \brief Separate an automaton /// @@ -200,10 +206,17 @@ namespace spot return {basis_, label}; } #endif + unsigned basis_size() const { return basis_.size(); } + + const std::vector& basis() const + { + return basis_; + } + private: std::vector basis_{bddtrue}; std::set aps_; From 06099f649ec25aaace70b5140290e6ee2d1f4651 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 18 Mar 2024 23:20:06 +0100 Subject: [PATCH 416/606] powerset: improve tgba_powerset on small automata with large |AP| For issue #566. 
* spot/twaalgos/powerset.cc: Use the edge_separator on automata with |AP|>5 that have few distinct labels. * tests/core/566.test: Augment test-case. * NEWS: Update. --- NEWS | 10 ++++--- spot/twaalgos/powerset.cc | 55 ++++++++++++++++++++++++++++++--------- tests/core/566.test | 5 ++++ 3 files changed, 54 insertions(+), 16 deletions(-) diff --git a/NEWS b/NEWS index 832e0fd61..4b20958f4 100644 --- a/NEWS +++ b/NEWS @@ -153,10 +153,12 @@ New in spot 2.11.6.dev (not yet released) - spot::dualize() learned a trick to be faster on states that have less outgoing edges than atomic proposition declared on the - automaton. spot::remove_alternation() learned a similar trick, - except it isn't applied at the state level but of the entire - alternating use few distinct labels. These two changes speed up - the complementation of very weak automata. (Issue #566.) + automaton. spot::remove_alternation() and + spot::tgba_determinize() learned a similar trick, except it isn't + applied at the state level but of the entire alternating use few + distinct labels. These changes may speed up the complementation + of some very weak automata, and the minimization of some + WDBA. (Issue #566.) - [Potential backward incompatibility] spot::dualize() does not call cleanup_acceptance() anymore. This change ensures that the dual diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index 044ea21b9..2163f5ffc 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -83,20 +84,44 @@ namespace spot if ((-1UL / ns) >> nap == 0) throw std::runtime_error("too many atomic propositions (or states)"); + // we have two ways of "spliting" the labels when determinizing. + // One is to iterate over 2^AP, the second is to partition the set + // of edges labels. We don't have a very clean rule to chose. The + // former is expansive when we have a lot of AP. The latter is + // good when we have few distinct labels. With too many different + // labels that may have nonempty intersections, the + // partition-based approach can consume a lot of memory. + bool will_use_labels = nap > 5; + edge_separator es; + if (will_use_labels) + { + // Gather all labels, but stop if we see too many. The + // threshold below is arbitrary: adjust if you know better. + will_use_labels = es.add_to_basis(aut, 256 * nap); + } + // Build a correspondence between conjunctions of APs and unsigned // indexes. std::vector num2bdd; num2bdd.reserve(1UL << nap); std::map bdd2num; bdd allap = aut->ap_vars(); - for (bdd one: minterms_of(bddtrue, allap)) - { - bdd2num.emplace(one, num2bdd.size()); - num2bdd.emplace_back(one); - } + + if (!will_use_labels) + for (bdd one: minterms_of(bddtrue, allap)) + { + bdd2num.emplace(one, num2bdd.size()); + num2bdd.emplace_back(one); + } + else + for (bdd one: es.basis()) + { + bdd2num.emplace(one, num2bdd.size()); + num2bdd.emplace_back(one); + } size_t nc = num2bdd.size(); // number of conditions - assert(nc == (1UL << nap)); + assert(will_use_labels || nc == (1UL << nap)); // Conceptually, we represent the automaton as an array 'bv' of // ns*nc bit vectors of size 'ns'. 
Each original state is @@ -172,12 +197,18 @@ namespace spot for (unsigned i = 0; i < nc; ++i) bv->at(base + i).clear_all(); for (auto& t: aut->out(src)) - for (bdd one: minterms_of(t.cond, allap)) - { - unsigned num = bdd2num[one]; - bv->at(base + num).set(t.dst); - } - + if (!will_use_labels) + for (bdd one: minterms_of(t.cond, allap)) + { + unsigned num = bdd2num[one]; + bv->at(base + num).set(t.dst); + } + else + for (bdd one: es.separate_implying(t.cond)) + { + unsigned num = bdd2num[one]; + bv->at(base + num).set(t.dst); + } assert(idx == lru.begin()->first); return idx; }; diff --git a/tests/core/566.test b/tests/core/566.test index 2399576c4..db4796c8e 100755 --- a/tests/core/566.test +++ b/tests/core/566.test @@ -138,3 +138,8 @@ test "$res" = "5 13 85 6 13 12582912" res=`autfilt --complement 21.hoa --stats='%S %E %T %s %e %t'` test "$res" = "5 13 85 5 11 10485760" + +res=`autfilt --small 21.hoa --stats='%S %E %T %s %e %t'` +test "$res" = "5 13 85 2 2 22" + +autcross --language-preserved 'autfilt --small' -F 21.hoa --verbose From cb15840c562e3fae417c87092c641c8c58d0a967 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 19 Mar 2024 21:21:22 +0100 Subject: [PATCH 417/606] org: add an example of conversion to BA format This script was first posted on https://github.com/adl/hoaf/issues/73 * doc/org/tut25.org: New file. * doc/Makefile.am: Add it. * doc/org/tut.org, doc/org/tut21.org, NEWS: Link to it. * doc/org/init.el.in: Install *.py files. * doc/org/.gitignore: Add toba.py. --- NEWS | 5 + doc/Makefile.am | 1 + doc/org/.gitignore | 1 + doc/org/init.el.in | 2 +- doc/org/tut.org | 1 + doc/org/tut21.org | 690 +++++++++++++++++++++++---------------------- doc/org/tut25.org | 285 +++++++++++++++++++ 7 files changed, 641 insertions(+), 344 deletions(-) create mode 100644 doc/org/tut25.org diff --git a/NEWS b/NEWS index 4b20958f4..111c3acb1 100644 --- a/NEWS +++ b/NEWS @@ -192,6 +192,11 @@ New in spot 2.11.6.dev (not yet released) This version of Spot now declares its svg outputs as HTML to prevent Jypyter from wrapping them is images. + Documentation: + + - https://spot.lre.epita.fr/tut25.html is a new example showing + how to print an automaton in the "BA format" (used by Rabbit). + Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/doc/Makefile.am b/doc/Makefile.am index 30c9cfa69..7abe05f9e 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -123,6 +123,7 @@ ORG_FILES = \ org/tut22.org \ org/tut23.org \ org/tut24.org \ + org/tut25.org \ org/tut30.org \ org/tut31.org \ org/tut40.org \ diff --git a/doc/org/.gitignore b/doc/org/.gitignore index 709eb9cc5..0d5e6944a 100644 --- a/doc/org/.gitignore +++ b/doc/org/.gitignore @@ -19,3 +19,4 @@ g++wrap *.fls sitemap.org plantuml.jar +toba.py diff --git a/doc/org/init.el.in b/doc/org/init.el.in index c46363096..53b4f64cd 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -179,7 +179,7 @@ up.html points to index.html, then the result is: :auto-preamble t) ("spot-static" :base-directory "@abs_top_srcdir@/doc/org/" - :base-extension "css\\|js\\|png\\|svg\\|jpg\\|gif\\|pdf" + :base-extension "css\\|js\\|png\\|svg\\|jpg\\|gif\\|pdf\\|py" :publishing-directory "@abs_top_srcdir@/doc/userdoc/" :recursive t :publishing-function org-publish-attachment) diff --git a/doc/org/tut.org b/doc/org/tut.org index c3833ff5d..918bf7fbb 100644 --- a/doc/org/tut.org +++ b/doc/org/tut.org @@ -34,6 +34,7 @@ three interfaces supported by Spot: shell commands, Python, or C++. 
- [[file:tut03.org][Constructing and transforming formulas]] - [[file:tut21.org][Custom print of an automaton]] +- [[file:tut25.org][Printing a Büchi automaton in the "BA format"]] - [[file:tut22.org][Creating an automaton by adding states and transitions]] - [[file:tut23.org][Creating an alternating automaton by adding states and transitions]] - [[file:tut24.org][Iterating over alternating automata]] diff --git a/doc/org/tut21.org b/doc/org/tut21.org index b9281b25e..677736aea 100644 --- a/doc/org/tut21.org +++ b/doc/org/tut21.org @@ -21,9 +21,9 @@ destination states, $\mathit{cond}$ is a BDD representing the label The interface available for those graph-based automata allows random access to any state of the graph, hence the code given bellow can do a simple loop over all states of the automaton. Spot also supports a -different kind of interface (not demonstrated here) to -[[file:tut50.org][iterate over automata that are constructed -on-the-fly]] and where such a loop would be impossible. +different kind of interface (not demonstrated here) to [[file:tut50.org][iterate over +automata that are constructed on-the-fly]] and where such a loop would +be impossible. First let's create an example automaton in HOA format. We use =-U= to request unambiguous automata, as this allows us to demonstrate how @@ -47,98 +47,98 @@ properties: stutter-invariant State: 0 [0] 1 [!0] 2 -[!0&1&2] 3 -[!0&!1&2] 4 -[!0&!2] 5 -[!0&!2] 6 +[!0&!2] 3 +[!0&!2] 4 +[!0&!1&2] 5 +[!0&1&2] 6 State: 1 [t] 1 {0 1} State: 2 [!1&!2] 2 -[!1&2] 2 {1} [1&!2] 2 {0} +[!1&2] 2 {1} [1&2] 2 {0 1} State: 3 -[!0&1&2] 3 -[!0&!1&2] 4 -[!0&!2] 5 -[!0&!2] 6 -[0&!2] 7 -[0&!1&2] 8 -[0&1&2] 9 -[0&!1&2] 10 -[0&1&!2] 11 -[0&!1&!2] 12 +[!0&!2] 3 +[!0&!1&2] 5 +[!0&1&2] 6 +[0&1&2] 7 +[0&!1&!2] 8 +[0&!1&2] 9 +[0&1&!2] 10 +[0&!1&2] 12 [0&!1&!2] 13 State: 4 -[!0&1&2] 3 -[!0&!1&2] 4 -[!0&!2] 5 -[!0&1&!2] 6 -[0&1&!2] 7 -[0&!1] 8 -[0&1&2] 9 -[0&!1&2] 10 -[0&1&!2] 11 -[0&!1&!2] 12 -[0&!1&!2] 14 -[!0&!1&!2] 15 +[!0&!2] 4 +[0&!2] 11 State: 5 -[!0&1&2] 3 -[!0&!1&2] 4 -[!0&!2] 5 -[0&!1&2] 8 -[0&1&2] 9 -[0&!1&2] 10 +[!0&!2] 3 +[!0&1&!2] 4 +[!0&!1&2] 5 +[!0&1&2] 6 +[0&1&2] 7 +[0&!1&!2] 8 +[0&!1&2] 9 +[0&1&!2] 10 [0&1&!2] 11 -[0&!1&!2] 12 -[0&!1&!2] 13 +[0&!1] 12 +[!0&!1&!2] 14 +[0&!1&!2] 15 State: 6 -[!0&!2] 6 -[0&!2] 7 +[!0&!2] 3 +[!0&!2] 4 +[!0&!1&2] 5 +[!0&1&2] 6 +[0&1&2] 7 +[0&!1&!2] 8 +[0&!1&2] 9 +[0&1&!2] 10 +[0&!2] 11 +[0&!1&2] 12 +[0&!1&!2] 13 State: 7 -[!2] 7 {0 1} +[1&2] 7 +[!1&!2] 8 +[!1&2] 9 +[1&!2] 10 +[!2] 11 +[!1&2] 12 +[!1&!2] 13 State: 8 -[!1] 8 {0 1} +[1&2] 7 +[!1&!2] 8 +[!1&2] 9 +[1&!2] 10 State: 9 -[!2] 7 -[!1&2] 8 -[1&2] 9 -[!1&2] 10 +[1&2] 7 +[!1&!2] 8 +[!1&2] 9 +[1&!2] 10 [1&!2] 11 -[!1&!2] 12 -[!1&!2] 13 +[!1&!2] 15 State: 10 -[1&!2] 7 -[1&2] 9 -[!1&2] 10 -[1&!2] 11 -[!1&!2] 12 -[!1&!2] 14 -State: 11 -[!1&2] 8 -[1&2] 9 -[!1&2] 10 -[1&!2] 11 -[!1&!2] 12 +[1&2] 7 +[!1&!2] 8 +[!1&2] 9 +[1&!2] 10 +[!1&2] 12 [!1&!2] 13 +State: 11 +[!2] 11 {0 1} State: 12 -[1&2] 9 -[!1&2] 10 -[1&!2] 11 -[!1&!2] 12 +[!1] 12 {0 1} State: 13 -[!1&2] 8 +[!1&2] 12 [!1&!2] 13 State: 14 -[1&!2] 7 -[!1&!2] 14 -State: 15 -[!0&1&!2] 6 -[0&1&!2] 7 -[0&!1&!2] 14 -[!0&!1&!2] 15 +[!0&1&!2] 4 +[0&1&!2] 11 +[!0&!1&!2] 14 +[0&!1&!2] 15 [0&!1&!2] 16 +State: 15 +[1&!2] 11 +[!1&!2] 15 State: 16 [!1&!2] 16 {0 1} --END-- @@ -166,7 +166,6 @@ corresponding BDD variable number, and then use for instance #include #include #include - #include #include void custom_print(std::ostream& out, spot::twa_graph_ptr& aut); @@ -273,16 +272,16 @@ State 0: label = !a acc sets = {} edge(0 -> 3) - 
label = !a & b & c + label = !a & !c acc sets = {} edge(0 -> 4) - label = !a & !b & c + label = !a & !c acc sets = {} edge(0 -> 5) - label = !a & !c + label = !a & !b & c acc sets = {} edge(0 -> 6) - label = !a & !c + label = !a & b & c acc sets = {} State 1: edge(1 -> 1) @@ -292,232 +291,232 @@ State 2: edge(2 -> 2) label = !b & !c acc sets = {} - edge(2 -> 2) - label = !b & c - acc sets = {1} edge(2 -> 2) label = b & !c acc sets = {0} + edge(2 -> 2) + label = !b & c + acc sets = {1} edge(2 -> 2) label = b & c acc sets = {0,1} State 3: edge(3 -> 3) - label = !a & b & c - acc sets = {} - edge(3 -> 4) - label = !a & !b & c + label = !a & !c acc sets = {} edge(3 -> 5) - label = !a & !c + label = !a & !b & c acc sets = {} edge(3 -> 6) - label = !a & !c + label = !a & b & c acc sets = {} edge(3 -> 7) - label = a & !c - acc sets = {} - edge(3 -> 8) - label = a & !b & c - acc sets = {} - edge(3 -> 9) label = a & b & c acc sets = {} - edge(3 -> 10) + edge(3 -> 8) + label = a & !b & !c + acc sets = {} + edge(3 -> 9) label = a & !b & c acc sets = {} - edge(3 -> 11) + edge(3 -> 10) label = a & b & !c acc sets = {} edge(3 -> 12) - label = a & !b & !c + label = a & !b & c acc sets = {} edge(3 -> 13) label = a & !b & !c acc sets = {} State 4: - edge(4 -> 3) - label = !a & b & c - acc sets = {} edge(4 -> 4) - label = !a & !b & c - acc sets = {} - edge(4 -> 5) label = !a & !c acc sets = {} - edge(4 -> 6) - label = !a & b & !c - acc sets = {} - edge(4 -> 7) - label = a & b & !c - acc sets = {} - edge(4 -> 8) - label = a & !b - acc sets = {} - edge(4 -> 9) - label = a & b & c - acc sets = {} - edge(4 -> 10) - label = a & !b & c - acc sets = {} edge(4 -> 11) - label = a & b & !c - acc sets = {} - edge(4 -> 12) - label = a & !b & !c - acc sets = {} - edge(4 -> 14) - label = a & !b & !c - acc sets = {} - edge(4 -> 15) - label = !a & !b & !c + label = a & !c acc sets = {} State 5: edge(5 -> 3) - label = !a & b & c - acc sets = {} - edge(5 -> 4) - label = !a & !b & c - acc sets = {} - edge(5 -> 5) label = !a & !c acc sets = {} - edge(5 -> 8) - label = a & !b & c + edge(5 -> 4) + label = !a & b & !c acc sets = {} - edge(5 -> 9) + edge(5 -> 5) + label = !a & !b & c + acc sets = {} + edge(5 -> 6) + label = !a & b & c + acc sets = {} + edge(5 -> 7) label = a & b & c acc sets = {} - edge(5 -> 10) + edge(5 -> 8) + label = a & !b & !c + acc sets = {} + edge(5 -> 9) label = a & !b & c acc sets = {} + edge(5 -> 10) + label = a & b & !c + acc sets = {} edge(5 -> 11) label = a & b & !c acc sets = {} edge(5 -> 12) - label = a & !b & !c + label = a & !b acc sets = {} - edge(5 -> 13) + edge(5 -> 14) + label = !a & !b & !c + acc sets = {} + edge(5 -> 15) label = a & !b & !c acc sets = {} State 6: - edge(6 -> 6) + edge(6 -> 3) label = !a & !c acc sets = {} + edge(6 -> 4) + label = !a & !c + acc sets = {} + edge(6 -> 5) + label = !a & !b & c + acc sets = {} + edge(6 -> 6) + label = !a & b & c + acc sets = {} edge(6 -> 7) + label = a & b & c + acc sets = {} + edge(6 -> 8) + label = a & !b & !c + acc sets = {} + edge(6 -> 9) + label = a & !b & c + acc sets = {} + edge(6 -> 10) + label = a & b & !c + acc sets = {} + edge(6 -> 11) label = a & !c acc sets = {} + edge(6 -> 12) + label = a & !b & c + acc sets = {} + edge(6 -> 13) + label = a & !b & !c + acc sets = {} State 7: edge(7 -> 7) - label = !c - acc sets = {0,1} -State 8: - edge(8 -> 8) - label = !b - acc sets = {0,1} -State 9: - edge(9 -> 7) - label = !c - acc sets = {} - edge(9 -> 8) - label = !b & c - acc sets = {} - edge(9 -> 9) label = b & c acc sets = {} - edge(9 -> 10) 
+ edge(7 -> 8) + label = !b & !c + acc sets = {} + edge(7 -> 9) label = !b & c acc sets = {} + edge(7 -> 10) + label = b & !c + acc sets = {} + edge(7 -> 11) + label = !c + acc sets = {} + edge(7 -> 12) + label = !b & c + acc sets = {} + edge(7 -> 13) + label = !b & !c + acc sets = {} +State 8: + edge(8 -> 7) + label = b & c + acc sets = {} + edge(8 -> 8) + label = !b & !c + acc sets = {} + edge(8 -> 9) + label = !b & c + acc sets = {} + edge(8 -> 10) + label = b & !c + acc sets = {} +State 9: + edge(9 -> 7) + label = b & c + acc sets = {} + edge(9 -> 8) + label = !b & !c + acc sets = {} + edge(9 -> 9) + label = !b & c + acc sets = {} + edge(9 -> 10) + label = b & !c + acc sets = {} edge(9 -> 11) label = b & !c acc sets = {} - edge(9 -> 12) - label = !b & !c - acc sets = {} - edge(9 -> 13) + edge(9 -> 15) label = !b & !c acc sets = {} State 10: edge(10 -> 7) - label = b & !c - acc sets = {} - edge(10 -> 9) label = b & c acc sets = {} - edge(10 -> 10) + edge(10 -> 8) + label = !b & !c + acc sets = {} + edge(10 -> 9) label = !b & c acc sets = {} - edge(10 -> 11) + edge(10 -> 10) label = b & !c acc sets = {} edge(10 -> 12) - label = !b & !c + label = !b & c acc sets = {} - edge(10 -> 14) + edge(10 -> 13) label = !b & !c acc sets = {} State 11: - edge(11 -> 8) - label = !b & c - acc sets = {} - edge(11 -> 9) - label = b & c - acc sets = {} - edge(11 -> 10) - label = !b & c - acc sets = {} edge(11 -> 11) - label = b & !c - acc sets = {} - edge(11 -> 12) - label = !b & !c - acc sets = {} - edge(11 -> 13) - label = !b & !c - acc sets = {} + label = !c + acc sets = {0,1} State 12: - edge(12 -> 9) - label = b & c - acc sets = {} - edge(12 -> 10) - label = !b & c - acc sets = {} - edge(12 -> 11) - label = b & !c - acc sets = {} edge(12 -> 12) - label = !b & !c - acc sets = {} + label = !b + acc sets = {0,1} State 13: - edge(13 -> 8) + edge(13 -> 12) label = !b & c acc sets = {} edge(13 -> 13) label = !b & !c acc sets = {} State 14: - edge(14 -> 7) - label = b & !c - acc sets = {} - edge(14 -> 14) - label = !b & !c - acc sets = {} -State 15: - edge(15 -> 6) + edge(14 -> 4) label = !a & b & !c acc sets = {} - edge(15 -> 7) + edge(14 -> 11) label = a & b & !c acc sets = {} - edge(15 -> 14) - label = a & !b & !c - acc sets = {} - edge(15 -> 15) + edge(14 -> 14) label = !a & !b & !c acc sets = {} - edge(15 -> 16) + edge(14 -> 15) label = a & !b & !c acc sets = {} + edge(14 -> 16) + label = a & !b & !c + acc sets = {} +State 15: + edge(15 -> 11) + label = b & !c + acc sets = {} + edge(15 -> 15) + label = !b & !c + acc sets = {} State 16: edge(16 -> 16) label = !b & !c @@ -594,16 +593,16 @@ State 0: label = !a acc sets = {} edge(0 -> 3) - label = !a & b & c + label = !a & !c acc sets = {} edge(0 -> 4) - label = !a & !b & c + label = !a & !c acc sets = {} edge(0 -> 5) - label = !a & !c + label = !a & !b & c acc sets = {} edge(0 -> 6) - label = !a & !c + label = !a & b & c acc sets = {} State 1: edge(1 -> 1) @@ -613,232 +612,232 @@ State 2: edge(2 -> 2) label = !b & !c acc sets = {} - edge(2 -> 2) - label = !b & c - acc sets = {1} edge(2 -> 2) label = b & !c acc sets = {0} + edge(2 -> 2) + label = !b & c + acc sets = {1} edge(2 -> 2) label = b & c acc sets = {0,1} State 3: edge(3 -> 3) - label = !a & b & c - acc sets = {} - edge(3 -> 4) - label = !a & !b & c + label = !a & !c acc sets = {} edge(3 -> 5) - label = !a & !c + label = !a & !b & c acc sets = {} edge(3 -> 6) - label = !a & !c + label = !a & b & c acc sets = {} edge(3 -> 7) - label = a & !c - acc sets = {} - edge(3 -> 8) - label = a & !b & c - acc 
sets = {} - edge(3 -> 9) label = a & b & c acc sets = {} - edge(3 -> 10) + edge(3 -> 8) + label = a & !b & !c + acc sets = {} + edge(3 -> 9) label = a & !b & c acc sets = {} - edge(3 -> 11) + edge(3 -> 10) label = a & b & !c acc sets = {} edge(3 -> 12) - label = a & !b & !c + label = a & !b & c acc sets = {} edge(3 -> 13) label = a & !b & !c acc sets = {} State 4: - edge(4 -> 3) - label = !a & b & c - acc sets = {} edge(4 -> 4) - label = !a & !b & c - acc sets = {} - edge(4 -> 5) label = !a & !c acc sets = {} - edge(4 -> 6) - label = !a & b & !c - acc sets = {} - edge(4 -> 7) - label = a & b & !c - acc sets = {} - edge(4 -> 8) - label = a & !b - acc sets = {} - edge(4 -> 9) - label = a & b & c - acc sets = {} - edge(4 -> 10) - label = a & !b & c - acc sets = {} edge(4 -> 11) - label = a & b & !c - acc sets = {} - edge(4 -> 12) - label = a & !b & !c - acc sets = {} - edge(4 -> 14) - label = a & !b & !c - acc sets = {} - edge(4 -> 15) - label = !a & !b & !c + label = a & !c acc sets = {} State 5: edge(5 -> 3) - label = !a & b & c - acc sets = {} - edge(5 -> 4) - label = !a & !b & c - acc sets = {} - edge(5 -> 5) label = !a & !c acc sets = {} - edge(5 -> 8) - label = a & !b & c + edge(5 -> 4) + label = !a & b & !c acc sets = {} - edge(5 -> 9) + edge(5 -> 5) + label = !a & !b & c + acc sets = {} + edge(5 -> 6) + label = !a & b & c + acc sets = {} + edge(5 -> 7) label = a & b & c acc sets = {} - edge(5 -> 10) + edge(5 -> 8) + label = a & !b & !c + acc sets = {} + edge(5 -> 9) label = a & !b & c acc sets = {} + edge(5 -> 10) + label = a & b & !c + acc sets = {} edge(5 -> 11) label = a & b & !c acc sets = {} edge(5 -> 12) - label = a & !b & !c + label = a & !b acc sets = {} - edge(5 -> 13) + edge(5 -> 14) + label = !a & !b & !c + acc sets = {} + edge(5 -> 15) label = a & !b & !c acc sets = {} State 6: - edge(6 -> 6) + edge(6 -> 3) label = !a & !c acc sets = {} + edge(6 -> 4) + label = !a & !c + acc sets = {} + edge(6 -> 5) + label = !a & !b & c + acc sets = {} + edge(6 -> 6) + label = !a & b & c + acc sets = {} edge(6 -> 7) + label = a & b & c + acc sets = {} + edge(6 -> 8) + label = a & !b & !c + acc sets = {} + edge(6 -> 9) + label = a & !b & c + acc sets = {} + edge(6 -> 10) + label = a & b & !c + acc sets = {} + edge(6 -> 11) label = a & !c acc sets = {} + edge(6 -> 12) + label = a & !b & c + acc sets = {} + edge(6 -> 13) + label = a & !b & !c + acc sets = {} State 7: edge(7 -> 7) - label = !c - acc sets = {0,1} -State 8: - edge(8 -> 8) - label = !b - acc sets = {0,1} -State 9: - edge(9 -> 7) - label = !c - acc sets = {} - edge(9 -> 8) - label = !b & c - acc sets = {} - edge(9 -> 9) label = b & c acc sets = {} - edge(9 -> 10) + edge(7 -> 8) + label = !b & !c + acc sets = {} + edge(7 -> 9) label = !b & c acc sets = {} + edge(7 -> 10) + label = b & !c + acc sets = {} + edge(7 -> 11) + label = !c + acc sets = {} + edge(7 -> 12) + label = !b & c + acc sets = {} + edge(7 -> 13) + label = !b & !c + acc sets = {} +State 8: + edge(8 -> 7) + label = b & c + acc sets = {} + edge(8 -> 8) + label = !b & !c + acc sets = {} + edge(8 -> 9) + label = !b & c + acc sets = {} + edge(8 -> 10) + label = b & !c + acc sets = {} +State 9: + edge(9 -> 7) + label = b & c + acc sets = {} + edge(9 -> 8) + label = !b & !c + acc sets = {} + edge(9 -> 9) + label = !b & c + acc sets = {} + edge(9 -> 10) + label = b & !c + acc sets = {} edge(9 -> 11) label = b & !c acc sets = {} - edge(9 -> 12) - label = !b & !c - acc sets = {} - edge(9 -> 13) + edge(9 -> 15) label = !b & !c acc sets = {} State 10: edge(10 -> 7) - label = b 
& !c - acc sets = {} - edge(10 -> 9) label = b & c acc sets = {} - edge(10 -> 10) + edge(10 -> 8) + label = !b & !c + acc sets = {} + edge(10 -> 9) label = !b & c acc sets = {} - edge(10 -> 11) + edge(10 -> 10) label = b & !c acc sets = {} edge(10 -> 12) - label = !b & !c + label = !b & c acc sets = {} - edge(10 -> 14) + edge(10 -> 13) label = !b & !c acc sets = {} State 11: - edge(11 -> 8) - label = !b & c - acc sets = {} - edge(11 -> 9) - label = b & c - acc sets = {} - edge(11 -> 10) - label = !b & c - acc sets = {} edge(11 -> 11) - label = b & !c - acc sets = {} - edge(11 -> 12) - label = !b & !c - acc sets = {} - edge(11 -> 13) - label = !b & !c - acc sets = {} + label = !c + acc sets = {0,1} State 12: - edge(12 -> 9) - label = b & c - acc sets = {} - edge(12 -> 10) - label = !b & c - acc sets = {} - edge(12 -> 11) - label = b & !c - acc sets = {} edge(12 -> 12) - label = !b & !c - acc sets = {} + label = !b + acc sets = {0,1} State 13: - edge(13 -> 8) + edge(13 -> 12) label = !b & c acc sets = {} edge(13 -> 13) label = !b & !c acc sets = {} State 14: - edge(14 -> 7) - label = b & !c - acc sets = {} - edge(14 -> 14) - label = !b & !c - acc sets = {} -State 15: - edge(15 -> 6) + edge(14 -> 4) label = !a & b & !c acc sets = {} - edge(15 -> 7) + edge(14 -> 11) label = a & b & !c acc sets = {} - edge(15 -> 14) - label = a & !b & !c - acc sets = {} - edge(15 -> 15) + edge(14 -> 14) label = !a & !b & !c acc sets = {} - edge(15 -> 16) + edge(14 -> 15) label = a & !b & !c acc sets = {} + edge(14 -> 16) + label = a & !b & !c + acc sets = {} +State 15: + edge(15 -> 11) + label = b & !c + acc sets = {} + edge(15 -> 15) + label = !b & !c + acc sets = {} State 16: edge(16 -> 16) label = !b & !c @@ -849,6 +848,11 @@ State 16: rm -f tut21.hoa #+END_SRC +* Going further + +As another example of printing an autoamton, see our page about +[[file:tut25.org][printing a Büchi automaton in "BA format"]]. + # LocalWords: utf html args mathit src dst cond accsets tgba Fb Fc # LocalWords: acc Buchi BDDs bdd ap ithvar aut const num init bdict # LocalWords: varnum sep Templated diff --git a/doc/org/tut25.org b/doc/org/tut25.org new file mode 100644 index 000000000..c1ee1ecba --- /dev/null +++ b/doc/org/tut25.org @@ -0,0 +1,285 @@ +# -*- coding: utf-8 -*- +#+TITLE: Printing an automaton in "BA format" +#+DESCRIPTION: Code example for converting HOA into BA format +#+INCLUDE: setup.org +#+HTML_LINK_UP: tut.html +#+PROPERTY: header-args:sh :results verbatim :exports both +#+PROPERTY: header-args:python :results output :exports both +#+PROPERTY: header-args:C+++ :results verbatim + +The [[https://languageinclusion.org/doku.php?id=tools#the_ba_format][BA format]] is a textual representation of a Büchi automaton with +letter-based alphabet, and supported by tools like [[https://languageinclusion.org/doku.php?id=tools][RABIT]] or [[http://goal.im.ntu.edu.tw/wiki/doku.php][Goal]]. 
It +looks as follows: + +#+BEGIN_SRC dot :file tut25-aut.svg :exports results +digraph "" { + rankdir=LR + label=<[Büchi]> + labelloc="t" + node [shape="circle"] + node [style="filled", fillcolor="#ffffa0"] + fontname="Lato" + node [fontname="Lato"] + edge [fontname="Lato"] + node[fontsize=12] fontsize=12 stylesheet="spot.css" edge[arrowhead=vee, arrowsize=.7, fontsize=12] + I [label="", style=invis, width=0] + I -> 1 + 1 [label=, peripheries=2] + 2 [label=, peripheries=2] + 3 [label=] + 1 -> 2 [label=<ℓ₁>] + 2 -> 1 [label=<ℓ₃>] + 2 -> 3 [label=<ℓ₂>] + 3 -> 1 [label=<ℓ₃>] +} +#+END_SRC + +#+RESULTS: +[[file:tut25-aut.svg]] + +#+begin_example +s₁ +ℓ₁,s₁->s₂ +ℓ₃,s2->s1 +ℓ₂,s2->s3 +ℓ₃,s3->s1 +s₁ +s₂ +#+end_example + +The first line, ~s₁~ represents the initial state, the next block of +lines of the form ~letters,src->dst~ represent the transitions of the +automaton. End the last block of lines (containing ~s₁~ and ~s₂~ in +the above example), lists the accepting states of the automaton. + +In this format, the letters and the state can be arbitrary strings +that do not include the characters ~,~ or ~-~, or ~>~. The initial +state can be omitted (the source of the first transition is then +assumed to be initial), and the list of accepting states may be empty. + +Spot has no support for letter-based alphabet (instead it uses boolean +formulas over a set of atomtic propositions), so this format does not +really make any sense. + +As an example of [[file:tut21.org][how to custom print an automaton]], let us write a +small tool that will convert any Büchi automaton that Spot can read +(e.g., a neverclaim from Spin, or an HOA file) into this "BA format". + +Consider the following Büchi automaton obtained from the LTL formula +=a W G(b->c)=. + +#+NAME: tut25ex1 +#+BEGIN_SRC sh :exports code +ltl2tgba -B "a W G(b->c)" -d +#+END_SRC +#+BEGIN_SRC dot :file tut25ex1.svg :var txt=tut25ex1 :exports results +$txt +#+END_SRC + +#+RESULTS: +[[file:tut25ex1.svg]] + +To create letters out of those formula labels, one trick is to split the transitions over +the $2^{\{a,b,c\}}$ possible valuations. + +#+NAME: tut25ex2 +#+BEGIN_SRC sh :exports code +ltl2tgba -B "a W G(b->c)" | autfilt --split-edges -d +#+END_SRC +#+BEGIN_SRC dot :file tut25ex2.svg :var txt=tut25ex2 :exports results +$txt +#+END_SRC + +#+RESULTS: +[[file:tut25ex2.svg]] + +Then each label can now be considered as a letter. + +* Convertion in Python + + +#+NAME: toba.py +#+begin_src python :exports code :eval no + #!/usr/bin/env python3 + import spot, sys + + # Read the input automaton from standard input, or from a supplied filename. + argc = len(sys.argv) + if argc < 2: + filename = "-" + elif argc == 2: + filename = sys.argv[1] + else: + print("pass a single filename, or pipe to stdin", file=sys.stderr) + exit(1) + + aut = spot.automaton(filename) + + # Make sure the acceptance condition is Büchi. Alternatively, + # allow "t" acceptance (where every state is accepting), since we + # can interpret this as a Büchi automaton in which all states are + # marked as accepting. + acc = aut.acc() + if not (acc.is_buchi() or acc.is_t()): + print(f"unsupported acceptance: {acc.get_acceptance()}", file=sys.stderr) + exit(1) + + # Transition-based acceptance is not supported by this format; + # convert to state-based if it isn't already. 
+ aut = spot.sbacc(aut) + + # We want one minterm per edge, as those will become letters + aut = spot.split_edges(aut) + + # Now simply output the automaton in the BA format + print(aut.get_init_state_number()) + for e in aut.edges(): + print(f"{e.cond.id()},{e.src}->{e.dst}") + for s in range(aut.num_states()): + if acc.accepting(aut.state_acc_sets(s)): + print(s) +#+end_src + +#+RESULTS: + +Let's assume the above script has been saved as [[file:toba.py][=toba.py=]]. + +#+begin_src sh :noweb yes :results silent :exports results +cat >toba.py <<'EOF' +<> +EOF +chmod 0755 toba.py +#+end_src + +We can now convert our previous example in BA format. + +#+begin_src sh +ltl2tgba -B "a W G(b->c)" | ./toba.py +#+end_src + +#+RESULTS: +#+begin_example +1 +19,0->0 +21,0->0 +22,0->0 +23,0->0 +24,0->0 +10,0->0 +19,1->0 +21,1->0 +22,1->0 +23,1->1 +24,1->1 +25,1->1 +10,1->1 +0 +1 +#+end_example + +The BDD ~e.cond~ that encodes the Boolean formula labels each edge ~e~ +have been printed using ~e.cond.id()~: this is the integer identifier +that uniquely denote each formula. This identifier is good enough to +make letters unique and keep the file short. However, if you prefer to +print the formula instead, replace =e.cond.id()= by +=spot.bdd_format_formula(aut.get_dict(), e.cond)=. + +* Conversion in C++ + +Here is a C++ function that prints =aut= on =out= in BA format, using +the same logic as in the previous section. + +#+NAME: printba +#+BEGIN_SRC C++ + #include + #include + #include + #include + + void print_ba_format(std::ostream& out, spot::twa_graph_ptr aut) + { + // The input should have Büchi acceptance. Alternatively, + // allow "t" acceptance since we can interpret this as a Büchi automaton + // where all states are accepting. + const spot::acc_cond& acc = aut->acc(); + if (!(acc.is_buchi() || acc.is_t())) + throw std::runtime_error("unsupported acceptance condition"); + + // The BA format only support state-based acceptance, so get rid + // of transition-based acceptance if we have some. + aut = spot::sbacc(aut); + + // We want one minterm per edge, as those will become letters + aut = spot::split_edges(aut); + + out << aut->get_init_state_number() << '\n'; + for (auto& e: aut->edges()) + out << e.cond.id() << ',' << e.src << "->" << e.dst << '\n'; + + unsigned ns = aut->num_states(); + for (unsigned s = 0; s < ns; ++s) + if (acc.accepting(aut->state_acc_sets(s))) + out << s << '\n'; + } +#+END_SRC + +#+begin_src sh :results silent :exports results +ltl2tgba -B "a W G(b->c)" >tut25.hoa +#+end_src + +Now what remains to be done is to read some input automaton, so we +can print it: + +#+NAME: maincpp +#+BEGIN_SRC C++ :noweb strip-export :cmdline "tut25.hoa" :exports results :exports both + #include + <> + int main(int argc, const char** argv) + { + if (argc > 2) + { + std::cerr << "pass a single filename, or pipe to stdin\n"; + return 1; + } + const char* filename = "-"; + if (argc == 2) + filename = argv[1]; + spot::parsed_aut_ptr pa = parse_aut(filename, spot::make_bdd_dict()); + if (pa->format_errors(std::cerr)) + return 1; + if (pa->aborted) + { + std::cerr << "--ABORT-- read\n"; + return 1; + } + print_ba_format(std::cout, pa->aut); + return 0; + } +#+END_SRC + +Unsurprisingly running the above code on our example automaton +produces the same output. 
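+(As in the Python version, you can make each letter readable by using
+=spot::bdd_format_formula(aut->get_dict(), e.cond)= instead of
+=e.cond.id()= in =print_ba_format()=.)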
+ +#+RESULTS: maincpp +#+begin_example +1 +19,0->0 +21,0->0 +22,0->0 +23,0->0 +24,0->0 +10,0->0 +19,1->0 +21,1->0 +22,1->0 +23,1->1 +24,1->1 +25,1->1 +10,1->1 +0 +1 +#+end_example + +#+begin_src sh :results silent :exports results +rm -f tut25.hoa +#+end_src From 03a4f01184d365acd1b5ffa454d1b49f08ce3a9a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 21 Mar 2024 22:18:15 +0100 Subject: [PATCH 418/606] acc: work around a Swig 4.2.1 bug Pierre Ganty wrote that he could not compile Spot with Swig 4.2.1 anymore, and when I upgraded from 4.2.0 to 4.2.1 I could not either. It seems that declaring operator<< as friends in subclasses is confusing Swig 4.2.1. See https://github.com/swig/swig/issues/2845 * spot/twa/acc.cc, spot/twa/acc.hh: Declare operator<< for acc_cond::mark_t and acc_cond::acc_code outside the class, so that we do not need friend declarations. --- spot/twa/acc.cc | 29 +++++++++++++++++++++++++++-- spot/twa/acc.hh | 41 ++++++++++++++++------------------------- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index fb7373ead..74b345dc8 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -48,7 +48,7 @@ namespace spot " ."); } - std::ostream& operator<<(std::ostream& os, spot::acc_cond::mark_t m) + std::ostream& operator<<(std::ostream& os, acc_cond::mark_t m) { auto a = m; os << '{'; @@ -68,6 +68,31 @@ namespace spot return os; } + std::string acc_cond::mark_t::as_string() const + { + std::ostringstream os; + os << *this; + return os.str(); + } + + // Deprecated since Spot 2.8 + std::ostream& acc_cond::format(std::ostream& os, + acc_cond::mark_t m) const + { + if (!m) + return os; + return os << m; + } + + // Deprecated since Spot 2.8 + std::string acc_cond::format(acc_cond::mark_t m) const + { + std::ostringstream os; + if (m) + os << m; + return os.str(); + } + std::ostream& operator<<(std::ostream& os, const acc_cond& acc) { return os << '(' << acc.num_sets() << ", " << acc.get_acceptance() << ')'; @@ -1922,7 +1947,7 @@ namespace spot std::ostream& operator<<(std::ostream& os, - const spot::acc_cond::acc_code& code) + const acc_cond::acc_code& code) { return code.to_text(os); } diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 069b9de07..02fc8f5e0 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -430,15 +430,7 @@ namespace spot /// Returns some iterable object that contains the used sets. spot::internal::mark_container sets() const; - SPOT_API - friend std::ostream& operator<<(std::ostream& os, mark_t m); - - std::string as_string() const - { - std::ostringstream os; - os << *this; - return os.str(); - } + std::string as_string() const; }; /// \brief Operators for acceptance formulas. @@ -1491,9 +1483,6 @@ namespace spot { } - /// \brief prints the acceptance formula as text - SPOT_API - friend std::ostream& operator<<(std::ostream& os, const acc_code& code); }; /// \brief Build an acceptance condition @@ -2036,22 +2025,11 @@ namespace spot // Deprecated since Spot 2.8 SPOT_DEPRECATED("Use operator<< instead.") - std::ostream& format(std::ostream& os, mark_t m) const - { - if (!m) - return os; - return os << m; - } + std::ostream& format(std::ostream& os, mark_t m) const; // Deprecated since Spot 2.8 SPOT_DEPRECATED("Use operator<< or mark_t::as_string() instead.") - std::string format(mark_t m) const - { - std::ostringstream os; - if (m) - os << m; - return os.str(); - } + std::string format(mark_t m) const; /// \brief The number of sets used in the acceptance condition. 
unsigned num_sets() const @@ -2380,6 +2358,19 @@ namespace spot SPOT_API std::ostream& operator<<(std::ostream& os, const acc_cond& acc); + // The next two operators used to be declared as friend inside the + // acc_cond::mark_t and acc_cond::acc_code, but Swig 4.2.1 + // introduced a bug with friend operators. See + // https://github.com/swig/swig/issues/2845 + + SPOT_API + std::ostream& operator<<(std::ostream& os, acc_cond::mark_t m); + + /// \brief prints the acceptance formula as text + SPOT_API + std::ostream& operator<<(std::ostream& os, + const acc_cond::acc_code& code); + /// @} namespace internal From 7e228e86ee0b7b6e41d9c738e7bfcc1a571ac795 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 22 Mar 2024 14:41:42 +0100 Subject: [PATCH 419/606] hoa: add option 'b' to build an alias-based basis for all labels Related to issue #563. * spot/twaalgos/hoa.hh (create_alias_basis): New function. * spot/twaalgos/hoa.cc (create_alias_basis): New function. (print_hoa): Add support for option 'b' and create_alias_basis in this case. * bin/common_aoutput.cc, NEWS: Document -Hb. * tests/core/readsave.test, tests/python/aliases.py: Add test cases. --- NEWS | 7 ++++++ bin/common_aoutput.cc | 1 + spot/twaalgos/hoa.cc | 46 ++++++++++++++++++++++++++++++++++++++-- spot/twaalgos/hoa.hh | 20 +++++++++++++++-- tests/core/readsave.test | 22 ++++++++++++------- tests/python/aliases.py | 45 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 129 insertions(+), 12 deletions(-) diff --git a/NEWS b/NEWS index 111c3acb1..1af53cda2 100644 --- a/NEWS +++ b/NEWS @@ -12,6 +12,13 @@ New in spot 2.11.6.dev (not yet released) autfilt input.hoa -o output-%l.hoa + - For tools that produce automata, using -Hb or --hoa=b will produce + an HOA file in which aliases are used to form a basis for the + whole set of labels. Those aliases are only used when more than + one atomic proposition is used (otherwise, the atomic proposition + and its negation is already a basis). This can help reducing the + size of large HOA files. + - ltlfilt has a new option --relabel-overlapping-bool=abc|pnn that will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index ad221812e..f5ba8d625 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -132,6 +132,7 @@ static const argp_option options[] = { "hoaf", 'H', "1.1|i|k|l|m|s|t|v", OPTION_ARG_OPTIONAL, "Output the automaton in HOA format (default). 
Add letters to select " "(1.1) version 1.1 of the format, " + "(b) create an alias basis if >=2 AP are used, " "(i) use implicit labels for complete deterministic automata, " "(s) prefer state-based acceptance when possible [default], " "(t) force transition-based acceptance, " diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc index 644d8f166..9839955f7 100644 --- a/spot/twaalgos/hoa.cc +++ b/spot/twaalgos/hoa.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -30,6 +31,7 @@ #include #include #include +#include using namespace std::string_literals; @@ -70,7 +72,8 @@ namespace spot if (bdd_is_cube(a)) alias_cubes_.emplace_back(a, i); bdd neg = !a; - aliases_map_[neg.id()] = i; + // do not overwrite an existing alias with a negation + aliases_map_.emplace(neg.id(), i); if (bdd_is_cube(neg)) alias_cubes_.emplace_back(neg, i); } @@ -464,6 +467,7 @@ namespace spot bool verbose = false; bool state_labels = false; bool v1_1 = false; + bool alias_basis = false; if (opt) while (*opt) @@ -486,6 +490,9 @@ namespace spot v1_1 = false; } break; + case 'b': + alias_basis = true; + break; case 'i': implicit_labels = true; break; @@ -520,6 +527,27 @@ namespace spot throw std::runtime_error("print_hoa(): automaton is declared not weak, " "but the acceptance makes this impossible"); + // If we were asked to create an alias basis, make sure we save + // existing aliases, so we can restore it before we exit this + // function. + std::vector> old_aliases; + if (aut->ap().size() <= 1) + alias_basis = false; + if (alias_basis) + { + if (auto* aliases = get_aliases(aut)) + old_aliases = *aliases; + create_alias_basis(std::const_pointer_cast(aut)); + } + // restore the old aliases using a unique_ptr-based scope guard, + // because there are too many ways to exit this function. + auto restore_aliases = [&old_aliases, alias_basis, aut](void*) { + if (alias_basis) + set_aliases(std::const_pointer_cast(aut), old_aliases); + }; + std::unique_ptr + restore_aliases_guard((void*)1, restore_aliases); + metadata md(aut, implicit_labels, state_labels); if (acceptance == Hoa_Acceptance_States && !md.has_state_acc) @@ -1013,7 +1041,8 @@ namespace spot } void - set_aliases(twa_ptr g, std::vector> aliases) + set_aliases(twa_ptr g, + const std::vector>& aliases) { if (aliases.empty()) { @@ -1027,4 +1056,17 @@ namespace spot } } + void + create_alias_basis(const twa_graph_ptr& aut) + { + edge_separator es; + es.add_to_basis(aut); + std::vector> aliases; + unsigned n = 0; + for (bdd b: es.basis()) + aliases.emplace_back(std::to_string(n++), b); + std::reverse(aliases.begin(), aliases.end()); + set_aliases(aut, aliases); + } + } diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 70e2c98c6..9a53f41c9 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -35,7 +35,8 @@ namespace spot /// \param os The output stream to print on. /// \param g The automaton to output. /// \param opt a set of characters each corresponding to a possible - /// option: (i) implicit labels for complete and + /// option: (b) create an alias basis if more >=2 AP + /// are used, (i) implicit labels for complete and /// deterministic automata, (k) state labels when possible, /// (s) state-based acceptance when possible, (t) /// transition-based acceptance, (m) mixed acceptance, (l) @@ -62,7 +63,8 @@ namespace spot /// /// Pass an empty vector to remove existing aliases. 
SPOT_API void - set_aliases(twa_ptr g, std::vector> aliases); + set_aliases(twa_ptr g, + const std::vector>& aliases); /// \ingroup twa_io /// \brief Help printing BDDs as text, using aliases. @@ -164,4 +166,18 @@ namespace spot } }; + /// \ingroup twa_io + /// \brief Create an alias basis + /// + /// This use spot::edge_separator to build a set of alias that can + /// be used as a basis for all labels of the automaton. + /// + /// Such a basis can be used to shorten the size of an output file + /// when printing in HOA format (actually, calling print_hoa() with + /// option 'b' will call this function). Such a basis may also be + /// useful to help visualize an automaton (using spot::print_dot's + /// `@` option) when its labels are too large. + SPOT_API void + create_alias_basis(const twa_graph_ptr& aut); + } diff --git a/tests/core/readsave.test b/tests/core/readsave.test index a8c301f70..b5c2f9b08 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -685,7 +685,7 @@ EOF diff output2 expect2 -SPOT_DEFAULT_FORMAT=hoa=k autfilt expect2 >output2b +SPOT_DEFAULT_FORMAT=hoa=kb autfilt expect2 >output2b cat >expect2b <output3 -autfilt -H --remove-dead input >>output3 +autfilt -Hb --remove-dead input >>output3 cat >expect3 < Date: Fri, 22 Mar 2024 16:08:32 +0100 Subject: [PATCH 420/606] minimize: t acceptance is compatible with wdba-minimization * spot/twaalgos/minimize.cc (minimize_obligation_garanteed_to_work): Skip some tests when the acceptance is "t". * tests/core/det.test: Adjust. --- spot/twaalgos/minimize.cc | 24 +++++++++++++++++++----- tests/core/det.test | 6 +++--- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 40e176d2e..399cc2541 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -611,11 +611,25 @@ namespace spot formula f) { // WDBA-minimization necessarily work for obligations - return ((f && f.is_syntactic_obligation()) - // Weak deterministic automata are obligations - || (aut_f->prop_weak().is_true() && is_deterministic(aut_f)) - // Guarantee automata are obligations as well. - || is_terminal_automaton(aut_f)); + if (f && f.is_syntactic_obligation()) + return true; + // we can minimize automata with t acceptance + if (aut_f->acc().is_t()) + return true; + if (aut_f->prop_weak().is_false()) + return false; + // Weak deterministic automata are obligations + if (aut_f->prop_weak().is_true() && is_deterministic(aut_f)) + return true; + scc_info si(aut_f); + check_strength(std::const_pointer_cast(aut_f), &si); + // Check again, now that the strength is known + if (aut_f->prop_weak().is_true() && is_deterministic(aut_f)) + return true; + // Guarantee automata are obligations as well. 
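+    // (Passing the scc_info computed above lets
+    // is_terminal_automaton() avoid rebuilding it.)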
+ if (is_terminal_automaton(aut_f, &si)) + return true; + return false; } twa_graph_ptr diff --git a/tests/core/det.test b/tests/core/det.test index 76125ae7e..37b834f42 100755 --- a/tests/core/det.test +++ b/tests/core/det.test @@ -149,7 +149,7 @@ AP: 1 "a" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc !complete -properties: !deterministic exist-branch +properties: !deterministic exist-branch very-weak spot.highlight.states: 0 1 spot.highlight.edges: 1 2 2 2 --BODY-- @@ -174,7 +174,7 @@ AP: 1 "a" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc !complete -properties: !deterministic exist-branch +properties: !deterministic exist-branch very-weak spot.highlight.states: 1 1 --BODY-- State: 0 {0} @@ -198,7 +198,7 @@ AP: 1 "a" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels explicit-labels state-acc !complete -properties: !deterministic exist-branch +properties: !deterministic exist-branch very-weak spot.highlight.states: 1 5 spot.highlight.edges: 2 5 3 5 --BODY-- From 7ee2d9995f8adc618416aa5f7a9d13201d6bbfb9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sun, 24 Mar 2024 20:59:52 +0100 Subject: [PATCH 421/606] genaut: add two families of cyclic automata These are meant to test the optimization implemented in issue #568. * spot/gen/automata.hh, spot/gen/automata.cc, bin/genaut.cc: Add support for --cycle-log-nba and --cycle-onehot-nba. * tests/core/genaut.test: Add some tests. * tests/python/gen.ipynb: Illustrate them. * NEWS: Mention them. --- NEWS | 5 + bin/genaut.cc | 6 + spot/gen/automata.cc | 81 +- spot/gen/automata.hh | 21 + tests/core/genaut.test | 7 + tests/python/gen.ipynb | 2165 +++++++++++++++++++++++++++++++++++++++- 6 files changed, 2259 insertions(+), 26 deletions(-) diff --git a/NEWS b/NEWS index 1af53cda2..f2d65c401 100644 --- a/NEWS +++ b/NEWS @@ -55,6 +55,11 @@ New in spot 2.11.6.dev (not yet released) patching the game a posteriori is cumbersome if the equivalence concerns different players). + - genaut learned two new familes of automata, --cycle-log-nba and + --cycle-onehot-nba. They both create a cycle of n^2 states that + can be reduced to a cycle of n states using reduction based on + direct simulation. + Library: - The following new trivial simplifications have been implemented for SEREs: diff --git a/bin/genaut.cc b/bin/genaut.cc index 7c3bbf70b..4f6745f3f 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -67,6 +67,12 @@ static const argp_option options[] = { "cyclist-proof-dba", gen::AUT_CYCLIST_PROOF_DBA, "RANGE", 0, "A DBA with N+2 states that should be included " "in cyclist-trace-nba=B.", 0}, + { "cycle-log-nba", gen::AUT_CYCLE_LOG_NBA, "RANGE", 0, + "A cyclic NBA with N*N states and log(N) atomic propositions, that " + "should be simplifiable to a cyclic NBA with N states.", 0 }, + { "cycle-onehot-nba", gen::AUT_CYCLE_ONEHOT_NBA, "RANGE", 0, + "A cyclic NBA with N*N states and N atomic propositions, that " + "should be simplifiable to a cyclic NBA with N states.", 0 }, RANGE_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, diff --git a/spot/gen/automata.cc b/spot/gen/automata.cc index a7f858a64..5d1edfdbd 100644 --- a/spot/gen/automata.cc +++ b/spot/gen/automata.cc @@ -172,9 +172,10 @@ namespace spot } } + // Return the smallest integer k such that 2^k ≥ n > 1. 
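+    // e.g. ulog2(2) == 1, ulog2(5) == 3, ulog2(8) == 3, ulog2(9) == 4.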
static unsigned ulog2(unsigned n) { - assert(n>0); + assert(n>1); // clz() is undefined for n==0 --n; return CHAR_BIT*sizeof(unsigned) - clz(n); } @@ -229,7 +230,7 @@ namespace spot m = {}; aut->prop_state_acc(true); - // How many AP to we need to represent n letters + // How many AP to we need to represent n+1 letters unsigned nap = ulog2(n + 1); std::vector apvars(nap); for (unsigned a = 0; a < nap; ++a) @@ -252,6 +253,76 @@ namespace spot return aut; } + static twa_graph_ptr + cycle_nba(unsigned n, bool onehot, bdd_dict_ptr dict) + { + if (n == 0) + throw std::runtime_error + (onehot + ? "cycle-onehot-nba expects a positive argument" + : "cycle-log-nba expects a positive argument"); + + auto aut = make_twa_graph(dict); + acc_cond::mark_t isacc = aut->set_buchi(); + aut->new_states(n * n); + aut->set_init_state(0); + aut->prop_state_acc(true); + aut->prop_weak(n == 1); + aut->prop_universal(n == 1); + aut->prop_complete(false); + + std::vector letters; + letters.reserve(n); + + if (!onehot) + { + // How many AP to we need to represent n letters + unsigned nap = n == 1 ? 1 : ulog2(n); + std::vector apvars(nap); + for (unsigned a = 0; a < nap; ++a) + apvars[a] = aut->register_ap("p" + std::to_string(a)); + + for (unsigned letter = 0; letter < n; ++letter) + { + bdd cond = bdd_ibuildcube(letter, nap, apvars.data()); + letters.push_back(cond); + } + } + else + { + std::vector apvars(n); + bdd allneg = bddtrue; + for (unsigned a = 0; a < n; ++a) + { + int v = aut->register_ap("p" + std::to_string(a)); + apvars[a] = bdd_ithvar(v); + allneg &= bdd_nithvar(v); + } + for (unsigned a = 0; a < n; ++a) + letters.push_back(bdd_exist(allneg, apvars[a]) & apvars[a]); + } + + unsigned n2 = n * n; + for (unsigned s = 0; s < n; ++s) + { + bdd label = letters[s]; + for (unsigned copy = 0; copy < n; ++copy) + { + unsigned q = s + copy * n; + if (s != 0) + { + aut->new_edge(q, q, bddtrue); + aut->new_edge(q, (q + 1) % n2, label); + } + else + { + aut->new_edge(q, (q + 1) % n2, label, isacc); + } + } + } + return aut; + } + twa_graph_ptr aut_pattern(aut_pattern_id pattern, int n, bdd_dict_ptr dict) { @@ -278,6 +349,10 @@ namespace spot return cyclist_trace_or_proof(n, true, dict); case AUT_CYCLIST_PROOF_DBA: return cyclist_trace_or_proof(n, false, dict); + case AUT_CYCLE_LOG_NBA: + return cycle_nba(n, false, dict); + case AUT_CYCLE_ONEHOT_NBA: + return cycle_nba(n, true, dict); case AUT_END: break; } @@ -294,6 +369,8 @@ namespace spot "m-nba", "cyclist-trace-nba", "cyclist-proof-dba", + "cycle-log-nba", + "cycle-onehot-nba", }; // Make sure we do not forget to update the above table every // time a new pattern is added. diff --git a/spot/gen/automata.hh b/spot/gen/automata.hh index fdaa0a6d5..7b60b5269 100644 --- a/spot/gen/automata.hh +++ b/spot/gen/automata.hh @@ -96,6 +96,27 @@ namespace spot /// for a given n contain the automaton generated with /// AUT_CYCLIST_PROOF_DBA for the same n. AUT_CYCLIST_PROOF_DBA, + /// \brief cycles of n letters repeated n times + /// + /// This is a Büchi automaton with n^2 states, in which each + /// state i has a true self-loop and a successor labeled by the + /// (i%n)th letter. Only the states that are multiple of n have + /// no self-loop and are accepting. + /// + /// This version uses log(n) atomic propositions to + /// encore the n letters as minterms. 
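+    ///
+    /// For instance, with n=3 this produces a 9-state automaton
+    /// over atomic propositions p0 and p1.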
+ AUT_CYCLE_LOG_NBA, + /// \brief cycles of n letters repeated n times + /// + /// This is a Büchi automaton with n^2 states, in which each + /// state i has a true self-loop and a successor labeled by the + /// (i%n)th letter. Only the states that are multiple of n have + /// no self-loop and are accepting. + /// + /// This version uses one-hot encoding of letters, i.e, n atomic + /// propositions are used, but only one is positive (except on + /// true self-loops). + AUT_CYCLE_ONEHOT_NBA, AUT_END }; diff --git a/tests/core/genaut.test b/tests/core/genaut.test index c69b87f2a..d74700b68 100644 --- a/tests/core/genaut.test +++ b/tests/core/genaut.test @@ -29,6 +29,7 @@ res=`genaut $opts --stats="--%F=%L"` test "$opts" = "$res" genaut --ks-nca=..3 --l-nba=..3 --l-dsa=..3 --m-nba=..3 \ + --cycle-log-nba=..3 --cycle-onehot-nba=..3 \ --stats=%s,%e,%t,%c,%g >out cat >expected <expected <\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "ks-nca=3\n", + "\n", + "ks-nca=3\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "(!a & b) | (a & !b)\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "(!a & !b) | (a & b)\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "6->1\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "!a | b\n", + "\n", + "\n", + "\n" + ], "text/html": [ "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "ks-nca=3\n", + "\n", + "ks-nca=3\n", "[co-Büchi]\n", "\n", "\n", @@ -730,7 +941,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7fb3eb3521f0> >" ] }, "metadata": {}, @@ -738,17 +949,260 @@ }, { "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "l-dsa=3\n", + "\n", + "l-dsa=3\n", + "(Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))\n", + "[Streett 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + 
"\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "\n", + "9->8\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "10->10\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n" + ], "text/html": [ "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "l-dsa=3\n", + "\n", + "l-dsa=3\n", "(Fin(\n", "\n", ") | Inf(\n", @@ -978,7 +1432,7 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7fb3eb3519e0> >" ] }, "metadata": {}, @@ -986,17 +1440,223 @@ }, { "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "l-nba=3\n", + "\n", + "l-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "8->7\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "a | b\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "9->8\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "9->9\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", 
+ "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "6->6\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n" + ], "text/html": [ "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "l-nba=3\n", + "\n", + "l-nba=3\n", "[Büchi]\n", "\n", "\n", @@ -1189,7 +1849,1458 @@ "\n" ], "text/plain": [ - "" + " *' at 0x7fb3eb351440> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "m-nba=3\n", + "\n", + "m-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "m-nba=3\n", + "\n", + "m-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb3eb3513e0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cyclist-proof-dba=3\n", + "\n", + "cyclist-proof-dba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + 
"4->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cyclist-proof-dba=3\n", + "\n", + "cyclist-proof-dba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb3eb3503f0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cyclist-trace-nba=3\n", + "\n", + "cyclist-trace-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cyclist-trace-nba=3\n", + "\n", + "cyclist-trace-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "p0 & p1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb3eb350510> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cycle-log-nba=3\n", + "\n", + "cycle-log-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + 
"0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cycle-log-nba=3\n", + "\n", + "cycle-log-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb3eb351080> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cycle-onehot-nba=3\n", + "\n", + "cycle-onehot-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + 
"\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cycle-onehot-nba=3\n", + "\n", + "cycle-onehot-nba=3\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "5->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "p0 & !p1 & !p2\n", + "\n", + "\n", + "\n", + "7->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "!p0 & p1 & !p2\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "!p0 & !p1 & p2\n", + "\n", + "\n", + "\n", + "8->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb3eb351680> >" ] }, "metadata": {}, @@ -1197,9 +3308,15 @@ } ], "source": [ - "display(sg.aut_pattern(sg.AUT_KS_NCA, 3).show('.a'),\n", - " sg.aut_pattern(sg.AUT_L_DSA, 3).show('.a'),\n", - " sg.aut_pattern(sg.AUT_L_NBA, 3).show('.a'))" + "examples = []\n", + "for p in (sg.AUT_KS_NCA, sg.AUT_L_DSA, sg.AUT_L_NBA, sg.AUT_M_NBA,\n", + " 
sg.AUT_CYCLIST_PROOF_DBA, sg.AUT_CYCLIST_TRACE_NBA,\n", + " sg.AUT_CYCLE_LOG_NBA, sg.AUT_CYCLE_ONEHOT_NBA):\n", + " aut = sg.aut_pattern(p, 3)\n", + " aut.set_name(sg.aut_pattern_name(p) + \"=3\")\n", + " examples.append(aut)\n", + "\n", + "display(*examples)" ] }, { @@ -1253,7 +3370,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, From bda40a5f198a90e85c1604041368e53f31635f86 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 22 Mar 2024 23:01:50 +0100 Subject: [PATCH 422/606] simulation: heuristically use a separated-label approach to rebuild Closes issue #568. * spot/twaalgos/simulation.cc (direct_simulation::build_result): Implement an alternate loop based on edge_separator::basis to iterate over a signature to build results. * tests/core/568.test: New file. * tests/Makefile.am: Add it. * NEWS: Mention the optimization. --- NEWS | 12 +-- spot/twaalgos/simulation.cc | 164 +++++++++++++++++++++++++----------- tests/Makefile.am | 1 + tests/core/568.test | 77 +++++++++++++++++ 4 files changed, 199 insertions(+), 55 deletions(-) create mode 100755 tests/core/568.test diff --git a/NEWS b/NEWS index f2d65c401..1d8494ae0 100644 --- a/NEWS +++ b/NEWS @@ -165,12 +165,12 @@ New in spot 2.11.6.dev (not yet released) - spot::dualize() learned a trick to be faster on states that have less outgoing edges than atomic proposition declared on the - automaton. spot::remove_alternation() and - spot::tgba_determinize() learned a similar trick, except it isn't - applied at the state level but of the entire alternating use few - distinct labels. These changes may speed up the complementation - of some very weak automata, and the minimization of some - WDBA. (Issue #566.) + automaton. spot::remove_alternation(), spot::tgba_powerset(), and + simulation-based reductions learned a similar trick, except it + isn't applied at the state level but if the entire automaton use + few distinct labels. These changes may speed up the processing of + automata with many atomic propositions but few distinct labels. + (Issue #566 and issue #568.) - [Potential backward incompatibility] spot::dualize() does not call cleanup_acceptance() anymore. This change ensures that the dual diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index 1beb41b75..9fe714e03 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -29,6 +29,8 @@ #include #include #include +#include +#include // Work around GCC bug 80947 (dominates_edge is causing spurious // visibility warnings) @@ -507,6 +509,35 @@ namespace spot res->copy_ap_of(a_); res->copy_acceptance_of(a_); + // We have two ways of "spliting" a signature to create the + // outgoing edges. One is to iterate over 2^AP, then collect + // the destinations. The second is to first create a coarser + // basis for the original set of labels, and then iterate on + // this basis. The latter is good when we have few distinct + // labels. With too many different labels that may have + // nonempty intersections, the basis approach can consume a + // lot of memory. We have to heuristically select between + // those two. + unsigned nap = res->ap().size(); + bool will_use_basis = nap > 5; + edge_separator es; + if (will_use_basis) + // Gather all labels, but stop if we see too many. The + // threshold below is arbitrary: adjust if you know better. + will_use_basis = es.add_to_basis(a_, 256 * nap); + // We use a cache to avoid the costly loop over the basis. 
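+      // (Distinct classes frequently share the same AP-part in
+      // their signatures, and can then reuse the same labels.)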
+ // + // Cache entries have the form (bdd, [begin, end]) where bdd + // what should be split using the basis, and begin/end denotes + // a range of existing transition numbers that cover the + // split. + // + // std::pair causes some noexcept warnings when used in + // robin_hood::unordered_map with GCC 9.4. Use robin_hood::pair + // instead. + typedef robin_hood::pair cached_t; + robin_hood::unordered_map split_cond; + auto state_mapping = new std::vector(); state_mapping->resize(a_->num_states()); res->set_named_prop("simulated-states", state_mapping); @@ -549,6 +580,58 @@ namespace spot auto all_inf = all_inf_; unsigned srcst = 0; + + auto create_edges = [&](int srcid, bdd one, bdd dest) { + // Iterate over all possible destination classes. We + // use minato_isop here, because if the same valuation + // of atomic properties can go to two different + // classes C1 and C2, iterating on C1 + C2 with other + // means would see C1 then (!C1)C2, instead of C1 then + // C2. With minatop_isop, we ensure that no negative + // class variable will be seen (likewise for + // promises). + minato_isop isop(dest); + + ++nb_minterms; + bdd cond_acc_dest; + while ((cond_acc_dest = isop.next()) != bddfalse) + { + ++stat.edges; + ++nb_minato; + + // Take the edge, and keep only the variable which + // are used to represent the class. + bdd dst = bdd_existcomp(cond_acc_dest, all_class_var_); + + // Keep only ones who are acceptance condition. + auto acc = bdd_to_mark(bdd_existcomp(cond_acc_dest, + all_proms_)); + + // Because we have complemented all the Inf + // acceptance conditions on the input automaton, + // we must revert them to create a new edge. + acc ^= all_inf; + if (Cosimulation) + { + if (Sba) + { + // acc should be attached to src, or rather, + // in our edge-based representation) + // to all edges leaving src. As we + // can't do this here, store this in a table + // so we can fix it later. + accst[srcst] = acc; + acc = {}; + } + gb->new_edge(dst.id(), srcid, one, acc); + } + else + { + gb->new_edge(srcid, dst.id(), one, acc); + } + } + }; + // For each class, we will create // all the edges between the states. for (auto& p: sorted_classes_) @@ -566,11 +649,10 @@ namespace spot if (Cosimulation) sig = bdd_compose(sig, bddfalse, bdd_var(bdd_initial)); - // Get all the variables in the signature. bdd sup_sig = bdd_support(sig); - // Get the variable in the signature which represents the + // Get the variables in the signature which represent the // conditions. bdd sup_all_atomic_prop = bdd_exist(sup_sig, nonapvars); @@ -578,60 +660,44 @@ namespace spot // proposition. bdd all_atomic_prop = bdd_exist(sig, nonapvars); - // First loop over all possible valuations atomic properties. - for (bdd one: minterms_of(all_atomic_prop, sup_all_atomic_prop)) + if (!will_use_basis) { - // For each possible valuation, iterate over all possible - // destination classes. We use minato_isop here, because - // if the same valuation of atomic properties can go - // to two different classes C1 and C2, iterating on - // C1 + C2 with the above minters_of loop will see - // C1 then (!C1)C2, instead of C1 then C2. - // With minatop_isop, we ensure that the no negative - // class variable will be seen (likewise for promises). 
- minato_isop isop(bdd_restrict(sig, one)); - ++nb_minterms; - - bdd cond_acc_dest; - while ((cond_acc_dest = isop.next()) != bddfalse) + for (bdd one: minterms_of(all_atomic_prop, sup_all_atomic_prop)) + create_edges(src.id(), one, bdd_restrict(sig, one)); + } + else + { + auto& [begin, end] = split_cond[all_atomic_prop]; + if (begin == end) { - ++stat.edges; - - ++nb_minato; - - // Take the edge, and keep only the variable which - // are used to represent the class. - bdd dst = bdd_existcomp(cond_acc_dest, all_class_var_); - - // Keep only ones who are acceptance condition. - auto acc = bdd_to_mark(bdd_existcomp(cond_acc_dest, - all_proms_)); - - // Because we have complemented all the Inf - // acceptance conditions on the input automaton, - // we must revert them to create a new edge. - acc ^= all_inf; - if (Cosimulation) + begin = res->num_edges() + 1; + for (bdd label: es.basis()) + create_edges(src.id(), label, + bdd_relprod(label, sig, + res->ap_vars())); + end = res->num_edges() + 1; + } + else + { + // We have already split all_atomic_prop once, so + // we can simply reuse the set of labels we used + // then, avoiding the iteration on es.basis(). + auto& g = res->get_graph(); + bdd last = bddfalse; + for (unsigned i = begin; i < end; ++i) { - if (Sba) - { - // acc should be attached to src, or rather, - // in our edge-based representation) - // to all edges leaving src. As we - // can't do this here, store this in a table - // so we can fix it later. - accst[srcst] = acc; - acc = {}; - } - gb->new_edge(dst.id(), src.id(), one, acc); - } - else - { - gb->new_edge(src.id(), dst.id(), one, acc); + bdd label = g.edge_storage(i).cond; + if (label == last) + continue; + last = label; + create_edges(src.id(), label, + bdd_relprod(label, sig, + res->ap_vars())); } } } + ++srcst; } diff --git a/tests/Makefile.am b/tests/Makefile.am index c6430b2ac..ebee91fa9 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -221,6 +221,7 @@ TESTS_twa = \ core/521.test \ core/522.test \ core/566.test \ + core/568.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ diff --git a/tests/core/568.test b/tests/core/568.test new file mode 100755 index 000000000..2fb4b8df2 --- /dev/null +++ b/tests/core/568.test @@ -0,0 +1,77 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #568. Before the patch implemented for #568, running +# simulation-based reduction on automata with many APs was getting +# exponentially slow even if few different labels were used. 
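+# (The old code enumerated up to 2^|AP| minterms for each signature,
+# regardless of how few distinct labels actually occurred.)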
+# +# For instance we had this: +# +# % genaut --cycle-onehot=10..20 --name='size %L' | +# autfilt --small --stats='%M: %S->%s in %r seconds' +# size 10: 100->10 in 0.0395407 seconds +# size 11: 121->11 in 0.0950484 seconds +# size 12: 144->12 in 0.227828 seconds +# size 13: 169->13 in 0.391545 seconds +# size 14: 196->14 in 0.954784 seconds +# size 15: 225->15 in 2.34656 seconds +# size 16: 256->16 in 5.80549 seconds +# size 17: 289->17 in 14.3545 seconds +# size 18: 324->18 in 47.1589 seconds +# size 19: 361->19 in 138.023 seconds +# +# The test below shows that even with --cycle-onehot-nba=80 we don't +# have any problem now. + +genaut --cycle-onehot-nba=20 \ + --cycle-onehot-nba=40 \ + --cycle-onehot-nba=60 \ + --cycle-onehot-nba=80 \ + --cycle-log-nba=20 \ + --cycle-log-nba=40 \ + --cycle-log-nba=60 \ + --cycle-log-nba=80 \ + | autfilt --small --stats='%S -> %s' > out +cat >expected < 20 +1600 -> 40 +3600 -> 60 +6400 -> 6400 +400 -> 20 +1600 -> 40 +3600 -> 60 +6400 -> 6400 +EOF +diff expected out + +# the reason for the 6400 states above is that simulation-based +# reductions are disabled above 4096 states by default. This can be +# changed with -x simul-max=N. + +genaut --cycle-onehot-nba=80 --cycle-log-nba=80 \ + | autfilt -x simul-max=6400 --small --stats='%S -> %s' > out +cat >expected < 80 +6400 -> 80 +EOF +diff expected out From 26ef5458eba73bf77f5a53eaf7ccd20825ec46f7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 25 Mar 2024 10:36:34 +0100 Subject: [PATCH 423/606] determinize: speedup on automata with many AP and few labels This uses the same trick as discussed in issue #566 and issue #568. * spot/twaalgos/determinize.cc (safra_support): Use a basis if it is smaller than 2^|support| for the current Safra state. * tests/core/568.test: Add some tests. * NEWS: Mention the optimization. --- NEWS | 12 +++++------ spot/twaalgos/determinize.cc | 42 +++++++++++++++++++++++++++++++++--- tests/core/568.test | 26 ++++++++++++++++++++++ 3 files changed, 71 insertions(+), 9 deletions(-) diff --git a/NEWS b/NEWS index 1d8494ae0..e7b9b10b2 100644 --- a/NEWS +++ b/NEWS @@ -165,12 +165,12 @@ New in spot 2.11.6.dev (not yet released) - spot::dualize() learned a trick to be faster on states that have less outgoing edges than atomic proposition declared on the - automaton. spot::remove_alternation(), spot::tgba_powerset(), and - simulation-based reductions learned a similar trick, except it - isn't applied at the state level but if the entire automaton use - few distinct labels. These changes may speed up the processing of - automata with many atomic propositions but few distinct labels. - (Issue #566 and issue #568.) + automaton. spot::remove_alternation(), spot::tgba_powerset(), + simulation-based reductions, and spot::tgba_determinize() learned + a similar trick, except it isn't applied at the state level but if + the entire automaton use few distinct labels. These changes may + speed up the processing of automata with many atomic propositions + but few distinct labels. (Issue #566 and issue #568.) - [Potential backward incompatibility] spot::dualize() does not call cleanup_acceptance() anymore. 
This change ensures that the dual diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index a364ffa48..8f135298a 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include #include #include @@ -31,6 +33,7 @@ #include #include #include +#include #include namespace spot @@ -586,9 +589,27 @@ namespace spot { const std::vector& state_supports; robin_hood::unordered_flat_map, bdd_hash> cache; - + std::vector basis; + unsigned log_basis_size = 0; public: - safra_support(const std::vector& s): state_supports(s) {} + safra_support(const std::vector& s, + const const_twa_graph_ptr& orig_aut) + : state_supports(s) + { + unsigned nap = orig_aut->ap().size(); + if (nap > 5) + { + edge_separator es; + // Gather all labels, but stop if we see too many. The + // threshold below is arbitrary: adjust if you know better. + if (es.add_to_basis(orig_aut, 256 * nap)) + { + basis = es.basis(); + auto sz = basis.size(); + log_basis_size = CHAR_BIT*sizeof(sz) - clz(sz); + } + } + } const std::vector& get(const safra_state& s) @@ -600,6 +621,21 @@ namespace spot if (i.second) // insertion took place { std::vector& res = i.first->second; + // If we have a basis, we probably want to use it. + // But we should do that only if 2^|supp| is larger. + if (log_basis_size) + { + // Compute the size of the support + bdd s = supp; + unsigned sz = log_basis_size; + while (sz && s != bddtrue) + { + --sz; + s = bdd_high(s); + } + if (s != bddtrue) + return res = basis; + } for (bdd one: minterms_of(bddtrue, supp)) res.emplace_back(one); } @@ -971,7 +1007,7 @@ namespace spot } } - safra_support safra2letters(support); + safra_support safra2letters(support, aut); auto res = make_twa_graph(aut->get_dict()); res->copy_ap_of(aut); diff --git a/tests/core/568.test b/tests/core/568.test index 2fb4b8df2..b538bfb7d 100755 --- a/tests/core/568.test +++ b/tests/core/568.test @@ -75,3 +75,29 @@ cat >expected < 80 EOF diff expected out + +genaut --cycle-onehot=7..12 | + autfilt --parity -D --stats="%S -> %s" > out +cat >expected < 13 +64 -> 15 +81 -> 17 +100 -> 19 +121 -> 4360 +144 -> 9481 +EOF + +genaut --cycle-onehot=7..12 | + autfilt -x simul-max=10000 --parity -D --stats="%S -> %s" > out +cat >expected < 13 +64 -> 15 +81 -> 17 +100 -> 19 +121 -> 21 +144 -> 23 +EOF + +# Using autcross will also test tgba_determinize +genaut --cycle-onehot-nba=11..12 --cycle-log-nba=11..12 | + autcross --language-preserved 'autfilt --small' --verbose From 89f87795ca7906b61c19487aa1cec18a4ad62079 Mon Sep 17 00:00:00 2001 From: pierreganty Date: Mon, 25 Mar 2024 13:02:59 +0100 Subject: [PATCH 424/606] * doc/org/tut25.org: Minor corrections. --- doc/org/tut25.org | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/doc/org/tut25.org b/doc/org/tut25.org index c1ee1ecba..c3009e690 100644 --- a/doc/org/tut25.org +++ b/doc/org/tut25.org @@ -8,8 +8,8 @@ #+PROPERTY: header-args:C+++ :results verbatim The [[https://languageinclusion.org/doku.php?id=tools#the_ba_format][BA format]] is a textual representation of a Büchi automaton with -letter-based alphabet, and supported by tools like [[https://languageinclusion.org/doku.php?id=tools][RABIT]] or [[http://goal.im.ntu.edu.tw/wiki/doku.php][Goal]]. 
It -looks as follows: +letter-based alphabet, and supported by tools like [[https://github.com/Mazzocchi/FORKLIFT][FORKLIFT]], [[https://languageinclusion.org/doku.php?id=tools][RABIT]], +[[http://goal.im.ntu.edu.tw/wiki/doku.php][Goal,]] or [[https://iscasmc.ios.ac.cn/roll/doku.php?id=start][ROLL]]. It looks as follows: #+BEGIN_SRC dot :file tut25-aut.svg :exports results digraph "" { @@ -49,17 +49,17 @@ s₂ The first line, ~s₁~ represents the initial state, the next block of lines of the form ~letters,src->dst~ represent the transitions of the -automaton. End the last block of lines (containing ~s₁~ and ~s₂~ in -the above example), lists the accepting states of the automaton. +automaton, and the last block of lines (containing ~s₁~ and ~s₂~ in +the above example) lists the accepting states of the automaton. -In this format, the letters and the state can be arbitrary strings +In this format, the letters and the states are arbitrary strings that do not include the characters ~,~ or ~-~, or ~>~. The initial state can be omitted (the source of the first transition is then assumed to be initial), and the list of accepting states may be empty. Spot has no support for letter-based alphabet (instead it uses boolean -formulas over a set of atomtic propositions), so this format does not -really make any sense. +formulas over a set of atomic propositions), hence Spot has no support +for this format as input. As an example of [[file:tut21.org][how to custom print an automaton]], let us write a small tool that will convert any Büchi automaton that Spot can read @@ -95,7 +95,7 @@ $txt Then each label can now be considered as a letter. -* Convertion in Python +* Conversion in Python #+NAME: toba.py @@ -177,9 +177,9 @@ ltl2tgba -B "a W G(b->c)" | ./toba.py 1 #+end_example -The BDD ~e.cond~ that encodes the Boolean formula labels each edge ~e~ -have been printed using ~e.cond.id()~: this is the integer identifier -that uniquely denote each formula. This identifier is good enough to +The BDD ~e.cond~ that encodes the Boolean formula labelling edge ~e~ +is printed using ~e.cond.id()~ which is the integer identifier +that uniquely denotes each formula. This identifier is good enough to make letters unique and keep the file short. However, if you prefer to print the formula instead, replace =e.cond.id()= by =spot.bdd_format_formula(aut.get_dict(), e.cond)=. @@ -227,7 +227,7 @@ the same logic as in the previous section. ltl2tgba -B "a W G(b->c)" >tut25.hoa #+end_src -Now what remains to be done is to read some input automaton, so we +Now, what remains to be done is to read some input automaton, so we can print it: #+NAME: maincpp @@ -257,7 +257,7 @@ can print it: } #+END_SRC -Unsurprisingly running the above code on our example automaton +Unsurprisingly, running the above code on our example automaton produces the same output. #+RESULTS: maincpp From 88f8af22c3ffaa5c4ed30f0106714125ea6e400d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 25 Mar 2024 17:12:05 +0100 Subject: [PATCH 425/606] autfilt: add option --separate-edges * bin/autfilt.cc: Implement it. * tests/core/split.test: Test it. * doc/org/tut25.org: Demonstrate it. * NEWS: Mention it. 
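
Compared to --split-edges, the new option only intersects the labels
already present in the automaton to build a basis of disjoint labels,
so it often needs fewer letters.  For instance:

  ltl2tgba -B 'a W G(b->c)' | autfilt --separate-edges -d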
--- NEWS | 15 ++++++++----- bin/autfilt.cc | 10 +++++++++ doc/org/tut25.org | 49 +++++++++++++++++++++++++++++++++++++++++++ tests/core/split.test | 4 ++++ 4 files changed, 73 insertions(+), 5 deletions(-) diff --git a/NEWS b/NEWS index e7b9b10b2..c782d4233 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,11 @@ New in spot 2.11.6.dev (not yet released) + Documentation: + + - https://spot.lre.epita.fr/tut25.html is a new example showing + how to print an automaton in the "BA format" (used by Rabbit + and other tools). + Command-line tools: - In places that accept format strings with '%' sequences, like @@ -19,6 +25,10 @@ New in spot 2.11.6.dev (not yet released) and its negation is already a basis). This can help reducing the size of large HOA files. + - autfilt learned --separate-edges, to split the labels of + the automaton using a basis of disjoint labels. See + https://spot.lre.epita.fr/tut25.html for some motivation. + - ltlfilt has a new option --relabel-overlapping-bool=abc|pnn that will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. @@ -204,11 +214,6 @@ New in spot 2.11.6.dev (not yet released) This version of Spot now declares its svg outputs as HTML to prevent Jypyter from wrapping them is images. - Documentation: - - - https://spot.lre.epita.fr/tut25.html is a new example showing - how to print an automaton in the "BA format" (used by Rabbit). - Bugs fixed: - tgba_determinize()'s use_simulation option would cause it to diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 39a8f46b8..08b17df99 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -149,6 +149,7 @@ enum { OPT_SAT_MINIMIZE, OPT_SCCS, OPT_SEED, + OPT_SEPARATE_EDGES, OPT_SEP_SETS, OPT_SIMPL_ACC, OPT_SIMPLIFY_EXCLUSIVE_AP, @@ -370,6 +371,9 @@ static const argp_option options[] = { "split-edges", OPT_SPLIT_EDGES, nullptr, 0, "split edges into transitions labeled by conjunctions of all atomic " "propositions, so they can be read as letters", 0 }, + { "separate-edges", OPT_SEPARATE_EDGES, nullptr, 0, + "split edges into transitions labeled by a disjoint set of labels that" + " form a basis for the original automaton", 0 }, { "sum", OPT_SUM_OR, "FILENAME", 0, "build the sum with the automaton in FILENAME " "to sum languages", 0 }, @@ -692,6 +696,7 @@ static bool opt_rem_unreach = false; static bool opt_rem_unused_ap = false; static bool opt_sep_sets = false; static bool opt_split_edges = false; +static bool opt_separate_edges = false; static const char* opt_sat_minimize = nullptr; static const char* opt_to_finite = nullptr; static int opt_highlight_nondet_states = -1; @@ -1204,6 +1209,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_SPLIT_EDGES: opt_split_edges = true; break; + case OPT_SEPARATE_EDGES: + opt_separate_edges = true; + break; case OPT_STATES: opt_states = parse_range(arg, 0, std::numeric_limits::max()); break; @@ -1664,6 +1672,8 @@ namespace if (opt_split_edges) aut = spot::split_edges(aut); + else if (opt_separate_edges) + aut = spot::separate_edges(aut); if (opt_to_finite) aut = spot::to_finite(aut, opt_to_finite); diff --git a/doc/org/tut25.org b/doc/org/tut25.org index c3009e690..87ffd544d 100644 --- a/doc/org/tut25.org +++ b/doc/org/tut25.org @@ -283,3 +283,52 @@ produces the same output. #+begin_src sh :results silent :exports results rm -f tut25.hoa #+end_src + +* Improving the split + +=split_edges()= is not the only way to split the edge labels. 
Another +option, introduced in Spot 2.12, is =separate_edges()=: this looks at +the labels used in the automaton and intersects them to construct a +new set of disjoint labels that can be used as a basis for all labels. +In the worst case, the basis will be equal to $2^{\{a,b,c\}}$ and this +reduces to =split_edges()=. However in many cases, as in our running +example, it will require fewer labels. + +#+NAME: tut25ex3 +#+BEGIN_SRC sh :exports code +ltl2tgba -B "a W G(b->c)" | autfilt --separate-edges -d +#+END_SRC + +#+BEGIN_SRC dot :file tut25ex3.svg :var txt=tut25ex3 :exports results +$txt +#+END_SRC + +#+RESULTS: +[[file:tut25ex3.svg]] + +Fixing the above Python/C++ code to use =separate_edges()= instead of +=split_edges()= allows to convert this example using only 3 letters: + +#+NAME: toba.py +#+begin_src python :exports results + import spot + aut = spot.translate("a W G(b->c)", "BA") + aut = spot.separate_edges(aut) + acc = aut.acc() + print(aut.get_init_state_number()) + for e in aut.edges(): + print(f"{e.cond.id()},{e.src}->{e.dst}") + for s in range(aut.num_states()): + if acc.accepting(aut.state_acc_sets(s)): + print(s) +#+end_src + +#+RESULTS: toba.py +: 1 +: 83,0->0 +: 85,0->0 +: 85,1->0 +: 83,1->1 +: 76,1->1 +: 0 +: 1 diff --git a/tests/core/split.test b/tests/core/split.test index 720133e2e..e109ff4b1 100755 --- a/tests/core/split.test +++ b/tests/core/split.test @@ -23,6 +23,10 @@ set -e test 3,7 = `ltl2tgba 'a U b' --stats=%e,%t` test 7,7 = `ltl2tgba 'a U b' | autfilt --split --stats=%e,%t` +test 5,7 = `ltl2tgba 'a U b' | autfilt --separate-edges --stats=%e,%t` test 12,12 = `ltl2tgba 'a U b' | autfilt -C --split --stats=%e,%t` +test 9,12 = `ltl2tgba 'a U b' | autfilt -C --separate-edges --stats=%e,%t` test 0,0 = `ltl2tgba 0 | autfilt --split --stats=%e,%t` +test 0,0 = `ltl2tgba 0 | autfilt --separate-edges --stats=%e,%t` test 1,1 = `ltl2tgba 0 | autfilt -C --split --stats=%e,%t` +test 1,1 = `ltl2tgba 0 | autfilt -C --separate-edges --stats=%e,%t` From df44f7a5c21a8afe0d5dc00f7b7db55d499ba092 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 26 Mar 2024 16:21:44 +0100 Subject: [PATCH 426/606] require python 3.6 * HACKING, NEWS, README, doc/org/install.org: Update. * m4/pypath.m4, python/spot/__init__.py: Adjust requirements. * python/spot/ltsmin.i: Don't use capture_output, this is a 3.7 option. --- HACKING | 2 +- NEWS | 6 ++++++ README | 4 ++-- doc/org/install.org | 2 +- m4/pypath.m4 | 2 +- python/spot/__init__.py | 8 ++++---- python/spot/ltsmin.i | 3 ++- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/HACKING b/HACKING index 59a0fc401..85b45ed14 100644 --- a/HACKING +++ b/HACKING @@ -42,7 +42,7 @@ since the generated files they produce are distributed.) A complete LaTeX distribution, including latexmk and extra fonts like dsfont.sty. ImageMagick - Python >= 3.5, IPython >= 2.3 + Python >= 3.6, IPython >= 2.3 Jupyter >= 4, with nbconvert GraphViz Java >= 1.7 (needed to run PlantUML while generating the doc) diff --git a/NEWS b/NEWS index c782d4233..4bc2f452e 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,11 @@ New in spot 2.11.6.dev (not yet released) + Build: + + - When Python bindings are enabled, Spot now requires Python 3.6 or + later. Python 3.6 has reached end-of-life in 2021, but is still + used on CentOS 7 (which will reach end-of-support later in 2024). 
+ Documentation: - https://spot.lre.epita.fr/tut25.html is a new example showing diff --git a/README b/README index a19fc8473..30c181574 100644 --- a/README +++ b/README @@ -76,7 +76,7 @@ Requirements Spot requires a C++17-compliant compiler. G++ 7.x or later, as well as Clang++ 5.0 or later should work. -Spot expects a complete installation of Python (version 3.5 or later). +Spot expects a complete installation of Python (version 3.6 or later). Especially, Python's headers files should be installed. If you don't have Python installed, and do NOT want to install it, you should run configure with the --disable-python option (see below). @@ -133,7 +133,7 @@ flags specific to Spot: offers a convenient interface when used in an IPython notebook, and are also used to build the CGI script that translates LTL formulas on-line. You may safely disable these, especially if you - do not have a working Python 3.2+ installation or if you are + do not have a working Python 3.6+ installation or if you are attempting some cross-compilation. --enable-max-accsets=N diff --git a/doc/org/install.org b/doc/org/install.org index 0a08677e1..f498194e9 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -22,7 +22,7 @@ successful development build]]. Spot requires a C++17-compliant compiler. =g++= 7.0 or later, as well as =clang++= 5.0 or later should work. -Spot expects a complete installation of Python (version 3.5 or later). +Spot expects a complete installation of Python (version 3.6 or later). Especially, Python's headers files should be installed (the package to install has a name like =python-dev= or =libpython3-dev= in most distributions). If you don't have Python installed, and do NOT want diff --git a/m4/pypath.m4 b/m4/pypath.m4 index 61a9921bd..efdf57355 100644 --- a/m4/pypath.m4 +++ b/m4/pypath.m4 @@ -1,5 +1,5 @@ AC_DEFUN([adl_CHECK_PYTHON], - [AM_PATH_PYTHON([3.5]) + [AM_PATH_PYTHON([3.6]) case $PYTHON in [[\\/$]]* | ?:[[\\/]]* );; *) AC_MSG_ERROR([The PYTHON variable must be set to an absolute filename.]);; diff --git a/python/spot/__init__.py b/python/spot/__init__.py index c44ce9555..b784fdbdf 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -19,8 +19,8 @@ import sys -if sys.hexversion < 0x03030000: - sys.exit("This module requires Python 3.3 or newer") +if sys.hexversion < 0x03060000: + sys.exit("This module requires Python 3.6 or newer") import subprocess import os @@ -710,7 +710,7 @@ def automata(*sources, timeout=None, ignore_abort=True, # returned by spot.automata() is destroyed. Otherwise, _supress() # is just a dummy context manager that does nothing (Python 3.7 # introduces nullcontext() for this purpose, but at the time of - # writing we support Python 3.4). + # writing we still have to support Python 3.6). mgr = proc if proc else _supress() with mgr: while a: @@ -730,7 +730,7 @@ def automata(*sources, timeout=None, ignore_abort=True, # an exception. if ret and sys.exc_info()[0] is None: raise subprocess.CalledProcessError(ret, filename[:-1]) - # deleting o explicitly now prevents Python 3.5 from + # deleting o explicitly used to prevent Python 3.5 from # reporting the following error: " returned a result with # an error set". 
It's not clear to me if the bug is in Python diff --git a/python/spot/ltsmin.i b/python/spot/ltsmin.i index 94b4ca93b..c039fdba3 100644 --- a/python/spot/ltsmin.i +++ b/python/spot/ltsmin.i @@ -173,7 +173,8 @@ try: try: p = subprocess.run(['divine', 'compile', '--ltsmin', t.name], - capture_output=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) if p.stdout: print(p.stdout) if p.stderr: print(p.stderr, file=sys.stderr) From 7ac570fa3fd2667676a1027d2232fbe70ad4b57c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 26 Mar 2024 20:35:10 +0100 Subject: [PATCH 427/606] modernize some Python code Since we now require Python 3.6, we can use f-strings instead of format() to make the code more readable. * doc/org/tut01.org, doc/org/tut02.org, doc/org/tut03.org, doc/org/tut21.org, doc/org/tut24.org, doc/org/tut90.org, python/spot/__init__.py, python/spot/jupyter.py, tests/python/acc.py, tests/python/acc_cond.ipynb, tests/python/complement_semidet.py, tests/python/decompose.ipynb, tests/python/formulas.ipynb, tests/python/highlighting.ipynb, tests/python/ipnbdoctest.py, tests/python/ltlf.py, tests/python/parity.ipynb, tests/python/product.ipynb, tests/python/relabel.py, tests/python/satmin.ipynb, tests/python/stutter-inv.ipynb, tests/python/twagraph-internals.ipynb, tests/python/zlktree.ipynb: Use f-strings. --- doc/org/tut01.org | 3 +- doc/org/tut02.org | 4 +- doc/org/tut03.org | 4 +- doc/org/tut21.org | 4 +- doc/org/tut24.org | 36 ++++---- doc/org/tut90.org | 8 +- python/spot/__init__.py | 53 +++++------ python/spot/jupyter.py | 11 ++- tests/python/acc.py | 2 +- tests/python/acc_cond.ipynb | 42 +-------- tests/python/complement_semidet.py | 3 +- tests/python/decompose.ipynb | 83 ++++++++++-------- tests/python/formulas.ipynb | 20 ++--- tests/python/highlighting.ipynb | 50 +++++------ tests/python/ipnbdoctest.py | 5 +- tests/python/ltlf.py | 4 +- tests/python/parity.ipynb | 8 +- tests/python/product.ipynb | 16 ++-- tests/python/relabel.py | 2 +- tests/python/satmin.ipynb | 122 +++++++++++++------------- tests/python/stutter-inv.ipynb | 30 +++---- tests/python/twagraph-internals.ipynb | 37 ++++---- tests/python/zlktree.ipynb | 72 +++++++-------- 23 files changed, 292 insertions(+), 327 deletions(-) diff --git a/doc/org/tut01.org b/doc/org/tut01.org index 9d446e3cc..6693cf9a8 100644 --- a/doc/org/tut01.org +++ b/doc/org/tut01.org @@ -389,7 +389,8 @@ In C++ you can enable lenient using one of the Boolean arguments of Formulas have a custom format specification language that allows you to easily change the way a formula should be output when using the -=format()= method of strings. +=format()= method of strings, or using [[https://docs.python.org/3/tutorial/inputoutput.html#formatted-string-literals][formatted string litterals]]. 
+ #+BEGIN_SRC python import spot diff --git a/doc/org/tut02.org b/doc/org/tut02.org index 5d63b35c9..ec703a2b2 100644 --- a/doc/org/tut02.org +++ b/doc/org/tut02.org @@ -68,8 +68,8 @@ import spot m = spot.relabeling_map() g = spot.relabel('"Proc@Here" U ("var > 10" | "var < 4")', spot.Pnn, m) for newname, oldname in m.items(): - print("#define {} ({})".format(newname.to_str(), oldname.to_str('spin', True))) - print(g.to_str('spin', True)) + print(f"#define {newname.to_str()} ({oldname.to_str('spin', True)})") +print(g.to_str('spin', True)) #+END_SRC #+RESULTS: diff --git a/doc/org/tut03.org b/doc/org/tut03.org index 40b59d82b..a266e765b 100644 --- a/doc/org/tut03.org +++ b/doc/org/tut03.org @@ -159,9 +159,9 @@ The Python equivalent is similar: # kindstr() prints the name of the operator # size() return the number of operands of the operators - print("{}, {} children".format(f.kindstr(), f.size())) + print(f"{f.kindstr()}, {f.size()} children") # [] accesses each operand - print("left: {f[0]}, right: {f[1]}".format(f=f)) + print(f"left: {f[0]}, right: {f[1]}") # you can also iterate over all operands using a for loop for child in f: print(" *", child) diff --git a/doc/org/tut21.org b/doc/org/tut21.org index 677736aea..358577db9 100644 --- a/doc/org/tut21.org +++ b/doc/org/tut21.org @@ -557,9 +557,9 @@ Here is the very same example, but written in Python: print("Stutter Invariant:", aut.prop_stutter_invariant()) for s in range(0, aut.num_states()): - print("State {}:".format(s)) + print(f"State {s}:") for t in aut.out(s): - print(" edge({} -> {})".format(t.src, t.dst)) + print(f" edge({t.src} -> {t.dst})") # bdd_print_formula() is designed to print on a std::ostream, and # is inconvenient to use in Python. Instead we use # bdd_format_formula() as this simply returns a string. diff --git a/doc/org/tut24.org b/doc/org/tut24.org index fd561eec8..e2d947531 100644 --- a/doc/org/tut24.org +++ b/doc/org/tut24.org @@ -190,26 +190,26 @@ decide whether to enclose the destinations in braces. 
Here is the Python version of this code: #+BEGIN_SRC python - import spot + import spot - aut = spot.automaton("tut24.hoa") - bdict = aut.get_dict() - init = aut.get_init_state_number() - ui = aut.is_univ_dest(init) - print("Initial states: {}{}{}".format("{ " if ui else "", - " ".join(map(str, aut.univ_dests(init))), - " }" if ui else "")) - for s in range(0, aut.num_states()): - print("State {}:".format(s)) - for t in aut.out(s): - ud = aut.is_univ_dest(t) - print(" edge({} -> {}{}{})".format(t.src, - "{ " if ud else "", - " ".join(map(str, aut.univ_dests(t))), - " }" if ud else "")) - print(" label =", spot.bdd_format_formula(bdict, t.cond)) - print(" acc sets =", t.acc) + aut = spot.automaton("tut24.hoa") + + def format_dest(s): + if not aut.is_univ_dest(s): + return s + else: + return f'{{ {" ".join(map(str, aut.univ_dests(s)))} }}' + + bdict = aut.get_dict() + init = aut.get_init_state_number() + print(f"Initial states: {format_dest(init)}") + for s in range(0, aut.num_states()): + print("State {}:".format(s)) + for t in aut.out(s): + print(f" edge({t.src} -> {format_dest(t.dst)})") + print(" label =", spot.bdd_format_formula(bdict, t.cond)) + print(" acc sets =", t.acc) #+END_SRC #+RESULTS: diff --git a/doc/org/tut90.org b/doc/org/tut90.org index c352c356d..ea3657e6e 100644 --- a/doc/org/tut90.org +++ b/doc/org/tut90.org @@ -108,12 +108,12 @@ import spot aut1 = spot.make_twa_graph() ap1a = aut1.register_ap("a") ap1b = aut1.register_ap("b") -print("aut1: a={} b={}".format(ap1a, ap1b)) +print(f"aut1: a={ap1a} b={ap1b}") aut2 = spot.make_twa_graph() ap2c = aut2.register_ap("c") ap2b = aut2.register_ap("b") ap2a = aut2.register_ap("a") -print("aut1: a={} b={} c={}".format(ap2a, ap2b, ap2c)) +print(f"aut1: a={ap2a} b={ap2b} c={ap2c}") #+END_SRC #+RESULTS: @@ -271,7 +271,7 @@ all transition that belong to a certain acceptance set. self.set.add((src, guard, dst)) def str_trans(self, src, guard, dst): f = spot.bdd_format_formula(self.dict, guard) - return "({},{},{})".format(src, f, dst) + return f"({src},{f},{dst})" def __str__(self): return '{' + ",".join([ self.str_trans(*t) for t in self.set ]) + '}' @@ -353,7 +353,7 @@ automaton registers its variables. self.set.add((src, guard, dest)) def str_trans(self, src, guard, dest): f = spot.bdd_format_formula(self.dict, guard) - return "({},{},{})".format(src, f, dest) + return f"({src},{f},{dest})" def __str__(self): return '{' + ",".join([ self.str_trans(*t) for t in self.set ]) + '}' diff --git a/python/spot/__init__.py b/python/spot/__init__.py index b784fdbdf..3d3393797 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -512,7 +512,7 @@ class acd: .acdrej polygon{fill:red;} .acdacc polygon{fill:green;} ''' - js = ''' + js = f''' function acdremclasses(sel, classes) {{ document.querySelectorAll(sel).forEach(n=>{{n.classList.remove(...classes)}});}} function acdaddclasses(sel, classes) {{ @@ -545,30 +545,26 @@ function acd{num}_node(node, acc){{ acdaddclasses("#acdaut{num} .acdN" + node, [acc ? 
"acdacc" : "acdrej", "acdbold"]); acdaddclasses("#acd{num} #N" + node, ["acdbold", "acdhigh"]); -}};'''.format(num=num) +}};''' me = 0 for n in range(self.node_count()): for e in self.edges_of_node(n): me = max(e, me) - js += 'acdaddclasses("#acdaut{num} #E{e}", ["acdN{n}"]);\n'\ - .format(num=num, e=e, n=n) + js += f'acdaddclasses("#acdaut{num} #E{e}", ["acdN{n}"]);\n' for e in range(1, me + 1): - js += 'acdonclick("#acdaut{num} #E{e}",'\ - 'function(){{acd{num}_edge({e});}});\n'\ - .format(num=num, e=e) + js += f'acdonclick("#acdaut{num} #E{e}",'\ + f'function(){{acd{num}_edge({e});}});\n' for s in range(self.get_aut().num_states()): - js += 'acdonclick("#acdaut{num} #S{s}",'\ - 'function(){{acd{num}_state({s});}});\n'\ - .format(num=num, s=s) + js += f'acdonclick("#acdaut{num} #S{s}",'\ + f'function(){{acd{num}_state({s});}});\n' for n in range(self.node_count()): v = int(self.node_acceptance(n)) - js += 'acdonclick("#acd{num} #N{n}",'\ - 'function(){{acd{num}_node({n}, {v});}});\n'\ - .format(num=num, n=n, v=v) + js += f'acdonclick("#acd{num} #N{n}",'\ + f'function(){{acd{num}_node({n}, {v});}});\n' html = '
{}
{}
'\ .format(style, - self.get_aut().show('.i(acdaut{})'.format(num)).data, - self._repr_svg_("acd{}".format(num)), + self.get_aut().show(f'.i(acdaut{num})').data, + self._repr_svg_(f"acd{num}"), js); return html @@ -746,7 +742,7 @@ def automaton(filename, **kwargs): try: return next(automata(filename, **kwargs)) except StopIteration: - raise RuntimeError("Failed to read automaton from {}".format(filename)) + raise RuntimeError(f"Failed to read automaton from {filename}") def aiger_circuits(*sources, bdd_dict = None): """Read aiger circuits from a list of sources. @@ -777,7 +773,7 @@ def aiger_circuit(source, bdd_dict = None): return next(aiger_circuits(source, bdd_dict = bdd_dict)) except StopIteration: raise RuntimeError("Failed to read an aiger circuit " - "from {}".format(source)) + f"from {source}") def _postproc_translate_options(obj, default_type, *args): @@ -795,8 +791,7 @@ def _postproc_translate_options(obj, default_type, *args): def type_set(val): nonlocal type_, type_name_ if type_ is not None and type_name_ != val: - raise ValueError("type cannot be both {} and {}" - .format(type_name_, val)) + raise ValueError(f"type cannot be both {type_name_} and {val}") elif val == 'generic' or val == 'gen' or val == 'g': type_ = postprocessor.Generic elif val == 'tgba': # historical @@ -839,8 +834,8 @@ def _postproc_translate_options(obj, default_type, *args): def pref_set(val): nonlocal pref_, pref_name_ if pref_ is not None and pref_name_ != val: - raise ValueError("preference cannot be both {} and {}" - .format(pref_name_, val)) + raise ValueError("preference cannot be both "\ + f"{pref_name_} and {val}") elif val == 'small': pref_ = postprocessor.Small elif val == 'deterministic': @@ -853,8 +848,8 @@ def _postproc_translate_options(obj, default_type, *args): def optm_set(val): nonlocal optm_, optm_name_ if optm_ is not None and optm_name_ != val: - raise ValueError("optimization level cannot be both {} and {}" - .format(optm_name_, val)) + raise ValueError("optimization level cannot be both "\ + f"{optm_name_} and {val}") if val == 'high': optm_ = postprocessor.High elif val.startswith('med'): @@ -930,10 +925,10 @@ def _postproc_translate_options(obj, default_type, *args): if lc == 1: f(compat[0]) elif lc < 1: - raise ValueError("unknown option '{}'".format(arg)) + raise ValueError(f"unknown option '{arg}'") else: - raise ValueError("ambiguous option '{}' is prefix of {}" - .format(arg, str(compat))) + raise ValueError(f"ambiguous option '{arg}' "\ + f"is prefix of {str(compat)}") if type_ is None: type_ = default_type @@ -1307,7 +1302,7 @@ def sat_minimize(aut, acc=None, colored=False, if display_log or return_log: import pandas as pd with tempfile.NamedTemporaryFile(dir='.', suffix='.satlog') as t: - args += ',log="{}"'.format(t.name) + args += f',log="{t.name}"' aut = sm(aut, args, state_based) dfrm = pd.read_csv(t.name, dtype=object) if display_log: @@ -1397,10 +1392,10 @@ def mp_hierarchy_svg(cl=None): 'B': '110,198', } if cl in coords: - highlight = ''' + highlight = f''' - '''.format(coords[cl]) + ''' else: highlight = '' return ''' diff --git a/python/spot/jupyter.py b/python/spot/jupyter.py index 0bfea81a0..97d383b1b 100644 --- a/python/spot/jupyter.py +++ b/python/spot/jupyter.py @@ -51,11 +51,11 @@ def display_inline(*args, per_row=None, show=None): If the `per_row` argument is given, at most `per_row` arguments are displayed on each row, each one taking 1/per_row of the line width. 
""" - width = res = '' + w = res = '' if per_row: - width = 'width:{}%;'.format(100//per_row) + w = f'width:{100//per_row}%;' for arg in args: - dpy = 'inline-block' + dpy = 'display:inline-block' if show is not None and hasattr(arg, 'show'): arg = arg.show(show) if hasattr(arg, '_repr_html_'): @@ -65,9 +65,8 @@ def display_inline(*args, per_row=None, show=None): elif hasattr(arg, '_repr_latex_'): rep = arg._repr_latex_() if not per_row: - dpy = 'inline' + dpy = 'display:inline' else: rep = str(arg) - res += ("
{}
" - .format(dpy, width, rep)) + res += f"
{rep}
" display(HTML(res)) diff --git a/tests/python/acc.py b/tests/python/acc.py index b215428b1..1374bab8a 100644 --- a/tests/python/acc.py +++ b/tests/python/acc.py @@ -22,7 +22,7 @@ tc = TestCase() a = spot.acc_cond('parity min odd 5') tc.assertEqual(str(a.fin_unit_one_split()), - '(0, {}, spot.acc_cond(5, "f"))'.format(repr(a))) + f'(0, {a!r}, spot.acc_cond(5, "f"))') a.set_acceptance('Rabin 3') tc.assertEqual(str(a.fin_unit_one_split()), diff --git a/tests/python/acc_cond.ipynb b/tests/python/acc_cond.ipynb index f2773938b..d3b6ddffd 100644 --- a/tests/python/acc_cond.ipynb +++ b/tests/python/acc_cond.ipynb @@ -11,7 +11,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -100,7 +99,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -131,7 +129,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -194,7 +191,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -220,7 +216,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -252,7 +247,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -280,7 +274,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -331,7 +324,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -359,7 +351,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -400,7 +391,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -428,7 +418,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -455,7 +444,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -483,7 +471,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -552,7 +539,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -603,7 +589,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -630,7 +615,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -743,7 +727,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -775,11 +758,10 @@ "acc = spot.acc_code('Fin(0) & Inf(1) | Inf(2)')\n", "print(\"acc =\", acc)\n", "for x in ([0, 1, 2], [1, 2], [0, 1], [0, 2], [0], [1], [2], []):\n", - " print(\"acc.accepting({}) = {}\".format(x, acc.accepting(x)))" + " print(f\"acc.accepting({x}) = {acc.accepting(x)}\")" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -809,7 +791,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -837,7 +818,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -873,7 +853,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -909,7 +888,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -978,7 +956,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1049,7 +1026,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1078,7 +1054,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1108,7 +1083,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1138,7 +1112,6 @@ ] }, { - 
"attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1170,7 +1143,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1202,7 +1174,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1235,7 +1206,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1264,7 +1234,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1292,7 +1261,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1320,7 +1288,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1346,11 +1313,10 @@ "source": [ "print(\"acc =\", acc)\n", "for x in ([0, 1, 2, 3, 10], [1, 2]):\n", - " print(\"acc.accepting({}) = {}\".format(x, acc.accepting(x)))" + " print(f\"acc.accepting({x}) = {acc.accepting(x)}\")" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1419,7 +1385,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1446,7 +1411,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -1566,7 +1530,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/complement_semidet.py b/tests/python/complement_semidet.py index 1aa3bcf5d..1027985e2 100644 --- a/tests/python/complement_semidet.py +++ b/tests/python/complement_semidet.py @@ -31,8 +31,7 @@ n = 10000 for aut in spot.automata( "randltl -n-1 a b " "| ltl2tgba " - "| autfilt --is-semi-deterministic --acceptance-is=Buchi -n{} |" - .format(n)): + f"| autfilt --is-semi-deterministic --acceptance-is=Buchi -n{n} |"): comp = complement(aut) semidet_comp = spot.complement_semidet(aut, True) diff --git a/tests/python/decompose.ipynb b/tests/python/decompose.ipynb index 9d1728f21..f1943ecee 100644 --- a/tests/python/decompose.ipynb +++ b/tests/python/decompose.ipynb @@ -359,7 +359,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c1623d0> >" + " *' at 0x7faaf7cffd80> >" ] }, "execution_count": 2, @@ -583,7 +583,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162340> >" + " *' at 0x7faaf7d10240> >" ] }, "execution_count": 3, @@ -874,7 +874,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c161fe0> >" + " *' at 0x7faaf7d103f0> >" ] }, "execution_count": 4, @@ -1043,7 +1043,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162070> >" + " *' at 0x7faaf7d10600> >" ] }, "execution_count": 5, @@ -1175,7 +1175,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162eb0> >" + " *' at 0x7faaf7d10a80> >" ] }, "execution_count": 6, @@ -1400,7 +1400,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163390> >" + " *' at 0x7faaf7d110e0> >" ] }, "metadata": {}, @@ -1681,7 +1681,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163360> >" + " *' at 0x7faaf7cffde0> >" ] }, "metadata": {}, @@ -2008,7 +2008,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163390> >" + " *' at 0x7faaf7cfff60> >" ] }, "metadata": {}, @@ -2775,7 +2775,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162b50> >" + " *' at 0x7faaf7d115f0> >" ] }, "execution_count": 8, @@ -3509,7 +3509,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162c70> >" + " *' at 0x7faaf7d10c00> >" ] }, "metadata": {}, @@ -3964,7 +3964,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163390> >" + " *' at 0x7faaf7cfff60> >" ] }, "metadata": {}, @@ -4375,7 +4375,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162c70> >" + " *' at 
0x7faaf7d10c00> >" ] }, "metadata": {}, @@ -5120,7 +5120,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163c00> >" + " *' at 0x7faaf7d10a20> >" ] }, "execution_count": 10, @@ -5415,7 +5415,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163b40> >" + " *' at 0x7faaf7d10b40> >" ] }, "metadata": {}, @@ -5684,7 +5684,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163a80> >" + " *' at 0x7faaf7d119e0> >" ] }, "metadata": {}, @@ -5965,7 +5965,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163e10> >" + " *' at 0x7faaf7d114d0> >" ] }, "metadata": {}, @@ -6637,7 +6637,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163150> >" + " *' at 0x7faaf7d11c20> >" ] }, "execution_count": 12, @@ -7293,7 +7293,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162c10> >" + " *' at 0x7faaf7d12460> >" ] }, "metadata": {}, @@ -7638,7 +7638,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163e10> >" + " *' at 0x7faaf7d114d0> >" ] }, "metadata": {}, @@ -7945,7 +7945,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162c10> >" + " *' at 0x7faaf7d12460> >" ] }, "metadata": {}, @@ -8612,7 +8612,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163240> >" + " *' at 0x7faaf7d12160> >" ] }, "execution_count": 14, @@ -8823,7 +8823,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c163d50> >" + " *' at 0x7faaf7d11c80> >" ] }, "execution_count": 15, @@ -8989,7 +8989,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17c870> >" + " *' at 0x7faaf7d120d0> >" ] }, "metadata": {}, @@ -9132,7 +9132,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c1634b0> >" + " *' at 0x7faaf7d11290> >" ] }, "metadata": {}, @@ -9293,7 +9293,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17c540> >" + " *' at 0x7faaf7d11e60> >" ] }, "execution_count": 18, @@ -9557,7 +9557,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17c570> >" + " *' at 0x7faaf7d10180> >" ] }, "execution_count": 19, @@ -9830,7 +9830,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17db90> >" + " *' at 0x7faaf7d137b0> >" ] }, "metadata": {}, @@ -10025,7 +10025,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c162c10> >" + " *' at 0x7faaf7d12460> >" ] }, "metadata": {}, @@ -10279,7 +10279,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c1626d0> >" + " *' at 0x7faaf7d137b0> >" ] }, "metadata": {}, @@ -10315,10 +10315,10 @@ "name": "stdout", "output_type": "stream", "text": [ - "SCC #0 contains states [1]\n", - "SCC #1 contains states [4]\n", - "SCC #2 contains states [3]\n", - "SCC #3 contains states [0, 2]\n" + "SCC #0 contains states (1,)\n", + "SCC #1 contains states (4,)\n", + "SCC #2 contains states (3,)\n", + "SCC #3 contains states (0, 2)\n" ] }, { @@ -10642,7 +10642,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17d410> >" + " *' at 0x7faaf7d124c0> >" ] }, "metadata": {}, @@ -10915,7 +10915,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17d3e0> >" + " *' at 0x7faaf7d117d0> >" ] }, "execution_count": 21, @@ -10927,7 +10927,7 @@ "aut = spot.translate('(Ga -> Gb) W c')\n", "si = spot.scc_info(aut)\n", "for scc in range(si.scc_count()):\n", - " print(\"SCC #{} contains states {}\".format(scc, list(si.states_of(scc))))\n", + " print(f\"SCC #{scc} contains states {si.states_of(scc)}\")\n", "display(aut)\n", "spot.decompose_scc(si, '0,1')" ] @@ -11089,7 +11089,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f328c17e160> >" + " *' at 0x7faaf7d13420> >" ] }, "execution_count": 22, @@ -11100,6 +11100,13 @@ "source": [ "spot.decompose_scc(si, 'a2')" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -11118,7 +11125,7 @@ "name": "python", 
"nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index f20769a72..47b908c75 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -189,14 +189,14 @@ ], "source": [ "for i in ['spot', 'spin', 'lbt', 'wring', 'utf8', 'latex', 'sclatex', 'mathjax']:\n", - " print(\"%-10s%s\" % (i, f.to_str(i)))" + " print(f\"{i:10}{f.to_str(i)}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Formulas output via `format()` can also use some convenient shorthand to select the syntax:" + "Formulas output via `format()` of f-strings can also use some convenient shorthand to select the syntax:" ] }, { @@ -218,13 +218,13 @@ } ], "source": [ - "print(\"\"\"\\\n", - "Spin: {0:s}\n", - "Spin+parentheses: {0:sp}\n", - "Spot (default): {0}\n", - "Spot+shell quotes: {0:q}\n", - "LBT, right aligned: {0:l:~>40}\n", - "LBT, no M/W/R: {0:[MWR]l}\"\"\".format(f))" + "print(f\"\"\"\\\n", + "Spin: {f:s}\n", + "Spin+parentheses: {f:sp}\n", + "Spot (default): {f}\n", + "Spot+shell quotes: {f:q}\n", + "LBT, right aligned: {f:l:~>40}\n", + "LBT, no M/W/R: {f:[MWR]l}\"\"\")" ] }, { @@ -1050,7 +1050,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/highlighting.ipynb b/tests/python/highlighting.ipynb index 07145174d..55b7006b2 100644 --- a/tests/python/highlighting.ipynb +++ b/tests/python/highlighting.ipynb @@ -330,7 +330,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4156190> >" + " *' at 0x7fed1691bbd0> >" ] }, "execution_count": 4, @@ -525,7 +525,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4155f80> >" + " *' at 0x7fed16a24420> >" ] }, "execution_count": 5, @@ -718,7 +718,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4156190> >" + " *' at 0x7fed1691bbd0> >" ] }, "execution_count": 6, @@ -830,9 +830,9 @@ ], "source": [ "for i in range(0, a.num_states()):\n", - " print(\"state {}: {}\".format(i, a.get_highlight_state(i)))\n", + " print(f\"state {i}: {a.get_highlight_state(i)}\")\n", "for i in range(1, a.num_edges() + 1):\n", - " print(\"edge {}: {}\".format(i, a.get_highlight_edge(i)))" + " print(f\"edge {i}: {a.get_highlight_edge(i)}\")" ] }, { @@ -1023,7 +1023,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4157900> >" + " *' at 0x7fed16a248a0> >" ] }, "execution_count": 9, @@ -1218,7 +1218,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4157780> >" + " *' at 0x7fed16a25c80> >" ] }, "execution_count": 10, @@ -1517,7 +1517,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f41574b0> >" + " *' at 0x7fed16a25950> >" ] }, "execution_count": 11, @@ -1846,7 +1846,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f41574b0> >" + " *' at 0x7fed16a25950> >" ] }, "execution_count": 14, @@ -2493,7 +2493,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f41745d0> >" + " *' at 0x7fed16a26100> >" ] }, "metadata": {}, @@ -3006,7 +3006,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4156100> >" + " *' at 0x7fed16a252c0> >" ] }, "metadata": {}, @@ -3363,7 +3363,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4157480> >" + " *' at 0x7fed16a25e00> >" ] }, "metadata": {}, @@ -3536,7 +3536,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4174d80> >" + " *' at 0x7fed16a26340> >" ] }, "metadata": {}, @@ -3637,7 +3637,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4174210> >" + " *' at 0x7fed16a26100> >" ] }, "metadata": {}, @@ -3804,7 +3804,7 @@ "\n" ], "text/plain": [ - 
" *' at 0x7fa7f4174f30> >" + " *' at 0x7fed16a25f80> >" ] }, "execution_count": 17, @@ -4006,7 +4006,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4174f30> >" + " *' at 0x7fed16a25f80> >" ] }, "metadata": {}, @@ -4127,7 +4127,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4174d80> >" + " *' at 0x7fed16a26340> >" ] }, "metadata": {}, @@ -4228,7 +4228,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4174210> >" + " *' at 0x7fed16a26100> >" ] }, "metadata": {}, @@ -4635,7 +4635,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4176160> >" + " *' at 0x7fed16a279c0> >" ] }, "metadata": {}, @@ -4796,7 +4796,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4175320> >" + " *' at 0x7fed16a25e90> >" ] }, "metadata": {}, @@ -4981,7 +4981,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4176130> >" + " *' at 0x7fed16a27990> >" ] }, "metadata": {}, @@ -5284,7 +5284,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4175950> >" + " *' at 0x7fed16a27480> >" ] }, "execution_count": 22, @@ -5586,7 +5586,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4175950> >" + " *' at 0x7fed16a27480> >" ] }, "execution_count": 23, @@ -5883,7 +5883,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fa7f4175950> >" + " *' at 0x7fed16a27480> >" ] }, "metadata": {}, @@ -6391,7 +6391,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/ipnbdoctest.py b/tests/python/ipnbdoctest.py index 47b73f901..563808e52 100755 --- a/tests/python/ipnbdoctest.py +++ b/tests/python/ipnbdoctest.py @@ -154,7 +154,7 @@ def canonicalize(s, type, ignores): s, flags=re.DOTALL) for n, p in enumerate(ignores): - s = re.sub(p, 'IGN{}'.format(n), s) + s = re.sub(p, f'IGN{n}', s) return s @@ -219,8 +219,7 @@ def compare_outputs(ref, test, ignores=[]): ok = True if len(cref) != len(ctest): - print("output length mismatch (expected {}, got {})".format( - len(cref), len(ctest))) + print(f"output length mismatch: expected {len(cref)}, got {len(ctest)}") ok = False # There can be several outputs. 
For instance wnen the cell both # prints a result (goes to "stdout") and displays an automaton diff --git a/tests/python/ltlf.py b/tests/python/ltlf.py index afd114855..2d964d04b 100644 --- a/tests/python/ltlf.py +++ b/tests/python/ltlf.py @@ -42,7 +42,7 @@ for f in formulas: f2 = f1.unabbreviate() f3 = spot.formula_And([spot.from_ltlf(f1), cst]) f4 = spot.formula_And([spot.from_ltlf(f2), cst]) - print("{}\t=>\t{}".format(f1, f3)) - print("{}\t=>\t{}".format(f2, f4)) + print(f"{f1}\t=>\t{f3}") + print(f"{f2}\t=>\t{f4}") tc.assertTrue(lcc.equal(f3, f4)) print() diff --git a/tests/python/parity.ipynb b/tests/python/parity.ipynb index a47864c92..95778bd8c 100644 --- a/tests/python/parity.ipynb +++ b/tests/python/parity.ipynb @@ -64,8 +64,8 @@ "for kind in ['min', 'max']:\n", " for style in ['odd', 'even']:\n", " for sets in range(1, 5):\n", - " name = 'parity {} {} {}'.format(kind, style, sets)\n", - " print('{:17} = {}'.format(name, spot.acc_code(name)))" + " name = f'parity {kind} {style} {sets}'\n", + " print(f'{name:17} = {spot.acc_code(name)}')" ] }, { @@ -4949,7 +4949,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fc0d6bea0d0> >" + " *' at 0x7f88de01ed30> >" ] }, "metadata": {}, @@ -5460,7 +5460,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/product.ipynb b/tests/python/product.ipynb index e5434e0c7..3a133e982 100644 --- a/tests/python/product.ipynb +++ b/tests/python/product.ipynb @@ -779,8 +779,7 @@ ], "source": [ "def show_prod(a1, a2, res):\n", - " s1 = a1.num_sets()\n", - " display_inline(a1, a2.show('.tvb+{}'.format(s1)), res)\n", + " display_inline(a1, a2.show(f'.tvb+{a1.num_sets()}'), res)\n", "\n", "show_prod(a1, a2, prod)" ] @@ -1714,7 +1713,7 @@ "\n", "The one-liner above is wrong for two reasons:\n", "\n", - " - if `left` and `right` are non-deterministic, their product could be deterministic, so calling prop_universal(False) would be wrong. \n", + " - if `left` and `right` are non-deterministic, their product could be deterministic, so calling `prop_universal(False)` would be wrong. \n", "\n", " - the use of the `and` operator on `trival` is misleading in non-Boolean context. The `&` operator would be the correct operator to use if you want to work in threed-valued logic. Compare: " ] @@ -1746,8 +1745,7 @@ "maybe = spot.trival_maybe()\n", "for u in (no, maybe, yes):\n", " for v in (no, maybe, yes):\n", - " print(\"{u!s:>5} & {v!s:<5} = {r1!s:<5} {u!s:>5} and {v!s:<5} = {r2!s:<5}\"\n", - " .format(u=u, v=v, r1=(u&v), r2=(u and v)))" + " print(f\"{u!s:>5} & {v!s:<5} = {u&v!s:<5} {u!s:>5} and {v!s:<5} = {u and v !s:<5}\")" ] }, { @@ -2444,7 +2442,7 @@ "display(p3.show('.1'))\n", "pairs = p3.get_product_states()\n", "for s in range(p3.num_states()):\n", - " print(\"{}: {}\".format(s, pairs[s]))" + " print(f\"{s}: {pairs[s]}\")" ] }, { @@ -2465,7 +2463,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "87.6 µs ± 982 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n" + "107 µs ± 2.91 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n" ] } ], @@ -2482,7 +2480,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "2.14 µs ± 9.61 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n" + "2.23 µs ± 47.3 ns per loop (mean ± std. dev. 
of 7 runs, 100,000 loops each)\n" ] } ], @@ -2525,7 +2523,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/relabel.py b/tests/python/relabel.py index 3b68c2bc8..ed165ee37 100644 --- a/tests/python/relabel.py +++ b/tests/python/relabel.py @@ -25,7 +25,7 @@ m = spot.relabeling_map() g = spot.relabel_bse(f, spot.Pnn, m) res = "" for old, new in m.items(): - res += "#define {} {}\n".format(old, new) + res += f"#define {old} {new}\n" res += str(g) tc.assertEqual(res, """#define p0 a & b #define p1 c diff --git a/tests/python/satmin.ipynb b/tests/python/satmin.ipynb index 0c83d85f5..0522b602e 100644 --- a/tests/python/satmin.ipynb +++ b/tests/python/satmin.ipynb @@ -450,7 +450,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1ce910> >" + " *' at 0x7fe21cd04240> >" ] }, "execution_count": 3, @@ -799,7 +799,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1ce430> >" + " *' at 0x7fe21e17b0c0> >" ] }, "execution_count": 4, @@ -1244,7 +1244,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1ce940> >" + " *' at 0x7fe21cd04090> >" ] }, "execution_count": 5, @@ -1676,7 +1676,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cefa0> >" + " *' at 0x7fe21cd04c00> >" ] }, "execution_count": 6, @@ -1938,7 +1938,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cee80> >" + " *' at 0x7fe21cd04cf0> >" ] }, "execution_count": 7, @@ -2255,7 +2255,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cec40> >" + " *' at 0x7fe21cd04a50> >" ] }, "execution_count": 8, @@ -2546,7 +2546,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cebb0> >" + " *' at 0x7fe21cd04870> >" ] }, "execution_count": 9, @@ -2618,9 +2618,9 @@ " NaN\n", " 996\n", " 48806\n", - " 2\n", - " 0\n", + " 1\n", " 0\n", + " 1\n", " 0\n", " \n", " \n", @@ -2633,7 +2633,7 @@ " 2760\n", " 224707\n", " 5\n", - " 0\n", + " 1\n", " 5\n", " 0\n", " \n", @@ -2646,9 +2646,9 @@ " 32\n", " 2008\n", " 155020\n", - " 3\n", + " 4\n", " 0\n", - " 3\n", + " 2\n", " 0\n", " \n", " \n", @@ -2662,9 +2662,9 @@ "2 5 4 4 11 32 2008 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 48806 2 0 0 0 \n", - "1 224707 5 0 5 0 \n", - "2 155020 3 0 3 0 " + "0 48806 1 0 1 0 \n", + "1 224707 5 1 5 0 \n", + "2 155020 4 0 2 0 " ] }, "metadata": {}, @@ -2937,7 +2937,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5a370> >" + " *' at 0x7fe208005380> >" ] }, "execution_count": 10, @@ -3005,10 +3005,10 @@ " NaN\n", " 348\n", " 15974\n", - " 0\n", - " 0\n", " 1\n", " 0\n", + " 0\n", + " 0\n", " \n", " \n", " 1\n", @@ -3021,7 +3021,7 @@ " 73187\n", " 2\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " \n", " \n", @@ -3049,8 +3049,8 @@ "2 2 4 4 11 32 616 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 15974 0 0 1 0 \n", - "1 73187 2 0 1 0 \n", + "0 15974 1 0 0 0 \n", + "1 73187 2 0 0 0 \n", "2 37620 1 0 0 0 " ] }, @@ -3324,7 +3324,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cf870> >" + " *' at 0x7fe21e11bc00> >" ] }, "execution_count": 11, @@ -3394,9 +3394,9 @@ " 40\n", " 2300\n", " 288887\n", - " 8\n", + " 7\n", " 0\n", - " 12\n", + " 8\n", " 0\n", " \n", " \n", @@ -3438,7 +3438,7 @@ "2 2 1 NaN NaN NaN 92 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 288887 8 0 12 0 \n", + "0 288887 7 0 8 0 \n", "1 18569 1 0 0 0 \n", "2 2337 0 0 0 0 " ] @@ -3627,7 +3627,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cf960> >" + " *' at 0x7fe21cd04d80> >" ] }, "execution_count": 12, @@ -3695,7 +3695,7 @@ " 40\n", " 2742\n", " 173183\n", - " 4\n", + " 
3\n", " 0\n", " 2\n", " 0\n", @@ -3711,7 +3711,7 @@ " 45412\n", " 1\n", " 0\n", - " 0\n", + " 1\n", " 0\n", " \n", " \n", @@ -3723,7 +3723,7 @@ " NaN\n", " 363\n", " 10496\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " 0\n", @@ -3739,9 +3739,9 @@ "2 4 3 NaN NaN NaN 363 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 173183 4 0 2 0 \n", - "1 45412 1 0 0 0 \n", - "2 10496 1 0 0 0 " + "0 173183 3 0 2 0 \n", + "1 45412 1 0 1 0 \n", + "2 10496 0 0 0 0 " ] }, "metadata": {}, @@ -4006,7 +4006,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5b3c0> >" + " *' at 0x7fe21cd04690> >" ] }, "execution_count": 13, @@ -4117,7 +4117,7 @@ " 173427\n", " 0\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " \n", " \n", @@ -4133,7 +4133,7 @@ " clauses enc.user enc.sys sat.user sat.sys \n", "0 173427 3 0 2 0 \n", "1 173427 0 0 0 0 \n", - "2 173427 0 0 1 0 " + "2 173427 0 0 0 0 " ] }, "metadata": {}, @@ -4412,7 +4412,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5b600> >" + " *' at 0x7fe2080067f0> >" ] }, "execution_count": 14, @@ -4484,7 +4484,7 @@ " 173183\n", " 4\n", " 0\n", - " 2\n", + " 1\n", " 0\n", " \n", " \n", @@ -4498,7 +4498,7 @@ " 173279\n", " 0\n", " 0\n", - " 0\n", + " 1\n", " 0\n", " \n", " \n", @@ -4526,8 +4526,8 @@ "2 4 3 NaN NaN NaN 2742 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 173183 4 0 2 0 \n", - "1 173279 0 0 0 0 \n", + "0 173183 4 0 1 0 \n", + "1 173279 0 0 1 0 \n", "2 173327 0 0 0 0 " ] }, @@ -4807,7 +4807,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5bd50> >" + " *' at 0x7fe208006c10> >" ] }, "execution_count": 15, @@ -5108,7 +5108,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5b6f0> >" + " *' at 0x7fe208006940> >" ] }, "metadata": {}, @@ -5159,7 +5159,7 @@ " 40\n", " 2742\n", " 173183\n", - " 4\n", + " 3\n", " 0\n", " 2\n", " 0\n", @@ -5176,7 +5176,7 @@ " 173279\n", " 0\n", " 0\n", - " 0\n", + " 1\n", " 0\n", " HOA: v1 States: 4 Start: 0 AP: 3 \"a\" \"c\" \"b\" a...\n", " \n", @@ -5206,8 +5206,8 @@ "2 4 3 NaN NaN NaN 2742 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \\\n", - "0 173183 4 0 2 0 \n", - "1 173279 0 0 0 0 \n", + "0 173183 3 0 2 0 \n", + "1 173279 0 0 1 0 \n", "2 173327 0 0 0 0 \n", "\n", " automaton \n", @@ -5544,7 +5544,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de94660> >" + " *' at 0x7fe208007030> >" ] }, "metadata": {}, @@ -5830,7 +5830,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de946f0> >" + " *' at 0x7fe208006fa0> >" ] }, "metadata": {}, @@ -5840,7 +5840,7 @@ "source": [ "for line, data in log.iterrows():\n", " if type(data.automaton) is str:\n", - " print(\"automaton from line {}:\".format(line))\n", + " print(f\"automaton from line {line}:\")\n", " display(spot.automaton(data.automaton + \"\\n\"))" ] }, @@ -6261,7 +6261,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cefa0> >" + " *' at 0x7fe21cd04c00> >" ] }, "execution_count": 18, @@ -6329,7 +6329,7 @@ " NaN\n", " 687\n", " 21896\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " 0\n", @@ -6343,9 +6343,9 @@ " 32\n", " 1905\n", " 100457\n", - " 2\n", + " 3\n", " 0\n", - " 2\n", + " 1\n", " 0\n", " \n", " \n", @@ -6358,8 +6358,8 @@ "1 6 5 4 12 32 1905 \n", "\n", " clauses enc.user enc.sys sat.user sat.sys \n", - "0 21896 1 0 0 0 \n", - "1 100457 2 0 2 0 " + "0 21896 0 0 0 0 \n", + "1 100457 3 0 1 0 " ] }, "metadata": {}, @@ -6638,7 +6638,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f61ac1cf6c0> >" + " *' at 0x7fe21cd051d0> >" ] }, "execution_count": 19, @@ -6730,7 +6730,7 @@ " 10496\n", " 0\n", " 0\n", - " 0\n", + " 1\n", " 0\n", " \n", " \n", @@ -6746,7 +6746,7 @@ " clauses 
enc.user enc.sys sat.user sat.sys \n", "0 51612 1 0 1 0 \n", "1 3129 0 0 0 0 \n", - "2 10496 0 0 0 0 " + "2 10496 0 0 1 0 " ] }, "metadata": {}, @@ -7025,7 +7025,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5b960> >" + " *' at 0x7fe208006fa0> >" ] }, "execution_count": 20, @@ -7581,7 +7581,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f611de5b570> >" + " *' at 0x7fe2080074e0> >" ] }, "execution_count": 21, @@ -7610,7 +7610,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/stutter-inv.ipynb b/tests/python/stutter-inv.ipynb index 2cfc87ac7..25b515199 100644 --- a/tests/python/stutter-inv.ipynb +++ b/tests/python/stutter-inv.ipynb @@ -215,7 +215,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You have to be aware of this property being set in your back because if while playing with `is_stutter_invariant()` you the incorrect formula for an automaton by mistake, the automaton will have its property set incorrectly, and running `is_stutter_invariant()` with the correct formula will simply return the cached property.\n", + "You have to be aware of this property being set in your back. If, after calling `is_stutter_invariant()`, you modify the automaton (e.g., adding edges, or changing labels), the modified automaton might then have its property set incorrectly, and running `is_stutter_invariant()` will not recompute a property that is set.\n", "\n", "In doubt, you can always reset the property as follows:" ] @@ -351,7 +351,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047afc00> >" + " *' at 0x7f2110141680> >" ] }, "metadata": {}, @@ -418,9 +418,9 @@ " acc, rej, aut = \"rejected\", \"accepted\", pos\n", " word2 = spot.sl2(waut).intersecting_word(aut)\n", " word2.simplify()\n", - " print(\"\"\"{} is {} by {}\n", + " print(f\"\"\"{word} is {acc} by {f}\n", " but if we stutter some of its letters, we get\n", - "{} which is {} by {}\"\"\".format(word, acc, f, word2, rej, f))" + "{word2} which is {rej} by {f}\"\"\")" ] }, { @@ -804,7 +804,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d0bd0> >" + " *' at 0x7f2110143b70> >" ] }, "metadata": {}, @@ -1165,7 +1165,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d0bd0> >" + " *' at 0x7f2110143b70> >" ] }, "metadata": {}, @@ -1389,7 +1389,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d1590> >" + " *' at 0x7f2110143d20> >" ] }, "metadata": {}, @@ -1576,7 +1576,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d0ab0> >" + " *' at 0x7f2110142b50> >" ] }, "metadata": {}, @@ -1962,7 +1962,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d2640> >" + " *' at 0x7f2110142340> >" ] }, "metadata": {}, @@ -2183,7 +2183,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d1d10> >" + " *' at 0x7f2110142580> >" ] }, "metadata": {}, @@ -2392,7 +2392,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d2370> >" + " *' at 0x7f21101417a0> >" ] }, "metadata": {}, @@ -2434,7 +2434,7 @@ "source": [ "sil_vec = spot.stutter_invariant_letters(pos, f)\n", "for q in range(pos.num_states()):\n", - " print(\"sil_vec[{}] =\".format(q), spot.bdd_format_formula(pos.get_dict(), sil_vec[q]))" + " print(f\"sil_vec[{q}] =\", spot.bdd_format_formula(pos.get_dict(), sil_vec[q]))" ] }, { @@ -2848,7 +2848,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d13e0> >" + " *' at 0x7f2110142430> >" ] }, "metadata": {}, @@ -3408,7 +3408,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f47047d13e0> >" + " *' at 0x7f2110142430> >" ] }, "metadata": {}, @@ -3649,7 +3649,7 @@ "name": 
"python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/twagraph-internals.ipynb b/tests/python/twagraph-internals.ipynb index 230d47d89..6a1f7b2c5 100644 --- a/tests/python/twagraph-internals.ipynb +++ b/tests/python/twagraph-internals.ipynb @@ -252,7 +252,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "execution_count": 3, @@ -784,7 +784,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -1043,7 +1043,8 @@ ], "source": [ "for ed in aut.out(0):\n", - " print(\"edges[{e}].src={src}, edges[{e}].dst={dst}\".format(e=aut.edge_number(ed), src=ed.src, dst=ed.dst))" + " en = aut.edge_number(ed)\n", + " print(f\"edges[{en}].src={ed.src}, edges[{en}].dst={ed.dst}\")" ] }, { @@ -1293,7 +1294,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -1759,7 +1760,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -1979,7 +1980,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that the `succ_tail` field of the `states` vector is seldom used when reading automata as the linked list of edges ends when `next_succ` (or `succ`) equals `0`. Its main use is during calls to `new_edge()`: new edges are always created at the end of the list (otherwise it would be hard to preserve the order of edges when parsing and automaton and printing it)." + "Note that the `succ_tail` field of the `states` vector is seldom used when reading automata as the linked list of edges ends when `next_succ` (or `succ`) equals `0`. Its main use is during calls to `new_edge()`: new edges are always created at the end of the list (otherwise it would be hard to preserve the order of edges when parsing an automaton and printing it)." 
] }, { @@ -2233,7 +2234,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -2604,7 +2605,7 @@ "while it:\n", " e = it.current()\n", " toerase = e.acc.has(1)\n", - " print(\"pos={}, acc={}, toerase={}\".format(aut.edge_number(e), e.acc, toerase))\n", + " print(f\"pos={aut.edge_number(e)}, acc={e.acc}, toerase={toerase}\")\n", " if toerase:\n", " it.erase()\n", " else:\n", @@ -2900,7 +2901,8 @@ ], "source": [ "for e in aut.edges(): # iterate over all non-erased edges\n", - " print(\"edges[{e}].src={src}, edges[{e}].dst={dst}\".format(e=aut.edge_number(e), src=e.src, dst=e.dst))" + " en = aut.edge_number(e)\n", + " print(f\"edges[{en}].src={e.src}, edges[{en}].dst={e.dst}\")" ] }, { @@ -3398,7 +3400,8 @@ ], "source": [ "for e in aut.edges(): # iterate over all non-erased edges\n", - " print(\"edges[{e}].src={src}, edges[{e}].dst={dst}\".format(e=aut.edge_number(e), src=e.src, dst=e.dst))" + " en = aut.edge_number(e)\n", + " print(f\"edges[{en}].src={e.src}, edges[{en}].dst={e.dst}\")" ] }, { @@ -3954,7 +3957,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "execution_count": 22, @@ -4768,7 +4771,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "execution_count": 28, @@ -5552,7 +5555,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -6260,7 +6263,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b477e340> >" + " *' at 0x7f1fdcf63db0> >" ] }, "metadata": {}, @@ -6758,7 +6761,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b4794630> >" + " *' at 0x7f1fdd05b7e0> >" ] }, "metadata": {}, @@ -7129,7 +7132,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd9b4794630> >" + " *' at 0x7f1fdd05b7e0> >" ] }, "metadata": {}, @@ -7345,7 +7348,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index fd0f33a07..3eb6f013f 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -387,7 +387,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -985,7 +985,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f048b0c0> >" + " *' at 0x7f2d01decab0> >" ] }, "execution_count": 10, @@ -994,7 +994,7 @@ } ], "source": [ - "a1 = spot.automaton(\"randaut -Q4 --colored -e.7 -A '{}' 2 |\".format(c.get_acceptance()))\n", + "a1 = spot.automaton(f\"randaut -Q4 --colored -e.7 -A '{c.get_acceptance()}' 2 |\")\n", "a1" ] }, @@ -1809,7 +1809,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f048b510> >" + " *' at 0x7f2d01decd50> >" ] }, "execution_count": 11, @@ -2131,7 +2131,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f048b540> >" + " *' at 0x7f2d01ded4d0> >" ] }, "execution_count": 13, @@ -2140,7 +2140,7 @@ } ], "source": [ - "a2 = spot.automaton(\"randaut -Q3 -e.8 --seed=4 -A '{}' 2 |\".format(c.get_acceptance()))\n", + "a2 = spot.automaton(f\"randaut -Q3 -e.8 --seed=4 -A '{c.get_acceptance()}' 2 |\")\n", "a2" ] }, @@ -2999,7 +2999,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f04a4660> >" + " *' at 0x7f2d01dedc50> >" ] }, "execution_count": 14, @@ -3709,7 +3709,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4355,7 +4355,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4511,7 +4511,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4717,7 +4717,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, 
"metadata": {}, @@ -4777,7 +4777,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4837,7 +4837,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -5222,7 +5222,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 18, @@ -6510,7 +6510,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 20, @@ -7984,7 +7984,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f04a6d30> >" + " *' at 0x7f2d005206c0> >" ] }, "execution_count": 29, @@ -9202,7 +9202,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f04a6fa0> >" + " *' at 0x7f2d005207b0> >" ] }, "execution_count": 31, @@ -10624,7 +10624,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 40, @@ -12328,7 +12328,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104390> >" + " *' at 0x7f2d00521bc0> >" ] }, "execution_count": 45, @@ -12687,7 +12687,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 47, @@ -13081,7 +13081,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104450> >" + " *' at 0x7f2d005214d0> >" ] }, "execution_count": 48, @@ -13418,7 +13418,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 49, @@ -13765,7 +13765,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104e40> >" + " *' at 0x7f2d00522310> >" ] }, "execution_count": 50, @@ -14036,7 +14036,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104960> >" + " *' at 0x7f2d00522970> >" ] }, "execution_count": 51, @@ -14219,7 +14219,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 52, @@ -14530,7 +14530,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104ab0> >" + " *' at 0x7f2d00522ac0> >" ] }, "execution_count": 53, @@ -14850,7 +14850,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 55, @@ -15094,7 +15094,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0105710> >" + " *' at 0x7f2d00523150> >" ] }, "execution_count": 57, @@ -15367,7 +15367,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0104d20> >" + " *' at 0x7f2d00523240> >" ] }, "execution_count": 58, @@ -15738,7 +15738,7 @@ "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 60, @@ -15966,7 +15966,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f01059e0> >" + " *' at 0x7f2d005239c0> >" ] }, "execution_count": 61, @@ -16205,7 +16205,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f01051d0> >" + " *' at 0x7f2d005238a0> >" ] }, "execution_count": 62, @@ -16744,7 +16744,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f01053b0> >" + " *' at 0x7f2d00523720> >" ] }, "execution_count": 63, @@ -16901,7 +16901,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f0106430> >" + " *' at 0x7f2d00523fc0> >" ] }, "execution_count": 64, @@ -17068,7 +17068,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f92f01051a0> >" + " *' at 0x7f2d00550090> >" ] }, "execution_count": 66, @@ -17126,7 +17126,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" } }, "nbformat": 4, From 79b7cfea01ab71726821cb936b79a2f00f3d01a9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 3 Apr 2024 15:27:21 +0200 Subject: [PATCH 428/606] ltl2tgba_fm: simplify the ratexp_to_dfa interface * spot/twaalgos/ltl2tgba_fm.cc (ratexp_to_dfa::succ): Rewrite using a vector of (label,dest) as return type. 
--- spot/twaalgos/ltl2tgba_fm.cc | 89 +++++++++++------------------------- 1 file changed, 26 insertions(+), 63 deletions(-) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 24c797f89..acf9511b2 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -101,8 +101,8 @@ namespace spot typedef twa_graph::namer namer; public: ratexp_to_dfa(translate_dict& dict); - std::tuple - succ(formula f); + + std::vector> succ(formula f); ~ratexp_to_dfa(); protected: @@ -1070,11 +1070,7 @@ namespace spot } } - // FIXME: use the new twa_graph_ptr interface - // with unsigned instead of state*. - std::tuple + std::vector> ratexp_to_dfa::succ(formula f) { f2a_t::const_iterator it = f2a_.find(f); @@ -1084,22 +1080,18 @@ namespace spot else a = translate(f); - // Using return std::make_tuple(nullptr, nullptr, nullptr) works - // with GCC 6.1.1, but breaks with clang++ 3.7.1 when using the - // same header file for . So let's use the output type - // explicitly. - typedef std::tuple res_t; - - // If a is null, f has an empty language. if (!a.first) - return res_t{nullptr, nullptr, nullptr}; + return {}; auto namer = a.second; assert(namer->has_state(f)); - auto st = a.first->state_from_number(namer->get_state(f)); - return res_t{a.first, namer, st}; + twa_graph_ptr aut = a.first; + unsigned st = namer->get_state(f); + + std::vector> res; + for (auto& e: aut->out(st)) + res.emplace_back(e.cond, namer->get_name(e.dst)); + return res; } // The rewrite rules used here are adapted from Jean-Michel @@ -1270,34 +1262,20 @@ namespace spot case op::Closure: { // rat_seen_ = true; - formula f = node[0]; - auto p = dict_.transdfa.succ(f); bdd res = bddfalse; - auto aut = std::get<0>(p); - auto namer = std::get<1>(p); - auto st = std::get<2>(p); - if (!aut) - return res; - for (auto i: aut->succ(st)) - { - bdd label = i->cond(); - const state* s = i->dst(); - formula dest = - namer->get_name(aut->state_number(s)); - - if (dest.accepts_eword()) - { - res |= label; - } - else - { - formula dest2 = formula::unop(o, dest); - if (dest2.is_ff()) - continue; - res |= - label & bdd_ithvar(dict_.register_next_variable(dest2)); - } - } + for (auto [label, dest]: dict_.transdfa.succ(node[0])) + if (dest.accepts_eword()) + { + res |= label; + } + else + { + formula dest2 = formula::unop(o, dest); + if (dest2.is_ff()) + continue; + res |= + label & bdd_ithvar(dict_.register_next_variable(dest2)); + } return res; } case op::NegClosureMarked: @@ -1312,24 +1290,11 @@ namespace spot has_marked_ = true; } - formula f = node[0]; - auto p = dict_.transdfa.succ(f); - auto aut = std::get<0>(p); - - if (!aut) - return bddtrue; - - auto namer = std::get<1>(p); - auto st = std::get<2>(p); bdd res = bddfalse; bdd missing = bddtrue; - for (auto i: aut->succ(st)) + for (auto [label, dest]: dict_.transdfa.succ(node[0])) { - bdd label = i->cond(); - const state* s = i->dst(); - formula dest = namer->get_name(aut->state_number(s)); - missing -= label; if (!dest.accepts_eword()) @@ -1342,9 +1307,7 @@ namespace spot } } - res |= missing & - // stick X(1) to preserve determinism. - bdd_ithvar(dict_.register_next_variable(formula::tt())); + res |= missing; //trace_ltl_bdd(dict_, res_); return res; } From 27a01372088716a8e150c728a5a4bd7d8b31809b Mon Sep 17 00:00:00 2001 From: pierreganty Date: Wed, 27 Mar 2024 10:27:04 +0100 Subject: [PATCH 429/606] Small fixes in the man pages * bin/man/dstar2tgba.x, bin/man/spot-x.x: Typos. 
--- bin/man/dstar2tgba.x | 2 +- bin/man/spot-x.x | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/bin/man/dstar2tgba.x b/bin/man/dstar2tgba.x index e5318bc0e..7aa985696 100644 --- a/bin/man/dstar2tgba.x +++ b/bin/man/dstar2tgba.x @@ -70,7 +70,7 @@ Documents the output format of .TP 2. -Chistof Löding: Mehods for the Transformation of ω-Automata: +Christof Löding: Mehods for the Transformation of ω-Automata: Complexity and Connection to Second Order Logic. Diploma Thesis. University of Kiel. 1998. diff --git a/bin/man/spot-x.x b/bin/man/spot-x.x index b9e3c7166..6aa4a385e 100644 --- a/bin/man/spot-x.x +++ b/bin/man/spot-x.x @@ -19,7 +19,7 @@ how the smallest possible M should be searched. .TP \fB1\fR The default, \fB1\fR, performs a binary search between 1 and N. The -lower bound can sometimes be improved when the \fBsat-langmap\fR +lower bound can sometimes be improved when the \fBsat\-langmap\fR option is used. .TP @@ -55,7 +55,7 @@ collection and resizing will be output on standard error. Specifies which inclusion algorithm Spot should use. If the variable is unset, or set to \fB"default"\fR, containment checks are done using a complementation-based procedure. If the variable is set to -\fB"forq"\fR, and FORQ-based containment check is used for Büchi automata +\fB"forq"\fR, then the FORQ-based containment check is used for Büchi automata (the default procedure is still used for non-Büchi automata). See [6] in the bibliography below. @@ -90,13 +90,13 @@ executed in debug mode, showing how the input is processed. .TP \fBSPOT_DOTDEFAULT\fR -Whenever the \f(CW--dot\fR option is used without argument (even +Whenever the \fB\-\-dot\fR option is used without argument (even implicitely via \fBSPOT_DEFAULT_FORMAT\fR), the contents of this variable is used as default argument. If you have some default settings in \fBSPOT_DOTDEFAULT\fR and want to append to options -\f(CWxyz\fR temporarily for one call, use \f(CW--dot=.xyz\fR: +\f(CWxyz\fR temporarily for one call, use \fB\-\-dot=.xyz\fR: the dot character will be replaced by the contents of the -\f(CWSPOT_DOTDEFAULT\fR environment variable. +\fBSPOT_DOTDEFAULT\fR environment variable. .TP \fBSPOT_DOTEXTRA\fR @@ -105,7 +105,7 @@ before the first state is output. This makes it easy to override global attributes of the graph. .TP -\fSPOT_EXCLUSIVE_WORD\fR +\fBSPOT_EXCLUSIVE_WORD\fR Specifies which algorithm spot should use for exclusive_word. This can currently take on 1 of 2 values: 0 for the legacy implementation, and 1 for the forq implementation [6] (See bibliography below). Forq assumes buchi @@ -252,10 +252,11 @@ sl(a) x sl(!a), performed on-the-fly .IP 8 cl(a) x cl(!a) .RE + +This variable is used by the \fB\-\-check=stutter-invariance\fR and +\fB\-\-stutter-invariant\fR options, but it is ignored by +\fB\-\-check=stutter-sensitive-example\fR. .RE -This variable is used by the \fB--check=stutter-invariance\fR and -\fB--stutter-invariant\fR options, but it is ignored by -\fB--check=stutter-sensitive-example\fR. .TP \fBSPOT_SIMULATION_REDUCTION\fR @@ -289,6 +290,9 @@ autfilt (see sat\-minimize options described above or autfilt man page). The XCNF format is the one used by the SAT incremental competition. [BIBLIOGRAPHY] + +The following papers are related to some of the options and environment variables. + .TP 1. Christian Dax, Jochen Eisinger, Felix Klaedtke: Mechanizing the @@ -328,7 +332,7 @@ Describes the stutter-invariance checks that can be selected through 5. 
Javier Esparza, Jan Křetínský and Salomon Sickert: One Theorem to Rule Them All: A Unified Translation of LTL into ω-Automata. Proceedings -of LICS'18. To appear. +of LICS'18. Describes (among other things) the constructions used for translating formulas of the form GF(guarantee) or FG(safety), that can be From a17d8a05017123fc0faf911bcb57d204c61577aa Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 3 Apr 2024 17:47:18 +0200 Subject: [PATCH 430/606] help2man: work around some utf8 issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit help2man used characters in the range 0x80,...,0x84 to mark special sections/characters during its processing, but those bytes where also occurring in other utf-8 characters breaking the output. For instance the character '₁' ( a subscript 1), is encoded as "0xE2 0x82 0x81" in utf-8. * tools/help2man: Tell perl that input and output should be assumed to be utf-8. Also use "private-use codepoints" for those special characters to avoid any future conflict. --- tools/help2man | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/tools/help2man b/tools/help2man index bf1f075cb..82437df46 100755 --- a/tools/help2man +++ b/tools/help2man @@ -2,7 +2,8 @@ # Generate a short man page from --help and --version output. # Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2009, -# 2010, 2011, 2012, 2013, 2014, 2015, 2016 Free Software Foundation, Inc. +# 2010, 2011, 2012, 2013, 2014, 2015, 2016 Free Software Foundation, Inc. +# Later modified by the Spot authors. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -26,6 +27,9 @@ use Getopt::Long; use Text::ParseWords qw(shellwords); use Text::Tabs qw(expand); use POSIX qw(strftime setlocale LC_ALL); +use utf8; +use open IN => ":encoding(UTF-8)"; +binmode STDOUT, ":encoding(UTF-8)"; my $this_program = 'help2man'; my $this_version = '1.47.4'; @@ -405,10 +409,10 @@ s/\n\n+/\n\n/g; s/([A-Za-z])-\n *([A-Za-z])/$1$2/g; # Temporarily exchange leading dots, apostrophes and backslashes for -# tokens. -s/^\./\x80/mg; -s/^'/\x81/mg; -s/\\/\x82/g; +# tokens. \xE000..\xF8FF are so called "private-use codepoints". +s/^\./\xE080/mg; +s/^'/\xE081/mg; +s/\\/\xE082/g; # Translators: patterns are used to match common program output. In the source # these strings are all of the form of "my $PAT_something = _('...');" and are @@ -524,7 +528,7 @@ while (length) { $matched .= $& if %append_match; $indent = length ($4 || "$1$3"); - $content = ".TP\n\x84$2\n\x84$5\n"; + $content = ".TP\n\xE084$2\n\xE084$5\n"; unless ($4) { # Indent may be different on second line. @@ -536,7 +540,7 @@ while (length) elsif (s/^ {1,10}([+-]\S.*)\n//) { $matched .= $& if %append_match; - $content = ".HP\n\x84$1\n"; + $content = ".HP\n\xE084$1\n"; $indent = 80; # not continued } @@ -545,7 +549,7 @@ while (length) { $matched .= $& if %append_match; $indent = length ($4 || "$1$3"); - $content = ".TP\n\x84$2\n\x84$5\n"; + $content = ".TP\n\xE084$2\n\xE084$5\n"; } # Indented paragraph. @@ -553,7 +557,7 @@ while (length) { $matched .= $& if %append_match; $indent = length $1; - $content = ".IP\n\x84$2\n"; + $content = ".IP\n\xE084$2\n"; } # Left justified paragraph. @@ -569,7 +573,7 @@ while (length) while ($indent ? 
s/^ {$indent}(\S.*)\n// : s/^(\S.*)\n//) { $matched .= $& if %append_match; - $content .= "\x84$1\n"; + $content .= "\xE084$1\n"; } # Move to next paragraph. @@ -578,9 +582,9 @@ while (length) for ($content) { # Leading dot and apostrophe protection. - s/\x84\./\x80/g; - s/\x84'/\x81/g; - s/\x84//g; + s/\xE084\./\xE080/g; + s/\xE084'/\xE081/g; + s/\xE084//g; # Examples should be verbatim. unless ($sect eq _('EXAMPLES')) @@ -603,7 +607,7 @@ while (length) } # Escape remaining hyphens. - s/-/\x83/g; + s/-/\xE083/g; if ($sect eq _('COPYRIGHT')) { @@ -675,6 +679,7 @@ while (my ($sect, $text) = each %replace) # Output header. print < Date: Wed, 3 Apr 2024 23:05:12 +0200 Subject: [PATCH 431/606] man: fix several issues The \f(CW macro to switch to "constant-width" does not seem to honored when converting to html, and I've found some patch to groff removing its use from their own man page. https://lists.gnu.org/archive/html/groff-commit/2020-07/msg00015.html Lets use \fC instead, as it seems to produce some in HTML. Two manpages had URLs pointing to spot.lrde.epita.fr instead of spot.lre.epita.fr. Finally, spot-x.x had an incorrectly closed .EX block, that completly broke the HTML conversion. * bin/man/autcross.x, bin/man/ltl2tgba.x, bin/man/ltlcross.x, bin/man/spot-x.x, bin/man/spot.x: Fix the aforementioned issues. --- bin/man/autcross.x | 12 ++++++------ bin/man/ltl2tgba.x | 10 +++++----- bin/man/ltlcross.x | 28 ++++++++++++++-------------- bin/man/spot-x.x | 36 ++++++++++++++++++------------------ bin/man/spot.x | 2 +- 5 files changed, 44 insertions(+), 44 deletions(-) diff --git a/bin/man/autcross.x b/bin/man/autcross.x index 4158aca11..bda9a9cce 100644 --- a/bin/man/autcross.x +++ b/bin/man/autcross.x @@ -59,29 +59,29 @@ Information about how the execution of the tool went. values: .RS .TP -\f(CW"ok"\fR +\fC"ok"\fR The tool ran succesfully (this does not imply that the produced automaton is correct) and autcross could parse the resulting automaton. In this case \fBexit_code\fR is always 0. .TP -\f(CW"timeout"\fR +\fC"timeout"\fR The tool ran for more than the number of seconds specified with the \fB\-\-timeout\fR option. In this case \fBexit_code\fR is always -1. .TP -\f(CW"exit code"\fR +\fC"exit code"\fR The tool terminated with a non-zero exit code. \fBexit_code\fR contains that value. .TP -\f(CW"signal"\fR +\fC"signal"\fR The tool terminated with a signal. \fBexit_code\fR contains that signal's number. .TP -\f(CW"parse error"\fR +\fC"parse error"\fR The tool terminated normally, but autcross could not parse its output. In this case \fBexit_code\fR is always -1. .TP -\f(CW"no output"\fR +\fC"no output"\fR The tool terminated normally, but without creating the specified output file. In this case \fBexit_code\fR is always -1. .RE diff --git a/bin/man/ltl2tgba.x b/bin/man/ltl2tgba.x index b34aa02ab..bb41aed73 100644 --- a/bin/man/ltl2tgba.x +++ b/bin/man/ltl2tgba.x @@ -13,7 +13,7 @@ condition, meanings that a run of the automaton is accepted iff it visits ininitely often multiple acceptance sets, and it also uses transition-based acceptance, i.e., those acceptance sets are sets of transitions. TGBA are often more consise than traditional Büchi -automata. For instance the LTL formula \f(CWGFa & GFb\fR can be +automata. For instance the LTL formula \fCGFa & GFb\fR can be translated into a single-state TGBA while a traditional Büchi automaton would need 3 states. Compare .PP @@ -158,11 +158,11 @@ are not recognized, i.e., infinite words that start with a bad prefix. 
.PP Because of this limited expressiveness, a monitor for some given LTL or PSL formula may accept a larger language than the one specified by -the formula. For instance a monitor for the LTL formula \f(CWa U b\fR -will reject (for instance) any word starting with \f(CW!a&!b\fR as +the formula. For instance a monitor for the LTL formula \fCa U b\fR +will reject (for instance) any word starting with \fC!a&!b\fR as there is no way such a word can validate the formula, but it will not -reject a finite prefix repeating only \f(CWa&!b\fR as such a prefix -could be extented in a way that is comptible with \f(CWa U b\fR. +reject a finite prefix repeating only \fCa&!b\fR as such a prefix +could be extented in a way that is comptible with \fCa U b\fR. .PP For more information about monitors, we refer the readers to the following two papers (the first paper describes the construction of diff --git a/bin/man/ltlcross.x b/bin/man/ltlcross.x index f16b17a74..c16adb3ed 100644 --- a/bin/man/ltlcross.x +++ b/bin/man/ltlcross.x @@ -12,7 +12,7 @@ for 100 random formulas, using a timeout of 2 minutes. Because knows those tools, there is no need to specify their input and output. A trace of the execution of the two tools, including any potential issue detected, is reported on standard error, while -statistics are written to \f(CWresults.json\fR. +statistics are written to \fCresults.json\fR. .PP .in +4n .nf @@ -26,14 +26,14 @@ The next command compares .BR ltl3ba , and .BR ltl2tgba (1) -on a set of formulas saved in file \f(CWinput.ltl\fR. +on a set of formulas saved in file \fCinput.ltl\fR. Statistics are again writen -as CSV into \f(CWresults.csv\fR. This examples specify the +as CSV into \fCresults.csv\fR. This examples specify the input and output for each tool, to show how this can be done. -Note the use of \f(CW%L\fR to indicate that the formula passed t +Note the use of \fC%L\fR to indicate that the formula passed t for the formula in .BR spin (1)'s -format, and \f(CW%f\fR for the +format, and \fC%f\fR for the formula in Spot's format. Each of these tool produces an automaton in a different format (respectively, LBTT's format, Spin's never claims, and HOA format), but Spot's parser can @@ -51,7 +51,7 @@ distinguish and understand these three formats. Rabin or Streett automata output by .B ltl2dstar in its historical format can be read from a -file specified with \f(CW%D\fR instead of \f(CW%O\fR. For instance: +file specified with \fC%D\fR instead of \fC%O\fR. For instance: .PP .in +4n .nf @@ -99,7 +99,7 @@ The formula translated. \fBtool\fR The tool used to translate this formula. This is either the value of the full \fICOMMANDFMT\fR string specified on the command-line, or, -if \fICOMMANDFMT\fR has the form \f(CW{\fISHORTNAME\fR\f(CW}\fR\fICMD\fR, +if \fICOMMANDFMT\fR has the form \fC{\fISHORTNAME\fR\fC}\fR\fICMD\fR, the value of \fISHORTNAME\fR. .TP \fBexit_status\fR, \fBexit_code\fR @@ -110,29 +110,29 @@ Otherwise, \fBexit_status\fR is a string that can take the following values: .RS .TP -\f(CW"ok"\fR +\fC"ok"\fR The translator ran succesfully (this does not imply that the produced automaton is correct) and ltlcross could parse the resulting automaton. In this case \fBexit_code\fR is always 0. .TP -\f(CW"timeout"\fR +\fC"timeout"\fR The translator ran for more than the number of seconds specified with the \fB\-\-timeout\fR option. In this case \fBexit_code\fR is always -1. .TP -\f(CW"exit code"\fR +\fC"exit code"\fR The translator terminated with a non-zero exit code. 
\fBexit_code\fR contains that value. .TP -\f(CW"signal"\fR +\fC"signal"\fR The translator terminated with a signal. \fBexit_code\fR contains that signal's number. .TP -\f(CW"parse error"\fR +\fC"parse error"\fR The translator terminated normally, but ltlcross could not parse its output. In this case \fBexit_code\fR is always -1. .TP -\f(CW"no output"\fR +\fC"no output"\fR The translator terminated normally, but without creating the specified output file. In this case \fBexit_code\fR is always -1. .RE @@ -150,7 +150,7 @@ translated automaton. Column \fBedges\fR counts the number of edges (labeled by Boolean formulas) in the automaton seen as a graph, while \fBtransitions\fR counts the number of assignment-labeled transitions that might have been merged into a formula-labeled edge. For instance -an edge labeled by \f(CWtrue\fR will be counted as 2^3=8 transitions if +an edge labeled by \fCtrue\fR will be counted as 2^3=8 transitions if the automaton uses 3 atomic propositions. .TP \fBscc\fR, \fBnonacc_scc\fR, \fBterminal_scc\fR, \fBweak_scc\fR, \fBstrong_scc\fR diff --git a/bin/man/spot-x.x b/bin/man/spot-x.x index 6aa4a385e..25a1a89ff 100644 --- a/bin/man/spot-x.x +++ b/bin/man/spot-x.x @@ -10,7 +10,7 @@ spot-x \- Common fine-tuning options and environment variables. .\" Add any additional description here [SAT\-MINIMIZE VALUES] -When the sat-minimize=K option is used to enable SAT-based +When the \fBsat-minimize=K\fR option is used to enable SAT-based minimization of deterministic automata, a SAT solver is used to minimize an input automaton with N states into an output automaton with 1≤M≤N states. The parameter K specifies @@ -72,12 +72,12 @@ passed to the printer by suffixing the output format with .in +4n .EX % SPOT_DEFAULT_FORMAT=dot=bar autfilt ... -.EN +.EE .in -4n is the same as running .in +4n .EX -% autfilt --dot=bar ... +% autfilt \-\-dot=bar ... .EE .in -4n but the use of the environment variable makes more sense if you set @@ -92,9 +92,9 @@ executed in debug mode, showing how the input is processed. \fBSPOT_DOTDEFAULT\fR Whenever the \fB\-\-dot\fR option is used without argument (even implicitely via \fBSPOT_DEFAULT_FORMAT\fR), the contents of this -variable is used as default argument. If you have some default +variable are used as default argument. If you have some default settings in \fBSPOT_DOTDEFAULT\fR and want to append to options -\f(CWxyz\fR temporarily for one call, use \fB\-\-dot=.xyz\fR: +\fCxyz\fR temporarily for one call, use \fB\-\-dot=.xyz\fR: the dot character will be replaced by the contents of the \fBSPOT_DOTDEFAULT\fR environment variable. @@ -123,7 +123,7 @@ is actually used) .TP \fBSPOT_O_CHECK\fR Specifies the default algorithm that should be used -by the \f(CWis_obligation()\fR function. The value should +by the \fCis_obligation()\fR function. The value should be one of the following: .RS .RS @@ -141,24 +141,24 @@ by a weak and deterministic Büchi automata. .TP \fBSPOT_OOM_ABORT\fR -If this variable is set, Out-Of-Memory errors will \f(CWabort()\fR the +If this variable is set, Out-Of-Memory errors will \fCabort()\fR the program (potentially generating a coredump) instead of raising an exception. This is useful to debug a program and to obtain a stack trace pointing to the function doing the allocation. 
When this -variable is unset (the default), \f(CWstd::bad_alloc\fR are thrown on +variable is unset (the default), \fCstd::bad_alloc\fR are thrown on memory allocation failures, and the stack is usually unwinded up to top-level, losing the original context of the error. Note that at -least \f(CWltlcross\fR has some custom handling of -\f(CWstd::bad_alloc\fR to recover from products that are too large (by +least \fCltlcross\fR has some custom handling of +\fCstd::bad_alloc\fR to recover from products that are too large (by ignoring them), and setting this variable will interfer with that. .TP \fBSPOT_PR_CHECK\fR Select the default algorithm that must be used to check the persistence or recurrence property of a formula f. The values it can take are between -1 and 3. All methods work either on f or !f thanks to the duality of +1 and 3. All methods work either on f or !f thanks to the duality of persistence and recurrence classes. See -.UR https://spot.lrde.epita.fr/hierarchy.html +.UR https://spot.lre.epita.fr/hierarchy.html this page .UE for more details. If it is set to: @@ -196,8 +196,8 @@ format. If set, this variable should indicate how to call an external SAT\-solver \- by default, Spot uses PicoSAT, which is distributed with. This is used by the sat\-minimize option described above. -The format to follow is the following: \f(CW" [options] %I >%O"\fR. -The escape sequences \f(CW%I\fR and \f(CW%O\fR respectively +The format to follow is the following: \fC" [options] %I >%O"\fR. +The escape sequences \fC%I\fR and \fC%O\fR respectively denote the names of the input and output files. These temporary files are created in the directory specified by \fBSPOT_TMPDIR\fR or \fBTMPDIR\fR (see below). The SAT\-solver should follow the convention @@ -281,7 +281,7 @@ This is mostly useful for debugging. .TP \fBSPOT_XCNF\fR Assign a folder path to this variable to generate XCNF files whenever -SAT\-based minimization is used \- the file is outputed as "incr.xcnf" +SAT\-based minimization is used \- the file is output as "incr.xcnf" in the specified directory. This feature works only with an external SAT\-solver. See \fBSPOT_SATSOLVER\fR to know how to provide one. Also note that this needs an incremental approach without restarting the encoding i.e @@ -290,8 +290,8 @@ autfilt (see sat\-minimize options described above or autfilt man page). The XCNF format is the one used by the SAT incremental competition. [BIBLIOGRAPHY] - -The following papers are related to some of the options and environment variables. +The following papers are related to some of the options and +environment variables. .TP 1. @@ -344,7 +344,7 @@ Kyveli Doveri and Pierre Ganty and Nicolas Mazzocchi: FORQ-Based Language Inclusion Formal Testing. Proceedings of CAV'22. LNCS 13372. -The containment check implemented as spot::contains_forq(), and +The containment check implemented as \fCspot::contains_forq()\fR, and used for Büchi automata when \fBSPOT_CONTAINMENT_CHECK=forq\fR. [SEE ALSO] diff --git a/bin/man/spot.x b/bin/man/spot.x index eaa1e933b..037069d39 100644 --- a/bin/man/spot.x +++ b/bin/man/spot.x @@ -27,6 +27,6 @@ that are listed below. .BR randltl (1) .BR spot-x (7) -.UR https://spot.lrde.epita.fr/ +.UR https://spot.lre.epita.fr/ The Spot web page. 
.UE From 9230614f8dc69270684e97c1e6c3d32bbd0d121a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 5 Apr 2024 12:42:43 +0200 Subject: [PATCH 432/606] ltlsynt implement polarity and gequiv after decomposition too * bin/ltlsynt.cc: Also simplify subformulas using polarity and global equivalence. Add support for --polarity=before-decompose and --global-equiv=before-decompose to restablish the previous behavior. * spot/tl/apcollect.hh, spot/tl/apcollect.cc (realizability_simplifier::merge_mapping): New method. * tests/core/ltlsynt.test: Add new test cases. --- bin/ltlsynt.cc | 67 ++++++++++++++---- spot/tl/apcollect.cc | 7 ++ spot/tl/apcollect.hh | 4 ++ tests/core/ltlsynt.test | 153 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 217 insertions(+), 14 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index d2957855f..2062b6340 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -104,12 +104,14 @@ static const argp_option options[] = { "decompose", OPT_DECOMPOSE, "yes|no", 0, "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, - { "polarity", OPT_POLARITY, "yes|no", 0, + { "polarity", OPT_POLARITY, "yes|no|before-decompose", 0, "whether to remove atomic propositions that always have the same " - "polarity in the formula to speed things up (enabled by default)", 0 }, - { "global-equivalence", OPT_GEQUIV, "yes|no", 0, + "polarity in the formula to speed things up (enabled by default, " + "both before and after decomposition)", 0 }, + { "global-equivalence", OPT_GEQUIV, "yes|no|before-decompose", 0, "whether to remove atomic propositions that are always equivalent to " - "another one (enabled by default)", 0 }, + "another one (enabled by default, both before and after decomposition)", + 0 }, { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0, "simplification to apply to the controller (no) nothing, " "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based " @@ -252,9 +254,25 @@ static bool decompose_values[] = false, false, false, false, }; ARGMATCH_VERIFY(decompose_args, decompose_values); +static const char* const polarity_args[] = + { + "yes", "true", "enabled", "1", + "no", "false", "disabled", "0", + "before-decompose", + nullptr + }; +enum polarity_choice { pol_no, pol_yes, pol_before_decompose }; +static polarity_choice polarity_values[] = + { + pol_yes, pol_yes, pol_yes, pol_yes, + pol_no, pol_no, pol_no, pol_no, + pol_before_decompose + }; +ARGMATCH_VERIFY(polarity_args, polarity_values); + bool opt_decompose_ltl = true; -bool opt_polarity = true; -bool opt_gequiv = true; +polarity_choice opt_polarity = pol_yes; +polarity_choice opt_gequiv = pol_yes; static const char* const simplify_args[] = { @@ -407,12 +425,12 @@ namespace // Attempt to remove superfluous atomic propositions spot::realizability_simplifier* rs = nullptr; - if (opt_polarity || opt_gequiv) + if (opt_polarity != pol_no || opt_gequiv != pol_no) { unsigned opt = 0; - if (opt_polarity) + if (opt_polarity != pol_no) opt |= spot::realizability_simplifier::polarity; - if (opt_gequiv) + if (opt_gequiv != pol_no) { if (want_game()) opt |= spot::realizability_simplifier::global_equiv_output_only; @@ -420,9 +438,7 @@ namespace opt |= spot::realizability_simplifier::global_equiv; } rs = - new spot::realizability_simplifier(original_f, - input_aps, - opt, + new spot::realizability_simplifier(original_f, input_aps, opt, gi ? 
gi->verbose_stream : nullptr); f = rs->simplified_formula(); } @@ -493,6 +509,7 @@ namespace auto sub_f = sub_form.begin(); auto sub_o = sub_outs_str.begin(); std::vector mealy_machines; + unsigned numsubs = sub_form.size(); for (; sub_f != sub_form.end(); ++sub_f, ++sub_o) { @@ -502,6 +519,28 @@ namespace nullptr, bddfalse }; + + if (numsubs > 1 && (opt_polarity == pol_yes || opt_gequiv == pol_yes)) + { + unsigned opt = 0; + if (opt_polarity == pol_yes) + opt |= spot::realizability_simplifier::polarity; + if (opt_gequiv == pol_yes) + { + if (want_game()) + opt |= spot::realizability_simplifier::global_equiv_output_only; + else + opt |= spot::realizability_simplifier::global_equiv; + } + if (gi->verbose_stream) + *gi->verbose_stream << "working on subformula " << *sub_f << '\n'; + spot::realizability_simplifier rsub(*sub_f, input_aps, opt, + gi ? + gi->verbose_stream : nullptr); + *sub_f = rsub.simplified_formula(); + rs->merge_mapping(rsub); + } + // If we want to print a game, // we never use the direct approach if (!want_game() && opt_bypass) @@ -1049,7 +1088,7 @@ parse_opt(int key, char *arg, struct argp_state *) break; case OPT_GEQUIV: opt_gequiv = XARGMATCH("--global-equivalence", arg, - decompose_args, decompose_values); + polarity_args, polarity_values); break; case OPT_HIDE: show_status = false; @@ -1068,7 +1107,7 @@ parse_opt(int key, char *arg, struct argp_state *) } case OPT_POLARITY: opt_polarity = XARGMATCH("--polarity", arg, - decompose_args, decompose_values); + polarity_args, polarity_values); break; case OPT_PRINT: opt_print_pg = true; diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 72e4335f9..504f624ae 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -429,6 +429,13 @@ namespace spot while (oldf != f_); } + void + realizability_simplifier::merge_mapping(const realizability_simplifier& other) + { + for (auto [from, from_is_input, to]: other.get_mapping()) + mapping_.emplace_back(from, from_is_input, to); + } + void realizability_simplifier::patch_mealy(twa_graph_ptr mealy) const { bdd add = bddtrue; diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index a4ccfdaa6..d35461035 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -107,6 +107,10 @@ namespace spot return mapping_; } + /// \brief Augment the current mapping with output variable renaming from + /// another realizability_simplifier. + void merge_mapping(const realizability_simplifier& other); + /// \brief Patch a Mealy machine to add the missing APs. void patch_mealy(twa_graph_ptr mealy) const; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 28b846abd..cbc49b1c9 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -732,6 +732,7 @@ diff outx exp cat >exp < y trying to create strategy directly for (b & (b | y)) -> y direct strategy might exist but was not found. translating formula done in X seconds @@ -743,6 +744,7 @@ automaton has 4 states solving game with acceptance: all game solved in X seconds simplification took X seconds +working on subformula (a | x) -> x trying to create strategy directly for (a | x) -> x direct strategy might exist but was not found. translating formula done in X seconds @@ -784,10 +786,12 @@ diff outx exp # Here, G!(!x | !y) should be Gx & Gy cat >exp <exp < b) should be G(a) & G(!b) cat >exp < b) & (a => c) & (a => d) cat >exp < b trying to create strategy directly for a -> b direct strategy might exist but was not found. 
translating formula done in X seconds @@ -857,6 +864,7 @@ automaton has 4 states solving game with acceptance: all game solved in X seconds simplification took X seconds +working on subformula a -> c trying to create strategy directly for a -> c direct strategy might exist but was not found. translating formula done in X seconds @@ -868,6 +876,7 @@ automaton has 4 states solving game with acceptance: all game solved in X seconds simplification took X seconds +working on subformula a -> d trying to create strategy directly for a -> d direct strategy might exist but was not found. translating formula done in X seconds @@ -889,6 +898,7 @@ diff outx exp # Here, !(F(a | b)) should be G!a & G!b cat >exp < GFb)' --outs=b,c --decompose=yes\ --verbose --pol=no --realizability 2> out cat >exp < GFb trying to create strategy directly for Ga <-> GFb direct strategy was found. EOF @@ -932,6 +944,7 @@ ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes --pol=no \ --verbose --realizability --bypass=no 2> out cat >exp < GFb translating formula done in X seconds automaton has 2 states and 2 colors LAR construction done in X seconds @@ -956,6 +970,7 @@ diff outx exp # ACD verbose cat >exp < GFb translating formula done in X seconds automaton has 1 states and 2 colors ACD construction done in X seconds @@ -965,6 +980,7 @@ automaton has 6 states solving game with acceptance: generalized-Streett 1 1 game solved in X seconds simplification took X seconds +working on subformula Gc translating formula done in X seconds automaton has 1 states and 0 colors ACD construction done in X seconds @@ -1147,3 +1163,140 @@ grep 'controlenv.*matches both' err ltlsynt --polarity=1 --global-e=1 -f 'G(i -> Xo) & G(!i -> F!o)' --real ltlsynt --polarity=0 --global-e=0 -f 'G(i -> Xo) & G(!i -> F!o)' --real + +cat >exp < (o1 | !o2)) & G(i2 -> X(!o1 | o2)) +the following signals can be temporarily removed: + i1 := 1 + i2 := 1 +new formula: G(o1 | !o2) & GX(!o1 | o2) +trying to create strategy directly for G(o1 | !o2) & GX(!o1 | o2) +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 2 states and 0 colors +LAR construction done in X seconds +DPA has 2 states, 0 colors +split inputs and outputs done in X seconds +automaton has 4 states +solving game with acceptance: all +game solved in X seconds +simplification took X seconds +working on subformula G(!i1 -> (o3 | !o4)) & G(!i2 -> X(!o3 | o4)) +the following signals can be temporarily removed: + i1 := 0 + i2 := 0 +new formula: G(o3 | !o4) & GX(!o3 | o4) +trying to create strategy directly for G(o3 | !o4) & GX(!o3 | o4) +direct strategy might exist but was not found. 
+translating formula done in X seconds +automaton has 2 states and 0 colors +LAR construction done in X seconds +DPA has 2 states, 0 colors +split inputs and outputs done in X seconds +automaton has 4 states +solving game with acceptance: all +game solved in X seconds +simplification took X seconds +REALIZABLE +HOA: v1 +States: 1 +Start: 0 +AP: 6 "o1" "o2" "o3" "o4" "i1" "i2" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 0 1 2 3 +--BODY-- +State: 0 +[!0&!1&!2&!3 | !0&!1&2&3 | 0&1&!2&!3 | 0&1&2&3] 0 +--END-- +EOF +f1='G(i1->(o1|!o2)) & G(!i1->(o3|!o4)) & G(i2->X(!o1|o2)) & G(!i2->X(!o3|o4))' +ltlsynt -f "$f1" --verbose 2>out 1>&2 +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +gg='G(i2 -> (!o1 | o2)) & G(!i2 -> (!o3 | o4))' +cat >exp < (o1 | !o2)) & G(!i1 -> (o3 | !o4)) & $gg +there are 2 subformulas +working on subformula G(i1 -> (o1 | !o2)) & G(i2 -> (!o1 | o2)) +the following signals can be temporarily removed: + i1 := 1 + i2 := 1 +new formula: G(o1 | !o2) & G(!o1 | o2) + o2 := o1 +new formula: G(o1 | !o1) +trying to create strategy directly for G(o1 | !o1) +direct strategy was found. +direct strat has 1 states, 1 edges and 0 colors +simplification took X seconds +working on subformula G(!i1 -> (o3 | !o4)) & G(!i2 -> (!o3 | o4)) +the following signals can be temporarily removed: + i1 := 0 + i2 := 0 +new formula: G(o3 | !o4) & G(!o3 | o4) + o4 := o3 +new formula: G(o3 | !o3) +trying to create strategy directly for G(o3 | !o3) +direct strategy was found. +direct strat has 1 states, 1 edges and 0 colors +simplification took X seconds +REALIZABLE +HOA: v1 +States: 1 +Start: 0 +AP: 7 "o1" "o2" "o3" "o4" "o5" "i1" "i2" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic weak +controllable-AP: 0 1 2 3 4 +--BODY-- +State: 0 +[!0&!1&!2&!3&4 | !0&!1&2&3&4 | 0&1&!2&!3&4 | 0&1&2&3&4] 0 +--END-- +EOF +f2='G(i1->(o1|!o2)) & G(!i1->(o3|!o4)) & G(i2->(!o1|o2)) & G(!i2->(!o3|o4))&Go5' +ltlsynt -f "$f2" --verbose 2>out 1>&2 +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +gg='G(i2->(!o1 | o2)) & G(!i2->(!o3 | o4))' +hh='0&1&3&!5&6 | 0&!3&!4&!5&6 | !1&2&!3&5&6 | !1&!3&!4&!5&6 | ' +ii='1&!2&3&!5&6 | 1&!2&4&5&6 | !2&!3&!4&!5&6 | !2&!3&4&5&6' +cat >exp <(o1 | !o2)) & G(!i1->(o3 | !o4)) & $gg +there are 2 subformulas +working on subformula G(i1->(o1 | !o2)) & G(i2->(!o1 | o2)) +trying to create strategy directly for G(i1->(o1 | !o2)) & G(i2->(!o1 | o2)) +direct strategy was found. +direct strat has 1 states, 1 edges and 0 colors +simplification took X seconds +working on subformula G(!i1->(o3 | !o4)) & G(!i2->(!o3 | o4)) +trying to create strategy directly for G(!i1->(o3 | !o4)) & G(!i2->(!o3 | o4)) +direct strategy was found. 
+direct strat has 1 states, 1 edges and 0 colors +simplification took X seconds +REALIZABLE +HOA: v1 +States: 1 +Start: 0 +AP: 7 "o1" "o2" "i1" "i2" "o3" "o4" "o5" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic weak +controllable-AP: 0 1 4 5 6 +--BODY-- +State: 0 +[!0&!1&2&5&6 | !0&!1&3&!5&6 | !0&!2&4&5&6 | 0&1&2&5&6 | $hh$ii] 0 +--END-- +EOF +f2='G(i1->(o1|!o2)) & G(!i1->(o3|!o4)) & G(i2->(!o1|o2)) & G(!i2->(!o3|o4))&Go5' +ltlsynt -f "$f2" --polarity=before-decom --verbose 2>out 1>&2 +sed 's/ [0-9.e-]* seconds/ X seconds/g;s/ -> /->/g;' out > outx +diff outx exp From 1a5b4f00f569a947361f683a7c26d04c18465412 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 10 Apr 2024 21:58:32 +0200 Subject: [PATCH 433/606] * spot/twaalgos/split.hh: Typo in comment. --- spot/twaalgos/split.hh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index cb66619f1..54599c388 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -196,7 +196,8 @@ namespace spot /// \brief Separate a label /// /// This returns a pseudo-container that can be used to iterate - /// over the elements of the basis compatible with the current labal. + /// over the elements of the basis compatible with the current + /// label. /// /// For instance if the basis was created from {a,b} (i.e., the /// basis is actually {!a&!b,a&!b,!a&b,a&b}), and the label is From 952e5024800047d495c9c625bc8aa7fc72ab2a88 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 10 Apr 2024 21:58:47 +0200 Subject: [PATCH 434/606] * .gitlab-ci.yml: Use CI_JOB_ID instead of CI_PIPELINE_ID. --- .gitlab-ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 805f34db1..8aabfb8fa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -276,7 +276,7 @@ debpkg-stable: - stable script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable - - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID + - vol=spot-stable-$CI_COMMIT_SHA-$CI_JOB_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? @@ -304,7 +304,7 @@ debpkg-stable-i386: needs: ["debpkg-stable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable - - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID + - vol=spot-stable-$CI_COMMIT_SHA-$CI_JOB_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? @@ -331,7 +331,7 @@ debpkg-unstable: - next script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian - - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_JOB_ID - docker volume create $vol - exitcode=0 - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? 
@@ -357,7 +357,7 @@ debpkg-unstable-i386: needs: ["debpkg-unstable"] script: - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 - - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_JOB_ID - docker volume create $vol - exitcode=0 - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? From 96ff2225e3f41bb36d92e2a0c54c79823204a115 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 16 Apr 2024 16:24:54 +0200 Subject: [PATCH 435/606] Fix typos in doc, comments and messages * bin/README, bin/common_conv.hh, bin/common_trans.cc, bin/ltlsynt.cc, bin/spot-x.cc, spot/gen/automata.hh, spot/graph/graph.hh, spot/ltsmin/ltsmin.hh, spot/ltsmin/spins_interface.hh, spot/ltsmin/spins_kripke.hh, spot/mc/bloemen.hh, spot/mc/bloemen_ec.hh, spot/mc/cndfs.hh, spot/mc/deadlock.hh, spot/mc/intersect.hh, spot/mc/lpar13.hh, spot/mc/mc_instanciator.hh, spot/misc/bareword.cc, spot/misc/fixpool.hh, spot/misc/formater.hh, spot/misc/minato.hh, spot/misc/satsolver.hh, spot/misc/timer.hh, spot/parseaut/public.hh, spot/priv/partitioned_relabel.cc, spot/priv/satcommon.hh, spot/ta/ta.hh, spot/ta/taexplicit.cc, spot/ta/taproduct.hh, spot/ta/tgta.hh, spot/taalgos/reachiter.hh, spot/taalgos/tgba2ta.hh, spot/tl/apcollect.cc, spot/tl/apcollect.hh, spot/tl/formula.cc, spot/tl/parse.hh, spot/tl/randomltl.hh, spot/tl/relabel.hh, spot/tl/simplify.cc, spot/twa/acc.hh, spot/twa/bddprint.hh, spot/twa/formula2bdd.cc, spot/twa/twa.hh, spot/twa/twagraph.cc, spot/twa/twagraph.hh, spot/twaalgos/aiger.cc, spot/twaalgos/aiger.hh, spot/twaalgos/alternation.hh, spot/twaalgos/cleanacc.cc, spot/twaalgos/cobuchi.cc, spot/twaalgos/contains.cc, spot/twaalgos/couvreurnew.cc, spot/twaalgos/cycles.hh, spot/twaalgos/degen.cc, spot/twaalgos/degen.hh, spot/twaalgos/dot.hh, spot/twaalgos/dtbasat.cc, spot/twaalgos/dtwasat.cc, spot/twaalgos/dtwasat.hh, spot/twaalgos/dualize.cc, spot/twaalgos/emptiness.hh, spot/twaalgos/emptiness_stats.hh, spot/twaalgos/game.cc, spot/twaalgos/genem.hh, spot/twaalgos/hoa.hh, spot/twaalgos/langmap.hh, spot/twaalgos/ltl2tgba_fm.hh, spot/twaalgos/magic.cc, spot/twaalgos/magic.hh, spot/twaalgos/mask.hh, spot/twaalgos/mealy_machine.cc, spot/twaalgos/mealy_machine.hh, spot/twaalgos/minimize.hh, spot/twaalgos/parity.cc, spot/twaalgos/parity.hh, spot/twaalgos/postproc.cc, spot/twaalgos/product.hh, spot/twaalgos/reachiter.hh, spot/twaalgos/relabel.cc, spot/twaalgos/remfin.cc, spot/twaalgos/remfin.hh, spot/twaalgos/sccfilter.cc, spot/twaalgos/sccinfo.hh, spot/twaalgos/se05.cc, spot/twaalgos/se05.hh, spot/twaalgos/simulation.hh, spot/twaalgos/split.hh, spot/twaalgos/stats.hh, spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh, spot/twaalgos/tau03.hh, spot/twaalgos/tau03opt.hh, spot/twaalgos/toparity.hh, spot/twaalgos/totgba.hh, spot/twaalgos/translate.hh, spot/twaalgos/word.cc, spot/twaalgos/word.hh, spot/twaalgos/zlktree.cc, spot/twaalgos/zlktree.hh, spot/twacube/cube.hh, spot/twacube/twacube.hh, tests/core/cube.cc, tests/core/ltlsynt.test, tests/core/parity.cc, tests/core/safra.cc, tests/core/twagraph.cc: here --- bin/README | 2 +- bin/common_conv.hh | 2 +- bin/common_trans.cc | 4 ++-- bin/ltlsynt.cc | 2 +- bin/spot-x.cc | 12 ++++++------ spot/gen/automata.hh | 4 ++-- spot/graph/graph.hh | 6 +++--- spot/ltsmin/ltsmin.hh | 2 +- spot/ltsmin/spins_interface.hh | 2 +- spot/ltsmin/spins_kripke.hh | 2 +- spot/mc/bloemen.hh | 2 +- spot/mc/bloemen_ec.hh | 2 +- spot/mc/cndfs.hh | 
6 +++--- spot/mc/deadlock.hh | 6 +++--- spot/mc/intersect.hh | 4 ++-- spot/mc/lpar13.hh | 2 +- spot/mc/mc_instanciator.hh | 10 +++++----- spot/misc/bareword.cc | 2 +- spot/misc/fixpool.hh | 6 +++--- spot/misc/formater.hh | 2 +- spot/misc/minato.hh | 6 +++--- spot/misc/satsolver.hh | 6 +++--- spot/misc/timer.hh | 2 +- spot/parseaut/public.hh | 2 +- spot/priv/partitioned_relabel.cc | 2 +- spot/priv/satcommon.hh | 20 ++++++++++---------- spot/ta/ta.hh | 8 ++++---- spot/ta/taexplicit.cc | 4 ++-- spot/ta/taproduct.hh | 2 +- spot/ta/tgta.hh | 6 +++--- spot/taalgos/reachiter.hh | 2 +- spot/taalgos/tgba2ta.hh | 2 +- spot/tl/apcollect.cc | 2 +- spot/tl/apcollect.hh | 2 +- spot/tl/formula.cc | 2 +- spot/tl/parse.hh | 8 ++++---- spot/tl/randomltl.hh | 2 +- spot/tl/relabel.hh | 2 +- spot/tl/simplify.cc | 6 +++--- spot/twa/acc.hh | 14 +++++++------- spot/twa/bddprint.hh | 12 ++++++------ spot/twa/formula2bdd.cc | 2 +- spot/twa/twa.hh | 8 ++++---- spot/twa/twagraph.cc | 8 ++++---- spot/twa/twagraph.hh | 2 +- spot/twaalgos/aiger.cc | 4 ++-- spot/twaalgos/aiger.hh | 4 ++-- spot/twaalgos/alternation.hh | 2 +- spot/twaalgos/cleanacc.cc | 2 +- spot/twaalgos/cobuchi.cc | 2 +- spot/twaalgos/contains.cc | 4 ++-- spot/twaalgos/couvreurnew.cc | 4 ++-- spot/twaalgos/cycles.hh | 2 +- spot/twaalgos/degen.cc | 6 +++--- spot/twaalgos/degen.hh | 8 ++++---- spot/twaalgos/dot.hh | 2 +- spot/twaalgos/dtbasat.cc | 12 ++++++------ spot/twaalgos/dtwasat.cc | 12 ++++++------ spot/twaalgos/dtwasat.hh | 2 +- spot/twaalgos/dualize.cc | 2 +- spot/twaalgos/emptiness.hh | 4 ++-- spot/twaalgos/emptiness_stats.hh | 2 +- spot/twaalgos/game.cc | 8 ++++---- spot/twaalgos/genem.hh | 2 +- spot/twaalgos/hoa.hh | 2 +- spot/twaalgos/langmap.hh | 4 ++-- spot/twaalgos/ltl2tgba_fm.hh | 2 +- spot/twaalgos/magic.cc | 4 ++-- spot/twaalgos/magic.hh | 6 +++--- spot/twaalgos/mask.hh | 4 ++-- spot/twaalgos/mealy_machine.cc | 4 ++-- spot/twaalgos/mealy_machine.hh | 4 ++-- spot/twaalgos/minimize.hh | 2 +- spot/twaalgos/parity.cc | 2 +- spot/twaalgos/parity.hh | 2 +- spot/twaalgos/postproc.cc | 2 +- spot/twaalgos/product.hh | 8 ++++---- spot/twaalgos/reachiter.hh | 4 ++-- spot/twaalgos/relabel.cc | 6 +++--- spot/twaalgos/remfin.cc | 6 +++--- spot/twaalgos/remfin.hh | 2 +- spot/twaalgos/sccfilter.cc | 2 +- spot/twaalgos/sccinfo.hh | 6 +++--- spot/twaalgos/se05.cc | 4 ++-- spot/twaalgos/se05.hh | 6 +++--- spot/twaalgos/simulation.hh | 8 ++++---- spot/twaalgos/split.hh | 2 +- spot/twaalgos/stats.hh | 2 +- spot/twaalgos/synthesis.cc | 2 +- spot/twaalgos/synthesis.hh | 2 +- spot/twaalgos/tau03.hh | 2 +- spot/twaalgos/tau03opt.hh | 4 ++-- spot/twaalgos/toparity.hh | 6 +++--- spot/twaalgos/totgba.hh | 2 +- spot/twaalgos/translate.hh | 2 +- spot/twaalgos/word.cc | 2 +- spot/twaalgos/word.hh | 4 ++-- spot/twaalgos/zlktree.cc | 2 +- spot/twaalgos/zlktree.hh | 6 +++--- spot/twacube/cube.hh | 8 ++++---- spot/twacube/twacube.hh | 4 ++-- tests/core/cube.cc | 2 +- tests/core/ltlsynt.test | 12 ++++++------ tests/core/parity.cc | 2 +- tests/core/safra.cc | 2 +- tests/core/twagraph.cc | 2 +- 106 files changed, 228 insertions(+), 228 deletions(-) diff --git a/bin/README b/bin/README index 84f574a55..9fe810e82 100644 --- a/bin/README +++ b/bin/README @@ -12,7 +12,7 @@ whose purpose is just to generate a man-page with the same format as the other man pages (this includes keeping the version number up-to-date). 
-There is also a script called 'options.py' that summerizes how the +There is also a script called 'options.py' that summarizes how the different short options are used among the tools. Routines that are shared by multiple command-line tools are stored in diff --git a/bin/common_conv.hh b/bin/common_conv.hh index a3a43f8eb..304c5fc99 100644 --- a/bin/common_conv.hh +++ b/bin/common_conv.hh @@ -27,5 +27,5 @@ unsigned to_unsigned (const char *s, const char* where); float to_float(const char* s, const char* where); float to_probability(const char* s, const char* where); -// Parse the comma or space seperate string of numbers. +// Parse the comma or space separated string of numbers. std::vector to_longs(const char* s); diff --git a/bin/common_trans.cc b/bin/common_trans.cc index dd7ccc0ba..05a75a0c5 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -980,9 +980,9 @@ static const argp_option options[] = "atomic proposition that compatible with Spin's syntax. You can " "force this relabeling to always occur with option --relabel.\n" "The sequences %f,%s,%l,%w,%F,%S,%L,%W can optionally be \"infixed\"" - " by a bracketed sequence of operators to unabbreviate before outputing" + " by a bracketed sequence of operators to unabbreviate before outputting" " the formula. For instance %[MW]f will rewrite operators M and W" - " before outputing it.\n" + " before outputting it.\n" "Furthermore, if COMMANDFMT has the form \"{NAME}CMD\", then only CMD " "will be passed to the shell, and NAME will be used to name the tool " "in the output.", 4 }, diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 2062b6340..e113bf205 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -757,7 +757,7 @@ namespace } // Takes a set of the atomic propositions appearing in the formula, - // and seperate them into two vectors: input APs and output APs. + // and separate them into two vectors: input APs and output APs. static std::pair, std::vector> filter_list_of_aps(const std::unordered_set& aps, const char* filename, int linenum) diff --git a/bin/spot-x.cc b/bin/spot-x.cc index eca2a945c..d29432a36 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -47,7 +47,7 @@ depends on the --low, --medium, or --high settings.") }, "Maximum number of states of automata involved in automata-based \ implication checks for formula simplifications. Defaults to 64.") }, { DOC("tls-max-ops", - "Maximum number of operands in n-ary opertors (or, and) on which \ + "Maximum number of operands in n-ary operators (or, and) on which \ implication-based simplifications are attempted. Defaults to 16.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ @@ -83,7 +83,7 @@ used when comp-susp=1 and default to 1 or 2 depending on whether --small \ or --deterministic is specified.") }, { nullptr, 0, nullptr, 0, "Postprocessing options:", 0 }, { DOC("acd", "Set to 1 (the default) to use paritize automata using \ -the alternatinc cycle decomposition. Set to 0 to use paritization based \ +the alternating cycle decomposition. Set to 0 to use paritization based \ on latest appearance record variants.") }, { DOC("scc-filter", "Set to 1 (the default) to enable \ SCC-pruning and acceptance simplification at the beginning of \ @@ -91,7 +91,7 @@ post-processing. Transitions that are outside accepting SCC are \ removed from accepting sets, except those that enter into an accepting \ SCC. Set to 2 to remove even these entering transition from the \ accepting sets. 
Set to 0 to disable this SCC-pruning and acceptance \ -simpification pass.") }, +simplification pass.") }, { DOC("degen-reset", "If non-zero (the default), the \ degeneralization algorithm will reset its level any time it exits \ an SCC.") }, @@ -121,7 +121,7 @@ level, as it might favor finding accepting cycles earlier. If \ degen-lowinit is non-zero, then level L is always used without looking \ for the presence of an accepting self-loop.") }, { DOC("degen-remscc", "If non-zero (the default), make sure the output \ -of the degenalization has as many SCCs as the input, by removing superfluous \ +of the degeneralization has as many SCCs as the input, by removing superfluous \ ones.") }, { DOC("det-max-states", "When defined to a positive integer N, \ determinizations will be aborted whenever the number of generated \ @@ -180,7 +180,7 @@ attempting simulation-based reductions. Defaults to 128. Set to 0 to \ never merge states.") }, { DOC("simul-max", "Number of states above which simulation-based \ reductions are skipped. Defaults to 4096. Set to 0 to disable. This \ -applies to all simulation-based optimization, including thoses of the \ +applies to all simulation-based optimization, including those of the \ determinization algorithm.") }, { DOC("simul-trans-pruning", "Number of equivalence classes above which \ simulation-based transition-pruning for non-deterministic automata is \ @@ -259,7 +259,7 @@ sets. By default, this is only enabled when options -B or -S are used.") }, { DOC("simul-method", "Chose which simulation based reduction to use: 1 force the \ signature-based BDD implementation, 2 force matrix-based and 0, the default, \ -is a heristic wich choose which implementation to use.") }, +is a heuristic which chooses which implementation to use.") }, { nullptr, 0, nullptr, 0, nullptr, 0 } }; diff --git a/spot/gen/automata.hh b/spot/gen/automata.hh index 7b60b5269..94e0b987a 100644 --- a/spot/gen/automata.hh +++ b/spot/gen/automata.hh @@ -81,7 +81,7 @@ namespace spot /// \brief An NBA with (n+2) states derived from a Cyclic test /// case. /// - /// This familly of automata is derived from a couple of + /// This family of automata is derived from a couple of /// examples supplied by Reuben Rowe. The task is to /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA /// for a given n contain the automaton generated with @@ -90,7 +90,7 @@ namespace spot /// \brief A DBA with (n+2) states derived from a Cyclic test /// case. /// - /// This familly of automata is derived from a couple of + /// This family of automata is derived from a couple of /// examples supplied by Reuben Rowe. The task is to /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA /// for a given n contain the automaton generated with diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 4f62f3dcd..04d0a8421 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -757,7 +757,7 @@ namespace spot ///@} ///@{ - /// \brief return the Edgeg_Data of an edge. + /// \brief return the Edge_Data of an edge. /// /// This does not use Edge_Data& as return type, because /// Edge_Data might be void. @@ -818,7 +818,7 @@ namespace spot && (dests_.capacity() - dests_.size()) < (sz + 1)) { // If dst_begin...dst_end points into dests_ and dests_ risk - // being reallocated, we have to savea the destination + // being reallocated, we have to save the destination // states before we lose them. 
std::vector tmp(dst_begin, dst_end); dests_.emplace_back(sz); @@ -955,7 +955,7 @@ namespace spot /// @{ /// - /// \brief Return a fake container with all edges (exluding erased + /// \brief Return a fake container with all edges (excluding erased /// edges) internal::all_trans edges() const { diff --git a/spot/ltsmin/ltsmin.hh b/spot/ltsmin/ltsmin.hh index 1611375fe..41369c653 100644 --- a/spot/ltsmin/ltsmin.hh +++ b/spot/ltsmin/ltsmin.hh @@ -73,7 +73,7 @@ namespace spot int compress = 0) const; // \brief The same as above but returns a kripkecube, i.e. a kripke - // that can be use in parallel. Moreover, it support more ellaborated + // that can be use in parallel. Moreover, it supports more elaborated // atomic propositions such as "P.a == P.c" ltsmin_kripkecube_ptr kripkecube(std::vector to_observe, formula dead = formula::tt(), diff --git a/spot/ltsmin/spins_interface.hh b/spot/ltsmin/spins_interface.hh index de64a5c99..24792a6c1 100644 --- a/spot/ltsmin/spins_interface.hh +++ b/spot/ltsmin/spins_interface.hh @@ -38,7 +38,7 @@ namespace spot /// \brief Implementation of the PINS interface. This class /// is a wrapper that, given a file, will compile it w.r.t - /// the PINS interface. The class can then be menipulated + /// the PINS interface. The class can then be manipulated /// transparently whatever the input format considered. class SPOT_API spins_interface { diff --git a/spot/ltsmin/spins_kripke.hh b/spot/ltsmin/spins_kripke.hh index c122913ee..42c7cc36f 100644 --- a/spot/ltsmin/spins_kripke.hh +++ b/spot/ltsmin/spins_kripke.hh @@ -116,7 +116,7 @@ namespace spot /// All successors are computed once when an iterator is recycled or /// created. /// - /// Note: Two threads will explore sucessors with two different orders + /// Note: Two threads will explore successors with two different orders class cspins_iterator final { public: diff --git a/spot/mc/bloemen.hh b/spot/mc/bloemen.hh index 995f5a00b..7b9c5ba7b 100644 --- a/spot/mc/bloemen.hh +++ b/spot/mc/bloemen.hh @@ -63,7 +63,7 @@ namespace spot std::atomic list_status_; }; - /// \brief The haser for the previous uf_element. + /// \brief The hasher for the previous uf_element. struct uf_element_hasher { uf_element_hasher(const uf_element*) diff --git a/spot/mc/bloemen_ec.hh b/spot/mc/bloemen_ec.hh index b91e0bbf8..89756cf4f 100644 --- a/spot/mc/bloemen_ec.hh +++ b/spot/mc/bloemen_ec.hh @@ -70,7 +70,7 @@ namespace spot std::atomic list_status_; }; - /// \brief The haser for the previous uf_element. + /// \brief The hasher for the previous uf_element. 
struct uf_element_hasher { uf_element_hasher(const uf_element*) diff --git a/spot/mc/cndfs.hh b/spot/mc/cndfs.hh index 5cec44471..ca52c93bf 100644 --- a/spot/mc/cndfs.hh +++ b/spot/mc/cndfs.hh @@ -361,7 +361,7 @@ namespace spot todo_blue_.back().it_prop, true, tid_); else if (acc) { - // The state cyan and we can reach it throught an + // The state cyan and we can reach it through an // accepting transition, a accepting cycle has been // found without launching a red dfs if (tmp.second.colors->l[tid_].cyan) @@ -499,7 +499,7 @@ namespace spot } kripkecube& sys_; ///< \brief The system to check - twacube_ptr twa_; ///< \brief The propertu to check + twacube_ptr twa_; ///< \brief The property to check std::vector todo_blue_; ///< \brief Blue Stack std::vector todo_red_; ///< \ brief Red Stack unsigned transitions_ = 0; ///< \brief Number of transitions @@ -514,7 +514,7 @@ namespace spot std::atomic& stop_; ///< \brief Stop-the-world boolean std::vector Rp_; ///< \brief Rp stack std::vector Rp_acc_; ///< \brief Rp acc stack - product_state cycle_start_; ///< \brief Begining of a cycle + product_state cycle_start_; ///< \brief Beginning of a cycle bool finisher_ = false; }; } diff --git a/spot/mc/deadlock.hh b/spot/mc/deadlock.hh index 3ce4d0ade..63b2dc273 100644 --- a/spot/mc/deadlock.hh +++ b/spot/mc/deadlock.hh @@ -37,7 +37,7 @@ namespace spot /// \brief This class aims to explore a model to detect wether it /// contains a deadlock. This deadlock detection performs a DFS traversal /// sharing information shared among multiple threads. - /// If Deadlock equals std::true_type performs dealock algorithm, + /// If Deadlock equals std::true_type performs deadlock algorithm, /// otherwise perform a simple reachability. template& stop_; ///< \brief Stop-the-world boolean /// \brief Stack that grows according to the todo stack. It avoid multiple - /// concurent access to the shared map. + /// concurrent access to the shared map. std::vector refs_; bool finisher_ = false; }; diff --git a/spot/mc/intersect.hh b/spot/mc/intersect.hh index e34c8dc2d..fbc90f32b 100644 --- a/spot/mc/intersect.hh +++ b/spot/mc/intersect.hh @@ -25,9 +25,9 @@ namespace spot { /// \brief Find the first couple of iterator (from a given pair of - /// interators) that intersect. This method can be used in any + /// iterators) that intersect. This method can be used in any /// DFS/BFS-like exploration algorithm. The \a parameter indicates - /// wheter the state has just been visited since the underlying job + /// whether the state has just been visited since the underlying job /// is slightly different. template static void forward_iterators(kripkecube& sys, diff --git a/spot/mc/lpar13.hh b/spot/mc/lpar13.hh index 1abbd9faf..5a1283eb9 100644 --- a/spot/mc/lpar13.hh +++ b/spot/mc/lpar13.hh @@ -195,7 +195,7 @@ namespace spot /// that a state will be popped. If the method return false, then /// the state will be popped. Otherwise the state \a newtop will /// become the new top of the DFS stack. If the state \a top is - /// the only one in the DFS stak, the parameter \a is_initial is set + /// the only one in the DFS stack, the parameter \a is_initial is set /// to true and both \a newtop and \a newtop_dfsnum have inconsistency /// values. 
bool pop_state(product_state, unsigned top_dfsnum, bool, diff --git a/spot/mc/mc_instanciator.hh b/spot/mc/mc_instanciator.hh index aef392738..0bf5492ce 100644 --- a/spot/mc/mc_instanciator.hh +++ b/spot/mc/mc_instanciator.hh @@ -38,8 +38,8 @@ namespace spot { /// \brief This class allows to ensure (at compile time) if - /// a given parameter can be compsidered as a modelchecking algorithm - /// (i.e., usable by instanciate) + /// a given parameter can be considered as a modelchecking algorithm + /// (i.e., usable by instantiate) template class SPOT_API is_a_mc_algorithm { @@ -123,7 +123,7 @@ namespace spot } #endif - // Wait all threads to be instanciated. + // Wait all threads to be instantiated. while (barrier) continue; swarmed[i]->run(); @@ -169,8 +169,8 @@ namespace spot bool go_on = true; for (unsigned i = 0; i < nbth && go_on; ++i) { - // Enumerate cases where a trace can be extraced - // Here we use a switch so that adding new algortihm + // Enumerate cases where a trace can be extracted + // Here we use a switch so that adding new algorithm // with new return status will trigger an error that // should the be fixed here. switch (result.value[i]) diff --git a/spot/misc/bareword.cc b/spot/misc/bareword.cc index a64d11511..c6b66e3b8 100644 --- a/spot/misc/bareword.cc +++ b/spot/misc/bareword.cc @@ -47,7 +47,7 @@ namespace spot } // This is for Spin 5. Spin 6 has a relaxed parser that can - // accept any parenthesized block as an atomic propoistion. + // accept any parenthesized block as an atomic proposition. bool is_spin_ap(const char* str) { if (!str || !islower(*str)) diff --git a/spot/misc/fixpool.hh b/spot/misc/fixpool.hh index 30b8a9b3e..3ad83be39 100644 --- a/spot/misc/fixpool.hh +++ b/spot/misc/fixpool.hh @@ -34,10 +34,10 @@ namespace spot /// - Safe: ensure (when used with memcheck) that each allocation /// is deallocated one at a time /// - Unsafe: rely on the fact that deallocating the pool also release - /// all elements it contains. This case is usefull in a multithreaded + /// all elements it contains. This case is useful in a multithreaded /// environnement with multiple fixed_sized_pool allocating the same - /// ressource. In this case it's hard to detect wich pool has allocated - /// some ressource. + /// resource. In this case it's hard to detect which pool has allocated + /// some resource. enum class pool_type { Safe , Unsafe }; /// A fixed-size memory pool implementation. diff --git a/spot/misc/formater.hh b/spot/misc/formater.hh index 2e387fe55..0f3a25d57 100644 --- a/spot/misc/formater.hh +++ b/spot/misc/formater.hh @@ -124,7 +124,7 @@ namespace spot { } - /// \brief Scan the %-sequences occuring in \a fmt. + /// \brief Scan the %-sequences occurring in \a fmt. /// /// Set has['c'] for each %c in \a fmt. \a has must /// be 256 wide. diff --git a/spot/misc/minato.hh b/spot/misc/minato.hh index 26bb631c8..9fdbe87bb 100644 --- a/spot/misc/minato.hh +++ b/spot/misc/minato.hh @@ -33,14 +33,14 @@ namespace spot class SPOT_API minato_isop { public: - /// \brief Conctructor. + /// \brief Constructor. /// \arg input The BDD function to translate in ISOP. minato_isop(bdd input); - /// \brief Conctructor. + /// \brief Constructor. /// \arg input The BDD function to translate in ISOP. /// \arg vars The set of BDD variables to factorize in \a input. minato_isop(bdd input, bdd vars); - /// \brief Conctructor. + /// \brief Constructor. /// /// This version allow some flexibility in computing the ISOP. /// the result must be within \a input_min and \a input_max. 
diff --git a/spot/misc/satsolver.hh b/spot/misc/satsolver.hh index 83d24d86c..280158d1f 100644 --- a/spot/misc/satsolver.hh +++ b/spot/misc/satsolver.hh @@ -69,7 +69,7 @@ namespace spot class SPOT_API satsolver { public: - /// \brief Construct the sat solver and itinialize variables. + /// \brief Construct the sat solver and initialize variables. /// If no satsolver is provided through SPOT_SATSOLVER env var, a /// distributed version of PicoSAT will be used. satsolver(); @@ -116,7 +116,7 @@ namespace spot template void comment(T first, Args... args); - /// \brief Assume a litteral value. + /// \brief Assume a literal value. /// Must only be used with distributed picolib. void assume(int lit); @@ -159,7 +159,7 @@ namespace spot int nassumptions_vars_; // Surplus of vars (for 'assume' algorithm). /// \brief Number of solutions to obtain from the satsolver - /// (without assuming litterals). + /// (without assuming literals). int nsols_; /// \brief Picosat satsolver instance. diff --git a/spot/misc/timer.hh b/spot/misc/timer.hh index e2a607376..fc1b6ada8 100644 --- a/spot/misc/timer.hh +++ b/spot/misc/timer.hh @@ -147,7 +147,7 @@ namespace spot return total_.cutime; } - /// \brief Return the system time of the current process (whithout children) + /// \brief Return the system time of the current process (without children) /// of all accumulated interval. /// /// Any time interval that has been start()ed but not stop()ed diff --git a/spot/parseaut/public.hh b/spot/parseaut/public.hh index ee9a0e671..960592ac3 100644 --- a/spot/parseaut/public.hh +++ b/spot/parseaut/public.hh @@ -68,7 +68,7 @@ namespace spot /// want_kripke. kripke_graph_ptr ks; - /// Whether an HOA file was termined with --ABORT + /// Whether an HOA file was terminated with --ABORT bool aborted = false; /// Location of the automaton in the stream. spot::location loc; diff --git a/spot/priv/partitioned_relabel.cc b/spot/priv/partitioned_relabel.cc index 0e415d944..fc19bdd45 100644 --- a/spot/priv/partitioned_relabel.cc +++ b/spot/priv/partitioned_relabel.cc @@ -88,7 +88,7 @@ bdd_partition::to_relabeling_map(twa_graph& for_me) const bdd_partition try_partition_me(const std::vector& all_cond, unsigned max_letter) { - // We create vector that will be succesively filled. + // We create vector that will be successively filled. // Each entry corresponds to a "letter", of the partition const size_t Norig = all_cond.size(); diff --git a/spot/priv/satcommon.hh b/spot/priv/satcommon.hh index ce3fcffde..e91c11745 100644 --- a/spot/priv/satcommon.hh +++ b/spot/priv/satcommon.hh @@ -111,12 +111,12 @@ public: unsigned size_nacc, unsigned size_path, bool state_based, bool dtbasat); - /// \brief Compute min_t litteral as well as min_ta, min_p and max_p. - /// After this step, all litterals are known. + /// \brief Compute min_t literal as well as min_ta, min_p and max_p. + /// After this step, all literals are known. void declare_all_vars(int& min_t); - /// \brief Return the transition's litteral corresponding to parameters. + /// \brief Return the transition's literal corresponding to parameters. inline int get_t(unsigned src, unsigned cond, unsigned dst) const { @@ -134,12 +134,12 @@ public: return min_t_ + src * cd_mult_ + cond * size_dst_ + dst; } - /// \brief Return the transition_acc's litteral corresponding to parameters. + /// \brief Return the transition_acc's literal corresponding to parameters. /// If (state_based), all outgoing transitions use the same acceptance /// variable. 
Therefore, for each combination (src, nacc) there is only one - /// litteral. + /// literal. /// Note that with Büchi automata, there is only one nacc, thus, only one - /// litteral for each src. + /// literal for each src. inline int get_ta(unsigned src, unsigned cond, unsigned dst, unsigned nacc = 0) const { @@ -162,7 +162,7 @@ public: : min_ta_ + src * cdn_mult_ + cond * dn_mult_ + dst * size_nacc_ + nacc; } - /// \brief Return the path's litteral corresponding to parameters. + /// \brief Return the path's literal corresponding to parameters. inline int get_p(unsigned path, unsigned src, unsigned dst) const { @@ -181,9 +181,9 @@ public: return min_p_ + path * sd_mult_ + src * size_dst_ + dst; } - /// \brief Return the path's litteral corresponding to parameters. + /// \brief Return the path's literal corresponding to parameters. /// Argument ref serves to say whether it is a candidate or a reference - /// litteral. false -> ref | true -> cand + /// literal. false -> ref | true -> cand inline int get_prc(unsigned path, unsigned src, unsigned dst, bool cand) const { @@ -238,7 +238,7 @@ public: int target_state_number, const twa_graph_ptr& res, const satsolver& solver); - /// \brief Returns the number of distinct values containted in a vector. + /// \brief Returns the number of distinct values contained in a vector. int get_number_of_distinct_vals(std::vector v); } diff --git a/spot/ta/ta.hh b/spot/ta/ta.hh index cd3024ff6..2f8a75973 100644 --- a/spot/ta/ta.hh +++ b/spot/ta/ta.hh @@ -111,7 +111,7 @@ namespace spot /// \brief Get an iterator over the successors of \a state. /// /// The iterator has been allocated with \c new. It is the - /// responsability of the caller to \c delete it when no + /// responsibility of the caller to \c delete it when no /// longer needed. /// virtual ta_succ_iterator* @@ -121,7 +121,7 @@ namespace spot /// filtred by the changeset on transitions /// /// The iterator has been allocated with \c new. It is the - /// responsability of the caller to \c delete it when no + /// responsibility of the caller to \c delete it when no /// longer needed. /// virtual ta_succ_iterator* @@ -142,7 +142,7 @@ namespace spot /// \brief Format the state as a string for printing. /// - /// This formating is the responsability of the automata + /// This formatting is the responsibility of the automata /// that owns the state. virtual std::string format_state(const spot::state* s) const = 0; @@ -190,7 +190,7 @@ namespace spot /// /// This class provides the basic functionalities required to /// iterate over the successors of a state, as well as querying - /// transition labels. Because transitions are never explicitely + /// transition labels. Because transitions are never explicitly /// encoded, labels (conditions and acceptance conditions) can only /// be queried while iterating over the successors. 
class ta_succ_iterator : public twa_succ_iterator diff --git a/spot/ta/taexplicit.cc b/spot/ta/taexplicit.cc index 6a842ad1f..5eb995738 100644 --- a/spot/ta/taexplicit.cc +++ b/spot/ta/taexplicit.cc @@ -106,7 +106,7 @@ namespace spot return transitions_; } - // return transitions filtred by condition + // return transitions filtered by condition state_ta_explicit::transitions* state_ta_explicit::get_transitions(bdd condition) const { @@ -279,7 +279,7 @@ namespace spot bool dest_is_livelock_accepting = dest->is_livelock_accepting_state(); - //Before deleting stuttering transitions, propaged back livelock + //Before deleting stuttering transitions, propagated back livelock //and initial state's properties if (is_stuttering_transition) { diff --git a/spot/ta/taproduct.hh b/spot/ta/taproduct.hh index ba0cf4cf3..66dc152ab 100644 --- a/spot/ta/taproduct.hh +++ b/spot/ta/taproduct.hh @@ -158,7 +158,7 @@ namespace spot virtual bool is_initial_state(const spot::state* s) const override; - /// \brief Return true if the state \a s has no succeseurs + /// \brief Return true if the state \a s has no successor /// in the TA automaton (the TA component of the product automaton) bool is_hole_state_in_ta_component(const spot::state* s) const; diff --git a/spot/ta/tgta.hh b/spot/ta/tgta.hh index bed332805..81f06ad0f 100644 --- a/spot/ta/tgta.hh +++ b/spot/ta/tgta.hh @@ -9,9 +9,9 @@ // (at your option) any later version. // // Spot is distributed in the hope that it will be useful, but WITHOUT -// ANY WARRANTY; without even the implied warranty of MERCHANta_explicitBILITY +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY // or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public -// License for more deta_explicitils. +// License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see . @@ -74,7 +74,7 @@ namespace spot /// \a state and his successors /// /// The iterator has been allocated with \c new. It is the - /// responsability of the caller to \c delete it when no + /// responsibility of the caller to \c delete it when no /// longer needed. /// virtual twa_succ_iterator* diff --git a/spot/taalgos/reachiter.hh b/spot/taalgos/reachiter.hh index 733bc7b68..935eae7a5 100644 --- a/spot/taalgos/reachiter.hh +++ b/spot/taalgos/reachiter.hh @@ -78,7 +78,7 @@ namespace spot /// /// \param in The source state number. /// \param out The destination state number. - /// \param si The spot::twa_succ_iterator positionned on the current + /// \param si The spot::twa_succ_iterator positioned on the current /// transition. virtual void process_link(int in, int out, const ta_succ_iterator* si); diff --git a/spot/taalgos/tgba2ta.hh b/spot/taalgos/tgba2ta.hh index 99240893b..fa5f035c0 100644 --- a/spot/taalgos/tgba2ta.hh +++ b/spot/taalgos/tgba2ta.hh @@ -37,7 +37,7 @@ namespace spot /// \param degeneralized When false, the returned automaton is a generalized /// form of TA, called GTA (Generalized Testing Automaton). /// Like TGBA, GTA use Generalized Büchi acceptance - /// conditions intead of Buchi-accepting states: there are several acceptance + /// conditions instead of Buchi-accepting states: there are several acceptance /// sets (of transitions), and a path is accepted if it traverses /// at least one transition of each set infinitely often or if it contains a /// livelock-accepting cycle (like a TA). 
The spot emptiness check algorithm diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 504f624ae..6cea88ea6 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -69,7 +69,7 @@ namespace spot { atomic_prop_set res; - // polirity: 0 = negative, 1 = positive, 2 or more = both. + // polarity: 0 = negative, 1 = positive, 2 or more = both. auto rec = [&res](formula f, unsigned polarity, auto self) { switch (f.kind()) diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index d35461035..fec68287c 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -57,7 +57,7 @@ namespace spot atomic_prop_collect_as_bdd(formula f, const twa_ptr& a); - /// \brief Collect the literals occuring in f + /// \brief Collect the literals occurring in f /// /// This function records each atomic proposition occurring in f /// along with the polarity of its occurrence. For instance if the diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 3c5afc8d1..db4b32ec7 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -276,7 +276,7 @@ namespace spot // pointers we should remove. We can do it in the same loop. // // It is simpler to construct a separate vector to do that, but that's - // only needed if we have nested multops or null poiners. + // only needed if we have nested multops or null pointers. if (std::find_if(v.begin(), v.end(), [o](const fnode* f) { return f == nullptr || f->is(o); }) != v.end()) diff --git a/spot/tl/parse.hh b/spot/tl/parse.hh index 5907d2756..3702807b2 100644 --- a/spot/tl/parse.hh +++ b/spot/tl/parse.hh @@ -110,7 +110,7 @@ namespace spot /// field parsed_formula::f in the returned object can be a non-zero /// value even if it encountered error during the parsing of \a /// ltl_string. If you want to make sure \a ltl_string was parsed - /// succesfully, check \a parsed_formula::errors for emptiness. + /// successfully, check \a parsed_formula::errors for emptiness. /// /// \warning This function is not reentrant. SPOT_API @@ -133,7 +133,7 @@ namespace spot /// field parsed_formula::f in the returned object can be a non-zero /// value even if it encountered error during the parsing of \a /// ltl_string. If you want to make sure \a ltl_string was parsed - /// succesfully, check \a parsed_formula::errors for emptiness. + /// successfully, check \a parsed_formula::errors for emptiness. /// /// \warning This function is not reentrant. SPOT_API @@ -154,7 +154,7 @@ namespace spot /// field parsed_formula::f in the returned object can be a non-zero /// value even if it encountered error during the parsing of \a /// ltl_string. If you want to make sure \a ltl_string was parsed - /// succesfully, check \a parsed_formula::errors for emptiness. + /// successfully, check \a parsed_formula::errors for emptiness. /// /// The LBT syntax, also used by the lbtt and scheck tools, is /// extended to support W, and M operators (as done in lbtt), and @@ -191,7 +191,7 @@ namespace spot /// field parsed_formula::f in the returned object can be a non-zero /// value even if it encountered error during the parsing of \a /// ltl_string. If you want to make sure \a ltl_string was parsed - /// succesfully, check \a parsed_formula::errors for emptiness. + /// successfully, check \a parsed_formula::errors for emptiness. /// /// \warning This function is not reentrant. 
SPOT_API diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index 99b664b00..d4c52debf 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -200,7 +200,7 @@ namespace spot class SPOT_API random_sere final: public random_formula { public: - /// Create a random SERE genere using atomic propositions from \a ap. + /// Create a random SERE generator using atomic propositions from \a ap. /// /// The default priorities are defined as follows: /// diff --git a/spot/tl/relabel.hh b/spot/tl/relabel.hh index 1f8e32f22..1d4bbc12d 100644 --- a/spot/tl/relabel.hh +++ b/spot/tl/relabel.hh @@ -35,7 +35,7 @@ namespace spot /// between the new names (keys) and the old names (values). /// /// \see relabel_bse - /// \see relabel_overlaping_bse + /// \see relabel_overlapping_bse SPOT_API formula relabel(formula f, relabeling_style style, relabeling_map* m = nullptr); diff --git a/spot/tl/simplify.cc b/spot/tl/simplify.cc index c670fc730..5ed627c1b 100644 --- a/spot/tl/simplify.cc +++ b/spot/tl/simplify.cc @@ -948,7 +948,7 @@ namespace spot { } - // if !neg build c&X(c&X(...&X(tail))) with n occurences of c + // if !neg build c&X(c&X(...&X(tail))) with n occurrences of c // if neg build !c|X(!c|X(...|X(tail))). formula dup_b_x_tail(bool neg, formula c, formula tail, unsigned n) @@ -1027,7 +1027,7 @@ namespace spot // // The above usually make more sense when reversed (see // them in the And and Or rewritings), except when we - // try to maximaze the size of subformula that do not + // try to maximize the size of subformula that do not // have EventUniv formulae. if (opt_.favor_event_univ) if (c.is(op::Or, op::And)) @@ -1585,7 +1585,7 @@ namespace spot } } } - // {b[*i..j]} = b&X(b&X(... b)) with i occurences of b + // {b[*i..j]} = b&X(b&X(... b)) with i occurrences of b // !{b[*i..j]} = !b&X(!b&X(... !b)) if (!opt_.reduce_size_strictly) if (c.is(op::Star)) diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 02fc8f5e0..1749e45cd 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1186,7 +1186,7 @@ namespace spot /// \brief Convert the acceptance formula into a BDD /// - /// \a map should be a vector indiced by colors, that + /// \a map should be a vector indexed by colors, that /// maps each color to the desired BDD representation. bdd to_bdd(const bdd* map) const; @@ -1275,7 +1275,7 @@ namespace spot /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct - /// has the shape `...&Fin(i)&...`, then `i` will be prefered + /// has the shape `...&Fin(i)&...`, then `i` will be preferred /// over any arbitrary Fin. /// /// The second element of the pair, is the same acceptance @@ -1307,7 +1307,7 @@ namespace spot /// If no disjunct has the right shape, then a random Fin(i) is /// searched in the formula, and the output (i, left, right). /// is such that left contains all disjuncts containing Fin(i) - /// (at any depth), and right contains the original formlula + /// (at any depth), and right contains the original formula /// where Fin(i) has been replaced by false. /// @{ std::tuple @@ -1345,7 +1345,7 @@ namespace spot /// \brief Check potential acceptance of an SCC. 
/// /// Assuming that an SCC intersects all sets in \a - /// infinitely_often (i.e., for each set in \a infinetely_often, + /// infinitely_often (i.e., for each set in \a infinitely_often, /// there exist one marked transition in the SCC), and is /// included in all sets in \a always_present (i.e., all /// transitions are marked with \a always_present), this returns @@ -1464,7 +1464,7 @@ namespace spot /// "Fin(!x)" and "Inf(!x)" are not supported by this parser. /// /// Or the string could be the name of an acceptance condition, as - /// speficied in the HOA format. (E.g. "Rabin 2", "parity max odd 3", + /// specified in the HOA format. (E.g. "Rabin 2", "parity max odd 3", /// "generalized-Rabin 4 2 1", etc.). /// /// A spot::parse_error is thrown on syntax error. @@ -2190,7 +2190,7 @@ namespace spot /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct - /// has the shape `...&Fin(i)&...`, then `i` will be prefered + /// has the shape `...&Fin(i)&...`, then `i` will be preferred /// over any arbitrary Fin. /// /// The second element of the pair, is the same acceptance @@ -2226,7 +2226,7 @@ namespace spot /// If no disjunct has the right shape, then a random Fin(i) is /// searched in the formula, and the output (i, left, right). /// is such that left contains all disjuncts containing Fin(i) - /// (at any depth), and right contains the original formlula + /// (at any depth), and right contains the original formula /// where Fin(i) has been replaced by false. /// @{ std::tuple diff --git a/spot/twa/bddprint.hh b/spot/twa/bddprint.hh index a85acb765..2ed55d333 100644 --- a/spot/twa/bddprint.hh +++ b/spot/twa/bddprint.hh @@ -40,7 +40,7 @@ namespace spot /// This assumes that \a b is a conjunction of literals. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. - /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::string bdd_format_sat(const bdd_dict_ptr& dict, bdd b); @@ -50,7 +50,7 @@ namespace spot /// \param os The output stream. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. - /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::ostream& bdd_print_accset(std::ostream& os, const bdd_dict_ptr& dict, bdd b); @@ -59,7 +59,7 @@ namespace spot /// This is used when saving a TGBA. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. - /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::string bdd_format_accset(const bdd_dict_ptr& dict, bdd b); @@ -73,7 +73,7 @@ namespace spot /// \brief Format a BDD as a set. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. - /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::string bdd_format_set(const bdd_dict_ptr& dict, bdd b); @@ -87,7 +87,7 @@ namespace spot /// \brief Format a BDD as a formula. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. - /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::string bdd_format_formula(const bdd_dict_ptr& dict, bdd b); @@ -98,7 +98,7 @@ namespace spot /// \brief Format a BDD as an irredundant sum of product. /// \param dict The dictionary to use, to lookup variables. /// \param b The BDD to print. 
- /// \return The BDD formated as a string. + /// \return The BDD formatted as a string. SPOT_API std::string bdd_format_isop(const bdd_dict_ptr& dict, bdd b); diff --git a/spot/twa/formula2bdd.cc b/spot/twa/formula2bdd.cc index 540426d85..8c1223f93 100644 --- a/spot/twa/formula2bdd.cc +++ b/spot/twa/formula2bdd.cc @@ -25,7 +25,7 @@ namespace spot { namespace { - // Convert a BDD which is known to be a conjonction into a formula. + // Convert a BDD which is known to be a conjunction into a formula. // If dual is true, dualize the result, i.e., negate literals, and // exchange ∧ and ∨. template diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index 85a755873..053344cf2 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -630,14 +630,14 @@ namespace spot /// \brief Get the initial state of the automaton. /// /// The state has been allocated with \c new. It is the - /// responsability of the caller to \c destroy it when no + /// responsibility of the caller to \c destroy it when no /// longer needed. virtual const state* get_init_state() const = 0; /// \brief Get an iterator over the successors of \a local_state. /// /// The iterator has been allocated with \c new. It is the - /// responsability of the caller to \c delete it when no + /// responsibility of the caller to \c delete it when no /// longer needed. /// /// \see succ() @@ -785,7 +785,7 @@ namespace spot /// \brief Format the state as a string for printing. /// - /// Formating is the responsability of the automata that owns the + /// Formatting is the responsibility of the automata that owns the /// state, so that state objects could be implemented as very /// small objects, maybe sharing data with other state objects via /// data structure stored in the automaton. @@ -795,7 +795,7 @@ namespace spot /// /// This converts \a s, into that corresponding spot::state for \a /// t. This is useful when you have the state of a product, and - /// want to restrict this state to a specific automata occuring in + /// want to restrict this state to a specific automata occurring in /// the product. /// /// It goes without saying that \a s and \a t should be compatible diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index eb74dd496..2ccc411a9 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -412,7 +412,7 @@ namespace spot // (We will use two hash maps in this case.) auto sp = get_named_prop>("state-player"); - // The hashing is a bit delicat: We may only use the dst if it has + // The hashing is a bit delicate: We may only use the dst if it has // no self-loop. HASH_OF_STATE stores the hash associated to each // state (by default its own number) or some common value if the // state contains self-loop. @@ -505,7 +505,7 @@ namespace spot } // All states that might possible be merged share the same hash // Info hash coll - //std::cout << "Hash collission rate pre merge: " + //std::cout << "Hash collision rate pre merge: " // << ((map0.size()+map1.size())/((float)n_states)) // << '\n'; @@ -561,7 +561,7 @@ namespace spot sl1 = e_chain[sl1]; sl2 = e_chain[sl2]; } - // Since edges are ordered on each side, aadvance + // Since edges are ordered on each side, advance // the smallest side in case there is no match. 
else if (edge_data_comp(data1, data2)) sl1 = e_chain[sl1]; @@ -747,7 +747,7 @@ namespace spot if (merged) defrag_states(remap, st); // Info hash coll 2 - //std::cout << "Hash collission rate post merge: " + //std::cout << "Hash collision rate post merge: " // << ((map0.size()+map1.size())/((float)num_states())) // << '\n'; return merged; diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index 30a023c68..36fa31836 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -600,7 +600,7 @@ namespace spot /// the edges /// \param to_merge_ptr Determines which states are candidates. /// If null, all states are considered - /// The actual implementation differd from merge_states(). + /// The actual implementation differs from merge_states(). /// It is more costly, but is more precise, in the sense that /// more states are merged. unsigned merge_states_of(bool stable = true, diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 659dd281d..95c758ede 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1031,7 +1031,7 @@ namespace spot it2.first.first.id(), it2.first.second.id()); }); if (itm == occur_map.cend()) - throw std::runtime_error("Empty occurence map"); + throw std::runtime_error("Empty occurrence map"); return *itm; }; @@ -1180,7 +1180,7 @@ namespace spot // Create all the bdds/vars // true/false/latches/inputs already exist - // Get the gatenumber corresponding to an output + // Get the gate number corresponding to an output auto v2g = [&](unsigned v)->unsigned { v = circ.aig_pos(v); diff --git a/spot/twaalgos/aiger.hh b/spot/twaalgos/aiger.hh index 1fe2ffddf..129f12ad2 100644 --- a/spot/twaalgos/aiger.hh +++ b/spot/twaalgos/aiger.hh @@ -86,7 +86,7 @@ namespace spot public: - /// \brief Mark the beginning of a test tranlation + /// \brief Mark the beginning of a test translation /// /// Sometimes different encodings produces more or less gates. /// To improve performances, one can "safe" the current status @@ -297,7 +297,7 @@ namespace spot /// \param method How to translate the bdd. 0: If-then-else normal form, /// 1: isop normal form, 2: try both and retain smaller /// \param use_dual Encode the negations of the given bdds and - /// retain the smalles implementation + /// retain the smallest implementation /// \param use_split_off 0: Use base algo /// 1: Separate the different types of input signals /// (like latches, inputs) to increase gate diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index 1e0ba87ed..a03ddc121 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -40,7 +40,7 @@ namespace spot /// /// operator() can be called on states with universal branching /// (that's actually the point), and can be called on state number - /// that designate groupes of destination states (in that case the + /// that designate groups of destination states (in that case the /// conjunction of all those states are taken). class SPOT_API outedge_combiner { diff --git a/spot/twaalgos/cleanacc.cc b/spot/twaalgos/cleanacc.cc index 454b531c1..26f6f834f 100644 --- a/spot/twaalgos/cleanacc.cc +++ b/spot/twaalgos/cleanacc.cc @@ -549,7 +549,7 @@ namespace spot for (auto& e: aut->edges()) { - // Just avoit the e.acc.sets() loops on marks that we have + // Just avoid the e.acc.sets() loops on marks that we have // just seen. 
if (e.acc == previous_a) continue; diff --git a/spot/twaalgos/cobuchi.cc b/spot/twaalgos/cobuchi.cc index 986c982fb..509cb4d88 100644 --- a/spot/twaalgos/cobuchi.cc +++ b/spot/twaalgos/cobuchi.cc @@ -408,7 +408,7 @@ namespace spot // Each state is characterized by a bitvect_array of 2 bitvects: // bv1 -> the set of original states that it represents // bv2 -> a set of marked states (~) - // To do so, we keep a correspondance between a state number and its + // To do so, we keep a correspondence between a state number and its // bitvect representation. dca_st_mapping bv_to_num_; std::vector> num_2_bv_; diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index 3df305379..67791dbac 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -80,14 +80,14 @@ namespace spot bool are_equivalent(const_twa_graph_ptr left, formula right) { - // The first containement check does not involve a + // The first containment check does not involve a // complementation, the second might. return contains(left, right) && contains(right, left); } bool are_equivalent(formula left, const_twa_graph_ptr right) { - // The first containement check does not involve a + // The first containment check does not involve a // complementation, the second might. return contains(right, left) && contains(left, right); } diff --git a/spot/twaalgos/couvreurnew.cc b/spot/twaalgos/couvreurnew.cc index 62cbb7aa1..59efbef16 100644 --- a/spot/twaalgos/couvreurnew.cc +++ b/spot/twaalgos/couvreurnew.cc @@ -282,7 +282,7 @@ namespace spot const_twa_ptr>::type; // The status of the emptiness-check on success. - // It contains everyting needed to build a counter-example: + // It contains everything needed to build a counter-example: // the automaton, the stack of SCCs traversed by the counter-example, // and the heap of visited states with their indices. template @@ -579,7 +579,7 @@ namespace spot return check_impl(); } - // The following two methods anticipe the future interface of the + // The following two methods anticipate the future interface of the // class emptiness_check. Following the interface of twa, the class // emptiness_check_result should not be exposed. bool diff --git a/spot/twaalgos/cycles.hh b/spot/twaalgos/cycles.hh index bd613a1c2..fda2210e6 100644 --- a/spot/twaalgos/cycles.hh +++ b/spot/twaalgos/cycles.hh @@ -43,7 +43,7 @@ namespace spot /// /// We represent a cycle by a sequence of succ_iterator objects /// positioned on the transition contributing to the cycle. These - /// succ_itertor are stored, along with their source state, in the + /// succ_iterator are stored, along with their source state, in the /// dfs_ stack. Only the last portion of this stack may form a /// cycle. /// diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index fc737e3f7..897062cb3 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -32,7 +32,7 @@ namespace spot { namespace { - // A state in the degenalized automaton corresponds to a state in + // A state in the degeneralized automaton corresponds to a state in // the TGBA associated to a level. The level is just an index in // the list of acceptance sets. typedef std::pair degen_state; @@ -495,7 +495,7 @@ namespace spot levels->emplace_back(ds.second); // Level cache stores one encountered level for each state - // (the value of use_lvl_cache determinates which level + // (the value of use_lvl_cache determines which level // should be remembered). This cache is used when // re-entering the SCC. 
if (use_lvl_cache) @@ -579,7 +579,7 @@ namespace spot // where 1 is initial and => marks accepting // edges: 1=>1, 1=>2, 2->2, 2->1. This is // already an SBA, with 1 as accepting state. - // However if you try degeralize it without + // However if you try degeneralize it without // ignoring *prev, you'll get two copies of state // 2, depending on whether we reach it using 1=>2 // or from 2->2. If this example was not clear, diff --git a/spot/twaalgos/degen.hh b/spot/twaalgos/degen.hh index b04ae7bb8..643c1d219 100644 --- a/spot/twaalgos/degen.hh +++ b/spot/twaalgos/degen.hh @@ -28,7 +28,7 @@ namespace spot /// \brief Degeneralize a generalized (co)Büchi automaton into an /// equivalent (co)Büchi automaton. /// - /// There are two variants of the function. If the generalizd + /// There are two variants of the function. If the generalized /// (co)Büchi acceptance uses N colors, degeneralize() algorithm /// will builds a state-based (co)Büchi automaton that has at most /// (N+1) times the number of states of the original automaton. @@ -38,12 +38,12 @@ namespace spot /// /// Additional options control optimizations described in /// \cite babiak.13.spin . When \a use_z_lvl is set, the level of - /// the degeneralized automaton is reset everytime an SCC is exited. + /// the degeneralized automaton is reset every time an SCC is exited. /// If \a use_cust_acc_orders is set, the degeneralization will /// compute a custom acceptance order for each SCC (this option is /// disabled by default because our benchmarks show that it usually /// does more harm than good). If \a use_lvl_cache is set, - /// everytime an SCC is entered on a state that as already been + /// every time an SCC is entered on a state that as already been /// associated to some level elsewhere, reuse that level (set it to /// 2 to keep the smallest number, 3 to keep the largest level, and /// 1 to keep the first level found). If \a ignaccsl is set, we do @@ -75,7 +75,7 @@ namespace spot /// /// As an alternative method to degeneralization, one may also /// consider ACD transform. acd_transform() will never produce - /// larger automata than degenaralize_tba(), and + /// larger automata than degeneralize_tba(), and /// acd_transform_sbacc() produce smaller automata than /// degeneralize() on the average. See \cite casares.22.tacas for /// some comparisons. diff --git a/spot/twaalgos/dot.hh b/spot/twaalgos/dot.hh index fbf63e641..0483967fb 100644 --- a/spot/twaalgos/dot.hh +++ b/spot/twaalgos/dot.hh @@ -29,7 +29,7 @@ namespace spot /// \brief Print reachable states in dot format. /// /// If \a assume_sba is set, this assumes that the automaton - /// is an SBA and use double elipse to mark accepting states. + /// is an SBA and use double ellipse to mark accepting states. /// /// \param options an optional string of letters, each indicating a /// different option. Presently the following options are diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index 90319bd20..4a95eed8c 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -243,7 +243,7 @@ namespace spot // it is necessary to associate to each path constructed, an ID number. // // Given this ID, src_cand, dst_cand and a boolean that tells we want - // ref or cand var, the corresponding litteral can be retrieved thanks + // ref or cand var, the corresponding literal can be retrieved thanks // to get_prc(...), a vars_helper's method. 
unsigned path_size = 0; for (unsigned i = 0; i < d.ref_size; ++i) @@ -264,8 +264,8 @@ namespace spot } } - // Fill dict's bdd vetor (alpha_vect) and save each bdd and it's - // corresponding index in alpha_map. This is necessary beacause + // Fill dict's bdd vector (alpha_vect) and save each bdd and it's + // corresponding index in alpha_map. This is necessary because // some loops start from a precise bdd. Therefore, it's useful // to know its corresponding index to deal with vars_helper. unsigned j = 0; @@ -281,7 +281,7 @@ namespace spot d.helper.init(d.cand_size, d.alpha_vect.size(), d.cand_size, 1, path_size, state_based, true); - // Based on all previous informations, helper knows all litterals. + // Based on all previous informations, helper knows all literals. d.helper.declare_all_vars(++d.nvars); } @@ -399,7 +399,7 @@ namespace spot const acc_cond& ra = ref->acc(); - // construction of contraints (4,5) : all loops in the product + // construction of constraints (4,5) : all loops in the product // where no accepting run is detected in the ref. automaton, // must also be marked as not accepting in the cand. automaton for (unsigned q1p = 0; q1p < d.ref_size; ++q1p) @@ -473,7 +473,7 @@ namespace spot } } } - // construction of contraints (6,7): all loops in the product + // construction of constraints (6,7): all loops in the product // where accepting run is detected in the ref. automaton, must // also be marked as accepting in the candidate. for (unsigned q1p = 0; q1p < d.ref_size; ++q1p) diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 64dfbd5f6..93e482a3b 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -459,8 +459,8 @@ namespace spot // refhist, it is necessary to associate to each path constructed, // an ID number. // - // Given this ID, src_cand, dst_cand, the corresponding litteral can be - // retrived thanks to get_prc(...) a vars_helper's method. + // Given this ID, src_cand, dst_cand, the corresponding literal can be + // retrieved thanks to get_prc(...) a vars_helper's method. unsigned path_size = 0; for (unsigned i = 0; i < d.ref_size; ++i) { @@ -493,8 +493,8 @@ namespace spot } } - // Fill dict's bdd vetor (alpha_vect) and save each bdd and it's - // corresponding index in alpha_map. This is necessary beacause + // Fill dict's bdd vector (alpha_vect) and save each bdd and it's + // corresponding index in alpha_map. This is necessary because // some loops start from a precise bdd. Therefore, it's useful // to know its corresponding index to deal with vars_helper. unsigned j = 0; @@ -510,7 +510,7 @@ namespace spot d.helper.init(d.cand_size, d.alpha_vect.size(), d.cand_size, d.cand_nacc, path_size, state_based, false); - // Based on all previous informations, helper knows all litterals. + // Based on all previous informations, helper knows all literals. 
d.helper.declare_all_vars(++d.nvars); } @@ -909,7 +909,7 @@ namespace spot dout << "--- transition_acc variables ---\n"; if (state_based) { - dout << "In state_based mode, there is only 1 litteral for each " + dout << "In state_based mode, there is only 1 literal for each " "combination of src and nacc, regardless of dst or cond!\n"; for (unsigned i = 0; i < satdict.cand_size; ++i) for (unsigned j = 0; j < satdict.cand_nacc; ++j) diff --git a/spot/twaalgos/dtwasat.hh b/spot/twaalgos/dtwasat.hh index f51ca1405..64e01c7ae 100644 --- a/spot/twaalgos/dtwasat.hh +++ b/spot/twaalgos/dtwasat.hh @@ -85,7 +85,7 @@ namespace spot /// \brief Attempt to minimize a deterministic TωA with a SAT solver. /// - /// It acts like dtwa_sat_synthetisze() and obtains a first minimized + /// It acts like dtwa_sat_synthetize() and obtains a first minimized /// automaton. Then, incrementally, it encodes and solves the deletion of one /// state as many time as param value. /// If param >= 0, this process is fully repeated until the minimal automaton diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index bd0f4767d..f342439d9 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -177,7 +177,7 @@ namespace spot } }; - // Iterating over all mineterms can be very slow when |AP| + // Iterating over all minterms can be very slow when |AP| // is large (see issue #566) . The else branch implements // another approach that should be exponential in the // number of successors instead of in the number of atomic diff --git a/spot/twaalgos/emptiness.hh b/spot/twaalgos/emptiness.hh index cd9b40c3f..c204e8cc0 100644 --- a/spot/twaalgos/emptiness.hh +++ b/spot/twaalgos/emptiness.hh @@ -112,7 +112,7 @@ namespace spot return a_; } - /// Return the options parametrizing how the accepting run is computed. + // / Return the options parameterizing how the accepting run is computed. const option_map& options() const { @@ -153,7 +153,7 @@ namespace spot return a_; } - /// Return the options parametrizing how the emptiness check is realized. + /// Return the options parameterizing how the emptiness check is realized. const option_map& options() const { diff --git a/spot/twaalgos/emptiness_stats.hh b/spot/twaalgos/emptiness_stats.hh index 936929bc3..55df5d621 100644 --- a/spot/twaalgos/emptiness_stats.hh +++ b/spot/twaalgos/emptiness_stats.hh @@ -127,7 +127,7 @@ namespace spot } private : - unsigned states_; /// number of disctint visited states + unsigned states_; /// number of distinct visited states unsigned transitions_; /// number of visited transitions unsigned depth_; /// maximal depth of the stack(s) unsigned max_depth_; /// maximal depth of the stack(s) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 3c7758161..404fa4778 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -491,7 +491,7 @@ namespace spot bool acc_par, par_t min_win_par, bool respect_sg=true) { // In fix_scc, the attr computation is - // abused so we can not check ertain things + // abused so we can not check certain things // Computes the attractor of the winning set of player p within a // subgame given as rd. 
// If acc_par is true, max_par transitions are also accepting and @@ -860,7 +860,7 @@ namespace spot // during construction std::vector s_; - // Informations about sccs andthe current scc + // Informations about sccs and the current scc std::unique_ptr info_; par_t max_abs_par_; // Max parity occurring in the current scc // Minimal and maximal parity occurring in the entire graph @@ -1112,7 +1112,7 @@ namespace spot if (owners->size() != arena->num_states()) throw std::runtime_error("set_state_player(): The \"state-player\" " "vector has a different " - "size comparerd to the automaton! " + "size compared to the automaton! " "Called new_state in between?"); (*owners)[state] = owner; @@ -1286,7 +1286,7 @@ namespace spot game->set_named_prop("strategy", strategy); // transposed is a reversed copy of game to compute predecessors - // more easily. It also keep track of the original edge iindex. + // more easily. It also keep track of the original edge index. struct edge_data { unsigned edgeidx; }; diff --git a/spot/twaalgos/genem.hh b/spot/twaalgos/genem.hh index 0a6de1040..8854c0ebc 100644 --- a/spot/twaalgos/genem.hh +++ b/spot/twaalgos/genem.hh @@ -114,7 +114,7 @@ namespace spot /// Give the set of transitions contained in /// an accepting cycle of the SCC \a scc of \a aut. /// - /// \param si scc_info used to describle the automaton + /// \param si scc_info used to describe the automaton /// \param scc SCC to consider /// \param aut_acc Acceptance condition used for this SCC /// \param removed_colors A set of colors that can't appear on a transition diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 9a53f41c9..48d730444 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -153,7 +153,7 @@ namespace spot /// \brief Retrieve the list of aliases. /// - /// This points to the same list that the automaton's "aliasaes" + /// This points to the same list that the automaton's "aliases" /// named properties points to. Will return `nullptr` if no /// aliases are defined. /// diff --git a/spot/twaalgos/langmap.hh b/spot/twaalgos/langmap.hh index 98e783f41..4b1a71308 100644 --- a/spot/twaalgos/langmap.hh +++ b/spot/twaalgos/langmap.hh @@ -26,8 +26,8 @@ namespace spot /// \brief Identify states that recognize the same language. /// /// The returned vector is the same size as the automaton's number of state. - /// The number of different values (ignoring occurences) in the vector is the - /// total number of recognized languages, states recognizing the same + // / The number of different values (ignoring occurrences) in the vector is + /// the total number of recognized languages, states recognizing the same /// language have the same value. /// /// The given automaton must be deterministic. diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 554b8019d..ac7c5645e 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -67,7 +67,7 @@ namespace spot /// for the type of reduction you want, see spot::tl_simplifier. /// This idea is taken from \cite thirioux.02.fmics . /// - /// \param unambiguous When true, unambigous TGBA will be produced + /// \param unambiguous When true, unambiguous TGBA will be produced /// using the trick described in \cite benedikt.13.tacas . 
/// /// \param aborter When given, aborts the construction whenever the diff --git a/spot/twaalgos/magic.cc b/spot/twaalgos/magic.cc index 635bba286..f8e986449 100644 --- a/spot/twaalgos/magic.cc +++ b/spot/twaalgos/magic.cc @@ -211,7 +211,7 @@ namespace spot // the test 'c.get_color() != RED' is added to limit // the number of runs reported by successive // calls to the check method. Without this - // functionnality, the test can be ommited. + // functionality, the test can be omitted. trace << " It is blue and the arc is " << "accepting, start a red dfs" << std::endl; target = f.s; @@ -242,7 +242,7 @@ namespace spot // the test 'c.get_color() != RED' is added to limit // the number of runs reported by successive // calls to the check method. Without this - // functionnality, the test can be ommited. + // functionality, the test can be omitted. trace << " It is blue and the arc from " << a_->format_state(st_blue.front().s) << " to it is accepting, start a red dfs" diff --git a/spot/twaalgos/magic.hh b/spot/twaalgos/magic.hh index c8e3e4261..99f46f3d8 100644 --- a/spot/twaalgos/magic.hh +++ b/spot/twaalgos/magic.hh @@ -33,7 +33,7 @@ namespace spot /// \pre The automaton \a a must have at most one acceptance condition (i.e. /// it is a TBA). /// - /// During the visit of \a a, the returned checker stores explicitely all + /// During the visit of \a a, the returned checker stores explicitly all /// the traversed states. /// The method \a check() of the checker can be called several times /// (until it returns a null pointer) to enumerate all the visited acceptance @@ -88,11 +88,11 @@ namespace spot /// \pre The automaton \a a must have at most one acceptance condition (i.e. /// it is a TBA). /// - /// During the visit of \a a, the returned checker does not store explicitely + /// During the visit of \a a, the returned checker does not store explicitly /// the traversed states but uses the bit-state hashing technic presented in: /// \cite Holzmann.91.book. /// - /// Consequently, the detection of an acceptence cycle is not ensured. + /// Consequently, the detection of an acceptance cycle is not ensured. /// /// The size of the heap is limited to \n size bytes. /// diff --git a/spot/twaalgos/mask.hh b/spot/twaalgos/mask.hh index 564622bed..5a022bfc0 100644 --- a/spot/twaalgos/mask.hh +++ b/spot/twaalgos/mask.hh @@ -136,7 +136,7 @@ namespace spot /// /// It can modify either the condition or the acceptance sets of /// the edges. Set the condition to bddfalse to remove it. Note that - /// all transtions will be processed. + /// all transitions will be processed. /// \param init The optional new initial state. template void transform_copy(const const_twa_graph_ptr& old, @@ -157,7 +157,7 @@ namespace spot acc_cond::mark_t acc = t.acc; trans(t.src, cond, acc, t.dst); // Having the same number of states should assure that state ids are - // equivilent in old and cpy. + // equivalent in old and cpy. 
SPOT_ASSERT(t.src < cpy->num_states() && t.dst < cpy->num_states()); if (cond != bddfalse) cpy->new_edge(t.src, t.dst, cond, acc); diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 512848edb..055cb511e 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -99,7 +99,7 @@ namespace { if (!f) throw std::runtime_error("`" + name + - "' could not be oppened for writing."); + "' could not be opened for writing."); } ~fwrapper() { @@ -257,7 +257,7 @@ namespace spot if (!is_deterministic_(ins)) { trace << "is_input_deterministic_mealy(): State number " - << s << " is not input determinisc!\n"; + << s << " is not input determinist!\n"; return false; } } diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 9fd3c084e..baeca6c4f 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -91,7 +91,7 @@ namespace spot /// \brief split a separated mealy machine /// /// In a separated mealy machine, every transitions as a label of - /// the form `(in)&(out)`. This function will turn each transtion + /// the form `(in)&(out)`. This function will turn each transition /// into a pair of consecutive transitions labeled by `in` and /// `out`, and turn the mealy machine into a game (what we call a /// split mealy machine) @@ -197,7 +197,7 @@ namespace spot /// \pre The machines have to be both either split or unsplit, /// input complete and compatible. All of this is checked by assertion. /// \result A mealy machine representing the shared behaviour, - /// with the same tyoe (mealy/separated/split) as the input machines + /// with the same type (mealy/separated/split) as the input machines SPOT_API twa_graph_ptr mealy_product(const const_twa_graph_ptr& left, const const_twa_graph_ptr& right); diff --git a/spot/twaalgos/minimize.hh b/spot/twaalgos/minimize.hh index e9390c6c1..0b5c11bf2 100644 --- a/spot/twaalgos/minimize.hh +++ b/spot/twaalgos/minimize.hh @@ -95,7 +95,7 @@ namespace spot /// returned. Otherwise, if \a aut_neg_f was not supplied but \a f /// was, \a aut_neg_f is built from the negation of \a f. Then we /// check that product(aut,!minimize(aut_f)) and - /// product(aut_neg_f,minize(aut)) are both empty. If they + /// product(aut_neg_f,minimize(aut)) are both empty. If they /// are, the the minimization was sound. (See the paper for full /// details.) /// diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index f3765305f..cf84e7da8 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -133,7 +133,7 @@ namespace spot current_odd = current_odd != toggle_style; bool change_style = false; auto num_sets = old_num_sets; - // If the parity neeeds to be changed, then a new acceptance set is created. + // If the parity needs to be changed, then a new acceptance set is created. // The old acceptance sets are shifted if (output_odd != current_odd) { diff --git a/spot/twaalgos/parity.hh b/spot/twaalgos/parity.hh index 4043affc2..5363161fc 100644 --- a/spot/twaalgos/parity.hh +++ b/spot/twaalgos/parity.hh @@ -166,7 +166,7 @@ namespace spot /// When \a layered is true all transition that belong to the same /// layer receive the same color. When layer is `false`, only the /// transition that where used initially to define the layers (i.e, - /// the transition with the maximal color in the previous exemple), + /// the transition with the maximal color in the previous example), /// get their color adjusted. 
The other will receive either no /// color (if \a colored is false), or a useless color (if \a colored /// is true). Here "useless color" means the smallest color diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index 0a5979064..eabde299b 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -365,7 +365,7 @@ namespace spot // However (1) degeneralization is faster if the input is // GBA, and (2) if we want a deterministic parity automaton and the // input is not deterministic, that is useless here. We need - // to determinize it first, and our deterministization + // to determinize it first, and our determinization // function only deal with TGBA as input. if ((via_gba || (want_parity && !a->acc().is_parity())) && !a->acc().is_generalized_buchi()) diff --git a/spot/twaalgos/product.hh b/spot/twaalgos/product.hh index 796e4c23c..39bdad0be 100644 --- a/spot/twaalgos/product.hh +++ b/spot/twaalgos/product.hh @@ -38,7 +38,7 @@ namespace spot /// conjunction of the acceptance conditions of the two input /// automata. /// - /// As an optionmization, in case one of the left or right automaton + /// As an optimization, in case one of the left or right automaton /// is weak, the acceptance condition of the result is made simpler: /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. @@ -69,7 +69,7 @@ namespace spot /// conjunction of the acceptance conditions of the two input /// automata. /// - /// As an optionmization, in case one of the left or right automaton + /// As an optimization, in case one of the left or right automaton /// is weak, the acceptance condition of the result is made simpler: /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. @@ -98,7 +98,7 @@ namespace spot /// disjunction of the acceptance conditions of the two input /// automata. /// - /// As an optionmization, in case one of the left or right automaton + /// As an optimization, in case one of the left or right automaton /// is weak, the acceptance condition of the result is made simpler: /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. @@ -126,7 +126,7 @@ namespace spot /// disjunction of the acceptance conditions of the two input /// automata. /// - /// As an optionmization, in case one of the left or right automaton + /// As an optimization, in case one of the left or right automaton /// is weak, the acceptance condition of the result is made simpler: /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. diff --git a/spot/twaalgos/reachiter.hh b/spot/twaalgos/reachiter.hh index f1faf8b99..f9b45d38a 100644 --- a/spot/twaalgos/reachiter.hh +++ b/spot/twaalgos/reachiter.hh @@ -73,7 +73,7 @@ namespace spot /// \param in The source state number. /// \param out_s The destination state /// \param out The destination state number. - /// \param si The spot::twa_succ_iterator positionned on the current + /// \param si The spot::twa_succ_iterator positioned on the current /// transition. /// /// The in_s and out_s states are owned by the @@ -141,7 +141,7 @@ namespace spot /// \param in The source state number. /// \param out_s The destination state /// \param out The destination state number. 
- /// \param si The spot::twa_succ_iterator positionned on the current + /// \param si The spot::twa_succ_iterator positioned on the current /// transition. /// /// The in_s and out_s states are owned by the diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index 594b8bdeb..9a34da7c5 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -137,7 +137,7 @@ namespace spot }; - // When split we need to distiguish effectively new and old edges + // When split we need to distinguish effectively new and old edges if (split) { aut.get_graph().remove_dead_edges_(); @@ -381,12 +381,12 @@ namespace spot } - // Save the composed letters? With a special seperator like T/F? + // Save the composed letters? With a special separator like T/F? // Is swapping between formula <-> bdd expensive for (auto& e : aut.edges()) translate(e.cond); - // Remove the new auxilliary variables from the aut + // Remove the new auxiliary variables from the aut bdd c_supp = new_var_supp; while (c_supp != bddtrue) { diff --git a/spot/twaalgos/remfin.cc b/spot/twaalgos/remfin.cc index 435fdfa6c..aff736bf0 100644 --- a/spot/twaalgos/remfin.cc +++ b/spot/twaalgos/remfin.cc @@ -176,7 +176,7 @@ namespace spot // Specialized conversion from transition based Rabin acceptance to // transition based Büchi acceptance. // Is able to detect SCCs that are TBA-type (i.e., they can be - // converted to Büchi acceptance without chaning their structure). + // converted to Büchi acceptance without changing their structure). // // See "Deterministic ω-automata vis-a-vis Deterministic Büchi // Automata", S. Krishnan, A. Puri, and R. Brayton (ISAAC'94) for @@ -381,7 +381,7 @@ namespace spot { true, // state based true, // inherently weak - true, true, // determinisitic + true, true, // deterministic true, // complete true, // stutter inv. }); @@ -667,7 +667,7 @@ namespace spot << main_add << '\n'; // If the SCC is rejecting, there is no need for clone. - // Pretend we don't interesect any Fin. + // Pretend we don't intersect any Fin. if (si.is_rejecting_scc(n)) intersects_fin = false; diff --git a/spot/twaalgos/remfin.hh b/spot/twaalgos/remfin.hh index 08cb786a4..7d47b6b42 100644 --- a/spot/twaalgos/remfin.hh +++ b/spot/twaalgos/remfin.hh @@ -23,7 +23,7 @@ namespace spot { /// \ingroup twa_acc_transform - /// \brief Check if \a aut is Rablin-like and Büchi-realizable. + /// \brief Check if \a aut is Rabin-like and Büchi-realizable. /// /// This is inspired from rabin_to_buchi_maybe()'s algorithm. The /// main difference is that here, no automaton is built. diff --git a/spot/twaalgos/sccfilter.cc b/spot/twaalgos/sccfilter.cc index 9ec3f1c0e..5d67f1b1e 100644 --- a/spot/twaalgos/sccfilter.cc +++ b/spot/twaalgos/sccfilter.cc @@ -231,7 +231,7 @@ namespace spot // all acceptance sets, as this edge cannot be part // of any loop. // - If an edge is in an non-accepting SCC, we can only - // remove the Inf sets, as removinf the Fin sets + // remove the Inf sets, as removing the Fin sets // might make the SCC accepting. // // The above rules are made more complex with two flags: diff --git a/spot/twaalgos/sccinfo.hh b/spot/twaalgos/sccinfo.hh index ea36e77e0..76dd3ed8b 100644 --- a/spot/twaalgos/sccinfo.hh +++ b/spot/twaalgos/sccinfo.hh @@ -319,7 +319,7 @@ namespace spot /// \brief True if we know that the SCC has an accepting cycle /// /// Note that both is_accepting() and is_rejecting() may return - /// false if an SCC interesects a mix of Fin and Inf sets. + /// false if an SCC intersects a mix of Fin and Inf sets. 
/// Call determine_unknown_acceptance() to decide. bool is_accepting() const { @@ -329,7 +329,7 @@ namespace spot /// \brief True if we know that all cycles in the SCC are rejecting /// /// Note that both is_accepting() and is_rejecting() may return - /// false if an SCC interesects a mix of Fin and Inf sets. + /// false if an SCC intersects a mix of Fin and Inf sets. /// Call determine_unknown_acceptance() to decide. bool is_rejecting() const { @@ -509,7 +509,7 @@ namespace spot scc_info(const scc_and_mark_filter& filt, scc_info_options options); // we separate the two functions so that we can rename // scc_info(x,options) into scc_info_with_options(x,options) in Python. - // Otherwrise calling scc_info(aut,options) can be confused with + // Otherwise calling scc_info(aut,options) can be confused with // scc_info(aut,initial_state). scc_info(const scc_and_mark_filter& filt) : scc_info(filt, scc_info_options::ALL) diff --git a/spot/twaalgos/se05.cc b/spot/twaalgos/se05.cc index a4eb21af5..c74fae427 100644 --- a/spot/twaalgos/se05.cc +++ b/spot/twaalgos/se05.cc @@ -215,7 +215,7 @@ namespace spot // the test 'c.get_color() != RED' is added to limit // the number of runs reported by successive // calls to the check method. Without this - // functionnality, the test can be ommited. + // functionality, the test can be omitted. trace << " It is cyan or blue and the arc is " << "accepting, start a red dfs" << std::endl; c.set_color(RED); @@ -244,7 +244,7 @@ namespace spot // the test 'c.get_color() != RED' is added to limit // the number of runs reported by successive // calls to the check method. Without this - // functionnality, the test can be ommited. + // functionality, the test can be omitted. trace << " The arc from " << a_->format_state(st_blue.front().s) << " to the current state is accepting, start a " diff --git a/spot/twaalgos/se05.hh b/spot/twaalgos/se05.hh index 640d37f0a..e9df0ed38 100644 --- a/spot/twaalgos/se05.hh +++ b/spot/twaalgos/se05.hh @@ -33,7 +33,7 @@ namespace spot /// \pre The automaton \a a must have at most one acceptance condition (i.e. /// it is a TBA). /// - /// During the visit of \a a, the returned checker stores explicitely all + /// During the visit of \a a, the returned checker stores explicitly all /// the traversed states. /// The method \a check() of the checker can be called several times /// (until it returns a null pointer) to enumerate all the visited accepting @@ -92,11 +92,11 @@ namespace spot /// \pre The automaton \a a must have at most one acceptance condition (i.e. /// it is a TBA). /// - /// During the visit of \a a, the returned checker does not store explicitely + /// During the visit of \a a, the returned checker does not store explicitly /// the traversed states but uses the bit-state hashing technic presented in /// \cite holzmann.91.book /// - /// Consequently, the detection of an acceptence cycle is not ensured. + /// Consequently, the detection of an acceptance cycle is not ensured. /// /// The size of the heap is limited to \n size bytes. /// diff --git a/spot/twaalgos/simulation.hh b/spot/twaalgos/simulation.hh index 07f5d8832..01fc1365d 100644 --- a/spot/twaalgos/simulation.hh +++ b/spot/twaalgos/simulation.hh @@ -51,7 +51,7 @@ namespace spot /// The resulting automaton has a named property "simulated-states", /// that is a vector mapping each state of the input to a state of /// the output. Note that some input states may be mapped to -1, as - /// a by-product of transition prunning. + /// a by-product of transition pruning. 
/// /// \param automaton the automaton to simulate. /// @@ -153,7 +153,7 @@ namespace spot /// then reduce the automaton. /// /// There is no need to call scc_filter() before as it is always applied to - /// remove dead and unreacheable states. + /// remove dead and unreachable states. /// /// \param aut the automaton to simulate. /// \return a new automaton which is at worst a copy of the received @@ -171,7 +171,7 @@ namespace spot /// way as reduce_direct_sim(). /// /// There is no need to call scc_filter() before as it is always applied to - /// remove dead and unreacheable states. + /// remove dead and unreachable states. /// /// \param aut the automaton to simulate. /// \return a new automaton which is at worst a copy of the received @@ -190,7 +190,7 @@ namespace spot /// transitions). /// /// There is no need to call scc_filter() before as it is always applied to - /// remove dead and unreacheable states. + /// remove dead and unreachable states. /// /// \param aut the automaton to simulate. /// \return a new automaton which is at worst a copy of the received diff --git a/spot/twaalgos/split.hh b/spot/twaalgos/split.hh index 54599c388..9b273096d 100644 --- a/spot/twaalgos/split.hh +++ b/spot/twaalgos/split.hh @@ -234,6 +234,6 @@ namespace spot /// /// Using split_edges() also creates an automaton with separated labels, /// but the separation will be much finer since it will result in a much - /// involves all atomtic proposition. + /// involves all atomic proposition. SPOT_API twa_graph_ptr separate_edges(const const_twa_graph_ptr& aut); } diff --git a/spot/twaalgos/stats.hh b/spot/twaalgos/stats.hh index 63aa3c4e9..f2f72fd06 100644 --- a/spot/twaalgos/stats.hh +++ b/spot/twaalgos/stats.hh @@ -51,7 +51,7 @@ namespace spot /// \brief Compute sub statistics for an automaton. SPOT_API twa_sub_statistics sub_stats_reachable(const const_twa_ptr& g); - /// \brief Count all transtitions, even unreachable ones. + /// \brief Count all transitions, even unreachable ones. SPOT_API unsigned long long count_all_transitions(const const_twa_graph_ptr& g); diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index fef412e09..b2c8648ff 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1418,7 +1418,7 @@ namespace spot auto delta = sw.stop(); bv->trans_time += delta; if (vs) - *vs << "tanslating formula done in " << delta << " seconds\n"; + *vs << "translating formula done in " << delta << " seconds\n"; } res->prop_complete(trival::maybe()); diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index d3d5d3271..5174176e9 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -209,7 +209,7 @@ namespace spot }; /// \ingroup synthesis - /// \brief Seeks to decompose a formula into independently synthesizable + /// \brief Seeks to decompose a formula into independently synthetizable /// sub-parts. The conjunction of all sub-parts then /// satisfies the specification /// diff --git a/spot/twaalgos/tau03.hh b/spot/twaalgos/tau03.hh index a4c7ad1e6..6e9f31267 100644 --- a/spot/twaalgos/tau03.hh +++ b/spot/twaalgos/tau03.hh @@ -31,7 +31,7 @@ namespace spot /// /// \pre The automaton \a a must have at least one acceptance condition. /// - /// During the visit of \a a, the returned checker stores explicitely all + /// During the visit of \a a, the returned checker stores explicitly all /// the traversed states. 
The implemented algorithm is the following: /// /** \verbatim diff --git a/spot/twaalgos/tau03opt.hh b/spot/twaalgos/tau03opt.hh index 2d6f529b2..6e910ab57 100644 --- a/spot/twaalgos/tau03opt.hh +++ b/spot/twaalgos/tau03opt.hh @@ -31,7 +31,7 @@ namespace spot /// /// \pre The automaton \a a must have at least one acceptance condition. /// - /// During the visit of \a a, the returned checker stores explicitely all + /// During the visit of \a a, the returned checker stores explicitly all /// the traversed states. The implemented algorithm is the following: /// /** \verbatim @@ -84,7 +84,7 @@ namespace spot end; \endverbatim */ /// - /// This algorithm is a generalisation to TGBA of the one implemented in + /// This algorithm is a generalization to TGBA of the one implemented in /// spot::explicit_se05_search. It is based on the acceptance set labelling /// of states used in spot::explicit_tau03_search. Moreover, it introduce /// a slight optimisation based on vectors of integers counting for each diff --git a/spot/twaalgos/toparity.hh b/spot/twaalgos/toparity.hh index 5283b637a..648fd29e5 100644 --- a/spot/twaalgos/toparity.hh +++ b/spot/twaalgos/toparity.hh @@ -135,7 +135,7 @@ namespace spot /// This procedure combines many strategies in an attempt to produce /// the smallest possible parity automaton. Some of the strategies /// include CAR (color acceptance record), IAR (index appearance - /// record), partial degenerazation, conversion from Rabin to Büchi + /// record), partial degeneralization, conversion from Rabin to Büchi /// when possible, etc. /// /// The \a options argument can be used to selectively disable some of the @@ -152,7 +152,7 @@ namespace spot /// /// This implements a straightforward adaptation of the LAR (latest /// appearance record) to automata with transition-based marks. We - /// call this adaptation the CAR (color apperance record), as it + /// call this adaptation the CAR (color appearance record), as it /// tracks colors (i.e., acceptance sets) instead of states. /// /// It is better to use to_parity() instead, as it will use better @@ -186,7 +186,7 @@ namespace spot /// \ingroup twa_acc_transform /// \brief Turn a Rabin-like or Streett-like automaton into a parity automaton - /// based on the index appearence record (IAR) + /// based on the index appearance record (IAR) /// /// Returns nullptr if the input automaton is neither Rabin-like nor /// Streett-like, and calls spot::iar() otherwise. diff --git a/spot/twaalgos/totgba.hh b/spot/twaalgos/totgba.hh index 51af4d9f6..b437cab8e 100644 --- a/spot/twaalgos/totgba.hh +++ b/spot/twaalgos/totgba.hh @@ -71,7 +71,7 @@ namespace spot /// \brief Take an automaton with any acceptance condition and return /// an equivalent Generalized Streett automaton. /// - /// This works by putting the acceptance condition in cunjunctive + /// This works by putting the acceptance condition in conjunctive /// normal form, and then merging all the /// Inf(x1)|Inf(x2)|...|Inf(xn) that may occur in clauses into a /// single Inf(X). diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index d1dbe5e27..cc5d4f9af 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -36,7 +36,7 @@ namespace spot /// automaton produced (TGBA, BA, Monitor). The default is TGBA. /// /// Method set_pref() may be used to specify whether small automata - /// should be prefered over deterministic automata. + /// should be preferred over deterministic automata. 
/// /// Method set_level() may be used to specify the optimization level. /// diff --git a/spot/twaalgos/word.cc b/spot/twaalgos/word.cc index bc9f5a52b..7ac3fa598 100644 --- a/spot/twaalgos/word.cc +++ b/spot/twaalgos/word.cc @@ -206,7 +206,7 @@ namespace spot if (word[ind] == '}') word_parse_error(word, ind, "Expected ';' delimiter: " "'}' stands for ending a cycle"); - // Exract formula, convert it to bdd and add it to the prefix sequence + // Extract formula, convert it to bdd and add it to the prefix sequence extract_bdd(tw->prefix); if (i == std::string::npos) word_parse_error(word, ind + 1, "Missing cycle in formula"); diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index 171b14ce3..3b5753fb4 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -39,7 +39,7 @@ namespace spot dict_->unregister_all_my_variables(this); } - /// \brief Simplify a lasso-shapped word. + /// \brief Simplify a lasso-shaped word. /// /// The simplified twa_word may represent a subset of the actual /// words represented by the original twa_word. The typical @@ -79,7 +79,7 @@ namespace spot /// \brief Convert the twa_word as an automaton. /// - /// Convert the twa_word into a lasso-shapred automaton + /// Convert the twa_word into a lasso-shaped automaton /// with "true" acceptance condition. /// /// This is useful to evaluate a word on an automaton. diff --git a/spot/twaalgos/zlktree.cc b/spot/twaalgos/zlktree.cc index 521d9630a..2d8ada12f 100644 --- a/spot/twaalgos/zlktree.cc +++ b/spot/twaalgos/zlktree.cc @@ -816,7 +816,7 @@ namespace spot throw std::runtime_error("acd::first_branch(): unknown state " + std::to_string(s)); unsigned scc = si_->scc_of(s); - if (trees_[scc].trivial) // the branch is irrelevant for transiant SCCs + if (trees_[scc].trivial) // the branch is irrelevant for transient SCCs return 0; if (SPOT_UNLIKELY(nodes_.empty())) // make sure we do not complain about this if all SCCs are trivial. diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index e0ec2c3e3..a07dbe65d 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -480,13 +480,13 @@ namespace spot /// /// If \a colored is set, each output transition will have exactly /// one color, and the output automaton will use at most n+1 colors - /// if the input has n colors. If \a colored is unsed (the default), + /// if the input has n colors. If \a colored is unset (the default), /// output transitions will use at most one color, and output /// automaton will use at most n colors. /// - /// The acd_tranform() is the original function producing + /// The acd_transform() is the original function producing /// optimal transition-based output (optimal in the sense of least - /// number of duplicated states), while the acd_tansform_sbacc() variant + /// number of duplicated states), while the acd_transform_sbacc() variant /// produces state-based output from transition-based input and without /// any optimality claim. The \a order_heuristics argument, enabled /// by default activates the ORDER_HEURISTICS option of the ACD. diff --git a/spot/twacube/cube.hh b/spot/twacube/cube.hh index fc46249f0..45edc64b0 100644 --- a/spot/twacube/cube.hh +++ b/spot/twacube/cube.hh @@ -41,18 +41,18 @@ namespace spot /// Warning : a variable cannot be set in both bitset at the /// same time (consistency! 
cannot be true and false) /// - /// The cube for (a & !b) will be repensented by : + /// The cube for (a & !b) will be represented by: /// - true_var = 1 0 /// - false_var = 0 1 /// /// To represent free variables such as in (a & !b) | (a & b) - /// (wich is equivalent to (a) with b free) + /// (which is equivalent to (a) with b free) /// - true_var : 1 0 /// - false_var : 0 0 - /// This exemple shows that the representation of free variables + /// This example shows that the representation of free variables /// is done by unsetting variable in both vector /// - /// To be memory efficient, these two bitsets are contigous in memory + /// To be memory efficient, these two bitsets are contigious in memory /// i.e. if we want to represent 35 variables, a cube will be /// represented by 4 unsigned int contiguous in memory. The 35 /// first bits represent truth values. The 29 bits following are diff --git a/spot/twacube/twacube.hh b/spot/twacube/twacube.hh index 37ab9abcf..745df56a9 100644 --- a/spot/twacube/twacube.hh +++ b/spot/twacube/twacube.hh @@ -93,7 +93,7 @@ namespace spot } /// \brief Returns the current transition according to a specific - /// \a seed. The \a seed is traditionnally the thread identifier. + /// \a seed. The \a seed is traditionally the thread identifier. inline unsigned current(unsigned seed = 0) const { // no-swarming : since twacube are dedicated for parallelism, i.e. @@ -204,7 +204,7 @@ namespace spot const twacube& twa); private: unsigned init_; ///< The Id of the initial state - acc_cond acc_; ///< The acceptance contidion + acc_cond acc_; ///< The acceptance condition const std::vector aps_; ///< The name of atomic propositions graph_t theg_; ///< The underlying graph cubeset cubeset_; ///< Ease the cube manipulation diff --git a/tests/core/cube.cc b/tests/core/cube.cc index bcd5e8c4c..936c6acae 100644 --- a/tests/core/cube.cc +++ b/tests/core/cube.cc @@ -35,7 +35,7 @@ static bool test_translation(bdd& input, spot::cubeset& cubeset, std::unordered_map& reverse_binder, std::vector& aps) { - // The BDD used to detect if the convertion works + // The BDD used to detect if the conversion works bdd res = bddfalse; bdd initial = input; diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index cbc49b1c9..1e0397a5f 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -206,7 +206,7 @@ diff outx exp cat >exp < GFb -tanslating formula done in X seconds +translating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds @@ -642,7 +642,7 @@ the following signals can be temporarily removed: new formula: GFa <-> GFb there are 1 subformulas trying to create strategy directly for GFa <-> GFb -tanslating formula done in X seconds +translating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds @@ -658,7 +658,7 @@ for f in "(GFa <-> GFb) & G(c <-> d)" "(GFb <-> GFa) & G(c <-> d)" \ do cat >exp <exp < GFa) & G((a & c) | (!a & !c)) -tanslating formula done in X seconds +translating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds @@ -701,7 +701,7 @@ diff outx exp cat >exp < FGb -tanslating formula done in X seconds +translating formula done in X seconds direct strategy was found. 
direct strat has 2 states, 3 edges and 0 colors simplification took X seconds @@ -1043,7 +1043,7 @@ the following signals can be temporarily removed: new formula: GFi <-> GFo1 there are 1 subformulas trying to create strategy directly for GFi <-> GFo1 -tanslating formula done in X seconds +translating formula done in X seconds direct strategy was found. direct strat has 1 states, 2 edges and 0 colors simplification took X seconds diff --git a/tests/core/parity.cc b/tests/core/parity.cc index 2e3949ae9..e7dda99df 100644 --- a/tests/core/parity.cc +++ b/tests/core/parity.cc @@ -236,7 +236,7 @@ static bool is_right_parity(spot::const_twa_graph_ptr aut, target_odd = origin_odd; if (!(is_max == target_max && is_odd == target_odd)) { - std::cerr << "======Wrong accceptance======\n"; + std::cerr << "======Wrong acceptance======\n"; std::string kind[] = { "max", "min", "same", "any" }; std::string style[] = { "odd", "even", "same", "any" }; std::cerr << "target: " << kind[target_kind] << ' ' diff --git a/tests/core/safra.cc b/tests/core/safra.cc index d01613d02..b238df92a 100644 --- a/tests/core/safra.cc +++ b/tests/core/safra.cc @@ -44,7 +44,7 @@ static void help() "\t-b\treduce result using bisimulation\n" "\t--scc_opt\tUse an SCC-based Safra\n" "\t--bisim_opt\tUse Simulation info to reduce macro-states size\n" - "\t--stutter\tStutter-invarience optimisation\n"; + "\t--stutter\tStutter-invariance optimisation\n"; exit(1); } diff --git a/tests/core/twagraph.cc b/tests/core/twagraph.cc index 7e33d6748..378d366c2 100644 --- a/tests/core/twagraph.cc +++ b/tests/core/twagraph.cc @@ -218,7 +218,7 @@ static void f6() // when faced with a more involved problem static void f7() { - // The current mege_states implementation of "next" + // The current merge_states implementation of "next" // needs two successive calls to obtain an automaton with only 3 states // This is especially annoying as this depends on the numbering. // By renumbering 2->1 3->2 1->3 the current version only needs one call too From f57782686d434695bcd4d3676c3de97c6d02e9fc Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 16 Apr 2024 16:37:00 +0200 Subject: [PATCH 436/606] Rename minimize_obligation_garanteed_to_work minimize_obligation_garanteed_to_work is now minimize_obligation_guaranteed_to_work * spot/twaalgos/minimize.hh, spot/twaalgos/minimize.cc: change name. * spot/twaalgos/postproc.cc: update call * NEWS: Mention it. --- NEWS | 9 ++++++--- spot/twaalgos/minimize.cc | 4 ++-- spot/twaalgos/minimize.hh | 2 +- spot/twaalgos/postproc.cc | 2 +- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 4bc2f452e..38b832611 100644 --- a/NEWS +++ b/NEWS @@ -109,10 +109,10 @@ New in spot 2.11.6.dev (not yet released) - spot::translate() has a new -x option "relabel-overlap=M" that augments the existing "relabel-bool=N". By default, N=4, M=8. When the formula to translate has more than N atomic propositions, - relabel_bse() is first called to attempt to rename non-overlaping + relabel_bse() is first called to attempt to rename non-overlapping boolean subexpressions (i.e., no shared atomic proposition) in order to reduce the number of atomic proposition, a source of - explonential explosion in several places of the translation + exponential explosion in several places of the translation pipeline. This relabel-bool optimization exists since Spot 2.4. 
The new feature is that if, after relabel-bool, the formula still has more than M atomic propositions, then spot::translate() now @@ -159,7 +159,7 @@ New in spot 2.11.6.dev (not yet released) t or co-Büchi). In case where the input automaton had no rejecting cycle, the Büchi acceptance was overkill: scc_filter will now use "t" acceptance. This change may have unexpected - conseqences in code paths that assume running scc_filter on a + consequences in code paths that assume running scc_filter on a Büchi automaton will always return a Büchi automaton. For those, a "keep_one_color" option has been added to scc_filter. @@ -195,6 +195,9 @@ New in spot 2.11.6.dev (not yet released) the acceptance, was sometimes able to simplify co-Büchi to "t", causing surprizes. + - Rename minimize_obligation_garanteed_to_work to + minimize_obligation_guaranteed_to_work. + Python: - The spot.automata() and spot.automaton() functions now accept a diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 399cc2541..97b182566 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -607,7 +607,7 @@ namespace spot return product(min_aut, aut_neg)->is_empty(); } - bool minimize_obligation_garanteed_to_work(const const_twa_graph_ptr& aut_f, + bool minimize_obligation_guaranteed_to_work(const const_twa_graph_ptr& aut_f, formula f) { // WDBA-minimization necessarily work for obligations @@ -644,7 +644,7 @@ namespace spot ("minimize_obligation() does not support alternation"); bool minimization_will_be_correct = false; - if (minimize_obligation_garanteed_to_work(aut_f, f)) + if (minimize_obligation_guaranteed_to_work(aut_f, f)) { minimization_will_be_correct = true; } diff --git a/spot/twaalgos/minimize.hh b/spot/twaalgos/minimize.hh index 0b5c11bf2..276da5149 100644 --- a/spot/twaalgos/minimize.hh +++ b/spot/twaalgos/minimize.hh @@ -130,7 +130,7 @@ namespace spot /// minimize_obligation(), but as it is less likely, you might /// decide to save time. SPOT_API - bool minimize_obligation_garanteed_to_work(const const_twa_graph_ptr& aut_f, + bool minimize_obligation_guaranteed_to_work(const const_twa_graph_ptr& aut_f, formula f = nullptr); /// @} diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index eabde299b..b7f6d27de 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -474,7 +474,7 @@ namespace spot wdba_minimize = 0; } if (wdba_minimize == 2) - wdba_minimize = minimize_obligation_garanteed_to_work(a, f); + wdba_minimize = minimize_obligation_guaranteed_to_work(a, f); if (wdba_minimize) { bool reject_bigger = (PREF_ == Small) && (level_ <= Medium); From 2ffdd8494230463b8609649db38b534e028f7bd3 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Tue, 16 Apr 2024 16:42:27 +0200 Subject: [PATCH 437/606] Rename split_independant_formulas split_independant_formulas is now split_independent_formulas * spot/twaalgos/synthesis.hh, spot/twaalgos/synthesis.cc: change name. * bin/ltlsynt.cc: update call * NEWS: Mention it. --- NEWS | 3 +++ bin/ltlsynt.cc | 2 +- spot/twaalgos/synthesis.cc | 6 +++--- spot/twaalgos/synthesis.hh | 4 ++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/NEWS b/NEWS index 38b832611..c8dca2c82 100644 --- a/NEWS +++ b/NEWS @@ -198,6 +198,9 @@ New in spot 2.11.6.dev (not yet released) - Rename minimize_obligation_garanteed_to_work to minimize_obligation_guaranteed_to_work. + - Rename split_independant_formulas to + split_independent_formulas. 
+ Python: - The spot.automata() and spot.automaton() functions now accept a diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index e113bf205..24f4af16a 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -447,7 +447,7 @@ namespace std::vector> sub_outs; if (opt_decompose_ltl) { - auto subs = split_independant_formulas(f, output_aps); + auto subs = split_independent_formulas(f, output_aps); if (gi->verbose_stream) { *gi->verbose_stream << "there are " diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index b2c8648ff..2928a642d 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1847,7 +1847,7 @@ namespace // anonymous for subsformula namespace spot { std::pair, std::vector>> - split_independant_formulas(formula f, const std::vector& outs) + split_independent_formulas(formula f, const std::vector& outs) { formula_2_inout_props form2props(outs); std::set outs_set(outs.begin(), outs.end()); @@ -1905,10 +1905,10 @@ namespace spot } std::pair, std::vector>> - split_independant_formulas(const std::string& f, + split_independent_formulas(const std::string& f, const std::vector& outs) { - return split_independant_formulas(parse_formula(f), outs); + return split_independent_formulas(parse_formula(f), outs); } bool diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 5174176e9..3d25441e9 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -222,10 +222,10 @@ namespace spot /// propositions each. /// @{ SPOT_API std::pair, std::vector>> - split_independant_formulas(formula f, const std::vector& outs); + split_independent_formulas(formula f, const std::vector& outs); SPOT_API std::pair, std::vector>> - split_independant_formulas(const std::string& f, + split_independent_formulas(const std::string& f, const std::vector& outs); /// @} From c5490428be2433bf455b231a58a967eeb3aed65b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 19 Apr 2024 09:43:50 +0200 Subject: [PATCH 438/606] * tests/sanity/style.test: Fix spurious failure. --- tests/sanity/style.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/sanity/style.test b/tests/sanity/style.test index dfe263031..ea8705363 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -226,7 +226,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do $GREP 'operator[^a-zA-Z0-9_(]*[ ][^a-zA-Z0-9_(]*(' $tmp && diag 'Write operatorXX(...) without spaces around XX.' - $GREP 'operator[^(]* (' $tmp && + $GREP 'operator[^(s]* (' $tmp && diag 'No space before (' $GREP '[ ]default:[^:].*;' $tmp && From ffddbd84d0ec4848c3c74603cae0e0fa8e921e4a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 19 Apr 2024 10:20:49 +0200 Subject: [PATCH 439/606] * NEWS: Fix some typos. --- NEWS | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/NEWS b/NEWS index c8dca2c82..0d7060529 100644 --- a/NEWS +++ b/NEWS @@ -28,7 +28,7 @@ New in spot 2.11.6.dev (not yet released) an HOA file in which aliases are used to form a basis for the whole set of labels. Those aliases are only used when more than one atomic proposition is used (otherwise, the atomic proposition - and its negation is already a basis). This can help reducing the + and its negation is already a basis). This can help reduce the size of large HOA files. - autfilt learned --separate-edges, to split the labels of @@ -40,12 +40,12 @@ New in spot 2.11.6.dev (not yet released) if those subformulas share atomic propositions. 
- ltlsynt's --ins and --outs options will iterpret any atomic - proposition surrounded by '/' as a regular expressions. + proposition surrounded by '/' as a regular expression. For intance with ltlsynt --ins='/^in/,/env/' --outs=/^out/,/control/' ... - any atomic proposition that start with 'in' or contains 'env' + any atomic proposition that starts with 'in' or contains 'env' will be considered as inputs, and those that start with 'out' or contain 'control' will be considered output. @@ -129,7 +129,7 @@ New in spot 2.11.6.dev (not yet released) 36 seconds; it now produces an AIG circuit with 53 nodes in only 0.1 second. - - spot::contains_forq() is a implementation of the paper "FORQ-Based + - spot::contains_forq() is an implementation of the paper "FORQ-Based Language Inclusion Formal Testing" (Doveri, Ganty, Mazzocchi; CAV'22) contributed by Jonah Romero. @@ -176,7 +176,7 @@ New in spot 2.11.6.dev (not yet released) this. - spot::remove_alternation() has a new argument to decide whether it - should raise an exception of return nullptr if it requires more + should raise an exception or return nullptr if it requires more acceptance sets than supported. - spot::dualize() learned a trick to be faster on states that have @@ -184,22 +184,25 @@ New in spot 2.11.6.dev (not yet released) automaton. spot::remove_alternation(), spot::tgba_powerset(), simulation-based reductions, and spot::tgba_determinize() learned a similar trick, except it isn't applied at the state level but if - the entire automaton use few distinct labels. These changes may + the entire automaton uses few distinct labels. These changes may speed up the processing of automata with many atomic propositions but few distinct labels. (Issue #566 and issue #568.) - - [Potential backward incompatibility] spot::dualize() does not call - cleanup_acceptance() anymore. This change ensures that the dual - of a Büchi automaton will always be a co-Büchi automaton. - Previously cleanup_acceptance(), which remove unused colors from - the acceptance, was sometimes able to simplify co-Büchi to "t", - causing surprizes. + Backward incompatibilities: - - Rename minimize_obligation_garanteed_to_work to - minimize_obligation_guaranteed_to_work. + - spot::dualize() does not call cleanup_acceptance() anymore. This + change ensures that the dual of a Büchi automaton will always be a + co-Büchi automaton. Previously cleanup_acceptance(), which remove + unused colors from the acceptance, was sometimes able to simplify + co-Büchi to "t", causing surprizes. - - Rename split_independant_formulas to - split_independent_formulas. + - Function minimize_obligation_garanteed_to_work() was renamed to + minimize_obligation_guaranteed_to_work(). We believe this + function is only used by Spot currently. + + - Function split_independant_formulas() was renamed to + split_independent_formulas(). We believe this function is only + used by Spot currently. Python: @@ -217,7 +220,7 @@ New in spot 2.11.6.dev (not yet released) - Recent version Jupyter Notebook and Jupyter Lab started to render SVG elements using tag to make it easier to copy/paste - those image. This breaks several usages, including the + those images. This breaks several usages, including the possibility to have informative tooltips on states and edges (used in Spot). See the following issues for more details. 
https://github.com/jupyter/notebook/issues/7114 @@ -258,7 +261,7 @@ New in spot 2.11.6.dev (not yet released) - Functions complement() and change_parity() could incorrectly read or write the unused edge #0. In the case of complement(), writing - that edge was usually harmless. However in some scenario, + that edge was usually harmless. However, in some scenario, complement could need to stick a ⓪ acceptance mark on edge #0, then the acceptance condition could be simplified to "t", and finally change_parity could be confused to find such an accepting From ab7f4f51c448e1b7f605d9ba872d9dc16eb57eed Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 24 Apr 2024 23:45:47 +0200 Subject: [PATCH 440/606] simulation: fix determinism check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit bda40a5f introduced a subtle bug where nm_minato was being increased in more cases causing some non-deterministic automata to be incorrectly tagged as deterministic automata. Fixes issue #575, reported by Dávid Smolka. * spot/twaalgos/simulation.cc (create_edges): Do not increment nm_minato when dest is bddfalse. * tests/core/568.test: Add Dávid's first test-case. * tests/python/forq_contains.py: Add Dávid's second test-case. --- spot/twaalgos/simulation.cc | 2 ++ tests/core/568.test | 24 ++++++++++++++++++++++++ tests/python/forq_contains.py | 25 +++++++++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index 9fe714e03..b06fb7fc7 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -582,6 +582,8 @@ namespace spot unsigned srcst = 0; auto create_edges = [&](int srcid, bdd one, bdd dest) { + if (dest == bddfalse) + return; // Iterate over all possible destination classes. 
We // use minato_isop here, because if the same valuation // of atomic properties can go to two different diff --git a/tests/core/568.test b/tests/core/568.test index b538bfb7d..8de15609b 100755 --- a/tests/core/568.test +++ b/tests/core/568.test @@ -101,3 +101,27 @@ EOF # Using autcross will also test tgba_determinize genaut --cycle-onehot-nba=11..12 --cycle-log-nba=11..12 | autcross --language-preserved 'autfilt --small' --verbose + + + +# A test case from issue #575 +cat >575.hoa < res.hoa +test "16 49 0" = "`autfilt --stats='%s %e %d' res.hoa`" diff --git a/tests/python/forq_contains.py b/tests/python/forq_contains.py index 165f9030f..df5d0ed66 100644 --- a/tests/python/forq_contains.py +++ b/tests/python/forq_contains.py @@ -360,3 +360,28 @@ tc.assertFalse(spot.contains(tba2, tba)) a = spot.translate("(p0 & p2) -> G!p1", "buchi") b = spot.translate("p0 -> G!p1", "buchi") do_symmetric_test(b, a) + +# issue #575 +aut1 = spot.automaton("""HOA: v1.1 States: 12 Start: 0 AP: 6 "p13" +"p12" "p15" "p16" "p14" "p11" acc-name: Buchi Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc !complete +properties: !deterministic exist-branch --BODY-- State: 0 [!0&!1&!3 | +!2 | !3&!4] 1 [!0&!1&3 | !2 | 3&!4] 2 [0&!3&4 | 1&!3&4 | !2] 3 [0&3&4 +| 1&3&4 | !2] 4 [0&!1&3&4 | !2] 6 State: 1 [t] 7 State: 2 [!0&!1&3&!5 +| !2 | 3&!4&!5] 2 {0} [0&3&4&!5 | 1&3&4&!5 | !2] 4 [0&!1&3&4&!5 | !2] +6 State: 3 [t] 5 State: 4 [!2 | 3&!4&!5] 2 {0} [!2 | 3&4&!5] 4 State: +5 [t] 9 State: 6 [!0&!1&3&!5 | !0&3&!4&!5 | !2] 2 {0} [!0&1&3&4&!5 | +!2] 4 [0&!1&3&!5 | 0&3&!4&!5 | !2] 6 [0&1&3&4&!5 | !2] 10 State: 7 [t] +11 State: 8 [t] 8 {0} State: 9 [t] 8 {0} State: 10 [!0&3&!4&!5 | !2] 2 +{0} [!0&3&4&!5 | !2] 4 [0&3&!4&!5 | !2] 6 [0&3&4&!5 | !2] 10 State: 11 +[t] 8 {0} --END--""") +aut2 = spot.automaton("""HOA: v1 States: 5 Start: 0 AP: 6 "p13" "p12" +"p15" "p16" "p14" "p11" acc-name: Buchi Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc --BODY-- State: 0 +[!0&!1&2&3 | 2&3&!4] 1 [0&2&3&4 | 1&2&3&4] 2 [0&!1&2&3&4] 3 State: 1 +[!0&!1&2&3&!5 | 2&3&!4&!5] 1 {0} [0&2&3&4&!5 | 1&2&3&4&!5] 2 +[0&!1&2&3&4&!5] 3 State: 2 [2&3&!4&!5] 1 {0} [2&3&4&!5] 2 State: 3 +[!0&!1&2&3&!5 | !0&2&3&!4&!5] 1 {0} [!0&1&2&3&4&!5] 2 [0&!1&2&3&!5 | +0&2&3&!4&!5] 3 [0&1&2&3&4&!5] 4 State: 4 [!0&2&3&!4&!5] 1 {0} +[!0&2&3&4&!5] 2 [0&2&3&!4&!5] 3 [0&2&3&4&!5] 4 --END--""") +do_symmetric_test(aut2, aut1) From be102e09d4d08c6e31315e5e22fb7a8e7423a135 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Apr 2024 17:59:10 +0200 Subject: [PATCH 441/606] implement BA acceptance set reduction and enlargement For issue #570. * spot/twaalgos/cleanacc.hh, spot/twaalgos/cleanacc.cc (reduce_buchi_acceptance_set_here, enlarge_buchi_acceptance_set_here): New functions. * bin/autfilt.cc: Add options --reduce-acceptance-set and --enlarge-acceptance-set. * tests/core/basetred.test: New file. * tests/Makefile.am: Add it. * NEWS: Mention it. --- NEWS | 9 +++ bin/autfilt.cc | 27 +++++++- spot/twaalgos/cleanacc.cc | 133 ++++++++++++++++++++++++++++++++++++++ spot/twaalgos/cleanacc.hh | 28 ++++++++ tests/Makefile.am | 1 + tests/core/basetred.test | 116 +++++++++++++++++++++++++++++++++ 6 files changed, 311 insertions(+), 3 deletions(-) create mode 100755 tests/core/basetred.test diff --git a/NEWS b/NEWS index 0d7060529..347f44488 100644 --- a/NEWS +++ b/NEWS @@ -35,6 +35,10 @@ New in spot 2.11.6.dev (not yet released) the automaton using a basis of disjoint labels. See https://spot.lre.epita.fr/tut25.html for some motivation. 
+ - autfilt learned --reduce-acceptance-set/--enlarge-acceptance-set + to heuristically remove/add unnecessary acceptance marks in + Büchi automata. (Issue #570) + - ltlfilt has a new option --relabel-overlapping-bool=abc|pnn that will replace boolean subformulas by fresh atomic propositions even if those subformulas share atomic propositions. @@ -142,6 +146,11 @@ New in spot 2.11.6.dev (not yet released) The above also impacts autfilt --included-in option. + - spot::reduce_buchi_acceptance_set_here() and + spot::enlarge_buchi_acceptance_set_here() will heuristically + remove/add unnecessary acceptance marks in Büchi automata. + (Issue #570.) + - Given a twa_word_ptr W and a twa_ptr A both sharing the same alphabet, one can now write W->intersects(A) or A->intersects(W) instead of the longuer W->as_automaton()->intersects(A) or diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 08b17df99..420bf2867 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -102,6 +102,7 @@ enum { OPT_DUALIZE, OPT_DNF_ACC, OPT_EDGES, + OPT_ENLARGE_ACCEPTANCE_SET, OPT_EQUIVALENT_TO, OPT_EXCLUSIVE_AP, OPT_GENERALIZED_RABIN, @@ -139,6 +140,7 @@ enum { OPT_PRODUCT_AND, OPT_PRODUCT_OR, OPT_RANDOMIZE, + OPT_REDUCE_ACCEPTANCE_SET, OPT_REJ_SCCS, OPT_REJECT_WORD, OPT_REM_AP, @@ -278,6 +280,12 @@ static const argp_option options[] = { "nth", 'N', "RANGE", 0, "assuming input automata are numbered from 1, keep only those in RANGE", 0 }, + { "enlarge-acceptance-set", OPT_ENLARGE_ACCEPTANCE_SET, nullptr, 0, + "enlarge the number of accepting transitions (or states if -S) in a " + "Büchi automaton", 0 }, + { "reduce-acceptance-set", OPT_REDUCE_ACCEPTANCE_SET, nullptr, 0, + "reduce the number of accepting transitions (or states if -S) in a " + "Büchi automaton", 0 }, /**************************************************/ RANGE_DOC_FULL, WORD_DOC, @@ -705,6 +713,8 @@ static int opt_highlight_accepting_run = -1; static bool opt_highlight_languages = false; static bool opt_dca = false; static bool opt_streett_like = false; +static bool opt_enlarge_acceptance_set = false; +static bool opt_reduce_acceptance_set = false; static spot::twa_graph_ptr ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false) @@ -899,12 +909,12 @@ parse_opt(int key, char* arg, struct argp_state*) opt_dnf_acc = true; opt_cnf_acc = false; break; - case OPT_STREETT_LIKE: - opt_streett_like = true; - break; case OPT_EDGES: opt_edges = parse_range(arg, 0, std::numeric_limits::max()); break; + case OPT_ENLARGE_ACCEPTANCE_SET: + opt_enlarge_acceptance_set = true; + break; case OPT_EXCLUSIVE_AP: opt->excl_ap.add_group(arg); break; @@ -1164,6 +1174,9 @@ parse_opt(int key, char* arg, struct argp_state*) randomize_st = true; } break; + case OPT_REDUCE_ACCEPTANCE_SET: + opt_reduce_acceptance_set = true; + break; case OPT_REJ_SCCS: opt_rej_sccs = parse_range(arg, 0, std::numeric_limits::max()); opt_art_sccs_set = true; @@ -1215,6 +1228,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_STATES: opt_states = parse_range(arg, 0, std::numeric_limits::max()); break; + case OPT_STREETT_LIKE: + opt_streett_like = true; + break; case OPT_STRIPACC: opt_stripacc = true; break; @@ -1670,6 +1686,11 @@ namespace else if (opt_rem_unused_ap) // constrain(aut, true) already does that aut->remove_unused_ap(); + if (opt_enlarge_acceptance_set) + spot::enlarge_buchi_acceptance_set_here(aut, sbacc); + if (opt_reduce_acceptance_set) + spot::reduce_buchi_acceptance_set_here(aut, sbacc); + if (opt_split_edges) aut = spot::split_edges(aut); else if
(opt_separate_edges) diff --git a/spot/twaalgos/cleanacc.cc b/spot/twaalgos/cleanacc.cc index 26f6f834f..4bb96e44a 100644 --- a/spot/twaalgos/cleanacc.cc +++ b/spot/twaalgos/cleanacc.cc @@ -18,6 +18,8 @@ #include "config.h" #include +#include +#include namespace spot { @@ -668,4 +670,135 @@ namespace spot { return simplify_acceptance_here(make_twa_graph(aut, twa::prop_set::all())); } + + twa_graph_ptr + reduce_buchi_acceptance_set_here(twa_graph_ptr& aut, bool preserve_sbacc) + { + if (!aut->acc().is_buchi()) + throw std::invalid_argument + ("reduce_buchi_acceptance_set_here() expects a Büchi automaton"); + + if (!preserve_sbacc) + aut->prop_state_acc(trival::maybe()); + aut->prop_weak(trival::maybe()); // issue #562 + + // For each accepting edge in the automaton, we will test if the + // acceptance mark can be removed. To test this, we have to make + // sure that no accepting cycle depends exclusively on this mark. + // We do so by temporary changing the mark of the current edge to + // {1}, and then using the following acceptance condition to + // ensure that there is no cycle that pass through {1} when we + // ignore all other edges with {0}. + acc_cond testacc = acc_cond(2, (acc_cond::acc_code::fin({0}) & + acc_cond::acc_code::inf({1}))); + + acc_cond::mark_t one{1}; + acc_cond::mark_t zero{0}; + acc_cond::mark_t none{}; + scc_info si(aut, scc_info_options::TRACK_STATES); + + if (!preserve_sbacc || !aut->prop_state_acc()) + // transition-based version + for (auto& e: aut->edges()) + { + if (e.acc == none) // nothing to remove + continue; + unsigned srcscc = si.scc_of(e.src); + if (srcscc != si.scc_of(e.dst)) // transient edge + { + e.acc = none; + } + else + { + e.acc = one; + if (generic_emptiness_check_for_scc(si, srcscc, testacc)) + e.acc = none; + else + e.acc = zero; + } + } + else + // state-based version + for (unsigned s = 0, ns = aut->num_states(); s < ns; ++s) + { + acc_cond::mark_t acc = aut->state_acc_sets(s); + if (acc == none) // nothing to remove + continue; + for (auto& e: aut->out(s)) + e.acc = one; + if (generic_emptiness_check_for_scc(si, si.scc_of(s), testacc)) + acc = none; + for (auto& e: aut->out(s)) + e.acc = acc; + } + return aut; + } + + twa_graph_ptr + enlarge_buchi_acceptance_set_here(twa_graph_ptr& aut, bool preserve_sbacc) + { + if (!aut->acc().is_buchi()) + throw std::invalid_argument + ("enlarge_buchi_acceptance_set_here() expects a Büchi automaton"); + + if (!preserve_sbacc) + aut->prop_state_acc(trival::maybe()); + aut->prop_weak(trival::maybe()); // issue #562 + + // For each edge not marked as accepting will test if an + // acceptance mark can be added. To test this, we have to make + // sure that no rejecting cycle goes through this edge. + // We do so my temporary changing the mark of the current edge to + // {1}, and then using the following acceptance condition to + // ensure that there is no accepting cycle that pass through {1} + // when we ignore all other edges with {0}. 
+ acc_cond testacc = + acc_cond(2, acc_cond::acc_code::fin({0}) & acc_cond::acc_code::inf({1})); + + acc_cond::mark_t one{1}; + acc_cond::mark_t zero{0}; + acc_cond::mark_t none{}; + scc_info si(aut, scc_info_options::TRACK_STATES); + + if (!preserve_sbacc || !aut->prop_state_acc()) + // transition-based version + for (auto& e: aut->edges()) + { + if (e.acc == zero) // nothing to add + continue; + unsigned srcscc = si.scc_of(e.src); + if (si.is_rejecting_scc(srcscc)) // nothing to add + continue; + if (srcscc != si.scc_of(e.dst)) // transient edge + { + e.acc = zero; + } + else + { + e.acc = one; + if (generic_emptiness_check_for_scc(si, srcscc, testacc)) + e.acc = zero; + else + e.acc = none; + } + } + else + // state-based version + for (unsigned s = 0, ns = aut->num_states(); s < ns; ++s) + { + acc_cond::mark_t acc = aut->state_acc_sets(s); + if (acc == zero) // nothing to add + continue; + unsigned srcscc = si.scc_of(s); + if (si.is_rejecting_scc(srcscc)) // nothing to add + continue; + for (auto& e: aut->out(s)) + e.acc = one; + if (generic_emptiness_check_for_scc(si, srcscc, testacc)) + acc = zero; + for (auto& e: aut->out(s)) + e.acc = acc; + } + return aut; + } } diff --git a/spot/twaalgos/cleanacc.hh b/spot/twaalgos/cleanacc.hh index 3c2c38070..a3055c1eb 100644 --- a/spot/twaalgos/cleanacc.hh +++ b/spot/twaalgos/cleanacc.hh @@ -70,4 +70,32 @@ namespace spot SPOT_API twa_graph_ptr simplify_acceptance(const_twa_graph_ptr aut); /// @} + + /// \ingroup twa_acc_transform + /// \brief Reduce the acceptance set of a Büchi automaton + /// + /// Iterate over all accepting transitions, and remove them from the + /// acceptance set if this does not change the language. + /// + /// This modifies the automaton in place. + /// + /// If the input has state-based acceptance, it might lose it, + /// unless \a preserve_sbacc is set. + SPOT_API twa_graph_ptr + reduce_buchi_acceptance_set_here(twa_graph_ptr& aut, + bool preserve_sbacc = false); + + /// \ingroup twa_acc_transform + /// \brief Enlarge the acceptance set of a Büchi automaton + /// + /// Iterate over all non-accepting transitions, and add them to the + /// acceptance set if this cannot change the language. + /// + /// This modifies the automaton in place. + /// + /// If the input has state-based acceptance, it might lose it, + /// unless \a preserve_sbacc is set. + SPOT_API twa_graph_ptr + enlarge_buchi_acceptance_set_here(twa_graph_ptr& aut, + bool preserve_sbacc = false); } diff --git a/tests/Makefile.am b/tests/Makefile.am index ebee91fa9..0c3c995c5 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -224,6 +224,7 @@ TESTS_twa = \ core/568.test \ core/acc.test \ core/acc2.test \ + core/basetred.test \ core/bdddict.test \ core/cube.test \ core/alternating.test \ diff --git a/tests/core/basetred.test b/tests/core/basetred.test new file mode 100755 index 000000000..a7f07d872 --- /dev/null +++ b/tests/core/basetred.test @@ -0,0 +1,116 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +genaut --cycle-log-nba=3 > in1.hoa +test 3 -eq `grep -c '{0}' in1.hoa` +autfilt --reduce-acceptance-set in1.hoa > out1.hoa +autfilt --enlarge-acceptance-set in1.hoa > out1l.hoa +autfilt --reduce-acceptance-set out1l.hoa > out1r.hoa +autfilt --enlarge-acceptance-set out1r.hoa > out1rl.hoa +test 1 -eq `grep -c '{0}' out1.hoa` +test 9 -eq `grep -c '{0}' out1l.hoa` +test 1 -eq `grep -c '{0}' out1r.hoa` +diff out1l.hoa out1rl.hoa +autfilt --reduce-acceptance-set -S in1.hoa > out1b.hoa +autfilt --enlarge-acceptance-set -S in1.hoa > out1lb.hoa +autfilt --enlarge-acceptance-set -S out1b.hoa > out1lbb.hoa +test 1 -eq `grep -c '{0}' out1b.hoa` +test 3 -eq `grep -c '{0}' out1lb.hoa` +test 3 -eq `grep -c '{0}' out1lbb.hoa` +diff out1.hoa out1b.hoa +diff out1lb.hoa out1lbb.hoa + +cat >in2.hoa < out2.hoa +autfilt --reduce-acceptance-set -S in2.hoa > out2b.hoa +autfilt --enlarge-acceptance-set out2.hoa > out2l.hoa +autfilt --enlarge-acceptance-set -S out2b.hoa > out2bl.hoa + +cat >ex2.hoa <ex2b.hoa < Date: Thu, 2 May 2024 21:35:21 +0200 Subject: [PATCH 442/606] Upgrade detection of Python include path for Python 3.12 Fixes #577. * m4/pypath.m4: Python 3.12 removed distutils, so use sysconfig instead. * NEWS: Mention the bug. --- NEWS | 3 +++ m4/pypath.m4 | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 347f44488..59ccf6606 100644 --- a/NEWS +++ b/NEWS @@ -317,6 +317,9 @@ New in spot 2.11.6.dev (not yet released) by "a[->]". The latter should only replace "(!a)[*];a". (Issue #559.) + - The configure script failed to detect the include path for Python 3.12. + (Issue #577.) + New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/m4/pypath.m4 b/m4/pypath.m4 index efdf57355..f750e172c 100644 --- a/m4/pypath.m4 +++ b/m4/pypath.m4 @@ -6,8 +6,8 @@ AC_DEFUN([adl_CHECK_PYTHON], esac AC_CACHE_CHECK([for $am_display_PYTHON includes directory], [adl_cv_python_inc], - [adl_cv_python_inc=`$PYTHON -c "import sys; from distutils import sysconfig;] -[sys.stdout.write(sysconfig.get_python_inc())" 2>/dev/null`]) + [adl_cv_python_inc=`$PYTHON -c "import sys, sysconfig;] +[sys.stdout.write(sysconfig.get_path('include'))" 2>/dev/null`]) AC_CACHE_CHECK([for $am_display_PYTHON extension suffix], [adl_cv_python_ext], [adl_cv_python_ext=`$PYTHON -c "import importlib.machinery as i; print(i.EXTENSION_SUFFIXES[[0]])"`]) From e6362b785bdb339874ec5bb70960fecfe0dd7db9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 May 2024 00:05:33 +0200 Subject: [PATCH 443/606] python: use raw strings when appropriate We had some incorrectly escaped strings that are now causing SyntaxWarnings with Python 3.12 * bin/options.py, python/spot/aux_.py, python/spot/ltsmin.i, python/spot/__init__.py: Here. * NEWS: Mention the fix. --- NEWS | 3 +++ bin/options.py | 6 +++--- python/spot/__init__.py | 16 ++++++++-------- python/spot/aux_.py | 2 +- python/spot/ltsmin.i | 2 +- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/NEWS b/NEWS index 59ccf6606..d19451793 100644 --- a/NEWS +++ b/NEWS @@ -320,6 +320,9 @@ New in spot 2.11.6.dev (not yet released) - The configure script failed to detect the include path for Python 3.12. (Issue #577.) + - Some incorrectly escaped strings in Python code were causing + warnings with Python 3.12. 
+ New in spot 2.11.6 (2023-08-01) Bug fixes: diff --git a/bin/options.py b/bin/options.py index c2ef4de72..52ebf73e6 100755 --- a/bin/options.py +++ b/bin/options.py @@ -29,10 +29,10 @@ import subprocess with open('Makefile.am', 'r') as mf: lines = mf.read() -lines = re.sub('\s*\\\\\s*', ' ', lines) -bin_programs = re.search('bin_PROGRAMS\s*=([\w \t]*)', lines).group(1).split() +lines = re.sub(r'\s*\\\s*', ' ', lines) +bin_programs = re.search(r'bin_PROGRAMS\s*=([\w \t]*)', lines).group(1).split() -optre = re.compile('(-\w), (--[\w=-]+)') +optre = re.compile(r'(-\w), (--[\w=-]+)') d = {} diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 3d3393797..1c6133390 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -344,8 +344,8 @@ class formula: return str_sclatex_psl(self, parenth) elif format == 'mathjax' or format == 'j': return (str_sclatex_psl(self, parenth). - replace("``", "\\unicode{x201C}"). - replace("\\textrm{''}", "\\unicode{x201D}")) + replace("``", r"\unicode{x201C}"). + replace(r"\textrm{''}", r"\unicode{x201D}")) elif format == 'dot' or format == 'd': ostr = ostringstream() print_dot_psl(ostr, self) @@ -466,16 +466,16 @@ class formula: @_extend(atomic_prop_set) class atomic_prop_set: def _repr_latex_(self): - res = '$\{' + res = r'$\{' comma = '' for ap in self: apname = ap.to_str('j') - if not '\\unicode{' in apname: - apname = "\\unicode{x201C}" + apname + "\\unicode{x201D}" + if not r'\unicode{' in apname: + apname = r"\unicode{x201C}" + apname + r"\unicode{x201D}" res += comma comma = ', ' res += apname - res += '\}$' + res += r'\}$' return res @@ -1445,12 +1445,12 @@ class twa_word: res += bdd_to_formula(letter, bd).to_str('j') if len(res) > 1: res += '; ' - res += '\\mathsf{cycle}\\{' + res += r'\mathsf{cycle}\{' for idx, letter in enumerate(self.cycle): if idx: res += '; ' res += bdd_to_formula(letter, bd).to_str('j') - return res + '\\}$' + return res + r'\}$' def as_svg(self): """ diff --git a/python/spot/aux_.py b/python/spot/aux_.py index 211a988bd..7fa0668a7 100644 --- a/python/spot/aux_.py +++ b/python/spot/aux_.py @@ -51,7 +51,7 @@ def extend(*classes): # parameter is inverted. https://gitlab.com/graphviz/graphviz/issues/1605 # In our case, the scale parameters should both be <= 1, so we can # detect when that is not the case. -svgscale_regex = re.compile('transform="scale\(([\d.]+) ([\d.]+)\) rotate') +svgscale_regex = re.compile(r'transform="scale\(([\d.]+) ([\d.]+)\) rotate') def _gvfix(matchobj): xs = float(matchobj.group(1)) diff --git a/python/spot/ltsmin.i b/python/spot/ltsmin.i index c039fdba3..dd3d56d87 100644 --- a/python/spot/ltsmin.i +++ b/python/spot/ltsmin.i @@ -74,7 +74,7 @@ def load(filename): p = subprocess.run(['spins', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - if p.stdout: print(re.sub('^\s*\[\.*\s*\]\n', '', p.stdout, + if p.stdout: print(re.sub(r'^\s*\[\.*\s*\]\n', '', p.stdout, flags=re.MULTILINE), file=sys.stderr) if p.stderr: print(p.stderr, file=sys.stderr) p.check_returncode() From c5c3e905aef1da3efb31308acec8e2f83bf51f48 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 3 May 2024 16:32:16 +0200 Subject: [PATCH 444/606] python: workaround different help() output in Python 3.12 Python 3.12 introduced some subtle changes in the way doc strings are displayed by help(). This was causing spurious errors in the following test. * tests/python/formulas.ipynb: Use print(x.__doc__) instead of help(x). 
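A minimal sketch of the workaround, assuming the spot Python bindings are importable (the actual notebook cells are in the diff below):

    import spot
    # help() displays docstrings slightly differently starting with
    # Python 3.12, so its output is not stable across Python versions.
    # Printing the raw docstring gives identical output everywhere.
    print(spot.formula.__format__.__doc__)
    # help(spot.formula.__format__)   # version-dependent rendering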
--- tests/python/formulas.ipynb | 177 ++++++++++++++++++------------------ 1 file changed, 88 insertions(+), 89 deletions(-) diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 47b908c75..20e96c17d 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -10,7 +10,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -27,7 +27,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -39,7 +39,7 @@ "spot.formula(\"p1 U (p2 R (p3 & !p4))\")" ] }, - "execution_count": 2, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -51,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -63,7 +63,7 @@ "spot.formula(\"{a;first_match({b[*];c[+]}[:*3..5];b)}<>-> (c & GFb)\")" ] }, - "execution_count": 3, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -81,7 +81,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -93,7 +93,7 @@ "spot.formula(\"c & (a | b)\")" ] }, - "execution_count": 4, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -111,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -123,7 +123,7 @@ "spot.formula(\"c & (a | b)\")" ] }, - "execution_count": 5, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -142,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -151,7 +151,7 @@ "'p1 U (p2 R (p3 & !p4))'" ] }, - "execution_count": 6, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -169,7 +169,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -201,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -231,64 +231,63 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The specifiers that can be used with `format` are documented as follows:" + "The specifiers that can be used with `format` are documented as follows.\n", + "(Note: As this document is part of our test-suite we have to use `print(x.__doc__)` instead of `help(x)` to work around some formating changes introduced in Python 3.12's `help()`.)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Help on function __format__ in module spot:\n", + "Format the formula according to `spec`.\n", "\n", - "__format__(self, spec)\n", - " Format the formula according to `spec`.\n", - " \n", - " Parameters\n", - " ----------\n", - " spec : str, optional\n", - " a list of letters that specify how the formula\n", - " should be formatted.\n", - " \n", - " Supported specifiers\n", - " --------------------\n", - " \n", - " - 'f': use Spot's syntax (default)\n", - " - '8': use Spot's syntax in UTF-8 mode\n", - " - 's': use Spin's syntax\n", - " - 'l': use LBT's syntax\n", - " - 'w': use Wring's syntax\n", - " - 'x': use LaTeX output\n", - " - 'X': use self-contained LaTeX output\n", - " - 'j': use self-contained LaTeX output, adjusted for MathJax\n", - " \n", - " Add some of those letters for additional options:\n", - " \n", - " - 'p': 
use full parentheses\n", - " - 'c': escape the formula for CSV output (this will\n", - " enclose the formula in double quotes, and escape\n", - " any included double quotes)\n", - " - 'h': escape the formula for HTML output\n", - " - 'd': escape double quotes and backslash,\n", - " for use in C-strings (the outermost double\n", - " quotes are *not* added)\n", - " - 'q': quote and escape for shell output, using single\n", - " quotes or double quotes depending on the contents.\n", - " - '[...]': rewrite away all the operators specified in brackets,\n", - " using spot.unabbreviate().\n", - " \n", - " - ':spec': pass the remaining specification to the\n", - " formating function for strings.\n", - "\n" + " Parameters\n", + " ----------\n", + " spec : str, optional\n", + " a list of letters that specify how the formula\n", + " should be formatted.\n", + "\n", + " Supported specifiers\n", + " --------------------\n", + "\n", + " - 'f': use Spot's syntax (default)\n", + " - '8': use Spot's syntax in UTF-8 mode\n", + " - 's': use Spin's syntax\n", + " - 'l': use LBT's syntax\n", + " - 'w': use Wring's syntax\n", + " - 'x': use LaTeX output\n", + " - 'X': use self-contained LaTeX output\n", + " - 'j': use self-contained LaTeX output, adjusted for MathJax\n", + "\n", + " Add some of those letters for additional options:\n", + "\n", + " - 'p': use full parentheses\n", + " - 'c': escape the formula for CSV output (this will\n", + " enclose the formula in double quotes, and escape\n", + " any included double quotes)\n", + " - 'h': escape the formula for HTML output\n", + " - 'd': escape double quotes and backslash,\n", + " for use in C-strings (the outermost double\n", + " quotes are *not* added)\n", + " - 'q': quote and escape for shell output, using single\n", + " quotes or double quotes depending on the contents.\n", + " - '[...]': rewrite away all the operators specified in brackets,\n", + " using spot.unabbreviate().\n", + "\n", + " - ':spec': pass the remaining specification to the\n", + " formating function for strings.\n", + "\n", + " \n" ] } ], "source": [ - "help(spot.formula.__format__)" + "print(spot.formula.__format__.__doc__)" ] }, { @@ -300,7 +299,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -309,7 +308,7 @@ "True" ] }, - "execution_count": 10, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -320,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -329,7 +328,7 @@ "False" ] }, - "execution_count": 11, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -347,7 +346,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -356,7 +355,7 @@ "True" ] }, - "execution_count": 12, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -367,7 +366,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -376,7 +375,7 @@ "False" ] }, - "execution_count": 13, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -387,7 +386,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -396,7 +395,7 @@ "True" ] }, - "execution_count": 14, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -414,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 15, + 
"execution_count": 24, "metadata": {}, "outputs": [ { @@ -426,7 +425,7 @@ "spot.formula(\"\\\"a > b\\\" & \\\"proc[2]@init\\\" & GF_foo_\")" ] }, - "execution_count": 15, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -437,7 +436,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -449,7 +448,7 @@ "spot.formula(\"a & b & GFc\")" ] }, - "execution_count": 16, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -460,7 +459,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -472,7 +471,7 @@ "spot.formula(\"p0 & p1 & GFp2\")" ] }, - "execution_count": 17, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -490,7 +489,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 27, "metadata": {}, "outputs": [ { @@ -711,7 +710,7 @@ "" ] }, - "execution_count": 18, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -729,7 +728,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -765,7 +764,7 @@ "" ] }, - "execution_count": 19, + "execution_count": 28, "metadata": {}, "output_type": "execute_result" } @@ -776,7 +775,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -785,7 +784,7 @@ "'recurrence'" ] }, - "execution_count": 20, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } @@ -796,7 +795,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -808,7 +807,7 @@ "spot.formula(\"F(a & X(!a & b))\")" ] }, - "execution_count": 21, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -826,7 +825,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 31, "metadata": {}, "outputs": [ { @@ -838,7 +837,7 @@ "spot.formula(\"F(a & ((a & (a U (!a & b)) & ((!b U !a) | (b U !a))) | (!a & (!a U (a & !a & b)) & ((!b U a) | (b U a))) | (b & (b U (!a & b & !b)) & ((!a U !b) | (a U !b))) | (!b & (!b U (!a & b)) & ((!a U b) | (a U b))) | (!a & b & (G!a | Ga) & (G!b | Gb))))\")" ] }, - "execution_count": 22, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -856,7 +855,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -868,7 +867,7 @@ "spot.formula(\"(0 R !(a <-> b)) -> (1 U (a <-> b))\")" ] }, - "execution_count": 23, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -880,7 +879,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 33, "metadata": {}, "outputs": [ { @@ -892,7 +891,7 @@ "spot.formula(\"(1 U ((a & b) | (!a & !b))) | !(0 R ((!a & b) | (a & !b)))\")" ] }, - "execution_count": 24, + "execution_count": 33, "metadata": {}, "output_type": "execute_result" } @@ -910,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 34, "metadata": {}, "outputs": [ { @@ -946,7 +945,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 35, "metadata": {}, "outputs": [ { @@ -986,7 +985,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 36, "metadata": {}, "outputs": [ { From a826a4ae6f092d49d4c9114b5f4bb8fc1034126b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 6 May 2024 17:25:45 +0200 
Subject: [PATCH 445/606] * tests/python/formulas.ipynb: Improve SONF example (fixes #578). --- tests/python/formulas.ipynb | 111 +++++++++++++++++++----------------- 1 file changed, 59 insertions(+), 52 deletions(-) diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 20e96c17d..2882ac7ac 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -10,7 +10,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -27,7 +27,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -39,7 +39,7 @@ "spot.formula(\"p1 U (p2 R (p3 & !p4))\")" ] }, - "execution_count": 11, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -51,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -63,7 +63,7 @@ "spot.formula(\"{a;first_match({b[*];c[+]}[:*3..5];b)}<>-> (c & GFb)\")" ] }, - "execution_count": 12, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -81,7 +81,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -93,7 +93,7 @@ "spot.formula(\"c & (a | b)\")" ] }, - "execution_count": 13, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -111,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -123,7 +123,7 @@ "spot.formula(\"c & (a | b)\")" ] }, - "execution_count": 14, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -142,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -151,7 +151,7 @@ "'p1 U (p2 R (p3 & !p4))'" ] }, - "execution_count": 15, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -169,7 +169,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -201,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -237,7 +237,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -299,7 +299,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -308,7 +308,7 @@ "True" ] }, - "execution_count": 19, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -319,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -328,7 +328,7 @@ "False" ] }, - "execution_count": 20, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -346,7 +346,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -355,7 +355,7 @@ "True" ] }, - "execution_count": 21, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -366,7 +366,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -375,7 +375,7 @@ "False" ] }, - "execution_count": 22, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -386,7 +386,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -395,7 +395,7 @@ "True" ] }, - 
"execution_count": 23, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -413,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -425,7 +425,7 @@ "spot.formula(\"\\\"a > b\\\" & \\\"proc[2]@init\\\" & GF_foo_\")" ] }, - "execution_count": 24, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -436,7 +436,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -448,7 +448,7 @@ "spot.formula(\"a & b & GFc\")" ] }, - "execution_count": 25, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -459,7 +459,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -471,7 +471,7 @@ "spot.formula(\"p0 & p1 & GFp2\")" ] }, - "execution_count": 26, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -489,7 +489,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -710,7 +710,7 @@ "" ] }, - "execution_count": 27, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -728,7 +728,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -764,7 +764,7 @@ "" ] }, - "execution_count": 28, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -775,7 +775,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -784,7 +784,7 @@ "'recurrence'" ] }, - "execution_count": 29, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -795,7 +795,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -807,7 +807,7 @@ "spot.formula(\"F(a & X(!a & b))\")" ] }, - "execution_count": 30, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -825,7 +825,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -837,7 +837,7 @@ "spot.formula(\"F(a & ((a & (a U (!a & b)) & ((!b U !a) | (b U !a))) | (!a & (!a U (a & !a & b)) & ((!b U a) | (b U a))) | (b & (b U (!a & b & !b)) & ((!a U !b) | (a U !b))) | (!b & (!b U (!a & b)) & ((!a U b) | (a U b))) | (!a & b & (G!a | Ga) & (G!b | Gb))))\")" ] }, - "execution_count": 31, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -855,7 +855,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -867,7 +867,7 @@ "spot.formula(\"(0 R !(a <-> b)) -> (1 U (a <-> b))\")" ] }, - "execution_count": 32, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -879,7 +879,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -891,7 +891,7 @@ "spot.formula(\"(1 U ((a & b) | (!a & !b))) | !(0 R ((!a & b) | (a & !b)))\")" ] }, - "execution_count": 33, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -909,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -945,7 +945,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -980,12 +980,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Converting 
to Suffix Operator Normal Form:" + "Converting to [Suffix Operator Normal Form](https://doi.org/10.1109/TCAD.2008.2003303):" ] }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 27, "metadata": {}, "outputs": [ { @@ -1003,10 +1003,10 @@ { "data": { "text/latex": [ - "$\\mathsf{G} \\mathit{sonf\\_}_{0} \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{1} \\lor \\mathsf{F} a) \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{0} \\lor (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} \\mathit{sonf\\_}_{1}))$" + "$\\mathsf{G} p_{0} \\land \\mathsf{G} (\\lnot p_{1} \\lor \\mathsf{F} a) \\land \\mathsf{G} (\\lnot p_{0} \\lor (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} p_{1}))$" ], "text/plain": [ - "spot.formula(\"Gsonf_0 & G(!sonf_1 | Fa) & G(!sonf_0 | ({x[*]}[]-> sonf_1))\")" + "spot.formula(\"Gp0 & G(!p1 | Fa) & G(!p0 | ({x[*]}[]-> p1))\")" ] }, "metadata": {}, @@ -1015,7 +1015,7 @@ { "data": { "text/plain": [ - "('sonf_0', 'sonf_1')" + "('p0', 'p1')" ] }, "metadata": {}, @@ -1027,10 +1027,17 @@ "display(f)\n", "\n", "# In addition to the formula, returns a list of newly introduced APs\n", - "f, aps = spot.suffix_operator_normal_form(f, 'sonf_')\n", + "f, aps = spot.suffix_operator_normal_form(f, 'p')\n", "display(f)\n", "display(aps)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From ed91f59bbd3dbe795c6ed41766619fe8a9928533 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 13 May 2024 22:15:15 +0200 Subject: [PATCH 446/606] tl: new PSL trivial simplifications Always rewrite {[*]}[]->0 as 0, and {[*]}<>->1 = 1. Fixes #572. * spot/tl/formula.cc: Implement them. * doc/tl/tl.tex, NEWS: Document them. * tests/core/equals.test: Test those. 
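These two rewrites can be checked outside the test suite with a small standalone program.  The sketch below is not part of the patch; it only relies on the fact that the simplifications are applied by the formula constructors, so parsing alone already returns the constants:

    // Standalone check of the two new trivial rewrites (illustration only).
    #include <iostream>
    #include <spot/tl/parse.hh>
    #include <spot/tl/print.hh>

    int main()
    {
      spot::formula f = spot::parse_formula("{[*]}[]->0");  // expected: 0
      spot::formula g = spot::parse_formula("{[*]}<>->1");  // expected: 1
      std::cout << spot::str_psl(f) << '\n'
                << spot::str_psl(g) << '\n';
      return (f.is_ff() && g.is_tt()) ? 0 : 1;
    }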
--- NEWS | 5 +++++ doc/tl/tl.tex | 2 ++ spot/tl/formula.cc | 6 ++++-- tests/core/equals.test | 2 ++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index d19451793..33a40d185 100644 --- a/NEWS +++ b/NEWS @@ -89,6 +89,11 @@ New in spot 2.11.6.dev (not yet released) - b:b[*i..j] = b[*max(i,1)..j] - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1, j+l-1] + - The following new trivial simplifications have been implemented + for PSL operators: + - {[*]}[]->0 = 0 + - {[*]}<>->1 = 1 + - The HOA parser is a bit smarter when merging multiple initial states into a single initial state (Spot's automaton class supports only one): it now reuses the edges leaving initial states diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index e7c283bc6..141daa7b8 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -998,6 +998,8 @@ formula $b$, the following rewritings are systematically performed & \nsere{b} &\equiv \NOT b\\ \sere{r}\Asuffix \1&\equiv \1 & \sere{r}\Esuffix \0&\equiv \0 \\ + \sere{\STAR{}} \Asuffix \0 &\equiv \0 +& \sere{\STAR{}} \Esuffix \1 &\equiv \1 \\ \end{align*} \chapter{Grammar} diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index db4b32ec7..1a1a4fb47 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1163,6 +1163,7 @@ namespace spot // - 1 <>-> Exp = Exp // - [*0] <>-> Exp = 0 // - Exp <>-> 0 = 0 + // - [*] <>-> 1 = 1 // - boolExp <>-> Exp = boolExp & Exp if (first->is_tt()) return second; @@ -1172,7 +1173,7 @@ namespace spot second->destroy(); return ff(); } - if (second->is_ff()) + if (second->is_ff() || (second->is_tt() && first == one_star())) { first->destroy(); return second; @@ -1185,6 +1186,7 @@ namespace spot // - 1 []-> Exp = Exp // - [*0] []-> Exp = 1 // - Exp []-> 1 = 1 + // - [*] []-> 0 = 0 // - boolExp []-> Exp = !boolExp | Exp if (first->is_tt()) return second; @@ -1194,7 +1196,7 @@ namespace spot second->destroy(); return tt(); } - if (second->is_tt()) + if (second->is_tt() || (second->is_ff() && first == one_star())) { first->destroy(); return second; diff --git a/tests/core/equals.test b/tests/core/equals.test index b36f9edc3..ffc400c83 100755 --- a/tests/core/equals.test +++ b/tests/core/equals.test @@ -55,6 +55,8 @@ GGGGGx, Gx !!!!!x, !x {[*0];x}<>->1, {x}<>->1 {x;[*0]}<>->1, {x}<>-> 1 +{[*]}[]->0, 0 +{[*]}<>->1, 1 {[*0];x;[*0];[*0]}<>->1, {x}<>->1 {[*0];x;[*0];x;[*0]}<>->1, {x;x}<>->1 {x;x;x;[*0];x;x}<>->1, {x;x;x;x;x}<>->1 From 2bd2abd4c9de9b7f16bc679aed0db36645077b89 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 May 2024 10:20:45 +0200 Subject: [PATCH 447/606] pdegen & toparity: minor refactor * spot/twaalgos/degen.hh (is_partially_degeneralizable): Pass the forbid vector by reference, and document it. I hope that not passing forbid by copy will get rid of a spurious "potential nullptr" warning by gcc on Arch Linux. * spot/twaalgos/degen.cc: Adjust, and refactor the code a bit. * spot/twaalgos/toparity.cc: Likewise. 
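The by-reference `forbid` parameter is meant to support the following calling pattern, sketched here in condensed form (adapted from the toparity.cc loop further down in this patch; the state-count test below is only a stand-in for the more elaborate keep_deg criterion used there):

    // Condensed sketch of the intended calling pattern (illustration only).
    #include <spot/twaalgos/degen.hh>
    #include <spot/twaalgos/cleanacc.hh>

    spot::twa_graph_ptr degeneralize_greedily(spot::twa_graph_ptr aut)
    {
      std::vector<spot::acc_cond::mark_t> forbid;   // marks already rejected
      while (spot::acc_cond::mark_t m =
               spot::is_partially_degeneralizable(aut, true, true, forbid))
        {
          spot::twa_graph_ptr tmp = spot::partial_degeneralize(aut, m);
          spot::simplify_acceptance_here(tmp);
          if (tmp->num_states() <= aut->num_states())  // placeholder criterion
            aut = tmp;                                 // keep the improvement
          else
            forbid.emplace_back(m);                    // never retry this mark
        }
      return aut;
    }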
--- spot/twaalgos/degen.cc | 27 ++--- spot/twaalgos/degen.hh | 9 +- spot/twaalgos/toparity.cc | 218 +++++++++++++++++++------------------- 3 files changed, 126 insertions(+), 128 deletions(-) diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 897062cb3..77790c5d3 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -872,7 +872,7 @@ namespace spot acc_cond::mark_t is_partially_degeneralizable(const const_twa_graph_ptr& aut, bool allow_inf, bool allow_fin, - std::vector forbid) + const std::vector& forbid) { auto& code = aut->get_acceptance(); @@ -881,16 +881,19 @@ namespace spot acc_cond::mark_t res = {}; unsigned res_sz = -1U; - auto update = [&](const acc_cond::mark_t& m) + + auto keep_smallest_mark = [&](const acc_cond::mark_t& m) { + if (std::find(forbid.begin(), forbid.end(), m) != forbid.end()) + return false; unsigned sz = m.count(); if (sz > 1 && sz < res_sz) { res_sz = sz; res = m; } - // If we have found a pair to degeneralize, we - // won't find + // If we have found a pair to degeneralize, we won't find a + // smaller one. return res_sz == 2; }; @@ -906,22 +909,14 @@ namespace spot case acc_cond::acc_op::Fin: case acc_cond::acc_op::FinNeg: pos -= 2; - if (allow_fin) - { - auto m = code[pos].mark; - if (!std::count(forbid.begin(), forbid.end(), m) && update(m)) - return res; - } + if (allow_fin && keep_smallest_mark(code[pos].mark)) + return res; break; case acc_cond::acc_op::Inf: case acc_cond::acc_op::InfNeg: pos -= 2; - if (allow_inf) - { - auto m = code[pos].mark; - if (!std::count(forbid.begin(), forbid.end(), m) && update(m)) - return res; - } + if (allow_inf && keep_smallest_mark(code[pos].mark)) + return res; break; } } diff --git a/spot/twaalgos/degen.hh b/spot/twaalgos/degen.hh index 643c1d219..833157bfa 100644 --- a/spot/twaalgos/degen.hh +++ b/spot/twaalgos/degen.hh @@ -158,11 +158,14 @@ namespace spot /// /// The optional arguments \a allow_inf and \a allow_fin, can be set /// to false to disallow one type of match. + /// + /// If you need to disallow certain marks from being returned, pass + /// them in the \a forbid vector. SPOT_API acc_cond::mark_t is_partially_degeneralizable(const const_twa_graph_ptr& aut, - bool allow_inf = true, - bool allow_fin = true, - std::vector forbid = {}); + bool allow_inf = true, bool allow_fin = true, + const std::vector& + forbid = {}); /// \ingroup twa_algorithms /// \brief Propagate marks around the automaton diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index 960f4daaa..a8a42dc57 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -2198,17 +2198,17 @@ namespace spot return true; std::vector pairs; if (deg->acc().is_rabin_like(pairs)) - { - remove_duplicates(pairs); - if (pairs.size() < nb_col_orig) - return true; - } + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } if (deg->acc().is_streett_like(pairs)) - { - remove_duplicates(pairs); - if (pairs.size() < nb_col_orig) - return true; - } + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } return false; } @@ -2223,118 +2223,118 @@ namespace spot max_color_scc_ = 0; // If the sub_automaton is "empty", we don't need to apply an algorithm. 
if (sub_aut->num_edges() == 0) - { - apply_copy(sub_aut, {}, none_algo); - return; - } + { + apply_copy(sub_aut, {}, none_algo); + return; + } bool tried_emptiness = false; bool changed_structure = true; while (true) - { - auto cond_before_simpl = sub_aut->acc(); - if (opt_.acc_clean) - simplify_acceptance_here(sub_aut); - if (opt_.propagate_col) { - propagate_marks_here(sub_aut); + auto cond_before_simpl = sub_aut->acc(); if (opt_.acc_clean) simplify_acceptance_here(sub_aut); - } - if (opt_.datas && sub_aut->acc() != cond_before_simpl) - algo_used_ |= algorithm::ACC_CLEAN; - - if (opt_.parity_equiv || opt_.parity_prefix) - { - // If we don't try to find a parity prefix, we can stop - // to construct the tree when it has not parity shape. - zielonka_tree_options zopt = zielonka_tree_options::MERGE_SUBTREES - | zielonka_tree_options::CHECK_PARITY; - if (!opt_.parity_prefix) - zopt = zopt | zielonka_tree_options::ABORT_WRONG_SHAPE; - auto tree = zielonka_tree(sub_aut->acc(), zopt); - // If it is not parity shape, tree.nodes_ will be empty - if (tree.num_branches() != 0 && opt_.parity_equiv - && try_parity_equivalence(tree, sub_aut)) - return; - if (opt_.parity_prefix && try_parity_prefix(tree, sub_aut)) - return; - } - - if (changed_structure && opt_.parity_prefix_general - && try_parity_prefix_general(sub_aut)) - return; - - if (opt_.generic_emptiness && !tried_emptiness - && try_emptiness(sub_aut, tried_emptiness)) - return; - - // Buchi_type_to_buchi is more general that Rabin_to_buchi so - // we just call rabin_to_buchi if buchi_type_to_buchi is false. - if (!opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity - && opt_.rabin_to_buchi - && try_rabin_to_buchi(sub_aut)) - return; - - // As parity_type_to_parity is stronger, we don't - // try if this option is used. - if (opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity - && try_buchi_type(sub_aut)) - return; - - // We don't do it if parity_prefix_general is true as on a parity-type - // automaton parity_prefix_general removes all the transitions and - // we also get a parity-type automaton. - if (!opt_.parity_prefix_general && opt_.parity_type_to_parity - && try_parity_type(sub_aut)) - return; - - if (opt_.partial_degen - && is_partially_degeneralizable(sub_aut, true, true)) - { - auto deg = sub_aut; - std::vector forbid; - auto m = is_partially_degeneralizable(sub_aut, true, true, forbid); - bool changed = false; - while (m) - { - auto tmp = partial_degeneralize(deg, m); - simplify_acceptance_here(tmp); - if (keep_deg(deg, tmp)) + if (opt_.propagate_col) { - algo_used_ |= algorithm::PARTIAL_DEGEN; - deg = tmp; - changed = true; - changed_structure = true; + propagate_marks_here(sub_aut); + if (opt_.acc_clean) + simplify_acceptance_here(sub_aut); } - else - forbid.emplace_back(m); - m = is_partially_degeneralizable(deg, true, true, forbid); - } + if (opt_.datas && sub_aut->acc() != cond_before_simpl) + algo_used_ |= algorithm::ACC_CLEAN; - if (changed) - { - sub_aut = deg; - continue; - } + if (opt_.parity_equiv || opt_.parity_prefix) + { + // If we don't try to find a parity prefix, we can stop + // to construct the tree when it has not parity shape. 
+ zielonka_tree_options zopt = + zielonka_tree_options::MERGE_SUBTREES + | zielonka_tree_options::CHECK_PARITY; + if (!opt_.parity_prefix) + zopt = zopt | zielonka_tree_options::ABORT_WRONG_SHAPE; + auto tree = zielonka_tree(sub_aut->acc(), zopt); + // If it is not parity shape, tree.nodes_ will be empty + if (tree.num_branches() != 0 && opt_.parity_equiv + && try_parity_equivalence(tree, sub_aut)) + return; + if (opt_.parity_prefix && try_parity_prefix(tree, sub_aut)) + return; + } + + if (changed_structure && opt_.parity_prefix_general + && try_parity_prefix_general(sub_aut)) + return; + + if (opt_.generic_emptiness + && !tried_emptiness && try_emptiness(sub_aut, tried_emptiness)) + return; + + // Buchi_type_to_buchi is more general that Rabin_to_buchi so + // we just call rabin_to_buchi if buchi_type_to_buchi is false. + if (!opt_.buchi_type_to_buchi + && !opt_.parity_type_to_parity && opt_.rabin_to_buchi + && try_rabin_to_buchi(sub_aut)) + return; + + // As parity_type_to_parity is stronger, we don't + // try if this option is used. + if (opt_.buchi_type_to_buchi + && !opt_.parity_type_to_parity && try_buchi_type(sub_aut)) + return; + + // We don't do it if parity_prefix_general is true as on a parity-type + // automaton parity_prefix_general removes all the transitions and + // we also get a parity-type automaton. + if (!opt_.parity_prefix_general && opt_.parity_type_to_parity + && try_parity_type(sub_aut)) + return; + + if (opt_.partial_degen) + { + twa_graph_ptr deg = sub_aut; + std::vector forbid; + bool changed = false; + while (acc_cond::mark_t m = + is_partially_degeneralizable(deg, true, true, forbid)) + { + twa_graph_ptr tmp = partial_degeneralize(deg, m); + simplify_acceptance_here(tmp); + if (keep_deg(deg, tmp)) + { + algo_used_ |= algorithm::PARTIAL_DEGEN; + deg = tmp; + changed = true; + changed_structure = true; + } + else + { + forbid.emplace_back(m); + } + } + if (changed) + { + sub_aut = deg; + continue; + } + } + break; } - break; - } if (opt_.use_generalized_rabin) - { - auto gen_rab = to_generalized_rabin(sub_aut); - // to_generalized_rabin does not propagate original-states. - auto sub_aut_orig = + { + auto gen_rab = to_generalized_rabin(sub_aut); + // to_generalized_rabin does not propagate original-states. + auto sub_aut_orig = sub_aut->get_named_prop>("original-states"); - assert(sub_aut_orig); - auto orig = new std::vector(); - const auto sub_aut_num_states = sub_aut->num_states(); - orig->reserve(sub_aut_num_states); - gen_rab->set_named_prop("original-states", orig); - for (unsigned i = 0; i < sub_aut_num_states; ++i) - orig->push_back((*sub_aut_orig)[i]); - sub_aut = partial_degeneralize(gen_rab); - } + assert(sub_aut_orig); + auto orig = new std::vector(); + const auto sub_aut_num_states = sub_aut->num_states(); + orig->reserve(sub_aut_num_states); + gen_rab->set_named_prop("original-states", orig); + for (unsigned i = 0; i < sub_aut_num_states; ++i) + orig->push_back((*sub_aut_orig)[i]); + sub_aut = partial_degeneralize(gen_rab); + } std::vector pairs; algorithm algo = choose_lar(sub_aut->acc(), pairs, sub_aut->num_edges()); if (opt_.datas) From 913e807d66843c657c6d1b1066dfa3c9922e350a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 May 2024 12:17:17 +0200 Subject: [PATCH 448/606] stats: fix rounding issues Fixes #582. * spot/twaalgos/stats.cc: Add 0.5 to the result of bdd_satcountset() before truncating it. * NEWS: Mention the bug. 
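The effect of the missing rounding is easy to reproduce with a made-up value; the snippet below is an illustration only (not Spot code), using a hypothetical count returned for 4 satisfying assignments:

    #include <cassert>

    int main()
    {
      // bdd_satcountset() returns a double; an exact count of 4 may come
      // back as 3.9999999996.  Plain conversion truncates and loses a
      // transition; adding 0.5 first rounds to the nearest integer, which
      // is the fix applied to stats.cc below.
      double satcount = 3.9999999996;               // hypothetical value
      unsigned long long truncated = satcount;      // == 3
      unsigned long long rounded = satcount + 0.5;  // == 4
      assert(truncated == 3 && rounded == 4);
    }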
--- NEWS | 3 +++ spot/twaalgos/stats.cc | 15 ++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 33a40d185..48ebbdb15 100644 --- a/NEWS +++ b/NEWS @@ -325,6 +325,9 @@ New in spot 2.11.6.dev (not yet released) - The configure script failed to detect the include path for Python 3.12. (Issue #577.) + - Work around many failures caused by incorrect rounding of floating + point values in the counting of transitions. (Issue #582) + - Some incorrectly escaped strings in Python code were causing warnings with Python 3.12. diff --git a/spot/twaalgos/stats.cc b/spot/twaalgos/stats.cc index 4b3b6185c..c8f45f634 100644 --- a/spot/twaalgos/stats.cc +++ b/spot/twaalgos/stats.cc @@ -35,7 +35,10 @@ namespace spot unsigned long long tr = 0; bdd v = g->ap_vars(); for (auto& e: g->edges()) - tr += bdd_satcountset(e.cond, v); + // We add 0.5 to work around rounding errors in the computation + // of bdd_satcountset(), as the conversion is done by + // truncation. See issue #582. + tr += 0.5 + bdd_satcountset(e.cond, v); return tr; } @@ -79,7 +82,10 @@ namespace spot const twa_succ_iterator* it) override { ++s_.edges; - s_.transitions += bdd_satcountset(it->cond(), apvars_); + // We add 0.5 to work around rounding errors in the + // computation of bdd_satcountset(), as the conversion is done + // by truncation. See issue #582. + s_.transitions += 0.5 + bdd_satcountset(it->cond(), apvars_); } private: @@ -182,7 +188,10 @@ namespace spot [&s, &ge](bdd cond) { ++s.edges; - s.transitions += bdd_satcountset(cond, ge->ap_vars()); + // We add 0.5 to work around rounding errors in the + // computation of bdd_satcountset(), as the conversion + // is done by truncation. See issue #582. + s.transitions += 0.5 + bdd_satcountset(cond, ge->ap_vars()); }); } return s; From 532b57d0df8b6d61c8655f458ee46401640a70b7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 May 2024 15:19:37 +0200 Subject: [PATCH 449/606] Release spot 2.12 * NEWS, configure.ac, doc/org/setup.org: Bump version to 2.12. --- NEWS | 12 ++++++------ configure.ac | 2 +- doc/org/setup.org | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index 48ebbdb15..75eeaab61 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.11.6.dev (not yet released) +New in spot 2.12 (2024-05-16) Build: @@ -19,8 +19,8 @@ New in spot 2.11.6.dev (not yet released) to produce the 0-based serial number of the produced object. This differs from the existing '%L' that is usually related to the line number of the input (when that makes sense). For instance to - split a file that contains many automata into several files, one - per automaton, do + split a file that contains many automata into one file per + automaton, do autfilt input.hoa -o output-%l.hoa @@ -50,7 +50,7 @@ New in spot 2.11.6.dev (not yet released) ltlsynt --ins='/^in/,/env/' --outs=/^out/,/control/' ... any atomic proposition that starts with 'in' or contains 'env' - will be considered as inputs, and those that start with 'out' + will be considered as input, and one that starts with 'out' or contain 'control' will be considered output. By default, if neither --ins nor --outs is given, ltlsynt will @@ -149,7 +149,7 @@ New in spot 2.11.6.dev (not yet released) spot::contains_forq() implementation will be used instead when applicable (inclusion between Büchi automata). - The above also impacts autfilt --included-in option. + The above also impacts autfilt's --included-in option. 
- spot::reduce_buchi_acceptance_set_here() and spot::enlarge_buchi_acceptance_set_here() will heuristically @@ -170,7 +170,7 @@ New in spot 2.11.6.dev (not yet released) - scc_filter used to reduce automata tagged with the inherently-weak property to weak Büchi automata (unless the acceptance was already - t or co-Büchi). In case where the input automaton had no + t or co-Büchi). In cases where the input automaton had no rejecting cycle, the Büchi acceptance was overkill: scc_filter will now use "t" acceptance. This change may have unexpected consequences in code paths that assume running scc_filter on a diff --git a/configure.ac b/configure.ac index 209153007..efbd45695 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.11.6.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 772ff2cc0..440dcfa00 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: LASTDATE 2023-08-01 +#+MACRO: LASTDATE 2024-05-14 #+NAME: SPOT_VERSION #+BEGIN_SRC python :exports none :results value :wrap org -return "2.11.6" +return "2.12" #+END_SRC #+NAME: TARBALL_LINK @@ -15,7 +15,7 @@ return "2.11.6" #+NAME: NEWS_LINK #+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org - version = version.replace('.', '-') + version = str(version).replace('.', '-') print(f"[[https://gitlab.lre.epita.fr/spot/spot/blob/spot-{version}/NEWS][summary of the changes]]") #+END_SRC From c44e03c79133d96e8bbeb3560d745bf77def43fc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 14 May 2024 15:21:02 +0200 Subject: [PATCH 450/606] * NEWS, configure.ac: Bump version to 2.12.0.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 75eeaab61..c0ab55c77 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.12.0.dev (not yet released) + + Nothing yet. + New in spot 2.12 (2024-05-16) Build: diff --git a/configure.ac b/configure.ac index efbd45695..4ec7633e4 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.12], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12.0.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 799db051116688ae7ad9d9b796024cf590f2ced7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 16 May 2024 22:40:52 +0200 Subject: [PATCH 451/606] * doc/org/tut25.org: Fix typos in the example. 
--- doc/org/tut25.org | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/org/tut25.org b/doc/org/tut25.org index 87ffd544d..7005bd0de 100644 --- a/doc/org/tut25.org +++ b/doc/org/tut25.org @@ -40,9 +40,9 @@ digraph "" { #+begin_example s₁ ℓ₁,s₁->s₂ -ℓ₃,s2->s1 -ℓ₂,s2->s3 -ℓ₃,s3->s1 +ℓ₃,s₂->s₁ +ℓ₂,s₂->s₃ +ℓ₃,s₃->s₁ s₁ s₂ #+end_example From 6420bde5fd16340e1837aed97084df9bb40c5d6e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 20 May 2024 16:11:18 +0200 Subject: [PATCH 452/606] debian: drop ipython3-notebook dependency * debian/control: Remove the ipython3-notebook alternate build-depends, as this package was removed from Debian in 2016. Add gdb ad build-depends, in attempt to fix some OSB builds. --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index e29454c54..2fdb354ef 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: spot Section: science Priority: optional Maintainer: Alexandre Duret-Lutz -Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen +Build-Depends: debhelper (>= 12), python3-all-dev, python3-ipykernel, python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen, gdb Standards-Version: 4.5.1 Homepage: http://spot.lrde.epita.fr/ From fdb09f787e6dfc43eddba53541c7f671241f30e0 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 20 May 2024 16:15:57 +0200 Subject: [PATCH 453/606] * spot.spec.in: Update URL and description. --- spot.spec.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spot.spec.in b/spot.spec.in index 714d8589e..bd465fd62 100755 --- a/spot.spec.in +++ b/spot.spec.in @@ -1,10 +1,10 @@ Name: spot Version: @VERSION@ Release: @GITPATCH@%{?dist} -Summary: Model checking and omega-automata manipulation library +Summary: C++17 library for LTL, omega-automata manipulation, model checking License: GPLv3 -URL: https://spot.lrde.epita.fr +URL: https://spot.lre.epita.fr Source0: http://www.lrde.epita.fr/dload/spot/%{name}-%{version}.tar.gz From c7c18db6db17f3f25b6fabbf2d243fcd67283ebb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 27 Jun 2024 15:39:15 +0200 Subject: [PATCH 454/606] work around GCC bug 108860 GCC 12/13/14 can emit spurious warnings for something as innocent as vec.insert(vec.begin(), 12)... Reported by Antoine Martin and Quentin Rataud. * m4/gccwarn.m4: Test the above code and disable GCC's -Wnull-dereference warning if necessary. 
--- m4/gccwarn.m4 | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/m4/gccwarn.m4 b/m4/gccwarn.m4 index 13f770ccc..091006af7 100644 --- a/m4/gccwarn.m4 +++ b/m4/gccwarn.m4 @@ -21,6 +21,7 @@ AC_DEFUN([CF_GXX_WARNINGS], cat > conftest.$ac_ext < +#include #include // From GCC bug 106159 @@ -29,6 +30,13 @@ struct left { virtual ~left() {} }; struct right { virtual ~right() {} }; struct both: public left, public right {}; +// For GCC bug 108860 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108860 +void test(std::vector& v) +{ + v.insert(v.begin(), 12); +} + int main(int argc, char *argv[[]]) { // This string comparison is here to detect superfluous From 2274308cad433d89188953f24163bb2e141ce4f7 Mon Sep 17 00:00:00 2001 From: Florian Renkin Date: Fri, 17 May 2024 13:13:59 +0000 Subject: [PATCH 455/606] reduce_mealy_here: do not reduce when size is 1 * spot/twaalgos/mealy_machine.cc: here --- spot/twaalgos/mealy_machine.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 055cb511e..e63193cdc 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -800,6 +800,9 @@ namespace spot { ensure_mealy("reduce_mealy_here", mm); + if (mm->num_states() == 1) + return; + // Only consider infinite runs mm->purge_dead_states(); From 5ddac258e1d536c8d61637a2581d51fe35813d19 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber Date: Sun, 3 Mar 2024 22:15:27 +0100 Subject: [PATCH 456/606] Introduce new ways to split an automaton The explicit way of splitting suffers if there are too many input APs, two new ways of splitting are introduced as well as a heuristic to chose between them. * NEWS: update * spot/twaalgos/synthesis.cc, spot/twaalgos/synthesis.hh: New fonctions * bin/ltlsynt.cc: Add corresponding option * tests/core/gamehoa.test, tests/core/ltlsynt.test, tests/python/_partitioned_relabel.ipynb, tests/python/_synthesis.ipynb, tests/python/game.py, tests/python/split.py, tests/python/synthesis.py: Adjusting and adding test --- NEWS | 9 + bin/ltlsynt.cc | 27 +- spot/twaalgos/synthesis.cc | 1006 ++++++++++++++++++++++- spot/twaalgos/synthesis.hh | 138 ++-- tests/core/gamehoa.test | 6 +- tests/core/ltlsynt.test | 23 +- tests/python/_partitioned_relabel.ipynb | 34 +- tests/python/_synthesis.ipynb | 68 +- tests/python/game.py | 23 +- tests/python/split.py | 166 ++++ tests/python/synthesis.py | 10 +- 11 files changed, 1372 insertions(+), 138 deletions(-) diff --git a/NEWS b/NEWS index c0ab55c77..957b9b254 100644 --- a/NEWS +++ b/NEWS @@ -86,6 +86,15 @@ New in spot 2.12 (2024-05-16) Library: + - split_2step has now multiple ways to split for improved performance. + The option can be controlled via the synthesis_info::splittype + - EXPL: explicit splitting of each state as before + - SEMISYM: The outgoing transition of each state are encoded + as a bdd; Works better for larger number of input APs + - FULLYSYM: The automaton is first translated into a + fully symbolic version, then split. + - AUTO: Let the heuristic decide what to do. 
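From library code, the split strategy is chosen through synthesis_info; the sketch below is hypothetical glue code assuming the synthesis_info-taking overload of split_2step() that ltlsynt.cc switches to in this patch, and an automaton whose output propositions were already registered with set_synthesis_outputs():

    // Sketch only: pick the semi-symbolic split before building the game.
    #include <spot/twaalgos/synthesis.hh>

    spot::twa_graph_ptr make_game(const spot::twa_graph_ptr& aut)
    {
      spot::synthesis_info gi;
      gi.sp = spot::synthesis_info::splittype::SEMISYM;  // EXPL, FULLYSYM, AUTO
      return spot::split_2step(aut, gi);  // game with "state-player" property
    }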
+ - The following new trivial simplifications have been implemented for SEREs: - f|[+] = [+] if f rejects [*0] - f|[*] = [*] if f accepts [*0] diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 24f4af16a..456d2cb41 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -63,6 +63,7 @@ enum OPT_PRINT_HOA, OPT_REAL, OPT_SIMPLIFY, + OPT_SPLITTYPE, OPT_TLSF, OPT_VERBOSE, OPT_VERIFY @@ -118,6 +119,9 @@ static const argp_option options[] = "reduction with output assignment, (sat) SAT-based minimization, " "(bisim-sat) SAT after bisim, (bwoa-sat) SAT after bwoa. Defaults " "to 'bwoa'.", 0 }, + { "splittype", OPT_SPLITTYPE, "expl|semisym|fullysym|auto", 0, + "Selects the algorithm to use to transform the automaton into " + "a game graph. Defaults to 'auto'.", 0}, /**************************************************/ { nullptr, 0, nullptr, 0, "Output options:", 20 }, { "print-pg", OPT_PRINT, nullptr, 0, @@ -295,6 +299,23 @@ static unsigned simplify_values[] = }; ARGMATCH_VERIFY(simplify_args, simplify_values); +static const char* const splittype_args[] = + { + "expl", + "semisym", + "fullysym", + "auto", + nullptr + }; +static spot::synthesis_info::splittype splittype_values[] = + { + spot::synthesis_info::splittype::EXPL, + spot::synthesis_info::splittype::SEMISYM, + spot::synthesis_info::splittype::FULLYSYM, + spot::synthesis_info::splittype::AUTO, + }; +ARGMATCH_VERIFY(splittype_args, splittype_values); + namespace { static bool want_game() @@ -909,7 +930,7 @@ namespace return 2; } if (!arena->get_named_prop>("state-player")) - arena = spot::split_2step(arena, true); + arena = spot::split_2step(arena, gi); else { // Check if the game is alternating and fix trivial cases @@ -1127,6 +1148,10 @@ parse_opt(int key, char *arg, struct argp_state *) gi->minimize_lvl = XARGMATCH("--simplify", arg, simplify_args, simplify_values); break; + case OPT_SPLITTYPE: + gi->sp = XARGMATCH("--splittype", arg, + splittype_args, splittype_values); + break; case OPT_TLSF: jobs.emplace_back(arg, job_type::TLSF_FILENAME); break; diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 2928a642d..3c93137dc 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -32,9 +32,15 @@ #include #include #include +#include +#include #include #include +#include +#ifndef NDEBUG +#include +#endif // Helper function/structures for split_2step namespace{ using namespace spot; @@ -440,9 +446,11 @@ namespace{ namespace spot { + namespace + { twa_graph_ptr - split_2step(const const_twa_graph_ptr& aut, - const bdd& output_bdd, bool complete_env) + split_2step_expl_impl(const const_twa_graph_ptr& aut, + const bdd& output_bdd, bool complete_env) { assert(!aut->get_named_prop("state-player") && "aut is already split!"); @@ -681,14 +689,961 @@ namespace spot // Done return split; + } // End split old impl + + std::vector + do_bin_encode_(unsigned N, int var0, unsigned Nvar) + { + auto bddvec = std::vector>(Nvar); + for (unsigned i = 0; i < Nvar; ++i) + bddvec[i] = {bdd_nithvar(var0+i), bdd_ithvar(var0+i)}; + + auto do_bin_encode_1 = [&bddvec, Nvar](unsigned s) -> bdd { + bdd res = bddtrue; + + for (unsigned i = 0; i < Nvar; ++i) + { + res &= bddvec[i][s&1]; + s >>= 1; + } + return res; + }; + + auto s2bdd = std::vector(N); + for (unsigned s = 0; s < N; ++s) + s2bdd[s] = do_bin_encode_1(s); + + return s2bdd; + } + + struct bitVectDecodeIterator{ + const std::vector& v_; + unsigned u_, idx_ = 0u, vsize_; + bool first_ = true; + + bitVectDecodeIterator(const std::vector& v, unsigned init) + : v_{v} + , u_{init} + 
, vsize_(v_.size()) + { + } + + // Sets to zero all variable bits before the current idx + void small_reset_() noexcept { + // Skip current idx + for (--idx_; idx_!= -1u; --idx_) + u_ ^= (1u << v_[idx_]); + idx_ = 0; + } + + bitVectDecodeIterator& operator++() noexcept { + first_ = false; + // Search for the next variable bit to increase + while (idx_ < vsize_) + { + auto curr = 1u << v_[idx_]; + if (!(u_ & curr)){ + u_ |= curr; + small_reset_(); + return *this; + } + ++idx_; + } + return *this; + } + + unsigned operator*() const noexcept { + return u_; + } + + explicit operator bool() const noexcept{ + // There is always at least one element + return idx_ < vsize_ || first_; + } + + }; + + class bitVectDecodeRange + { + private: + const std::vector& v_; + const unsigned initval_; + + public: + bitVectDecodeRange(const std::vector& v, unsigned init) + : v_{v} + , initval_{init} + { + } + + auto + begin() const + { + return bitVectDecodeIterator(v_, initval_); + } + + }; + + template + twa_graph_ptr + split_2step_sym_impl(const const_twa_graph_ptr& aut, + const bdd& output_bdd, bool complete_env) + { + + assert(!aut->get_named_prop("state-player") + && "aut is already split!"); + + auto split = make_twa_graph(aut->get_dict()); + + auto [has_unsat, unsat_mark] = aut->acc().unsat_mark(); + bool max_par, odd_par, color_env; + color_env = aut->acc().is_parity(max_par, odd_par, true); + const unsigned Ncolor = aut->acc().all_sets().max_set(); + + split->copy_ap_of(aut); + split->new_states(aut->num_states()); + split->set_init_state(aut->get_init_state_number()); + set_synthesis_outputs(split, output_bdd); + //split->prop_copy(aut, twa::prop_set::all()); // todo why? + + const auto use_color = has_unsat; + color_env &= use_color; + if (has_unsat) + split->copy_acceptance_of(aut); + else + { + if (complete_env) + { + split->set_co_buchi(); // Fin(0) + unsat_mark = acc_cond::mark_t({0}); + has_unsat = true; + } + else + split->acc().set_acceptance(acc_cond::acc_code::t()); + } + + // Reserve all the necessary variables + const unsigned N = split->num_states(); + const unsigned Nstvars = std::ceil(std::log2(N)); + // we use one hot encoding for colors + constexpr unsigned Ncolorvars = SPOT_MAX_ACCSETS; + // Last one is for no color + + auto var_in = std::vector(); + auto var_out = std::vector(); + + { + bdd allbdd = split->ap_vars(); + while (allbdd != bddtrue) + { + int lvar = bdd_var(allbdd); + bdd l = bdd_ithvar(lvar); + if (bdd_implies(output_bdd, l)) + var_out.push_back(lvar); + else + var_in.push_back(lvar); + allbdd = bdd_high(allbdd); + assert(allbdd != bddfalse); + } + } + + const unsigned Nin = var_in.size(); + const unsigned Nout = var_out.size(); + + // Register the vars + // Need to be released + // Two possibilities for the need of variables: + // 1) FULLYSYM == false + // in conditions, (dst) states, color x out + // [(dst) states, color x out] is a player state + // 2) FULLYSYM == true + // (src) states, in conditions, (dst) states, color x out + // [(dst) states, color x out] is a player state + + int zeroIdx = aut->get_dict() + ->register_anonymous_variables(Nstvars*(1+FULLYSYM)+Nout + +Nin+Ncolorvars, &N); + + int srcStIdx = zeroIdx; + int inIdx = srcStIdx + Nstvars*FULLYSYM; + int dstStIdx = inIdx + Nin; + int colorIdx = dstStIdx + Nstvars; + int outIdx = colorIdx + Ncolorvars; + + // Construct the pairs + bddPair* replace_fwd = bdd_newpair(); + bddPair* replace_bkwd = bdd_newpair(); + bddPair* replace_in_fwd = bdd_newpair(); + bddPair* replace_in_bkwd = bdd_newpair(); + 
bddPair* replace_out_fwd = bdd_newpair(); + bddPair* replace_out_bkwd = bdd_newpair(); + + if (not replace_fwd || not replace_in_fwd || not replace_out_fwd + || not replace_bkwd || not replace_in_bkwd || not replace_out_bkwd) + throw std::runtime_error("split_2step(): bddpair alloc error."); + + { // Map old and contiguous inputs and outputs + auto var_new = std::vector(Nin); + std::iota(var_new.begin(), var_new.end(), inIdx); + bdd_setpairs(replace_fwd, var_in.data(), var_new.data(), Nin); + bdd_setpairs(replace_in_fwd, var_in.data(), var_new.data(), Nin); + bdd_setpairs(replace_bkwd, var_new.data(), var_in.data(), Nin); + bdd_setpairs(replace_in_bkwd, var_new.data(), var_in.data(), Nin); + + var_new.resize(Nout); + std::iota(var_new.begin(), var_new.end(), outIdx); + bdd_setpairs(replace_fwd, var_out.data(), var_new.data(), Nout); + bdd_setpairs(replace_out_fwd, var_out.data(), var_new.data(), Nout); + bdd_setpairs(replace_bkwd, var_new.data(), var_out.data(), Nout); + bdd_setpairs(replace_out_bkwd, var_new.data(), var_out.data(), Nout); + } + + // Encode states -> binary encoding (gray code for faster encode?) + auto dstEnvs2bdd = do_bin_encode_(N, dstStIdx, Nstvars); + //Source states are only needed once + + // Last bdd is no color + auto color2bdd = std::vector>(Ncolorvars); + for (int i = 0; i < (int) Ncolorvars; ++i) + color2bdd[i] = {bdd_nithvar(colorIdx + i), bdd_ithvar(colorIdx + i)}; + + // There are no colors -> All False + const bdd noColorBdd + = std::accumulate(color2bdd.begin(), color2bdd.end(), + (bdd) bddtrue, + [](const bdd& l, const auto& r) -> bdd + {return l & r[0]; }); + + // Each player state corresponds to a set of (dst_state, colors, outs) + // We also store the "least accepting" color + auto playbdd2st + = std::unordered_map, + bdd_hash>(); + playbdd2st.reserve(N); + + // Encode (in, out, state) and split< + auto invar2bdd = std::vector>(Nin); + for (int i = 0; i < (int) Nin; ++i) + invar2bdd[i] = {bdd_nithvar(inIdx+i), bdd_ithvar(inIdx+i)}; + + enum class ctask{ + PUT = 0, + VISIT, + POP + }; + + // Fwd map complete condition + // We could work with int, the bdd will stay in the graph + auto fwd_comp_repl = std::unordered_map(); + + // Encode a single edge in from aut with the new variables + auto encode_edge = [&](const auto& e) -> bdd + { + // Build color cond + // No color -> No bdd + bdd color_cond = noColorBdd; + if (use_color && e.acc != acc_cond::mark_t{}) + { + color_cond = bddtrue; + + for (unsigned acolor = 0; acolor < Ncolor; ++acolor) + color_cond &= color2bdd[acolor][e.acc.has(acolor)]; + } + // The whole edge; Order the and? N-ary and? + + auto [itc, insc] + = fwd_comp_repl.try_emplace(e.cond, bddfalse); + if (insc) + itc->second = bdd_replace(e.cond, replace_fwd); + return itc->second & color_cond & dstEnvs2bdd[e.dst]; + }; + + auto abstract_traverse + = [](auto& stack, auto&& fput, auto&& fpop, auto&& fvisit) -> void + { + while (not stack.empty()) + { + auto [ct, current] = std::move(stack.back()); + + stack.pop_back(); + + switch (ct) + { + case ctask::PUT: + fput(current); + break; + case ctask::POP: + fpop(current); + break; + case ctask::VISIT: + fvisit(current); + break; + } + } + }; + + auto abstract_put = [](auto& stack, const bdd& ccond, auto&& metaf) + { + for (int polprime : {0, 1}) + { + bdd cprime = polprime == 0 ? 
bdd_low(ccond) : bdd_high(ccond); + + if (cprime != bddfalse) + { + stack.emplace_back(ctask::POP, metaf(polprime)); + stack.emplace_back(ctask::VISIT, cprime); + stack.emplace_back(ctask::PUT, metaf(polprime)); + } + } + }; + + // Bkwd replace map + auto bkwd_out_repl = std::unordered_map(); + + // Final step construct colors and conditions + // cond is a bdd over color variables and new outputs + auto construct_colorcond + = [&](bdd cond) + { + // We need to do a final traversal of the color + // It is similar to the traversal of the states + + // The result + auto all_comb = std::vector>(); + + // int[2] is relative lvl and polarity + using stack_type = std::variant; + auto stack_colors = std::vector>(); + // Variables that do not appear can take both values + auto current_colors = acc_cond::mark_t{}; + + + auto fputCC = [&](const stack_type& ccond) -> void + { + auto lvl = std::get(ccond); + //if (lvl != Ncolorvars - 1) + // current_colors.set(lvl); // One hot + assert(lvl < Ncolorvars || lvl == -1u); + if (lvl != -1u) + current_colors.set(lvl); // One hot + }; + + auto fpopCC = [&](const stack_type& ccond) -> void + { + auto lvl = std::get(ccond); + //if (lvl != Ncolorvars - 1) + // current_colors.clear(lvl); // One cold + assert(lvl < Ncolorvars || lvl == -1u); + if (lvl != -1u) + current_colors.clear(lvl); // One cold + }; + + auto fvisitCC = [&](const stack_type& ccondin) -> void + { + bdd ccond = std::get(ccondin); + //if (ccond == bddfalse) + // return; // Nothing to do + + int clvl = ccond == bddtrue ? outIdx : bdd_var(ccond); + // We either have a out condition or true + if (clvl >= outIdx) + { + // We have found a new color comb + // Leading to ccond -> add + auto [itc, insc] + = bkwd_out_repl.try_emplace(ccond, bddfalse); + if (insc) + itc->second = bdd_replace(ccond, + replace_out_bkwd); + all_comb.emplace_back(current_colors, + itc->second); + + } + else + { + int rel_lvl = clvl - colorIdx; + // If the no color mark appears -> mark must be empty + auto metaf = [&, ulvl = (unsigned) rel_lvl](int pol) + { + // If the polarity is negative ("one cold") + // Then ignore it + assert(!pol || !current_colors.has(ulvl)); + return pol == 0 ? 
-1u : ulvl; + }; + abstract_put(stack_colors, ccond, metaf); + } + + }; + + stack_colors.emplace_back(ctask::VISIT, cond); + abstract_traverse(stack_colors, fputCC, fpopCC, fvisitCC); + + return all_comb; + }; + + + // The condition contains variables of dst_state, color x cond + // In a much similar manner we need to traverse the states, as we traversed + // the inputs + // Mapping bdd(color x new outs) -> [mark x old outs] + auto bdd2colorCond + = std::unordered_map>, + bdd_hash>(); + + struct unsigedItDescr { + unsigned val; + std::array canChange; + std::vector idx; + + unsigedItDescr() + : val{0u} + , idx{32, -1u} + { + canChange.fill(true); + } + }; + + auto construct_ply_state + = [&](bdd cond) -> std::pair + { + + // Needed to determine "least" accepting color for this state + // That is the color that we can put on all incoming transitions + auto thiscolor = acc_cond::mark_t{}; + bool has_uncolored = false; + unsigned thisstate = split->new_state(); + + // int[2] is relative lvl and polarity + using stack_type = std::variant>; + auto stack_states = std::vector>(); + auto current_dst_states = unsigedItDescr{}; + + auto fputPlySt + = [¤t_dst_states](const stack_type& ccond) -> void + { + assert((std::holds_alternative>(ccond))); + auto [lvl, pol] = std::get>(ccond); + // Fix the corresponding bit + // Not changeable + current_dst_states.canChange[lvl] = false; + if (pol) + // Set the bit true + current_dst_states.val |= (1u << lvl); + else + // Unset it + current_dst_states.val &= ~(1u << lvl); + }; + + auto fpopPlySt + = [¤t_dst_states](const stack_type& ccond) -> void + { + assert((std::holds_alternative>(ccond))); + // We need to unset the bit and mark it as changeable + auto lvl = std::get>(ccond)[0]; + current_dst_states.val &= ~(1u << lvl); + current_dst_states.canChange[lvl] = true; + }; + + auto fvisitPlySt = [&](const stack_type& ccondin) -> void + { + assert(std::holds_alternative(ccondin)); + const bdd& ccond = std::get(ccondin); + int clvl = ccond == bddtrue ? colorIdx : bdd_var(ccond); + if (clvl >= colorIdx) + { + // We have found a new "cube of states" + // Leading to ccond + auto [it_cc, ins_cc] + = bdd2colorCond.try_emplace(ccond, + std::vector< + std::pair>()); + if (ins_cc) + it_cc->second = construct_colorcond(ccond); + + // Loop over all the states in the "cube" + //auto state_range = bitVectDecodeRange(current_dst_states); + //for (auto it_s = state_range.begin(); (bool) it_s; ++it_s) + // Get all the modifiable idx + current_dst_states.idx.clear(); + for (unsigned i = 0; i < Nstvars; ++i) + if (current_dst_states.canChange[i]) + current_dst_states.idx.push_back(i); + // Loop over them + auto state_range = bitVectDecodeRange(current_dst_states.idx, + current_dst_states.val); + for (auto it_s = state_range.begin(); (bool) it_s; ++it_s) + // Loop over all edges + for (const auto& [acolor, acond] : it_cc->second) + { + split->new_edge(thisstate, *it_s, acond, acolor); + // Update color + thiscolor |= acolor; + has_uncolored |= !acolor; + } + + } + else + { + int rel_lvl = clvl - dstStIdx; + auto metaf = [rel_lvl](int pol) + { + return std::array{rel_lvl, pol}; + }; + abstract_put(stack_states, ccond, metaf); + } + + }; + + stack_states.emplace_back(ctask::VISIT, cond); + abstract_traverse(stack_states, fputPlySt, fpopPlySt, fvisitPlySt); + + // Adjust the color depending on options and acceptance conditions + // Todo: check if dead ends are treated correctly + if (!color_env | has_uncolored) + // Do something smart for TELA? 
+ thiscolor = acc_cond::mark_t({}); + else if (max_par) + thiscolor = + acc_cond::mark_t({thiscolor.min_set()-1}); + else // min_par + thiscolor = + acc_cond::mark_t({thiscolor.max_set()-1}); + + return std::make_pair(thiscolor, thisstate); + }; + + // Fwd map for replacing + // Todo is this a good idea? + auto bkwd_in_repl = std::unordered_map(); + + auto stack_inputs = std::vector>(); + + bdd current_in = bddtrue; + + // Define the abstract traverse + auto fputInTrav = [¤t_in](const bdd& ccond) -> void + { + current_in &= ccond; + }; + + auto fpopInTrav = [¤t_in](const bdd& ccond) -> void + { + // At the end, exist is cheap (I hope) + current_in = bdd_exist(current_in, ccond); + }; + + + + unsigned sink_env = -1u; + + if constexpr (FULLYSYM) + { + // First we need to encode the complete automaton + // Create the symbolic aut + // To avoid reencoding, swap + bddPair* replace_dstSt_srcSt = bdd_newpair(); + { + auto varSrc = std::vector(Nstvars); + auto varDst = std::vector(Nstvars); + std::iota(varSrc.begin(), varSrc.end(), srcStIdx); + std::iota(varDst.begin(), varDst.end(), dstStIdx); + bdd_setpairs(replace_dstSt_srcSt, varDst.data(), + varSrc.data(), Nstvars); + } + auto getSrc = [&](unsigned s) + {return bdd_replace(dstEnvs2bdd[s], replace_dstSt_srcSt); }; + + bdd sym_aut = bddfalse; + for (unsigned s = 0; s < N; ++s) + { + bdd enc_out_s = bddfalse; + for (const auto& e : aut->out(s)) + enc_out_s |= encode_edge(e); + sym_aut |= getSrc(s)&enc_out_s; + } + + // Define how to construct an extended player state + // An extended player is constructing the list + // of (player state, input condition) from a bdd + // containing (in const, dst state, color cond) + // This function needs to traverse the incondition + // put and pop can be reused + auto construct_ext_ply_state + = [&](auto& plystatedict, const bdd& ccond) + { + current_in = bddtrue; + + auto& [plyconddict, plycondvect] = plystatedict; + + auto fvisitInTrav + = [&](const bdd& ccond) -> void + { + + int clvl = bdd_var(ccond); + assert(clvl >= inIdx); + if (clvl >= dstStIdx) // States come after input + { + // We have found a new in cube + // Add to the existing ones if necessary + // Translate to old variables + auto [itc, insc] + = bkwd_in_repl.try_emplace(current_in, bddfalse); + if (insc) + itc->second = bdd_replace(current_in, replace_in_bkwd); + const bdd& current_in_old = itc->second; + + // treat it + auto [it_s, ins_s] + = playbdd2st.try_emplace( + ccond, + std::make_pair(acc_cond::mark_t{}, + -1u)); + if (ins_s) + // A new player state and the corresponding least + // accepting color + it_s->second = construct_ply_state(ccond); + + // Add the condition + auto [it_e, ins_e] + = plyconddict.try_emplace(ccond, -1u); + // Add the input + if (ins_e) + { + it_e->second = plycondvect.size(); + plycondvect.emplace_back(ccond, bddfalse); + } + // The second is the in + plycondvect[it_e->second].second |= current_in_old; + assert(plycondvect[it_e->second].second != bddfalse + && "bddfalse is not allowed as condition"); + } + else + { + auto metaf = [&bddopts = invar2bdd[clvl - inIdx]](int pol) + { + return bddopts[pol]; + }; + abstract_put(stack_inputs, ccond, metaf); + } + }; + + // Do the actual visit + assert(stack_inputs.empty()); + stack_inputs.emplace_back(ctask::VISIT, ccond); + abstract_traverse(stack_inputs, fputInTrav, + fpopInTrav, fvisitInTrav); + }; // construct_ext_ply_state + + // What we want is + // dict[bdd (in, dst, cc) -> dict[ ply state bdd -> input bdd]] + // However this is not possible as it would possibly + 
// reorder the transitions + // So we need an additional vector and idx only into it + // The vector holds the player state cond + // (same as the key of the unordered_map) + // To ensure efficient iteration + auto ext_ply_dict + = std::unordered_map, + std::vector>>, bdd_hash>(); + // bdd over new variables -> bdd over old variables, player state + + // Vist the src states + using stack_type = std::variant>; + auto stack_states = std::vector>(); + // Variables that do not appear can take both values + auto current_src_states = unsigedItDescr{}; + + auto fputSrcSt + = [¤t_src_states](const stack_type& ccond) -> void + { + assert((std::holds_alternative>(ccond))); + auto [lvl, pol] = std::get>(ccond); + // Fix the corresponding bit + // Not changeable + current_src_states.canChange[lvl] = false; + if (pol) + // Set the bit true + current_src_states.val |= (1u << lvl); + else + // Unset it + current_src_states.val &= ~(1u << lvl); + }; + + auto fpopSrcSt + = [¤t_src_states](const stack_type& ccond) -> void + { + assert((std::holds_alternative>(ccond))); + // We need to unset the bit and mark it as changeable + auto lvl = std::get>(ccond)[0]; + current_src_states.val &= ~(1u << lvl); + current_src_states.canChange[lvl] = true; + }; + + auto fvisitSrcSt = [&](const stack_type& ccondin) -> void + { + assert(std::holds_alternative(ccondin)); + const bdd& ccond = std::get(ccondin); + int clvl = ccond == bddtrue ? inIdx : bdd_var(ccond); + if (clvl >= inIdx) + { + // We have found a new "cube of states" + // Leading to ccond + auto [it_cc, ins_cc] + = ext_ply_dict.try_emplace( + ccond, + decltype(ext_ply_dict)::mapped_type{}); + if (ins_cc) + // Construct "in place" + construct_ext_ply_state(it_cc->second, ccond); + + assert(!it_cc->second.second.empty() + && "Dead ends should not be splitted"); + + // Get all the modifiable idx + current_src_states.idx.clear(); + for (unsigned i = 0; i < Nstvars; ++i) + if (current_src_states.canChange[i]) + current_src_states.idx.push_back(i); + // Loop over them + auto state_range = bitVectDecodeRange(current_src_states.idx, + current_src_states.val); + for (auto it_s = state_range.begin(); (bool) it_s; ++it_s) + // Loop over all edges + for (const auto& [plystcond, incond] : it_cc->second.second) + { + const auto& [acolor, plyst] = playbdd2st[plystcond]; + split->new_edge(*it_s, plyst, incond, acolor); + } + } + else + { + int rel_lvl = clvl - srcStIdx; + auto metaf = [rel_lvl](int pol) + { + return std::array{rel_lvl, pol}; + }; + abstract_put(stack_states, ccond, metaf); + } + + }; + + stack_states.emplace_back(ctask::VISIT, sym_aut); + abstract_traverse(stack_states, fputSrcSt, fpopSrcSt, fvisitSrcSt); + + // Free the pairs + bdd_freepair(replace_dstSt_srcSt); + } + else + { + // If a completion is demanded we might have to create sinks + // Sink controlled by player + unsigned sink_con = -1u; + auto get_sink_con_state = [&split, &sink_con, &sink_env, + um = unsat_mark, hu = has_unsat] + (bool create = true) + { + assert(hu); + if (SPOT_UNLIKELY((sink_con == -1u) && create)) + { + sink_con = split->new_state(); + sink_env = split->new_state(); + split->new_edge(sink_con, sink_env, bddtrue, um); + split->new_edge(sink_env, sink_con, bddtrue, um); + } + return sink_con; + }; + + // envstate -> edge number for current state + auto s_edge_dict = std::unordered_map(); + + for (unsigned s = 0; s < N; ++s) + { + s_edge_dict.clear(); // "Local" dict, outgoing for this state + + // Encode the edge as new bdd over (input, state, color, out) vars + bdd enc_out_s = 
bddfalse; + for (const auto &e: aut->out(s)) + enc_out_s |= encode_edge(e); // Switch to new ins and outs + + // Can only be false if there is no outgoing edge + // In this case: Nothing to do + assert(enc_out_s != bddfalse + || (!(aut->out(s).begin()))); + + if (enc_out_s == bddfalse) + { + std::cerr << "Dead end state: " << s << '\n'; +#ifndef NDEBUG + print_hoa(std::cerr, aut); +#endif + continue; + } + + // traverse the ins to do the actual split + assert(stack_inputs.empty()); + stack_inputs.emplace_back(ctask::VISIT, enc_out_s); + current_in = bddtrue; + + bdd all_in = bddfalse; // Only needed for completion + + auto fvisitInTravS + = [&](const bdd& ccond) -> void + { + int clvl = bdd_var(ccond); + if (clvl >= dstStIdx) // States come after input + { + // We have found a new in cube + // Add to the existing ones if necessary + // Translate to old variables + auto [itc, insc] + = bkwd_in_repl.try_emplace(current_in, bddfalse); + if (insc) + itc->second = bdd_replace(current_in, replace_in_bkwd); + const bdd& current_in_old = itc->second; + + if (complete_env) + all_in |= current_in_old; + // treat it + auto [it_s, ins_s] + = playbdd2st.try_emplace( + ccond, + std::make_pair(acc_cond::mark_t{}, + -1u)); + if (ins_s) + // A new player state and the corresponding least + // accepting color + it_s->second = construct_ply_state(ccond); + + // Add the condition + auto [it_e, ins_e] + = s_edge_dict.try_emplace(it_s->second.second, -1u); + if (ins_e) // Create a new edge + it_e->second + = split->new_edge(s, it_s->second.second, + current_in_old, + it_s->second.first); + else // Disjunction over input + split->edge_storage(it_e->second).cond + |= current_in_old; + } + else + { + auto metaf = [&bddopts = invar2bdd[clvl - inIdx]](int pol) + { + return bddopts[pol]; + }; + abstract_put(stack_inputs, ccond, metaf); + } + }; + + // Traverse all the ins + abstract_traverse(stack_inputs, fputInTrav, + fpopInTrav, fvisitInTravS); + + // Complete if necessary + if (complete_env && (all_in != bddtrue)) + split->new_edge(s, get_sink_con_state(), bddtrue - all_in); + + } // Current state is now split + } // Else + + split->prop_universal(trival::maybe()); + + // The named property + // compute the owners + // env is equal to false + auto owner = std::vector(split->num_states(), false); + // All "new" states belong to the player + std::fill(owner.begin()+aut->num_states(), owner.end(), true); + // Check if sinks have been created + if (sink_env != -1u) + owner.at(sink_env) = false; + + // !use_color -> all words accepted + // complete_env && sink_env == -1u + // complet. for env demanded but already + // satisfied -> split is also all true + if (complete_env && sink_env == -1u && !use_color) + split->acc() = acc_cond::acc_code::t(); + + set_state_players(split, std::move(owner)); + + // release the variables + // Release the pairs + for (auto pair_ptr : {replace_fwd, + replace_bkwd, + replace_in_fwd, + replace_in_bkwd, + replace_out_fwd, + replace_out_bkwd}) + bdd_freepair(pair_ptr); + aut->get_dict()->unregister_all_my_variables(&N); + + // Done + return split; + } // New split impl + + twa_graph_ptr + split_2step_(const const_twa_graph_ptr& aut, + const bdd& output_bdd, bool complete_env, + synthesis_info::splittype sp + = synthesis_info::splittype::AUTO) + { + // Heuristic for AUTO goes here + // For the moment semisym is almost always best except if there are + // really few inputs + unsigned nIns = aut->ap().size() - bdd_nodecount(output_bdd); + sp = sp == synthesis_info::splittype::AUTO ? 
+ (nIns < 4 ? synthesis_info::splittype::EXPL + : synthesis_info::splittype::SEMISYM) + : sp; + + switch (sp) + { + case (synthesis_info::splittype::EXPL): + return split_2step_expl_impl(aut, output_bdd, complete_env); + case (synthesis_info::splittype::SEMISYM): + return split_2step_sym_impl(aut, output_bdd, complete_env); + case (synthesis_info::splittype::FULLYSYM): + return split_2step_sym_impl(aut, output_bdd, complete_env); + default: + throw std::runtime_error("split_2step_(): " + "Expected explicit splittype."); + } + } + + } // End anonymous + + + twa_graph_ptr + split_2step(const const_twa_graph_ptr& aut, + const bdd& output_bdd, bool complete_env, + synthesis_info::splittype sp) + { + return split_2step_(aut, output_bdd, complete_env, sp); } twa_graph_ptr - split_2step(const const_twa_graph_ptr& aut, bool complete_env) + split_2step(const const_twa_graph_ptr& aut, bool complete_env, + synthesis_info::splittype sp) { - return split_2step(aut, - get_synthesis_outputs(aut), - complete_env); + return split_2step_(aut, + get_synthesis_outputs(aut), + complete_env, sp); + } + + twa_graph_ptr + split_2step(const const_twa_graph_ptr& aut, + synthesis_info& gi) + { + return split_2step_(aut, + get_synthesis_outputs(aut), + true, + gi.sp); } twa_graph_ptr @@ -906,6 +1861,12 @@ namespace spot twa_graph_ptr dpa = nullptr; + auto set_split = [&outs, &gi](auto& g) + { + set_synthesis_outputs(g, outs); + return split_2step(g, gi); + }; + switch (gi.s) { case algo::DET_SPLIT: @@ -927,7 +1888,8 @@ namespace spot << bv->paritize_time << " seconds\n"; if (bv) sw.start(); - dpa = split_2step(tmp, outs, true); + + dpa = set_split(tmp); if (bv) bv->split_time += sw.stop(); if (vs) @@ -949,7 +1911,7 @@ namespace spot << " states\n"; if (bv) sw.start(); - dpa = split_2step(aut, outs, true); + dpa = set_split(aut); if (bv) bv->split_time += sw.stop(); if (vs) @@ -961,7 +1923,7 @@ namespace spot case algo::SPLIT_DET: { sw.start(); - auto split = split_2step(aut, outs, true); + auto split = set_split(aut); if (bv) bv->split_time += sw.stop(); if (vs) @@ -1020,7 +1982,7 @@ namespace spot if (bv) sw.start(); - dpa = split_2step(dpa, outs, true); + dpa = set_split(dpa); if (bv) bv->split_time += sw.stop(); if (vs) @@ -1250,7 +2212,8 @@ namespace spot }; auto ret_sol_exists = - [&vs, &want_strategy, &tmp, &dict](twa_graph_ptr strat) + [&vs, &want_strategy, &tmp, &dict, &output_aps] + (twa_graph_ptr strat) { dict->unregister_all_my_variables(&tmp); if (vs) @@ -1265,7 +2228,17 @@ namespace spot } } if (strat) - strat->merge_edges(); + { + strat->merge_edges(); + bdd outputs = bddtrue; + std::for_each( + output_aps.begin(), + output_aps.end(), + [&strat, &outputs](const std::string& ap) -> void + { outputs &= bdd_ithvar(strat->register_ap(ap)); }); + + set_synthesis_outputs(strat, outputs); + } return mealy_like{ mealy_like::realizability_code::REALIZABLE_REGULAR, strat, @@ -1544,7 +2517,8 @@ namespace // anonymous for subsformula todo.pop(); formula current_form = assumptions[current_index]; done[current_index] = true; - auto [ins_current, outs_current] = form2props.aps_of(current_form); + auto [ins_current, outs_current] + = form2props.aps_of(current_form); result.first.insert(ins_current.begin(), ins_current.end()); result.second.insert(outs_current.begin(), outs_current.end()); for (unsigned i = 0; i < ass_size; ++i) @@ -1552,7 +2526,8 @@ namespace // anonymous for subsformula if (done[i]) continue; auto other_form = assumptions[i]; - auto [ins_other, outs_other] = form2props.aps_of(other_form); + auto 
[ins_other, outs_other] + = form2props.aps_of(other_form); if (are_intersecting(ins_current, ins_other) || are_intersecting(outs_other, outs_other)) todo.push(i); @@ -1587,7 +2562,8 @@ namespace // anonymous for subsformula // We merge two assumpt or guar. that share a proposition from decRelProps std::vector assumptions_split, guarantees_split; - auto fus = [&](std::vector &forms, std::vector &res) + auto fus = [&](std::vector &forms, + std::vector &res) { std::stack todo; todo.emplace(0); diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 3d25441e9..2c5bdff1b 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -25,6 +25,71 @@ namespace spot { + + /// \ingroup synthesis + /// \brief Benchmarking data and options for synthesis + struct SPOT_API synthesis_info + { + enum class algo + { + DET_SPLIT=0, + SPLIT_DET, + DPA_SPLIT, + LAR, + LAR_OLD, + ACD, + }; + + enum class splittype + { + AUTO=0, // Uses a heuristic to choose + EXPL, // Explicit enumerations of inputs + SEMISYM, // Works on one bdd per env state + FULLYSYM // Works on a fully symbolic version of the automaton + }; + + struct bench_var + { + double total_time = 0.0; + double trans_time = 0.0; + double split_time = 0.0; + double paritize_time = 0.0; + double solve_time = 0.0; + double strat2aut_time = 0.0; + double simplify_strat_time = 0.0; + double aig_time = 0.0; + unsigned nb_states_arena = 0; + unsigned nb_states_arena_env = 0; + unsigned nb_strat_states = 0; + unsigned nb_strat_edges = 0; + unsigned nb_simpl_strat_states = 0; + unsigned nb_simpl_strat_edges = 0; + unsigned nb_latches = 0; + unsigned nb_gates = 0; + bool realizable = false; + }; + + synthesis_info() + : force_sbacc{false}, + s{algo::LAR}, + minimize_lvl{2}, + sp{splittype::AUTO}, + bv{}, + verbose_stream{nullptr}, + dict(make_bdd_dict()) + { + } + + bool force_sbacc; + algo s; + int minimize_lvl; + splittype sp; + std::optional bv; + std::ostream* verbose_stream; + option_map opt; + bdd_dict_ptr dict; + }; + /// \addtogroup synthesis Reactive Synthesis /// \ingroup twa_algorithms @@ -51,18 +116,33 @@ namespace spot /// are treated as inputs /// \param complete_env Whether the automaton should be complete for the /// environment, i.e. 
the player of inputs + /// \param sp Defines which splitting algo to use /// \note This function also computes the state players /// \note If the automaton is to be completed, sink states will /// be added for both env and player if necessary SPOT_API twa_graph_ptr split_2step(const const_twa_graph_ptr& aut, - const bdd& output_bdd, bool complete_env = true); + const bdd& output_bdd, bool complete_env = true, + synthesis_info::splittype sp + = synthesis_info::splittype::AUTO); /// \ingroup synthesis /// \brief Like split_2step but relying on the named property /// 'synthesis-outputs' SPOT_API twa_graph_ptr - split_2step(const const_twa_graph_ptr& aut, bool complete_env = true); + split_2step(const const_twa_graph_ptr& aut, bool complete_env = true, + synthesis_info::splittype sp + = synthesis_info::splittype::AUTO); + + /// \ingroup synthesis + /// \brief Like split_2step but allows to fine-tune the splitting + /// via the options set in \a gi, always completes the + /// environment states and relies on the named property to + /// extract the output proposition + SPOT_API twa_graph_ptr + split_2step(const const_twa_graph_ptr& aut, + synthesis_info& gi); + /// \ingroup synthesis /// \brief the inverse of split_2step @@ -75,60 +155,6 @@ namespace spot SPOT_API twa_graph_ptr unsplit_2step(const const_twa_graph_ptr& aut); - /// \ingroup synthesis - /// \brief Benchmarking data and options for synthesis - struct SPOT_API synthesis_info - { - enum class algo - { - DET_SPLIT=0, - SPLIT_DET, - DPA_SPLIT, - LAR, - LAR_OLD, - ACD, - }; - - struct bench_var - { - double total_time = 0.0; - double trans_time = 0.0; - double split_time = 0.0; - double paritize_time = 0.0; - double solve_time = 0.0; - double strat2aut_time = 0.0; - double simplify_strat_time = 0.0; - double aig_time = 0.0; - unsigned nb_states_arena = 0; - unsigned nb_states_arena_env = 0; - unsigned nb_strat_states = 0; - unsigned nb_strat_edges = 0; - unsigned nb_simpl_strat_states = 0; - unsigned nb_simpl_strat_edges = 0; - unsigned nb_latches = 0; - unsigned nb_gates = 0; - bool realizable = false; - }; - - synthesis_info() - : force_sbacc{false}, - s{algo::LAR}, - minimize_lvl{2}, - bv{}, - verbose_stream{nullptr}, - dict(make_bdd_dict()) - { - } - - bool force_sbacc; - algo s; - int minimize_lvl; - std::optional bv; - std::ostream* verbose_stream; - option_map opt; - bdd_dict_ptr dict; - }; - /// \ingroup synthesis /// \brief Stream algo SPOT_API std::ostream& diff --git a/tests/core/gamehoa.test b/tests/core/gamehoa.test index f50602e34..6135cd438 100755 --- a/tests/core/gamehoa.test +++ b/tests/core/gamehoa.test @@ -21,11 +21,13 @@ set -x -ltlsynt --ins=a,c --outs=b -f 'GF(a <-> XXXc) <-> GFb' --print-game-hoa >out +ltlsynt --ins=a,c --outs=b -f 'GF(a <-> XXXc) <-> GFb' --print-game-hoa \ +--splittype=expl >out grep spot-state-player: out autfilt out >out2 diff out out2 -ltlsynt --ins=a,c --outs=b -f 'GF(a <-> XXXc) <-> GFb' --print-game-hoa=l >out3 +ltlsynt --ins=a,c --outs=b -f 'GF(a <-> XXXc) <-> GFb' --print-game-hoa=l \ +--splittype=expl >out3 test 1 = `wc -l < out3` cmp out out3 && exit 1 autfilt out3 >out2 diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 1e0397a5f..77f661c40 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -484,10 +484,22 @@ i3 i3 o0 o0 o1 o1 EOF -ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop+ud --algo=lar --decompose=no --simpl=no >out +ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1" \ + 
--aiger=isop+ud --algo=lar --decompose=no --simpl=no \ + --splittype=expl >out diff out exp +for splitt in expl semisym fullysym auto +do + res=$(ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" \ + --outs="o0,o1" --aiger=isop+ud --algo=lar --decompose=no \ + --simpl=no --splittype="$splitt" --realizability) + if [[ "$res" != "REALIZABLE" ]]; then + echo "Expected realizable" + fi +done + + cat >exp <X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop --algo=lar --decompose=no --simpl=no >out + --aiger=isop --algo=lar --decompose=no --simpl=no \ + --splittype=expl >out diff out exp @@ -590,10 +603,10 @@ o0 o0 o1 o1 EOF ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop+ud --algo=lar --decompose=yes --simpl=no >out + --aiger=isop+ud --algo=lar --decompose=yes --simpl=no --splittype=expl >out diff out exp ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop+ud --algo=lar --simpl=no >out + --aiger=isop+ud --algo=lar --simpl=no --splittype=expl >out diff out exp # Issue #477 diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb index 446e18a68..a4d6a3f57 100644 --- a/tests/python/_partitioned_relabel.ipynb +++ b/tests/python/_partitioned_relabel.ipynb @@ -203,7 +203,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46fc00> >" + " *' at 0x7f0fd837fe10> >" ] }, "execution_count": 2, @@ -411,7 +411,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46fc00> >" + " *' at 0x7f0fd837fe10> >" ] }, "execution_count": 3, @@ -597,7 +597,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46fc00> >" + " *' at 0x7f0fd837fe10> >" ] }, "execution_count": 4, @@ -782,7 +782,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46dfb0> >" + " *' at 0x7f0fd8394990> >" ] }, "metadata": {}, @@ -1052,7 +1052,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46dfb0> >" + " *' at 0x7f0fd8394990> >" ] }, "execution_count": 5, @@ -1326,7 +1326,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46dfb0> >" + " *' at 0x7f0fd8394990> >" ] }, "execution_count": 6, @@ -1537,7 +1537,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46e9d0> >" + " *' at 0x7f0fd83943c0> >" ] }, "metadata": {}, @@ -1760,7 +1760,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46e9d0> >" + " *' at 0x7f0fd83943c0> >" ] }, "execution_count": 7, @@ -2023,7 +2023,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46e9d0> >" + " *' at 0x7f0fd83943c0> >" ] }, "execution_count": 8, @@ -2055,7 +2055,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "id": "296a93d3", "metadata": {}, "outputs": [ @@ -2304,7 +2304,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46f4b0> >" + " *' at 0x7f0fd8395590> >" ] }, "metadata": {}, @@ -2718,7 +2718,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46eca0> >" + " *' at 0x7f0fd837fcf0> >" ] }, "metadata": {}, @@ -3312,7 +3312,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46eca0> >" + " *' at 0x7f0fd837fcf0> >" ] }, "metadata": {}, @@ -3346,7 +3346,7 @@ "display(aut)\n", "\n", "# Convert to split mealy machine\n", - "auts = spot.split_2step(aut)\n", + "auts = spot.split_2step(aut, True, spot.synthesis_info.splittype_EXPL)\n", "print(auts.to_str(\"hoa\"))\n", "display(auts)\n", "\n", @@ -3951,7 +3951,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f483c46eca0> >" + " *' at 0x7f0fd8396b20> >" ] }, "metadata": {}, @@ -4020,7 +4020,7 @@ " for relabel_player in [True, False]:\n", " for split_env in [True, False]:\n", " for split_player in [True, False]:\n", - " auts = spot.split_2step(aut)\n", + " auts = 
spot.split_2step(aut, True, spot.synthesis_info.splittype_EXPL)\n", " rel_dicts = spot.partitioned_game_relabel_here(auts, relabel_env, relabel_player, split_env, split_player, 10000, 10000)\n", " spot.relabel_game_here(auts, rel_dicts)\n", " print(spot.are_equivalent(aut, spot.unsplit_2step(auts)))" @@ -4051,7 +4051,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.6" } }, "nbformat": 4, diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 1520b0c2f..6623b5820 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -48,6 +48,7 @@ "source": [ "si = spot.synthesis_info()\n", "si.s = spot.synthesis_info.algo_LAR # Use LAR algorithm\n", + "si.sp = spot.synthesis_info.splittype_EXPL\n", "game = spot.ltl_to_game(\"G((F(i0) && F(i1))->(G(i1<->(X(o0)))))\", [\"o0\"], si)\n", "spot.solve_game(game)" ] @@ -787,7 +788,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8777750> >" + " *' at 0x75e94c292280> >" ] }, "execution_count": 8, @@ -930,7 +931,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8777750> >" + " *' at 0x75e94c292280> >" ] }, "execution_count": 9, @@ -1149,7 +1150,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8777660> >" + " *' at 0x75e94c290c30> >" ] }, "execution_count": 10, @@ -1158,7 +1159,7 @@ } ], "source": [ - "a_s = spot.split_2step(a, True)\n", + "a_s = spot.split_2step(a, True, spot.synthesis_info.splittype_EXPL)\n", "print(a.acc())\n", "a_s" ] @@ -1322,7 +1323,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8774840> >" + " *' at 0x75e94c293a20> >" ] }, "execution_count": 11, @@ -1618,7 +1619,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8774930> >" + " *' at 0x75e94fe9f090> >" ] }, "execution_count": 12, @@ -1627,7 +1628,7 @@ } ], "source": [ - "a_snc = spot.split_2step(a, False)\n", + "a_snc = spot.split_2step(a, False, spot.synthesis_info.splittype_EXPL)\n", "print(a_snc.acc())\n", "a_snc" ] @@ -2006,7 +2007,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8776f10> >" + " *' at 0x75e94c292880> >" ] }, "execution_count": 13, @@ -2015,7 +2016,7 @@ } ], "source": [ - "a_s = spot.split_2step(a, True)\n", + "a_s = spot.split_2step(a, True, spot.synthesis_info.splittype_EXPL)\n", "print(a_s.acc())\n", "a_s" ] @@ -2289,7 +2290,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a9ff0db0> >" + " *' at 0x75e94c2910b0> >" ] }, "execution_count": 14, @@ -2531,7 +2532,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a9ff0db0> >" + " *' at 0x75e94c2910b0> >" ] }, "execution_count": 15, @@ -2803,7 +2804,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8775800> >" + " *' at 0x75e94c293720> >" ] }, "execution_count": 16, @@ -3046,7 +3047,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8775800> >" + " *' at 0x75e94c293720> >" ] }, "execution_count": 17, @@ -3757,7 +3758,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876e310> >" + " *' at 0x75e94c290600> >" ] }, "execution_count": 18, @@ -4015,7 +4016,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876e310> >" + " *' at 0x75e94c290600> >" ] }, "execution_count": 19, @@ -4298,7 +4299,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8776850> >" + " *' at 0x75e94c290f90> >" ] }, "metadata": {}, @@ -6066,7 +6067,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a9ff1350> >" + " *' at 0x75e94c2901b0> >" ] }, "execution_count": 20, @@ -6101,7 +6102,7 @@ "\n", "display(aut)\n", "\n", - "aut = spot.split_2step(aut, x, False)\n", + "aut = spot.split_2step(aut, x, False, spot.synthesis_info.splittype_EXPL)\n", "\n", "display(aut.show_storage())\n", "aut" @@ 
-6975,7 +6976,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a9ff1350> >" + " *' at 0x75e94c2901b0> >" ] }, "execution_count": 21, @@ -7219,7 +7220,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8776970> >" + " *' at 0x75e94c290ea0> >" ] }, "metadata": {}, @@ -8107,7 +8108,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8775620> >" + " *' at 0x75e94c291b60> >" ] }, "execution_count": 22, @@ -8141,7 +8142,7 @@ "\n", "display(aut)\n", "\n", - "aut = spot.split_2step(aut, x, False)\n", + "aut = spot.split_2step(aut, x, False, spot.synthesis_info.splittype_EXPL)\n", "\n", "display(aut.show_storage())\n", "aut" @@ -8529,7 +8530,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a8775620> >" + " *' at 0x75e94c291b60> >" ] }, "execution_count": 23, @@ -8679,7 +8680,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876e100> >" + " *' at 0x75e94c291ad0> >" ] }, "metadata": {}, @@ -8858,7 +8859,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876e100> >" + " *' at 0x75e94c291ad0> >" ] }, "metadata": {}, @@ -9062,7 +9063,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876f570> >" + " *' at 0x75e94c27bde0> >" ] }, "execution_count": 25, @@ -9072,6 +9073,7 @@ ], "source": [ "si = spot.synthesis_info()\n", + "si.sp = spot.synthesis_info.splittype_EXPL\n", "\n", "aut = spot.ltl_to_game(\"(a|b|c|d)->x\", [\"x\"], si)\n", "aut" @@ -9268,7 +9270,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876fae0> >" + " *' at 0x75e94c290210> >" ] }, "execution_count": 27, @@ -9582,7 +9584,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f08a876f2a0> >" + " *' at 0x75e94c292b80> >" ] }, "execution_count": 28, @@ -9594,14 +9596,6 @@ "aig = spot.mealy_machine_to_aig(ctrl, \"ite\")\n", "aig" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eb81b7d3", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -9620,7 +9614,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.6" } }, "nbformat": 4, diff --git a/tests/python/game.py b/tests/python/game.py index e0d880647..3462cc7d2 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -282,8 +282,8 @@ spot.change_parity_here(gdpa, spot.parity_kind_max, spot.parity_style_odd) gsdpa = spot.split_2step(gdpa, b, True) spot.colorize_parity_here(gsdpa, True) tc.assertTrue(spot.solve_parity_game(gsdpa)) -tc.assertEqual(spot.highlight_strategy(gsdpa).to_str("HOA", "1.1"), -"""HOA: v1.1 +gsdpa_solved_ref = spot.automaton( + """HOA: v1.1 States: 18 Start: 0 AP: 2 "a" "b" @@ -292,7 +292,7 @@ Acceptance: 5 Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0)))) properties: trans-labels explicit-labels trans-acc colored complete properties: deterministic spot.highlight.states: 0 4 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 4 """ -+"""10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 + +"""10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 spot.highlight.edges: 15 4 17 4 20 4 22 4 24 4 26 4 28 4 30 4 31 4 32 4 33 4 spot.state-player: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 controllable-AP: 1 @@ -350,6 +350,23 @@ State: 17 [t] 4 {4} --END--""" ) +tc.assertTrue(spot.solve_parity_game(gsdpa_solved_ref)) + +# Check for the same language +tc.assertTrue(spot.are_equivalent(gsdpa, gsdpa_solved_ref)) +# Check if the winning regions are the same for env states +# Env states should by construction have the same number as before +players_new = spot.get_state_players(gsdpa) +players_ref = spot.get_state_players(gsdpa_solved_ref) +# States maybe renumbered, but remain in the same "class" +tc.assertEqual(players_new, players_ref) +# Check that env states 
have the same winner +winners_new = spot.get_state_winners(gsdpa) +winners_ref = spot.get_state_winners(gsdpa_solved_ref) + +tc.assertTrue(all([wn == wr for (wn, wr, p) in + zip(winners_new, winners_ref, players_ref) + if not p])) # Test the different parity conditions gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), diff --git a/tests/python/split.py b/tests/python/split.py index a953b82e2..de05e070b 100644 --- a/tests/python/split.py +++ b/tests/python/split.py @@ -135,3 +135,169 @@ aut, s = do_split('((G (((! g_0) || (! g_1)) && ((r_0 && (X r_1)) -> (F (g_0 \ && g_1))))) && (G (r_0 -> F g_0))) && (G (r_1 -> F g_1))', ['g_0', 'g_1']) tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) + + +# check equivalence of split automata +# for the different methods for certain cases +autstr = """HOA: v1 +name: "r2b_ack0 | F((!b2r_req0 & Xr2b_ack0) | (b2r_req0 & XG!r2b_ack0)) \ +| (!b2r_req0 & G(!r2b_ack0 | ((!b2r_req0 | !b2r_req1) & X!b2r_req0 \ +& (!(s2b_req0 | s2b_req1) | XF(b2r_req0 | b2r_req1)) & (!b2r_req0 \ +| X(b2r_req0 | (b2r_req1 M !b2r_req0))) & (!b2r_req0 | r2b_ack0 \ +| Xb2r_req0))))" +States: 12 +Start: 7 +AP: 5 "r2b_ack0" "b2r_req0" "b2r_req1" "s2b_req0" "s2b_req1" +controllable-AP: 2 1 +acc-name: parity max even 4 +Acceptance: 4 Fin(3) & (Inf(2) | (Fin(1) & Inf(0))) +properties: trans-labels explicit-labels trans-acc colored complete +properties: deterministic +--BODY-- +State: 0 +[!0&!1] 0 {2} +[0] 1 {1} +[!0&1] 3 {2} +State: 1 +[t] 1 {2} +State: 2 +[!0&1] 2 {2} +[0&1] 2 {3} +[!0&!1] 4 {2} +[0&!1] 5 {3} +State: 3 +[!0&!1] 0 {2} +[0&1&2] 2 {1} +[!0&1] 3 {2} +[0&!1&3 | 0&!1&4] 6 {2} +[0&!1&!3&!4] 7 {2} +[0&1&!2] 8 {2} +State: 4 +[0] 1 {1} +[!0&1] 2 {2} +[!0&!1] 4 {2} +State: 5 +[0] 1 {1} +[!0&1] 2 {1} +[!0&!1] 5 {1} +State: 6 +[!0&!1&2] 0 {2} +[0] 1 {1} +[!0&1] 2 {1} +[!0&!1&!2] 9 {1} +State: 7 +[!0&!1] 0 {2} +[0] 1 {1} +[!0&1] 2 {1} +State: 8 +[!0&!1&2] 0 {2} +[1] 2 {1} +[0&!1&2&3 | 0&!1&2&4] 6 {2} +[0&!1&2&!3&!4] 7 {2} +[!0&!1&!2] 10 {1} +[0&!1&!2] 11 {1} +State: 9 +[!0&!1&2] 0 {2} +[0] 1 {1} +[!0&1] 3 {2} +[!0&!1&!2] 9 {1} +State: 10 +[!0&!1&2] 0 {2} +[0] 1 {1} +[!0&1] 2 {1} +[!0&!1&!2] 10 {2} +State: 11 +[!0&!1&2] 0 {2} +[0] 1 {1} +[!0&1] 2 {1} +[!0&!1&!2] 11 {1} +--END-- +HOA: v1 +States: 2 +Start: 0 +AP: 15 "u0room29light0f1dturn2off1b" "u0room29light0f1dturn2on1b" \ + "p0b0room29window29opened" "u0room29light0f1dtoggle1b" \ + "p0b0room29window29closed" "p0p0all2windows2closed0room" \ + "u0system29start2new2timer0f1dmin25231b" \ + "u0system29start2new2timer0f1dhour241b" \ + "u0room29warnlight29control0room29warnlight29control" \ + "u0system29start2new2timer0system29start2new2timer" \ + "u0room29warnlight29control0f1dturn2on1b" \ + "u0room29warnlight29control0f1dturn2off1b" "u0room29light0room29light" \ + "u0system29start2new2timer0f1dhour251b" "p0b0timeout" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 0 1 3 6 7 8 9 10 11 12 13 +--BODY-- +State: 0 +[!0&!1&!3&!6&!7&!8&!9&!10&11&12&13 | !0&!1&!3&!6&!7&!8&!9&10&!11&12&13 \ +| !0&!1&!3&!6&!7&!8&9&!10&11&12&!13 | !0&!1&!3&!6&!7&!8&9&10&!11&12&!13 \ +| !0&!1&!3&!6&!7&8&!9&!10&!11&12&13 | !0&!1&!3&!6&!7&8&9&!10&!11&12&!13 \ +| !0&!1&!3&!6&7&!8&!9&!10&11&12&!13 | !0&!1&!3&!6&7&!8&!9&10&!11&12&!13 \ +| !0&!1&!3&!6&7&8&!9&!10&!11&12&!13 | !0&!1&!3&6&!7&!8&!9&!10&11&12&!13 \ +| !0&!1&!3&6&!7&!8&!9&10&!11&12&!13 | !0&!1&!3&6&!7&8&!9&!10&!11&12&!13 \ +| !0&!1&3&!6&!7&!8&!9&!10&11&!12&13 | !0&!1&3&!6&!7&!8&!9&10&!11&!12&13 \ +| !0&!1&3&!6&!7&!8&9&!10&11&!12&!13 | 
!0&!1&3&!6&!7&!8&9&10&!11&!12&!13 \ +| !0&!1&3&!6&!7&8&!9&!10&!11&!12&13 | !0&!1&3&!6&!7&8&9&!10&!11&!12&!13 \ +| !0&!1&3&!6&7&!8&!9&!10&11&!12&!13 | !0&!1&3&!6&7&!8&!9&10&!11&!12&!13 \ +| !0&!1&3&!6&7&8&!9&!10&!11&!12&!13 | !0&!1&3&6&!7&!8&!9&!10&11&!12&!13 \ +| !0&!1&3&6&!7&!8&!9&10&!11&!12&!13 | !0&!1&3&6&!7&8&!9&!10&!11&!12&!13 \ +| !0&1&!3&!6&!7&!8&!9&!10&11&!12&13 | !0&1&!3&!6&!7&!8&!9&10&!11&!12&13 \ +| !0&1&!3&!6&!7&!8&9&!10&11&!12&!13 | !0&1&!3&!6&!7&!8&9&10&!11&!12&!13 \ +| !0&1&!3&!6&!7&8&!9&!10&!11&!12&13 | !0&1&!3&!6&!7&8&9&!10&!11&!12&!13 \ +| !0&1&!3&!6&7&!8&!9&!10&11&!12&!13 | !0&1&!3&!6&7&!8&!9&10&!11&!12&!13 \ +| !0&1&!3&!6&7&8&!9&!10&!11&!12&!13 | !0&1&!3&6&!7&!8&!9&!10&11&!12&!13 \ +| !0&1&!3&6&!7&!8&!9&10&!11&!12&!13 | !0&1&!3&6&!7&8&!9&!10&!11&!12&!13 \ +| 0&!1&!3&!6&!7&!8&!9&!10&11&!12&13 | 0&!1&!3&!6&!7&!8&!9&10&!11&!12&13 \ +| 0&!1&!3&!6&!7&!8&9&!10&11&!12&!13 | 0&!1&!3&!6&!7&!8&9&10&!11&!12&!13 \ +| 0&!1&!3&!6&!7&8&!9&!10&!11&!12&13 | 0&!1&!3&!6&!7&8&9&!10&!11&!12&!13 \ +| 0&!1&!3&!6&7&!8&!9&!10&11&!12&!13 | 0&!1&!3&!6&7&!8&!9&10&!11&!12&!13 \ +| 0&!1&!3&!6&7&8&!9&!10&!11&!12&!13 | 0&!1&!3&6&!7&!8&!9&!10&11&!12&!13 \ +| 0&!1&!3&6&!7&!8&!9&10&!11&!12&!13 | 0&!1&!3&6&!7&8&!9&!10&!11&!12&!13] 1 +State: 1 +[!0&!1&!3&!6&!7&!8&!9&!10&11&12&13 | !0&!1&!3&!6&!7&!8&!9&10&!11&12&13 \ +| !0&!1&!3&!6&!7&!8&9&!10&11&12&!13 | !0&!1&!3&!6&!7&!8&9&10&!11&12&!13 \ +| !0&!1&!3&!6&!7&8&!9&!10&!11&12&13 | !0&!1&!3&!6&!7&8&9&!10&!11&12&!13 \ +| !0&!1&!3&!6&7&!8&!9&!10&11&12&!13 | !0&!1&!3&!6&7&!8&!9&10&!11&12&!13 \ +| !0&!1&!3&!6&7&8&!9&!10&!11&12&!13 | !0&!1&!3&6&!7&!8&!9&!10&11&12&!13 \ +| !0&!1&!3&6&!7&!8&!9&10&!11&12&!13 | !0&!1&!3&6&!7&8&!9&!10&!11&12&!13 \ +| !0&!1&3&!6&!7&!8&!9&!10&11&!12&13 | !0&!1&3&!6&!7&!8&!9&10&!11&!12&13 \ +| !0&!1&3&!6&!7&!8&9&!10&11&!12&!13 | !0&!1&3&!6&!7&!8&9&10&!11&!12&!13 \ +| !0&!1&3&!6&!7&8&!9&!10&!11&!12&13 | !0&!1&3&!6&!7&8&9&!10&!11&!12&!13 \ +| !0&!1&3&!6&7&!8&!9&!10&11&!12&!13 | !0&!1&3&!6&7&!8&!9&10&!11&!12&!13 \ +| !0&!1&3&!6&7&8&!9&!10&!11&!12&!13 | !0&!1&3&6&!7&!8&!9&!10&11&!12&!13 \ +| !0&!1&3&6&!7&!8&!9&10&!11&!12&!13 | !0&!1&3&6&!7&8&!9&!10&!11&!12&!13 \ +| !0&1&!3&!6&!7&!8&!9&!10&11&!12&13 | !0&1&!3&!6&!7&!8&!9&10&!11&!12&13 \ +| !0&1&!3&!6&!7&!8&9&!10&11&!12&!13 | !0&1&!3&!6&!7&!8&9&10&!11&!12&!13 \ +| !0&1&!3&!6&!7&8&!9&!10&!11&!12&13 | !0&1&!3&!6&!7&8&9&!10&!11&!12&!13 \ +| !0&1&!3&!6&7&!8&!9&!10&11&!12&!13 | !0&1&!3&!6&7&!8&!9&10&!11&!12&!13 \ +| !0&1&!3&!6&7&8&!9&!10&!11&!12&!13 | !0&1&!3&6&!7&!8&!9&!10&11&!12&!13 \ +| !0&1&!3&6&!7&!8&!9&10&!11&!12&!13 | !0&1&!3&6&!7&8&!9&!10&!11&!12&!13 \ +| 0&!1&!3&!6&!7&!8&!9&!10&11&!12&13 | 0&!1&!3&!6&!7&!8&!9&10&!11&!12&13 \ +| 0&!1&!3&!6&!7&!8&9&!10&11&!12&!13 | 0&!1&!3&!6&!7&!8&9&10&!11&!12&!13 \ +| 0&!1&!3&!6&!7&8&!9&!10&!11&!12&13 | 0&!1&!3&!6&!7&8&9&!10&!11&!12&!13 \ +| 0&!1&!3&!6&7&!8&!9&!10&11&!12&!13 | 0&!1&!3&!6&7&!8&!9&10&!11&!12&!13 \ +| 0&!1&!3&!6&7&8&!9&!10&!11&!12&!13 | 0&!1&!3&6&!7&!8&!9&!10&11&!12&!13 \ +| 0&!1&!3&6&!7&!8&!9&10&!11&!12&!13 | 0&!1&!3&6&!7&8&!9&!10&!11&!12&!13] 1 +--END-- +""" + +for autus in spot.automata(autstr): + si = spot.synthesis_info() + all_split = [] + for sp in [spot.synthesis_info.splittype_EXPL, + spot.synthesis_info.splittype_SEMISYM, + spot.synthesis_info.splittype_FULLYSYM, + spot.synthesis_info.splittype_AUTO + ]: + all_split.append(spot.split_2step(autus, si)) + for i in range(len(all_split)): + for j in range(i+1, len(all_split)): + tc.assertTrue(spot.are_equivalent(all_split[i], all_split[j])) + + +del autus +del si +del all_split 
+gcollect() diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 991e1cbf4..0f2aa1ba1 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -29,8 +29,7 @@ for i in range(0, 2): tc.assertFalse(spot.solve_game(game)) # A game can have only inputs -game = spot.ltl_to_game("GFa", []) -tc.assertEqual(game.to_str(), """HOA: v1 +game_ref = spot.automaton("""HOA: v1 States: 3 Start: 0 AP: 1 "a" @@ -49,3 +48,10 @@ State: 1 State: 2 [t] 0 {0} --END--""") + +gi = spot.synthesis_info() +gi.dict = game_ref.get_dict() + +game = spot.ltl_to_game("GFa", [], gi) + +tc.assertTrue(spot.are_equivalent(game, game_ref)) From f03e32619aa096824c7652a376b6d2f39aac12af Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 4 Jul 2024 12:11:10 +0200 Subject: [PATCH 457/606] improve some comments * spot/twaalgos/complement.hh, spot/twaalgos/complement.cc: Here. --- spot/twaalgos/complement.cc | 7 ++++--- spot/twaalgos/complement.hh | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 00e9cb0ce..9fad4eac4 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -515,7 +515,7 @@ namespace spot twa_graph_ptr res = dualize(aut); // There are cases with "t" acceptance that get converted to // Büchi during completion, then dualized to co-Büchi, but the - // acceptance is still not used. To try to clean it up in this + // acceptance is still not used. Try to clean it up in this // case. if (aut->num_sets() == 0 || // Also dualize removes sink states, but doesn't simplify @@ -525,8 +525,9 @@ namespace spot return res; } if (is_very_weak_automaton(aut)) - // removing alternation may need more acceptance sets than we support. - // in this case res==nullptr and we try the other determinization. + // Removing alternation may need more acceptance sets than Spot + // supports. When this happens res==nullptr and we fall back to + // determinization-based complementation. if (twa_graph_ptr res = remove_alternation(dualize(aut), false, aborter, false)) return res; diff --git a/spot/twaalgos/complement.hh b/spot/twaalgos/complement.hh index 4b74f27b8..6bb8ff1d9 100644 --- a/spot/twaalgos/complement.hh +++ b/spot/twaalgos/complement.hh @@ -73,7 +73,8 @@ namespace spot /// If an output_aborter is supplied, it is used to /// abort the construction of larger automata. /// - /// complement_semidet() is not yet used. + /// complement_semidet() is not yet used, as it is not always better + /// when the input is semi-deterministic. SPOT_API twa_graph_ptr complement(const const_twa_graph_ptr& aut, const output_aborter* aborter = nullptr); From 31511e042a10aa9c00d821f49abce8a5e6960ccc Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 15 Jul 2024 15:27:57 +0200 Subject: [PATCH 458/606] twaalgos: implement restrict_dead_end_edges_here() Discussed in issue #587. * spot/twaalgos/deadends.cc, spot/twaalgos/deadends.hh: New files. * spot/twaalgos/Makefile.am, python/spot/impl.i: Add them. * tests/core/deadends.test, tests/python/deadends.py: New files. * tests/Makefile.am: Add them. * spot/twa/acc.cc, spot/twa/acc.hh (keep_one_inf_per_branch): New method. * bin/autfilt.cc: Learn option --restrict-dead-end-edges. * NEWS: Mention it. 
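A minimal usage sketch (illustration only, not patch content), assuming the Python bindings exposed by this commit are built; the formula is arbitrary:

import spot

# 'FGb' typically produces a state whose only successor is itself,
# i.e. a dead end reached by the edges this pass restricts.
aut = spot.translate('GFa & FGb', 'low')
before = aut.to_str('hoa')

# Restrict the labels of edges leading to dead-end states, in place.
spot.restrict_dead_end_edges_here(aut)

# Labels may shrink, but the recognized language is unchanged.
assert spot.are_equivalent(aut, spot.automaton(before))
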
--- NEWS | 14 ++- bin/autfilt.cc | 12 +++ python/spot/impl.i | 2 + spot/twa/acc.cc | 80 +++++++++++++++++ spot/twa/acc.hh | 17 ++++ spot/twaalgos/Makefile.am | 2 + spot/twaalgos/deadends.cc | 181 ++++++++++++++++++++++++++++++++++++++ spot/twaalgos/deadends.hh | 51 +++++++++++ tests/Makefile.am | 2 + tests/core/deadends.test | 70 +++++++++++++++ tests/python/deadends.py | 158 +++++++++++++++++++++++++++++++++ 11 files changed, 588 insertions(+), 1 deletion(-) create mode 100644 spot/twaalgos/deadends.cc create mode 100644 spot/twaalgos/deadends.hh create mode 100755 tests/core/deadends.test create mode 100644 tests/python/deadends.py diff --git a/NEWS b/NEWS index 957b9b254..14b21c99e 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,18 @@ New in spot 2.12.0.dev (not yet released) - Nothing yet. + Command-line tools: + + - autfilt learned --restrict-dead-end-edges, to restricts labels of + edges leading to dead-ends. See the description of + restrict_dead_end_edges_here() below. + + Library: + + - restrict_dead_end_edges_here() can reduce non-determinism (but + not remove it) by restricting the label L of some edge (S)-L->(D) + going to a state D that does not have other successor than + itself. The conditions are detailled in the documentation of + this function. New in spot 2.12 (2024-05-16) diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 420bf2867..1c063af65 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -148,6 +149,7 @@ enum { OPT_REM_UNREACH, OPT_REM_UNUSED_AP, OPT_REM_FIN, + OPT_RESTRICT_DEAD_ENDS, OPT_SAT_MINIMIZE, OPT_SCCS, OPT_SEED, @@ -373,6 +375,9 @@ static const argp_option options[] = { "remove-dead-states", OPT_REM_DEAD, nullptr, 0, "remove states that are unreachable, or that cannot belong to an " "infinite path", 0 }, + { "restrict-dead-end-edges", OPT_RESTRICT_DEAD_ENDS, nullptr, 0, + "restrict labels of dead-end edges, based on useful transitions of the " + "state they reach", 0 }, { "simplify-acceptance", OPT_SIMPL_ACC, nullptr, 0, "simplify the acceptance condition by merging identical acceptance sets " "and by simplifying some terms containing complementary sets", 0 }, @@ -715,6 +720,7 @@ static bool opt_dca = false; static bool opt_streett_like = false; static bool opt_enlarge_acceptance_set = false; static bool opt_reduce_acceptance_set = false; +static bool opt_restrict_dead_ends = false; static spot::twa_graph_ptr ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false) @@ -1199,6 +1205,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_REM_UNUSED_AP: opt_rem_unused_ap = true; break; + case OPT_RESTRICT_DEAD_ENDS: + opt_restrict_dead_ends = true; + break; case OPT_SAT_MINIMIZE: opt_sat_minimize = arg ? 
arg : ""; break; @@ -1442,6 +1451,9 @@ namespace else if (opt_clean_acc) cleanup_acceptance_here(aut); + if (opt_restrict_dead_ends) + restrict_dead_end_edges_here(aut); + if (opt_sep_sets) separate_sets_here(aut); if (opt_complement_acc) diff --git a/python/spot/impl.i b/python/spot/impl.i index 778983aac..6fa3e9f07 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -115,6 +115,7 @@ #include #include #include +#include #include #include #include @@ -717,6 +718,7 @@ def state_is_accepting(self, src) -> "bool": %include %include %include +%include %include %include %include diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 74b345dc8..507da630e 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1845,6 +1845,86 @@ namespace spot return force_inf_rec(&back(), rem); } + namespace + { + static acc_cond::acc_code keep_one_inf_per_branch_rec + (const acc_cond::acc_word* pos, bool inf_seen) + { + auto start = pos - pos->sub.size; + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + { + auto cur = --pos; + auto res = acc_cond::acc_code::t(); + // Make a first pass to find Inf(...) + if (!inf_seen) + do + { + if (cur->sub.op == acc_cond::acc_op::Inf) + { + res = acc_cond::acc_code::inf(cur[-1].mark.lowest()); + inf_seen = true; + break; + } + cur -= cur->sub.size + 1; + } + while (cur > start); + // Now process the rest. + do + { + if (pos->sub.op != acc_cond::acc_op::Inf) + { + auto tmp = + keep_one_inf_per_branch_rec(pos, inf_seen) & + std::move(res); + std::swap(tmp, res); + } + pos -= pos->sub.size + 1; + } + while (pos > start); + return res; + } + case acc_cond::acc_op::Or: + { + --pos; + auto res = acc_cond::acc_code::f(); + do + { + auto tmp = + keep_one_inf_per_branch_rec(pos, inf_seen) | std::move(res); + std::swap(tmp, res); + pos -= pos->sub.size + 1; + } + while (pos > start); + return res; + } + case acc_cond::acc_op::Fin: + return acc_cond::acc_code::fin(pos[-1].mark); + case acc_cond::acc_op::Inf: + if (inf_seen) + return acc_cond::acc_code::t(); + else + return acc_cond::acc_code::inf(pos[-1].mark.lowest()); + case acc_cond::acc_op::FinNeg: + case acc_cond::acc_op::InfNeg: + SPOT_UNREACHABLE(); + return {}; + } + SPOT_UNREACHABLE(); + return {}; + } + + } + + acc_cond::acc_code + acc_cond::acc_code::keep_one_inf_per_branch() const + { + if (is_t() || is_f()) + return *this; + return keep_one_inf_per_branch_rec(&back(), false); + } + acc_cond::mark_t acc_cond::acc_code::used_sets() const { diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 1749e45cd..6e03d3b20 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1391,6 +1391,13 @@ namespace spot /// \brief For all `x` in \a m, replaces `Fin(x)` by `false`. acc_code force_inf(mark_t m) const; + /// \brief Rewrite an acceptance condition by keeping at most + /// one Inf(x) on each dijunctive branch. + /// + /// For instance `(Fin(0)&Inf(1)&(Inf(2)|Fin(3))) | Inf(4)&Inf(5)` + /// will become `(Fin(0)&Inf(1) | Inf(4)` + acc_code keep_one_inf_per_branch() const; + /// \brief Return the set of sets appearing in the condition. acc_cond::mark_t used_sets() const; @@ -1992,6 +1999,16 @@ namespace spot return code_.inf_satisfiable(inf); } + /// \brief Rewrite an acceptance condition by keeping at most + /// one Inf(x) on each disjunctive branch. + /// + /// For instance `(Fin(0)&Inf(1)&(Inf(2)|Fin(3))) | Inf(4)&Inf(5)` + /// will become `(Fin(0)&Inf(1) | Inf(4)` + acc_cond keep_one_inf_per_branch() const + { + return {num_sets(), code_.keep_one_inf_per_branch()}; + } + /// \brief Check potential acceptance of an SCC. 
/// /// Assuming that an SCC intersects all sets in \a diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index 80884cdbb..ede79f81a 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -39,6 +39,7 @@ twaalgos_HEADERS = \ copy.hh \ cycles.hh \ dbranch.hh \ + deadends.hh \ degen.hh \ determinize.hh \ dot.hh \ @@ -114,6 +115,7 @@ libtwaalgos_la_SOURCES = \ contains.cc \ cycles.cc \ dbranch.cc \ + deadends.cc \ degen.cc \ determinize.cc \ dot.cc \ diff --git a/spot/twaalgos/deadends.cc b/spot/twaalgos/deadends.cc new file mode 100644 index 000000000..cb8b7f886 --- /dev/null +++ b/spot/twaalgos/deadends.cc @@ -0,0 +1,181 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" + +#include +#include + +namespace spot +{ + namespace + { + // Gather a disjunction of labels that appears on the edges of a + // dead-end state that have to be seen in order to make an + // accepting cycle. + static bdd + gather_useful_labels(const const_twa_graph_ptr& aut, + acc_cond::mark_t used_in_cond, + unsigned state) + { + // First, simplify the acceptance condition c based on the set + // of colors occurring around the state. + auto c = aut->get_acceptance(); + acc_cond::mark_t used_on_no_edge = used_in_cond; + acc_cond::mark_t used_on_all_edges = used_in_cond; + for (auto& e: aut->edges()) + { + used_on_no_edge -= e.acc; + used_on_all_edges &= e.acc; + } + + // if x appears on all edges, then + // Fin(x) = false and Inf(x) = true + if (used_on_all_edges) + c = c.remove(used_on_all_edges, false); + // if x appears on no edge at all, then + // Fin(x) = true and Inf(x) = false + if (used_on_no_edge) + c = c.remove(used_on_no_edge, true); + + if (c.is_f()) + return bddfalse; + if (c.is_t()) + return bddtrue; + + auto d = c.keep_one_inf_per_branch(); + + // Now look for edges that are useful to the simplified + // acceptance condition. + // We consider an edge as useful if its colors satisfy at + // least one Fin(x) or Inf(x) in the acceptance. + bdd useful = bddfalse; + for (auto& e: aut->out(state)) + if (d.accepting(e.acc)) + useful |= e.cond; + return useful; + } + } + + twa_graph_ptr + restrict_dead_end_edges_here(twa_graph_ptr& aut) + { + // We don't have anything to do if the automaton is deterministic. + if (aut->prop_universal()) + return aut; + if (!aut->is_existential()) + throw std::runtime_error + ("restrict_dead_end_edges_here() does not support alternation"); + unsigned ns = aut->num_states(); + // Find the states that are dead ends, i.e., that + // have only themselves as successors. + std::vector dead_end_states(ns, true); + // Also record the disjunction of all self-loops around each + // state. 
+ std::vector self_loops(ns, bddfalse); + for (auto& e: aut->edges()) + if (e.src == e.dst) + self_loops[e.src] |= e.cond; + else + dead_end_states[e.src] = false; + + // If the automaton is weak, we can consider every label of + // the dead-end state as useful. + bool is_weak = aut->prop_weak().is_true(); + + // This will hold the labels of the useful self-loops of the the + // dead-end states. But we don't want to initialize it until we + // need it. + std::vector dead_end_useful(is_weak ? 0U : ns, bddfalse); + std::vector dead_end_useful_computed(is_weak ? 0U : ns, false); + acc_cond::mark_t used_in_cond = aut->get_acceptance().used_sets(); + + std::vector label_unions(ns, bddfalse); + bool created_false_labels = false; + bool nondeterministic_for_sure = false; + for (unsigned s = 0; s < ns; ++s) + { + // compute a union of labels per dead-end destination + for (auto& e: aut->out(s)) + if (e.src != e.dst && dead_end_states[e.dst]) + label_unions[e.dst] |= e.cond; + + // Iterate over all edges (SRC,COND,DST), find those such that + // (1) DST is a dead-end, + // (2) Lab(DST,DST))⇒Lab(SRC,SRC) + // (3) UsefulLab(DST)⇒Lab(SRC,DST)⇒Lab(SRC,SRC) + // + // where Lab(X,Y) is the union of all labels between X and Y + // And UsefulLab(DST) are the labeled of the "useful" self + // loops of DST (see gather_useful_labels). + for (auto& e: aut->out(s)) + if (e.src != e.dst && dead_end_states[e.dst]) + { + if (bdd u = label_unions[e.dst], sl = self_loops[e.src]; + bdd_implies(u, sl) && bdd_implies(self_loops[e.dst], sl)) + { + // Find the edges of DST that are necessary to an + // accepting loop, and gather their labels. + bdd d; + if (is_weak) + { + d = self_loops[e.dst]; + } + else + { + if (!dead_end_useful_computed[e.dst]) + { + dead_end_useful[e.dst] = + gather_useful_labels(aut, used_in_cond, e.dst); + dead_end_useful_computed[e.dst] = true; + } + d = dead_end_useful[e.dst]; + } + if (bdd_implies(d, u)) + { + // Restrict the dead-end transition's label. + bdd cond = e.cond; + cond &= d; + if (cond != e.cond) + { + e.cond = cond; + if (cond == bddfalse) + created_false_labels = true; + else + nondeterministic_for_sure = true; + } + } + } + } + // reset unions before next iteration + for (auto& e: aut->out(s)) + label_unions[e.dst] = bddfalse; + } + // Note that restricting those label will never make the automaton + // deterministic. In fact, it could make the automaton + // non-deterministic. Additionally, completeness will not be + // changed. This is because the restricted Lab(SRC,DST) still + // implies Lab(SRC,SRC). + if (nondeterministic_for_sure) + aut->prop_universal(false); + if (created_false_labels) + aut->merge_edges(); + return aut; + } + +} diff --git a/spot/twaalgos/deadends.hh b/spot/twaalgos/deadends.hh new file mode 100644 index 000000000..58b2d017f --- /dev/null +++ b/spot/twaalgos/deadends.hh @@ -0,0 +1,51 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include + +namespace spot +{ + /// \brief restrict labels from "dead-end edges" + /// + /// A dead-end edge is an edge between two states S₁ and S₂ such + /// that S₂ has only itself as successor. I.e., once a run goes + /// through this "dead-end" edge, it gets stuck in S₂. + /// + /// Let Lab(S,D) denote the disjunction of all labels between S and + /// D. Let UsefulLab(D,D) be the disjunction of labels of any + /// subset of self-loops of D that will intersect all accepting + /// cycles around D (one way to compute this subset is to simplify + /// the acceptance condition with keep_one_inf_per_branch(), and + /// then keep each edge that satisfy it). + /// + /// Now, if the following implications are satisfied + /// + /// ⎧ UsefulLab(D,D) ⇒ Lab(S,D) ⇒ Lab(S,S),
+ /// ⎨
+ /// ⎩ Lab(D,D) ⇒ Lab(S,S).
+ /// + /// then any edge between S and D, labeled by ℓ⊆Lab(S,D) + /// can be replaced by ℓ∩UsefulLab(D,D). + /// + /// This algorithm has no effect on deterministic automata (where + /// it is not possible that Lab(S,D) ⇒ Lab(S,S)). + SPOT_API twa_graph_ptr + restrict_dead_end_edges_here(twa_graph_ptr& aut); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 0c3c995c5..cfe6a033a 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -270,6 +270,7 @@ TESTS_twa = \ core/dupexp.test \ core/exclusive-tgba.test \ core/remprop.test \ + core/deadends.test \ core/degendet.test \ core/degenid.test \ core/degenlskip.test \ @@ -413,6 +414,7 @@ TESTS_python = \ python/dbranch.py \ python/declenv.py \ python/decompose_scc.py \ + python/deadends.py \ python/det.py \ python/dualize.py \ python/ecfalse.py \ diff --git a/tests/core/deadends.test b/tests/core/deadends.test new file mode 100755 index 000000000..4ecb3d552 --- /dev/null +++ b/tests/core/deadends.test @@ -0,0 +1,70 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# The following formula was incorrectly reduced during +# the development of --restrict-dead-edges. +out=`ltl2tgba 'X((GF!p2 R p3) & G(p3 U p2))' | + autfilt --restrict-dead --stats='%T %t'` +test "$out" = '11 11' # should be twice the same value + +# The following formulas are all reduced +cat >input.ltl <. + +# Test that the spot.gen package works, in particular, we want +# to make sure that the objects created from spot.gen methods +# are usable with methods from the spot package. 
+ + +import spot +from unittest import TestCase +tc = TestCase() + +b = spot.translate('FGb') +a = spot.translate('GFa & GFc') +p = spot.product_susp(b, a) +q = spot.scc_filter(spot.simulation(p), True) +s = p.to_str() +spot.restrict_dead_end_edges_here(p) +s += p.to_str() +# Applying it twice should not change anything +spot.restrict_dead_end_edges_here(p) +s += p.to_str() + +tc.assertEqual(s, """HOA: v1 +States: 2 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +--END--HOA: v1 +States: 2 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[0&1&!2] 1 {0} +[0&1&2] 1 {0 1} +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +--END--HOA: v1 +States: 2 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[0&1&!2] 1 {0} +[0&1&2] 1 {0 1} +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +--END--""") + +spot.restrict_dead_end_edges_here(q) +s = q.to_str() +tc.assertEqual(s, """HOA: v1 +States: 2 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[0&1] 1 +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +--END--""") + +a = spot.translate('GFa & (FGb | FGc) & GFc') +s = a.to_str() +spot.restrict_dead_end_edges_here(a) +s += a.to_str() +tc.assertEqual(s, """HOA: v1 +States: 3 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[0&1 | 1&2] 1 +[2] 2 +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +State: 2 +[!0&2] 2 {1} +[0&2] 2 {0 1} +--END--HOA: v1 +States: 3 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: generalized-Buchi 2 +Acceptance: 2 Inf(0)&Inf(1) +properties: trans-labels explicit-labels trans-acc stutter-invariant +--BODY-- +State: 0 +[t] 0 +[0&1] 1 +[0&2] 2 +State: 1 +[!0&1&!2] 1 +[0&1&!2] 1 {0} +[!0&1&2] 1 {1} +[0&1&2] 1 {0 1} +State: 2 +[!0&2] 2 {1} +[0&2] 2 {0 1} +--END--""") From 6a7ef4db3f5ab2b92b8dc9a1323acfb5dee867e4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 17 Jul 2024 16:59:07 +0200 Subject: [PATCH 459/606] postprocess: call restrict_dead_end_edges_here() Related to issue #587. * spot/twaalgos/postproc.cc, spot/twaalgos/postproc.hh: Add support for option "rde". * bin/spot-x.cc, NEWS: Mention it. * tests/core/deadends.test, tests/core/ltl2tgba2.test, tests/python/atva16-fig2a.ipynb, tests/python/deadends.py: Adjust test cases to reflect the improvement. * tests/core/ltlsynt.test: Also adjust this test case, which is the only one worsened. Some extra gates are generated when translating GFa<->GFb with --algo=ds or --algo=sd. Issue #588 would be one way to fix that. 
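A quick illustration of the new default (again a sketch, not patch content): with the --high settings the postprocessor now applies restrict_dead_end_edges_here() on its own, while lower settings skip it; on the command line the knob is the "rde" extra option (e.g. `ltl2tgba -x rde=0`, as used in the adjusted deadends.test). The formula below is the one whose statistics change in ltl2tgba2.test, and the sketch assumes the Python bindings are built:

import spot

f = 'Ge | GF(Ge & X(c & Fd))'

# Default (--high) translation: dead-end edges are already restricted.
aut_high = spot.translate(f)

# The --low setting skips the pass; applying it by hand afterwards
# must not change the language.
aut_low = spot.translate(f, 'low')
spot.restrict_dead_end_edges_here(aut_low)

assert spot.are_equivalent(aut_high, aut_low)
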
---
 NEWS | 6 +++- bin/spot-x.cc | 6 ++++ spot/twaalgos/postproc.cc | 10 ++++++ spot/twaalgos/postproc.hh | 1 + tests/core/deadends.test | 12 ++++++- tests/core/ltl2tgba2.test | 6 ++-- tests/core/ltlsynt.test | 59 +++++++++++++++++---------------- tests/python/atva16-fig2a.ipynb | 14 ++++---- tests/python/deadends.py | 2 +-
 9 files changed, 75 insertions(+), 41 deletions(-)

diff --git a/NEWS b/NEWS
index 14b21c99e..45d0c958f 100644
--- a/NEWS
+++ b/NEWS
@@ -6,7 +6,7 @@ New in spot 2.12.0.dev (not yet released)
     edges leading to dead-ends.  See the description of
     restrict_dead_end_edges_here() below.

-  Library:
+  Library:

   - restrict_dead_end_edges_here() can reduce non-determinism (but
     not remove it) by restricting the label L of some edge (S)-L->(D)
@@ -14,6 +14,10 @@ New in spot 2.12.0.dev (not yet released)
     itself.  The conditions are detailled in the documentation of this
     function.

+  - spot::postprocessor will now call restrict_dead_end_edges_here()
+    in its highest setting.  This can be fine-tuned with the "rde"
+    extra option, see the spot-x (7) man page for details.
+
 New in spot 2.12 (2024-05-16)

   Build:
diff --git a/bin/spot-x.cc b/bin/spot-x.cc
index d29432a36..543afeac3 100644
--- a/bin/spot-x.cc
+++ b/bin/spot-x.cc
@@ -206,6 +206,12 @@ it can cause the created temporary automata to have incompatible \
 combinations of atomic propositions that will be eventually be removed. \
 This relabeling is attempted after relabel-bool. By default, N=8. Setting \
 this value to 0 will disable the rewriting.") },
+  { DOC("rde", "Disable (0), or enable (1) the 'restrict-dead-end-edges' \
+optimization. A dead-end edge is one that moves to a state that has only \
+itself as successor. The label of such edges can be simplified in some \
+situations, reducing non-determinism slightly. By default (-1), this is \
+enabled only in --high mode, or if both --medium and --deterministic are \
+used.") },
   { DOC("wdba-minimize", "Set to 0 to disable WDBA-minimization, to 1 to \
 always try it, or 2 to attempt it only on syntactic obligations or on automata \
 that are weak and deterministic. The default is 1 in --high mode, else 2 in \
diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc
index b7f6d27de..a73ce6430 100644
--- a/spot/twaalgos/postproc.cc
+++ b/spot/twaalgos/postproc.cc
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -108,6 +109,7 @@ namespace spot
     wdba_det_max_ = opt->get("wdba-det-max", 4096);
     simul_trans_pruning_ = opt->get("simul-trans-pruning", 512);
     acd_ = opt->get("acd", 1);
+    rde_ = opt->get("rde", -1);

     if (sat_acc_ && sat_minimize_ == 0)
       sat_minimize_ = 1;        // Dicho.
@@ -527,6 +529,14 @@ namespace spot
           }
       }

+    // Restricting dead-end edges only makes sense on non-deterministic
+    // automata.  rde_ == 0 disables this.  rde_ > 0 enables it.
+    // By default (rde_ < 0), we only enable this on High and Medium+Det.
+    if (!dba && rde_ != 0 && !is_deterministic(sim) &&
+        (rde_ > 0 || (level_ == High ||
+         (level_ == Medium && PREF_ == Deterministic))))
+      restrict_dead_end_edges_here(sim);
+
     // If WDBA failed, but the simulation returned a deterministic
     // automaton, use it as dba.
assert(dba || sim); diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 73da4baab..4ce6c0262 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -271,6 +271,7 @@ namespace spot int wdba_det_max_ = 4096; bool acd_ = true; bool acd_was_used_; + int rde_ = -1; }; /// @} } diff --git a/tests/core/deadends.test b/tests/core/deadends.test index 4ecb3d552..da1a4b91f 100755 --- a/tests/core/deadends.test +++ b/tests/core/deadends.test @@ -59,7 +59,8 @@ FG((Gp2 | Xp3) & (F!p2 | X!p3)) GFp0 & FGp1 & FGp2 & GFp3 EOF -ltl2tgba -F input.ltl | tee output.aut | +# disable rde so we can apply it manually +ltl2tgba -x rde=0 -F input.ltl | tee output.aut | autfilt --restrict-dead --stats="%T %t %M" | while read in out f; do : $in : $out : "$f" @@ -68,3 +69,12 @@ ltl2tgba -F input.ltl | tee output.aut | done autcross -F output.aut --language-preserved 'autfilt --restrict-dead' + +# by default, the result of ltl2tgba is already restricted +ltl2tgba -F input.ltl | + autfilt --restrict-dead --stats="%T %t %M" | + while read in out f; do + : $in : $out : "$f" + test $in -ne $out && exit 1 + : + done diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 8c0ca1dc0..c03dbc293 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -273,7 +273,7 @@ and-fg,32, $fg42, $fg42, $fg42, $fg42, $fg42, $fg42 !sb-patterns,5, 2,7, 2,7, 2,7, 2,7, 3,12, 3,12 !sb-patterns,6, 3,11, 4,14, 3,11, 4,14, 3,11, 4,14 !sb-patterns,7, 4,16, 4,16, 4,16, 4,16, 4,16, 4,16 -!sb-patterns,9, 3,13, 3,13, 4,17, 4,17, 5,21, 5,21 +!sb-patterns,9, 3,12, 3,12, 4,17, 4,17, 5,21, 5,21 !sb-patterns,10, 2,6, 2,6, 2,6, 2,6, 2,6, 2,6 !sb-patterns,11, 1,0, 1,0, 1,0, 1,0, 1,0, 1,0 !sb-patterns,12, 1,0, 1,0, 1,0, 1,0, 1,0, 1,0 @@ -340,7 +340,7 @@ and-fg,32, $fg42, $fg42, $fg42, $fg42, $fg42, $fg42 !hkrss-patterns,53, 4,32, 4,32, 4,32, 4,32, 4,32, 4,32 !hkrss-patterns,54, 4,32, 4,32, 4,32, 4,32, 4,32, 4,32 !hkrss-patterns,55, 5,12, 6,12, 5,12, 6,12, 5,12, 6,12 -!p-patterns,2, 2,15, 2,15, 3,19, 3,19, 4,23, 4,23 +!p-patterns,2, 2,14, 2,14, 3,19, 3,19, 4,23, 4,23 !p-patterns,3, 3,41, 3,41, 3,41, 3,41, 3,41, 3,41 !p-patterns,4, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1 !p-patterns,5, 2,6, 2,6, 2,6, 2,6, 2,6, 2,6 @@ -379,7 +379,7 @@ FG(a | Fb), 3,15, 3,15, 3,15, 3,15, 1,4 FG(a & Fb), 2,7, 2,7, 3,9, 3,9, 1,4 GF(a & Gb), 2,7, 2,7, 3,9, 3,9, 1,4 GF(a | Gb), 2,7, 2,7, 3,12, 3,12, 1,4 -Ge | GF(Ge & X(c & Fd)), 4,31, 4,31, 6,39, 6,39, 2,16 +Ge | GF(Ge & X(c & Fd)), 4,30, 4,30, 6,39, 6,39, 2,16 F(GF(b & Gc) | Ge), 3,22, 3,22, 4,26, 4,26, 1,8 EOF diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 77f661c40..acd935560 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -22,40 +22,43 @@ set -e cat >exp <\n", "\n", - "\n", "\n", "0->1\n", "\n", "\n", - "a | b\n", + "a\n", "\n", "\n", "\n", @@ -158,7 +158,7 @@ "\n", "\n", - "\n", "\n", "0->1\n", "\n", "\n", - "a | b\n", + "a\n", "\n", "\n", "\n", @@ -262,7 +262,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fdd801ca760> >" + " *' at 0x7f30b04c1d40> >" ] }, "execution_count": 3, @@ -345,9 +345,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.12.4" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/tests/python/deadends.py b/tests/python/deadends.py index fd5f18ea3..4b19f0a98 100644 --- a/tests/python/deadends.py +++ b/tests/python/deadends.py @@ -111,7 +111,7 @@ State: 1 [0&1&2] 1 {0 1} --END--""") -a = spot.translate('GFa & (FGb | 
FGc) & GFc') +a = spot.translate('GFa & (FGb | FGc) & GFc', xargs='rde=0') s = a.to_str() spot.restrict_dead_end_edges_here(a) s += a.to_str() From 0c52c490798a92fda213105592ea30a69366be25 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 18 Jul 2024 16:36:01 +0200 Subject: [PATCH 460/606] doc: fix documentation of P and R classes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * doc/tl/tl.tex: Remove four incorrect production rules in the grammar for φ_P and φ_R, and fix two. --- doc/tl/tl.tex | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 141daa7b8..8982f606f 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -1308,8 +1308,7 @@ any unbounded SERE. \sere{r_I}\Asuffix \varphi_S\\ \varphi_P ::={}& \varphi_O \mid \NOT\varphi_R\mid \varphi_P\AND \varphi_P\mid (\varphi_P\OR \varphi_P)\mid - \varphi_P\EQUIV \varphi_P\mid \varphi_P\XOR \varphi_P\mid - \varphi_P\IMPLIES \varphi_P\\ + \varphi_R\IMPLIES \varphi_P\\ \mid{}& \X\varphi_P \mid \F\varphi_P \mid \varphi_P\U\varphi_P\mid\varphi_P\R\varphi_S\mid \varphi_S\W\varphi_P\mid\varphi_P\M\varphi_P\\ @@ -1318,8 +1317,7 @@ any unbounded SERE. \sere{r_I}\Asuffix \varphi_S\\ \varphi_R ::={}& \varphi_O \mid \NOT\varphi_P\mid \varphi_R\AND \varphi_R\mid (\varphi_R\OR \varphi_R)\mid - \varphi_R\EQUIV \varphi_R\mid \varphi_R\XOR \varphi_R\mid - \varphi_R\IMPLIES \varphi_R\\ + \varphi_P\IMPLIES \varphi_R\\ \mid{}& \X\varphi_R \mid \G\varphi_R \mid \varphi_R\U\varphi_G\mid\varphi_R\R\varphi_R\mid \varphi_R\W\varphi_R\mid\varphi_G\M\varphi_R\\ From 7901a37747795acda07d6b545466386fefa2311b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 19 Jul 2024 17:04:21 +0200 Subject: [PATCH 461/606] =?UTF-8?q?formula:=20track=20=CE=94=E2=82=81,=20?= =?UTF-8?q?=CE=A3=E2=82=82,=20=CE=A0=E2=82=82,=20and=20=CE=94=E2=82=82=20m?= =?UTF-8?q?embership?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * spot/tl/formula.hh, spot/tl/formula.cc: Update the properties and track them. * tests/core/kind.test: Augment the test case. * doc/tl/tl.tex, doc/spot.bib, NEWS: Document these new classes. --- NEWS | 5 ++ doc/spot.bib | 15 ++++ doc/tl/tl.tex | 205 +++++++++++++++++++++++++++++++++++-------- spot/tl/formula.cc | 154 +++++++++++++++++++++++++------- spot/tl/formula.hh | 75 +++++++++++++--- tests/core/kind.test | 194 ++++++++++++++++++++-------------------- 6 files changed, 473 insertions(+), 175 deletions(-) diff --git a/NEWS b/NEWS index 45d0c958f..700942091 100644 --- a/NEWS +++ b/NEWS @@ -18,6 +18,11 @@ New in spot 2.12.0.dev (not yet released) in its highest setting. This can be fine-tuned with the "rde" extra option, see the spot-x (7) man page for detail. + - The formula class now keeps track of membership to the Δ₁, Σ₂, + Π₂, and Δ₂ syntactic class. This can be tested with + formula::is_delta1(), formula::is_sigma2(), formula::is_pi2(), + formula::is_delta2(). See doc/tl/tl.pdf from more discussion. 
+ New in spot 2.12 (2024-05-16) Build: diff --git a/doc/spot.bib b/doc/spot.bib index 3f24e40be..a2d5c1e9d 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -1,3 +1,4 @@ + @InProceedings{ babiak.12.tacas, author = {Tom{\'a}{\v{s}} Babiak and Mojm{\'i}r K{\v{r}}et{\'i}nsk{\'y} and Vojt{\v{e}}ch {\v{R}}eh{\'a}k @@ -470,6 +471,20 @@ doi = {10.1145/3209108.3209161} } +@Article{ esparza.24.acm, + author = {Javier Esparza and Rub\'{e}n Rubio and Salomon Sickert}, + title = {Efficient Normalization of Linear Temporal Logic}, + year = 2024, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {71}, + number = {2}, + issn = {0004-5411}, + doi = {10.1145/3651152}, + journal = {Journal of the ACM}, + month = apr +} + @InProceedings{ etessami.00.concur, author = {Kousha Etessami and Gerard J. Holzmann}, title = {Optimizing {B\"u}chi Automata}, diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 8982f606f..65db63511 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -1121,6 +1121,14 @@ instance using the following methods: recurrence property. \\\texttt{is\_syntactic\_persistence()}& Whether the formula is a syntactic persistence property. +\\\texttt{is\_syntactic\_delta1()}& Whether the formula belongs to + the $\Delta_1$ class. +\\\texttt{is\_syntactic\_pi2()}& Whether the formula belongs to + the $\Pi_2$ class. +\\\texttt{is\_syntactic\_sigma2()}& Whether the formula belongs to + the $\Sigma_2$ class. +\\\texttt{is\_syntactic\_delta2()}& Whether the formula belongs to + the $\Delta_2$ class. \\\texttt{is\_marked()}& Whether the formula contains a special ``marked'' version of the $\Esuffix$ or $\nsere{r}$ operators.\newfootnotemark{1} \\\texttt{accepts\_eword()}& Whether the formula accepts @@ -1198,6 +1206,9 @@ rules: \mid \varphi_U\M \varphi_U \end{align*} +Given a formula \texttt{f}, its membership to these two classes can be +tested with \texttt{f.is\_eventual()} and \texttt{f.is\_universal()}. + \section{Syntactic Hierarchy Classes} \begin{figure}[tbp] @@ -1221,12 +1232,13 @@ rules: \path[fill=green!40,fill opacity=.5] (6,0) -- (1.5,0) -- (6,3); \draw (0,0) rectangle (6,7); - \node[align=center] (rea) at (3,6) {Reactivity\\ $\bigwedge\G\F p_i\lor \F\G q_i$}; - \node[align=center] (rec) at (1,4.5) {Recurrence\\ $\G\F p$}; - \node[align=center] (per) at (5,4.5) {Persistence\\ $\F\G p$}; - \node[align=center] (obl) at (3,2.85) {Obligation\\ $\bigwedge\G p_i\lor \F q_i$}; - \node[align=center] (saf) at (1,1) {Safety\\ $\G p$}; - \node[align=center] (gua) at (5,1) {Guarantee\\ $\F p$}; + \node[align=center] (rea) at (3,5.9) {Reactivity\\ $\bigwedge\G\F p_i\lor \F\G q_i$ \\ $\Delta_2$}; + \node[align=center] (rec) at (1,4.4) {Recurrence\\ $\G\F p$ \\ $\Pi_2$}; + \node[align=center] (per) at (5,4.4) {Persistence\\ $\F\G p$ \\ $\Sigma_2$}; + \node[align=center] (obl) at (3,2.85) {Obligation\\ $\bigwedge\G p_i\lor \F q_i$ \\ $\Delta_1$}; + \node[align=center] (saf) at (1,1) {Safety\\ $\G p$ \\ $\Pi_1$}; + \node[align=center] (gua) at (5,1) {Guarantee\\ $\F p$ \\ $\Sigma_1$}; + \node[align=center] (bas) at (3,0.4) {$\Delta_0$}; \node[align=right,below left] (det) at (-.2,6.7) {Deterministic\\Büchi\\Automata}; \node[align=left,below right](weak) at (6.2,6.7) {Weak Büchi\\Automata}; @@ -1254,24 +1266,107 @@ presented by~\citet{chang.92.icalp}, but other presentations have been done including negation~\citep{cerna.03.mfcs} and weak until~\citep{schneider.01.lpar}. -The following grammar rules extend the aforementioned work slightly by -dealing with PSL operators. 
These are the rules used by Spot to
-decide upon construction to which class a formula belongs (see the
-methods \texttt{is\_syntactic\_safety()},
-\texttt{is\_syntactic\_guarantee()},
-\texttt{is\_syntactic\_obligation()},
-\texttt{is\_syntactic\_recurrence()}, and
-\texttt{is\_syntactic\_persistence()} listed on
-page~\pageref{property-methods}).
+Spot implements two versions of a syntactic hierarchy, and extends them
+to deal with PSL operators.
 
-The symbols $\varphi_G$, $\varphi_S$, $\varphi_O$, $\varphi_P$,
-$\varphi_R$ denote any formula belonging respectively to the
-Guarantee, Safety, Obligation, Persistence, or Recurrence classes.
-Additionally $\varphi_B$ denotes a finite LTL formula (the unnamed
-class at the intersection of Safety and Guarantee formulas, at the
-\textbf{b}ottom of Fig.~\ref{fig:hierarchy}).  $v$ denotes any
-variable, $r$ any SERE, $r_F$ any bounded SERE (no loops), and $r_I$
-any unbounded SERE.
+
+The first hierarchy is usually denoted with names such as $\Sigma_i$ and
+$\Pi_i$, as shown in Fig.~\ref{fig:hierarchy}.  Following Esparza et
+al.~\cite{esparza.24.acm}, we also introduce the $\Delta_0$,
+$\Delta_1$, and $\Delta_2$ classes.
+
+Intuitively, those classes are related to how the weak operators
+($\G$, $\W$, $\R$) alternate with the strong operators ($\U$, $\F$,
+$\M$) in a formula:
+
+\begin{itemize}
+\item the class $\Delta_0$ contains all formulas that may only
+  use $\X$ as temporal operator,
+\item formulas in $\Pi_1$ contain no strong operators,
+\item formulas in $\Sigma_1$ contain no weak operators,
+\item the class $\Delta_1$ contains all boolean combinations of
+  $\Pi_1$ and $\Sigma_1$ formulas,
+\item in each branch of a formula of $\Pi_2$ that contains both types
+  of operator, weak operators are all above strong operators,
+\item in each branch of a formula of $\Sigma_2$ that contains both types
+  of operator, strong operators are all above weak operators,
+\item the class $\Delta_2$ contains all boolean combinations of
+  $\Pi_2$ and $\Sigma_2$ formulas.
+\end{itemize}
+
+Those classes can be captured by the following grammar rules, where
+$v$ denotes any variable, $r$ any SERE, $r_F$ any bounded SERE (no
+loops), and $r_I$ any unbounded SERE.
+
+\begin{align*}
+  \varphi_{\Delta_0} ::={}& \0\mid\1\mid v\mid\NOT\varphi_{\Delta_0}\mid\varphi_{\Delta_0}\AND\varphi_{\Delta_0}
+  \mid(\varphi_{\Delta_0}\OR\varphi_{\Delta_0})\mid\varphi_{\Delta_0}\EQUIV\varphi_{\Delta_0}
+  \mid\varphi_{\Delta_0}\XOR\varphi_{\Delta_0}\mid\varphi_{\Delta_0}\IMPLIES\varphi_{\Delta_0}
+  \mid\X\varphi_{\Delta_0}\\
+  \mid{}& \sere{r_F}\mid \nsere{r_F}\\
+  \varphi_{\Sigma_1} ::={}& \varphi_{\Delta_0}\mid \NOT\varphi_{\Pi_1}\mid
+  \varphi_{\Sigma_1}\AND \varphi_{\Sigma_1}\mid (\varphi_{\Sigma_1}\OR \varphi_{\Sigma_1})
+  \mid\varphi_{\Pi_1}\IMPLIES\varphi_{\Sigma_1}\mid
+  \X\varphi_{\Sigma_1} \mid \F\varphi_{\Sigma_1}\mid
+  \varphi_{\Sigma_1}\U\varphi_{\Sigma_1}\mid \varphi_{\Sigma_1}\M\varphi_{\Sigma_1}\\
+  \mid{}& \nsere{r}\mid
+  \sere{r}\Esuffix \varphi_{\Sigma_1}\mid
+  \sere{r_F}\Asuffix \varphi_{\Sigma_1} \\
+  \varphi_{\Pi_1} ::={}& \varphi_{\Delta_0}\mid \NOT\varphi_{\Sigma_1}\mid
+  \varphi_{\Pi_1}\AND \varphi_{\Pi_1}\mid (\varphi_{\Pi_1}\OR \varphi_{\Pi_1})
+  \mid\varphi_{\Sigma_1}\IMPLIES\varphi_{\Pi_1}\mid
+  \X\varphi_{\Pi_1} \mid \G\varphi_{\Pi_1}\mid
+  \varphi_{\Pi_1}\R\varphi_{\Pi_1}\mid \varphi_{\Pi_1}\W\varphi_{\Pi_1}\\
+  \mid{}& \sere{r}\mid
+  \sere{r_F}\Esuffix \varphi_{\Pi_1}\mid
+  \sere{r}\Asuffix \varphi_{\Pi_1}\\
+  \varphi_{\Delta_1} ::={}& \varphi_{\Pi_1} \mid \varphi_{\Sigma_1}\mid \NOT\varphi_{\Delta_1}\mid
+  \varphi_{\Delta_1}\AND \varphi_{\Delta_1}\mid (\varphi_{\Delta_1}\OR \varphi_{\Delta_1})\mid
+  \varphi_{\Delta_1}\EQUIV \varphi_{\Delta_1}\mid \varphi_{\Delta_1}\XOR \varphi_{\Delta_1}\mid
+  \varphi_{\Delta_1}\IMPLIES \varphi_{\Delta_1}\\
+  \mid{}& \X\varphi_{\Delta_1} \mid{}
+  \sere{r_F}\Esuffix \varphi_{\Delta_1} \mid
+  \sere{r_F}\Asuffix \varphi_{\Delta_1}\\
+  \varphi_{\Sigma_2} ::={}& \varphi_{\Delta_1} \mid \NOT\varphi_{\Pi_2}\mid
+  \varphi_{\Sigma_2}\AND \varphi_{\Sigma_2}\mid (\varphi_{\Sigma_2}\OR \varphi_{\Sigma_2})\mid
+  \varphi_{\Pi_2}\IMPLIES \varphi_{\Sigma_2}\\
+  \mid{}& \X\varphi_{\Sigma_2} \mid \F\varphi_{\Sigma_2} \mid
+  \varphi_{\Sigma_2}\U\varphi_{\Sigma_2}\mid\varphi_{\Sigma_2}\M\varphi_{\Sigma_2}
+  \mid{} \sere{r}\Esuffix \varphi_{\Sigma_2}\mid \sere{r_F}\Asuffix \varphi_{\Sigma_2}\\
+  \varphi_{\Pi_2} ::={}& \varphi_{\Delta_1} \mid \NOT\varphi_{\Sigma_2}\mid
+  \varphi_{\Pi_2}\AND \varphi_{\Pi_2}\mid (\varphi_{\Pi_2}\OR \varphi_{\Pi_2})\mid
+  \varphi_{\Sigma_2}\IMPLIES \varphi_{\Pi_2}\\
+  \mid{}& \X\varphi_{\Pi_2} \mid \G\varphi_{\Pi_2} \mid
+  \varphi_{\Pi_2}\R\varphi_{\Pi_2}\mid
+  \varphi_{\Pi_2}\W\varphi_{\Pi_2} \mid{} \sere{r}\Asuffix \varphi_{\Pi_2}\mid \sere{r_F}\Esuffix \varphi_{\Pi_2}\\
+  \varphi_{\Delta_2} ::={}& \varphi_{\Pi_2} \mid \varphi_{\Sigma_2}\mid \NOT\varphi_{\Delta_2}\mid
+  \varphi_{\Delta_2}\AND \varphi_{\Delta_2}\mid (\varphi_{\Delta_2}\OR \varphi_{\Delta_2})\mid
+  \varphi_{\Delta_2}\EQUIV \varphi_{\Delta_2}\mid \varphi_{\Delta_2}\XOR \varphi_{\Delta_2}\mid
+  \varphi_{\Delta_2}\IMPLIES \varphi_{\Delta_2}\\
+  \mid{}& \X\varphi_{\Delta_2} \mid{} \sere{r_F}\Esuffix \varphi_{\Delta_2} \mid
+  \sere{r_F}\Asuffix \varphi_{\Delta_2}\\
+\end{align*}
+
+
+A nice property of these classes is that they are as expressive as
+their corresponding automata classes.  For instance, any LTL/PSL
+property that is representable by a deterministic Büchi automaton (the
+recurrence class) can be represented by an LTL/PSL formula in the
+$\Pi_2$ fragment, even if the original formula is not syntactically in
+$\Pi_2$.
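+
+For concreteness, here are a few classifications obtained by applying
+the rules above (they can also be checked with the membership methods
+listed in the table below): $\F\G a$ is in $\Sigma_2$ (a strong $\F$
+applied to the $\Pi_1$ formula $\G a$); symmetrically, $\G\F a$ is in
+$\Pi_2$; the formula $(\G a)\U b$ is in $\Sigma_2$ but not in $\Pi_2$,
+because the weak $\G$ occurs below the strong $\U$; finally,
+$\G\F a\OR\F\G b$ is in $\Delta_2$ even though it belongs to neither
+$\Sigma_2$ nor $\Pi_2$.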
+
+If the objective is to classify properties syntactically, it is useful
+to rely on slightly more complete grammar rules.  In the following
+list, the rules use the initial letters $G$, $S$, $O$, $P$, and $R$ of
+their corresponding property classes, as listed in
+Fig.~\ref{fig:hierarchy} (i.e., Guarantee, Safety, Obligation,
+Persistence, Recurrence).  Additionally, $B$ denotes the ``bottom''
+class (a.k.a. $\Delta_0$).  Note that $\varphi_B$, $\varphi_G$, and
+$\varphi_S$ are rigorously equivalent to $\varphi_{\Delta_0}$,
+$\varphi_{\Sigma_1}$, and $\varphi_{\Pi_1}$.  The differences in the
+higher classes are \colorbox{yellow}{highlighted}.  There is no
+generalization of $\varphi_{\Delta_2}$ since any LTL/PSL formula is a
+reactivity property.
 
 \begin{align*}
   \varphi_B ::={}& \0\mid\1\mid v\mid\NOT\varphi_B\mid\varphi_B\AND\varphi_B
@@ -1300,36 +1395,70 @@ any unbounded SERE.
    \varphi_O\EQUIV \varphi_O\mid \varphi_O\XOR \varphi_O\mid
    \varphi_O\IMPLIES \varphi_O\\
    \mid{}& \X\varphi_O \mid
-   \varphi_O\U\varphi_G\mid\varphi_O\R\varphi_S \mid
-   \varphi_S\W\varphi_O\mid \varphi_G\M\varphi_O\\
-   \mid{}& \sere{r} \mid \nsere{r}\mid
-   \sere{r_F}\Esuffix \varphi_O \mid \sere{r_I}\Esuffix \varphi_G\mid
+   \colorbox{yellow}{$\varphi_O\U\varphi_G$}\mid
+   \colorbox{yellow}{$\varphi_O\R\varphi_S$}\mid
+   \colorbox{yellow}{$\varphi_S\W\varphi_O$}\mid
+   \colorbox{yellow}{$\varphi_G\M\varphi_O$}\\
+   \mid{}& \sere{r_F}\Esuffix \varphi_O \mid \colorbox{yellow}{$\sere{r_I}\Esuffix \varphi_G$}\mid
    \sere{r_F}\Asuffix \varphi_O\mid
-   \sere{r_I}\Asuffix \varphi_S\\
+   \colorbox{yellow}{$\sere{r_I}\Asuffix \varphi_S$}\\
   \varphi_P ::={}& \varphi_O \mid \NOT\varphi_R\mid
    \varphi_P\AND \varphi_P\mid (\varphi_P\OR \varphi_P)\mid
    \varphi_R\IMPLIES \varphi_P\\
    \mid{}& \X\varphi_P \mid \F\varphi_P \mid
-   \varphi_P\U\varphi_P\mid\varphi_P\R\varphi_S\mid
-   \varphi_S\W\varphi_P\mid\varphi_P\M\varphi_P\\
+   \varphi_P\U\varphi_P\mid\colorbox{yellow}{$\varphi_P\R\varphi_S$}\mid
+   \colorbox{yellow}{$\varphi_S\W\varphi_P$}\mid\varphi_P\M\varphi_P\\
    \mid{}& \sere{r}\Esuffix \varphi_P\mid
    \sere{r_F}\Asuffix \varphi_P\mid
-   \sere{r_I}\Asuffix \varphi_S\\
+   \colorbox{yellow}{$\sere{r_I}\Asuffix \varphi_S$}\\
   \varphi_R ::={}& \varphi_O \mid \NOT\varphi_P\mid
    \varphi_R\AND \varphi_R\mid (\varphi_R\OR \varphi_R)\mid
    \varphi_P\IMPLIES \varphi_R\\
    \mid{}& \X\varphi_R \mid \G\varphi_R \mid
-   \varphi_R\U\varphi_G\mid\varphi_R\R\varphi_R\mid
-   \varphi_R\W\varphi_R\mid\varphi_G\M\varphi_R\\
-   \mid{}& \sere{r}\Asuffix \varphi_R\mid \sere{r_F}\Esuffix \varphi_R \mid \sere{r_I}\Esuffix \varphi_G\\
+   \colorbox{yellow}{$\varphi_R\U\varphi_G$}\mid\varphi_R\R\varphi_R\mid
+   \varphi_R\W\varphi_R\mid\colorbox{yellow}{$\varphi_G\M\varphi_R$}\\
+   \mid{}& \sere{r}\Asuffix \varphi_R\mid \sere{r_F}\Esuffix \varphi_R \mid \colorbox{yellow}{$\sere{r_I}\Esuffix \varphi_G$}\\
 \end{align*}
 
+
 It should be noted that a formula can belong to a class of the
 temporal hierarchy even if it does not syntactically appears so.  For
-instance the formula $(\G(q\OR \F\G p)\AND \G(r\OR \F\G\NOT p))\OR\G
-q\OR \G r$ is not syntactically safe, yet it is a safety formula
-equivalent to $\G q\OR \G r$.  Such a formula is usually said
-\emph{pathologically safe}.
+instance the formula
+$(\G(q\OR \F\G p)\AND \G(r\OR \F\G\NOT p))\OR\G q\OR \G r$ is not
+syntactically safe (and isn't even in $\Delta_2$), yet it is a safety
+formula equivalent to $\G q\OR \G r$ (which is in $\Pi_1$, the
+syntactic class of safety formulas).  Such a formula is usually said
+to be a \emph{pathological safety} formula.
+ +To illustrate the difference in the grammar for the higher classes, +consider the formula $\G((\G a) \U b)$. This formula can be converted +to a deterministic Büchi automaton, so it specifies a recurrence +property. It is captured by the grammar rule for $\varphi_R$ above, +yet it does not belong to the $\Pi_2$ class because of the alternation +between weak ($\G$), strong ($\U$), and weak ($\G$) operators. +However the equivalent formula $\G((\G a) \W b))\land \G\F b$ belongs +to $\Pi_2$. + +Spot computes the membership to each of those class whenever a formula +$f$ is constructed. Here is how the membership to each of those class +can be tested: +\begin{center} +\begin{tabular}{cl} + \toprule + $f\in \Delta_0$ & \texttt{f.is\_syntactic\_safety() \&\& f.is\_syntactic\_guarantee()} \\ + $f\in \Pi_1$, $f\in S$ & \texttt{f.is\_syntactic\_safety()} \\ + $f\in \Sigma_1$, $f\in G$ & \texttt{f.is\_syntactic\_guarantee()} \\ + $f\in \Delta_1$ & \texttt{f.is\_delta1()} \\ + $f\in O$ & \texttt{f.is\_syntactic\_obligation()} \\ + $f\in \Pi_2$ & \texttt{f.is\_pi2()} \\ + $f\in R$ & \texttt{f.is\_syntactic\_recurrence()} \\ + $f\in \Sigma_2$ & \texttt{f.is\_sigma2()} \\ + $f\in P$ & \texttt{f.is\_syntactic\_persistence()} \\ + $f\in \Delta_2$ & \texttt{f.is\_delta2()} \\ + \bottomrule +\end{tabular} +\end{center} + \chapter{Rewritings} diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 1a1a4fb47..fe5770931 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1290,6 +1290,10 @@ namespace spot is_.accepting_eword = false; is_.lbt_atomic_props = true; is_.spin_atomic_props = true; + is_.delta1 = true; + is_.sigma2 = true; + is_.pi2 = true; + is_.delta2 = true; break; case op::eword: is_.boolean = false; @@ -1312,6 +1316,10 @@ namespace spot is_.accepting_eword = true; is_.lbt_atomic_props = true; is_.spin_atomic_props = true; + is_.delta1 = true; + is_.sigma2 = true; + is_.pi2 = true; + is_.delta2 = true; break; case op::ap: is_.boolean = true; @@ -1348,6 +1356,10 @@ namespace spot is_.lbt_atomic_props = lbtap; is_.spin_atomic_props = lbtap || is_spin_ap(n.c_str()); } + is_.delta1 = true; + is_.sigma2 = true; + is_.pi2 = true; + is_.delta2 = true; break; case op::Not: props = children[0]->props; @@ -1364,6 +1376,10 @@ namespace spot is_.syntactic_persistence = children[0]->is_syntactic_recurrence(); is_.accepting_eword = false; + // is_.delta1 inherited + is_.sigma2 = children[0]->is_pi2(); + is_.pi2 = children[0]->is_sigma2(); + // is_.delta2 inherited break; case op::X: case op::strong_X: @@ -1382,6 +1398,10 @@ namespace spot // we could make sense of it if we start supporting LTL over // finite traces. 
is_.accepting_eword = false; + // is_.delta1 inherited + // is_.sigma2 inherited + // is_.pi2 inherited + // is_.delta2 inherited break; case op::F: props = children[0]->props; @@ -1397,6 +1417,10 @@ namespace spot is_.syntactic_recurrence = is_.syntactic_guarantee; // is_.syntactic_persistence inherited is_.accepting_eword = false; + is_.delta1 = is_.syntactic_guarantee; + is_.pi2 = is_.syntactic_guarantee; + // is_.sigma2 inherited + is_.delta2 = is_.pi2 | is_.sigma2; break; case op::G: props = children[0]->props; @@ -1412,6 +1436,10 @@ namespace spot // is_.syntactic_recurrence inherited is_.syntactic_persistence = is_.syntactic_safety; is_.accepting_eword = false; + is_.delta1 = is_.syntactic_safety; + is_.sigma2 = is_.syntactic_safety; + // is_.pi2 inherited + is_.delta2 = is_.pi2 | is_.sigma2; break; case op::NegClosure: case op::NegClosureMarked: @@ -1427,6 +1455,10 @@ namespace spot is_.syntactic_recurrence = true; is_.syntactic_persistence = true; is_.accepting_eword = false; + is_.delta1 = true; + is_.sigma2 = true; + is_.pi2 = true; + is_.delta2 = true; assert(children[0]->is_sere_formula()); assert(!children[0]->is_boolean()); break; @@ -1443,6 +1475,10 @@ namespace spot is_.syntactic_recurrence = true; is_.syntactic_persistence = true; is_.accepting_eword = false; + is_.delta1 = true; + is_.sigma2 = true; + is_.pi2 = true; + is_.delta2 = true; assert(children[0]->is_sere_formula()); assert(!children[0]->is_boolean()); break; @@ -1473,6 +1509,17 @@ namespace spot is_.syntactic_recurrence = false; is_.syntactic_persistence = false; } + if (is_.delta1) + { + assert(is_.pi2 == true); + assert(is_.sigma2 == true); + assert(is_.delta2 == true); + } + else + { + is_.pi2 = false; + is_.sigma2 = false; + } break; case op::Implies: props = children[0]->props & children[1]->props; @@ -1494,6 +1541,10 @@ namespace spot is_.syntactic_recurrence = children[0]->is_syntactic_persistence() && children[1]->is_syntactic_recurrence(); is_.accepting_eword = false; + // is_.delta1 inherited + is_.sigma2 = children[0]->is_pi2() && children[1]->is_sigma2(); + is_.pi2 = children[0]->is_sigma2() && children[1]->is_pi2(); + // is_.delta2 inherited break; case op::EConcatMarked: case op::EConcat: @@ -1507,18 +1558,25 @@ namespace spot is_.syntactic_guarantee = children[1]->is_syntactic_guarantee(); is_.syntactic_persistence = children[1]->is_syntactic_persistence(); - if (children[0]->is_finite()) + is_.sigma2 = children[1]->is_sigma2(); + if (children[0]->is_finite()) // behaves like X { is_.syntactic_safety = children[1]->is_syntactic_safety(); is_.syntactic_obligation = children[1]->is_syntactic_obligation(); is_.syntactic_recurrence = children[1]->is_syntactic_recurrence(); + is_.delta1 = children[1]->is_delta1(); + is_.pi2 = children[1]->is_pi2(); + is_.delta2 = children[1]->is_delta2(); } - else + else // behaves like F { is_.syntactic_safety = false; bool g = children[1]->is_syntactic_guarantee(); is_.syntactic_obligation = g; is_.syntactic_recurrence = g; + is_.delta1 = g; + is_.pi2 = g; + is_.delta2 = g | is_.sigma2; } assert(children[0]->is_sere_formula()); assert(children[1]->is_psl_formula()); @@ -1536,19 +1594,25 @@ namespace spot is_.syntactic_safety = children[1]->is_syntactic_safety(); is_.syntactic_recurrence = children[1]->is_syntactic_recurrence(); - if (children[0]->is_finite()) + is_.pi2 = children[1]->is_pi2(); + if (children[0]->is_finite()) // behaves like X { is_.syntactic_guarantee = children[1]->is_syntactic_guarantee(); is_.syntactic_obligation = 
children[1]->is_syntactic_obligation(); - is_.syntactic_persistence = - children[1]->is_syntactic_persistence(); + is_.syntactic_persistence = children[1]->is_syntactic_persistence(); + is_.delta1 = children[1]->is_delta1(); + is_.sigma2 = children[1]->is_sigma2(); + is_.delta2 = children[1]->is_delta2(); } - else + else // behaves like G { is_.syntactic_guarantee = false; bool s = children[1]->is_syntactic_safety(); is_.syntactic_obligation = s; is_.syntactic_persistence = s; + is_.delta1 = s; + is_.sigma2 = s; + is_.delta2 = is_.pi2 | s; } assert(children[0]->is_sere_formula()); assert(children[1]->is_psl_formula()); @@ -1596,6 +1660,10 @@ namespace spot children[0]->is_syntactic_recurrence() && children[1]->is_syntactic_guarantee(); // is_.syntactic_persistence = Persistence U Persistance + is_.delta1 = is_.syntactic_guarantee; + // is_.sigma2 = Σ₂ U Σ₂ + is_.pi2 = is_.syntactic_guarantee; + is_.delta2 = is_.sigma2 | is_.pi2; break; case op::W: // See comment for op::U. @@ -1617,7 +1685,10 @@ namespace spot is_.syntactic_persistence = // Safety W Persistance children[0]->is_syntactic_safety() && children[1]->is_syntactic_persistence(); - + is_.delta1 = is_.syntactic_safety; + is_.sigma2 = is_.syntactic_safety; + // is_.pi2 = Π₂ U Π₂ + is_.delta2 = is_.sigma2 | is_.pi2; break; case op::R: // See comment for op::U. @@ -1640,7 +1711,10 @@ namespace spot is_.syntactic_persistence = // Persistence R Safety children[0]->is_syntactic_persistence() && children[1]->is_syntactic_safety(); - + is_.delta1 = is_.syntactic_safety; + is_.sigma2 = is_.syntactic_safety; + // is_.pi2 = Π₂ U Π₂ + is_.delta2 = is_.sigma2 | is_.pi2; break; case op::M: // See comment for op::U. @@ -1662,7 +1736,10 @@ namespace spot children[0]->is_syntactic_guarantee() && children[1]->is_syntactic_recurrence(); // is_.syntactic_persistence = Persistence M Persistance - + is_.delta1 = is_.syntactic_guarantee; + // is_.sigma2 = Σ₂ M Σ₂ + is_.pi2 = is_.syntactic_guarantee; + is_.delta2 = is_.sigma2 | is_.pi2; break; case op::Or: { @@ -1787,6 +1864,10 @@ namespace spot is_.syntactic_obligation = false; is_.syntactic_recurrence = false; is_.syntactic_persistence = false; + is_.delta1 = false; + is_.pi2 = false; + is_.sigma2 = false; + is_.delta2 = false; switch (op_) { @@ -1832,6 +1913,10 @@ namespace spot is_.syntactic_obligation = false; is_.syntactic_recurrence = false; is_.syntactic_persistence = false; + is_.delta1 = false; + is_.pi2 = false; + is_.sigma2 = false; + is_.delta2 = false; break; } } @@ -2011,31 +2096,38 @@ namespace spot return strverscmp(f->ap_name().c_str(), g->ap_name().c_str()); } -#define printprops \ - proprint(is_boolean, "B", "Boolean formula"); \ +#define printprops \ + proprint(is_boolean, "B", "Boolean formula"); \ proprint(is_sugar_free_boolean, "&", "without Boolean sugar"); \ - proprint(is_in_nenoform, "!", "in negative normal form"); \ - proprint(is_syntactic_stutter_invariant, "x", \ - "syntactic stutter invariant"); \ + proprint(is_in_nenoform, "!", "in negative normal form"); \ + proprint(is_syntactic_stutter_invariant, "x", \ + "syntactic stutter invariant"); \ proprint(is_sugar_free_ltl, "f", "without LTL sugar"); \ - proprint(is_ltl_formula, "L", "LTL formula"); \ - proprint(is_psl_formula, "P", "PSL formula"); \ - proprint(is_sere_formula, "S", "SERE formula"); \ - proprint(is_finite, "F", "finite"); \ - proprint(is_eventual, "e", "pure eventuality"); \ - proprint(is_universal, "u", "purely universal"); \ - proprint(is_syntactic_safety, "s", "syntactic safety"); \ - 
proprint(is_syntactic_guarantee, "g", "syntactic guarantee"); \ - proprint(is_syntactic_obligation, "o", "syntactic obligation"); \ - proprint(is_syntactic_persistence, "p", "syntactic persistence"); \ - proprint(is_syntactic_recurrence, "r", "syntactic recurrence"); \ - proprint(is_marked, "+", "marked"); \ - proprint(accepts_eword, "0", "accepts the empty word"); \ - proprint(has_lbt_atomic_props, "l", \ - "has LBT-style atomic props"); \ - proprint(has_spin_atomic_props, "a", \ - "has Spin-style atomic props"); + proprint(is_ltl_formula, "L", "LTL formula"); \ + proprint(is_psl_formula, "P", "PSL formula"); \ + proprint(is_sere_formula, "S", "SERE formula"); \ + proprint(is_finite, "F", "finite"); \ + proprint(is_eventual, "e", "pure eventuality"); \ + proprint(is_universal, "u", "purely universal"); \ + proprint(is_syntactic_safety, "s", "syntactic safety"); \ + proprint(is_syntactic_guarantee, "g", "syntactic guarantee"); \ + proprint(is_syntactic_obligation, "o", "syntactic obligation"); \ + proprint(is_syntactic_persistence, "p", "syntactic persistence"); \ + proprint(is_syntactic_recurrence, "r", "syntactic recurrence"); \ + proprint(is_marked, "+", "marked"); \ + proprint(accepts_eword, "0", "accepts the empty word"); \ + proprint(has_lbt_atomic_props, "l", \ + "has LBT-style atomic props"); \ + proprint(has_spin_atomic_props, "a", \ + "has Spin-style atomic props"); \ + proprint(is_delta1, "O", "delta1"); \ + proprint(is_sigma2, "P", "sigma2"); \ + proprint(is_pi2, "R", "pi2"); \ + proprint(is_delta2, "D", "delta2"); + // O (for Δ₁), P (for Σ₂), R (for Π₂) are the uppercase versions of + // o (obligation), p (persistence), r (recurrence) because they are + // stricter subsets of those. std::list list_formula_props(const formula& f) diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index 4f56c38bc..d2d3dd080 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -522,6 +522,30 @@ namespace spot return is_.spin_atomic_props; } + /// \see formula::is_sigma2 + bool is_sigma2() const + { + return is_.sigma2; + } + + /// \see formula::is_pi2 + bool is_pi2() const + { + return is_.pi2; + } + + /// \see formula::is_delta1 + bool is_delta1() const + { + return is_.delta1; + } + + /// \see formula::is_delta2 + bool is_delta2() const + { + return is_.delta2; + } + private: static size_t bump_next_id(); void setup_props(op o); @@ -627,15 +651,19 @@ namespace spot bool finite:1; // Finite SERE formulae, or Bool+X forms. bool eventual:1; // Purely eventual formula. bool universal:1; // Purely universal formula. - bool syntactic_safety:1; // Syntactic Safety Property. - bool syntactic_guarantee:1; // Syntactic Guarantee Property. - bool syntactic_obligation:1; // Syntactic Obligation Property. - bool syntactic_recurrence:1; // Syntactic Recurrence Property. - bool syntactic_persistence:1; // Syntactic Persistence Property. + bool syntactic_safety:1; // Syntactic Safety Property (S). + bool syntactic_guarantee:1; // Syntactic Guarantee Property (G). + bool syntactic_obligation:1; // Syntactic Obligation Property (O). + bool syntactic_recurrence:1; // Syntactic Recurrence Property (R). + bool syntactic_persistence:1; // Syntactic Persistence Property (P). bool not_marked:1; // No occurrence of EConcatMarked. bool accepting_eword:1; // Accepts the empty word. bool lbt_atomic_props:1; // Use only atomic propositions like p42. bool spin_atomic_props:1; // Use only spin-compatible atomic props. + bool delta1:1; // Boolean combination of (S) and (G). + bool sigma2:1; // Boolean comb. 
of (S) with X/F/U/M possibly applied.
+    bool pi2:1;    // Boolean comb. of (G) with X/G/R/W possibly applied.
+    bool delta2:1; // Boolean combination of (Σ₂) and (Π₂).
   };
   union
   {
@@ -1698,16 +1726,43 @@ namespace spot
     /// universal formula also satisfies the formula.
     /// \cite etessami.00.concur
     SPOT_DEF_PROP(is_universal);
-    /// Whether a PSL/LTL formula is syntactic safety property.
+    /// \brief Whether a PSL/LTL formula is syntactic safety property.
+    ///
+    /// This class is also called Π₁.
     SPOT_DEF_PROP(is_syntactic_safety);
-    /// Whether a PSL/LTL formula is syntactic guarantee property.
+    /// \brief Whether a PSL/LTL formula is syntactic guarantee property.
+    ///
+    /// This class is also called Σ₁.
     SPOT_DEF_PROP(is_syntactic_guarantee);
-    /// Whether a PSL/LTL formula is syntactic obligation property.
+    /// \brief Whether a PSL/LTL formula is in the Δ₁ syntactic fragment.
+    ///
+    /// A formula is in Δ₁ if it is a boolean combination of syntactic
+    /// safety and syntactic guarantee properties.
+    SPOT_DEF_PROP(is_delta1);
+    /// \brief Whether a PSL/LTL formula is syntactic obligation property.
+    ///
+    /// This class is a proper syntactic superset of Δ₁, but has the
+    /// same expressive power.
     SPOT_DEF_PROP(is_syntactic_obligation);
-    /// Whether a PSL/LTL formula is syntactic recurrence property.
+    /// Whether a PSL/LTL formula is in Σ₂
+    SPOT_DEF_PROP(is_sigma2);
+    /// Whether a PSL/LTL formula is in Π₂
+    SPOT_DEF_PROP(is_pi2);
+    /// \brief Whether a PSL/LTL formula is syntactic recurrence property.
+    ///
+    /// This class is a proper syntactic superset of Π₂, but has the
+    /// same expressive power.
     SPOT_DEF_PROP(is_syntactic_recurrence);
-    /// Whether a PSL/LTL formula is syntactic persistence property.
+    /// \brief Whether a PSL/LTL formula is syntactic persistence property.
+    ///
+    /// This class is a proper syntactic superset of Σ₂, but has the
+    /// same expressive power.
    SPOT_DEF_PROP(is_syntactic_persistence);
+    /// \brief Whether a PSL/LTL formula is in the Δ₂ syntactic fragment.
+    ///
+    /// A formula is in Δ₂ if it is a boolean combination of Σ₂ and Π₂
+    /// properties.
+ SPOT_DEF_PROP(is_delta2); /// \brief Whether the formula has an occurrence of EConcatMarked /// or NegClosureMarked SPOT_DEF_PROP(is_marked); diff --git a/tests/core/kind.test b/tests/core/kind.test index 93c7dcef4..14e9870fa 100755 --- a/tests/core/kind.test +++ b/tests/core/kind.test @@ -25,118 +25,120 @@ set -e cat >input<b,BxfLPSFsgopra -!a,B&!xfLPSFsgopra -!(a|b),B&xfLPSFsgopra -F(a),&!xLPegopra -G(a),&!xLPusopra -a U b,&!xfLPgopra -a U Fb,&!xLPegopra -Ga U b,&!xLPopra -1 U a,&!xfLPegopra -a W b,&!xfLPsopra -a W 0,&!xfLPusopra -a M b,&!xfLPgopra -a M 1,&!xfLPegopra -a R b,&!xfLPsopra -0 R b,&!xfLPusopra -a R (b R (c R d)),&!xfLPsopra -a U (b U (c U d)),&!xfLPgopra -a W (b W (c W d)),&!xfLPsopra -a M (b M (c M d)),&!xfLPgopra -Fa -> Fb,xLPopra -Ga -> Fb,xLPegopra -Fa -> Gb,xLPusopra -(Ga|Fc) -> Fb,xLPopra -(Ga|Fa) -> Gb,xLPopra -{a;c*;b}|->!Xb,&fPsopra -{a;c*;b}|->X!b,&!fPsopra -{a;c*;b}|->!Fb,&Psopra -{a;c*;b}|->G!b,&!Psopra -{a;c*;b}|->!Gb,&Pra -{a;c*;b}|->F!b,&!Pra -{a;c*;b}|->GFa,&!Pra +a,B&!xfLPSFsgopraOPRD +a<->b,BxfLPSFsgopraOPRD +!a,B&!xfLPSFsgopraOPRD +!(a|b),B&xfLPSFsgopraOPRD +F(a),&!xLPegopraOPRD +G(a),&!xLPusopraOPRD +a U b,&!xfLPgopraOPRD +a U Fb,&!xLPegopraOPRD +Ga U b,&!xLPopraPD +1 U a,&!xfLPegopraOPRD +a W b,&!xfLPsopraOPRD +a W 0,&!xfLPusopraOPRD +a M b,&!xfLPgopraOPRD +a M 1,&!xfLPegopraOPRD +a R b,&!xfLPsopraOPRD +0 R b,&!xfLPusopraOPRD +a R (b R (c R d)),&!xfLPsopraOPRD +a U (b U (c U d)),&!xfLPgopraOPRD +a W (b W (c W d)),&!xfLPsopraOPRD +a M (b M (c M d)),&!xfLPgopraOPRD +Fa -> Fb,xLPopraOPRD +Ga -> Fb,xLPegopraOPRD +Fa -> Gb,xLPusopraOPRD +(Ga|Fc) -> Fb,xLPopraOPRD +(Ga|Fa) -> Gb,xLPopraOPRD +{a;c*;b}|->!Xb,&fPsopraOPRD +{a;c*;b}|->X!b,&!fPsopraOPRD +{a;c*;b}|->!Fb,&PsopraOPRD +{a;c*;b}|->G!b,&!PsopraOPRD +{a;c*;b}|->!Gb,&PraRD +{a;c*;b}|->F!b,&!PraRD +{a;c*;b}|->GFa,&!PraRD {a;c*;b}|->FGa,&!Pa -{a[+];c[+];b*}|->!Fb,&Psopra -{a*;c[+];b*}|->!Fb,&xPsopra -{a[+];c*;b[+]}|->G!b,&!Psopra -{a*;c[+];b[+]}|->!Gb,&Pra -{a[+];c*;b[+]}|->F!b,&!Pra -{a[+];c[+];b*}|->GFa,&!Pra +{a[+];c[+];b*}|->!Fb,&PsopraOPRD +{a*;c[+];b*}|->!Fb,&xPsopraOPRD +{a[+];c*;b[+]}|->G!b,&!PsopraOPRD +{a*;c[+];b[+]}|->!Gb,&PraRD +{a[+];c*;b[+]}|->F!b,&!PraRD +{a[+];c[+];b*}|->GFa,&!PraRD {a*;c[+];b[+]}|->FGa,&!Pa -{a;c;b|(d;e)}|->!Xb,&fPFsgopra -{a;c;b|(d;e)}|->X!b,&!fPFsgopra -{a;c;b|(d;e)}|->!Fb,&Psopra -{a;c;b|(d;e)}|->G!b,&!Psopra -{a;c;b|(d;e)}|->!Gb,&Pgopra -{a;c;b|(d;e)}|->F!b,&!Pgopra -{a;c;b|(d;e)}|->GFa,&!Pra -{a;c;b|(d;e)}|->FGa,&!Ppa -{a[+] && c[+]}|->!Xb,&fPsopra -{a[+] && c[+]}|->X!b,&!fPsopra -{a[+] && c[+]}|->!Fb,&xPsopra -{a[+] && c[+]}|->G!b,&!xPsopra -{a[+] && c[+]}|->!Gb,&xPra -{a[+] && c[+]}|->F!b,&!xPra -{a[+] && c[+]}|->GFa,&!xPra +{a;c;b|(d;e)}|->!Xb,&fPFsgopraOPRD +{a;c;b|(d;e)}|->X!b,&!fPFsgopraOPRD +{a;c;b|(d;e)}|->!Fb,&PsopraOPRD +{a;c;b|(d;e)}|->G!b,&!PsopraOPRD +{a;c;b|(d;e)}|->!Gb,&PgopraOPRD +{a;c;b|(d;e)}|->F!b,&!PgopraOPRD +{a;c;b|(d;e)}|->GFa,&!PraRD +{a;c;b|(d;e)}|->FGa,&!PpaPD +{a[+] && c[+]}|->!Xb,&fPsopraOPRD +{a[+] && c[+]}|->X!b,&!fPsopraOPRD +{a[+] && c[+]}|->!Fb,&xPsopraOPRD +{a[+] && c[+]}|->G!b,&!xPsopraOPRD +{a[+] && c[+]}|->!Gb,&xPraRD +{a[+] && c[+]}|->F!b,&!xPraRD +{a[+] && c[+]}|->GFa,&!xPraRD {a[+] && c[+]}|->FGa,&!xPa -{a;c*;b}<>->!Gb,&Pgopra -{a;c*;b}<>->F!b,&!Pgopra -{a;c*;b}<>->FGb,&!Ppa -{a;c*;b}<>->!GFb,&Ppa +{a;c*;b}<>->!Gb,&PgopraOPRD +{a;c*;b}<>->F!b,&!PgopraOPRD +{a;c*;b}<>->FGb,&!PpaPD +{a;c*;b}<>->!GFb,&PpaPD {a;c*;b}<>->GFb,&!Pa {a;c*;b}<>->!FGb,&Pa {a*;c[+];b[+]}<>->!FGb,&Pa -{a;c|d;b}<>->!Gb,&Pgopra -{a;c|d;b}<>->G!b,&!Psopra 
-{a;c|d;b}<>->FGb,&!Ppa -{a;c|d;b}<>->!GFb,&Ppa -{a;c|d;b}<>->GFb,&!Pra -{a;c|d;_b}<>->!FGb,&Pr +{a;c|d;b}<>->!Gb,&PgopraOPRD +{a;c|d;b}<>->G!b,&!PsopraOPRD +{a;c|d;b}<>->FGb,&!PpaPD +{a;c|d;b}<>->!GFb,&PpaPD +{a;c|d;b}<>->GFb,&!PraRD +{a;c|d;_b}<>->!FGb,&PrRD # Equivalent to a&b&c&d -{a:b:c:d}!,B&!xfLPSFsgopra -a&b&c&d,B&!xfLPSFsgopra -(Xa <-> XXXc) U (b & Fe),LPgopra -(!X(a|X(!b))&(FX(g xor h)))U(!G(a|b)),LPegopra -(!X(a|X(!b))&(GX(g xor h)))R(!F(a|b)),LPusopra -(!X(a|X(!b))&(GX(g xor h)))U(!G(a|b)),LPeopra -(!X(a|X(!b))&(FX(g xor h)))R(!F(a|b)),LPuopra -(!X(a|X(!b))&(GX(g xor h)))U(!F(a|b)),LPpa -(!X(a|X(!b))&(FX(g xor h)))R(!G(a|b)),LPra -(!X(a|GXF(!b))&(FGX(g xor h)))U(!F(a|b)),LPpa +{a:b:c:d}!,B&!xfLPSFsgopraOPRD +a&b&c&d,B&!xfLPSFsgopraOPRD +(Xa <-> XXXc) U (b & Fe),LPgopraOPRD +(!X(a|X(!b))&(FX(g xor h)))U(!G(a|b)),LPegopraOPRD +(!X(a|X(!b))&(GX(g xor h)))R(!F(a|b)),LPusopraOPRD +(!X(a|X(!b))&(GX(g xor h)))U(!G(a|b)),LPeopraPD +(!X(a|X(!b))&(FX(g xor h)))R(!F(a|b)),LPuopraRD +(!X(a|X(!b))&(GX(g xor h)))U(!F(a|b)),LPpaPD +(!X(a|X(!b))&(FX(g xor h)))R(!G(a|b)),LPraRD +(!X(a|GXF(!b))&(FGX(g xor h)))U(!F(a|b)),LPpaPD (!X(a|GXF(!b))&(FGX(g xor h)))R(!F(a|b)),LPupa -(!X(a|FXG(!b))&(GFX(g xor h)))R(!G(a|b)),LPra +(!X(a|FXG(!b))&(GFX(g xor h)))R(!G(a|b)),LPraRD (!X(a|FXG(!b))&(GFX(g xor h)))U(!G(a|b)),LPera -(!X(a|GXF(!b))&(FGX(g xor h)))U(!G(a|Fb)),LPepa +(!X(a|GXF(!b))&(FGX(g xor h)))U(!G(a|Fb)),LPepaPD (!X(a|GXF(!b))&(FGX(g xor h)))U(!F(a|Gb)),LPa -(!X(a|FXG(!b))&(GFX(g xor h)))R(!F(a|Gb)),LPura +(!X(a|FXG(!b))&(GFX(g xor h)))R(!F(a|Gb)),LPuraRD (!X(a|FXG(!b))&(GFX(g xor h)))R(!G(a|Fb)),LPa GFa M GFb,&!xLPeua -FGa M FGb,&!xLPeupa +FGa M FGb,&!xLPeupaPD Fa M GFb,&!xLPera -GFa W GFb,&!xLPeura +GFa W GFb,&!xLPeuraRD FGa W FGb,&!xLPeua Ga W FGb,&!xLPupa -Ga W b,&!xLPsopra -Fa M b,&!xLPgopra -{a;b*;c},&!fPsopra -{a;b*;c}!,&!fPgopra +Ga W b,&!xLPsopraOPRD +Fa M b,&!xLPgopraOPRD +{a;b*;c},&!fPsopraOPRD +{a;b*;c}!,&!fPgopraOPRD # The negative normal form is {a;b*;c}[]->1 -!{a;b*;c}!,&fPsopra -{a;b*;p112}[]->0,&!fPsopra -!{a;b*;c.2},&!fPgopr -!{a[+];b*;c[+]},&!fPgopra -!{a[+];b*;c*},&!xfPgopra -{a[+];b*;c[+]},&!fPsopra -{a[+] && b || c[+]},&!fPsopra -{a[+] && b[+] || c[+]},&!xfPsopra -{p[+]:p[+]},&!xfPsoprla -(!p W Gp) | ({(!p[*];(p[+]:(p[*];!p[+])))[:*4][:+]}<>-> (!p W Gp)),&!xPpla -{b[+][:*0..3]},&!fPsopra -{a->c[*]},xfPsopra -{(a[+];b*);c*}<>->d,&!xfPgopra -{first_match(a[+];b*);c*}<>->d,&!fPgopra +!{a;b*;c}!,&fPsopraOPRD +{a;b*;p112}[]->0,&!fPsopraOPRD +!{a;b*;c.2},&!fPgoprOPRD +!{a[+];b*;c[+]},&!fPgopraOPRD +!{a[+];b*;c*},&!xfPgopraOPRD +{a[+];b*;c[+]},&!fPsopraOPRD +{a[+] && b || c[+]},&!fPsopraOPRD +{a[+] && b[+] || c[+]},&!xfPsopraOPRD +{p[+]:p[+]},&!xfPsoprlaOPRD +(!p W Gp) | ({(!p[*];(p[+]:(p[*];!p[+])))[:*4][:+]}<>-> (!p W Gp)),&!xPplaPD +{b[+][:*0..3]},&!fPsopraOPRD +{a->c[*]},xfPsopraOPRD +{(a[+];b*);c*}<>->d,&!xfPgopraOPRD +{first_match(a[+];b*);c*}<>->d,&!fPgopraOPRD +G(Ga U b),&!xLPura +GFb & G(Ga R b),&!xLPuraRD EOF run 0 ../kind input From 5bc4d12bbada0e6ec2ef804e1091b80c6bd8c39f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 19 Jul 2024 18:18:12 +0200 Subject: [PATCH 462/606] ltlfilt: support --pi1 --sigma1 --delta1 --pi2 --sigma2 * bin/ltlfilt.cc: Implement those option. * tests/core/hierarchy.test: Add a quick test. * NEWS: Mention it. 
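A quick way to see what these filters do at the library level (a sketch, not
part of the patch; it assumes the new is_pi2()/is_sigma2()/is_delta2() methods
are exposed to Python like the existing is_syntactic_*() ones, and spot.formula
is the usual parser entry point):

    import spot

    # Expected classifications, following tests/core/kind.test:
    #   'GFa'       -> is_pi2() holds, is_sigma2() does not
    #   'FGa'       -> is_sigma2() holds, is_pi2() does not
    #   'Ga U b'    -> is_sigma2() and is_delta2() hold, is_pi2() does not
    #   'G(Ga U b)' -> none of the three hold, although it is a
    #                  syntactic recurrence
    for s in ('GFa', 'FGa', 'Ga U b', 'G(Ga U b)'):
        f = spot.formula(s)
        print(s, f.is_pi2(), f.is_sigma2(), f.is_delta2())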
--- NEWS | 5 ++++- bin/ltlfilt.cc | 35 +++++++++++++++++++++++++++++++++-- tests/core/hierarchy.test | 6 ++++++ 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 700942091..5f09664f9 100644 --- a/NEWS +++ b/NEWS @@ -6,6 +6,9 @@ New in spot 2.12.0.dev (not yet released) edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. + - ltlfilt learned --pi1, --sigma1, --delta1, --pi2, --sigma2, and + --delta2 to filter according to classes Π₁,Σ₁,Δ₁,Π₂,Σ₂, and Δ₂. + Library: - restrict_dead_end_edges_here() can reduce non-determinism (but @@ -112,7 +115,7 @@ New in spot 2.12 (2024-05-16) - EXPL: explicit splitting of each state as before - SEMISYM: The outgoing transition of each state are encoded as a bdd; Works better for larger number of input APs - - FULLYSYM: The automaton is first translated into a + - FULLYSYM: The automaton is first translated into a fully symbolic version, then split. - AUTO: Let the heuristic decide what to do. diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 0403ea76c..f56074d78 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -74,6 +74,8 @@ enum { OPT_BSIZE_MAX, OPT_BSIZE_MIN, OPT_DEFINE, + OPT_DELTA1, + OPT_DELTA2, OPT_DROP_ERRORS, OPT_EQUIVALENT_TO, OPT_EXCLUSIVE_AP, @@ -89,6 +91,7 @@ enum { OPT_NNF, OPT_OBLIGATION, OPT_PERSISTENCE, + OPT_PI2, OPT_RECURRENCE, OPT_REJECT_WORD, OPT_RELABEL, @@ -97,6 +100,7 @@ enum { OPT_REMOVE_WM, OPT_REMOVE_X, OPT_SAFETY, + OPT_SIGMA2, OPT_SIZE, OPT_SIZE_MAX, OPT_SIZE_MIN, @@ -184,15 +188,22 @@ static const argp_option options[] = { "suspendable", OPT_SUSPENDABLE, nullptr, 0, "synonym for --universal --eventual", 0 }, { "syntactic-safety", OPT_SYNTACTIC_SAFETY, nullptr, 0, - "match syntactic-safety formulas", 0 }, + "match syntactic-safety (a.k.a. Π₁) formulas", 0 }, + { "pi1", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, { "syntactic-guarantee", OPT_SYNTACTIC_GUARANTEE, nullptr, 0, - "match syntactic-guarantee formulas", 0 }, + "match syntactic-guarantee (a.k.a. Σ₁) formulas", 0 }, + { "sigma1", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, { "syntactic-obligation", OPT_SYNTACTIC_OBLIGATION, nullptr, 0, "match syntactic-obligation formulas", 0 }, + { "delta1", OPT_DELTA1, nullptr, 0, + "match Δ₁ formulas", 0 }, { "syntactic-recurrence", OPT_SYNTACTIC_RECURRENCE, nullptr, 0, "match syntactic-recurrence formulas", 0 }, + { "pi2", OPT_PI2, nullptr, 0, "match Π₂ formulas", 0 }, { "syntactic-persistence", OPT_SYNTACTIC_PERSISTENCE, nullptr, 0, "match syntactic-persistence formulas", 0 }, + { "sigma2", OPT_SIGMA2, nullptr, 0, "match Σ₂ formulas", 0 }, + { "delta2", OPT_DELTA2, nullptr, 0, "match Δ₂ formulas", 0 }, { "syntactic-stutter-invariant", OPT_SYNTACTIC_SI, nullptr, 0, "match stutter-invariant formulas syntactically (LTL-X or siPSL)", 0 }, { "nox", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, @@ -312,6 +323,10 @@ static bool syntactic_guarantee = false; static bool syntactic_obligation = false; static bool syntactic_recurrence = false; static bool syntactic_persistence = false; +static bool delta1 = false; +static bool delta2 = false; +static bool sigma2 = false; +static bool pi2 = false; static bool syntactic_si = false; static bool safety = false; static bool guarantee = false; @@ -441,6 +456,12 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_DEFINE: opt->output_define.reset(new output_file(arg ? 
arg : "-")); break; + case OPT_DELTA1: + delta1 = true; + break; + case OPT_DELTA2: + delta2 = true; + break; case OPT_DROP_ERRORS: error_style = drop_errors; break; @@ -501,6 +522,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_OBLIGATION: obligation = true; break; + case OPT_PI2: + pi2 = true; + break; case OPT_PERSISTENCE: persistence = true; break; @@ -564,6 +588,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_AP_N: ap_n = parse_range(arg, 0, std::numeric_limits::max()); break; + case OPT_SIGMA2: + sigma2 = true; + break; case OPT_SUSPENDABLE: universal = true; eventual = true; @@ -745,8 +772,12 @@ namespace matched &= !syntactic_safety || f.is_syntactic_safety(); matched &= !syntactic_guarantee || f.is_syntactic_guarantee(); matched &= !syntactic_obligation || f.is_syntactic_obligation(); + matched &= !delta1 || f.is_delta1(); matched &= !syntactic_recurrence || f.is_syntactic_recurrence(); + matched &= !pi2 || f.is_pi2(); matched &= !syntactic_persistence || f.is_syntactic_persistence(); + matched &= !sigma2 || f.is_sigma2(); + matched &= !delta2 || f.is_delta2(); matched &= !syntactic_si || f.is_syntactic_stutter_invariant(); if (matched && (ap_n.min > 0 || ap_n.max >= 0)) { diff --git a/tests/core/hierarchy.test b/tests/core/hierarchy.test index 330b49070..2f7089332 100755 --- a/tests/core/hierarchy.test +++ b/tests/core/hierarchy.test @@ -21,16 +21,22 @@ set -e +test 11 -eq `genltl --dac | ltlfilt --pi1 -c` test 11 -eq `genltl --dac | ltlfilt --syntactic-safety -c` test 37 -eq `genltl --dac | ltlfilt --safety -c` test 'Fp0' = `genltl --dac | ltlfilt --syntactic-guarantee` test 'Fp0' = `genltl --dac | ltlfilt --guarantee` +test 'Fp0' = `genltl --dac | ltlfilt --sigma1` +test 23 -eq `genltl --dac | ltlfilt --delta1 -c` test 25 -eq `genltl --dac | ltlfilt --syntactic-obligation -c` test 40 -eq `genltl --dac | ltlfilt --obligation -c` +test 42 -eq `genltl --dac | ltlfilt --pi2 -c` test 47 -eq `genltl --dac | ltlfilt --syntactic-recurrence -c` test 52 -eq `genltl --dac | ltlfilt --recurrence -c` +test 29 -eq `genltl --dac | ltlfilt --sigma2 -c` test 29 -eq `genltl --dac | ltlfilt --syntactic-persistence -c` test 41 -eq `genltl --dac | ltlfilt --persistence -c` +test 48 -eq `genltl --dac | ltlfilt --delta2 -c` test 'G!p0 | F(p0 & (!p1 W p2))' = "`genltl --dac | ltlfilt -v --obligation | ltlfilt --persistence`" test 12 -eq `genltl --dac | From 41abe3f831eba9cdfa72f6b8685aba26accbce59 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Jul 2024 10:42:58 +0200 Subject: [PATCH 463/606] tl: implement to_delta2() * spot/tl/delta2.cc, spot/tl/delta2.hh: New files. * spot/tl/Makefile.am: Add them. * python/spot/impl.i: Include delta2.hh. * tests/python/delta2.py: New file. * tests/Makefile.am: Add it. * NEWS: Mention the change. --- NEWS | 4 + python/spot/impl.i | 2 + spot/tl/Makefile.am | 2 + spot/tl/delta2.cc | 451 +++++++++++++++++++++++++++++++++++++++++ spot/tl/delta2.hh | 42 ++++ tests/Makefile.am | 1 + tests/python/delta2.py | 57 ++++++ 7 files changed, 559 insertions(+) create mode 100644 spot/tl/delta2.cc create mode 100644 spot/tl/delta2.hh create mode 100755 tests/python/delta2.py diff --git a/NEWS b/NEWS index 5f09664f9..a4670f5d8 100644 --- a/NEWS +++ b/NEWS @@ -26,6 +26,10 @@ New in spot 2.12.0.dev (not yet released) formula::is_delta1(), formula::is_sigma2(), formula::is_pi2(), formula::is_delta2(). See doc/tl/tl.pdf from more discussion. 
+ - spot::to_delta2() implements Δ₂-normalization for LTL formulas, + following "Efficient Normalization of Linear Temporal Logic" by + Esparza et al. (J. ACM, 2024). + New in spot 2.12 (2024-05-16) Build: diff --git a/python/spot/impl.i b/python/spot/impl.i index 6fa3e9f07..6d91144d8 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -90,6 +90,7 @@ #include #include #include +#include #include #include #include @@ -632,6 +633,7 @@ namespace std { %include %include %include +%include %include %include %include diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index a1f0ce104..6c7650875 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -27,6 +27,7 @@ tl_HEADERS = \ contain.hh \ declenv.hh \ defaultenv.hh \ + delta2.hh \ dot.hh \ environment.hh \ exclusive.hh \ @@ -52,6 +53,7 @@ libtl_la_SOURCES = \ contain.cc \ declenv.cc \ defaultenv.cc \ + delta2.cc \ dot.cc \ exclusive.cc \ formula.cc \ diff --git a/spot/tl/delta2.cc b/spot/tl/delta2.cc new file mode 100644 index 000000000..e2d2e66d4 --- /dev/null +++ b/spot/tl/delta2.cc @@ -0,0 +1,451 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include + +namespace spot +{ + namespace + { + static formula + is_F(formula f) + { + if (f.is(op::F)) + return f[0]; + if (f.is(op::U) && f[0].is_tt()) + return f[1]; + if (f.is(op::M) && f[1].is_tt()) + return f[0]; + return nullptr; + } + + static formula + is_G(formula f) + { + if (f.is(op::G)) + return f[0]; + if (f.is(op::R) && f[0].is_ff()) + return f[1]; + if (f.is(op::W) && f[1].is_ff()) + return f[0]; + return nullptr; + } + + static formula + is_FG(formula x) + { + if (formula f = is_F(x); f) + return is_G(f); + return nullptr; + } + + static formula + is_GF(formula x) + { + if (formula f = is_G(x); f) + return is_F(f); + return nullptr; + } + + + static formula + rewrite_strong_under_weak(formula f) + { + // FIXME: Can we replace is_FG/is_GF by is_suspendable? This + // isn't straightforward, because stage3 is only looking for + // FG/GF. + if (f.is_delta1() || is_FG(f) || is_GF(f)) + return f; + if (f.is(op::W) || f.is(op::G)) + { + formula f0 = f[0]; + formula f1 = f.is(op::W) ? 
f[1] : formula::ff(); + // If φ₁ contains a strong operator (i.e., is not a safety) + // we have φ₀ W φ₁ = (φ₀ U φ₁) | G(φ₀) + if (!f1.is_syntactic_safety()) + { + formula left = formula::U(f0, f1); + formula right = formula::G(f0); + return rewrite_strong_under_weak(formula::Or({left, right})); + } + // x[φ₀Uφ₁] W φ₂ = + // (GFφ₁ & (x[φ₀Wφ₁] W φ₂)) | x[φ₀Uφ₁] U (φ₂|G(x[false])) + // x[Fφ₀] W φ₂= (GFφ₀ & (x[true] W φ₂)) | x[Fφ₀] U (φ₂ | G(x[false])) + // x[φ₀Mφ₁] W φ₂ = + // (GFφ₀ & (x[φ₀Rφ₁] R φ₂)) | x[φ₀Mφ₁] U (φ₂|G(x[false])) + formula match = nullptr; // (φ₀ U φ₁) once matched + formula prefix = nullptr; // GF(φ₁) + auto find_u = [&](formula node, auto&& self) { + if (!match || match == node) + { + if (is_FG(node) || is_GF(node)) + return node; + if (node.is(op::U)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[1])); + } + return formula::W(node[0], node[1]); + } + else if (node.is(op::M)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::R(node[0], node[1]); + } + else if (node.is(op::F)) // like tt U φ₀ + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::tt(); + } + } + return node.map(self, self); + }; + formula g = find_u(f0, find_u); + if (!match) + return f; + assert(!match.is_syntactic_safety()); + auto match_to_false = [&](formula node, auto&& self) { + if (node == match) + return formula::ff(); + if (node.is_syntactic_safety()) + return node; + return node.map(self, self); + }; + formula ww = rewrite_strong_under_weak(formula::W(g, f1)); + prefix = formula::And({prefix, ww}); + formula gx_false = formula::G(match_to_false(f0, match_to_false)); + formula u_right = formula::U(f0, formula::Or({f1, gx_false})); + return formula::Or({prefix, rewrite_strong_under_weak(u_right)}); + } + if (f.is(op::R)) + { + formula f0 = f[0]; + formula f1 = f[1]; + // If φ₀ contains a strong operator (i.e., is not a safety) + // we have φ₀ R φ₁ = (φ₀ M φ₁) | G(φ₁) + if (!f0.is_syntactic_safety()) + { + formula left = formula::M(f0, f1); + formula right = formula::G(f1); + return rewrite_strong_under_weak(formula::Or({left, right})); + } + // φ₀ R x[φ₁Uφ₂] = + // (GFφ₂ & (φ₀ R x[φ₁Wφ₂])) | ((φ₀|G(x[false])) M x[φ₁Uφ₂]) + // φ₀ R x[Fφ₁] = (GFφ₁ & (φ₀ R x[true])) | ((φ₀|G(x[false])) M x[Fφ₁]) + // φ₀ R x[φ₁Mφ₂] = + // (GFφ₀ & (φ₀ R x[φ₁Rφ₂])) | ((φ₀|G(x[false])) M x[φ₁Mφ₂]) + formula match = nullptr; // (φ₀ U φ₁) once matched + formula prefix = nullptr; // GF(φ₁) + auto find_u = [&](formula node, auto&& self) { + if (!match || match == node) + { + if (is_FG(node) || is_GF(node)) + return node; + if (node.is(op::U)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[1])); + } + return formula::W(node[0], node[1]); + } + else if (node.is(op::M)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::R(node[0], node[1]); + } + else if (node.is(op::F)) // like tt U φ₀ + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::tt(); + } + } + return node.map(self, self); + }; + formula g = find_u(f1, find_u); + if (!match) + return f; + // φ₀ R x[φ₁Uφ₂] = + // (GFφ₂ & (φ₀ R x[φ₁Wφ₂])) | ((φ₀|G(x[false])) M x[φ₁Uφ₂]) + // φ₀ R x[Fφ₁] = (GFφ₁ & (φ₀ R x[true])) | ((φ₀|G(x[false])) M x[Fφ₁]) + // φ₀ R x[φ₁Mφ₂] = + // (GFφ₀ & (φ₀ R x[φ₁Rφ₂])) | ((φ₀|G(x[false])) M x[φ₁Mφ₂]) + assert(!match.is_syntactic_safety()); + auto match_to_false = [&](formula node, 
auto&& self) { + if (node == match) + return formula::ff(); + if (node.is_syntactic_safety()) + return node; + return node.map(self, self); + }; + formula rw = rewrite_strong_under_weak(formula::R(f0, g)); + prefix = formula::And({prefix, rw}); + formula gx_false = formula::G(match_to_false(f1, match_to_false)); + formula m_right = formula::M(formula::Or({f0, gx_false}), f1); + return formula::Or({prefix, rewrite_strong_under_weak(m_right)}); + } + return f.map(rewrite_strong_under_weak); + } + + // c[susp] = (susp & c[true]) || c[false] + formula + fish_inner_suspendable(formula f) + { + if (f.is_delta1() || is_FG(f) || is_GF(f)) + return f; + formula match = nullptr; + // return c[true], and set match to susp. + auto find_inner_susp = [&](formula node, auto self) + { + if (node.is_delta1()) + { + return node; + } + else if (node.is_eventual() && node.is_universal()) + { + if (!match) + { + // Try to find a deeper suspendable node if it + // exist, we want to start from the bottom. + node = node.map(self, self); + if (!match) + match = node; + } + if (node == match) + return formula::tt(); + } + return node.map(self, self); + }; + formula c_true = f.map(find_inner_susp, find_inner_susp); + if (!match) + return c_true; // == f. + + auto match_to_false = [&](formula node, auto&& self) { + if (node.is_delta1()) + return node; + if (node == match) + return formula::ff(); + return node.map(self, self); + }; + formula c_false = f.map(match_to_false, match_to_false); + match = fish_inner_suspendable(match); + c_true = fish_inner_suspendable(c_true); + c_false = fish_inner_suspendable(c_false); + return formula::Or({formula::And({match, c_true}), c_false}); + } + + static formula + normalize_inside_suspendable(formula f) + { + if (f.is_delta1()) + return f; + if (formula inner = is_GF(f)) + { + // GF(x[φ₀ W φ₁]) = GF(x[φ₀ U φ₁]) | (FG(φ₀) & GF(x[true]) + // GF(x[φ₀ R φ₁]) = GF(x[φ₀ M φ₁]) | (FG(φ₁) & GF(x[true]) + // GF(x[Gφ₀]) = GF(x[false]) | (FG(φ₀) & GF(x[true]) + formula match = nullptr; // (φ₀ W φ₁) once matched + formula suffix = nullptr; // FG(φ₀) + auto find_w = [&](formula node, auto&& self) { + if (!match || match == node) + { + if (node.is(op::W)) + { + if (!match) + { + match = node; + suffix = formula::F(formula::G(match[0])); + } + return formula::U(node[0], node[1]); + } + else if (node.is(op::R)) + { + if (!match) + { + match = node; + suffix = formula::F(formula::G(match[1])); + } + return formula::M(node[0], node[1]); + } + else if (node.is(op::G)) // like 0 R φ₀ + { + if (!match) + { + match = node; + suffix = formula::F(formula::G(match[0])); + } + return formula::ff(); + } + } + return node.map(self, self); + }; + formula res = find_w(inner, find_w); + if (!match) + return f; + // append GF(x[true]) to suffix + assert(!match.is_syntactic_guarantee()); + auto match_to_true = [&](formula node, auto&& self) { + if (node == match) + return formula::tt(); + if (node.is_syntactic_guarantee()) + return node; + return node.map(self, self); + }; + suffix = formula::And({suffix, + f.map(match_to_true, match_to_true)}); + res = formula::Or({formula::G(formula::F(res)), suffix}); + return normalize_inside_suspendable(res); + } + else if (formula inner = is_FG(f)) + { + // FG(x[φ₀ U φ₁]) = (GF(φ₁) & FG(x[φ₀ W φ₁])) | FG(x[false]) + // FG(x[φ₀ M φ₁]) = (GF(φ₀) & FG(x[φ₀ R φ₁])) | FG(x[false]) + // FG(x[Fφ₀]) = (GF(φ₀) & FG(x[true])) | FG(x[false]) + formula match = nullptr; // (φ₀ U φ₁) once matched + formula prefix = nullptr; // GF(φ₁) + auto find_u = [&](formula node, auto&& self) { + 
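+            // Look for the first strong operator (U, M, or F): rewrite it
+            // into its weak counterpart (W, R, or true) and remember the
+            // GF(...) prefix required by the equations above.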
if (!match || match == node) + { + if (node.is(op::U)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[1])); + } + return formula::W(node[0], node[1]); + } + else if (node.is(op::M)) + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::R(node[0], node[1]); + } + else if (node.is(op::F)) // like tt U φ₀ + { + if (!match) + { + match = node; + prefix = formula::G(formula::F(match[0])); + } + return formula::tt(); + } + } + return node.map(self, self); + }; + formula res = find_u(inner, find_u); + if (!match) + return f; + res = formula::And({formula::F(formula::G(res)), prefix}); + // append FG(x[false]) + assert(!match.is_syntactic_safety()); + auto match_to_false = [&](formula node, auto&& self) { + if (node == match) + return formula::ff(); + if (node.is_syntactic_safety()) + return node; + return node.map(self, self); + }; + res = formula::Or({res, f.map(match_to_false, match_to_false)}); + return normalize_inside_suspendable(res); + } + return f.map(normalize_inside_suspendable); + } + + + // Dispatch Fun on top-level temporal operators that aren't + // already in Δ₂ form. + template + static formula + dispatch(formula f, Fun&& fun) + { + if (f.is_delta2()) + return f; + switch (auto k = f.kind()) + { + case op::F: + case op::G: + case op::U: + case op::R: + case op::W: + case op::M: + return fun(f); + case op::EConcat: + case op::EConcatMarked: + case op::UConcat: + // not yet supported + return formula::binop(k, f[0], dispatch(f[1], fun)); + default: + break; + } + return f.map(dispatch, fun); + } + } + + formula to_delta2(formula f, tl_simplifier* tls) + { + if (f.is_delta2()) + return f; + bool own_tls = !tls; + if (own_tls) + { + tl_simplifier_options opt(false, false, false, + false, false, false, + false, false, false, + true); + tls = new tl_simplifier(opt); + } + // This will ensure the formula is in NNF, except + // maybe for the top level operator. + f = tls->simplify(f); + // stage 1 + f = dispatch(f, rewrite_strong_under_weak); + // stage 2 + f = dispatch(f, fish_inner_suspendable); + // stage 3 + f = dispatch(f, normalize_inside_suspendable); + // f = tls->simplify(f); + if (own_tls) + delete tls; + return f; + } +} diff --git a/spot/tl/delta2.hh b/spot/tl/delta2.hh new file mode 100644 index 000000000..766a8ffc1 --- /dev/null +++ b/spot/tl/delta2.hh @@ -0,0 +1,42 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include + +namespace spot +{ + /// \ingroup tl_rewriting + /// \brief Convert an LTL formula to Δ₂ + /// + /// This implement LTL rewriting rules as given by + /// \cite esparza.24.acm + /// + /// Only LTL operators are supported, PSL operators + /// will be left untouched. 
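+  ///
+  /// Illustrative use (sketch): to_delta2(parse_formula("(a U b) R c"))
+  /// yields an equivalent formula for which is_delta2() holds.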
+ /// + /// If \a tls is given, it will be used to simplify formulas and + /// puts formulas in negative normal form. If \a tls is not + /// given, a temporary simplifier will be created. + /// + /// No transformation is attempted if the input is already Δ₂. + SPOT_API formula + to_delta2(formula f, tl_simplifier* tls = nullptr); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index cfe6a033a..08dfbf377 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -415,6 +415,7 @@ TESTS_python = \ python/declenv.py \ python/decompose_scc.py \ python/deadends.py \ + python/delta2.py \ python/det.py \ python/dualize.py \ python/ecfalse.py \ diff --git a/tests/python/delta2.py b/tests/python/delta2.py new file mode 100755 index 000000000..a60f60456 --- /dev/null +++ b/tests/python/delta2.py @@ -0,0 +1,57 @@ +#!/usr/bin/python3 +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + +def generate_formulas(): + for i in ['{} {} ({} {} {})', '({} {} {}) {} {}']: + for k in "MWUR": + for l in "MWUR": + for a in ['a', 'false', 'true']: + for b in ['b', 'false', 'true']: + for c in ['c', 'false', 'true']: + inner = i.format(a,k,b,l,c); + for j in ['GF({})', 'FG({})', + '({}) W d', '({}) U d', + 'd W ({})', 'd U ({})', + '({}) R d', '({}) M d', + 'd R ({})', 'd M ({})']: + yield j.format(inner) + + for j1 in ['G(F({}){}F(e {} f))', + 'F(G({}){}G(e {} f))']: + for j2 in "&|": + for j3 in "MWUR": + yield j1.format(inner,j2,j3) + +seen = set() +for f in generate_formulas(): + ltl_in = spot.formula(f) + if ltl_in in seen: + continue + seen.add(ltl_in) + ltl_out = spot.to_delta2(ltl_in) + ok = spot.are_equivalent(ltl_in, ltl_out) + din = ltl_in.is_delta2() + dout = ltl_out.is_delta2() + print(f"{ok:1} {din:1}{dout:1} {ltl_in::30} {ltl_out::30}") + tc.assertTrue(ok) + tc.assertTrue(dout) From bcdfe44c44242a8780860f904364f99efc182f4f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Jul 2024 16:34:11 +0200 Subject: [PATCH 464/606] ltlfilt: add support for --to-delta2 * bin/ltlfilt.cc: Implement this option. * tests/core/delta2.test: New file. * tests/Makefile.am: Add it. * NEWS: Mention it. --- NEWS | 2 ++ bin/ltlfilt.cc | 12 +++++++++++- tests/Makefile.am | 1 + tests/core/delta2.test | 30 ++++++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 tests/core/delta2.test diff --git a/NEWS b/NEWS index a4670f5d8..cb1129084 100644 --- a/NEWS +++ b/NEWS @@ -9,6 +9,8 @@ New in spot 2.12.0.dev (not yet released) - ltlfilt learned --pi1, --sigma1, --delta1, --pi2, --sigma2, and --delta2 to filter according to classes Π₁,Σ₁,Δ₁,Π₂,Σ₂, and Δ₂. + - ltlfilt learned --to-delta2 to transform an LTL formula into Δ₂. 
+ Library: - restrict_dead_end_edges_here() can reduce non-determinism (but diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index f56074d78..2fd069dc2 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,7 @@ enum { OPT_SYNTACTIC_RECURRENCE, OPT_SYNTACTIC_SAFETY, OPT_SYNTACTIC_SI, + OPT_TO_DELTA2, OPT_UNABBREVIATE, OPT_UNIVERSAL, }; @@ -161,6 +163,8 @@ static const argp_option options[] = { "remove-x", OPT_REMOVE_X, nullptr, 0, "remove X operators (valid only for stutter-insensitive properties)", 0 }, + { "to-delta2", OPT_TO_DELTA2, nullptr, 0, + "rewrite LTL formula in Δ₂-form", 0 }, { "unabbreviate", OPT_UNABBREVIATE, "STR", OPTION_ARG_OPTIONAL, "remove all occurrences of the operators specified by STR, which " "must be a substring of \"eFGiMRW^\", where 'e', 'i', and '^' stand " @@ -349,7 +353,7 @@ static int opt_max_count = -1; static long int match_count = 0; static const char* from_ltlf = nullptr; static const char* sonf = nullptr; - +static bool to_delta2 = false; // We want all these variables to be destroyed when we exit main, to // make sure it happens before all other global variables (like the @@ -579,6 +583,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_STUTTER_INSENSITIVE: stutter_insensitive = true; break; + case OPT_TO_DELTA2: + to_delta2 = true; + break; case OPT_UNABBREVIATE: if (arg) unabbreviate += arg; @@ -734,6 +741,9 @@ namespace } } + if (to_delta2) + f = spot::to_delta2(f); + switch (relabeling) { case ApRelabeling: diff --git a/tests/Makefile.am b/tests/Makefile.am index 08dfbf377..bffb79ff1 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -236,6 +236,7 @@ TESTS_twa = \ core/renault.test \ core/nondet.test \ core/det.test \ + core/delta2.test \ core/semidet.test \ core/neverclaimread.test \ core/parseaut.test \ diff --git a/tests/core/delta2.test b/tests/core/delta2.test new file mode 100644 index 000000000..ee202cbd2 --- /dev/null +++ b/tests/core/delta2.test @@ -0,0 +1,30 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
./defs +set -e + +genltl --dac-p --eh-p --hkrss-p --sb-p --sejk-p \ + --stats='%F:%L,%f' > formulas.txt +ltlfilt --to-delta2 --delta2 -F formulas.txt/2 > res.txt +ltlfilt --to-delta2 -v --delta2 -F formulas.txt/2 --stats='%<' || : +test `wc -l < formulas.txt` -eq `wc -l < res.txt` + +ltlcross -F formulas.txt/2 \ + 'ltl2tgba' 'ltlfilt --to-delta2 -f %f | ltl2tgba -G >%O' From 44efc9659586e25c74c4e0686c486f9c925bd2ea Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Jul 2024 17:56:04 +0200 Subject: [PATCH 465/606] formula: add a missing trivial rewriting in SERE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We should have [*0]|f ≡ f when f is a SERE that already accept the empty word. Fixes issue #454. * spot/tl/formula.cc: Implement the rewriting. * tests/core/reduccmp.test: Add a test case. * doc/tl/tl.tex, NEWS: Document it. --- NEWS | 4 ++++ doc/tl/tl.tex | 30 +++++++++++++++--------------- spot/tl/formula.cc | 11 ++++++++++- tests/core/reduccmp.test | 3 ++- 4 files changed, 31 insertions(+), 17 deletions(-) diff --git a/NEWS b/NEWS index cb1129084..56e2040d4 100644 --- a/NEWS +++ b/NEWS @@ -32,6 +32,10 @@ New in spot 2.12.0.dev (not yet released) following "Efficient Normalization of Linear Temporal Logic" by Esparza et al. (J. ACM, 2024). + - Trivial rewritings (those performed everytime at construction) + were missing the rule "[*0]|f ≡ f" when f already accepts the + empty word. (Issue #545.) + New in spot 2.12 (2024-05-16) Build: diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 65db63511..9d1b1c2ef 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -839,47 +839,47 @@ The following rules are all valid with the two arguments swapped. %\samp{$\FUSION$}.) \begin{align*} - \0\AND f &\equiv \0 & \0\ANDALT f &\equiv \0 & + \0\AND f &\equiv \0 & \0\OR f &\equiv f & \0 \FUSION f &\equiv \0 & - \0 \CONCAT f &\equiv \0 \\ + \mathllap{\0 \CONCAT f}&\equiv \0 \\ + \1\ANDALT b &\equiv b & \1\AND f&\equiv \begin{cases} 1\mathrlap{\text{~if~} \varepsilon\VDash f} \\ f\mathrlap{\text{~if~} \varepsilon\nVDash f} \\ \end{cases} & - \1\ANDALT b &\equiv b & \1\OR b &\equiv \1 & - \1 \FUSION f & \equiv f\mathrlap{\text{~if~}\varepsilon\nVDash f}\\ - && + \1 \FUSION f & \equiv \mathrlap{f\text{~if~}\varepsilon\nVDash f}\\ \STAR{} \ANDALT f &\equiv f & + && \STAR{} \OR f &\equiv \mathrlap{\STAR{}} & && - \STAR{} \CONCAT f &\equiv \STAR{}\text{~if~}\varepsilon\VDash f& \\ - && + \mathllap{\STAR{} \CONCAT f }&\equiv \STAR{}\text{~if~}\varepsilon\VDash f& \\ \PLUS{} \ANDALT f &\equiv f \text{~if~}\varepsilon\nVDash f& + && \PLUS{} \OR f &\equiv \begin{cases} \mathrlap{\STAR{}\text{~if~} \varepsilon\VDash f} \\ \mathrlap{\PLUS{}\text{~if~} \varepsilon\nVDash f} \\ \end{cases} & && && \\ - \eword\AND f &\equiv f & \eword\ANDALT f &\equiv \begin{cases} - \mathrlap{\eword\text{~if~} \varepsilon\VDash f} \\ - \0\mathrlap{\phantom{\STAR{}}\text{~if~} \varepsilon\nVDash f} \\ + \!\eword\text{~if~}\varepsilon\VDash{f} \\ + \0\!\phantom{\STAR{}}\text{~if~}\varepsilon\nVDash{f} \\ \end{cases} & - && + \mathllap{\eword}\AND f &\equiv f & + \eword\OR f &\equiv f \text{~if~} \varepsilon\VDash f& \eword \FUSION f &\equiv \0 & - \eword \CONCAT f &\equiv f\\ - f\AND f &\equiv f& + \mathllap{\eword \CONCAT f}&\equiv f\\ f\ANDALT f &\equiv f & + f\AND f &\equiv f& f\OR f &\equiv f& f\FUSION f&\equiv f\FSTAR{2}& f\CONCAT f&\equiv f\STAR{2}\\ - b_1 \AND b_2 &\equiv b_1\ANDALT b_2 & + b_1 \AND b_2 &\equiv \mathrlap{b_1\ANDALT b_2}& && && b_1:b_2 &\equiv b_1\ANDALT b_2 @@ 
-890,7 +890,7 @@ f\STAR{\mvar{i}..\mvar{j}}\CONCAT f\STAR{\mvar{k}..\mvar{l}}&\equiv f\STAR{\mvar f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f&\equiv f\FSTAR{\mvar{i+1}..\mvar{j+1}} & f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f\FSTAR{\mvar{k}..\mvar{l}}&\equiv f\FSTAR{\mvar{i+k}..\mvar{j+l}}\\ b\STAR{\mvar{i}..\mvar{j}}\FUSION b &\equiv b\STAR{\mvar{\max(i,1)}..\mvar{j}} & -b\STAR{\mvar{i}..\mvar{j}}\FUSION b\STAR{\mvar{k}..\mvar{l}} &\equiv b\mathrlap{\STAR{\mvar{\max(i,1)+\max(k,1)-1}..\mvar{j+l-1}}} +b\STAR{\mvar{i}..\mvar{j}}\FUSION b\STAR{\mvar{k}..\mvar{l}} &\equiv b\STAR{\mvar{\max(i,1)+\max(k,1)-1}..\mvar{j+l-1}} \end{align*} \section{SERE-LTL Binding Operators} diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index fe5770931..b905f4464 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -357,7 +357,16 @@ namespace spot break; case op::OrRat: neutral = ff(); - neutral2 = nullptr; + { + // If this OrRat contains an operand that accept [*0] but + // isn't [*0], then any [+0] can be removed. + bool eword_accepted = + std::find_if(v.begin(), v.end(), + [](const fnode* f) { + return f->accepts_eword() && !f->is_eword(); + }) != v.end(); + neutral2 = eword_accepted ? eword() : nullptr; + } abs = one_star(); abs2 = nullptr; weak_abs = one_plus(); diff --git a/tests/core/reduccmp.test b/tests/core/reduccmp.test index 27580cb34..92cc2719f 100755 --- a/tests/core/reduccmp.test +++ b/tests/core/reduccmp.test @@ -487,7 +487,8 @@ GF(a && GF(b) && c), G(F(a & c) & Fb) {first_match(1:e[*0..3])[*]}[]-> c, c W !e {first_match(first_match(a*;e);b)}[]->a, {first_match(a[*];e)}[]-> X(a | !b) {first_match(first_match(a*;e):b*)}[]->a, {first_match(a[*];e)}[]-> (a | !b) - +# issue 545 +{([*0]|a[*]|b);c}<>->d, {(a[*]|b);c}<>->d # issue 558 (was a false alarm, but still good to test) {(!b)[*3];b}!, !b & X(!b & X(!b & Xb)) {(!b)[+];b}!, !b & XFb From a0a6ec67940eb38d08f1ed6e0c2030c4b10d9e55 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Jul 2024 18:01:42 +0200 Subject: [PATCH 466/606] * doc/tl/tl.tex: Some typos. --- doc/tl/tl.tex | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 9d1b1c2ef..727d37eb9 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -1600,10 +1600,10 @@ The goals in most of these simplification are to: \end{itemize} Rewritings defined with $\equivEU$ are applied only when -\verb|tl_simplifier_options::favor_event_univ|' is \texttt{true}: +`\verb|tl_simplifier_options::favor_event_univ|' is \texttt{true}: they try to lift subformulas that are both eventual and universal \emph{higher} in the syntax tree. Conversely, rules defined with $\equivNeu$ -are applied only when \verb|favor_event_univ|' is \texttt{false}: they +are applied only when `\verb|favor_event_univ|' is \texttt{false}: they try to \textit{lower} subformulas that are both eventual and universal. Currently all these simplifications assume LTL semantics, so they make @@ -1613,10 +1613,10 @@ only listed with $\X$. \subsection{Basic Simplifications}\label{sec:basic-simp} These simplifications are enabled with -\verb|tl_simplifier_options::reduce_basics|'. A couple of them may +`\verb|tl_simplifier_options::reduce_basics|'. A couple of them may enlarge the size of the formula: they are denoted using $\equiV$ instead of $\equiv$, and they can be disabled by setting the -\verb|tl_simplifier_options::reduce_size_strictly|' option to +`\verb|tl_simplifier_options::reduce_size_strictly|' option to \texttt{true}. 
\subsubsection{Basic Simplifications for Temporal Operators} @@ -1842,7 +1842,7 @@ $\Esuffix$. They assume that $b$, denote a Boolean formula. As noted at the beginning for section~\ref{sec:basic-simp}, rewritings denoted with $\equiV$ can be disabled by setting the -\verb|tl_simplifier_options::reduce_size_strictly|' option to +`\verb|tl_simplifier_options::reduce_size_strictly|' option to \texttt{true}. \begin{align*} @@ -1945,7 +1945,7 @@ $q,\,q_i$ & a pure eventuality that is also purely universal \\ \G(f_1\AND\ldots\AND f_n \AND q_1 \AND \ldots \AND q_p)&\equivEU \G(f_1\AND\ldots\AND f_n)\AND q_1 \AND \ldots \AND q_p \\ \G\F(f_1\AND\ldots\AND f_n \AND q_1 \AND \ldots \AND q_p)&\equiv \G(\F(f_1\AND\ldots\AND f_n)\AND q_1 \AND \ldots \AND q_p) \\ \G(f_1\AND\ldots\AND f_n \AND e_1 \AND \ldots \AND e_m \AND \G(e_{m+1}) \AND \ldots\AND \G(e_p))&\equivEU \G(f_1\AND\ldots\AND f_n)\AND \G(e_1 \AND \ldots \AND e_p) \\ - \G(f_1\AND\ldots\AND f_n \AND \G(g_1) \AND \ldots \AND \G(g_m) &\equiv \G(f_1\AND\ldots\AND f_n\AND g_1 \AND \ldots \AND g_m) \\ + \G(f_1\AND\ldots\AND f_n \AND \G(g_1) \AND \ldots \AND \G(g_m)) &\equiv \G(f_1\AND\ldots\AND f_n\AND g_1 \AND \ldots \AND g_m) \\ \F(f_1 \OR \ldots \OR f_n \OR u_1 \OR \ldots \OR u_m \OR \F(u_{m+1})\OR\ldots\OR \F(u_p)) &\equivEU \F(f_1\OR \ldots\OR f_n) \OR \F(u_1 \OR \ldots \OR u_p)\\ \F(f_1 \OR \ldots \OR f_n \OR \F(g_1) \OR \ldots \OR \G(g_m)) &\equiv \F(f_1\OR \ldots\OR f_n \OR g_1 \OR \ldots \OR g_m)\\ \G(f_1)\AND\ldots\AND \G(f_n) \AND \G(e_1) \AND \ldots\AND \G(e_p)&\equivEU \G(f_1\AND\ldots\AND f_n)\AND \G(e_1 \AND \ldots \AND e_p) \\ @@ -1964,19 +1964,19 @@ implication can be done in two ways: \begin{description} \item[Syntactic Implication Checks] were initially proposed by~\citet{somenzi.00.cav}. This detection is enabled by the - ``\verb|tl_simplifier_options::synt_impl|'' option. This is a + `\verb|tl_simplifier_options::synt_impl|' option. This is a cheap way to detect implications, but it may miss some. The rules we implement are described in Appendix~\ref{ann:syntimpl}. \item[Language Containment Checks] were initially proposed by~\citet{tauriainen.03.tr}. This detection is enabled by the - ``\verb|tl_simplifier_options::containment_checks|'' option. + `\verb|tl_simplifier_options::containment_checks|' option. \end{description} In the following rewritings rules, $f\simp g$ means that $g$ was proved to be implied by $f$ using either of the above two methods. Additionally, implications denoted by $f\Simp g$ are only checked if -the ``\verb|tl_simplifier_options::containment_checks_stronger|'' +the `\verb|tl_simplifier_options::containment_checks_stronger|' option is set (otherwise the rewriting rule is not applied). We write $f\simpe g$ iff $f\simp g$ and $g\simp f$. @@ -2063,7 +2063,7 @@ The first six rules, about n-ary operators $\AND$ and $\OR$, are implemented for $n$ operands by testing each operand against all other. To prevent the complexity to escalate, this is only performed with up to 16 operands. That value can be changed in -``\verb|tl_simplifier_options::containment_max_ops|''. +`\verb|tl_simplifier_options::containment_max_ops|'. The following rules mix implication-based checks with formulas that are pure eventualities ($e$) or that are purely universal ($u$). 
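For reference, the `tl_simplifier_options' fields quoted above can be
toggled from C++.  A minimal sketch (the option names are those documented
in tl.tex; it additionally assumes the public helpers spot::parse_formula
and spot::print_psl, and an arbitrary input formula):

  // Sketch: enable the containment-based and event/universal-favoring
  // simplifications discussed above, then simplify one formula.
  #include <spot/tl/parse.hh>
  #include <spot/tl/print.hh>
  #include <spot/tl/simplify.hh>
  #include <iostream>

  int main()
  {
    spot::tl_simplifier_options opt;
    opt.containment_checks = true;           // language containment checks
    opt.containment_checks_stronger = true;  // also the rules needing the stronger check
    opt.favor_event_univ = true;             // lift eventual&universal subformulas higher
    opt.reduce_size_strictly = false;        // keep rules that may enlarge the formula
    spot::tl_simplifier simp(opt);
    spot::formula f = spot::parse_formula("G(Fa & Fb) | FG(a | b)");
    spot::print_psl(std::cout, simp.simplify(f)) << '\n';
  }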
From 3693bbab0805dab77a9fca88ed600a029744c319 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 20 Aug 2024 16:52:27 +0200 Subject: [PATCH 467/606] synthesis: rewrite a C++20 construct into C++17 It is illegal to capture the element of a structured binding in C++17, GCC 15 will complain about it. * spot/twaalgos/synthesis.cc: Move the illegal structured binding inside the lambda. --- spot/twaalgos/synthesis.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 3c93137dc..6b5fa7af9 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1276,11 +1276,10 @@ namespace spot { current_in = bddtrue; - auto& [plyconddict, plycondvect] = plystatedict; - auto fvisitInTrav = [&](const bdd& ccond) -> void { + auto& [plyconddict, plycondvect] = plystatedict; int clvl = bdd_var(ccond); assert(clvl >= inIdx); From 436e5a2d7f25dd5e29690649f7207ecb4ee509c9 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Aug 2024 11:18:14 +0200 Subject: [PATCH 468/606] ltlgrind: improve error message when formulas are missing The error message, inherited from ltl2tgba, used to say "No formula to translate", but "translate" isn't appropriate here. * bin/common_finput.cc, bin/common_finput.hh (check_no_formula): Allow "translate" to be changed. * bin/ltlgrind.cc: Change it. * tests/core/ltlgrind.test: Test it. --- bin/common_finput.cc | 6 +++--- bin/common_finput.hh | 2 +- bin/ltlgrind.cc | 2 +- tests/core/ltlgrind.test | 8 ++++++++ 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/bin/common_finput.cc b/bin/common_finput.cc index df0343dd1..14cd06b36 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -388,14 +388,14 @@ job_processor::run() return error; } -void check_no_formula() +void check_no_formula(const char* action) { if (!jobs.empty()) return; if (isatty(STDIN_FILENO)) - error(2, 0, "No formula to translate? Run '%s --help' for help.\n" + error(2, 0, "No formula to %s? Run '%s --help' for help.\n" "Use '%s -' to force reading formulas from the standard " - "input.", program_name, program_name); + "input.", action, program_name, program_name); jobs.emplace_back("-", job_type::LTL_FILENAME); } diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 30b7f333c..491364d19 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -93,5 +93,5 @@ public: // Report and error message or add a default job depending on whether // the input is a tty. -void check_no_formula(); +void check_no_formula(const char* action = "translate"); void check_no_automaton(); diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index 626211adc..61ffc3cd5 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -199,7 +199,7 @@ main(int argc, char* argv[]) mut_opts |= opt_all; - check_no_formula(); + check_no_formula("mutate"); mutate_processor processor; if (processor.run()) diff --git a/tests/core/ltlgrind.test b/tests/core/ltlgrind.test index f508c4826..bfceef88a 100755 --- a/tests/core/ltlgrind.test +++ b/tests/core/ltlgrind.test @@ -200,3 +200,11 @@ EOF ltlgrind -f 'a U b' -m 999999999999999999999999999 2>err && exit 1 grep 'too large' err + +# The following message appears only if run from a tty. 
+if (: > /dev/tty) >/dev/null 2>&1 ; then + ltlgrind err && exit 1 + grep 'No formula to mutate' err +fi + +: From baf2778c9a08e4d123c0d3c6dfab70855b786d5c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Aug 2024 21:36:54 +0200 Subject: [PATCH 469/606] randltl: fix generation without unary operators * spot/tl/randomltl.hh (has_unary_ops): New method. * spot/tl/randomltl.cc: Avoid creating subformulas of even size when we do not have unary operators. * tests/core/randpsl.test: Test it. * NEWS: Mention it. --- NEWS | 5 +++++ spot/tl/randomltl.cc | 38 +++++++++++++++++++++++++++++++++----- spot/tl/randomltl.hh | 6 ++++++ tests/core/randpsl.test | 18 ++++++++++++++++++ 4 files changed, 62 insertions(+), 5 deletions(-) diff --git a/NEWS b/NEWS index 56e2040d4..cd7c00763 100644 --- a/NEWS +++ b/NEWS @@ -36,6 +36,11 @@ New in spot 2.12.0.dev (not yet released) were missing the rule "[*0]|f ≡ f" when f already accepts the empty word. (Issue #545.) + Bug fixes: + + - Generating random formula without any unary opertors would very + often create formulas much smaller than asked. + New in spot 2.12 (2024-05-16) Build: diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index 9aa604ee2..e415535b2 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -92,14 +92,20 @@ namespace spot { assert(n >= 3); --n; - int l = rrand(1, n - 1); + int l; // size of left + if ((n & 1) | rl->has_unary_ops()) + l = rrand(1, n - 1); + else + // if we do not have unary ops, we must split n in two odd sizes + l = rrand(0, n/2 - 1)*2 + 1; + // Force the order of generation of operands to be right, then // left. This is historical, because gcc evaluates argument // from right to left and we used to make the two calls to // generate() inside of the call to instance() before // discovering that clang would perform the nested calls from // left to right. - auto right = rl->generate(n - l); + formula right = rl->generate(n - l); return formula::binop(Op, rl->generate(l), right); } @@ -110,7 +116,25 @@ namespace spot assert(n >= 3); --n; const random_psl* rp = static_cast(rl); - int l = rrand(1, n - 1); + int l; // size of left + bool left_must_be_odd = !rp->rs.has_unary_ops(); + bool right_must_be_odd = !rl->has_unary_ops(); + if (n & 1) + { + if (left_must_be_odd && !right_must_be_odd) + l = rrand(0, n/2 - 1) * 2 + 1; + else if (!left_must_be_odd && right_must_be_odd) + l = rrand(1, n/2) * 2; + else + l = rrand(1, n - 1); + } + else + { + if (left_must_be_odd || right_must_be_odd) + l = rrand(0, n/2 - 1) * 2 + 1; + else + l = rrand(1, n - 1); + } // See comment in binop_builder. auto right = rl->generate(n - l); return formula::binop(Op, rp->rs.generate(l), right); @@ -152,9 +176,13 @@ namespace spot { assert(n >= 3); --n; - int l = rrand(1, n - 1); // See comment in binop_builder. - auto right = rl->generate(n - l); + int l; // size of left + if ((n & 1) | rl->has_unary_ops()) + l = rrand(1, n - 1); + else + l = rrand(0, n/2 - 1)*2 + 1; + formula right = rl->generate(n - l); return formula::multop(Op, {rl->generate(l), right}); } diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index d4c52debf..a7ea3561c 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -72,6 +72,12 @@ namespace spot /// occurrences of the \c F operator. 
const char* parse_options(char* options); + /// \brief whether we can use unary operators + bool has_unary_ops() const + { + return total_2_ > 0.0; + } + protected: void update_sums(); diff --git a/tests/core/randpsl.test b/tests/core/randpsl.test index 5e7192894..9d4f825aa 100755 --- a/tests/core/randpsl.test +++ b/tests/core/randpsl.test @@ -36,3 +36,21 @@ test `wc -l < formulas` = 50 randltl --psl --sere-priorities=first_match=10 -n 100 2 | grep first_match + +# the random generator had trouble generating formulas of the proper size when +# unary operators were disabled +P=true=0,false=0,not=0 +randltl --tree-size=19 -B --boolean-prio=$P 1000 -n10 --stats=%a >out +cat >expected < Date: Thu, 22 Aug 2024 17:04:48 +0200 Subject: [PATCH 470/606] bin: new ltlmix tool Fixes #400. * spot/tl/randomltl.cc, spot/tl/randomltl.hh: Adjust to accept a set of formula to replace the atomic propositions. * bin/ltlmix.cc: New file. * bin/Makefile.am: Add it. * bin/man/ltlmix.x: New file. * bin/man/Makefile.am: Add it. * doc/org/ltlmix.org: New file. * doc/Makefile.am: Add it. * bin/man/genltl.x, bin/man/randltl.x, bin/man/spot.x, bin/spot.cc, doc/org/arch.tex, doc/org/concepts.org, doc/org/tools.org, NEWS: Mention ltlmix. * tests/core/ltlmix.test: New file. * tests/Makefile.am: Add it. --- NEWS | 7 +- bin/Makefile.am | 2 + bin/ltlmix.cc | 302 ++++++++++++++++++++++++++++++++++ bin/man/Makefile.am | 4 + bin/man/genltl.x | 1 + bin/man/ltlmix.x | 7 + bin/man/randltl.x | 1 + bin/man/spot.x | 1 + bin/spot.cc | 2 + doc/Makefile.am | 1 + doc/org/arch.tex | 29 ++-- doc/org/concepts.org | 2 +- doc/org/ltlmix.org | 365 +++++++++++++++++++++++++++++++++++++++++ doc/org/tools.org | 2 + spot/tl/randomltl.cc | 122 +++++++++++--- spot/tl/randomltl.hh | 140 ++++++++++------ tests/Makefile.am | 1 + tests/core/ltlmix.test | 93 +++++++++++ 18 files changed, 995 insertions(+), 87 deletions(-) create mode 100644 bin/ltlmix.cc create mode 100644 bin/man/ltlmix.x create mode 100644 doc/org/ltlmix.org create mode 100755 tests/core/ltlmix.test diff --git a/NEWS b/NEWS index cd7c00763..178ba5327 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,9 @@ New in spot 2.12.0.dev (not yet released) Command-line tools: + - ltlmix is a new tool that generate formulas by combining existing + ones. See https://spot.lre.epita.fr/ltlmix.html for examples. + - autfilt learned --restrict-dead-end-edges, to restricts labels of edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. @@ -38,8 +41,8 @@ New in spot 2.12.0.dev (not yet released) Bug fixes: - - Generating random formula without any unary opertors would very - often create formulas much smaller than asked. + - Generating random formulas without any unary opertor would very + often create formulas much smaller than specified. 
New in spot 2.12 (2024-05-16) diff --git a/bin/Makefile.am b/bin/Makefile.am index 78d189dc9..b665d859c 100644 --- a/bin/Makefile.am +++ b/bin/Makefile.am @@ -69,6 +69,7 @@ bin_PROGRAMS = \ ltldo \ ltlfilt \ ltlgrind \ + ltlmix \ ltlsynt \ randaut \ randltl @@ -92,6 +93,7 @@ ltl2tgta_SOURCES = ltl2tgta.cc ltlcross_SOURCES = ltlcross.cc ltlgrind_SOURCES = ltlgrind.cc ltldo_SOURCES = ltldo.cc +ltlmix_SOURCES = ltlmix.cc ltlsynt_SOURCES = ltlsynt.cc dstar2tgba_SOURCES = dstar2tgba.cc spot_x_SOURCES = spot-x.cc diff --git a/bin/ltlmix.cc b/bin/ltlmix.cc new file mode 100644 index 000000000..9e6ada1c1 --- /dev/null +++ b/bin/ltlmix.cc @@ -0,0 +1,302 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + + +#include "common_sys.hh" +#include +#include "error.h" + +#include "common_setup.hh" +#include "common_finput.hh" +#include "common_output.hh" +#include "common_conv.hh" +#include "common_cout.hh" +#include "common_range.hh" + +#include +#include +#include + +enum { + OPT_BOOLEAN_PRIORITIES = 256, + OPT_DUMP_PRIORITIES, + OPT_DUPS, + OPT_LTL_PRIORITIES, + OPT_SEED, + OPT_TREE_SIZE, +}; + +static const char * argp_program_doc = + "Combine formulas taken randomly from an input set.\n\n\ +The input set is specified using FILENAME, -F FILENAME, or -f formula.\n\ +By default this generates a Boolean pattern of size 5, for instance\n\ +\"(φ₁ & φ₂) | φ₃\", where each φᵢ is randomly taken from the input set.\n\ +The size and nature of the pattern can be changed using generation\n\ +parameters. Additionally, it is possible to rename the atomic propositions\n\ +in each φᵢ using -A or -P.\v\ +Example:\n\ +\n\ +Generates 10 random Boolean combinations of terms of the form GFa, with\n\ +'a' picked from a set of 5 atomic propositions:\n\ + % ltlmix -f GFa -n10 -A5\n\ +\n\ +Build a single LTL formula over subformulas taken randomly from the list of\n\ +55 patterns by Dwyer et al., using a choice of 10 atomic propositions to\n\ +relabel subformulas:\n\ + % genltl --dac | ltlmix -L -A10\n\ +\n\ +Build 5 random positive Boolean combination of GFa and GFb:\n" + // next line is in its own double-quote to please sanity.test + " % ltlmix -f GFa -f GFb --boolean-prio=not=0,xor=0,implies=0,equiv=0 -n5"; + +static const argp_option options[] = { + // Keep this alphabetically sorted (expect for aliases). 
+ /**************************************************/ + { nullptr, 0, nullptr, 0, "Generation parameters:", 2 }, + { "ap-count", 'A', "N", 0, + "rename the atomic propositions in each selected formula by drawing " + "randomly from N atomic propositions (the rewriting is bijective " + "if N is larger than the original set)", 0 }, + { "polarized-ap", 'P', "N", 0, + "similar to -A N, but randomize the polarity of the new atomic " + "proposition", 0 }, + { "boolean", 'B', nullptr, 0, + "generate Boolean combination of formulas (default)", 0 }, + { "allow-dups", OPT_DUPS, nullptr, 0, + "allow duplicate formulas to be output", 0 }, + { "ltl", 'L', nullptr, 0, "generate LTL combinations of subformulas", 0 }, + { "formulas", 'n', "INT", 0, + "number of formulas to generate (default: 1);\n" + "use a negative value for unbounded generation", 0 }, + { "seed", OPT_SEED, "INT", 0, + "seed for the random number generator (default: 0)", 0 }, + { "tree-size", OPT_TREE_SIZE, "RANGE", 0, + "tree size of main pattern generated (default: 5);\n" + "input formulas count as size 1.", 0 }, + /**************************************************/ + { nullptr, 0, nullptr, 0, "Adjusting probabilities:", 4 }, + { "dump-priorities", OPT_DUMP_PRIORITIES, nullptr, 0, + "show current priorities, do not generate any formula", 0 }, + { "ltl-priorities", OPT_LTL_PRIORITIES, "STRING", 0, + "set priorities for LTL formulas", 0 }, + { "boolean-priorities", OPT_BOOLEAN_PRIORITIES, "STRING", 0, + "set priorities for Boolean formulas", 0 }, + { nullptr, 0, nullptr, 0, "STRING should be a comma-separated list of " + "assignments, assigning integer priorities to the tokens " + "listed by --dump-priorities.", 0 }, + /**************************************************/ + { nullptr, 0, nullptr, 0, "Output options:", -20 }, + { nullptr, 0, nullptr, 0, "The FORMAT string passed to --format may use " + "the following interpreted sequences:", -19 }, + { "%f", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the formula (in the selected syntax)", 0 }, + { "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the (serial) number of the formula (0-based)", 0 }, + { "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "the (serial) number of the formula (1-based)", 0 }, + { "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "a single %", 0 }, + COMMON_LTL_OUTPUT_SPECS, + /**************************************************/ + { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, + { nullptr, 0, nullptr, 0, nullptr, 0 } +}; + +static const argp_child children[] = { + { &finput_argp, 0, nullptr, 0 }, + { &output_argp, 0, nullptr, 0 }, + { &misc_argp, 0, nullptr, 0 }, + { nullptr, 0, nullptr, 0 } +}; + +static int opt_formulas = 1; +static spot::randltlgenerator::output_type output = + spot::randltlgenerator::Bool; +static char* opt_pL = nullptr; +static char* opt_pB = nullptr; +static bool opt_dump_priorities = false; +static int opt_seed = 0; +static range opt_tree_size = { 5, 5 }; +static bool opt_unique = true; +static int opt_ap_count = 0; +static bool opt_literal = false; + +namespace +{ + // We want all these variables to be destroyed when we exit main, to + // make sure it happens before all other global variables (like the + // atomic propositions maps) are destroyed. Otherwise we risk + // accessing deleted stuff. 
+ static struct opt_t + { + spot::atomic_prop_set sub; + }* opt = nullptr; + + class sub_processor final: public job_processor + { + public: + int + process_formula(spot::formula f, const char* filename = nullptr, + int linenum = 0) override + { + (void) filename; + (void) linenum; + opt->sub.insert(f); + return 0; + } + }; +} + +static sub_processor subreader; + +static int +parse_opt(int key, char* arg, struct argp_state*) +{ + // Called from C code, so should not raise any exception. + BEGIN_EXCEPTION_PROTECT; + switch (key) + { + case 'A': + opt_ap_count = to_int(arg, "-A/--ap-count"); + opt_literal = false; + break; + case 'B': + output = spot::randltlgenerator::Bool; + break; + case 'L': + output = spot::randltlgenerator::LTL; + break; + case 'n': + opt_formulas = to_int(arg, "-n/--formulas"); + break; + case 'P': + opt_ap_count = to_int(arg, "-P/--polarized-ap"); + opt_literal = true; + break; + case OPT_BOOLEAN_PRIORITIES: + opt_pB = arg; + break; + case OPT_LTL_PRIORITIES: + opt_pL = arg; + break; + case OPT_DUMP_PRIORITIES: + opt_dump_priorities = true; + break; + case OPT_DUPS: + opt_unique = false; + break; + case OPT_SEED: + opt_seed = to_int(arg, "--seed"); + break; + case OPT_TREE_SIZE: + opt_tree_size = parse_range(arg); + if (opt_tree_size.min > opt_tree_size.max) + std::swap(opt_tree_size.min, opt_tree_size.max); + break; + case ARGP_KEY_ARG: + jobs.emplace_back(arg, job_type::LTL_FILENAME); + break; + default: + return ARGP_ERR_UNKNOWN; + } + END_EXCEPTION_PROTECT; + return 0; +} + +int +main(int argc, char* argv[]) +{ + return protected_main(argv, [&] { + const argp ap = { options, parse_opt, "[FILENAME[/COL]...]", + argp_program_doc, children, nullptr, nullptr }; + + // This will ensure that all objects stored in this struct are + // destroyed before global variables. 
+ opt_t o; + opt = &o; + + if (int err = argp_parse(&ap, argc, argv, ARGP_NO_HELP, nullptr, nullptr)) + exit(err); + + check_no_formula("combine"); + + if (subreader.run()) + return 2; + + if (opt->sub.empty()) + error(2, 0, "the set of subformulas to build from is empty"); + + spot::srand(opt_seed); + + spot::randltlgenerator rg + (opt_ap_count, + [&] (){ + spot::option_map opts; + opts.set("output", output); + opts.set("tree_size_min", opt_tree_size.min); + opts.set("tree_size_max", opt_tree_size.max); + opts.set("seed", opt_seed); + opts.set("simplification_level", 0); + opts.set("unique", opt_unique); + opts.set("literals", opt_literal); + return opts; + }(), opt_pL, nullptr, opt_pB, &opt->sub); + + if (opt_dump_priorities) + { + switch (output) + { + case spot::randltlgenerator::LTL: + std::cout << + "Use --ltl-priorities to set the following LTL priorities:\n"; + rg.dump_ltl_priorities(std::cout); + break; + case spot::randltlgenerator::Bool: + std::cout << + "Use --boolean-priorities to set the following Boolean " + "formula priorities:\n"; + rg.dump_bool_priorities(std::cout); + break; + case spot::randltlgenerator::PSL: + case spot::randltlgenerator::SERE: + error(2, 0, "PSL/SERE output is unsupported"); + break; + } + exit(0); + } + + int count = 0; + while (opt_formulas < 0 || opt_formulas--) + { + spot::formula f = rg.next(); + if (!f) + { + error(2, 0, "failed to generate a new unique formula after %d " \ + "trials", spot::randltlgenerator::MAX_TRIALS); + } + else + { + output_formula_checked(f, nullptr, nullptr, count + 1, count); + ++count; + } + }; + + flush_cout(); + return 0; + }); +} diff --git a/bin/man/Makefile.am b/bin/man/Makefile.am index 1b6319766..2ccf4b8b1 100644 --- a/bin/man/Makefile.am +++ b/bin/man/Makefile.am @@ -35,6 +35,7 @@ dist_man1_MANS = \ ltldo.1 \ ltlfilt.1 \ ltlgrind.1 \ + ltlmix.1 \ ltlsynt.1 \ randaut.1 \ randltl.1 @@ -72,6 +73,9 @@ ltlfilt.1: $(common_dep) $(srcdir)/ltlfilt.x $(srcdir)/../ltlfilt.cc ltlgrind.1: $(common_dep) $(srcdir)/ltlgrind.x $(srcdir)/../ltlgrind.cc $(convman) ../ltlgrind$(EXEEXT) $(srcdir)/ltlgrind.x $@ +ltlmix.1: $(common_dep) $(srcdir)/ltlmix.x $(srcdir)/../ltlmix.cc + $(convman) ../ltlmix$(EXEEXT) $(srcdir)/ltlmix.x $@ + ltlsynt.1: $(common_dep) $(srcdir)/ltlsynt.x $(srcdir)/../ltlsynt.cc $(convman) ../ltlsynt$(EXEEXT) $(srcdir)/ltlsynt.x $@ diff --git a/bin/man/genltl.x b/bin/man/genltl.x index 6194ba8ab..db40c653b 100644 --- a/bin/man/genltl.x +++ b/bin/man/genltl.x @@ -80,3 +80,4 @@ Proceedings of RV'10. LNCS 6418. .BR ltlfilt (1), .BR randaut (1), .BR randltl (1) +.BR ltlmix (1) diff --git a/bin/man/ltlmix.x b/bin/man/ltlmix.x new file mode 100644 index 000000000..dbdc2f9f5 --- /dev/null +++ b/bin/man/ltlmix.x @@ -0,0 +1,7 @@ +[NAME] +ltlmix \- combine formulas selected randomly +[DESCRIPTION] +.\" Add any additional description here +[SEE ALSO] +.BR randltl (1), +.BR genltl (1) diff --git a/bin/man/randltl.x b/bin/man/randltl.x index cce4714fb..c9ab79c80 100644 --- a/bin/man/randltl.x +++ b/bin/man/randltl.x @@ -14,3 +14,4 @@ Proceedings of ATVA'13. LNCS 8172. .BR genltl (1), .BR ltlfilt (1), .BR randaut (1) +.BR ltlmix (1) diff --git a/bin/man/spot.x b/bin/man/spot.x index 037069d39..428839987 100644 --- a/bin/man/spot.x +++ b/bin/man/spot.x @@ -22,6 +22,7 @@ that are listed below. 
.BR ltldo (1) .BR ltlfilt (1) .BR ltlgrind (1) +.BR ltlmix (1) .BR ltlsynt (1) .BR randaut (1) .BR randltl (1) diff --git a/bin/spot.cc b/bin/spot.cc index 75401ddbc..e8be7f42a 100644 --- a/bin/spot.cc +++ b/bin/spot.cc @@ -37,6 +37,8 @@ static const argp_option options[] = { DOC("ltlgrind", "Mutate LTL or PSL formulas to generate similar but " "simpler ones. Use this when looking for shorter formula to " "reproduce a bug.") }, + { DOC("ltlmix", + "Combine LTL/PSL formulas taken randomly from some input set.") }, { nullptr, 0, nullptr, 0, "Tools that output automata or circuits:", 0 }, { DOC("randaut", "Generate random ω-automata.") }, { DOC("genaut", "Generate ω-automata from scalable patterns.") }, diff --git a/doc/Makefile.am b/doc/Makefile.am index 7abe05f9e..5b8e7edac 100644 --- a/doc/Makefile.am +++ b/doc/Makefile.am @@ -104,6 +104,7 @@ ORG_FILES = \ org/ltldo.org \ org/ltlfilt.org \ org/ltlgrind.org \ + org/ltlmix.org \ org/ltlsynt.org \ org/ltlsynt.tex \ org/oaut.org \ diff --git a/doc/org/arch.tex b/doc/org/arch.tex index 0e9e97e4a..f94df93e1 100644 --- a/doc/org/arch.tex +++ b/doc/org/arch.tex @@ -22,18 +22,19 @@ usedby/.style={->,ultra thick,>={Stealth[length=5mm,round]},gray!50!black}} \node[cppbox=14.12cm] (libspot) {\texttt{libspot\strut}}; \node[shbox=3cm,above right=2mm and 0mm of libspot.north west,align=center] (shcmd) { - \href{https://spot.lrde.epita.fr/randltl.html}{\texttt{randltl}}\\ - \href{https://spot.lrde.epita.fr/ltlfilt.html}{\texttt{ltlfilt}}\\ - \href{https://spot.lrde.epita.fr/randaut.html}{\texttt{randaut}}\\ - \href{https://spot.lrde.epita.fr/autfilt.html}{\texttt{autfilt}}\\ - \href{https://spot.lrde.epita.fr/ltl2tgba.html}{\texttt{ltl2tgba}}\\ - \href{https://spot.lrde.epita.fr/ltl2tgta.html}{\texttt{ltl2tgta}}\\ - \href{https://spot.lrde.epita.fr/dstar2tgba.html}{\texttt{dstar2tgba}}\\ - \href{https://spot.lrde.epita.fr/ltlcross.html}{\texttt{ltlcross}}\\ - \href{https://spot.lrde.epita.fr/ltlgrind.html}{\texttt{ltlgrind}}\\ - \href{https://spot.lrde.epita.fr/ltlsynt.html}{\texttt{ltlsynt}}\\ - \href{https://spot.lrde.epita.fr/ltldo.html}{\texttt{ltldo}}\\ - \href{https://spot.lrde.epita.fr/autcross.html}{\texttt{autcross}} + \href{https://spot.lre.epita.fr/randltl.html}{\texttt{randltl}}\\ + \href{https://spot.lre.epita.fr/ltlmix.html}{\texttt{ltlmix}}\\ + \href{https://spot.lre.epita.fr/ltlfilt.html}{\texttt{ltlfilt}}\\ + \href{https://spot.lre.epita.fr/randaut.html}{\texttt{randaut}}\\ + \href{https://spot.lre.epita.fr/autfilt.html}{\texttt{autfilt}}\\ + \href{https://spot.lre.epita.fr/ltl2tgba.html}{\texttt{ltl2tgba}}\\ + \href{https://spot.lre.epita.fr/ltl2tgta.html}{\texttt{ltl2tgta}}\\ + \href{https://spot.lre.epita.fr/dstar2tgba.html}{\texttt{dstar2tgba}}\\ + \href{https://spot.lre.epita.fr/ltlcross.html}{\texttt{ltlcross}}\\ + \href{https://spot.lre.epita.fr/ltlgrind.html}{\texttt{ltlgrind}}\\ + \href{https://spot.lre.epita.fr/ltlsynt.html}{\texttt{ltlsynt}}\\ + \href{https://spot.lre.epita.fr/ltldo.html}{\texttt{ltldo}}\\ + \href{https://spot.lre.epita.fr/autcross.html}{\texttt{autcross}} }; \node[cppbox=4.7cm,above right=0mm and 2mm of shcmd.south east] (libgen) {\texttt{libspotgen\strut}}; \node[cppbox=2.5cm,above right=0mm and 2mm of libgen.south east] (buddy) {\texttt{libbddx\strut}}; @@ -41,8 +42,8 @@ \node[cppbox=4cm,above right=0mm and 2mm of pyspot.south east] (libltsmin) {\texttt{libspotltsmin\strut}}; \node[shbox=1.5cm,above right=2mm and 0mm of libgen.north west,align=center] (genaut) { - 
\href{https://www.lrde.epita.fr/genaut.html}{\texttt{genaut\strut}}\\ - \href{https://www.lrde.epita.fr/genltl.html}{\texttt{genltl}} + \href{https://www.lre.epita.fr/genaut.html}{\texttt{genaut\strut}}\\ + \href{https://www.lre.epita.fr/genltl.html}{\texttt{genltl}} }; \node[pybox=3cm,above left=2mm and 0mm of libgen.north east] (pygen) {\texttt{import spot.gen\strut}}; diff --git a/doc/org/concepts.org b/doc/org/concepts.org index c4de7a324..d7ce0a57e 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -158,7 +158,7 @@ $txt #+END_SRC #+RESULTS: -[[file:concept-buchi.svg]] +[[file:concept-buchi2.svg]] The =1= displayed on the edge that loops on state =1= should be read as /true/, i.e., the Boolean formula that accepts diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org new file mode 100644 index 000000000..338122d41 --- /dev/null +++ b/doc/org/ltlmix.org @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- +#+TITLE: =ltlgrind= +#+DESCRIPTION: Spot command-line tool for combining LTL formulas randomly +#+INCLUDE: setup.org +#+HTML_LINK_UP: tools.html +#+PROPERTY: header-args:sh :results verbatim :exports both + +This tool creates new formulas by combining formulas randomly selected +from an input set of formulas. Some authors have argued that for some +tasks, like [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][LTL satisfiability]], working with randomly generated +formulas is often easy, because random formulas tend to simplify +trivially. =ltlmix= allows you to take a set of formulas, usually +some handwritten, meaningful formulas, and combine those formulas to +build larger sets that are possibly more challenging. + +Here is a very simple example that builds five formulas that are +Boolean combination of formulas from taken in the set +$\{\mathsf{GF}a,\mathsf{FG}b,\mathsf{X}c\}$: + +#+BEGIN_SRC sh :exports both +ltlmix -f GFa -f FGb -f Xc -n 5 +#+END_SRC + +#+RESULTS: +: !FGb xor !Xc +: !GFa -> !FGb +: FGb | (FGb -> Xc) +: FGb +: GFa & (FGb | Xc) + +* Shape of the generated formulas + +** Size of the syntax tree + +For each formula that it generates, =ltlmix= constructs a random +syntax-tree of a certain size (5 by default) in which internal nodes +represent operators selected randomly from a list of operator, and +leaves are subformulas selected randomly from the set of input +formulas. As an example, the syntax tree of =!φ₁ xor !φ₂= has size 5, +and its leaves =φ₁= and =φ₂= will be taken randomly from the set of +input formulas. + +The algorithm is actually the same as for =randltl=, except +that =randltl= use random atomic propositions as leaves when =ltlmix= +uses random formulas. + +The same input formula can be picked several times to be used on +multiple leaves of the tree. Note that because Spot implements some +trivial rewritings directly during the construction of any formula, +formulas like =FGb | !!FGb= (which correspond to a tree of size 5 in +the above example) cannot be represented: they are automatically +simplified to =FGb=. Similarly, something like =φ xor φ= will be +output as =0=. + +The size of the tree can be changed using the =--tree-size= option. 
+ +#+BEGIN_SRC sh :exports both + for i in 1 2 3 4 5 6 7 8 9 10 11 12; do + ltlmix -fXa -fGb -fFc -fXd -fGe --tree-size=$i + done +#+END_SRC + +#+RESULTS: +#+begin_example +Fc +!Xd +0 +Ge xor !Fc +!Xd xor !Ge +!Xd xor (Fc | Xd) +!Ge +Ge xor (!Ge -> Gb) +Ge xor (!Xa -> !Fc) +(Ge & !Fc) xor (!Gb xor !Ge) +(Ge & !Fc) xor (!Gb xor (Gb | Fc)) +(Ge & (Gb xor Xd)) xor (!Fc -> (Gb | Fc)) +#+end_example + +The above shows that while the size of the syntax tree generally grows +along with =--tree-size= there are several cases where it reduces +trivially. + +** Operator priorities + +It is possible to control the set of operators used in the generation. +The first step is to obtain that list of operators with =--dump-priorities=. +For instance: + +#+BEGIN_SRC sh :exports both + ltlmix -fXa -fGb -fFc -fXd -fGe --dump-priorities +#+END_SRC + +#+RESULTS: +#+begin_example +Use --boolean-priorities to set the following Boolean formula priorities: +sub 5 +false 0 +true 0 +not 1 +equiv 1 +implies 1 +xor 1 +and 1 +or 1 +#+end_example + +In the above list, =false= and =true= represent the Boolean constants +(which are usually undesirable when building random Boolean formulas), +and =sub= represent a random formula drawn from the list of input +formulas. + +The above command shows that each operator has a weight, called +/priority/. When the priority is 0, the operator is never used. When +=ltlmix= generates a syntax tree of size N, it looks among all +operators that can be used at the root of such a tree, and performs a +weighted random selection. In other words, an operator with priority +=2= will be twice more likely to be selected than an operator with +priority =1=. + +Those priorities can be changed with =--boolean-priorities= as in the +following example, which disables =xor= and makes =<->= thrice more +likely to appear. + +#+BEGIN_SRC sh :exports both + for i in 1 2 3 4 5 6 7 8 9 10 11 12; do + ltlmix -fXa -fGb -fFc -fXd -fGe --tree-size=$i --boolean-prio='xor=0,equiv=3' + done +#+END_SRC + +#+RESULTS: +#+begin_example +Fc +!Xd +1 +Ge <-> !Fc +!Xd <-> !Ge +!Xd <-> (Fc | Xd) +Ge +Ge <-> (Gb <-> !Ge) +Ge <-> (!Fc <-> !Xa) +(Ge & !Fc) <-> (!Ge -> !Gb) +(Ge & !Fc) <-> ((Gb | Fc) -> !Gb) +(Ge & (Gb <-> Xd)) <-> ((Gb | Fc) <-> !Fc) +#+end_example + +** Boolean or LTL syntax tree + +By default, the syntax tree generated on top of the randomly selected +input formula uses only Boolean operators. + +Using option =-L= will use LTL operators instead. + +#+BEGIN_SRC sh :exports both + ltlmix -fXa -fGb -fFc --tree-size=10 -L -n10 +#+END_SRC + +#+RESULTS: +#+begin_example +Gb R (XFc W 0) +!(Gb | !Xa) +1 U !X(0) +(Xa xor Gb) -> (GXa M Fc) +GFc M 1 +(Fc U Gb) -> (0 R Xa) +!Gb <-> (Gb | GFc) +1 +GFc | (1 U Xa) +!(Xa | GFc) +#+end_example + +The following operators are used: + +#+BEGIN_SRC sh :exports both + ltlmix -fXa -fGb -fFc -fXd -fGe -L --dump-priorities +#+END_SRC + +#+RESULTS: +#+begin_example +Use --ltl-priorities to set the following LTL priorities: +sub 5 +false 1 +true 1 +not 1 +F 1 +G 1 +X 1 +equiv 1 +implies 1 +xor 1 +R 1 +U 1 +W 1 +M 1 +and 1 +or 1 +#+end_example + +Note that in the LTL case, =false= and =true= can be generated by default. + +* Randomizing atomic propositions with =-A= or =-P= + +Options =-A= or =-P= can be used to change the atomic propositions +used in the input formulas. This works as follows: if =-A N= was +given, every time an input formula φ is selected, its atomic +propositions are replaced by atomic propositions randomly selected in +a set of size $N$. 
If φ uses $i$ atomic propositions and $i\ge N$, +then those $i$ atomic proposition will be remapped to $i$ distinct +atomic propositions chosen randomly in that set. if $i>N$, some of +the new atomic propositions may replace several of the original atomic +propositions. + +Option =-P N= is similar to =-A N= except that the selected atomic +propositions can possibly be negated. + + +These options solve two problems: + +- They lessen the issue that a formula selected several times can lead + to syntax tree such as =φ | φ | φ= that reduces to =φ=. Now, each + occurrence of =φ= as a chance to use different atomic propositions. + (the larger =N= is, the more likely it is that these copies of φ + will be different). + +- They allow combining formulas that had completely different sets of + atomic propositions, in such a way that they are now interdependent + (the smaller N is the more likely it is that subformulas will share + atomic propositions). + + +Here is an example with a single formula, =GFa=, whose atomic proposition +will be randomly replaced by one of $\{p_0,p_1,p_2,p_3,p_4\}$. + +#+BEGIN_SRC sh :exports both + ltlmix -fGFa -A5 --tree-size=8 -n10 +#+END_SRC + +#+RESULTS: +#+begin_example +(GFp2 & GFp3) xor (!GFp0 xor GFp1) +(GFp4 -> GFp1) -> !GFp2 +!GFp4 | ((GFp2 & GFp3) -> GFp2) +!GFp2 | (GFp3 <-> (GFp2 & GFp1)) +!GFp0 | GFp4 +!(GFp2 & GFp1) <-> (GFp3 xor GFp0) +(GFp2 xor GFp0) | (GFp4 -> !GFp0) +(GFp4 | !GFp3) -> GFp4 +!GFp0 -> (GFp2 | GFp1) +!GFp1 <-> (!GFp2 xor !GFp1) +#+end_example + +Here is a similar example, with polarized atomic propositions instead: + +#+BEGIN_SRC sh :exports both + ltlmix -fGFa -P5 --tree-size=8 -n10 +#+END_SRC + +#+RESULTS: +#+begin_example +(GFp2 & GF!p3) xor (GFp4 -> !GF!p1) +(GFp4 | !GFp2) -> (GFp1 -> GF!p1) +!GF!p2 & (GF!p0 xor (GF!p0 -> GF!p3)) +GFp1 <-> (GF!p3 | !GFp0) +GF!p1 & GFp0 & (GF!p3 xor !GF!p4) +(GF!p1 xor GF!p2) | (GF!p3 & !GF!p4) +!(GFp4 xor (!GF!p2 | !GF!p3)) +GFp0 | (!GFp1 -> (GFp1 -> GF!p4)) +GF!p1 xor (!GF!p2 | (GF!p1 <-> GFp0)) +!((GF!p2 <-> GF!p4) & (GFp1 xor GF!p2)) +#+end_example + + +* More serious examples + +** Mixing the DAC patterns + +The command [[file:genltl.org][=genltl --dac-pattern=]] will print a list of 55 LTL +formulas representing various specification patterns listed by Dwyer +et al. (FMSP'98). Using =--stat=%x= to count the atomic propositions +in each formula, and some standard unix tools, we can compute that +they use at most 6 atomic propositions. + +#+BEGIN_SRC sh :exports both + genltl --dac-pattern --stat=%x | sort -n | tail -n 1 +#+END_SRC +#+RESULTS: +: 6 + +Based on this, we could decide to generate Boolean combination of +those formulas while replacing atomic propositions by literals built +out of a set of 10 atomic propositions (chosen larger than 6 to ensure +that each individual formula will still make sense after the change of +atomic propositions). 
+ +#+BEGIN_SRC sh :exports both + genltl --dac-pattern | ltlmix -n8 -P10 +#+END_SRC + +#+RESULTS: +: !G((p8 & F!p7) -> (!p4 U (!p7 | (!p2 & !p4 & X(!p4 U p1))))) xor !G(!p3 -> ((p4 & !p7) U (!p4 | ((p4 & p7) U (!p4 | ((p4 & !p7) U (!p4 | ((p4 & p7) U (!p4 | (!p7 W !p4) | Gp7))))))))) +: !G(!p3 -> Gp5) xor !G(!p7 -> G(p9 -> (!p5 & !p8 & X(!p5 U p2)))) +: G(p6 -> ((!(!p1 & p7 & X(!p1 U (!p1 & !p3))) U (p1 | !p2)) | G!(p7 & XF!p3))) & (G((!p4 & XF!p5) -> XF(!p5 & F!p0)) <-> G((p5 & !p6) -> (p5 W (p5 & p7)))) +: !G((p0 & p9) -> (!p7 W (!p0 | p4))) & !G((p1 & !p2) -> (!p8 W p2)) +: ((Fp2 -> ((!p1 -> (!p2 U (p0 & !p2))) U p2)) -> G(p1 -> G(p9 -> (!p4 & p8 & X(!p4 U !p7))))) xor G(p1 -> Gp9) +: !G((p5 & !p9 & F!p5) -> ((!p8 -> (p5 U (!p0 & p5))) U !p5)) -> !G((p6 & p9) -> (!p7 W !p9)) +: G((!p1 & !p2) -> (p9 W p1)) <-> (G(p5 -> G(p0 -> F!p4)) -> (Fp6 -> ((!p5 & !p6) U (p6 | ((p5 & !p6) U (p6 | ((!p5 & !p6) U (p6 | ((p5 & !p6) U (p6 | (!p5 U p6))))))))))) +: ((Fp1 -> ((p6 -> (!p1 U (!p1 & !p2 & X(!p1 U !p9)))) U p1)) <-> (F!p0 -> (p0 U (p0 & !p7 & X(p0 U !p9))))) | (Fp2 -> (p6 U (p2 | (p6 & !p7 & X(p6 U p1))))) + +Now we might want to clean this list a bit by relabeling each formula +so is uses atomic propositions $\{p_0,p_1,...\}$ starting at 0 and without gap. + +#+BEGIN_SRC sh :exports both + genltl --dac-pattern | ltlmix -n8 -P10 | ltlfilt --relabel=pnn +#+END_SRC + +#+RESULTS: +: !G((p0 & F!p1) -> (!p2 U (!p1 | (!p2 & !p3 & X(!p2 U p4))))) xor !G(!p5 -> ((!p1 & p2) U (!p2 | ((p1 & p2) U (!p2 | ((!p1 & p2) U (!p2 | ((p1 & p2) U (!p2 | (!p1 W !p2) | Gp1))))))))) +: !G(!p0 -> Gp1) xor !G(!p2 -> G(p3 -> (!p1 & !p4 & X(!p1 U p5)))) +: G(p0 -> ((!(!p1 & p2 & X(!p1 U (!p1 & !p3))) U (p1 | !p4)) | G!(p2 & XF!p3))) & (G((!p5 & XF!p6) -> XF(!p6 & F!p7)) <-> G((!p0 & p6) -> (p6 W (p2 & p6)))) +: !G((p0 & p1) -> (!p2 W (!p0 | p3))) & !G((p4 & !p5) -> (!p6 W p5)) +: ((Fp0 -> ((!p1 -> (!p0 U (!p0 & p2))) U p0)) -> G(p1 -> G(p3 -> (!p4 & p5 & X(!p4 U !p6))))) xor G(p1 -> Gp3) +: !G((p0 & !p1 & F!p0) -> ((!p2 -> (p0 U (p0 & !p3))) U !p0)) -> !G((p1 & p4) -> (!p5 W !p1)) +: G((!p0 & !p1) -> (p2 W p0)) <-> (G(p3 -> G(p4 -> F!p5)) -> (Fp6 -> ((!p3 & !p6) U (p6 | ((p3 & !p6) U (p6 | ((!p3 & !p6) U (p6 | ((p3 & !p6) U (p6 | (!p3 U p6))))))))))) +: ((Fp0 -> ((p1 -> (!p0 U (!p0 & !p2 & X(!p0 U !p3)))) U p0)) <-> (F!p4 -> (p4 U (p4 & !p5 & X(p4 U !p3))))) | (Fp2 -> (p1 U (p2 | (p1 & !p5 & X(p1 U p0))))) + +** Random conjunctions + +Some benchmark (e.g., [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][for LTL satisfiability]]) are built by conjunction +of $L$ random formulas picked from a set of basic formulas. Each +picked formula has its atomic proposition mapped to random literals +built from a subset of $m$ atomic variables. + +Given a value for $m$, option =-P m= will achieve the second part of +the above description. To build a conjunction of $L$ formulas, we +need to ask for a tree of size $2L-1$ in which only the =and= operator +is allowed. + +Here is an example with $L=10$ (hence =--tree-size=19=) and $m=50$. +The example use a small set of three basic formulas +$\{\mathsf{G}a,\mathsf{F}a,\mathsf{X}a\}$ for illustration, but in +practice you should replace these =-f= options by =-F FILENAME= +pointing to a file containing all the input formulas to select from. 
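+
+With such a file, the same recipe would look as follows, where
+=basic.ltl= is only a placeholder for your own list of formulas (no
+output is shown, since it depends on that file):
+
+#+BEGIN_SRC sh
+  ltlmix -F basic.ltl -n10 -P50 \
+    --tree-size=19 --boolean-prio=not=0,or=0,xor=0,equiv=0,implies=0
+#+END_SRC
+
+Back to the three-formula illustration: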
+ +#+BEGIN_SRC sh :exports both + ltlmix -fGa -fFa -fXa -n10 -P50 \ + --tree-size=19 --boolean-prio=not=0,or=0,xor=0,equiv=0,implies=0 +#+END_SRC + +#+RESULTS: +#+begin_example +Xp27 & F!p21 & G!p19 & X!p26 & Fp4 & Gp43 & Fp39 & Fp7 +G!p28 & G!p30 & X!p34 & Fp21 & Gp3 & Fp6 & F!p18 & Xp5 & Fp8 & F!p12 +X!p32 & Xp9 & Gp4 & Xp48 & F!p48 & X!p6 & Fp5 & Xp20 & Fp28 & Fp26 +G!p46 & X!p14 & Fp29 & X!p36 & Fp12 & Xp47 & Fp42 & Gp14 & Fp19 +G!p28 & Fp21 & Fp36 & F!p0 & G!p14 & Xp21 & F!p28 & G!p21 & Gp21 & Gp40 +Gp3 & F!p48 & F!p28 & Xp7 & Gp8 & Xp42 & Gp0 & Xp36 & F!p2 & G!p0 +Xp36 & Xp46 & F!p31 & Xp11 & Xp26 & G!p9 & F!p36 & X!p12 & Fp15 & Xp18 +Xp9 & X!p33 & Fp44 & X!p13 & Gp37 & Xp19 & G!p43 & F!p34 & Gp36 & Gp10 +Xp27 & Xp5 & Fp28 & Xp18 & G!p13 & Gp35 & Gp38 & G!p45 & G!p48 & Gp12 +Xp7 & G!p48 & Xp14 & Fp24 & Xp43 & Fp47 & Fp14 & Gp30 & Xp23 & G!p31 +#+end_example + +Selecting 10 random conjuncts out of 3×50×2=300 possibilities has a +13.7% chance that at least 2 conjuncts will be identical (see [[https://en.wikipedia.org/wiki/Birthday_problem][Birthday +paradox]]), so because of Spot's trivial rewritings, some hove the above +formulas may have fewer than 10 conjuncts. diff --git a/doc/org/tools.org b/doc/org/tools.org index 46ca38ccd..e24258a19 100644 --- a/doc/org/tools.org +++ b/doc/org/tools.org @@ -41,6 +41,7 @@ corresponding commands are hidden. * Command-line tools - [[file:randltl.org][=randltl=]] Generate random LTL/PSL formulas. +- [[file:ltlmix.org][=ltlmix=]] Combine LTL/PSL formulas taken randomly from some input set. - [[file:ltlfilt.org][=ltlfilt=]] Filter, convert, and transform LTL/PSL formulas. - [[file:genltl.org][=genltl=]] Generate LTL formulas from scalable patterns. - [[file:ltl2tgba.org][=ltl2tgba=]] Translate LTL/PSL formulas into various types of automata. @@ -77,6 +78,7 @@ convenience, you can browse their HTML versions: [[./man/ltldo.1.html][=ltldo=]](1), [[./man/ltlfilt.1.html][=ltlfilt=]](1), [[./man/ltlgrind.1.html][=ltlgrind=]](1), +[[./man/ltlmix.1.html][=ltlmix=]](1), [[./man/ltlsynt.1.html][=ltlsynt=]](1), [[./man/randaut.1.html][=randaut=]](1), [[./man/randltl.1.html][=randltl=]](1), diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index e415535b2..913b60522 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -28,6 +28,45 @@ namespace spot { namespace { + // Rename atomic propositions in f using atomic propositions drawn + // randomly from \a ap. Avoid repetition if \a ap is large + // enough. If \a lit is true, change the polarity of the atomic + // proposition randomly. + static formula + randomize_ap(formula f, const atomic_prop_set* ap, bool lit) + { + std::vector randap(ap->begin(), ap->end()); + unsigned current_range = randap.size(); + std::map mapping; + + auto relabel = [&](formula f, auto self) -> formula + { + if (f.is(op::ap)) + { + // Did we already rename this AP? 
+ if (auto it = mapping.find(f); it != mapping.end()) + return it->second; + + // If we exhausted all possible AP, start again + if (current_range == 0) + current_range = randap.size(); + + // + unsigned pos = mrand(current_range--); + formula ap = randap[pos]; + std::swap(randap[current_range], randap[pos]); + + if (lit && drand() < 0.5) + ap = formula::Not(ap); + + return mapping[f] = ap; + } + return f.map(self, self); + }; + return relabel(f, relabel); + } + + static formula ap_builder(const random_formula* rl, int n) { @@ -38,6 +77,20 @@ namespace spot return *i; } + static formula + pattern_builder(const random_formula* rl, int n) + { + assert(n == 1); + (void) n; + atomic_prop_set::const_iterator i = rl->patterns()->begin(); + std::advance(i, mrand(rl->patterns()->size())); + formula f = *i; + const atomic_prop_set* ap = rl->ap(); + if (ap && ap->size() > 0) + f = randomize_ap(f, ap, rl->draw_literals()); + return f; + } + static formula true_builder(const random_formula*, int n) { @@ -353,13 +406,28 @@ namespace spot } // Boolean formulae - random_boolean::random_boolean(const atomic_prop_set* ap) + random_boolean::random_boolean(const atomic_prop_set* ap, + const atomic_prop_set* patterns) : random_formula(9, ap) { - proba_[0].setup("ap", 1, ap_builder); - proba_[0].proba = ap_->size(); + if (patterns) + { + proba_[0].setup("sub", 1, pattern_builder); + patterns_ = patterns; + proba_[0].proba = patterns_->size(); + } + else + { + proba_[0].setup("ap", 1, ap_builder); + proba_[0].proba = ap_->size(); + } proba_[1].setup("false", 1, false_builder); proba_[2].setup("true", 1, true_builder); + if (patterns) + { + proba_[1].proba = 0.0; + proba_[2].proba = 0.0; + } proba_2_or_more_ = proba_2_ = proba_ + 3; proba_[3].setup("not", 2, unop_builder); proba_[4].setup("equiv", 3, binop_builder); @@ -373,10 +441,19 @@ namespace spot // LTL formulae void - random_ltl::setup_proba_() + random_ltl::setup_proba_(const atomic_prop_set* patterns) { - proba_[0].setup("ap", 1, ap_builder); - proba_[0].proba = ap_->size(); + if (patterns) + { + proba_[0].setup("sub", 1, pattern_builder); + patterns_ = patterns; + proba_[0].proba = patterns_->size(); + } + else + { + proba_[0].setup("ap", 1, ap_builder); + proba_[0].proba = ap_->size(); + } proba_[1].setup("false", 1, false_builder); proba_[2].setup("true", 1, true_builder); proba_2_or_more_ = proba_2_ = proba_ + 3; @@ -395,17 +472,18 @@ namespace spot proba_[15].setup("or", 3, multop_builder); } - random_ltl::random_ltl(const atomic_prop_set* ap) + random_ltl::random_ltl(const atomic_prop_set* ap, + const atomic_prop_set* patterns) : random_formula(16, ap) { - setup_proba_(); + setup_proba_(patterns); update_sums(); } random_ltl::random_ltl(int size, const atomic_prop_set* ap) : random_formula(size, ap) { - setup_proba_(); + setup_proba_(nullptr); // No call to update_sums(), this functions is always // called by the random_psl constructor. 
} @@ -428,7 +506,8 @@ namespace spot const option_map& opts, char* opt_pL, char* opt_pS, - char* opt_pB) + char* opt_pB, + const atomic_prop_set* subs) : opt_simpl_level_(opts.get("simplification_level", 3)), simpl_(tl_simplifier_options{opt_simpl_level_}) { @@ -439,6 +518,7 @@ namespace spot opt_tree_size_max_ = opts.get("tree_size_max", 15); opt_unique_ = opts.get("unique", 1); opt_wf_ = opts.get("wf", 0); + bool lit = opts.get("literals", 0); const char* tok_pL = nullptr; const char* tok_pS = nullptr; @@ -447,23 +527,25 @@ namespace spot switch (output_) { case randltlgenerator::LTL: - rf_ = new random_ltl(&aprops_); + rf_ = new random_ltl(&aprops_, subs); + rf_->draw_literals(lit); if (opt_pS) - throw std::invalid_argument("Cannot set sere priorities with " + throw std::invalid_argument("Cannot set SERE priorities with " "LTL output"); if (opt_pB) - throw std::invalid_argument("Cannot set boolean priorities with " + throw std::invalid_argument("Cannot set Boolean priorities with " "LTL output"); tok_pL = rf_->parse_options(opt_pL); break; case randltlgenerator::Bool: - rf_ = new random_boolean(&aprops_); + rf_ = new random_boolean(&aprops_, subs); + rf_->draw_literals(lit); tok_pB = rf_->parse_options(opt_pB); if (opt_pL) - throw std::invalid_argument("Cannot set ltl priorities with " + throw std::invalid_argument("Cannot set LTL priorities with " "Boolean output"); if (opt_pS) - throw std::invalid_argument("Cannot set sere priorities " + throw std::invalid_argument("Cannot set SERE priorities " "with Boolean output"); break; case randltlgenerator::SERE: @@ -471,7 +553,7 @@ namespace spot tok_pS = rs_->parse_options(opt_pS); tok_pB = rs_->rb.parse_options(opt_pB); if (opt_pL) - throw std::invalid_argument("Cannot set ltl priorities " + throw std::invalid_argument("Cannot set LTL priorities " "with SERE output"); break; case randltlgenerator::PSL: @@ -500,9 +582,10 @@ namespace spot const option_map& opts, char* opt_pL, char* opt_pS, - char* opt_pB) + char* opt_pB, + const atomic_prop_set* subs) : randltlgenerator(create_atomic_prop_set(aprops_n), opts, - opt_pL, opt_pS, opt_pB) + opt_pL, opt_pS, opt_pB, subs) { } @@ -602,4 +685,5 @@ namespace spot { rs_->rb.dump_priorities(os); } + } diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index a7ea3561c..8c2e7e0cd 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -44,16 +44,33 @@ namespace spot delete[] proba_; } - /// Return the set of atomic proposition used to build formulae. - const atomic_prop_set* - ap() const + /// Return the set of atomic proposition used to build formulas. + const atomic_prop_set* ap() const { return ap_; } + /// Return the set of patterns (sub-formulas) used to build formulas. + const atomic_prop_set* patterns() const + { + return patterns_; + } + + /// Check whether relabeling APs should use literals. + bool draw_literals() const + { + return draw_literals_; + } + + /// Set whether relabeling APs should use literals. + void draw_literals(bool lit) + { + draw_literals_ = lit; + } + /// \brief Generate a formula of size \a n. /// - /// It is possible to obtain formulae that are smaller than \a + /// It is possible to obtain formulas that are smaller than \a /// n, because some simple simplifications are performed by the /// AST. (For instance the formula a | a is /// automatically reduced to a by spot::multop.) @@ -63,7 +80,7 @@ namespace spot /// and atomic propositions. std::ostream& dump_priorities(std::ostream& os) const; - /// \brief Update the priorities used to generate the formulae. 
+ /// \brief Update the priorities used to generate the formulas. /// /// \a options should be comma-separated list of KEY=VALUE /// assignments, using keys from the above list. @@ -98,14 +115,16 @@ namespace spot op_proba* proba_2_or_more_; double total_2_and_more_; const atomic_prop_set* ap_; + const atomic_prop_set* patterns_ = nullptr; + bool draw_literals_; }; /// \ingroup tl_io - /// \brief Generate random LTL formulae. + /// \brief Generate random LTL formulas. /// - /// This class recursively constructs LTL formulae of a given - /// size. The formulae will use the use atomic propositions from + /// This class recursively constructs LTL formulas of a given + /// size. The formulas will use the use atomic propositions from /// the set of propositions passed to the constructor, in addition /// to the constant and all LTL operators supported by Spot. /// @@ -118,25 +137,26 @@ namespace spot public: /// Create a random LTL generator using atomic propositions from \a ap. /// - /// The default priorities are defined as follows: + /// The default priorities are defined as follows, depending on the + /// presence of \a subformulas. /// /** \verbatim - ap n - false 1 - true 1 - not 1 - F 1 - G 1 - X 1 - equiv 1 - implies 1 - xor 1 - R 1 - U 1 - W 1 - M 1 - and 1 - or 1 + ap n sub n + false 1 false 1 + true 1 true 1 + not 1 not 1 + F 1 F 1 + G 1 G 1 + X 1 X 1 + equiv 1 equiv 1 + implies 1 implies 1 + xor 1 xor 1 + R 1 R 1 + U 1 U 1 + W 1 W 1 + M 1 M 1 + and 1 and 1 + or 1 or 1 \endverbatim */ /// /// Where \c n is the number of atomic propositions in the @@ -147,18 +167,25 @@ namespace spot /// as each constant (i.e., true and false) to be picked. /// /// These priorities can be changed use the parse_options method. - random_ltl(const atomic_prop_set* ap); + /// + /// If a set of subformulas is passed to the constructor, the generator + /// will build a Boolean formulas using patterns as atoms. Atomic + /// propositions in patterns will be rewritten randomly by drawing + /// some from \a ap. The probability of false/true to be generated + /// default to 0 in this case. + random_ltl(const atomic_prop_set* ap, + const atomic_prop_set* subformulas = nullptr); protected: - void setup_proba_(); + void setup_proba_(const atomic_prop_set* patterns); random_ltl(int size, const atomic_prop_set* ap); }; /// \ingroup tl_io - /// \brief Generate random Boolean formulae. + /// \brief Generate random Boolean formulas. /// - /// This class recursively constructs Boolean formulae of a given size. - /// The formulae will use the use atomic propositions from the + /// This class recursively constructs Boolean formulas of a given size. + /// The formulas will use the use atomic propositions from the /// set of propositions passed to the constructor, in addition to the /// constant and all Boolean operators supported by Spot. /// @@ -169,18 +196,19 @@ namespace spot /// Create a random Boolean formula generator using atomic /// propositions from \a ap. /// - /// The default priorities are defined as follows: + /// The default priorities are defined as follows depending on + /// the presence of \a subformulas. 
/// /** \verbatim - ap n - false 1 - true 1 - not 1 - equiv 1 - implies 1 - xor 1 - and 1 - or 1 + ap n sub n + false 1 false 0 + true 1 true 0 + not 1 not 1 + equiv 1 equiv 1 + implies 1 implies 1 + xor 1 xor 1 + and 1 and 1 + or 1 or 1 \endverbatim */ /// /// Where \c n is the number of atomic propositions in the @@ -191,14 +219,20 @@ namespace spot /// as each constant (i.e., true and false) to be picked. /// /// These priorities can be changed use the parse_options method. - random_boolean(const atomic_prop_set* ap); + /// + /// If a set of \a subformulas is passed to the constructor, the + /// generator will build a Boolean formulas using patterns as + /// atoms. Atomic propositions in patterns will be rewritten + /// randomly by drawing some from \a ap. + random_boolean(const atomic_prop_set* ap, + const atomic_prop_set* subformulas = nullptr); }; /// \ingroup tl_io /// \brief Generate random SERE. /// /// This class recursively constructs SERE of a given size. - /// The formulae will use the use atomic propositions from the + /// The formulas will use the use atomic propositions from the /// set of propositions passed to the constructor, in addition to the /// constant and all SERE operators supported by Spot. /// @@ -230,7 +264,7 @@ namespace spot /// These priorities can be changed use the parse_options method. /// /// In addition, you can set the properties of the Boolean - /// formula generator used to build Boolean subformulae using + /// formula generator used to build Boolean subformulas using /// the parse_options method of the \c rb attribute. random_sere(const atomic_prop_set* ap); @@ -238,10 +272,10 @@ namespace spot }; /// \ingroup tl_io - /// \brief Generate random PSL formulae. + /// \brief Generate random PSL formulas. /// - /// This class recursively constructs PSL formulae of a given size. - /// The formulae will use the use atomic propositions from the + /// This class recursively constructs PSL formulas of a given size. + /// The formulas will use the use atomic propositions from the /// set of propositions passed to the constructor, in addition to the /// constant and all PSL operators supported by Spot. class SPOT_API random_psl: public random_ltl @@ -249,7 +283,7 @@ namespace spot public: /// Create a random PSL generator using atomic propositions from \a ap. /// - /// PSL formulae are built by combining LTL operators, plus + /// PSL formulas are built by combining LTL operators, plus /// three operators (EConcat, UConcat, Closure) taking a SERE /// as parameter. /// @@ -287,11 +321,11 @@ namespace spot /// These priorities can be changed use the parse_options method. /// /// In addition, you can set the properties of the SERE generator - /// used to build SERE subformulae using the parse_options method + /// used to build SERE subformulas using the parse_options method /// of the \c rs attribute. random_psl(const atomic_prop_set* ap); - /// The SERE generator used to generate SERE subformulae. + /// The SERE generator used to generate SERE subformulas. 
random_sere rs; }; @@ -307,12 +341,14 @@ namespace spot randltlgenerator(int aprops_n, const option_map& opts, char* opt_pL = nullptr, char* opt_pS = nullptr, - char* opt_pB = nullptr); + char* opt_pB = nullptr, + const atomic_prop_set* subformulas = nullptr); randltlgenerator(atomic_prop_set aprops, const option_map& opts, char* opt_pL = nullptr, char* opt_pS = nullptr, - char* opt_pB = nullptr); + char* opt_pB = nullptr, + const atomic_prop_set* subformulas = nullptr); ~randltlgenerator(); @@ -345,4 +381,6 @@ namespace spot random_psl* rp_ = nullptr; random_sere* rs_ = nullptr; }; + + } diff --git a/tests/Makefile.am b/tests/Makefile.am index bffb79ff1..54abc1a73 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -176,6 +176,7 @@ TESTS_tl = \ core/kind.test \ core/remove_x.test \ core/ltlrel.test \ + core/ltlmix.test \ core/ltlgrind.test \ core/ltlcrossgrind.test \ core/ltlfilt.test \ diff --git a/tests/core/ltlmix.test b/tests/core/ltlmix.test new file mode 100755 index 000000000..dd279e4fa --- /dev/null +++ b/tests/core/ltlmix.test @@ -0,0 +1,93 @@ +#! /bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs || exit 1 + +set -e + +# Make sure sort isn't affected by the user's LC_COLLATE +LC_ALL=C +export LC_ALL + +ltlmix -fGa -fFb --tree-size=4 -n 50 -o out.ltl 2>stderr && exit 1 +grep 'failed to generate a new unique formula' stderr +test 34 -eq `wc -l < out.ltl` + +ltlmix -fGa -fFb --tree-size=4 -n 500 -L -o out.ltl 2>stderr && exit 1 +grep 'failed to generate a new unique formula' stderr +test 448 -eq `wc -l < out.ltl` + +P=--boolean-priorities=not=0,xor=0,equiv=0,implies=0 +ltlmix -fGa -fFb --tree-size=4 -n 10 $P -o out.ltl 2>stderr && exit 1 +grep 'failed to generate a new unique formula' stderr +test 4 -eq `wc -l < out.ltl` + +ltlmix -fa -A9 --tree-size=1 -n9 | sort > list1 +ltlmix -fa -P9 --tree-size=1 -n18 | sort > list2 +grep -v '!' list2 > list3 +diff list1 list3 + +# The following message appears only if run from a tty. 
+if (: > /dev/tty) >/dev/null 2>&1 ; then + ltlmix -A9 -n10 err && exit 2 + grep 'No formula to combine' err +fi + + +genltl --gf-equiv-xn=1..3 | ltlmix --tree-size=1 -A2 -n6 | sort >out +cat >expected < XXXp0) +GF(p0 <-> XXp0) +GF(p0 <-> Xp0) +GF(p1 <-> XXXp1) +GF(p1 <-> XXp1) +GF(p1 <-> Xp1) +EOF +diff out expected + + +genltl --and-gf=4 > in.ltl +ltlmix in.ltl --tree-size=1 -A1 --stats=%x > out.txt +ltlmix in.ltl --tree-size=1 -A2 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -A3 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -A4 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -A5 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -A6 --stats=%x >> out.txt + +cat >expected < out.txt +ltlmix in.ltl --tree-size=1 -P2 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -P3 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -P4 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -P5 --stats=%x >> out.txt +ltlmix in.ltl --tree-size=1 -P6 --stats=%x >> out.txt +diff out.txt expected + +ltlmix -fa -A500 $P,or=0 -n10 | tee out +test 10 -eq `grep '&.*&' < out | wc -l` From 2390a89986e23a681bbffaf9a5aa067c1014b00c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 23 Aug 2024 15:17:04 +0200 Subject: [PATCH 471/606] ltlmix: learn option -R for random conjuncts * bin/ltlmix.cc: Implement this option. * doc/org/ltlmix.org: Illustrate it. * tests/core/ltlmix.test: Add a test. --- bin/ltlmix.cc | 28 +++++++++++++++++++++------- doc/org/ltlmix.org | 33 ++++++++++++++++++++++++++++----- tests/core/ltlmix.test | 3 +++ 3 files changed, 52 insertions(+), 12 deletions(-) diff --git a/bin/ltlmix.cc b/bin/ltlmix.cc index 9e6ada1c1..1e36eeeb2 100644 --- a/bin/ltlmix.cc +++ b/bin/ltlmix.cc @@ -68,26 +68,31 @@ static const argp_option options[] = { // Keep this alphabetically sorted (expect for aliases). 
/**************************************************/ { nullptr, 0, nullptr, 0, "Generation parameters:", 2 }, + { "allow-dups", OPT_DUPS, nullptr, 0, + "allow duplicate formulas to be output", 0 }, { "ap-count", 'A', "N", 0, "rename the atomic propositions in each selected formula by drawing " "randomly from N atomic propositions (the rewriting is bijective " "if N is larger than the original set)", 0 }, - { "polarized-ap", 'P', "N", 0, - "similar to -A N, but randomize the polarity of the new atomic " - "proposition", 0 }, { "boolean", 'B', nullptr, 0, - "generate Boolean combination of formulas (default)", 0 }, - { "allow-dups", OPT_DUPS, nullptr, 0, - "allow duplicate formulas to be output", 0 }, - { "ltl", 'L', nullptr, 0, "generate LTL combinations of subformulas", 0 }, + "generate Boolean combinations of formulas (default)", 0 }, { "formulas", 'n', "INT", 0, "number of formulas to generate (default: 1);\n" "use a negative value for unbounded generation", 0 }, + { "ltl", 'L', nullptr, 0, "generate LTL combinations of subformulas", 0 }, + { "polarized-ap", 'P', "N", 0, + "similar to -A N, but randomize the polarity of the new atomic " + "propositions", 0 }, + { "random-conjuncts", 'C', "N", 0, + "generate random-conjunctions of N conjuncts; " + "shorthand for --tree-size {2N-1} -B " + "--boolean-priorities=[disable everything but 'and']", 0 }, { "seed", OPT_SEED, "INT", 0, "seed for the random number generator (default: 0)", 0 }, { "tree-size", OPT_TREE_SIZE, "RANGE", 0, "tree size of main pattern generated (default: 5);\n" "input formulas count as size 1.", 0 }, + RANGE_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Adjusting probabilities:", 4 }, { "dump-priorities", OPT_DUMP_PRIORITIES, nullptr, 0, @@ -129,6 +134,7 @@ static spot::randltlgenerator::output_type output = spot::randltlgenerator::Bool; static char* opt_pL = nullptr; static char* opt_pB = nullptr; +static char random_conj[] = "not=0,implies=0,equiv=0,xor=0,or=0"; static bool opt_dump_priorities = false; static int opt_seed = 0; static range opt_tree_size = { 5, 5 }; @@ -178,6 +184,14 @@ parse_opt(int key, char* arg, struct argp_state*) case 'B': output = spot::randltlgenerator::Bool; break; + case 'C': + { + int s = 2 * to_int(arg, "-C/--random-conjuncs") - 1; + opt_tree_size = {s, s}; + output = spot::randltlgenerator::Bool; + opt_pB = random_conj; + break; + } case 'L': output = spot::randltlgenerator::LTL; break; diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index 338122d41..341c64833 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -324,10 +324,10 @@ so is uses atomic propositions $\{p_0,p_1,...\}$ starting at 0 and without gap. ** Random conjunctions -Some benchmark (e.g., [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][for LTL satisfiability]]) are built by conjunction -of $L$ random formulas picked from a set of basic formulas. Each -picked formula has its atomic proposition mapped to random literals -built from a subset of $m$ atomic variables. +Some benchmarks (e.g., [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][for LTL satisfiability]]) are built by +conjunction of $L$ random formulas picked from a set of basic +formulas. Each picked formula has its atomic proposition mapped to +random literals built from a subset of $m$ atomic variables. Given a value for $m$, option =-P m= will achieve the second part of the above description. 
To build a conjunction of $L$ formulas, we @@ -359,7 +359,30 @@ Xp27 & Xp5 & Fp28 & Xp18 & G!p13 & Gp35 & Gp38 & G!p45 & G!p48 & Gp12 Xp7 & G!p48 & Xp14 & Fp24 & Xp43 & Fp47 & Fp14 & Gp30 & Xp23 & G!p31 #+end_example +In fact building random conjunctions is common enough to have its own +flag. Using =-C N= will see the tree size to $2N-1$ and disable all +operators but =and=. The above command can therefore be reduced to + +#+BEGIN_SRC sh :exports both + ltlmix -fGa -fFa -fXa -n10 -P50 -C10 +#+END_SRC + +#+RESULTS: +#+begin_example +Xp27 & F!p21 & G!p19 & X!p26 & Fp4 & Gp43 & Fp39 & Fp7 +G!p28 & G!p30 & X!p34 & Fp21 & Gp3 & Fp6 & F!p18 & Xp5 & Fp8 & F!p12 +X!p32 & Xp9 & Gp4 & Xp48 & F!p48 & X!p6 & Fp5 & Xp20 & Fp28 & Fp26 +G!p46 & X!p14 & Fp29 & X!p36 & Fp12 & Xp47 & Fp42 & Gp14 & Fp19 +G!p28 & Fp21 & Fp36 & F!p0 & G!p14 & Xp21 & F!p28 & G!p21 & Gp21 & Gp40 +Gp3 & F!p48 & F!p28 & Xp7 & Gp8 & Xp42 & Gp0 & Xp36 & F!p2 & G!p0 +Xp36 & Xp46 & F!p31 & Xp11 & Xp26 & G!p9 & F!p36 & X!p12 & Fp15 & Xp18 +Xp9 & X!p33 & Fp44 & X!p13 & Gp37 & Xp19 & G!p43 & F!p34 & Gp36 & Gp10 +Xp27 & Xp5 & Fp28 & Xp18 & G!p13 & Gp35 & Gp38 & G!p45 & G!p48 & Gp12 +Xp7 & G!p48 & Xp14 & Fp24 & Xp43 & Fp47 & Fp14 & Gp30 & Xp23 & G!p31 +#+end_example + + Selecting 10 random conjuncts out of 3×50×2=300 possibilities has a 13.7% chance that at least 2 conjuncts will be identical (see [[https://en.wikipedia.org/wiki/Birthday_problem][Birthday -paradox]]), so because of Spot's trivial rewritings, some hove the above +paradox]]), so because of Spot's trivial rewritings, some of the above formulas may have fewer than 10 conjuncts. diff --git a/tests/core/ltlmix.test b/tests/core/ltlmix.test index dd279e4fa..232c1f76c 100755 --- a/tests/core/ltlmix.test +++ b/tests/core/ltlmix.test @@ -91,3 +91,6 @@ diff out.txt expected ltlmix -fa -A500 $P,or=0 -n10 | tee out test 10 -eq `grep '&.*&' < out | wc -l` + +ltlmix -fa -A500 -C3 -n10 | tee out2 +diff out out2 From bea1713f4ee2e90c57bf1f423b2bff17728ddb95 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 23 Aug 2024 16:05:14 +0200 Subject: [PATCH 472/606] ltlsynt: extract In/Out AP processing in separate file We'd like to reuse the --ins/--outs matching in ltlfilt as well, so let's put that code in a common file. * bin/common_ioap.cc, bin/common_ioap.hh: New files. * bin/ltlsynt.cc: Extracted from here. * bin/Makefile.am: Add them. --- bin/Makefile.am | 2 + bin/common_ioap.cc | 166 +++++++++++++++++++++++++++++++++++++++++++++ bin/common_ioap.hh | 51 ++++++++++++++ bin/ltlsynt.cc | 149 +--------------------------------------- 4 files changed, 222 insertions(+), 146 deletions(-) create mode 100644 bin/common_ioap.cc create mode 100644 bin/common_ioap.hh diff --git a/bin/Makefile.am b/bin/Makefile.am index b665d859c..c92cdf6f6 100644 --- a/bin/Makefile.am +++ b/bin/Makefile.am @@ -43,6 +43,8 @@ libcommon_a_SOURCES = \ common_finput.hh \ common_hoaread.cc \ common_hoaread.hh \ + common_ioap.cc \ + common_ioap.hh \ common_output.cc \ common_output.hh \ common_post.cc \ diff --git a/bin/common_ioap.cc b/bin/common_ioap.cc new file mode 100644 index 000000000..d38a190aa --- /dev/null +++ b/bin/common_ioap.cc @@ -0,0 +1,166 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. 
+// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "common_ioap.hh" +#include "error.h" +#include + +// --ins and --outs, as supplied on the command-line +std::optional> all_output_aps; +std::optional> all_input_aps; + +// Store refirst, separate the filters that are regular expressions from +// the others. Compile the regular expressions while we are at it. +std::vector regex_in; +std::vector regex_out; +// map identifier to input/output (false=input, true=output) +std::unordered_map identifier_map; + +static std::string +str_tolower(std::string s) +{ + std::transform(s.begin(), s.end(), s.begin(), + [](unsigned char c){ return std::tolower(c); }); + return s; +} + +void +split_aps(const std::string& arg, std::vector& where) +{ + std::istringstream aps(arg); + std::string ap; + while (std::getline(aps, ap, ',')) + { + ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); + where.push_back(str_tolower(ap)); + } +} + +void process_io_options() +{ + // Filter identifiers from regexes. + if (all_input_aps.has_value()) + for (const std::string& f: *all_input_aps) + { + unsigned sz = f.size(); + if (f[0] == '/' && f[sz - 1] == '/') + regex_in.push_back(std::regex(f.substr(1, sz - 2))); + else + identifier_map.emplace(f, false); + } + if (all_output_aps.has_value()) + for (const std::string& f: *all_output_aps) + { + unsigned sz = f.size(); + if (f[0] == '/' && f[sz - 1] == '/') + regex_out.push_back(std::regex(f.substr(1, sz - 2))); + else if (auto [it, is_new] = identifier_map.try_emplace(f, true); + !is_new && !it->second) + error(2, 0, "'%s' appears in both --ins and --outs", + f.c_str()); + } +} + +static std::unordered_set +list_aps_in_formula(spot::formula f) +{ + std::unordered_set aps; + f.traverse([&aps](spot::formula s) { + if (s.is(spot::op::ap)) + aps.emplace(s.ap_name()); + return false; + }); + return aps; +} + +// Takes a set of the atomic propositions appearing in the formula, +// and separate them into two vectors: input APs and output APs. +std::pair, std::vector> +filter_list_of_aps(spot::formula f, const char* filename, int linenum) +{ + std::unordered_set aps = list_aps_in_formula(f); + // now iterate over the list of atomic propositions to filter them + std::vector matched[2]; // 0 = input, 1 = output + for (const std::string& a: aps) + { + if (auto it = identifier_map.find(a); it != identifier_map.end()) + { + matched[it->second].push_back(a); + continue; + } + + bool found_in = false; + for (const std::regex& r: regex_in) + if (std::regex_search(a, r)) + { + found_in = true; + break; + } + bool found_out = false; + for (const std::regex& r: regex_out) + if (std::regex_search(a, r)) + { + found_out = true; + break; + } + if (all_input_aps.has_value() == all_output_aps.has_value()) + { + if (!all_input_aps.has_value()) + { + // If the atomic proposition hasn't been classified + // because neither --ins nor --out were specified, + // attempt to classify automatically using the first + // letter. 
+ int fl = a[0]; + if (fl == 'i' || fl == 'I') + found_in = true; + else if (fl == 'o' || fl == 'O') + found_out = true; + } + if (found_in && found_out) + error_at_line(2, 0, filename, linenum, + "'%s' matches both --ins and --outs", + a.c_str()); + if (!found_in && !found_out) + { + if (all_input_aps.has_value() || all_output_aps.has_value()) + error_at_line(2, 0, filename, linenum, + "one of --ins or --outs should match '%s'", + a.c_str()); + else + error_at_line(2, 0, filename, linenum, + "since '%s' does not start with 'i' or 'o', " + "it is unclear if it is an input or " + "an output;\n use --ins or --outs", + a.c_str()); + } + } + else + { + // if we had only --ins or only --outs, anything not + // matching that was given is assumed to belong to the + // other one. + if (!all_input_aps.has_value() && !found_out) + found_in = true; + else if (!all_output_aps.has_value() && !found_in) + found_out = true; + } + matched[found_out].push_back(a); + } + return {matched[0], matched[1]}; +} diff --git a/bin/common_ioap.hh b/bin/common_ioap.hh new file mode 100644 index 000000000..960b26a8a --- /dev/null +++ b/bin/common_ioap.hh @@ -0,0 +1,51 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include "common_sys.hh" +#include +#include +#include +#include +#include +#include + +// --ins and --outs, as supplied on the command-line +extern std::optional> all_output_aps; +extern std::optional> all_input_aps; + +// Comma-separated list of strings, such as those passed to --ins/--outs +void split_aps(const std::string& arg, std::vector& where); + +// process the all_output_aps and all_input_aps above to +// fill regex_in, regex_out, and identifier_map. +void process_io_options(); + +// Store refirst, separate the filters that are regular expressions from +// the others. Compile the regular expressions while we are at it. +extern std::vector regex_in; +extern std::vector regex_out; +// map identifier to input/output (false=input, true=output) +extern std::unordered_map identifier_map; + +// Separate the set of the atomic propositions appearing in f, into +// two vectors: input APs and output APs, becased on regex_in, +// regex_out, and identifier_map. 
+std::pair, std::vector> +filter_list_of_aps(spot::formula f, const char* filename, int linenum); diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 456d2cb41..9e7aee595 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -26,6 +26,7 @@ #include "common_setup.hh" #include "common_sys.hh" #include "common_trans.hh" +#include "common_ioap.hh" #include #include @@ -43,7 +44,6 @@ #include #include #include -#include enum { @@ -178,17 +178,6 @@ Exit status:\n\ 1 if at least one input problem was not realizable\n\ 2 if any error has been reported"; -// --ins and --outs, as supplied on the command-line -static std::optional> all_output_aps; -static std::optional> all_input_aps; - -// first, separate the filters that are regular expressions from -// the others. Compile the regular expressions while we are at it. -static std::vector regex_in; -static std::vector regex_out; -// map identifier to input/output (false=input, true=output) -static std::unordered_map identifier_map; - static const char* opt_csv = nullptr; static bool opt_print_pg = false; static bool opt_print_hoa = false; @@ -323,14 +312,6 @@ namespace return opt_print_pg || opt_print_hoa; } - auto str_tolower = [] (std::string s) - { - std::transform(s.begin(), s.end(), s.begin(), - [](unsigned char c){ return std::tolower(c); }); - return s; - }; - - static void dispatch_print_hoa(spot::twa_graph_ptr& game, const spot::realizability_simplifier* rs = nullptr) @@ -753,108 +734,6 @@ namespace return 0; } - static void - split_aps(const std::string& arg, std::vector& where) - { - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) - { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - where.push_back(str_tolower(ap)); - } - } - - static std::unordered_set - list_aps_in_formula(spot::formula f) - { - std::unordered_set aps; - f.traverse([&aps](spot::formula s) { - if (s.is(spot::op::ap)) - aps.emplace(s.ap_name()); - return false; - }); - return aps; - } - - // Takes a set of the atomic propositions appearing in the formula, - // and separate them into two vectors: input APs and output APs. - static std::pair, std::vector> - filter_list_of_aps(const std::unordered_set& aps, - const char* filename, int linenum) - { - // now iterate over the list of atomic propositions to filter them - std::vector matched[2]; // 0 = input, 1 = output - for (const std::string& a: aps) - { - if (auto it = identifier_map.find(a); it != identifier_map.end()) - { - matched[it->second].push_back(a); - continue; - } - - bool found_in = false; - for (const std::regex& r: regex_in) - if (std::regex_search(a, r)) - { - found_in = true; - break; - } - bool found_out = false; - for (const std::regex& r: regex_out) - if (std::regex_search(a, r)) - { - found_out = true; - break; - } - if (all_input_aps.has_value() == all_output_aps.has_value()) - { - if (!all_input_aps.has_value()) - { - // If the atomic proposition hasn't been classified - // because neither --ins nor --out were specified, - // attempt to classify automatically using the first - // letter. 
- int fl = a[0]; - if (fl == 'i' || fl == 'I') - found_in = true; - else if (fl == 'o' || fl == 'O') - found_out = true; - } - if (found_in && found_out) - error_at_line(2, 0, filename, linenum, - "'%s' matches both --ins and --outs", - a.c_str()); - if (!found_in && !found_out) - { - if (all_input_aps.has_value() || all_output_aps.has_value()) - error_at_line(2, 0, filename, linenum, - "one of --ins or --outs should match '%s'", - a.c_str()); - else - error_at_line(2, 0, filename, linenum, - "since '%s' does not start with 'i' or 'o', " - "it is unclear if it is an input or " - "an output;\n use --ins or --outs", - a.c_str()); - } - } - else - { - // if we had only --ins or only --outs, anything not - // matching was was given is assumed to belong to the - // other one. - if (!all_input_aps.has_value() && !found_out) - found_in = true; - else if (!all_output_aps.has_value() && !found_in) - found_out = true; - } - matched[found_out].push_back(a); - } - return {matched[0], matched[1]}; - } - - class ltl_processor final : public job_processor { @@ -866,9 +745,8 @@ namespace int process_formula(spot::formula f, const char* filename, int linenum) override { - std::unordered_set aps = list_aps_in_formula(f); auto [input_aps, output_aps] = - filter_list_of_aps(aps, filename, linenum); + filter_list_of_aps(f, filename, linenum); int res = solve_formula(f, input_aps, output_aps); if (opt_csv) print_csv(f); @@ -1193,28 +1071,7 @@ main(int argc, char **argv) exit(err); check_no_formula(); - - // Filter identifiers from regexes. - if (all_input_aps.has_value()) - for (const std::string& f: *all_input_aps) - { - unsigned sz = f.size(); - if (f[0] == '/' && f[sz - 1] == '/') - regex_in.push_back(std::regex(f.substr(1, sz - 2))); - else - identifier_map.emplace(f, false); - } - if (all_output_aps.has_value()) - for (const std::string& f: *all_output_aps) - { - unsigned sz = f.size(); - if (f[0] == '/' && f[sz - 1] == '/') - regex_out.push_back(std::regex(f.substr(1, sz - 2))); - else if (auto [it, is_new] = identifier_map.try_emplace(f, true); - !is_new && !it->second) - error(2, 0, "'%s' appears in both --ins and --outs", - f.c_str()); - } + process_io_options(); ltl_processor processor; if (int res = processor.run(); res == 0 || res == 1) From 6fa42c90b8b4c9fde09245dfdb236c14709843f8 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 23 Aug 2024 17:22:17 +0200 Subject: [PATCH 473/606] ltlfilt: add support for --relabel=io, --ins, and --outs * bin/common_ioap.cc, bin/common_ioap.hh (relabel_io): New function. * bin/ltlfilt.cc: Implement the above options. * doc/org/ltlfilt.org, NEWS: Illustrate them. * tests/core/ltlfilt.test: Add some quick tests. --- NEWS | 10 +++++ bin/common_ioap.cc | 34 +++++++++++++++ bin/common_ioap.hh | 6 +++ bin/ltlfilt.cc | 38 +++++++++++++++- doc/org/ltlfilt.org | 41 +++++++++++++++++- tests/core/ltlfilt.test | 96 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 222 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 178ba5327..cacacf5d2 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,16 @@ New in spot 2.12.0.dev (not yet released) - ltlmix is a new tool that generate formulas by combining existing ones. See https://spot.lre.epita.fr/ltlmix.html for examples. + - ltlfilt learned a --relabel=io mode, that is useful to shorten + atomic propositions in the context of LTL synthesis. 
For instance + + % ltlfilt -f 'G(req->Fack)&G(go->Fgrant)' --relabel=io --ins=req,go + G(i1 -> Fo0) & G(i0 -> Fo1) + + The resulting formulas are now usable by ltlsynt without having to + specify which atomic propositions are input or output, as this can + be inferred from their name. + - autfilt learned --restrict-dead-end-edges, to restricts labels of edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. diff --git a/bin/common_ioap.cc b/bin/common_ioap.cc index d38a190aa..312334051 100644 --- a/bin/common_ioap.cc +++ b/bin/common_ioap.cc @@ -164,3 +164,37 @@ filter_list_of_aps(spot::formula f, const char* filename, int linenum) } return {matched[0], matched[1]}; } + + +spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro, + const char* filename, int linenum) +{ + auto [ins, outs] = filter_list_of_aps(f, filename, linenum); + // Different implementation of unordered_set, usinged in + // filter_list_of_aps can cause aps to be output in different order. + // Let's sort everything for the sake of determinism. + std::sort(ins.begin(), ins.end()); + std::sort(outs.begin(), outs.end()); + spot::relabeling_map to; + unsigned ni = 0; + for (std::string& i: ins) + { + std::ostringstream s; + s << 'i' << ni++; + spot::formula a1 = spot::formula::ap(i); + spot::formula a2 = spot::formula::ap(s.str()); + fro[a2] = a1; + to[a1] = a2; + } + unsigned no = 0; + for (std::string& o: outs) + { + std::ostringstream s; + s << 'o' << no++; + spot::formula a1 = spot::formula::ap(o); + spot::formula a2 = spot::formula::ap(s.str()); + fro[a2] = a1; + to[a1] = a2; + } + return spot::relabel_apply(f, &to); +} diff --git a/bin/common_ioap.hh b/bin/common_ioap.hh index 960b26a8a..cc20c61a5 100644 --- a/bin/common_ioap.hh +++ b/bin/common_ioap.hh @@ -25,6 +25,7 @@ #include #include #include +#include // --ins and --outs, as supplied on the command-line extern std::optional> all_output_aps; @@ -49,3 +50,8 @@ extern std::unordered_map identifier_map; // regex_out, and identifier_map. std::pair, std::vector> filter_list_of_aps(spot::formula f, const char* filename, int linenum); + + +// Relabel APs incrementally, based on i/o class. 
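+// Input APs of the formula are renamed to i0, i1, ..., and output
+// APs to o0, o1, ... (each group is sorted alphabetically before
+// numbering, for determinism).  The map fro is filled with the
+// reverse mapping, from the new propositions to the original ones.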
+spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro, + const char* filename, int linenum); diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 2fd069dc2..8509b4c9f 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -31,6 +31,7 @@ #include "common_output.hh" #include "common_cout.hh" #include "common_conv.hh" +#include "common_ioap.hh" #include "common_r.hh" #include "common_range.hh" @@ -86,6 +87,7 @@ enum { OPT_IGNORE_ERRORS, OPT_IMPLIED_BY, OPT_IMPLY, + OPT_INS, OPT_LIVENESS, OPT_LTL, OPT_NEGATE, @@ -117,6 +119,7 @@ enum { OPT_SYNTACTIC_SAFETY, OPT_SYNTACTIC_SI, OPT_TO_DELTA2, + OPT_OUTS, OPT_UNABBREVIATE, OPT_UNIVERSAL, }; @@ -141,7 +144,7 @@ static const argp_option options[] = { "sonf-aps", OPT_SONF_APS, "FILENAME", OPTION_ARG_OPTIONAL, "when used with --sonf, output the newly introduced atomic " "propositions", 0 }, - { "relabel", OPT_RELABEL, "abc|pnn", OPTION_ARG_OPTIONAL, + { "relabel", OPT_RELABEL, "abc|pnn|io", OPTION_ARG_OPTIONAL, "relabel all atomic propositions, alphabetically unless " \ "specified otherwise", 0 }, { "relabel-bool", OPT_RELABEL_BOOL, "abc|pnn", OPTION_ARG_OPTIONAL, @@ -178,6 +181,12 @@ static const argp_option options[] = { "from-ltlf", OPT_FROM_LTLF, "alive", OPTION_ARG_OPTIONAL, "transform LTLf (finite LTL) to LTL by introducing some 'alive'" " proposition", 0 }, + { "ins", OPT_INS, "PROPS", 0, + "comma-separated list of input atomic propositions to use with " + "--relabel=io, interpreted as a regex if enclosed in slashes", 0 }, + { "outs", OPT_OUTS, "PROPS", 0, + "comma-separated list of output atomic propositions to use with " + "--relabel=io, interpreted as a regex if enclosed in slashes", 0 }, DECLARE_OPT_R, LEVEL_DOC(4), /**************************************************/ @@ -341,6 +350,7 @@ static range size = { -1, -1 }; static range bsize = { -1, -1 }; enum relabeling_mode { NoRelabeling = 0, ApRelabeling, + IOApRelabeling, BseRelabeling, OverlappingRelabeling }; static relabeling_mode relabeling = NoRelabeling; @@ -391,9 +401,12 @@ parse_relabeling_style(const char* arg, const char* optname) style = spot::Abc; else if (!strncasecmp(arg, "pnn", 4)) style = spot::Pnn; + else if (!*optname && !strncasecmp(arg, "io", 2)) + relabeling = IOApRelabeling; // style is actually not supported else error(2, 0, "invalid argument for --relabel%s: '%s'\n" - "expecting 'abc' or 'pnn'", optname, arg); + "expecting %s", optname, arg, + *optname ? "'abc' or 'pnn'" : "'abc', 'pnn', or 'io'"); } @@ -502,6 +515,12 @@ parse_opt(int key, char* arg, struct argp_state*) opt->imply = spot::formula::And({opt->imply, i}); break; } + case OPT_INS: + { + all_input_aps.emplace(std::vector{}); + split_aps(arg, *all_input_aps); + break; + } case OPT_LIVENESS: liveness = true; break; @@ -517,6 +536,12 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_NNF: nnf = true; break; + case OPT_OUTS: + { + all_output_aps.emplace(std::vector{}); + split_aps(arg, *all_output_aps); + break; + } case OPT_SONF: sonf = arg ? 
arg : "sonf_"; break; @@ -752,6 +777,12 @@ namespace f = spot::relabel(f, style, &relmap); break; } + case IOApRelabeling: + { + relmap.clear(); + f = relabel_io(f, relmap, filename, linenum); + break; + } case BseRelabeling: { relmap.clear(); @@ -948,6 +979,9 @@ main(int argc, char** argv) if (jobs.empty()) jobs.emplace_back("-", job_type::LTL_FILENAME); + if (relabeling == IOApRelabeling) + process_io_options(); + if (boolean_to_isop && simplification_level == 0) simplification_level = 1; spot::tl_simplifier_options tlopt(simplification_level); diff --git a/doc/org/ltlfilt.org b/doc/org/ltlfilt.org index 6609afb4e..a5ae9f3f0 100644 --- a/doc/org/ltlfilt.org +++ b/doc/org/ltlfilt.org @@ -75,9 +75,15 @@ ltlfilt --help | sed -n '/Transformation options.*:/,/^$/p' | sed '1d;$d' propositions. --from-ltlf[=alive] transform LTLf (finite LTL) to LTL by introducing some 'alive' proposition + --ins=PROPS comma-separated list of input atomic propositions + to use with --relabel=io, interpreted as a regex + if enclosed in slashes --negate negate each formula --nnf rewrite formulas in negative normal form - --relabel[=abc|pnn] relabel all atomic propositions, alphabetically + --outs=PROPS comma-separated list of output atomic propositions + to use with --relabel=io, interpreted as a regex + if enclosed in slashes + --relabel[=abc|pnn|io] relabel all atomic propositions, alphabetically unless specified otherwise --relabel-bool[=abc|pnn] relabel Boolean subexpressions that do not share atomic propositions, relabel alphabetically @@ -95,6 +101,7 @@ ltlfilt --help | sed -n '/Transformation options.*:/,/^$/p' | sed '1d;$d' --sonf[=PREFIX] rewrite formulas in suffix operator normal form --sonf-aps[=FILENAME] when used with --sonf, output the newly introduced atomic propositions + --to-delta2 rewrite LTL formula in Δ₂-form --unabbreviate[=STR] remove all occurrences of the operators specified by STR, which must be a substring of "eFGiMRW^", where 'e', 'i', and '^' stand respectively for @@ -294,6 +301,38 @@ ltldo ltl3ba -f '"proc@loc1" U "proc@loc2"' --spin This case also relabels the formula before calling =ltl3ba=, and it then renames all the atomic propositions in the output. + +A special relabeling mode related to LTL synthesis is =--relabel=io=. +In LTL synthesis (see [[file:ltlsynt.org][=ltlsynt=]]), atomic propositions are partitioned +in two sets: the /input/ propositions represent choices from the +environment, while /output/ proposition represent choices by the +controller to be synthesized. For instance +=G(req -> Fack) & G(go -> Fgrant)= +represents could be a specification where =req= and =go= are inputs, +while =ack= and =grant= are outputs. Tool such as =ltlsynt= need +to be told using options such as =--ins= or =--outs= which atomic +propositions are input or output. Often these atomic propositions +can have very long names, so it is useful to be able to rename +them without fogeting about their nature. Option =--relabel=io= +combined with one if =--ins= or =--outs= will do exactly that: + +#+BEGIN_SRC sh +ltlfilt -f 'G(req -> Fack) & G(go -> Fgrant)' --relabel=io --ins=req,go +#+END_SRC +#+RESULTS: +: G(i1 -> Fo1) & G(i0 -> Fo0) + +Like in [[file:ltlsynt.org][=ltlsynt=]], options =--ins= and =--outs= take a comma-separated +list of atomic propositions as argument. Additionally, if an atomic +proposition in this list is enclosed in slashes (as in +=--out=req,/^go/=), it is used as a regular expression for matching +atomic propositions. 
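+
+As an illustration of the regular-expression form, the relabeling
+above should also be obtainable by listing only the outputs: when
+just one of =--ins= or =--outs= is given, any proposition that does
+not match it is assumed to belong to the other category.
+
+#+BEGIN_SRC sh
+ltlfilt -f 'G(req -> Fack) & G(go -> Fgrant)' --relabel=io --outs='/ack/,/grant/'
+#+END_SRC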
+ +By the way, such an IO-renamed formula can be given to [[file:ltlsynt.org][=ltlsynt=]] without +having to specify =--ins= or =--outs=, because when these two options +are missing the convention is that anything starting with =i= is an +input, and anything starting with =o= is an output. + An example showing how to use the =--from-ltlf= option is on [[file:tut12.org][a separate page]]. diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 426734851..2352c4707 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -531,6 +531,102 @@ EOF run 0 ltlfilt -s -u --relabel=pnn --define in >out diff exp out +cat >exp <(i0 || o0) && <>[](i0 || o0) +#define i0 (a) +#define i1 (b) +#define o0 (c) +i1 && []<>(i0 || o0) && <>[](i0 || o0) +#define o0 (c) +#define o1 (d) +#define o2 (e) +#define o3 (f) +#define o4 (h) +#define o5 (i) +o4 || o5 || [](o1 && o2) || <>[](!o0 || Xo3) +#define i0 (b) +#define o0 (c) +#define o1 (e) +#define o2 (f) +#define o3 (g) +i0 && o1 && (o2 || o3) && !Xo0 +#define i0 (a) +#define i1 (b) +#define o0 (c) +i1 && []<>(i0 || o0) && ![]<>!(i0 || o0) +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +<>(i0 <-> i1) -> !(o0 <-> o1) +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +#define o2 (e) +(i0 && i1 && o0) U (o0 && o1 && o2) +#define i0 (a) +#define i1 (b) +#define o0 (c) +(i0 && i1 && o0) U !(i0 && i1 && o0) +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +#define o2 (e) +(i0 && i1 && o0) U (!o0 && o1 && o2) +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +#define o2 (e) +#define o3 (f) +(o0 && o1 && (i0 || i1)) U (!o1 && o2 && o3) +#define i0 (a) +#define i1 (b) +#define o0 (d) +#define o1 (e) +#define o2 (f) +(o0 && (i0 || i1)) U (!o0 && o1 && o2) +#define i0 (a) +#define i1 (b) +#define o0 (c) +(i0 && !i0) || (i1 && !i1) || (o0 && !o0) +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +((i0 && !i0) || (i1 && !i1) || (o0 && !o0)) U o1 +#define i0 (a) +#define i1 (b) +#define o0 (c) +#define o1 (d) +#define o2 (e) +((i0 && !i0) || (i1 && !i1) || (o0 && o2)) U o1 +#define i0 (a) +#define i1 (b) +#define o0 (c) +((i0 && !i1) || (!i0 && i1)) U o0 +#define i0 (a) +#define i1 (b) +#define o0 (c) +((i0 && !i1) || (i0 -> i1)) U o0 +EOF + +run 0 ltlfilt -s -u --relabel=io --ins=a,b --define in >out +diff exp out +run 0 ltlfilt -s -u --relabel=io --ins='/[ab]/' --define in >out +diff exp out +run 0 ltlfilt -s -u --relabel=io --outs='/[^ab]/' --define in >out +diff exp out + cat >exp < Date: Mon, 26 Aug 2024 11:42:09 +0200 Subject: [PATCH 474/606] ltlmix: add support for the I/O variants * bin/ltlmix.cc: Add options --ins, --outs, as well as the two-argument form of -A/-P. * bin/common_ioap.hh, bin/common_ioap.cc (is_output): New function. * spot/tl/apcollect.cc, spot/tl/apcollect.hh (create_atomic_prop_set): Allow the prefix string to be changed. * spot/tl/randomltl.cc, spot/tl/randomltl.hh: Add support for an I/O version with two set of atomic proposition, and a predicate to decide if the original proposition was input or output. * tests/core/ltlmix.test: More tests. 
--- bin/common_ioap.cc | 132 +++++++++++++++++++++-------------------- bin/common_ioap.hh | 9 +++ bin/ltlmix.cc | 88 ++++++++++++++++++++++++--- spot/tl/apcollect.cc | 4 +- spot/tl/apcollect.hh | 6 +- spot/tl/randomltl.cc | 88 ++++++++++++++++++++------- spot/tl/randomltl.hh | 35 +++++++++-- tests/core/ltlmix.test | 27 +++++++++ 8 files changed, 286 insertions(+), 103 deletions(-) diff --git a/bin/common_ioap.cc b/bin/common_ioap.cc index 312334051..65e05c7ca 100644 --- a/bin/common_ioap.cc +++ b/bin/common_ioap.cc @@ -88,6 +88,72 @@ list_aps_in_formula(spot::formula f) return aps; } + +bool +is_output(const std::string& a, const char* filename, int linenum) +{ + if (auto it = identifier_map.find(a); it != identifier_map.end()) + return it->second; + + bool found_in = false; + for (const std::regex& r: regex_in) + if (std::regex_search(a, r)) + { + found_in = true; + break; + } + bool found_out = false; + for (const std::regex& r: regex_out) + if (std::regex_search(a, r)) + { + found_out = true; + break; + } + if (all_input_aps.has_value() == all_output_aps.has_value()) + { + if (!all_input_aps.has_value()) + { + // If the atomic proposition hasn't been classified + // because neither --ins nor --out were specified, + // attempt to classify automatically using the first + // letter. + int fl = a[0]; + if (fl == 'i' || fl == 'I') + found_in = true; + else if (fl == 'o' || fl == 'O') + found_out = true; + } + if (found_in && found_out) + error_at_line(2, 0, filename, linenum, + "'%s' matches both --ins and --outs", + a.c_str()); + if (!found_in && !found_out) + { + if (all_input_aps.has_value() || all_output_aps.has_value()) + error_at_line(2, 0, filename, linenum, + "one of --ins or --outs should match '%s'", + a.c_str()); + else + error_at_line(2, 0, filename, linenum, + "since '%s' does not start with 'i' or 'o', " + "it is unclear if it is an input or " + "an output;\n use --ins or --outs", + a.c_str()); + } + } + else + { + // if we had only --ins or only --outs, anything not + // matching that was given is assumed to belong to the + // other one. + if (!all_input_aps.has_value() && !found_out) + found_in = true; + else if (!all_output_aps.has_value() && !found_in) + found_out = true; + } + return found_out; +} + // Takes a set of the atomic propositions appearing in the formula, // and separate them into two vectors: input APs and output APs. std::pair, std::vector> @@ -97,71 +163,7 @@ filter_list_of_aps(spot::formula f, const char* filename, int linenum) // now iterate over the list of atomic propositions to filter them std::vector matched[2]; // 0 = input, 1 = output for (const std::string& a: aps) - { - if (auto it = identifier_map.find(a); it != identifier_map.end()) - { - matched[it->second].push_back(a); - continue; - } - - bool found_in = false; - for (const std::regex& r: regex_in) - if (std::regex_search(a, r)) - { - found_in = true; - break; - } - bool found_out = false; - for (const std::regex& r: regex_out) - if (std::regex_search(a, r)) - { - found_out = true; - break; - } - if (all_input_aps.has_value() == all_output_aps.has_value()) - { - if (!all_input_aps.has_value()) - { - // If the atomic proposition hasn't been classified - // because neither --ins nor --out were specified, - // attempt to classify automatically using the first - // letter. 
- int fl = a[0]; - if (fl == 'i' || fl == 'I') - found_in = true; - else if (fl == 'o' || fl == 'O') - found_out = true; - } - if (found_in && found_out) - error_at_line(2, 0, filename, linenum, - "'%s' matches both --ins and --outs", - a.c_str()); - if (!found_in && !found_out) - { - if (all_input_aps.has_value() || all_output_aps.has_value()) - error_at_line(2, 0, filename, linenum, - "one of --ins or --outs should match '%s'", - a.c_str()); - else - error_at_line(2, 0, filename, linenum, - "since '%s' does not start with 'i' or 'o', " - "it is unclear if it is an input or " - "an output;\n use --ins or --outs", - a.c_str()); - } - } - else - { - // if we had only --ins or only --outs, anything not - // matching that was given is assumed to belong to the - // other one. - if (!all_input_aps.has_value() && !found_out) - found_in = true; - else if (!all_output_aps.has_value() && !found_in) - found_out = true; - } - matched[found_out].push_back(a); - } + matched[is_output(a, filename, linenum)].push_back(a); return {matched[0], matched[1]}; } diff --git a/bin/common_ioap.hh b/bin/common_ioap.hh index cc20c61a5..575e749bf 100644 --- a/bin/common_ioap.hh +++ b/bin/common_ioap.hh @@ -45,6 +45,15 @@ extern std::vector regex_out; // map identifier to input/output (false=input, true=output) extern std::unordered_map identifier_map; + +// Given an atomic proposition AP and the above +// regex_in/regex_out/identifier_map, decide if this AP is an output +// (true) or input (false. +bool +is_output(const std::string& ap, + const char* filename = nullptr, int linenum = 0); + + // Separate the set of the atomic propositions appearing in f, into // two vectors: input APs and output APs, becased on regex_in, // regex_out, and identifier_map. diff --git a/bin/ltlmix.cc b/bin/ltlmix.cc index 1e36eeeb2..a97c807d0 100644 --- a/bin/ltlmix.cc +++ b/bin/ltlmix.cc @@ -24,6 +24,7 @@ #include "common_setup.hh" #include "common_finput.hh" #include "common_output.hh" +#include "common_ioap.hh" #include "common_conv.hh" #include "common_cout.hh" #include "common_range.hh" @@ -36,7 +37,9 @@ enum { OPT_BOOLEAN_PRIORITIES = 256, OPT_DUMP_PRIORITIES, OPT_DUPS, + OPT_INS, OPT_LTL_PRIORITIES, + OPT_OUTS, OPT_SEED, OPT_TREE_SIZE, }; @@ -70,18 +73,21 @@ static const argp_option options[] = { { nullptr, 0, nullptr, 0, "Generation parameters:", 2 }, { "allow-dups", OPT_DUPS, nullptr, 0, "allow duplicate formulas to be output", 0 }, - { "ap-count", 'A', "N", 0, + { "ap-count", 'A', "N[,M]", 0, "rename the atomic propositions in each selected formula by drawing " "randomly from N atomic propositions (the rewriting is bijective " - "if N is larger than the original set)", 0 }, + "if N is larger than the original set). 
If M is specified, two sets "
+    "of atomic propositions are used to represent inputs and outputs, and "
+    "options --ins/--outs can be used to classify the original propositions.",
+    0 },
   { "boolean", 'B', nullptr, 0,
     "generate Boolean combinations of formulas (default)", 0 },
   { "formulas", 'n', "INT", 0,
     "number of formulas to generate (default: 1);\n"
     "use a negative value for unbounded generation", 0 },
   { "ltl", 'L', nullptr, 0,
     "generate LTL combinations of subformulas", 0 },
-  { "polarized-ap", 'P', "N", 0,
-    "similar to -A N, but randomize the polarity of the new atomic "
+  { "polarized-ap", 'P', "N[,M]", 0,
+    "similar to -A, but randomize the polarity of the new atomic "
     "propositions", 0 },
   { "random-conjuncts", 'C', "N", 0,
     "generate random-conjunctions of N conjuncts; "
@@ -92,6 +98,12 @@
   { "tree-size", OPT_TREE_SIZE, "RANGE", 0,
     "tree size of main pattern generated (default: 5);\n"
     "input formulas count as size 1.", 0 },
+  { "ins", OPT_INS, "PROPS", 0,
+    "comma-separated list of atomic propositions to consider as input, "
+    "interpreted as a regex if enclosed in slashes", 0 },
+  { "outs", OPT_OUTS, "PROPS", 0,
+    "comma-separated list of atomic propositions to consider as output, "
+    "interpreted as a regex if enclosed in slashes", 0 },
   RANGE_DOC,
   /**************************************************/
   { nullptr, 0, nullptr, 0, "Adjusting probabilities:", 4 },
@@ -140,7 +152,9 @@
 static int opt_seed = 0;
 static range opt_tree_size = { 5, 5 };
 static bool opt_unique = true;
 static int opt_ap_count = 0;
+static int opt_out_ap_count = 0;
 static bool opt_literal = false;
+static bool opt_io = false;
 
 namespace
 {
@@ -160,8 +174,12 @@ namespace
     process_formula(spot::formula f, const char* filename = nullptr,
                     int linenum = 0) override
     {
-      (void) filename;
-      (void) linenum;
+      if (opt_io)
+        // Filter the atomic propositions of each formula in order to
+        // report those that are not classifiable. Throw the result
+        // of that filtering away, as we only care about the potential
+        // diagnostics.
+ (void) filter_list_of_aps(f, filename, linenum); opt->sub.insert(f); return 0; } @@ -170,6 +188,24 @@ namespace static sub_processor subreader; +std::pair to_int_pair(const char* arg, const char* opt) +{ + const char* comma = strchr(arg, ','); + if (!comma) + { + int res = to_int(arg, opt); + return {res, 0}; + } + else + { + std::string arg1(arg, comma); + int res1 = to_int(arg1.c_str(), opt); + int res2 = to_int(comma + 1, opt); + return {res1, res2}; + } +} + + static int parse_opt(int key, char* arg, struct argp_state*) { @@ -178,7 +214,8 @@ parse_opt(int key, char* arg, struct argp_state*) switch (key) { case 'A': - opt_ap_count = to_int(arg, "-A/--ap-count"); + std::tie(opt_ap_count, opt_out_ap_count) = + to_int_pair(arg, "-A/--ap-count"); opt_literal = false; break; case 'B': @@ -199,7 +236,8 @@ parse_opt(int key, char* arg, struct argp_state*) opt_formulas = to_int(arg, "-n/--formulas"); break; case 'P': - opt_ap_count = to_int(arg, "-P/--polarized-ap"); + std::tie(opt_ap_count, opt_out_ap_count) = + to_int_pair(arg, "-P/--polarized-ap"); opt_literal = true; break; case OPT_BOOLEAN_PRIORITIES: @@ -214,6 +252,20 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_DUPS: opt_unique = false; break; + case OPT_INS: + { + all_input_aps.emplace(std::vector{}); + split_aps(arg, *all_input_aps); + opt_io = true; + break; + } + case OPT_OUTS: + { + all_output_aps.emplace(std::vector{}); + split_aps(arg, *all_output_aps); + opt_io = true; + break; + } case OPT_SEED: opt_seed = to_int(arg, "--seed"); break; @@ -249,6 +301,19 @@ main(int argc, char* argv[]) check_no_formula("combine"); + if (opt_io && !opt_out_ap_count) + error(2, 0, + "options --ins and --outs only make sense when the " + "two-argument version of '-A N,M' or '-P N,M' is used."); + if (opt_out_ap_count > 0) + // Do not require --ins/--outs to be used, as the input + // pattern may use atomic propositions starting with i/o + // already. Setting opt_io will cause the subreader to + // complain about unclassifible atomic propositions. + opt_io = true; + if (opt_io) + process_io_options(); + if (subreader.run()) return 2; @@ -257,11 +322,16 @@ main(int argc, char* argv[]) spot::srand(opt_seed); + std::function output_p = nullptr; + if (opt_out_ap_count) + output_p = [&](spot::formula f) { return is_output(f.ap_name()); }; + spot::randltlgenerator rg (opt_ap_count, [&] (){ spot::option_map opts; opts.set("output", output); + opts.set("out_ap_size", opt_out_ap_count); opts.set("tree_size_min", opt_tree_size.min); opts.set("tree_size_max", opt_tree_size.max); opts.set("seed", opt_seed); @@ -269,7 +339,7 @@ main(int argc, char* argv[]) opts.set("unique", opt_unique); opts.set("literals", opt_literal); return opts; - }(), opt_pL, nullptr, opt_pB, &opt->sub); + }(), opt_pL, nullptr, opt_pB, &opt->sub, output_p); if (opt_dump_priorities) { diff --git a/spot/tl/apcollect.cc b/spot/tl/apcollect.cc index 6cea88ea6..ca3004bd2 100644 --- a/spot/tl/apcollect.cc +++ b/spot/tl/apcollect.cc @@ -28,13 +28,13 @@ namespace spot { - atomic_prop_set create_atomic_prop_set(unsigned n) + atomic_prop_set create_atomic_prop_set(unsigned n, const char* prefix) { atomic_prop_set res; for (unsigned i = 0; i < n; ++i) { std::ostringstream p; - p << 'p' << i; + p << prefix << i; res.insert(formula::ap(p.str())); } return res; diff --git a/spot/tl/apcollect.hh b/spot/tl/apcollect.hh index fec68287c..0157f9c7c 100644 --- a/spot/tl/apcollect.hh +++ b/spot/tl/apcollect.hh @@ -32,9 +32,11 @@ namespace spot /// Set of atomic propositions. 
typedef std::set atomic_prop_set; - /// \brief construct an atomic_prop_set with n propositions + /// \brief construct an atomic_prop_set with n propositions starting + /// with \a prefix SPOT_API - atomic_prop_set create_atomic_prop_set(unsigned n); + atomic_prop_set create_atomic_prop_set(unsigned n, + const char* prefix = "p"); /// \brief Return the set of atomic propositions occurring in a formula. /// diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index 913b60522..f36d4c444 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -32,11 +32,30 @@ namespace spot // randomly from \a ap. Avoid repetition if \a ap is large // enough. If \a lit is true, change the polarity of the atomic // proposition randomly. + // + // If \a out_ap is non-empty, use \a is_output to decide if an original + // atomic proposition should be replaced by an AP from ap or out_ap. static formula - randomize_ap(formula f, const atomic_prop_set* ap, bool lit) + randomize_ap(formula f, const atomic_prop_set* ap, + const atomic_prop_set* out_ap, + std::function is_output, + bool lit) { std::vector randap(ap->begin(), ap->end()); + std::vector randap_out; + if (out_ap && is_output != nullptr) + { + randap_out.reserve(out_ap->size()); + randap_out.insert(randap_out.begin(), out_ap->begin(), out_ap->end()); + } + if (randap_out.empty()) + { + is_output = nullptr; + out_ap = nullptr; + } + unsigned current_range = randap.size(); + unsigned current_out_range = randap_out.size(); std::map mapping; auto relabel = [&](formula f, auto self) -> formula @@ -47,15 +66,29 @@ namespace spot if (auto it = mapping.find(f); it != mapping.end()) return it->second; - // If we exhausted all possible AP, start again - if (current_range == 0) - current_range = randap.size(); - - // - unsigned pos = mrand(current_range--); - formula ap = randap[pos]; - std::swap(randap[current_range], randap[pos]); + bool is_out = false; + if (out_ap && is_output != nullptr) + is_out = is_output(f); + formula ap; + if (!is_out) + { + // If we exhausted all possible AP, start again + if (current_range == 0) + current_range = randap.size(); + unsigned pos = mrand(current_range--); + ap = randap[pos]; + std::swap(randap[current_range], randap[pos]); + } + else + { + // If we exhausted all possible AP, start again + if (current_out_range == 0) + current_out_range = randap_out.size(); + unsigned pos = mrand(current_out_range--); + ap = randap_out[pos]; + std::swap(randap_out[current_out_range], randap_out[pos]); + } if (lit && drand() < 0.5) ap = formula::Not(ap); @@ -86,8 +119,11 @@ namespace spot std::advance(i, mrand(rl->patterns()->size())); formula f = *i; const atomic_prop_set* ap = rl->ap(); + const atomic_prop_set* out_ap = rl->output_ap(); + auto is_output = rl->is_output_fun(); if (ap && ap->size() > 0) - f = randomize_ap(f, ap, rl->draw_literals()); + f = randomize_ap(f, ap, out_ap, is_output, + rl->draw_literals()); return f; } @@ -407,8 +443,10 @@ namespace spot // Boolean formulae random_boolean::random_boolean(const atomic_prop_set* ap, + const atomic_prop_set* output_ap, + std::function is_output, const atomic_prop_set* patterns) - : random_formula(9, ap) + : random_formula(9, ap, output_ap, is_output) { if (patterns) { @@ -473,15 +511,19 @@ namespace spot } random_ltl::random_ltl(const atomic_prop_set* ap, + const atomic_prop_set* output_ap, + std::function is_output, const atomic_prop_set* patterns) - : random_formula(16, ap) + : random_formula(16, ap, output_ap, is_output) { setup_proba_(patterns); update_sums(); } - 
random_ltl::random_ltl(int size, const atomic_prop_set* ap) - : random_formula(size, ap) + random_ltl::random_ltl(int size, const atomic_prop_set* ap, + const atomic_prop_set* output_ap, + std::function is_output) + : random_formula(size, ap, output_ap, is_output) { setup_proba_(nullptr); // No call to update_sums(), this functions is always @@ -507,7 +549,8 @@ namespace spot char* opt_pL, char* opt_pS, char* opt_pB, - const atomic_prop_set* subs) + const atomic_prop_set* subs, + std::function is_output) : opt_simpl_level_(opts.get("simplification_level", 3)), simpl_(tl_simplifier_options{opt_simpl_level_}) { @@ -518,6 +561,9 @@ namespace spot opt_tree_size_max_ = opts.get("tree_size_max", 15); opt_unique_ = opts.get("unique", 1); opt_wf_ = opts.get("wf", 0); + unsigned opt_output = opts.get("out_ap_size"); + if (opt_output > 0) + aprops_out_ = create_atomic_prop_set(opt_output, "o"); bool lit = opts.get("literals", 0); const char* tok_pL = nullptr; @@ -527,7 +573,7 @@ namespace spot switch (output_) { case randltlgenerator::LTL: - rf_ = new random_ltl(&aprops_, subs); + rf_ = new random_ltl(&aprops_, &aprops_out_, is_output, subs); rf_->draw_literals(lit); if (opt_pS) throw std::invalid_argument("Cannot set SERE priorities with " @@ -538,7 +584,7 @@ namespace spot tok_pL = rf_->parse_options(opt_pL); break; case randltlgenerator::Bool: - rf_ = new random_boolean(&aprops_, subs); + rf_ = new random_boolean(&aprops_, &aprops_out_, is_output, subs); rf_->draw_literals(lit); tok_pB = rf_->parse_options(opt_pB); if (opt_pL) @@ -583,9 +629,11 @@ namespace spot char* opt_pL, char* opt_pS, char* opt_pB, - const atomic_prop_set* subs) - : randltlgenerator(create_atomic_prop_set(aprops_n), opts, - opt_pL, opt_pS, opt_pB, subs) + const atomic_prop_set* subs, + std::function is_output) + : randltlgenerator(create_atomic_prop_set(aprops_n, + is_output == nullptr ? "p" : "i"), + opts, opt_pL, opt_pS, opt_pB, subs, is_output) { } diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index 8c2e7e0cd..2968ab2ef 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -34,8 +34,11 @@ namespace spot { public: random_formula(unsigned proba_size, - const atomic_prop_set* ap): - proba_size_(proba_size), proba_(new op_proba[proba_size_]), ap_(ap) + const atomic_prop_set* ap, + const atomic_prop_set* output_ap = nullptr, + std::function is_output = nullptr): + proba_size_(proba_size), proba_(new op_proba[proba_size_]), ap_(ap), + output_ap_(output_ap), is_output_(is_output) { } @@ -50,6 +53,17 @@ namespace spot return ap_; } + /// Return the set of atomic proposition used to build formulas. + const atomic_prop_set* output_ap() const + { + return output_ap_; + } + + std::function is_output_fun() const + { + return is_output_; + } + /// Return the set of patterns (sub-formulas) used to build formulas. const atomic_prop_set* patterns() const { @@ -115,7 +129,9 @@ namespace spot op_proba* proba_2_or_more_; double total_2_and_more_; const atomic_prop_set* ap_; + const atomic_prop_set* output_ap_ = nullptr; const atomic_prop_set* patterns_ = nullptr; + std::function is_output_ = nullptr; bool draw_literals_; }; @@ -174,11 +190,15 @@ namespace spot /// some from \a ap. The probability of false/true to be generated /// default to 0 in this case. 
random_ltl(const atomic_prop_set* ap, + const atomic_prop_set* output_ap = nullptr, + std::function is_output = nullptr, const atomic_prop_set* subformulas = nullptr); protected: void setup_proba_(const atomic_prop_set* patterns); - random_ltl(int size, const atomic_prop_set* ap); + random_ltl(int size, const atomic_prop_set* ap, + const atomic_prop_set* output_ap = nullptr, + std::function is_output = nullptr); }; /// \ingroup tl_io @@ -225,6 +245,8 @@ namespace spot /// atoms. Atomic propositions in patterns will be rewritten /// randomly by drawing some from \a ap. random_boolean(const atomic_prop_set* ap, + const atomic_prop_set* output_ap = nullptr, + std::function is_output = nullptr, const atomic_prop_set* subformulas = nullptr); }; @@ -342,13 +364,15 @@ namespace spot char* opt_pL = nullptr, char* opt_pS = nullptr, char* opt_pB = nullptr, - const atomic_prop_set* subformulas = nullptr); + const atomic_prop_set* subformulas = nullptr, + std::function is_output = nullptr); randltlgenerator(atomic_prop_set aprops, const option_map& opts, char* opt_pL = nullptr, char* opt_pS = nullptr, char* opt_pB = nullptr, - const atomic_prop_set* subformulas = nullptr); + const atomic_prop_set* subformulas = nullptr, + std::function is_output = nullptr); ~randltlgenerator(); @@ -366,6 +390,7 @@ namespace spot private: fset_t unique_set_; atomic_prop_set aprops_; + atomic_prop_set aprops_out_; int opt_seed_; int opt_tree_size_min_; diff --git a/tests/core/ltlmix.test b/tests/core/ltlmix.test index 232c1f76c..9e65b0021 100755 --- a/tests/core/ltlmix.test +++ b/tests/core/ltlmix.test @@ -94,3 +94,30 @@ test 10 -eq `grep '&.*&' < out | wc -l` ltlmix -fa -A500 -C3 -n10 | tee out2 diff out out2 + + +ltlmix -fGi -fFo -A3,3 -C4 -n10 | tee out +cat >expected < out +diff out expected + +ltlmix -fGa -fFz -A3,3 --outs='/[m-o]/,z' -C4 -n10 > out +diff out expected + + +ltlmix -fGa -fFz -A3,3 -C4 -n10 2> err && exit 1 +cat err +grep 'ins.*outs' err From c6f4b186556164f5e062e50cad5092adff884d9a Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 26 Aug 2024 15:27:23 +0200 Subject: [PATCH 475/606] genltl: add --lily-patterns * spot/gen/formulas.cc, spot/gen/formulas.hh, bin/genltl.cc: Implement support for --lily-pattern. * doc/spot.bib, bin/man/genltl.x: Add references. * NEWS: Mention it. * tests/core/ltlsynt.test: Use these formulas. * tests/core/genltl.test: Adjust. --- NEWS | 7 +++++ bin/genltl.cc | 5 +++ bin/man/genltl.x | 10 ++++-- doc/spot.bib | 11 +++++++ spot/gen/formulas.cc | 69 +++++++++++++++++++++++++++++++++++++---- spot/gen/formulas.hh | 3 ++ tests/core/genltl.test | 6 ++++ tests/core/ltlsynt.test | 29 +++++++++++++++++ 8 files changed, 131 insertions(+), 9 deletions(-) diff --git a/NEWS b/NEWS index cacacf5d2..0436d7cf8 100644 --- a/NEWS +++ b/NEWS @@ -15,6 +15,13 @@ New in spot 2.12.0.dev (not yet released) specify which atomic propositions are input or output, as this can be inferred from their name. + - genltl learned --lily-patterns to generate the example LTL + synthesis specifications from Lily 1.0.2. Those come with input + and output atomic proposition rewriten in the form "iNN" or "oNN", + so they can be fed to ltlsynt directly, as in + + % genltl --lily-patterns | ltlsynt -q + - autfilt learned --restrict-dead-end-edges, to restricts labels of edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. 
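To make the NEWS entry above concrete (the range 8..11 is an arbitrary
pick from the 23 available patterns):

    # Print four of the Lily specifications, then check their realizability.
    genltl --lily-patterns=8..11
    genltl --lily-patterns=8..11 | ltlsynt --realizability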
diff --git a/bin/genltl.cc b/bin/genltl.cc index 3a3cb169e..5a3ab3539 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -115,6 +115,11 @@ static const argp_option options[] = { "kv-psi", gen::LTL_KV_PSI, "RANGE", 0, "quadratic formula with doubly exponential DBA", 0 }, OPT_ALIAS(kr-n2), + { "lily-patterns", gen::LTL_LILY_PATTERNS, "RANGE", OPTION_ARG_OPTIONAL, + "LTL synthesis specification examples from Lily 1.0.2 " + "[Jobstmann & Bloem, FMCAD'06] " + "(range should be included in 1..23)", 0 }, + OPT_ALIAS(jb-patterns), { "ms-example", gen::LTL_MS_EXAMPLE, "RANGE[,RANGE]", 0, "GF(a1&X(a2&X(a3&...Xan)))&F(b1&F(b2&F(b3&...&Xbm)))", 0 }, { "ms-phi-h", gen::LTL_MS_PHI_H, "RANGE", 0, diff --git a/bin/man/genltl.x b/bin/man/genltl.x index db40c653b..35c2c1f66 100644 --- a/bin/man/genltl.x +++ b/bin/man/genltl.x @@ -29,13 +29,17 @@ gh J. Geldenhuys and H. Hansen: Larger automata and less work for LTL model checking. Proceedings of Spin'06. LNCS 3925. .TP +go +P. Gastin and D. Oddoux: Fast LTL to Büchi Automata Translation. +Proceedings of CAV'01. LNCS 2102. +.TP hkrss J. Holeček, T. Kratochvila, V. Řehák, D. Šafránek, and P. Šimeček: Verification Results in Liberouter Project. Tech. Report 03, CESNET, 2004. .TP -go -P. Gastin and D. Oddoux: Fast LTL to Büchi Automata Translation. -Proceedings of CAV'01. LNCS 2102. +jb, lily +B. Jobstmann, and R. Bloem: +Optimizations for LTL Synthesis. Proceedings of FMCAD'06. IEEE. .TP kr O. Kupferman and A. Rosenberg: The Blow-Up in Translating LTL to Deterministic diff --git a/doc/spot.bib b/doc/spot.bib index a2d5c1e9d..bc2b39a1f 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -639,6 +639,17 @@ doi = {10.4204/EPTCS.229.10} } +@InProceedings{ jobsmann.06.fmcad, + author = {Barbara Jobstmann and Roderick Bloem}, + title = {Optimizations for {LTL} Systhesis}, + booktitle = {Proceedings of the 6th International Conference on Formal + Methods in Computer-Aided Design (FMCAD'06)}, + year = {2006}, + month = nov, + publisher = {IEEE}, + doi = {10.1109/FMCAD.2006.22} +} + @InCollection{ klein.07.ciaa, year = {2007}, booktitle = {Proceedings of the 12th International Conference on the diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc index d888f4053..ad7f2e8d4 100644 --- a/spot/gen/formulas.cc +++ b/spot/gen/formulas.cc @@ -824,6 +824,56 @@ namespace spot return spot::relabel(parse_formula(formulas[n - 1]), Pnn); } + static formula + lily_pattern(int n) + { + static const char* formulas[] = { + "G(i2->(X(o0&X(o0&Xo0))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i2->(X(o0|X(o0|Xo0))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i0->Xi1)->G(i2->(X(o0|X(o0|Xo0))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i0->X(i1|Xi1))->" + "G(i2->(X(o0|X(o0|Xo0))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i0->X(i1|Xi1))->" + "G(i2->(X(i0|o0|X(i0|o0|X(i0|o0)))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i0->X(i1|X(i1|Xi1)))->" + "G(i2->(X(i0|o0|X(i0|o0|X(i0|o0)))&(o0->X!o0)&(i0->X(!o0 U i1))))", + "G(i0->X(i1|Xi1))->G(i0->(X(!o0 U i1)" + "&(o0->X!o0)&(i2->(i0|o0|X(i0|o0|X(i0|o0|X(i0|o0)))))))", + "GFi0->GFo0", + "GFi0->(!o0&G(!o0->((!o0 U i0)&(i0->Fo0)))&GFo0)", + "(GFi1|Fi0)->(GFo1|Go0)", + "!(G(i1->Fo0)&G(i0->Fo1))", + "G!o1|G(i1->Fo0)|G(i0->Fo1)", + "Gi0->(Fo0&(G!i0->F!o0))", + "G!(o0&o1)&(GFi0->GFo0)&(GFi1->GFo1)", + // lily=15 This matches the original formula from Lily, not + // the unrealizable version from Syfco. See + // https://github.com/reactive-systems/syfco/issues/55 + "G(i0->(!(o0&o1)&Fo0&(i1->Fo1)))&((!o0 U i0)|G!o0)&((!o1 U i1)|G!o1)", + // lily=16 Same comment as above. 
+ "G(i0->(!(o0&o1)&!(o0&o2)&!(o1&o2)&Fo0&(i1->Fo1)&(i2->Fo2)))&" + "((!o0 U i0)|G!o0)&((!o1 U i1)|G!o1)&((!o2 U i2)|G!o2)", + "G(!(o0&o1)&!(o1&o2)&!(o0&o2))&(GFi0->GFo0)&(GFi1->GFo1)&GFo2", + "G(!(o0&o1)&!(o0&o2)&!(o0&o3)&!(o1&o2)&!(o1&o3)&!(o2&o3))&" + "(GFi0->GFo0)&(GFi1->GFo1)&(GFi2->GFo2)&GFo3", + "GFi1->G(o1->(!(o0&o1)&(o1 U i1)&(o0->(o0 U i1))&(i0->Fo0)&Fo1))", + "(!i1&G((!i1&!o2)->X!i1)&G(i1->F!i1)&G(o2->Xi1))->" + "G((o1&X!o1)->(o2&(o0|o1)&((o0&X!o0)->o2)&((!o0&(!i0|!i1))->" + "Xo0)&((!o1&(i0|!i1))->Xo1)&(i0->F!o0)&F!o1))", + "(G(!i0|!i1)&G(!i0|!i2)&G(!i0|!i3)&G(!i1|!i2)&G(!i1|!i3)&" + "G(!i2|!i3))->G((!o0|!o1)&(!o0|!o2)&(!o0|!o3)&(!o1|!o2)&(!o1|!o3)&" + "(!o2|!o3)&G(i0->(Xo0|XXo0|XXXo0))&G(i1->(Xo1|XXo1|XXXo1))&" + "G(i2->(Xo2|XXo2|XXXo2))&G(i3->(Xo3|XXo3|XXXo3)))", + "(!i0&!i1&!i2&G!(i0&i1)&GF!i2&G((!i2&o0)->X!i2)&G(i2->X(!i2|" + "X(!i2|X(!i2 | X!i2)))))->G(!(i2&Xo0)&(i1->F!o0)&(i0->Fo0))", + "(G((i0&Xo0)->Xi0)&GF!i0)->GX(!i0&X!o0)", + }; + constexpr unsigned max = (sizeof formulas)/(sizeof *formulas); + if (n < 1 || (unsigned) n > max) + bad_number("lily-patterns", n, max); + return parse_formula(formulas[n - 1]); + } + static formula p_pattern(int n) { @@ -1359,12 +1409,8 @@ namespace spot return kr1_exp(n, "a", "b", "c", "d", "y", "z"); case LTL_KV_PSI: return kv_exp(n, "a", "b", "c", "d"); - case LTL_OR_FG: - return FG_n("p", n, false); - case LTL_OR_G: - return combunop_n("p", n, op::G, false); - case LTL_OR_GF: - return GF_n("p", n, false); + case LTL_LILY_PATTERNS: + return lily_pattern(n); case LTL_MS_EXAMPLE: return ms_example("a", "b", n, m); case LTL_MS_PHI_H: @@ -1373,6 +1419,12 @@ namespace spot return ms_phi_rs("a", "b", n, true); case LTL_MS_PHI_S: return ms_phi_rs("a", "b", n, false); + case LTL_OR_FG: + return FG_n("p", n, false); + case LTL_OR_G: + return combunop_n("p", n, op::G, false); + case LTL_OR_GF: + return GF_n("p", n, false); case LTL_P_PATTERNS: return p_pattern(n); case LTL_PPS_ARBITER_STANDARD: @@ -1448,6 +1500,7 @@ namespace spot "kr-n", "kr-nlogn", "kv-psi", + "lily-patterns", "ms-example", "ms-phi-h", "ms-phi-r", @@ -1518,6 +1571,9 @@ namespace spot case LTL_KR_N: case LTL_KR_NLOGN: case LTL_KV_PSI: + return 0; + case LTL_LILY_PATTERNS: + return 23; case LTL_MS_EXAMPLE: case LTL_MS_PHI_H: case LTL_MS_PHI_R: @@ -1586,6 +1642,7 @@ namespace spot case LTL_KR_N: case LTL_KR_NLOGN: case LTL_KV_PSI: + case LTL_LILY_PATTERNS: return 1; case LTL_MS_EXAMPLE: return 2; diff --git a/spot/gen/formulas.hh b/spot/gen/formulas.hh index 131b234b5..044ab753f 100644 --- a/spot/gen/formulas.hh +++ b/spot/gen/formulas.hh @@ -91,6 +91,9 @@ namespace spot /// \cite kupferman.10.mochart , /// \cite kupferman.05.tcl . LTL_KV_PSI, + /// LTL synthesis examples specification from the Lily 1.0.2 + /// distribution. \cite jobstmann.06.fmcad + LTL_LILY_PATTERNS, /// `GF(a1&X(a2&X(a3&...Xan)))&F(b1&F(b2&F(b3&...&Xbm)))` /// \cite muller.17.gandalf LTL_MS_EXAMPLE, diff --git a/tests/core/genltl.test b/tests/core/genltl.test index add4b1d99..3597179d9 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -241,6 +241,12 @@ test $? = 2 test "`cat err`" = \ "genltl: no pattern hkrss-patterns=0, supported range is 1..55" +genltl --lily-patterns=0 2> err && exit 1 +test $? = 2 +test "`cat err`" = \ + "genltl: no pattern lily-patterns=0, supported range is 1..23" + + genltl --kr-n=0 2> err && exit 1 test $? = 2 test "`cat err`" = "genltl: no pattern kr-n=0, supported range is 1.." 
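Since these formulas use the iNN/oNN naming convention, they can also feed
the I/O mode of ltlmix added earlier in this series; a possible
benchmark-generation pipeline (the set sizes and counts are arbitrary
choices for this sketch) is:

    # 20 random conjunctions of 3 Lily patterns each, with inputs and
    # outputs redrawn from two sets of 10 propositions.
    genltl --lily-patterns | ltlmix -A10,10 -C3 -n20 | ltlsynt --realizability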
diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index acd935560..9500550ea 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1316,3 +1316,32 @@ f2='G(i1->(o1|!o2)) & G(!i1->(o3|!o4)) & G(i2->(!o1|o2)) & G(!i2->(!o3|o4))&Go5' ltlsynt -f "$f2" --polarity=before-decom --verbose 2>out 1>&2 sed 's/ [0-9.e-]* seconds/ X seconds/g;s/ -> /->/g;' out > outx diff outx exp + + +genltl --lily-patterns | ltlsynt -q > out && exit 2 +cat >expected < Date: Mon, 26 Aug 2024 16:08:05 +0200 Subject: [PATCH 476/606] org: add example of ltlmix used on synthesis specifications * doc/org/ltlmix.org: Here. --- doc/org/ltlmix.org | 105 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index 341c64833..71b99b934 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -218,7 +218,7 @@ These options solve two problems: - They lessen the issue that a formula selected several times can lead to syntax tree such as =φ | φ | φ= that reduces to =φ=. Now, each occurrence of =φ= as a chance to use different atomic propositions. - (the larger =N= is, the more likely it is that these copies of φ + (The larger =N= is, the more likely it is that these copies of φ will be different). - They allow combining formulas that had completely different sets of @@ -386,3 +386,106 @@ Selecting 10 random conjuncts out of 3×50×2=300 possibilities has a 13.7% chance that at least 2 conjuncts will be identical (see [[https://en.wikipedia.org/wiki/Birthday_problem][Birthday paradox]]), so because of Spot's trivial rewritings, some of the above formulas may have fewer than 10 conjuncts. + +** Random conjunctions for LTL synthesis + +Generating formulas for LTL synthesis differs from LTL satisfiability +because we have to deal with two sets of atomic proposition: one set +for input, and one set for output. + +[[https://www.ijcai.org/proceedings/2017/0189.pdf][Zhu et al. (IJCAI'17)]] generate their benchmark for LTL synthesis using +a setup similar to the above, except that when atomic proposition are +randomized, we must make sure not to change their input or output +nature. + +They use small examples from the [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily]] distribution has basic formulas +to combine. Spot can print those using =genltl --lily=. There are 23 +of them, we will limit ourselves to four of them for illustrative +purpose. + +#+BEGIN_SRC sh :exports both + genltl --lily=8..11 +#+END_SRC + +#+RESULTS: +: GFi0 -> GFo0 +: GFi0 -> (!o0 & G(!o0 -> ((!o0 U i0) & (i0 -> Fo0))) & GFo0) +: (GFi1 | Fi0) -> (GFo1 | Go0) +: !(G(i1 -> Fo0) & G(i0 -> Fo1)) + +Notice that atomic proposition either start with =i= (for input) or +=o= for output. This allows Spot to infer their nature. For instance, +we could feed those directly to [[file:ltlsynt.org][=ltlsynt=]]: + +#+BEGIN_SRC sh :exports both :prologue true + genltl --lily=8..11 | ltlsynt -q +#+END_SRC + +#+RESULTS: +: REALIZABLE +: REALIZABLE +: REALIZABLE +: UNREALIZABLE + + +When randomizing the atomic propositions in these formulas before +combining them, we want to replace each input (resp. output) +proposition by a random input (resp. output) proposition. This is +achieved by passing two numbers to =-A= or =-P=. In the case of [[https://www.ijcai.org/proceedings/2017/0189.pdf][Zhu +et al.]], they do not change the polarity of the proposition during +their generation, so we would use =-A= to mimic their setup. 
+ +Here are 6 random conjunctions of the above four patterns, in which +each input (resp. output) atomic proposition has been replaced by a +random input (resp. output) atomic proposition picked randomly in a +set of 5 (resp. 4). + + +#+BEGIN_SRC sh :exports both + genltl --lily=8..11 | ltlmix -A5,4 -C3 -n6 +#+END_SRC + +#+RESULTS: +: !(G(i3 -> Fo3) & G(i2 -> Fo2)) & (GFi2 -> (!o1 & GFo1 & G(!o1 -> ((!o1 U i2) & (i2 -> Fo1))))) & (GFi4 -> GFo1) +: (GFi2 -> (!o1 & GFo1 & G(!o1 -> ((!o1 U i2) & (i2 -> Fo1))))) & !(G(i0 -> Fo1) & G(i4 -> Fo3)) & (GFi4 -> (!o3 & GFo3 & G(!o3 -> ((i4 -> Fo3) & (!o3 U i4))))) +: (GFi3 -> (!o3 & GFo3 & G(!o3 -> ((i3 -> Fo3) & (!o3 U i3))))) & ((GFi2 | Fi3) -> (GFo0 | Go2)) & ((Fi0 | GFi2) -> (GFo3 | Go2)) +: (GFi3 -> GFo2) & (GFi2 -> GFo0) & (GFi3 -> (!o0 & GFo0 & G(!o0 -> ((!o0 U i3) & (i3 -> Fo0))))) +: !(G(i3 -> Fo1) & G(i1 -> Fo3)) & !(G(i3 -> Fo0) & G(i0 -> Fo2)) & ((GFi0 | Fi3) -> (GFo0 | Go1)) +: ((Fi1 | GFi4) -> (Go0 | GFo2)) & !(G(i0 -> Fo2) & G(i4 -> Fo1)) & !(G(i3 -> Fo2) & G(i1 -> Fo1)) + +#+BEGIN_SRC sh :exports both :prologue true + genltl --lily=8..11 | ltlmix -A5,4 -C3 -n6 | ltlsynt -q +#+END_SRC + +#+RESULTS: +: UNREALIZABLE +: UNREALIZABLE +: REALIZABLE +: UNREALIZABLE +: UNREALIZABLE +: UNREALIZABLE + +Note that because one of the original pattern is unrealizable, any +conjunction involving it will be unrealizable. Even if we had only +realizable specifications to combine, the smaller the atomic +propositions set are, the more likely the random conjuncts will be in +conflict. Therefore, increasing the number of atomic propositions to +chose from may help to get more realizable formulas. + +#+BEGIN_SRC sh :exports both :prologue true + genltl --lily=8..11 | ltlmix -A50,50 -C3 -n6 | ltlsynt -q +#+END_SRC + +#+RESULTS: +: UNREALIZABLE +: UNREALIZABLE +: REALIZABLE +: REALIZABLE +: UNREALIZABLE +: UNREALIZABLE + + +When the original LTL synthesis specification formulas have atomic +proposition that do not start with =i= or =o=, options =--ins= and +=--outs= can be used to specify the nature of the atomic propositions. +These options work as with [[file:ltlsynt.org][=ltlsynt=]]. From b549e8e8c10ea0188049068f22f76f3ec62b3f5b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 27 Aug 2024 00:28:35 +0200 Subject: [PATCH 477/606] * doc/org/ltlmix.org: Fix several typos. --- doc/org/ltlmix.org | 105 +++++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 51 deletions(-) diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index 71b99b934..b8555592e 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -#+TITLE: =ltlgrind= +#+TITLE: =ltlmix= #+DESCRIPTION: Spot command-line tool for combining LTL formulas randomly #+INCLUDE: setup.org #+HTML_LINK_UP: tools.html @@ -14,7 +14,7 @@ some handwritten, meaningful formulas, and combine those formulas to build larger sets that are possibly more challenging. Here is a very simple example that builds five formulas that are -Boolean combination of formulas from taken in the set +Boolean combination of formulas taken from the set $\{\mathsf{GF}a,\mathsf{FG}b,\mathsf{X}c\}$: #+BEGIN_SRC sh :exports both @@ -34,15 +34,15 @@ ltlmix -f GFa -f FGb -f Xc -n 5 For each formula that it generates, =ltlmix= constructs a random syntax-tree of a certain size (5 by default) in which internal nodes -represent operators selected randomly from a list of operator, and -leaves are subformulas selected randomly from the set of input -formulas. 
As an example, the syntax tree of =!φ₁ xor !φ₂= has size 5,
-and its leaves =φ₁= and =φ₂= will be taken randomly from the set of
-input formulas.
+represent operators selected randomly from a list of operators.
+Leaves of that tree are subformulas selected randomly from the set of
+input formulas.  As an example, the syntax tree of =!φ₁ xor !φ₂= has
+size 5, and its leaves =φ₁= and =φ₂= will be taken randomly from the
+set of input formulas.
 
-The algorithm is actually the same as for =randltl=, except
-that =randltl= use random atomic propositions as leaves when =ltlmix=
-uses random formulas.
+The algorithm is actually the same as for =randltl=, except that
+=randltl= uses random atomic propositions as leaves when =ltlmix= uses
+random formulas.
 
 The same input formula can be picked several times to be used on
 multiple leaves of the tree.  Note that because Spot implements some
@@ -104,10 +104,10 @@
 and    1
 or     1
 #+end_example
 
-In the above list, =false= and =true= represent the Boolean constants
-(which are usually undesirable when building random Boolean formulas),
-and =sub= represent a random formula drawn from the list of input
-formulas.
+In the above list, =false= and =true= represent the Boolean constants:
+those are usually undesirable when building random Boolean formulas,
+especially with Spot's trivial rewritings.  =sub= represents a random
+formula drawn from the list of input formulas.
 
 The above command shows that each operator has a weight, called
 /priority/.  When the priority is 0, the operator is never used.  When
@@ -146,7 +146,7 @@ Ge <-> (!Fc <-> !Xa)
 
 ** Boolean or LTL syntax tree
 
 By default, the syntax tree generated on top of the randomly selected
-input formula uses only Boolean operators.
+input formulas uses only Boolean operators.
 
 Using option =-L= will use LTL operators instead.
 
@@ -195,21 +195,24 @@
 and    1
 or     1
 #+end_example
 
-Note that in the LTL case, =false= and =true= can be generated by default.
+Note that in the LTL case, =false= and =true= can be generated by
+default: when building leaves, =false= and =true= have the same
+probability to be selected as any input formula.
+example). He
 
 * Randomizing atomic propositions with =-A= or =-P=
 
 Options =-A= or =-P= can be used to change the atomic propositions
-used in the input formulas. This works as follows: if =-A N= was
-given, every time an input formula φ is selected, its atomic
+used in the input formulas. This works as follows: if option =-A N=
+was given, every time an input formula φ is selected, its atomic
 propositions are replaced by atomic propositions randomly selected in
 a set of size $N$. If φ uses $i$ atomic propositions and $i\ge N$,
-then those $i$ atomic proposition will be remapped to $i$ distinct
-atomic propositions chosen randomly in that set. if $i>N$, some of
+then those $i$ atomic proposition will be remapped to $i$ distinct
+atomic propositions chosen randomly in that set. If $i>N$, some of
 the new atomic propositions may replace several of the original
 atomic propositions.
 
-Option =-P N= is similar to =-A N= except that the selected atomic
+Option =-P N= is similar to =-A N=, except that the selected atomic
 propositions can possibly be negated.
 
@@ -218,17 +221,18 @@ These options solve two problems:
 
 - They lessen the issue that a formula selected several times can
   lead to syntax tree such as =φ | φ | φ= that reduces to =φ=. Now,
   each occurrence of =φ= as a chance to use different atomic propositions.
-  (The larger =N= is, the more likely it is that these copies of φ
-  will be different).
+ The larger =N= is, the more likely it is that these copies of φ + will be different. - They allow combining formulas that had completely different sets of - atomic propositions, in such a way that they are now interdependent - (the smaller N is the more likely it is that subformulas will share - atomic propositions). + atomic propositions, in such a way that they are now interdependent. + The smaller N is the more likely it is that subformulas will share + atomic propositions. -Here is an example with a single formula, =GFa=, whose atomic proposition -will be randomly replaced by one of $\{p_0,p_1,p_2,p_3,p_4\}$. +Here is that same example with a single formula, =GFa=, whose atomic +proposition will be randomly replaced by one of +$\{p_0,p_1,p_2,p_3,p_4\}$. #+BEGIN_SRC sh :exports both ltlmix -fGFa -A5 --tree-size=8 -n10 @@ -274,8 +278,8 @@ GF!p1 xor (!GF!p2 | (GF!p1 <-> GFp0)) ** Mixing the DAC patterns The command [[file:genltl.org][=genltl --dac-pattern=]] will print a list of 55 LTL -formulas representing various specification patterns listed by Dwyer -et al. (FMSP'98). Using =--stat=%x= to count the atomic propositions +formulas representing various specification patterns listed by [[https://doi.org/10.1145/302405.30267][Dwyer +et al. (FMSP'98)]]. Using =--stat=%x= to count the atomic propositions in each formula, and some standard unix tools, we can compute that they use at most 6 atomic propositions. @@ -324,9 +328,9 @@ so is uses atomic propositions $\{p_0,p_1,...\}$ starting at 0 and without gap. ** Random conjunctions -Some benchmarks (e.g., [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][for LTL satisfiability]]) are built by -conjunction of $L$ random formulas picked from a set of basic -formulas. Each picked formula has its atomic proposition mapped to +Some benchmarks (e.g., [[https://www.cs.rice.edu/~vardi/papers/time13.pdf][for LTL satisfiability]]) are built as +conjunctions of $L$ random formulas picked from a set of basic +formulas. Each picked formula has its atomic propositions mapped to random literals built from a subset of $m$ atomic variables. Given a value for $m$, option =-P m= will achieve the second part of @@ -335,7 +339,7 @@ need to ask for a tree of size $2L-1$ in which only the =and= operator is allowed. Here is an example with $L=10$ (hence =--tree-size=19=) and $m=50$. -The example use a small set of three basic formulas +The example uses a small set of three basic formulas $\{\mathsf{G}a,\mathsf{F}a,\mathsf{X}a\}$ for illustration, but in practice you should replace these =-f= options by =-F FILENAME= pointing to a file containing all the input formulas to select from. @@ -359,9 +363,9 @@ Xp27 & Xp5 & Fp28 & Xp18 & G!p13 & Gp35 & Gp38 & G!p45 & G!p48 & Gp12 Xp7 & G!p48 & Xp14 & Fp24 & Xp43 & Fp47 & Fp14 & Gp30 & Xp23 & G!p31 #+end_example -In fact building random conjunctions is common enough to have its own -flag. Using =-C N= will see the tree size to $2N-1$ and disable all -operators but =and=. The above command can therefore be reduced to +Random conjunctions is common enough to have its own flag. Using =-C +N= will see the tree size to $2N-1$ and disable all operators but +=and=. 
The above command can therefore be reduced to #+BEGIN_SRC sh :exports both ltlmix -fGa -fFa -fXa -n10 -P50 -C10 @@ -381,7 +385,6 @@ Xp27 & Xp5 & Fp28 & Xp18 & G!p13 & Gp35 & Gp38 & G!p45 & G!p48 & Gp12 Xp7 & G!p48 & Xp14 & Fp24 & Xp43 & Fp47 & Fp14 & Gp30 & Xp23 & G!p31 #+end_example - Selecting 10 random conjuncts out of 3×50×2=300 possibilities has a 13.7% chance that at least 2 conjuncts will be identical (see [[https://en.wikipedia.org/wiki/Birthday_problem][Birthday paradox]]), so because of Spot's trivial rewritings, some of the above @@ -398,10 +401,10 @@ a setup similar to the above, except that when atomic proposition are randomized, we must make sure not to change their input or output nature. -They use small examples from the [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily]] distribution has basic formulas -to combine. Spot can print those using =genltl --lily=. There are 23 -of them, we will limit ourselves to four of them for illustrative -purpose. +They use small examples from the [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily]] distribution as basic formulas +to combine. Spot can print those formulas using =genltl --lily=. +There are 23 of them, but we will limit ourselves to four of them for +illustrative purpose. #+BEGIN_SRC sh :exports both genltl --lily=8..11 @@ -413,12 +416,13 @@ purpose. : (GFi1 | Fi0) -> (GFo1 | Go0) : !(G(i1 -> Fo0) & G(i0 -> Fo1)) -Notice that atomic proposition either start with =i= (for input) or -=o= for output. This allows Spot to infer their nature. For instance, -we could feed those directly to [[file:ltlsynt.org][=ltlsynt=]]: +Notice how these atomic propositions either start with =i= (for input) +or =o= for output. This allows Spot to infer their nature. For +instance, we could feed those directly to [[file:ltlsynt.org][=ltlsynt=]] to decide if they +are realizable: -#+BEGIN_SRC sh :exports both :prologue true - genltl --lily=8..11 | ltlsynt -q +#+BEGIN_SRC sh :exports both :epilogue true + genltl --lily=8..11 | ltlsynt --realizability #+END_SRC #+RESULTS: @@ -427,7 +431,6 @@ we could feed those directly to [[file:ltlsynt.org][=ltlsynt=]]: : REALIZABLE : UNREALIZABLE - When randomizing the atomic propositions in these formulas before combining them, we want to replace each input (resp. output) proposition by a random input (resp. output) proposition. This is @@ -453,8 +456,8 @@ set of 5 (resp. 4). : !(G(i3 -> Fo1) & G(i1 -> Fo3)) & !(G(i3 -> Fo0) & G(i0 -> Fo2)) & ((GFi0 | Fi3) -> (GFo0 | Go1)) : ((Fi1 | GFi4) -> (Go0 | GFo2)) & !(G(i0 -> Fo2) & G(i4 -> Fo1)) & !(G(i3 -> Fo2) & G(i1 -> Fo1)) -#+BEGIN_SRC sh :exports both :prologue true - genltl --lily=8..11 | ltlmix -A5,4 -C3 -n6 | ltlsynt -q +#+BEGIN_SRC sh :exports both :epilogue true + genltl --lily=8..11 | ltlmix -A5,4 -C3 -n6 | ltlsynt --realizability #+END_SRC #+RESULTS: @@ -468,11 +471,11 @@ set of 5 (resp. 4). Note that because one of the original pattern is unrealizable, any conjunction involving it will be unrealizable. Even if we had only realizable specifications to combine, the smaller the atomic -propositions set are, the more likely the random conjuncts will be in +proposition sets are, the more likely the random conjuncts will be in conflict. Therefore, increasing the number of atomic propositions to chose from may help to get more realizable formulas. 
-#+BEGIN_SRC sh :exports both :prologue true +#+BEGIN_SRC sh :exports both :epilogue true genltl --lily=8..11 | ltlmix -A50,50 -C3 -n6 | ltlsynt -q #+END_SRC From 5f1d00b8588dc729de47e3e957a5645527cf299c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 28 Aug 2024 15:57:33 +0200 Subject: [PATCH 478/606] twaalgos: introduce match_states(a,b) This is a useful part for issue #591. * spot/twaalgos/matchstates.cc, spot/twaalgos/matchstates.hh: New files. * spot/twaalgos/Makefile.am: Add them. * python/spot/impl.i: Add python bindings. * tests/python/matchstates.py: New file. * tests/Makefile.am: Add it. * NEWS: Mention this new function. --- NEWS | 6 ++++ python/spot/impl.i | 3 ++ spot/twaalgos/Makefile.am | 2 ++ spot/twaalgos/matchstates.cc | 50 +++++++++++++++++++++++++++++ spot/twaalgos/matchstates.hh | 40 +++++++++++++++++++++++ tests/Makefile.am | 1 + tests/python/matchstates.py | 62 ++++++++++++++++++++++++++++++++++++ 7 files changed, 164 insertions(+) create mode 100644 spot/twaalgos/matchstates.cc create mode 100644 spot/twaalgos/matchstates.hh create mode 100644 tests/python/matchstates.py diff --git a/NEWS b/NEWS index 0436d7cf8..4a79a551c 100644 --- a/NEWS +++ b/NEWS @@ -56,6 +56,12 @@ New in spot 2.12.0.dev (not yet released) were missing the rule "[*0]|f ≡ f" when f already accepts the empty word. (Issue #545.) + - spot::match_states(A, B) is a new function that returns a vector + V such that V[x] contains all states y such that state (x, y) can + reach an accepting cycle in product(A, B). In particular, if A + and B accept the same language, any word accepted by A from state + x can be accepted in B from some state in V[x]. + Bug fixes: - Generating random formulas without any unary opertor would very diff --git a/python/spot/impl.i b/python/spot/impl.i index 6d91144d8..1f9c11fde 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -132,6 +132,7 @@ #include #include #include +#include #include #include #include @@ -522,6 +523,7 @@ namespace std { %template(pairintacccond) pair; %template(vectorformula) vector; %template(vectorunsigned) vector; + %template(vectorvectorunsigned) vector>; %template(vectorpairunsigned) vector>; %template(vectoracccond) vector; %template(vectoracccode) vector; @@ -739,6 +741,7 @@ def state_is_accepting(self, src) -> "bool": %include %include %include +%include %include %include %include diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index ede79f81a..b5993aaef 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -64,6 +64,7 @@ twaalgos_HEADERS = \ ltl2tgba_fm.hh \ magic.hh \ mask.hh \ + matchstates.hh \ minimize.hh \ mealy_machine.hh \ couvreurnew.hh \ @@ -139,6 +140,7 @@ libtwaalgos_la_SOURCES = \ ltl2tgba_fm.cc \ magic.cc \ mask.cc \ + matchstates.cc \ minimize.cc \ mealy_machine.cc \ couvreurnew.cc \ diff --git a/spot/twaalgos/matchstates.cc b/spot/twaalgos/matchstates.cc new file mode 100644 index 000000000..14edfe18b --- /dev/null +++ b/spot/twaalgos/matchstates.cc @@ -0,0 +1,50 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. 
+// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include + +namespace spot +{ + std::vector> + match_states(const const_twa_graph_ptr& aut1, + const const_twa_graph_ptr& aut2) + { + twa_graph_ptr prod = product(aut1, aut2); + product_states* ps = prod->get_named_prop("product-states"); + if (!ps) + return {}; + scc_info si(prod, scc_info_options::TRACK_SUCCS); + + std::vector> v(aut1->num_states()); + unsigned sz = ps->size(); + assert(sz == prod->num_states()); + for (unsigned sp = 0; sp < sz; ++sp) + if (si.is_useful_state(sp)) + { + auto [sl, sr] = (*ps)[sp]; + v[sl].push_back(sr); + } + return v; + } + + + +} diff --git a/spot/twaalgos/matchstates.hh b/spot/twaalgos/matchstates.hh new file mode 100644 index 000000000..7bd972402 --- /dev/null +++ b/spot/twaalgos/matchstates.hh @@ -0,0 +1,40 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include +#include + +namespace spot +{ + /// \ingroup twa_algorithms + /// \brief match the state of \a aut1 with the states of \a aut2. + /// + /// Return a vector `V` such that for each state `x` of + /// \a aut1, `V[x]` contains the set of states `y` such that + /// `(x,y)` is a useful state of `product(aut1,aut2)`. + /// + /// In particular, if the language of \a aut2 includes the language + /// of \a aut1, then any word accepted from state `x` in \a aut1 + /// is also accepted from one of the states in `V[x]`. + SPOT_API std::vector> + match_states(const const_twa_graph_ptr& aut1, + const const_twa_graph_ptr& aut2); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 54abc1a73..16b283077 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -435,6 +435,7 @@ TESTS_python = \ python/ltlf.py \ python/ltlparse.py \ python/ltlsimple.py \ + python/matchstates.py \ python/mealy.py \ python/_mealy.ipynb \ python/merge.py \ diff --git a/tests/python/matchstates.py b/tests/python/matchstates.py new file mode 100644 index 000000000..050a84b84 --- /dev/null +++ b/tests/python/matchstates.py @@ -0,0 +1,62 @@ +#!/usr/bin/python3 +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + +a = spot.automaton("""HOA: v1 +States: 4 +Start: 0 +AP: 3 "a" "b" "c" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc stutter-invariant +properties: very-weak +--BODY-- +State: 0 "Ga | Gb | Gc" +[0] 1 +[1] 2 +[2] 3 +State: 1 "Ga" +[0] 1 +State: 2 "Gb" +[1] 2 +State: 3 "Gc" +[2] 3 +--END--""") + +b = spot.automaton("""HOA: v1 States: 7 Start: 6 AP: 3 "a" "b" "c" +acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels +state-acc deterministic properties: stutter-invariant very-weak +--BODY-- State: 0 [2] 0 State: 1 [!1&2] 0 [1&2] 1 [1&!2] 2 State: 2 +[1] 2 State: 3 [0] 3 State: 4 [!0&2] 0 [0&!2] 3 [0&2] 4 State: 5 +[!0&1] 2 [0&!1] 3 [0&1] 5 State: 6 [!0&!1&2] 0 [!0&1&2] 1 [!0&1&!2] 2 +[0&!1&!2] 3 [0&!1&2] 4 [0&1&!2] 5 [0&1&2] 6 --END--""") + +m1 = spot.match_states(a, b) +tc.assertEqual(m1, ((6,), (3, 4, 5, 6), (1, 2, 5, 6), (0, 1, 4, 6))) +m2 = spot.match_states(b, a) +tc.assertEqual(m2, ((3,), (2, 3), (2,), (1,), (1, 3), (1, 2), (0, 1, 2, 3))) + +c = spot.translate('false') +m3 = spot.match_states(a, c) +tc.assertEqual(m3, ((), (), (), ())) +m4 = spot.match_states(c, a) +tc.assertEqual(m4, ((), )) From 3d3e87948c0c7dc798d53abfc7ab61d29d06770b Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 29 Aug 2024 11:16:35 +0200 Subject: [PATCH 479/606] twaalgos: add a match_states variant with a formula argument This is related to issue #591, reported by Blake C. Rawlings. * spot/twaalgos/ltl2tgba_fm.cc, spot/twaalgos/ltl2tgba_fm.hh (ltl_to_tgba_fm): Add option to keep LTL labels. * spot/twaalgos/matchstates.cc, spot/twaalgos/matchstates.hh (match_states): Add variant with a formula as second argument. * tests/python/matchstates.py: Test it. * NEWS: Mention it. * THANKS: Add reporter. --- NEWS | 6 ++++++ THANKS | 1 + spot/twaalgos/ltl2tgba_fm.cc | 7 ++++--- spot/twaalgos/ltl2tgba_fm.hh | 7 ++++++- spot/twaalgos/matchstates.cc | 39 ++++++++++++++++++++++++++++++++++++ spot/twaalgos/matchstates.hh | 18 +++++++++++++++++ tests/python/matchstates.py | 15 ++++++++++++++ 7 files changed, 89 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index 4a79a551c..2833b5cd8 100644 --- a/NEWS +++ b/NEWS @@ -62,6 +62,12 @@ New in spot 2.12.0.dev (not yet released) and B accept the same language, any word accepted by A from state x can be accepted in B from some state in V[x]. + That function also has a variant spot::match_states(A, f) where f + is an LTL formula. In this case it returns and array of + formulas. If f represents a superset of the language of A, then + any word accepted by A from state x satisfies V[x]. Related to + Issue #591. + Bug fixes: - Generating random formulas without any unary opertor would very diff --git a/THANKS b/THANKS index 307d34999..fcae660e7 100644 --- a/THANKS +++ b/THANKS @@ -5,6 +5,7 @@ Andreas Tollkötter Andrew Wells Anton Pirogov Ayrat Khalimov +Blake C. 
Rawlings Cambridge Yang Caroline Lemieux Christian Dax diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index acf9511b2..09e88acae 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -1887,7 +1887,7 @@ namespace spot bool exprop, bool symb_merge, bool branching_postponement, bool fair_loop_approx, const atomic_prop_set* unobs, tl_simplifier* simplifier, bool unambiguous, - const output_aborter* aborter) + const output_aborter* aborter, bool label_with_ltl) { tl_simplifier* s = simplifier; @@ -2216,8 +2216,9 @@ namespace spot if (orig_f.is_syntactic_guarantee()) a->prop_terminal(true); } - // Set the following to true to preserve state names. - a->release_formula_namer(namer, false); + + // This gives each state a name of label_with_ltl is set. + a->release_formula_namer(namer, label_with_ltl); if (!simplifier) // This should not be deleted before we have registered all propositions. diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index ac7c5645e..717ae6b1b 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -74,6 +74,10 @@ namespace spot /// constructed automaton would become larger than specified by the /// output_aborter. /// + /// \param label_with_ltl keep one LTL formula equivalent the + /// language recognized by each state, and use that to name each + /// state. + /// /// \return A spot::twa_graph that recognizes the language of \a f. SPOT_API twa_graph_ptr ltl_to_tgba_fm(formula f, const bdd_dict_ptr& dict, @@ -83,5 +87,6 @@ namespace spot const atomic_prop_set* unobs = nullptr, tl_simplifier* simplifier = nullptr, bool unambiguous = false, - const output_aborter* aborter = nullptr); + const output_aborter* aborter = nullptr, + bool label_with_ltl = false); } diff --git a/spot/twaalgos/matchstates.cc b/spot/twaalgos/matchstates.cc index 14edfe18b..0d9595501 100644 --- a/spot/twaalgos/matchstates.cc +++ b/spot/twaalgos/matchstates.cc @@ -20,6 +20,8 @@ #include #include #include +#include +#include namespace spot { @@ -45,6 +47,43 @@ namespace spot return v; } + std::vector + match_states(const const_twa_graph_ptr& aut1, formula f) + { + twa_graph_ptr aut2 = ltl_to_tgba_fm(f, aut1->get_dict(), + false /* exprop */, + true /* symbolic merge */, + false /* branching postponement */, + false /* fair loop approx. */, + nullptr /* unobs event */, + nullptr /* simplifier */, + false /* unambiguous */, + nullptr /* aborter */, + true /* label with LTL */); + auto state_names = + aut2->get_named_prop>("state-names"); + auto v = match_states(aut1, aut2); + unsigned sz1 = aut1->num_states(); + unsigned sz2 = aut2->num_states(); + // State are labeled with strings, but we know those strings to + // represent LTL formulas, so convert those. 
+ std::vector state_formulas; + state_formulas.reserve(sz2); + for (unsigned i = 0; i < sz2; ++i) + state_formulas.push_back(parse_formula((*state_names)[i])); + std::vector res; + res.reserve(sz1); + + std::vector disjuncts; + for (unsigned i = 0; i < sz1; ++i) + { + disjuncts.clear(); + for (unsigned j: v[i]) + disjuncts.push_back(state_formulas[j]); + res.push_back(formula::Or(disjuncts)); + } + return res; + } } diff --git a/spot/twaalgos/matchstates.hh b/spot/twaalgos/matchstates.hh index 7bd972402..3c9bb6236 100644 --- a/spot/twaalgos/matchstates.hh +++ b/spot/twaalgos/matchstates.hh @@ -20,6 +20,7 @@ #include #include +#include #include namespace spot @@ -37,4 +38,21 @@ namespace spot SPOT_API std::vector> match_states(const const_twa_graph_ptr& aut1, const const_twa_graph_ptr& aut2); + + /// \ingroup twa_algorithms \brief match the states of \a aut with + /// formulas "reachable" from \a f. + /// + /// The returned vector V assigns each state `x` of \a aut to a + /// formula `V[x]`. + /// + /// This translates \a f as an automaton B in which states are labeled + /// by formulas, match the states of \a aut with the states of B, and + /// use that to find formulas associated to each state of \a aut. + /// + /// In particular, if the language of \a f is a superset of the + /// language of \a aut, then every word accepted in \a aut from + /// state `x` will satisfy formula `V[x]`. However `V[x]` may + /// accept more than the words accepted from `a` in \a aut. + SPOT_API std::vector + match_states(const const_twa_graph_ptr& aut, formula f); } diff --git a/tests/python/matchstates.py b/tests/python/matchstates.py index 050a84b84..f5d8102be 100644 --- a/tests/python/matchstates.py +++ b/tests/python/matchstates.py @@ -60,3 +60,18 @@ m3 = spot.match_states(a, c) tc.assertEqual(m3, ((), (), (), ())) m4 = spot.match_states(c, a) tc.assertEqual(m4, ((), )) + +f = spot.formula("Ga | Gb | Gc") +m5 = spot.match_states(a, f) +tc.assertEqual(m5, (f, f, f, f)) +m6 = spot.match_states(b, f) +tc.assertEqual(m6, (spot.formula("Gc"), + spot.formula("Gb | Gc"), + spot.formula("Gb"), + spot.formula("Ga"), + spot.formula("Ga | Gc"), + spot.formula("Ga | Gb"), + spot.formula("Ga | Gb | Gc"))) + +m7 = spot.match_states(c, f) # Note that f is not the formula for c +tc.assertEqual(m7, (spot.formula("0"),)) From c5d991e55c243998622cb2f10d4185c26fa4bdb1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 30 Aug 2024 11:38:26 +0200 Subject: [PATCH 480/606] autfilt: add a --track-formula option Fixes #591. * spot/twaalgos/matchstates.cc, spot/twaalgos/matchstates.hh (match_states_decorate): New function. * bin/autfilt.cc: Add a --track-formula option. * tests/core/trackf.test: New file. * tests/Makefile.am: Test it. * NEWS: Mention it. --- NEWS | 3 + bin/autfilt.cc | 18 ++++- spot/twaalgos/matchstates.cc | 18 ++++- spot/twaalgos/matchstates.hh | 13 +++- tests/Makefile.am | 1 + tests/core/trackf.test | 123 +++++++++++++++++++++++++++++++++++ 6 files changed, 171 insertions(+), 5 deletions(-) create mode 100755 tests/core/trackf.test diff --git a/NEWS b/NEWS index 2833b5cd8..a288b097c 100644 --- a/NEWS +++ b/NEWS @@ -26,6 +26,9 @@ New in spot 2.12.0.dev (not yet released) edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. + - autfilt learned --track-formula=F to label states with formulas + derived from F. (This is more precise on deterministic automata.) 
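For example (an illustrative command line; the formula is arbitrary and
-d merely selects the graphical output):

    ltl2tgba -D 'GFa -> GFb' | autfilt --track-formula='GFa -> GFb' -d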
+ - ltlfilt learned --pi1, --sigma1, --delta1, --pi2, --sigma2, and --delta2 to filter according to classes Π₁,Σ₁,Δ₁,Π₂,Σ₂, and Δ₂. diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 1c063af65..0252a0562 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -63,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -165,6 +167,7 @@ enum { OPT_SUM_AND, OPT_TERMINAL_SCCS, OPT_TO_FINITE, + OPT_TRACK_FORMULA, OPT_TRIV_SCCS, OPT_USED_AP_N, OPT_UNUSED_AP_N, @@ -413,6 +416,7 @@ static const argp_option options[] = "Convert an automaton with \"alive\" and \"!alive\" propositions " "into a Büchi automaton interpretable as a finite automaton. " "States with a outgoing \"!alive\" edge are marked as accepting.", 0 }, + /**************************************************/ { nullptr, 0, nullptr, 0, "Decorations (for -d and -H1.1 output):", 9 }, { "highlight-accepting-run", OPT_HIGHLIGHT_ACCEPTING_RUN, "NUM", OPTION_ARG_OPTIONAL, "highlight one accepting run using color NUM", 0}, @@ -426,9 +430,12 @@ static const argp_option options[] = OPTION_ARG_OPTIONAL, "highlight nondeterministic states and edges with color NUM", 0}, { "highlight-word", OPT_HIGHLIGHT_WORD, "[NUM,]WORD", 0, - "highlight one run matching WORD using color NUM", 0}, + "highlight one run matching WORD using color NUM", 0 }, { "highlight-languages", OPT_HIGHLIGHT_LANGUAGES, nullptr, 0 , - "highlight states that recognize identical languages", 0}, + "highlight states that recognize identical languages", 0 }, + { "track-formula", OPT_TRACK_FORMULA, "FORMULA", 0, + "attempt to label the states of the automaton assuming the automaton " + "recognize FORMULA (use deterministic automata for precision)", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "If any option among --small, --deterministic, or --any is given, " @@ -716,6 +723,7 @@ static int opt_highlight_nondet_states = -1; static int opt_highlight_nondet_edges = -1; static int opt_highlight_accepting_run = -1; static bool opt_highlight_languages = false; +static spot::formula opt_track_formula = nullptr; static bool opt_dca = false; static bool opt_streett_like = false; static bool opt_enlarge_acceptance_set = false; @@ -1271,6 +1279,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_TO_FINITE: opt_to_finite = arg ? 
arg : "alive"; break; + case OPT_TRACK_FORMULA: + opt_track_formula = spot::parse_formula(arg); + break; case OPT_TRIV_SCCS: opt_triv_sccs = parse_range(arg, 0, std::numeric_limits::max()); opt_art_sccs_set = true; @@ -1708,6 +1719,9 @@ namespace else if (opt_separate_edges) aut = spot::separate_edges(aut); + if (opt_track_formula) + match_states_decorate(aut, opt_track_formula); + if (opt_to_finite) aut = spot::to_finite(aut, opt_to_finite); diff --git a/spot/twaalgos/matchstates.cc b/spot/twaalgos/matchstates.cc index 0d9595501..b87401706 100644 --- a/spot/twaalgos/matchstates.cc +++ b/spot/twaalgos/matchstates.cc @@ -22,6 +22,8 @@ #include #include #include +#include +#include namespace spot { @@ -73,6 +75,8 @@ namespace spot for (unsigned i = 0; i < sz2; ++i) state_formulas.push_back(parse_formula((*state_names)[i])); + tl_simplifier tls(tl_simplifier_options(2)); + std::vector res; res.reserve(sz1); @@ -82,8 +86,20 @@ namespace spot disjuncts.clear(); for (unsigned j: v[i]) disjuncts.push_back(state_formulas[j]); - res.push_back(formula::Or(disjuncts)); + res.push_back(tls.simplify(formula::Or(disjuncts))); } return res; } + + void + match_states_decorate(twa_graph_ptr& aut, formula f) + { + std::vector v = spot::match_states(aut, f); + auto* n = new std::vector; + n->reserve(v.size()); + for (spot::formula f: v) + n->push_back(str_psl(f)); + aut->set_named_prop("state-names", n); + } + } diff --git a/spot/twaalgos/matchstates.hh b/spot/twaalgos/matchstates.hh index 3c9bb6236..8061993a4 100644 --- a/spot/twaalgos/matchstates.hh +++ b/spot/twaalgos/matchstates.hh @@ -39,8 +39,9 @@ namespace spot match_states(const const_twa_graph_ptr& aut1, const const_twa_graph_ptr& aut2); - /// \ingroup twa_algorithms \brief match the states of \a aut with - /// formulas "reachable" from \a f. + /// \ingroup twa_algorithms + /// \brief match the states of \a aut with formulas "reachable" from + /// \a f. /// /// The returned vector V assigns each state `x` of \a aut to a /// formula `V[x]`. @@ -55,4 +56,12 @@ namespace spot /// accept more than the words accepted from `a` in \a aut. SPOT_API std::vector match_states(const const_twa_graph_ptr& aut, formula f); + + + /// \ingroup twa_algorithms + /// + /// \brief label the state of \a aut with the result of + /// `match_states(aut,f)`. + SPOT_API void + match_states_decorate(twa_graph_ptr& aut, formula f); } diff --git a/tests/Makefile.am b/tests/Makefile.am index 16b283077..898bcccd8 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -316,6 +316,7 @@ TESTS_twa = \ core/sbacc.test \ core/stutter-tgba.test \ core/strength.test \ + core/trackf.test \ core/emptchk.test \ core/emptchke.test \ core/dfs.test \ diff --git a/tests/core/trackf.test b/tests/core/trackf.test new file mode 100755 index 000000000..a5958b59a --- /dev/null +++ b/tests/core/trackf.test @@ -0,0 +1,123 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +ltl2tgba -D 'Ga | Gb | Gc' | + autfilt --track-formula='Ga | Gb | Gc' > out +cat >exp < out +cat >exp < out +cat >exp < Date: Fri, 30 Aug 2024 16:05:50 +0200 Subject: [PATCH 481/606] game: fix solving "parity min" games with multi-colored edges * spot/twaalgos/game.cc: If the original acceptance is "parity min", use min_set(), not max_set(), to read edge priorities. * tests/python/game.py: Add a test case. * NEWS: Mention the bug. --- NEWS | 5 +++++ spot/twaalgos/game.cc | 3 ++- tests/python/game.py | 25 ++++++++++++++++++++++++- 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index a288b097c..c9bfab98e 100644 --- a/NEWS +++ b/NEWS @@ -76,6 +76,11 @@ New in spot 2.12.0.dev (not yet released) - Generating random formulas without any unary opertor would very often create formulas much smaller than specified. + - The parity game solver, which internally works on "parity max + odd", but actually accept any type of parity acceptance, could be + confused by games with "parity min" acceptance using transition + with several colors (a rather uncommon situation). + New in spot 2.12 (2024-05-16) Build: diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 404fa4778..e2c550531 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -357,7 +357,8 @@ namespace spot // Takes an edge and returns the "equivalent" max odd parity auto equiv_par = [max, odd, next_max_par, inv = 2*max-1](const auto& e) { - par_t e_par = e.acc.max_set() - 1; // -1 for empty + par_t e_par = + (max ? e.acc.max_set() : e.acc.min_set()) - 1; // -1 for empty // If "min" and empty -> set to n if (!max & (e_par == -1)) e_par = next_max_par; diff --git a/tests/python/game.py b/tests/python/game.py index 3462cc7d2..f8007c372 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -368,6 +368,21 @@ tc.assertTrue(all([wn == wr for (wn, wr, p) in zip(winners_new, winners_ref, players_ref) if not p])) +def maximize_colors(aut, is_max): + ns = aut.num_sets() + v = [] + if is_max: + for c in range(ns+1): + v.append(spot.mark_t(list(range(c)))) + for e in aut.edges(): + e.acc = v[e.acc.max_set()] + else: + for c in range(ns+1): + v.append(spot.mark_t(list(range(c, ns)))) + v.insert(0, spot.mark_t([])) + for e in aut.edges(): + e.acc = v[e.acc.min_set()] + # Test the different parity conditions gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), False, True, True, False) @@ -387,6 +402,14 @@ for kind in [spot.parity_kind_min, spot.parity_kind_max]: tc.assertTrue(spot.solve_parity_game(g_test_split1)) c_strat1 = spot.get_strategy(g_test_split1) tc.assertTrue(c_strat == c_strat1) + # Same test, but adding a lot of useless colors in the game + g_test_split2 = spot.change_parity(g_test_split, kind, style) + maximize_colors(g_test_split2, kind == spot.parity_kind_max) + spot.set_state_players(g_test_split2, sp) + tc.assertTrue(spot.solve_parity_game(g_test_split2)) + c_strat2 = spot.get_strategy(g_test_split2) + tc.assertTrue(c_strat == c_strat2) + # Test that strategies are not appended # if solve is called multiple times @@ -537,4 +560,4 @@ f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ && (! (F (G ((request_1) && (X (! 
(grant_1)))))))))" outs = ["grant_0", "grant1"] -tc.assertEqual(synt_ltlf(f1, outs)[0], False) \ No newline at end of file +tc.assertEqual(synt_ltlf(f1, outs)[0], False) From c9911962d4e59b45d8eef34f2155b98ecbcad6c1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 13:50:36 +0200 Subject: [PATCH 482/606] python: improve support of spot-extra, and recent swig I could not run "make check" in a copy of seminator 2.0 regenerated with swig 4.0, because of changes in the way Swig imports its shared libraries. * python/spot/__init__.py: If sys.path contains "/spot-extra" directory, add it to spot.__path__ as well. This helps situations where a plugin use libtool and the development tree has the shared libraries in .../spot-extra/.libs/ --- python/spot/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 1c6133390..6f571f932 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -38,11 +38,17 @@ if 'SPOT_UNINSTALLED' in os.environ: # We may have third-party plugins that want to be loaded as "spot.xxx", but # that are installed in a different $prefix. This sets things so that any # file that looks like spot-extra/xxx.py can be loaded with "import spot.xxx". +# When libtool is used in a development build, it is likely that PYTHONPATH +# is already set up to contains something like .../spot-extra/.libs, so we +# want to copy those as well. for path in sys.path: if path not in __path__: - path += "/spot-extra" - if os.path.isdir(path): + if "/spot-extra" in path: __path__.append(path) + else: + path += "/spot-extra" + if os.path.isdir(path): + __path__.append(path) from spot.impl import * @@ -58,7 +64,7 @@ from spot.aux import \ ostream_to_svg as _ostream_to_svg -# The parrameters used by default when show() is called on an automaton. +# The parameters used by default when show() is called on an automaton. _show_default = None From 8eec295e231bbdaf114b20f7ce95d215e4213c71 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 13:54:36 +0200 Subject: [PATCH 483/606] * configure.ac: Typo. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 4ec7633e4..e85feb8cf 100644 --- a/configure.ac +++ b/configure.ac @@ -220,7 +220,7 @@ AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate]) AX_CHECK_VALGRIND # Debian used to reserve the name 'swig' for swig-2.0. So prefer # swig4.0 (available in Debian bullseye) to swig3.0 (available in Debian buster) -# ti swig. +# to swig. AC_CHECK_PROGS([SWIG], [swig4.0 swig3.0 swig], [swig]) AC_SUBST([CROSS_COMPILING], [$cross_compiling]) From a8a0a1973efce724d9a1ac81bfab217486e5c70e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 17:26:42 +0200 Subject: [PATCH 484/606] ltlsynt: fix usage for --dot's argument * bin/ltlsynt.cc (dispatch_print_hoa): Pass the right argument to print_dot. * tests/core/ltlsynt.test: Test it. * NEWS: Mention the bug. --- NEWS | 2 ++ bin/ltlsynt.cc | 2 +- tests/core/ltlsynt.test | 6 +++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index c9bfab98e..f53ef6ece 100644 --- a/NEWS +++ b/NEWS @@ -81,6 +81,8 @@ New in spot 2.12.0.dev (not yet released) confused by games with "parity min" acceptance using transition with several colors (a rather uncommon situation). + - "ltlsynt ... --print-game --dot=ARGS" was ignoring ARGS. 
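+    For example, the following invocation (taken from the test case
+    added to tests/core/ltlsynt.test) now really passes "bar" to the
+    dot printer instead of silently ignoring it:
+
+      ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot=bar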
+ New in spot 2.12 (2024-05-16) Build: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 9e7aee595..bc4bd1561 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -324,7 +324,7 @@ namespace rs->patch_game(game); if (opt_dot) - spot::print_dot(std::cout, game, opt_print_hoa_args); + spot::print_dot(std::cout, game, opt_dot_arg); else if (opt_print_pg) spot::print_pg(std::cout, game); else diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 9500550ea..7b87f03aa 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1101,7 +1101,11 @@ diff outx exp # Test --dot and --hide-status ltlsynt -f 'i <-> Fo' --ins=i --aiger --dot | grep arrowhead=dot -ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot | grep 'shape="diamond"' +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot > out +grep 'shape="diamond"' out +grep 'Inf(0)' out +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot=bar > out +grep 'label= Fo' --ins=i --dot --hide-status > res cat >exp < Date: Mon, 2 Sep 2024 17:28:28 +0200 Subject: [PATCH 485/606] fix spurious g++-14 warning * spot/twaalgos/mealy_machine.cc (mm_sat_prob_t::get_sol): Here. --- spot/twaalgos/mealy_machine.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index e63193cdc..5021f2a94 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -2604,9 +2604,14 @@ namespace return {}; case PICOSAT_SATISFIABLE: { - std::vector - res(1 + (unsigned) picosat_variables(lm.psat_), -1); - SPOT_ASSUME(res.data()); // g++ 11 believes data might be nullptr + unsigned nvar = 1 + (unsigned) picosat_variables(lm.psat_); + // Asssuming res.data() non-null was enough to prevent g++ + // 11 from issuing a spurious "potential null pointer + // dereference" on the res[0] assignment. Since g++14 we + // also need to assume nvar>0. + SPOT_ASSUME(nvar > 0); + std::vector res(nvar, -1); + SPOT_ASSUME(res.data()); res[0] = 0; // Convention for (int lit : lm.all_lits) res[lit] = picosat_deref(lm.psat_, lit); From e6ebbdf65fae199692ba6c9ab90dfab4da7b1ec3 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 3 Sep 2024 14:20:17 +0200 Subject: [PATCH 486/606] ltlfilt, ltlsynt, ltlmix: add a --part-file option * bin/common_ioap.cc, bin/common_ioap.hh (read_part_file): New function. * bin/ltlfilt.cc, bin/ltlmix.cc, bin/ltlsynt.cc: Use it. * doc/org/ltlfilt.org, doc/org/ltlmix.org, doc/org/ltlsynt.org: Mention that new option, and improve the links to its description in ltlsynt.org. * NEWS: Mention the new option. * tests/core/ltlfilt.test, tests/core/ltlmix.test, tests/core/ltlsynt.test: Adjust test cases. --- NEWS | 9 +- bin/common_ioap.cc | 70 ++++++++++++++- bin/common_ioap.hh | 3 + bin/ltlfilt.cc | 23 ++--- bin/ltlmix.cc | 31 ++++--- bin/ltlsynt.cc | 22 ++--- doc/org/ltlfilt.org | 18 ++-- doc/org/ltlmix.org | 6 +- doc/org/ltlsynt.org | 193 ++++++++++++++++++++++------------------ tests/core/ltlfilt.test | 12 +++ tests/core/ltlmix.test | 16 ++++ tests/core/ltlsynt.test | 6 +- 12 files changed, 271 insertions(+), 138 deletions(-) diff --git a/NEWS b/NEWS index f53ef6ece..b255d1319 100644 --- a/NEWS +++ b/NEWS @@ -2,9 +2,13 @@ New in spot 2.12.0.dev (not yet released) Command-line tools: - - ltlmix is a new tool that generate formulas by combining existing + - ltlmix is a new tool that generates formulas by combining existing ones. See https://spot.lre.epita.fr/ltlmix.html for examples. 
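+    For instance, the following commands (borrowed from the new
+    tests/core/ltlmix.test; see the ltlmix documentation for the
+    meaning of -C4 and -A3,3) combine the three given formulas into
+    10 random specifications whose atomic propositions follow the
+    input/output partition listed in part.part:
+
+      echo '.inputs a c .outputs b d' > part.part
+      ltlmix -fXa -fGb -f'c U d' --part-file=part.part -C4 -A3,3 -n10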
+ - ltlsynt learned a --part-file option, to specify the partition of + input/output proposition from a *.part file, as used in several + other tools. + - ltlfilt learned a --relabel=io mode, that is useful to shorten atomic propositions in the context of LTL synthesis. For instance @@ -13,7 +17,8 @@ New in spot 2.12.0.dev (not yet released) The resulting formulas are now usable by ltlsynt without having to specify which atomic propositions are input or output, as this can - be inferred from their name. + be inferred from their name. (This suspports a --part-file option + as well.) - genltl learned --lily-patterns to generate the example LTL synthesis specifications from Lily 1.0.2. Those come with input diff --git a/bin/common_ioap.cc b/bin/common_ioap.cc index 65e05c7ca..6bb246a63 100644 --- a/bin/common_ioap.cc +++ b/bin/common_ioap.cc @@ -18,6 +18,7 @@ #include "common_ioap.hh" #include "error.h" +#include #include // --ins and --outs, as supplied on the command-line @@ -31,6 +32,8 @@ std::vector regex_out; // map identifier to input/output (false=input, true=output) std::unordered_map identifier_map; +static bool a_part_file_was_read = false; + static std::string str_tolower(std::string s) { @@ -71,7 +74,10 @@ void process_io_options() regex_out.push_back(std::regex(f.substr(1, sz - 2))); else if (auto [it, is_new] = identifier_map.try_emplace(f, true); !is_new && !it->second) - error(2, 0, "'%s' appears in both --ins and --outs", + error(2, 0, + a_part_file_was_read ? + "'%s' appears in both inputs and outputs" : + "'%s' appears in both --ins and --outs", f.c_str()); } } @@ -125,19 +131,23 @@ is_output(const std::string& a, const char* filename, int linenum) } if (found_in && found_out) error_at_line(2, 0, filename, linenum, + a_part_file_was_read ? + "'%s' matches both inputs and outputs" : "'%s' matches both --ins and --outs", a.c_str()); if (!found_in && !found_out) { if (all_input_aps.has_value() || all_output_aps.has_value()) error_at_line(2, 0, filename, linenum, + a_part_file_was_read ? + "'%s' does not match any input or output" : "one of --ins or --outs should match '%s'", a.c_str()); else error_at_line(2, 0, filename, linenum, "since '%s' does not start with 'i' or 'o', " "it is unclear if it is an input or " - "an output;\n use --ins or --outs", + "an output;\n use --ins, --outs, or --part-file", a.c_str()); } } @@ -200,3 +210,59 @@ spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro, } return spot::relabel_apply(f, &to); } + +// Read FILENAME as a ".part" file. It should +// contains lines of text of the following form: +// +// .inputs IN1 IN2 IN3... +// .outputs OUT1 OUT2 OUT3... +void read_part_file(const char* filename) +{ + std::ifstream in(filename); + if (!in) + error(2, errno, "cannot open '%s'", filename); + + // This parsing is inspired from Lily's parser for .part files. We + // read words one by one, and change the "mode" if we the word is + // ".inputs" or ".outputs". A '#' introduce a comment until the end + // of the line. + std::string word; + enum { Unknown, Input, Output } mode = Unknown; + while (in >> word) + { + // The benchmarks for Syft use ".inputs:" instead of ".inputs". + if (word == ".inputs" || word == ".inputs:") + { + mode = Input; + if (!all_input_aps.has_value()) + all_input_aps.emplace(); + } + // The benchmarks for Syft use ".outputs:" instead of ".outputs". 
+ else if (word == ".outputs" || word == ".outputs:") + { + mode = Output; + if (!all_output_aps.has_value()) + all_output_aps.emplace(); + } + else if (word[0] == '#') + { + // Skip the rest of the line. + in.ignore(std::numeric_limits::max(), '\n'); + } + else if (mode == Unknown) + { + error_at_line(2, 0, filename, 0, + "expected '.inputs' or '.outputs' instead of '%s'", + word.c_str()); + } + else if (mode == Input) + { + all_input_aps->push_back(str_tolower(word)); + } + else /* mode == Output */ + { + all_output_aps->push_back(str_tolower(word)); + } + } + a_part_file_was_read = true; +} diff --git a/bin/common_ioap.hh b/bin/common_ioap.hh index 575e749bf..f5ff41f01 100644 --- a/bin/common_ioap.hh +++ b/bin/common_ioap.hh @@ -64,3 +64,6 @@ filter_list_of_aps(spot::formula f, const char* filename, int linenum); // Relabel APs incrementally, based on i/o class. spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro, const char* filename, int linenum); + +// Read a .part file. +void read_part_file(const char* filename); diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 8509b4c9f..4c17f2214 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -120,6 +120,7 @@ enum { OPT_SYNTACTIC_SI, OPT_TO_DELTA2, OPT_OUTS, + OPT_PART_FILE, OPT_UNABBREVIATE, OPT_UNIVERSAL, }; @@ -187,6 +188,9 @@ static const argp_option options[] = { "outs", OPT_OUTS, "PROPS", 0, "comma-separated list of output atomic propositions to use with " "--relabel=io, interpreted as a regex if enclosed in slashes", 0 }, + { "part-file", OPT_PART_FILE, "FILENAME", 0, + "file containing the partition of atomic propositions to use with " + "--relabel=io", 0 }, DECLARE_OPT_R, LEVEL_DOC(4), /**************************************************/ @@ -516,11 +520,9 @@ parse_opt(int key, char* arg, struct argp_state*) break; } case OPT_INS: - { - all_input_aps.emplace(std::vector{}); - split_aps(arg, *all_input_aps); - break; - } + all_input_aps.emplace(); + split_aps(arg, *all_input_aps); + break; case OPT_LIVENESS: liveness = true; break; @@ -537,11 +539,12 @@ parse_opt(int key, char* arg, struct argp_state*) nnf = true; break; case OPT_OUTS: - { - all_output_aps.emplace(std::vector{}); - split_aps(arg, *all_output_aps); - break; - } + all_output_aps.emplace(); + split_aps(arg, *all_output_aps); + break; + case OPT_PART_FILE: + read_part_file(arg); + break; case OPT_SONF: sonf = arg ? 
arg : "sonf_"; break; diff --git a/bin/ltlmix.cc b/bin/ltlmix.cc index a97c807d0..0cbc4ac48 100644 --- a/bin/ltlmix.cc +++ b/bin/ltlmix.cc @@ -40,6 +40,7 @@ enum { OPT_INS, OPT_LTL_PRIORITIES, OPT_OUTS, + OPT_PART_FILE, OPT_SEED, OPT_TREE_SIZE, }; @@ -102,8 +103,10 @@ static const argp_option options[] = { "comma-separated list of atomic propositions to consider as input, " "interpreted as a regex if enclosed in slashes", 0 }, { "outs", OPT_OUTS, "PROPS", 0, - "comma-separated list of atomic propositions to consider as putput, " + "comma-separated list of atomic propositions to consider as output, " "interpreted as a regex if enclosed in slashes", 0 }, + { "part-file", OPT_PART_FILE, "FILENAME", 0, + "read the I/O partition of atomic propositions from FILENAME", 0 }, RANGE_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Adjusting probabilities:", 4 }, @@ -253,19 +256,19 @@ parse_opt(int key, char* arg, struct argp_state*) opt_unique = false; break; case OPT_INS: - { - all_input_aps.emplace(std::vector{}); - split_aps(arg, *all_input_aps); - opt_io = true; - break; - } + all_input_aps.emplace(); + split_aps(arg, *all_input_aps); + opt_io = true; + break; case OPT_OUTS: - { - all_output_aps.emplace(std::vector{}); - split_aps(arg, *all_output_aps); - opt_io = true; - break; - } + all_output_aps.emplace(); + split_aps(arg, *all_output_aps); + opt_io = true; + break; + case OPT_PART_FILE: + read_part_file(arg); + opt_io = true; + break; case OPT_SEED: opt_seed = to_int(arg, "--seed"); break; @@ -303,7 +306,7 @@ main(int argc, char* argv[]) if (opt_io && !opt_out_ap_count) error(2, 0, - "options --ins and --outs only make sense when the " + "options --ins, --outs, --part-file only make sense when the " "two-argument version of '-A N,M' or '-P N,M' is used."); if (opt_out_ap_count > 0) // Do not require --ins/--outs to be used, as the input diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index bc4bd1561..f465e2bf7 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -57,6 +57,7 @@ enum OPT_HIDE, OPT_INPUT, OPT_OUTPUT, + OPT_PART_FILE, OPT_POLARITY, OPT_PRINT, OPT_PRINT_AIGER, @@ -79,6 +80,8 @@ static const argp_option options[] = { "ins", OPT_INPUT, "PROPS", 0, "comma-separated list of uncontrollable (a.k.a. input) atomic" " propositions, interpreted as a regex if enclosed in slashes", 0 }, + { "part-file", OPT_PART_FILE, "FILENAME", 0, + "read the I/O partition of atomic propositions from FILENAME", 0 }, { "tlsf", OPT_TLSF, "FILENAME", 0, "Read a TLSF specification from FILENAME, and call syfco to " "convert it into LTL", 0 }, @@ -993,17 +996,16 @@ parse_opt(int key, char *arg, struct argp_state *) show_status = false; break; case OPT_INPUT: - { - all_input_aps.emplace(std::vector{}); - split_aps(arg, *all_input_aps); - break; - } + all_input_aps.emplace(); + split_aps(arg, *all_input_aps); + break; case OPT_OUTPUT: - { - all_output_aps.emplace(std::vector{}); - split_aps(arg, *all_output_aps); - break; - } + all_output_aps.emplace(); + split_aps(arg, *all_output_aps); + break; + case OPT_PART_FILE: + read_part_file(arg); + break; case OPT_POLARITY: opt_polarity = XARGMATCH("--polarity", arg, polarity_args, polarity_values); diff --git a/doc/org/ltlfilt.org b/doc/org/ltlfilt.org index a5ae9f3f0..d28d265eb 100644 --- a/doc/org/ltlfilt.org +++ b/doc/org/ltlfilt.org @@ -314,7 +314,7 @@ to be told using options such as =--ins= or =--outs= which atomic propositions are input or output. 
Often these atomic propositions can have very long names, so it is useful to be able to rename them without fogeting about their nature. Option =--relabel=io= -combined with one if =--ins= or =--outs= will do exactly that: +combined with one if =--ins=, =--outs=, or =--part-file= will do exactly that: #+BEGIN_SRC sh ltlfilt -f 'G(req -> Fack) & G(go -> Fgrant)' --relabel=io --ins=req,go @@ -322,16 +322,14 @@ ltlfilt -f 'G(req -> Fack) & G(go -> Fgrant)' --relabel=io --ins=req,go #+RESULTS: : G(i1 -> Fo1) & G(i0 -> Fo0) -Like in [[file:ltlsynt.org][=ltlsynt=]], options =--ins= and =--outs= take a comma-separated -list of atomic propositions as argument. Additionally, if an atomic -proposition in this list is enclosed in slashes (as in -=--out=req,/^go/=), it is used as a regular expression for matching -atomic propositions. +The syntax for options =--ins=, =--outs= and =--part-file= is the +same as for [[file:ltlsynt.org::#input-options][=ltlsynt=]]. -By the way, such an IO-renamed formula can be given to [[file:ltlsynt.org][=ltlsynt=]] without -having to specify =--ins= or =--outs=, because when these two options -are missing the convention is that anything starting with =i= is an -input, and anything starting with =o= is an output. +By the way, such an IO-renamed formula can be given to [[file:ltlsynt.org][=ltlsynt=]] +without having to specify =--ins=, =--outs=, or =--part-file=, because +when these two options are missing the convention is that anything +starting with =i= is an input, and anything starting with =o= is an +output. An example showing how to use the =--from-ltlf= option is on [[file:tut12.org][a separate page]]. diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index b8555592e..c62a649e5 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -489,6 +489,6 @@ chose from may help to get more realizable formulas. When the original LTL synthesis specification formulas have atomic -proposition that do not start with =i= or =o=, options =--ins= and -=--outs= can be used to specify the nature of the atomic propositions. -These options work as with [[file:ltlsynt.org][=ltlsynt=]]. +proposition that do not start with =i= or =o=, options =--ins=, +=--outs=, or =--part-file= can be used to specify the nature of the +atomic propositions. These options work as [[file:ltlsynt.org::#input-options][=ltlsynt='s input options]]. diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index be1c8d085..93c92e29b 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -9,7 +9,7 @@ This tool synthesizes reactive controllers from LTL/PSL formulas. -Consider a set $I$ of /input/ atomic propositions, a set $O$ of output atomic +Consider a set $I$ of /input/ atomic propositions, a set $O$ of /output/ atomic propositions, and a PSL formula \phi over the propositions in $I \cup O$. A *reactive controller* realizing \phi is a function $c: (2^{I})^\star \times 2^I \mapsto 2^O$ such that, for every \omega-word $(u_i)_{i \in N} \in (2^I)^\omega$ over @@ -21,46 +21,34 @@ exists. Such controllers are easily represented as automata (or more specifically as Mealy machines). In the automaton representing the controller, the acceptance condition is irrelevant and trivially true. -=ltlsynt= has three mandatory options: -- =--ins=: a comma-separated list of input atomic propositions, or input regexes enclosed in slashes; -- =--outs=: a comma-separated list of output atomic propositions, or output regexes enclosed in slashes; -- =--formula= or =--file=: a specification in LTL or PSL. 
- -One of =--ins= or =--outs= may be omitted, as any atomic proposition -not listed as input can be assumed to be output and vice versa. If -both are omitted, =ltlsynts= will assume ~--ins=/^[iI]/~ and -~--outs=/^[oO]/~, i.e., atomic propositions will be classified as -input or output based on their first letter. - -The following example illustrates the synthesis of a controller -ensuring that input =i1= and =i2= are both true initially if and only -if eventually output =o1= will go from true to false at some point. -Note that this is an equivalence, not an implication. +Here is a small example where $I=\{i_1,i_2\}$ and $O=\{o_1\}$. The +specification asks that $o_1$ hold at some point if and only if $i_1$ +and $i_2$ hold one after the other at some point. #+NAME: example #+BEGIN_SRC sh :exports both -ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' +ltlsynt -f 'F(i1 & Xi2) <-> F(o1)' #+END_SRC #+RESULTS: example #+begin_example REALIZABLE HOA: v1 -States: 3 +States: 2 Start: 0 -AP: 3 "i1" "i2" "o1" +AP: 3 "i1" "o1" "i2" acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic -controllable-AP: 2 +controllable-AP: 1 --BODY-- State: 0 -[0&1&2] 1 -[!0&2 | !1&2] 2 +[!0&!1] 0 +[0&!1] 1 State: 1 -[!2] 1 -State: 2 -[2] 2 +[!0&!1&!2] 0 +[0&!1&!2] 1 +[1&2] 1 --END-- #+end_example @@ -78,7 +66,7 @@ to visualize this machine. #+NAME: exampledot #+BEGIN_SRC sh :exports code -ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --dot +ltlsynt -f 'F(i1 & Xi2) <-> F(o1)' --hide-status --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntex.svg :var txt=exampledot :exports results @@ -99,75 +87,49 @@ ltlsynt --ins=a -f 'F a' #+RESULTS: : UNREALIZABLE -By default, the controller is output in HOA format, but it can be -output as an And-Inverter-Graph in [[http://fmv.jku.at/aiger/][AIGER format]] using the =--aiger= -flag. This is the output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. +* Input options + :PROPERTIES: + :CUSTOM_ID: input-options + :END: -#+NAME: exampleaig -#+BEGIN_SRC sh :exports both -ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --aiger -#+END_SRC +=ltlsynt= require two pieces of information two solve a reactive +LTL/PSL synthesis problem: an LTL (or PSL) formula, and a partition of +its atomic propositions as input and output. -#+RESULTS: exampleaig -#+begin_example -REALIZABLE -aag 18 2 2 1 14 -2 -4 -6 23 -8 37 -7 -10 6 9 -12 4 9 -14 5 10 -16 13 15 -18 2 17 -20 3 10 -22 19 21 -24 7 8 -26 4 24 -28 5 7 -30 27 29 -32 2 31 -34 3 7 -36 33 35 -i0 i1 -i1 i2 -o0 o1 -#+end_example +The specification formula can be passed with =-f/--formula= or +=-F/--file=. If multiple specifications formulas are passed, they +will all be solved individually. -The above format is not very human friendly. Again, by passing both -=--aiger= and =--dot=, one can display the And-Inverter-Graph representing -the controller: +The input/output partition can be given in several ways. If it is +not specified, =ltlsynt= assumes that input variables should start +with =i=, and output variables should start with =o=. -#+NAME: exampleaigdot -#+BEGIN_SRC sh :exports code -ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --aiger --dot -#+END_SRC +Options =--ins= and =--outs= should be followed by a comma-separated +list of input atomic propositions, or input regexes enclosed in +slashes. E.g., =--ins=switch,/^in/,car=. If only one of these +options is given, atomic propositions not matched by that option +are assumed to belong to the other set. 
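+For example (a small sketch; the formula and the proposition names
+are arbitrary):
+
+#+BEGIN_SRC sh
+ltlsynt --ins=switch,/^in/ -f 'G(switch -> F(in1 <-> out))' --realizability
+#+END_SRC
+
+Here =out= is matched by neither pattern, so it is considered an
+output.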
-#+BEGIN_SRC dot :file ltlsyntexaig.svg :var txt=exampleaigdot :exports results - $txt -#+END_SRC +Another way to specify the input/output partition is using a =*.part= +file passed to the =--part-file= option. Such a file is used by +several other synthesis tools. The format is space-separated list of +words representing atomic-propositions. Two keywords =.inputs= and +=.outputs= indicate the set of the atomic-propositions that follow. +For instance: -#+RESULTS: -[[file:ltlsyntexaig.svg]] +#+BEGIN_EXAMPLE +.inputs request cancel +.outputs grant ack +#+END_EXAMPLE -In the above diagram, round nodes represent AND gates. Small black -circles represent inversions (or negations), colored triangles are -used to represent input signals (at the bottom) and output signals (at -the top), and finally rectangles represent latches. A latch is a one -bit register that delays the signal by one step. Initially, all -latches are assumed to contain =false=, and they emit their value from -the =L0_out= and =L1_out= rectangles at the bottom. Their input value, -to be emitted at the next step, is received via the =L0_in= and =L1_in= -boxes at the top. In =ltlsynt='s encoding, the set of latches is used -to keep track of the current state of the Mealy machine. +Using =--part-file=THEABOVEFILE= is equivalent to +=--ins=request,cancel --outs=grant,ack=. -The generation of a controller can be disabled with the flag -=--realizability=. In this case, =ltlsynt='s output is limited to -=REALIZABLE= or =UNREALIZABLE=. +As an extension to this simple =*.part= format, words enclosed in +slashes are interpreted as regexes, like for the =--ins= and =--outs= +options. -* TLSF +* TLSF input =ltlsynt= was made with the [[http://syntcomp.org/][SYNTCOMP]] competition in mind, and more specifically the TLSF track of this competition. TLSF is a high-level @@ -193,6 +155,65 @@ ltlsynt --formula="$LTL" --outs="$OUT" #+END_SRC +* Output options + +By default, the controller is output in HOA format, but it can be +output as an And-Inverter-Graph in [[http://fmv.jku.at/aiger/][AIGER format]] using the =--aiger= +flag. This is the output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. + +#+NAME: exampleaig +#+BEGIN_SRC sh :exports both +ltlsynt -f 'F(i1 & Xi2) <-> F(o1)' --aiger +#+END_SRC + +#+RESULTS: exampleaig +#+begin_example +REALIZABLE +aag 5 2 1 1 2 +2 +4 +6 11 +8 +8 4 6 +10 3 9 +i0 i1 +i1 i2 +o0 o1 +#+end_example + +The above format is not very human friendly. Again, by passing both +=--aiger= and =--dot=, one can display the And-Inverter-Graph representing +the controller: + +#+NAME: exampleaigdot +#+BEGIN_SRC sh :exports code +ltlsynt -f 'F(i1 & Xi2) <-> F(o1)' --hide-status --aiger --dot +#+END_SRC + +#+RESULTS: exampleaigdot + +#+BEGIN_SRC dot :file ltlsyntexaig.svg :var txt=exampleaigdot :exports results + $txt +#+END_SRC + +#+RESULTS: +[[file:ltlsyntexaig.svg]] + +In the above diagram, round nodes represent AND gates. Small black +circles represent inversions (or negations), colored triangles are +used to represent input signals (at the bottom) and output signals (at +the top), and finally rectangles represent latches. A latch is a one +bit register that delays the signal by one step. Initially, all +latches are assumed to contain =false=, and they emit their value from +the =*_out= rectangles at the bottom. Their input value, to be +emitted at the next step, is received via the =*_in= boxes at the top. 
+In =ltlsynt='s encoding, the set of latches is used to keep track of +the current state of the Mealy machine. + +The generation of a controller can be disabled with the flag +=--realizability=. In this case, =ltlsynt='s output is limited to +=REALIZABLE= or =UNREALIZABLE=. + * Internal details The tool reduces the synthesis problem to a parity game, and solves the parity diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 2352c4707..2019151ca 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -626,6 +626,18 @@ run 0 ltlfilt -s -u --relabel=io --ins='/[ab]/' --define in >out diff exp out run 0 ltlfilt -s -u --relabel=io --outs='/[^ab]/' --define in >out diff exp out +echo '.inputs a b' >rel.part +run 0 ltlfilt -s -u --relabel=io --part=rel.part --define in >out +diff exp out +echo '.inputs /[ab]/ # .output ignored' >rel.part +run 0 ltlfilt -s -u --relabel=io --part=rel.part --define in >out +diff exp out +echo '.outputs /[^ab]/ # .input ignored' >rel.part +run 0 ltlfilt -s -u --relabel=io --part=rel.part --define in >out +diff exp out +echo 'error /[^ab]/' >rel.part +run 2 ltlfilt -s -u --relabel=io --part=rel.part --define in 2>err +grep "expected '.inputs' or '.outputs'" err cat >exp < err && exit 1 cat err grep 'ins.*outs' err + +echo '.inputs a c .outputs b d ' > part.part +ltlmix -fXa -fGb -f'c U d' --part-file=part.part -C4 -A3,3 -n10 >out +cat >expected <stderr && : test $? -eq 2 -grep "[-]-ins or --outs" stderr +grep "[-]-ins.*--outs" stderr # Try to find a direct strategy for GFa <-> GFb and a direct strategy for # Gc @@ -1139,11 +1139,15 @@ ltlsynt --decomp=yes -f "$s" --realizability >out ltlsynt --decomp=no --outs='/^o[0-9]*$/' -f "$s" --realizability >>out ltlsynt --decomp=no --outs='/^o[0-9]$/' -f "$s" --realizability >>out && : ltlsynt -f "$s" --ins='/^i[0-9]*$/' --realizability >>out +echo ".inputs i1 i2 i3 i4 i5 i6 i7 i8" > part.part +echo ".outputs /^o1[0-9]*/ o01 o02 o03 o04 o05 o06 o07 o08 o09" >> part.part +ltlsynt -f "$s" --part-file=part.part --realizability >>out cat >expected < Date: Tue, 3 Sep 2024 17:37:59 +0200 Subject: [PATCH 487/606] game: avoid a spurious g++14 warning * spot/twaalgos/game.cc, spot/twaalgos/game.hh (get_state_winners): Declare a non-const version as well to avoid a "possibly dangling reference" error in code show by tut40.org. 
--- spot/twaalgos/game.cc | 33 +++++++++++++++++++++++---------- spot/twaalgos/game.hh | 26 ++++++++++++++++---------- 2 files changed, 39 insertions(+), 20 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index e2c550531..9f739d423 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1085,12 +1085,12 @@ namespace spot return aut; } - void set_state_players(twa_graph_ptr arena, const region_t& owners) + void set_state_players(twa_graph_ptr& arena, const region_t& owners) { set_state_players(arena, region_t(owners)); } - void set_state_players(twa_graph_ptr arena, region_t&& owners) + void set_state_players(twa_graph_ptr& arena, region_t&& owners) { if (owners.size() != arena->num_states()) throw std::runtime_error @@ -1100,7 +1100,7 @@ namespace spot new region_t(std::move(owners))); } - void set_state_player(twa_graph_ptr arena, unsigned state, bool owner) + void set_state_player(twa_graph_ptr& arena, unsigned state, bool owner) { if (state >= arena->num_states()) throw std::runtime_error("set_state_player(): invalid state number"); @@ -1141,7 +1141,7 @@ namespace spot return *owners; } - bool get_state_player(const_twa_graph_ptr arena, unsigned state) + bool get_state_player(const const_twa_graph_ptr& arena, unsigned state) { if (state >= arena->num_states()) throw std::runtime_error("get_state_player(): invalid state number"); @@ -1165,11 +1165,11 @@ namespace spot return *strat_ptr; } - void set_strategy(twa_graph_ptr arena, const strategy_t& strat) + void set_strategy(twa_graph_ptr& arena, const strategy_t& strat) { set_strategy(arena, strategy_t(strat)); } - void set_strategy(twa_graph_ptr arena, strategy_t&& strat) + void set_strategy(twa_graph_ptr& arena, strategy_t&& strat) { if (arena->num_states() != strat.size()) throw std::runtime_error("set_strategy(): strategies need to have " @@ -1214,12 +1214,12 @@ namespace spot } - void set_state_winners(twa_graph_ptr arena, const region_t& winners) + void set_state_winners(twa_graph_ptr& arena, const region_t& winners) { set_state_winners(arena, region_t(winners)); } - void set_state_winners(twa_graph_ptr arena, region_t&& winners) + void set_state_winners(twa_graph_ptr& arena, region_t&& winners) { if (winners.size() != arena->num_states()) throw std::runtime_error @@ -1229,7 +1229,7 @@ namespace spot new region_t(std::move(winners))); } - void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner) + void set_state_winner(twa_graph_ptr& arena, unsigned state, bool winner) { if (state >= arena->num_states()) throw std::runtime_error("set_state_winner(): invalid state number"); @@ -1258,7 +1258,20 @@ namespace spot return *winners; } - bool get_state_winner(const_twa_graph_ptr arena, unsigned state) + // This second version should not be needed, but g++14 emits + // "possibly dangling reference" warnings when it sees that the + // first function is called with a temporary const_twa_graph_ptr to + // return a reference. 
+ const region_t& get_state_winners(twa_graph_ptr& arena) + { + region_t *winners = arena->get_named_prop("state-winner"); + if (!winners) + throw std::runtime_error + ("get_state_winners(): state-winner property not defined, not a game?"); + return *winners; + } + + bool get_state_winner(const const_twa_graph_ptr& arena, unsigned state) { if (state >= arena->num_states()) throw std::runtime_error("get_state_winner(): invalid state number"); diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index c376304be..737a50d78 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -151,20 +151,20 @@ namespace spot /// \brief Set the owner for all the states. /// @{ SPOT_API - void set_state_players(twa_graph_ptr arena, const region_t& owners); + void set_state_players(twa_graph_ptr& arena, const region_t& owners); SPOT_API - void set_state_players(twa_graph_ptr arena, region_t&& owners); + void set_state_players(twa_graph_ptr& arena, region_t&& owners); /// @} /// \ingroup games /// \brief Set the owner of a state. SPOT_API - void set_state_player(twa_graph_ptr arena, unsigned state, bool owner); + void set_state_player(twa_graph_ptr& arena, unsigned state, bool owner); /// \ingroup games /// \brief Get the owner of a state. SPOT_API - bool get_state_player(const_twa_graph_ptr arena, unsigned state); + bool get_state_player(const const_twa_graph_ptr& arena, unsigned state); /// \ingroup games /// \brief Get the owner of all states @@ -181,9 +181,9 @@ namespace spot SPOT_API const strategy_t& get_strategy(const const_twa_graph_ptr& arena); SPOT_API - void set_strategy(twa_graph_ptr arena, const strategy_t& strat); + void set_strategy(twa_graph_ptr& arena, const strategy_t& strat); SPOT_API - void set_strategy(twa_graph_ptr arena, strategy_t&& strat); + void set_strategy(twa_graph_ptr& arena, strategy_t&& strat); /// @} /// \ingroup games @@ -205,23 +205,29 @@ namespace spot /// \brief Set the winner for all the states. /// @{ SPOT_API - void set_state_winners(twa_graph_ptr arena, const region_t& winners); + void set_state_winners(twa_graph_ptr& arena, const region_t& winners); SPOT_API - void set_state_winners(twa_graph_ptr arena, region_t&& winners); + void set_state_winners(twa_graph_ptr& arena, region_t&& winners); /// @} /// \ingroup games /// \brief Set the winner of a state. SPOT_API - void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner); + void set_state_winner(twa_graph_ptr& arena, unsigned state, bool winner); /// \ingroup games /// \brief Get the winner of a state. SPOT_API - bool get_state_winner(const_twa_graph_ptr arena, unsigned state); + bool get_state_winner(const const_twa_graph_ptr& arena, unsigned state); /// \ingroup games /// \brief Get the winner of all states + /// @{ SPOT_API const region_t& get_state_winners(const const_twa_graph_ptr& arena); +#ifndef SWIG + SPOT_API + const region_t& get_state_winners(twa_graph_ptr& arena); +#endif + /// @} } From 6e5592fe6a16ddd2df23d8555fc73ed9220311fe Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 Sep 2024 16:17:46 +0200 Subject: [PATCH 488/606] ltlsynt: save source filename with --csv * bin/ltlsynt.cc (print_csv): Make filename mandatory, and add a "was_game" argument. (process_formula, process_aut_file): Adjust calls. * tests/core/ltlsynt2.test: Adjust test cases. 
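A usage sketch (the formulas file and the propositions are those of the
test case below; any LTL file works):

  ltlsynt --ins=i1,i2 -F formulas.ltl --csv=out.csv -q

Each row of out.csv then starts with a "source" entry such as
"formulas.ltl:3", identifying the file and line the formula was read
from; formulas given with -f get an empty source column.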
--- NEWS | 3 +++ bin/ltlsynt.cc | 45 +++++++++++++++++++++++++++++----------- tests/core/ltlsynt2.test | 38 +++++++++++++++++++++++---------- 3 files changed, 63 insertions(+), 23 deletions(-) diff --git a/NEWS b/NEWS index b255d1319..35cd420d4 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,9 @@ New in spot 2.12.0.dev (not yet released) - ltlmix is a new tool that generates formulas by combining existing ones. See https://spot.lre.epita.fr/ltlmix.html for examples. + - ltlsynt --csv=FILENAME will now save the source filename for + the processed formulas as the first column of the CSV file. + - ltlsynt learned a --part-file option, to specify the partition of input/output proposition from a *.part file, as used in several other tools. diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index f465e2bf7..12f7d1c91 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -334,12 +334,13 @@ namespace spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; } + // If filename is passed, it is printed instead of the formula. We // use that when processing games since we have no formula to print. // It would be cleaner to have two columns: one for location (that's // filename + line number if known), and one for formula (if known). static void - print_csv(const spot::formula& f, const char* filename = nullptr) + print_csv(const spot::formula& f, const char* filename, bool was_game) { auto& vs = gi->verbose_stream; auto& bv = gi->bv; @@ -357,7 +358,7 @@ namespace // (Even if that file was empty initially.) if (!outf.append()) { - out << ("\"formula\",\"algo\",\"tot_time\",\"trans_time\"," + out << ("\"source\",\"formula\",\"algo\",\"tot_time\",\"trans_time\"," "\"split_time\",\"todpa_time\""); if (!opt_print_pg && !opt_print_hoa) { @@ -374,14 +375,23 @@ namespace out << ",\"nb latches\",\"nb gates\""; out << '\n'; } - std::ostringstream os; - if (filename) - os << filename; - else - os << f; - spot::escape_rfc4180(out << '"', os.str()) << "\","; - // if a filename was given, assume the game has been read directly - if (!filename) + { + std::ostringstream os; + if (filename) + { + os << filename; + spot::escape_rfc4180(out << '"', os.str()) << '"'; + os.str(""); + os.clear(); + } + out << ','; + if (was_game) + os << filename; + else + os << f; + spot::escape_rfc4180(out << '"', os.str()) << "\","; + } + if (!was_game) out << '"' << algo_names[(int) gi->s] << '"'; out << ',' << bv->total_time << ',' << bv->trans_time @@ -752,7 +762,18 @@ namespace filter_list_of_aps(f, filename, linenum); int res = solve_formula(f, input_aps, output_aps); if (opt_csv) - print_csv(f); + { + if (filename == 0 || linenum <= 0) + { + print_csv(f, filename, false); + } + else + { + std::ostringstream os; + os << filename << ':' << linenum; + print_csv(f, os.str().c_str(), false); + } + } return res; } @@ -951,7 +972,7 @@ namespace std::string loc = os.str(); int res = process_pgame(haut->aut, loc); if (res < 2 && opt_csv) - print_csv(nullptr, loc.c_str()); + print_csv(nullptr, loc.c_str(), true); err = std::max(err, res); } } diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index 5e26b28d3..912a76b81 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -47,7 +47,8 @@ except ImportError: x = pandas.read_csv("out.csv") x.to_csv('filtered.csv', - columns=('formula', 'algo', 'realizable', 'strat_num_states'), + columns=('source', 'formula', 'algo', + 'realizable', 'strat_num_states'), index=False) EOF @@ -55,24 +56,39 @@ EOF $PYTHON test.py cat >expected < Xo1),lar,1,3 -F(i1 xor i2) <-> Fo1,lar,1,2 -i1 
<-> F(o1 xor o2),lar,1,3 -Fi1 <-> Go2,lar,0,0 -o1 & F(i1 <-> o2),lar,1,2 +source,formula,algo,realizable,strat_num_states +formulas.ltl:1,G(i1 <-> Xo1),lar,1,3 +formulas.ltl:2,F(i1 xor i2) <-> Fo1,lar,1,2 +formulas.ltl:3,i1 <-> F(o1 xor o2),lar,1,3 +formulas.ltl:4,Fi1 <-> Go2,lar,0,0 +,o1 & F(i1 <-> o2),lar,1,2 EOF diff filtered.csv expected -# ltlfilt should be able to read the first columns +# ltlfilt should be able to read the second column mv filtered.csv input.csv -ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q && exit 2 +ltlsynt --ins=i1,i2 -F input.csv/-2 --csv=out.csv -q && exit 2 test $? -eq 1 $PYTHON test.py +cat >expected < Xo1),lar,1,3 +input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 +input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 +input.csv:5,Fi1 <-> Go2,lar,0,0 +input.csv:6,o1 & F(i1 <-> o2),lar,1,2 +EOF diff filtered.csv expected grep -v 0,0 filtered.csv >input.csv -ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q || exit 2 +ltlsynt -F input.csv/-2 --csv=out.csv -q || exit 2 $PYTHON test.py -diff filtered.csv input.csv +cat >expected < Xo1),lar,1,3 +input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 +input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 +input.csv:5,o1 & F(i1 <-> o2),lar,1,2 +EOF +diff filtered.csv expected From a22a05b8ec6cf12c57c191f1c40a7779306567bd Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 4 Sep 2024 16:33:48 +0200 Subject: [PATCH 489/606] ltlsynt: implement --csv-without-formula * bin/ltlsynt.cc: Implement that new option. * tests/core/ltlsynt2.test: Test it. * NEWS: Mention the change. --- NEWS | 7 +++++-- bin/ltlsynt.cc | 30 +++++++++++++++++++++++------- tests/core/ltlsynt2.test | 13 ++++++++++++- 3 files changed, 40 insertions(+), 10 deletions(-) diff --git a/NEWS b/NEWS index 35cd420d4..012ca6f32 100644 --- a/NEWS +++ b/NEWS @@ -5,8 +5,11 @@ New in spot 2.12.0.dev (not yet released) - ltlmix is a new tool that generates formulas by combining existing ones. See https://spot.lre.epita.fr/ltlmix.html for examples. - - ltlsynt --csv=FILENAME will now save the source filename for - the processed formulas as the first column of the CSV file. + - ltlsynt --csv=FILENAME will now save the source filename for the + processed formulas as the first column of the CSV file. A new + option --csv-without-formula=FILENAME can be used to save + everything but the formula column (as it can be very large, and + can be found from the source filename). - ltlsynt learned a --part-file option, to specify the partition of input/output proposition from a *.part file, as used in several diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 12f7d1c91..ee985b4be 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -50,6 +50,7 @@ enum OPT_ALGO = 256, OPT_BYPASS, OPT_CSV, + OPT_CSV_NO_FORMULA, OPT_DECOMPOSE, OPT_DOT, OPT_FROM_PGAME, @@ -153,6 +154,9 @@ static const argp_option options[] = "output statistics as CSV in FILENAME or on standard output " "(if '>>' is used to request append mode, the header line is " "not output)", 0 }, + { "csv-without-formula", OPT_CSV_NO_FORMULA, "[>>]FILENAME", + OPTION_ARG_OPTIONAL, "like --csv, but without 'fomula' column", 0 }, + { "csv-no-formula", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, { "hide-status", OPT_HIDE, nullptr, 0, "Hide the REALIZABLE or UNREALIZABLE line. 
(Hint: exit status " "is enough of an indication.)", 0 }, @@ -182,6 +186,7 @@ Exit status:\n\ 2 if any error has been reported"; static const char* opt_csv = nullptr; +static bool opt_csv_with_formula = true; static bool opt_print_pg = false; static bool opt_print_hoa = false; static const char* opt_print_hoa_args = nullptr; @@ -358,7 +363,10 @@ namespace // (Even if that file was empty initially.) if (!outf.append()) { - out << ("\"source\",\"formula\",\"algo\",\"tot_time\",\"trans_time\"," + out << "\"source\","; + if (opt_csv_with_formula) + out << "\"formula\","; + out << ("\"algo\",\"tot_time\",\"trans_time\"," "\"split_time\",\"todpa_time\""); if (!opt_print_pg && !opt_print_hoa) { @@ -385,11 +393,14 @@ namespace os.clear(); } out << ','; - if (was_game) - os << filename; - else - os << f; - spot::escape_rfc4180(out << '"', os.str()) << "\","; + if (opt_csv_with_formula) + { + if (was_game) + os << filename; + else + os << f; + spot::escape_rfc4180(out << '"', os.str()) << "\","; + } } if (!was_game) out << '"' << algo_names[(int) gi->s] << '"'; @@ -763,7 +774,7 @@ namespace int res = solve_formula(f, input_aps, output_aps); if (opt_csv) { - if (filename == 0 || linenum <= 0) + if (!filename || linenum <= 0) { print_csv(f, filename, false); } @@ -996,6 +1007,11 @@ parse_opt(int key, char *arg, struct argp_state *) break; case OPT_CSV: opt_csv = arg ? arg : "-"; + opt_csv_with_formula = true; + break; + case OPT_CSV_NO_FORMULA: + opt_csv = arg ? arg : "-"; + opt_csv_with_formula = false; break; case OPT_DECOMPOSE: opt_decompose_ltl = XARGMATCH("--decompose", arg, diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index 912a76b81..108cb5f94 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -82,7 +82,7 @@ EOF diff filtered.csv expected grep -v 0,0 filtered.csv >input.csv -ltlsynt -F input.csv/-2 --csv=out.csv -q || exit 2 +ltlsynt -F input.csv/-2 --csv=out.csv -q $PYTHON test.py cat >expected < F(o1 xor o2),lar,1,3 input.csv:5,o1 & F(i1 <-> o2),lar,1,2 EOF diff filtered.csv expected + +ltlsynt -F input.csv/-2 --csv-without-formula=out.csv -q +cut out.csv -d, -f1,2 >filtered.csv +cat >expected < Date: Wed, 4 Sep 2024 22:47:50 +0200 Subject: [PATCH 490/606] ltlsynt: fix a few issues with --csv Some columns were superfluous, other had inconsistent names, and some times where not tracked. * spot/twaalgos/synthesis.cc: Improve tracking of times and verbose messages. * bin/ltlsynt.cc (print_csv): Adjust CSV columns. * tests/core/ltlsynt.test, tests/core/ltlsynt2.test, tests/core/ltlsynt-pgame.test: Adjust expected CSV and verbose messages. * doc/org/ltlsynt.org: Give some example. --- NEWS | 4 +- bin/ltlsynt.cc | 25 +++++---- doc/org/ltlsynt.org | 98 ++++++++++++++++++++++++++++++++++- spot/twaalgos/synthesis.cc | 56 +++++++++++++------- tests/core/ltlsynt-pgame.test | 5 +- tests/core/ltlsynt.test | 33 +++++++----- tests/core/ltlsynt2.test | 8 +-- 7 files changed, 175 insertions(+), 54 deletions(-) diff --git a/NEWS b/NEWS index 012ca6f32..e8f01b7ca 100644 --- a/NEWS +++ b/NEWS @@ -9,7 +9,9 @@ New in spot 2.12.0.dev (not yet released) processed formulas as the first column of the CSV file. A new option --csv-without-formula=FILENAME can be used to save everything but the formula column (as it can be very large, and - can be found from the source filename). + can be found from the source filename). Several column names have + also been renamed for consistency, so any script reading these + file might require some adjustment. 
- ltlsynt learned a --part-file option, to specify the partition of input/output proposition from a *.part file, as used in several diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index ee985b4be..7e1cfb5b2 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -349,8 +349,8 @@ namespace { auto& vs = gi->verbose_stream; auto& bv = gi->bv; - if (not bv) - error(2, 0, "no information available for csv (please send bug report)"); + if (!bv) + error(2, 0, "no information available for csv (please report bug)"); if (vs) *vs << "writing CSV to " << opt_csv << '\n'; @@ -377,10 +377,11 @@ namespace out << ",\"aig_time\""; out << ",\"realizable\""; //-1: Unknown, 0: Unreal, 1: Real } - out << ",\"dpa_num_states\",\"dpa_num_states_env\"" - << ",\"strat_num_states\",\"strat_num_edges\""; + out << ",\"game_states\",\"game_states_env\""; + if (!opt_real) + out << ",\"strat_states\",\"strat_edges\""; if (opt_print_aiger) - out << ",\"nb latches\",\"nb gates\""; + out << ",\"aig_latches\",\"aig_gates\""; out << '\n'; } { @@ -418,15 +419,13 @@ namespace out << ',' << bv->realizable; } out << ',' << bv->nb_states_arena - << ',' << bv->nb_states_arena_env - << ',' << bv->nb_strat_states - << ',' << bv->nb_strat_edges; - + << ',' << bv->nb_states_arena_env; + if (!opt_real) + out << ',' << bv->nb_strat_states + << ',' << bv->nb_strat_edges; if (opt_print_aiger) - { out << ',' << bv->nb_latches << ',' << bv->nb_gates; - } out << '\n'; outf.close(opt_csv); } @@ -608,8 +607,6 @@ namespace safe_tot_time(); return 1; } - if (gi->bv) - gi->bv->realizable = true; // Create the (partial) strategy // only if we need it if (!opt_real) @@ -654,6 +651,8 @@ namespace "(please send bug report)"); } } + if (gi->bv) + gi->bv->realizable = true; // If we only wanted to print the game we are done if (want_game()) diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index 93c92e29b..0230d0b11 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -268,6 +268,8 @@ be tried by separating them using commas. For instance * Other useful options +** Printing games + You can also ask =ltlsynt= to print to obtained parity game into [[https://github.com/tcsprojects/pgsolver][PGSolver]] format, with the flag =--print-pg=, or in the HOA format, using =--print-game-hoa=. These flags deactivate the resolution of the @@ -284,8 +286,102 @@ ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot #+RESULTS: [[file:ltlsyntexgame.svg]] +** Saving statistics + For benchmarking purpose, the =--csv= option can be used to record -intermediate statistics about the resolution. +intermediate statistics about the resolution. The =--csv= option will +also save the formula into the CSV file, which can therefore become +very large. The variant =--csv-without-formula= is usually enough. + +For instance the following command tests the realizability of the 23 +demonstration specifications from [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily 1.0.2]] while saving some +statistics in =bench.csv=. (If you compare our results with theirs, +keep in mind that Lily uses Moore's semantics, while =ltlsynt= uses +Mealy's semantics.) 
+ +#+BEGIN_SRC sh :results verbatim :exports code :epilogue true + genltl --lily-patterns | + ltlsynt --algo=acd --realizability --csv-without-formula=bench.csv +#+END_SRC +#+RESULTS: +#+begin_example +UNREALIZABLE +UNREALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +UNREALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +REALIZABLE +#+end_example + +After execution, =bench.csv= contains a table like the following: + +#+BEGIN_SRC sh :results output raw :exports results +sed 's/"//g +s/|/\\vert{}/g +s/--/@@html:--@@/g +1a\ +|-| +s/^/| / +s/$/ |/ +s/,/|/g +' bench.csv +#+END_SRC + +#+RESULTS: +| source | algo | tot_time | trans_time | split_time | todpa_time | solve_time | realizable | game_states | game_states_env | +|--------+------+-------------+-------------+-------------+-------------+-------------+------------+-------------+-----------------| +| -:1 | acd | 0.000472663 | 0.00019603 | 2.0339e-05 | 2.0388e-05 | 1.4617e-05 | 0 | 3 | 2 | +| -:2 | acd | 0.00028595 | 0.000188466 | 1.4417e-05 | 2.0027e-05 | 5.861e-06 | 0 | 13 | 7 | +| -:3 | acd | 0.000741622 | 0.000591889 | 4.7229e-05 | 1.9516e-05 | 1.8014e-05 | 1 | 26 | 12 | +| -:4 | acd | 0.000917794 | 0.000725371 | 7.2026e-05 | 3.0328e-05 | 2.0349e-05 | 1 | 33 | 15 | +| -:5 | acd | 0.000878991 | 0.000612978 | 0.000102604 | 3.4155e-05 | 2.7913e-05 | 1 | 47 | 20 | +| -:6 | acd | 0.00100199 | 0.000761539 | 8.0191e-05 | 2.9817e-05 | 2.9075e-05 | 1 | 55 | 24 | +| -:7 | acd | 0.000587721 | 0.000425814 | 4.6268e-05 | 1.6261e-05 | 1.4106e-05 | 1 | 26 | 11 | +| -:8 | acd | 1.4046e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | +| -:9 | acd | 0.000519242 | 0.000400918 | 2.2322e-05 | 2.9446e-05 | 1.3886e-05 | 1 | 16 | 6 | +| -:10 | acd | 6.0835e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | +| -:11 | acd | 5.5245e-05 | 1.8335e-05 | 5.249e-06 | 4.007e-06 | 4.549e-06 | 0 | 3 | 2 | +| -:12 | acd | 1.6411e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | +| -:13 | acd | 0.000192153 | 0.000134825 | 1.06e-05 | 8.506e-06 | 5.33e-06 | 1 | 5 | 2 | +| -:14 | acd | 0.000291931 | 0.000209857 | 1.0881e-05 | 1.4076e-05 | 6.182e-06 | 1 | 4 | 2 | +| -:15 | acd | 0.000690605 | 0.000480759 | 9.4349e-05 | 3.2541e-05 | 1.8675e-05 | 1 | 30 | 9 | +| -:16 | acd | 0.00232829 | 0.00173036 | 0.000348709 | 9.2966e-05 | 6.1276e-05 | 1 | 103 | 29 | +| -:17 | acd | 0.000554708 | 0.00038608 | 2.4887e-05 | 2.9205e-05 | 1.1902e-05 | 1 | 6 | 3 | +| -:18 | acd | 0.00114041 | 0.00088879 | 3.3784e-05 | 3.4585e-05 | 1.1602e-05 | 1 | 8 | 4 | +| -:19 | acd | 0.000761799 | 0.000517278 | 4.3132e-05 | 5.1968e-05 | 2.127e-05 | 1 | 11 | 4 | +| -:20 | acd | 0.0169891 | 0.0133503 | 0.00172203 | 0.00113707 | 0.000412299 | 1 | 1002 | 311 | +| -:21 | acd | 0.118002 | 0.115604 | 0.00165549 | 0.000149402 | 0.00024346 | 1 | 371 | 75 | +| -:22 | acd | 0.00316832 | 0.00240598 | 0.000305407 | 0.000103245 | 0.00010582 | 1 | 86 | 30 | +| -:23 | acd | 0.000824969 | 0.000632956 | 3.2161e-05 | 2.9766e-05 | 2.0299e-05 | 1 | 17 | 7 | + +A source of the form =-:N= designates the Nth line of the standard +input, as =ltlsynt= was reading from that. The various =*_time*= +columns refers to different steps in the processing pipeline. Note +that various bits and minor operations are not timed, so =tot_time= +(the total time) should be larger than the sum of times used for +translation, splitting, conversion to DPA, and game solving. 
Some of +these intermediate processing time are listed as =0= above because +(e.g., for input 8, 10, 12) because the specifications can be found to +be realizable trivially without building any game. + +** Verifying the output The =--verify= option requests that the produced strategy or aiger circuit are compatible with the specification. This is done by diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 6b5fa7af9..b2b54fcf3 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -2215,17 +2215,6 @@ namespace spot (twa_graph_ptr strat) { dict->unregister_all_my_variables(&tmp); - if (vs) - { - *vs << "direct strategy was found.\n"; - if (want_strategy) - { - *vs << "direct strat has " << strat->num_states() - << " states, " << strat->num_edges() - << " edges and " << strat->num_sets() << " colors\n"; - - } - } if (strat) { strat->merge_edges(); @@ -2238,6 +2227,13 @@ namespace spot set_synthesis_outputs(strat, outputs); } + if (vs) + { + *vs << "direct strategy was found.\n"; + if (strat && want_strategy) + *vs << "direct strategy has " << strat->num_states() + << " states and " << strat->num_edges() << " edges\n"; + } return mealy_like{ mealy_like::realizability_code::REALIZABLE_REGULAR, strat, @@ -2383,15 +2379,19 @@ namespace spot auto res = trans.run(f_left); if (!is_deterministic(res)) - return ret_sol_maybe(); + { + if (bv) + { + auto delta = sw.stop(); + bv->trans_time += delta; + if (vs) + *vs << "translating formula done in " << delta + << " seconds...\n... but it gave a " + << "non-deterministic automaton (rejected)\n"; + } + return ret_sol_maybe(); + } - if (bv) - { - auto delta = sw.stop(); - bv->trans_time += delta; - if (vs) - *vs << "translating formula done in " << delta << " seconds\n"; - } res->prop_complete(trival::maybe()); bdd output_bdd = bddtrue; @@ -2442,12 +2442,23 @@ namespace spot res->prop_terminal(trival::maybe()); res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); + if (bv) + { + auto delta = sw.stop(); + bv->trans_time += delta; + if (vs) + *vs << "translating formula done in " << delta << " seconds\n"; + } return ret_sol_exists(res); } else if (f_other.is_tt()) { if (!want_strategy) return ret_sol_exists(nullptr); + stopwatch sw; + if (bv) + sw.start(); + auto res = make_twa_graph(dict); res->prop_weak(true); @@ -2463,6 +2474,13 @@ namespace spot bdd g_bdd = formula_to_bdd(f_g, dict, res); res->new_state(); res->new_edge(0, 0, g_bdd); + if (bv) + { + auto delta = sw.stop(); + bv->trans_time += delta; + if (vs) + *vs << "translating formula done in " << delta << " seconds\n"; + } return ret_sol_exists(res); } return ret_sol_maybe(); diff --git a/tests/core/ltlsynt-pgame.test b/tests/core/ltlsynt-pgame.test index 900e90120..0de7c9497 100755 --- a/tests/core/ltlsynt-pgame.test +++ b/tests/core/ltlsynt-pgame.test @@ -143,12 +143,11 @@ test 4 = `wc -l < out.csv` ltlsynt --from-pgame starve.ehoa \ --from-pgame UnderapproxDemo2.ehoa \ - --from-pgame aut7.hoa --csv=out.csv >result || : + --from-pgame aut7.hoa --csv-without-formula=out.csv >result || : test 4 = `wc -l < out.csv` cut -d, -f 9,10,11,12,13 right -end='"strat_num_states","strat_num_edges"' cat >expect < GFb translating formula done in X seconds direct strategy was found. 
-direct strat has 1 states, 2 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds EOF ltlsynt --ins=a --outs=b -f 'GFa <-> GFb' --verbose --algo=ps 2> out @@ -660,7 +660,7 @@ there are 1 subformulas trying to create strategy directly for GFa <-> GFb translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 2 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds EOF ltlsynt -f '(GFa <-> GFb) && (G(c <-> d))' --outs=b,c --verbose 2> out @@ -676,7 +676,7 @@ do trying to create strategy directly for $f translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 2 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds EOF ltlsynt -f "$f" --outs=b,c --verbose --decompose=0 \ @@ -705,7 +705,7 @@ cat >exp < GFa) & G((a & c) | (!a & !c)) translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 2 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds EOF ltlsynt -f '(GFb <-> GFa) && (G((a&c)|(!a&!c)))' --outs=b,c --verbose\ @@ -719,7 +719,7 @@ cat >exp < FGb translating formula done in X seconds direct strategy was found. -direct strat has 2 states, 3 edges and 0 colors +direct strategy has 2 states and 3 edges simplification took X seconds EOF ltlsynt -f "Fa <-> FGb" --outs=b,c --verbose --decompose=0 --verify 2> out @@ -789,8 +789,9 @@ new formula: x & y new formula: 1 there are 1 subformulas trying to create strategy directly for 1 +translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 1 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF @@ -804,13 +805,15 @@ cat >exp < GFo1 translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 2 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds EOF ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ @@ -1255,8 +1258,9 @@ new formula: G(o1 | !o2) & G(!o1 | o2) o2 := o1 new formula: G(o1 | !o1) trying to create strategy directly for G(o1 | !o1) +translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 1 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds working on subformula G(!i1 -> (o3 | !o4)) & G(!i2 -> (!o3 | o4)) the following signals can be temporarily removed: @@ -1266,8 +1270,9 @@ new formula: G(o3 | !o4) & G(!o3 | o4) o4 := o3 new formula: G(o3 | !o3) trying to create strategy directly for G(o3 | !o3) +translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 1 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds REALIZABLE HOA: v1 @@ -1298,13 +1303,15 @@ new formula: G(i1->(o1 | !o2)) & G(!i1->(o3 | !o4)) & $gg there are 2 subformulas working on subformula G(i1->(o1 | !o2)) & G(i2->(!o1 | o2)) trying to create strategy directly for G(i1->(o1 | !o2)) & G(i2->(!o1 | o2)) +translating formula done in X seconds direct strategy was found. 
-direct strat has 1 states, 1 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds working on subformula G(!i1->(o3 | !o4)) & G(!i2->(!o3 | o4)) trying to create strategy directly for G(!i1->(o3 | !o4)) & G(!i2->(!o3 | o4)) +translating formula done in X seconds direct strategy was found. -direct strat has 1 states, 1 edges and 0 colors +direct strategy has 1 states and 1 edges simplification took X seconds REALIZABLE HOA: v1 diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index 108cb5f94..c43f928c5 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -48,7 +48,7 @@ except ImportError: x = pandas.read_csv("out.csv") x.to_csv('filtered.csv', columns=('source', 'formula', 'algo', - 'realizable', 'strat_num_states'), + 'realizable', 'strat_states'), index=False) EOF @@ -56,7 +56,7 @@ EOF $PYTHON test.py cat >expected < Xo1),lar,1,3 formulas.ltl:2,F(i1 xor i2) <-> Fo1,lar,1,2 formulas.ltl:3,i1 <-> F(o1 xor o2),lar,1,3 @@ -72,7 +72,7 @@ ltlsynt --ins=i1,i2 -F input.csv/-2 --csv=out.csv -q && exit 2 test $? -eq 1 $PYTHON test.py cat >expected < Xo1),lar,1,3 input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 @@ -85,7 +85,7 @@ grep -v 0,0 filtered.csv >input.csv ltlsynt -F input.csv/-2 --csv=out.csv -q $PYTHON test.py cat >expected < Xo1),lar,1,3 input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 From 5488cb75c6c83709e49958fd6c067634f61f45a7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 9 Sep 2024 16:28:51 +0200 Subject: [PATCH 491/606] ltlsynt: overhaul CSV output Previous output was not very usable in presence of decomposed specifications. We now keep track of the number of parts, and also prefix the columns names with "max_" or "sum_" to indicate how their statistics are updated in presence of multiple part. Other missing statistics, like the size of the translated automaton, or maximal number of colors seen in a game, have been added. * spot/twaalgos/synthesis.hh (bench_var): Rename, augment, and document each statistsic. * spot/twaalgos/mealy_machine.cc, spot/twaalgos/synthesis.cc, bin/ltlsynt.cc: Adjust to the new naming scheme. * doc/org/ltlsynt.org: Show a CSV file, and document its columns. * tests/core/ltlsynt-pgame.test, tests/core/ltlsynt2.test, tests/core/ltlsynt.test: Adjust test cases. * NEWS: Mention the backward incompatible change. --- NEWS | 19 +++-- bin/ltlsynt.cc | 143 ++++++++++++++++++++++----------- doc/org/ltlsynt.org | 124 +++++++++++++++++++--------- spot/twaalgos/mealy_machine.cc | 20 +++-- spot/twaalgos/synthesis.cc | 96 +++++++++++++--------- spot/twaalgos/synthesis.hh | 81 +++++++++++++++---- tests/core/ltlsynt-pgame.test | 11 +-- tests/core/ltlsynt.test | 2 +- tests/core/ltlsynt2.test | 20 ++--- 9 files changed, 352 insertions(+), 164 deletions(-) diff --git a/NEWS b/NEWS index e8f01b7ca..89330adfb 100644 --- a/NEWS +++ b/NEWS @@ -5,13 +5,22 @@ New in spot 2.12.0.dev (not yet released) - ltlmix is a new tool that generates formulas by combining existing ones. See https://spot.lre.epita.fr/ltlmix.html for examples. - - ltlsynt --csv=FILENAME will now save the source filename for the - processed formulas as the first column of the CSV file. A new + - (BACKWARD INCOMPATIBILITY) ltlsynt's CSV output has been largely + overhauled, and the output columns have been both renamed (for + consistency) and augmented (with new statistics). 
The new CSV + output should more useful when the input specification was + decomposed, in particular, there is a column giving the number of + sub-specifications obained, and other statistics columns have name + starting with "max_" or "sum_" indicating how said statistics are + updated across sub-speciications. + + See https://spot.lre.epita.fr/ltlsynt.html#csv for an example. + + Additionally, --csv=FILENAME will now save the source filename for + the processed formulas as the first column of the CSV file. A new option --csv-without-formula=FILENAME can be used to save everything but the formula column (as it can be very large, and - can be found from the source filename). Several column names have - also been renamed for consistency, so any script reading these - file might require some adjustment. + can be found from the source filename). - ltlsynt learned a --part-file option, to specify the partition of input/output proposition from a *.part file, as used in several diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 7e1cfb5b2..0e846c386 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -298,18 +298,18 @@ ARGMATCH_VERIFY(simplify_args, simplify_values); static const char* const splittype_args[] = { + "auto", "expl", "semisym", "fullysym", - "auto", nullptr }; static spot::synthesis_info::splittype splittype_values[] = { + spot::synthesis_info::splittype::AUTO, spot::synthesis_info::splittype::EXPL, spot::synthesis_info::splittype::SEMISYM, spot::synthesis_info::splittype::FULLYSYM, - spot::synthesis_info::splittype::AUTO, }; ARGMATCH_VERIFY(splittype_args, splittype_values); @@ -363,25 +363,44 @@ namespace // (Even if that file was empty initially.) if (!outf.append()) { - out << "\"source\","; + out << "source"; if (opt_csv_with_formula) - out << "\"formula\","; - out << ("\"algo\",\"tot_time\",\"trans_time\"," - "\"split_time\",\"todpa_time\""); + out << ",formula"; + if (!was_game) + { + out << ",subspecs"; + out << ",algo"; + } + out << ",split,total_time"; + if (!was_game) + out << ",sum_trans_time"; + out << ",sum_split_time"; + if (!was_game) + out << ",sum_todpa_time"; if (!opt_print_pg && !opt_print_hoa) { - out << ",\"solve_time\""; + out << ",sum_solve_time"; if (!opt_real) - out << ",\"strat2aut_time\""; + out << ",sum_strat2aut_time"; if (opt_print_aiger) - out << ",\"aig_time\""; - out << ",\"realizable\""; //-1: Unknown, 0: Unreal, 1: Real + out << ",aig_time"; + out << ",realizable"; //-1: Unknown, 0: Unreal, 1: Real } - out << ",\"game_states\",\"game_states_env\""; + if (!was_game) + out << (",max_trans_states,max_trans_edges" + ",max_trans_colors,max_trans_ap"); + out << ",max_game_states,max_game_colors"; if (!opt_real) - out << ",\"strat_states\",\"strat_edges\""; + { + out << ",max_strat_states,max_strat_edges"; + if (!was_game) + out << ",sum_strat_states,sum_strat_edges"; + out << ",max_simpl_strat_states,max_simpl_strat_edges"; + if (!was_game) + out << ",sum_simpl_strat_states,sum_simpl_strat_edges"; + } if (opt_print_aiger) - out << ",\"aig_latches\",\"aig_gates\""; + out << ",aig_latches,aig_gates"; out << '\n'; } { @@ -404,28 +423,48 @@ namespace } } if (!was_game) - out << '"' << algo_names[(int) gi->s] << '"'; - out << ',' << bv->total_time - << ',' << bv->trans_time - << ',' << bv->split_time - << ',' << bv->paritize_time; + { + out << bv->sub_specs << ','; + out << algo_names[(int) gi->s] << ','; + } + out << splittype_args[(int) gi->sp] << ',' << bv->total_time; + if (!was_game) + out << ',' << bv->sum_trans_time; + out << ',' << bv->sum_split_time; 
+ if (!was_game) + out << ',' << bv->sum_paritize_time; if (!opt_print_pg && !opt_print_hoa) { - out << ',' << bv->solve_time; + out << ',' << bv->sum_solve_time; if (!opt_real) - out << ',' << bv->strat2aut_time; + out << ',' << bv->sum_strat2aut_time; if (opt_print_aiger) out << ',' << bv->aig_time; out << ',' << bv->realizable; } - out << ',' << bv->nb_states_arena - << ',' << bv->nb_states_arena_env; + if (!was_game) + out << ',' << bv->max_trans_states + << ',' << bv->max_trans_edges + << ',' << bv->max_trans_colors + << ',' << bv->max_trans_ap; + out << ',' << bv->max_game_states + << ',' << bv->max_game_colors; if (!opt_real) - out << ',' << bv->nb_strat_states - << ',' << bv->nb_strat_edges; + { + out << ',' << bv->max_strat_states + << ',' << bv->max_strat_edges; + if (!was_game) + out << ',' << bv->sum_strat_states + << ',' << bv->sum_strat_edges; + out << ',' << bv->max_simpl_strat_states + << ',' << bv->max_simpl_strat_edges; + if (!was_game) + out << ',' << bv->sum_simpl_strat_states + << ',' << bv->sum_simpl_strat_edges; + } if (opt_print_aiger) - out << ',' << bv->nb_latches - << ',' << bv->nb_gates; + out << ',' << bv->aig_latches + << ',' << bv->aig_gates; out << '\n'; outf.close(opt_csv); } @@ -515,6 +554,8 @@ namespace } } + if (gi->bv) + gi->bv->sub_specs = sub_form.size(); std::vector> sub_outs_str; std::transform(sub_outs.begin(), sub_outs.end(), std::back_inserter(sub_outs_str), @@ -584,16 +625,23 @@ namespace case spot::mealy_like::realizability_code::UNKNOWN: { auto arena = spot::ltl_to_game(*sub_f, *sub_o, *gi); +#ifndef NDEBUG + auto spptr = + arena->get_named_prop>("state-player"); + assert(spptr); + assert((spptr->at(arena->get_init_state_number()) == false) + && "Env needs first turn"); +#endif if (gi->bv) { - gi->bv->nb_states_arena += arena->num_states(); - auto spptr = - arena->get_named_prop>("state-player"); - assert(spptr); - gi->bv->nb_states_arena_env += - std::count(spptr->cbegin(), spptr->cend(), false); - assert((spptr->at(arena->get_init_state_number()) == false) - && "Env needs first turn"); + unsigned ns = arena->num_states(); + unsigned nc = arena->num_sets(); + if (std::tie(gi->bv->max_game_states, gi->bv->max_game_colors) + < std::tie(ns, nc)) + { + gi->bv->max_game_states = ns; + gi->bv->max_game_colors = nc; + } } if (want_game()) { @@ -690,8 +738,8 @@ namespace if (gi->bv) { gi->bv->aig_time = sw2.stop(); - gi->bv->nb_latches = saig->num_latches(); - gi->bv->nb_gates = saig->num_gates(); + gi->bv->aig_latches = saig->num_latches(); + gi->bv->aig_gates = saig->num_gates(); } if (gi->verbose_stream) { @@ -828,7 +876,10 @@ namespace const std::string& location) { if (opt_csv) // reset benchmark data - gi->bv = spot::synthesis_info::bench_var(); + { + gi->bv = spot::synthesis_info::bench_var(); + gi->bv->sub_specs = 1; // We do not know how to split a game + } spot::stopwatch sw_global; spot::stopwatch sw_local; if (gi->bv) @@ -881,13 +932,9 @@ namespace } if (gi->bv) { - gi->bv->split_time += sw_local.stop(); - gi->bv->nb_states_arena += arena->num_states(); - auto spptr = - arena->get_named_prop>("state-player"); - assert(spptr); - gi->bv->nb_states_arena_env += - std::count(spptr->cbegin(), spptr->cend(), false); + gi->bv->sum_split_time += sw_local.stop(); + gi->bv->max_game_states = arena->num_states(); + gi->bv->max_game_colors = arena->num_sets(); } if (opt_print_pg || opt_print_hoa) { @@ -931,16 +978,16 @@ namespace if (gi->bv) { gi->bv->aig_time = sw_local.stop(); - gi->bv->nb_latches = saig->num_latches(); - gi->bv->nb_gates = 
saig->num_gates(); + gi->bv->aig_latches = saig->num_latches(); + gi->bv->aig_gates = saig->num_gates(); } if (gi->verbose_stream) { *gi->verbose_stream << "AIG circuit was created in " << gi->bv->aig_time - << " seconds and has " << saig->num_latches() + << " seconds and has " << gi->bv->aig_latches << " latches and " - << saig->num_gates() << " gates\n"; + << gi->bv->aig_gates << " gates\n"; } spot::print_aiger(std::cout, saig) << '\n'; } diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index 0230d0b11..82a3399c6 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -286,7 +286,10 @@ ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot #+RESULTS: [[file:ltlsyntexgame.svg]] -** Saving statistics +** Saving statistics in CSV + :PROPERTIES: + :CUSTOM_ID: csv + :END: For benchmarking purpose, the =--csv= option can be used to record intermediate statistics about the resolution. The =--csv= option will @@ -301,7 +304,7 @@ Mealy's semantics.) #+BEGIN_SRC sh :results verbatim :exports code :epilogue true genltl --lily-patterns | - ltlsynt --algo=acd --realizability --csv-without-formula=bench.csv + ltlsynt --algo=acd -q --csv-without-formula=bench.csv #+END_SRC #+RESULTS: #+begin_example @@ -344,42 +347,91 @@ s/,/|/g ' bench.csv #+END_SRC +#+ATTR_HTML: :class csv-table #+RESULTS: -| source | algo | tot_time | trans_time | split_time | todpa_time | solve_time | realizable | game_states | game_states_env | -|--------+------+-------------+-------------+-------------+-------------+-------------+------------+-------------+-----------------| -| -:1 | acd | 0.000472663 | 0.00019603 | 2.0339e-05 | 2.0388e-05 | 1.4617e-05 | 0 | 3 | 2 | -| -:2 | acd | 0.00028595 | 0.000188466 | 1.4417e-05 | 2.0027e-05 | 5.861e-06 | 0 | 13 | 7 | -| -:3 | acd | 0.000741622 | 0.000591889 | 4.7229e-05 | 1.9516e-05 | 1.8014e-05 | 1 | 26 | 12 | -| -:4 | acd | 0.000917794 | 0.000725371 | 7.2026e-05 | 3.0328e-05 | 2.0349e-05 | 1 | 33 | 15 | -| -:5 | acd | 0.000878991 | 0.000612978 | 0.000102604 | 3.4155e-05 | 2.7913e-05 | 1 | 47 | 20 | -| -:6 | acd | 0.00100199 | 0.000761539 | 8.0191e-05 | 2.9817e-05 | 2.9075e-05 | 1 | 55 | 24 | -| -:7 | acd | 0.000587721 | 0.000425814 | 4.6268e-05 | 1.6261e-05 | 1.4106e-05 | 1 | 26 | 11 | -| -:8 | acd | 1.4046e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | -| -:9 | acd | 0.000519242 | 0.000400918 | 2.2322e-05 | 2.9446e-05 | 1.3886e-05 | 1 | 16 | 6 | -| -:10 | acd | 6.0835e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | -| -:11 | acd | 5.5245e-05 | 1.8335e-05 | 5.249e-06 | 4.007e-06 | 4.549e-06 | 0 | 3 | 2 | -| -:12 | acd | 1.6411e-05 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | -| -:13 | acd | 0.000192153 | 0.000134825 | 1.06e-05 | 8.506e-06 | 5.33e-06 | 1 | 5 | 2 | -| -:14 | acd | 0.000291931 | 0.000209857 | 1.0881e-05 | 1.4076e-05 | 6.182e-06 | 1 | 4 | 2 | -| -:15 | acd | 0.000690605 | 0.000480759 | 9.4349e-05 | 3.2541e-05 | 1.8675e-05 | 1 | 30 | 9 | -| -:16 | acd | 0.00232829 | 0.00173036 | 0.000348709 | 9.2966e-05 | 6.1276e-05 | 1 | 103 | 29 | -| -:17 | acd | 0.000554708 | 0.00038608 | 2.4887e-05 | 2.9205e-05 | 1.1902e-05 | 1 | 6 | 3 | -| -:18 | acd | 0.00114041 | 0.00088879 | 3.3784e-05 | 3.4585e-05 | 1.1602e-05 | 1 | 8 | 4 | -| -:19 | acd | 0.000761799 | 0.000517278 | 4.3132e-05 | 5.1968e-05 | 2.127e-05 | 1 | 11 | 4 | -| -:20 | acd | 0.0169891 | 0.0133503 | 0.00172203 | 0.00113707 | 0.000412299 | 1 | 1002 | 311 | -| -:21 | acd | 0.118002 | 0.115604 | 0.00165549 | 0.000149402 | 0.00024346 | 1 | 371 | 75 | -| -:22 | acd | 0.00316832 | 0.00240598 | 0.000305407 | 0.000103245 | 0.00010582 | 1 | 86 | 30 | -| 
-:23 | acd | 0.000824969 | 0.000632956 | 3.2161e-05 | 2.9766e-05 | 2.0299e-05 | 1 | 17 | 7 | +| source | subspecs | algo | split | total_time | sum_trans_time | sum_split_time | sum_todpa_time | sum_solve_time | sum_strat2aut_time | realizable | max_trans_states | max_trans_edges | max_trans_colors | max_trans_ap | max_game_states | max_game_colors | max_strat_states | max_strat_edges | sum_strat_states | sum_strat_edges | max_simpl_strat_states | max_simpl_strat_edges | sum_simpl_strat_states | sum_simpl_strat_edges | +|--------+----------+------+-------+-------------+----------------+----------------+----------------+----------------+--------------------+------------+------------------+-----------------+------------------+--------------+-----------------+-----------------+------------------+-----------------+------------------+-----------------+------------------------+-----------------------+------------------------+-----------------------| +| -:1 | 2 | acd | auto | 0.000327418 | 0.000135325 | 1.5128e-05 | 1.543e-05 | 6.171e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:2 | 2 | acd | auto | 0.00020147 | 0.000117201 | 9.227e-06 | 1.2634e-05 | 3.607e-06 | 6.642e-06 | 0 | 5 | 8 | 0 | 1 | 10 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | +| -:3 | 1 | acd | auto | 0.000487781 | 0.000360129 | 2.8053e-05 | 1.1211e-05 | 1.0851e-05 | 5.741e-06 | 1 | 12 | 46 | 1 | 3 | 26 | 1 | 5 | 5 | 5 | 5 | 2 | 4 | 2 | 4 | +| -:4 | 1 | acd | auto | 0.000491777 | 0.000356242 | 3.3032e-05 | 1.5269e-05 | 1.0379e-05 | 6.683e-06 | 1 | 15 | 62 | 1 | 3 | 33 | 1 | 6 | 6 | 6 | 6 | 3 | 7 | 3 | 7 | +| -:5 | 1 | acd | auto | 0.000476078 | 0.000314904 | 4.4103e-05 | 1.5029e-05 | 1.2133e-05 | 1.4237e-05 | 1 | 20 | 88 | 1 | 3 | 47 | 1 | 8 | 9 | 8 | 9 | 6 | 17 | 6 | 17 | +| -:6 | 1 | acd | auto | 0.000486699 | 0.000300587 | 4.4013e-05 | 1.6531e-05 | 1.3766e-05 | 1.57e-05 | 1 | 24 | 111 | 1 | 3 | 55 | 1 | 11 | 12 | 11 | 12 | 7 | 21 | 7 | 21 | +| -:7 | 1 | acd | auto | 0.000394895 | 0.00024404 | 2.7942e-05 | 9.317e-06 | 1.2443e-05 | 7.344e-06 | 1 | 11 | 38 | 1 | 3 | 26 | 1 | 7 | 8 | 7 | 8 | 6 | 14 | 6 | 14 | +| -:8 | 1 | acd | auto | 1.3125e-05 | 1.293e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:9 | 1 | acd | auto | 0.000309784 | 0.000223411 | 1.3456e-05 | 1.9046e-05 | 8.466e-06 | 3.968e-06 | 1 | 6 | 19 | 2 | 2 | 16 | 2 | 2 | 3 | 2 | 3 | 2 | 3 | 2 | 3 | +| -:10 | 1 | acd | auto | 1.4206e-05 | 6.81e-07 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:11 | 1 | acd | auto | 2.8453e-05 | 9.968e-06 | 2.504e-06 | 2.114e-06 | 2.295e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:12 | 1 | acd | auto | 1.3826e-05 | 8.81e-07 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:13 | 1 | acd | auto | 0.000124284 | 7.6274e-05 | 6.192e-06 | 4.869e-06 | 3.066e-06 | 3.196e-06 | 1 | 2 | 3 | 1 | 2 | 5 | 1 | 2 | 2 | 2 | 2 | 1 | 2 | 1 | 2 | +| -:14 | 1 | acd | auto | 0.000184107 | 0.000122141 | 6.412e-06 | 8.275e-06 | 3.567e-06 | 2.725e-06 | 1 | 1 | 3 | 2 | 2 | 4 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | +| -:15 | 1 | acd | auto | 0.00042922 | 0.000279297 | 3.7861e-05 | 1.9096e-05 | 1.057e-05 | 6.933e-06 | 1 | 8 | 40 | 2 | 4 | 30 | 1 | 7 | 12 | 7 | 12 | 5 | 13 | 5 | 13 | +| -:16 | 1 | acd | auto | 0.0015915 | 0.00103173 | 0.000202432 | 5.328e-05 | 3.6118e-05 | 1.8925e-05 | 1 | 22 | 225 | 3 | 6 | 103 | 1 | 22 | 40 | 22 | 40 | 17 | 71 | 17 | 71 | +| -:17 | 1 | acd | auto | 0.000271983 | 0.000184298 | 
8.636e-06 | 9.919e-06 | 3.988e-06 | 3.046e-06 | 1 | 1 | 4 | 3 | 3 | 6 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | +| -:18 | 1 | acd | auto | 0.000380097 | 0.000274818 | 1.1502e-05 | 1.1481e-05 | 4.098e-06 | 3.366e-06 | 1 | 1 | 5 | 4 | 4 | 8 | 1 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | +| -:19 | 1 | acd | auto | 0.000263427 | 0.000170873 | 1.4458e-05 | 1.6912e-05 | 7.214e-06 | 2.835e-06 | 1 | 4 | 15 | 2 | 3 | 11 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | +| -:20 | 1 | acd | auto | 0.00668276 | 0.00504907 | 0.000916819 | 0.000353497 | 0.000170251 | 9.368e-06 | 1 | 311 | 3488 | 2 | 5 | 1002 | 2 | 10 | 10 | 10 | 10 | 6 | 12 | 6 | 12 | +| -:21 | 1 | acd | auto | 0.0414155 | 0.0392309 | 0.000744745 | 5.4213e-05 | 8.4029e-05 | 7.5533e-05 | 1 | 75 | 546 | 1 | 8 | 371 | 1 | 74 | 228 | 74 | 228 | 71 | 339 | 71 | 339 | +| -:22 | 1 | acd | auto | 0.00126613 | 0.000785011 | 0.000112663 | 3.5397e-05 | 3.5998e-05 | 1.7854e-05 | 1 | 30 | 161 | 2 | 4 | 86 | 1 | 22 | 25 | 22 | 25 | 15 | 67 | 15 | 67 | +| -:23 | 1 | acd | auto | 0.000305085 | 0.000213032 | 1.061e-05 | 1.0009e-05 | 6.752e-06 | 5.23e-06 | 1 | 7 | 16 | 1 | 2 | 17 | 1 | 5 | 6 | 5 | 6 | 3 | 6 | 3 | 6 | + +The names of the columns should be mostly self explanatory. The +decomposition of the specification into multiple sub-specifications +makes it slightly incoveniant to track statistics in a run. The +column =subspecs= indicates how many sub-specifications were found in +the original specification. Columns with names of the form =sum_*= +accumulate their statistics over all subspecifications. Columns with +names of the form =max_*= keep only the largest statistics. The following +statistics are gathered: + +- =source=: location of the specification in the form FILENAME:LINE + (FILENAME is =-= when reading from standard input as in the above + example). +- =formula= (if requested): the actual LTL formula used for the + specification +- =subspecs=: the number of sub-specifications resulting from the + decomposition +- =algo=: the name of the approach used to construct game, as + specified with the =--algo= option +- =split=: the name of the approach used to split the automaton into + input and output steps, as specified with the =--split= option +- =total_time=: total time measured by =ltlsynt= to solve the problem + once the problem has been loaded (parsing of the formula, conversion + from TSLF, or parsing of a parity game are all excluded) +- =sum_trans_time=: sum of the translation time needed to obtain an + automaton from each subspecification. +- =sum_split_time=: sum of the times needed to split the automata +- =sum_todpa_time=: sum of the times needed to paritize the automata +- =sum_solve_time=: sum of the times needed to solve the game for each + subspecification +- =sum_strat2aut_time= sum of the time needed to extract the + strategies +- =realizable=: whether the specification is realizable +- =max_trans_states,max_trans_edges,max_trans_colors,max_trans_ap=: + Size of the largest automaton constructed for a subspecification. + The largest size is actually the largest quadruple of the form + (states,edges,colors,ap), so those maximum values are not + independent. +- =max_game_states=: maximum number of state in any game constructed +- =max_game_colors=: maximum numbers of colors in any game constructed + (might not be the same game as for =max_game_states=) +- =max_strat_states,max_strat_edges=: size of the largest strategy + found. The largest size is the largest pair (states,edges), so + those two values are not indeendent. 
+- =sum_strat_states,sum_strat_edges=: sum of the states/edges in + strategies for all subspecifications +- =max_simpl_strat_states,max_simpl_strat_edges=: size of the largest + simplified strategy. +- =sum_simpl_strat_states,sum_simpl_strat_edges=: sum of the + states/edges in simplified strategies for all subspecifications +- =aig_gates,aig_latches=: Size of the AIG circuit, if requested. + + +In the table from the previous section some of the intermediate +processing times are listed as =0= (e.g., for input 8, 10, 12) because +the specifications can be found to be realizable trivially without +building any game. -A source of the form =-:N= designates the Nth line of the standard -input, as =ltlsynt= was reading from that. The various =*_time*= -columns refers to different steps in the processing pipeline. Note -that various bits and minor operations are not timed, so =tot_time= -(the total time) should be larger than the sum of times used for -translation, splitting, conversion to DPA, and game solving. Some of -these intermediate processing time are listed as =0= above because -(e.g., for input 8, 10, 12) because the specifications can be found to -be realizable trivially without building any game. ** Verifying the output diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 5021f2a94..11a05d5cf 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -4353,9 +4353,10 @@ namespace spot if (si.verbose_stream) *si.verbose_stream << "simplification took " << sw.stop() << " seconds\n"; - si.bv->simplify_strat_time += sw.stop(); - auto n_s_env = 0u; - auto n_e_env = 0u; + si.bv->sum_simplify_strat_time += sw.stop(); + unsigned n_s_env = 0; + unsigned n_e_env = 0; + // If the strategy is split, count only the environment's states if (auto sp = m->get_named_prop("state-player")) { n_s_env = sp->size() - std::accumulate(sp->begin(), @@ -4365,15 +4366,22 @@ namespace spot [&n_e_env, &sp](const auto& e) { n_e_env += (*sp)[e.src]; - }); + }); } else { n_s_env = m->num_states(); n_e_env = m->num_edges(); } - si.bv->nb_simpl_strat_states += n_s_env; - si.bv->nb_simpl_strat_edges += n_e_env; + if (std::tie(si.bv->max_simpl_strat_states, + si.bv->max_simpl_strat_edges) + < std::tie(n_s_env, n_e_env)) + { + si.bv->max_simpl_strat_states = n_s_env; + si.bv->max_simpl_strat_edges = n_e_env; + } + si.bv->sum_simpl_strat_states += n_s_env; + si.bv->sum_simpl_strat_edges += n_e_env; } } } diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index b2b54fcf3..5231f98ad 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1574,11 +1574,11 @@ namespace spot // release the variables // Release the pairs for (auto pair_ptr : {replace_fwd, - replace_bkwd, - replace_in_fwd, - replace_in_bkwd, - replace_out_fwd, - replace_out_bkwd}) + replace_bkwd, + replace_in_fwd, + replace_in_bkwd, + replace_out_fwd, + replace_out_bkwd}) bdd_freepair(pair_ptr); aut->get_dict()->unregister_all_my_variables(&N); @@ -1602,17 +1602,17 @@ namespace spot : sp; switch (sp) - { - case (synthesis_info::splittype::EXPL): + { + case synthesis_info::splittype::EXPL: return split_2step_expl_impl(aut, output_bdd, complete_env); - case (synthesis_info::splittype::SEMISYM): + case synthesis_info::splittype::SEMISYM: return split_2step_sym_impl(aut, output_bdd, complete_env); - case (synthesis_info::splittype::FULLYSYM): + case synthesis_info::splittype::FULLYSYM: return split_2step_sym_impl(aut, output_bdd, complete_env); default: throw 
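As an aside (an illustration, not part of the patch), the new column
names make the file easy to post-process by name rather than by
position; the test suite already loads these CSVs with pandas.  A
minimal sketch, assuming a =bench.csv= produced by the ltlsynt
invocation shown earlier, could look like this:

#+BEGIN_SRC python
  import pandas as pd

  # Statistics written by the ltlsynt invocation shown above
  # (the file name is just the one used in that example).
  df = pd.read_csv("bench.csv")

  # "realizable" holds 1 (realizable), 0 (unrealizable), or -1 (unknown).
  print((df["realizable"] == 1).sum(), "realizable out of", len(df))

  # Show the three slowest instances and where their time went.
  cols = ["source", "total_time", "sum_trans_time",
          "sum_solve_time", "max_game_states"]
  print(df.nlargest(3, "total_time")[cols])
#+END_SRC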
std::runtime_error("split_2step_(): " "Expected explicit splittype."); - } + } } } // End anonymous @@ -1826,13 +1826,29 @@ namespace spot sw.start(); auto aut = trans.run(f); if (bv) - bv->trans_time += sw.stop(); - + { + bv->sum_trans_time += sw.stop(); + unsigned ns = aut->num_states(); + unsigned ne = aut->num_edges(); + unsigned nc = aut->num_sets(); + unsigned na = aut->ap().size(); + if (std::tie(bv->max_trans_states, + bv->max_trans_edges, + bv->max_trans_colors, + bv->max_trans_ap) + < std::tie(ns, ne, nc, na)) + { + bv->max_trans_states = ns; + bv->max_trans_edges = ne; + bv->max_trans_colors = nc; + bv->max_trans_ap = na; + } + } if (vs) { assert(bv); *vs << "translating formula done in " - << bv->trans_time << " seconds\n"; + << bv->sum_trans_time << " seconds\n"; *vs << "automaton has " << aut->num_states() << " states and " << aut->num_sets() << " colors\n"; } @@ -1879,20 +1895,20 @@ namespace spot << tmp->num_sets() << " colors\n"; tmp->merge_states(); if (bv) - bv->paritize_time += sw.stop(); + bv->sum_paritize_time += sw.stop(); if (vs) *vs << "simplification done\nDPA has " << tmp->num_states() << " states\n" << "determinization and simplification took " - << bv->paritize_time << " seconds\n"; + << bv->sum_paritize_time << " seconds\n"; if (bv) sw.start(); dpa = set_split(tmp); if (bv) - bv->split_time += sw.stop(); + bv->sum_split_time += sw.stop(); if (vs) - *vs << "split inputs and outputs done in " << bv->split_time + *vs << "split inputs and outputs done in " << bv->sum_split_time << " seconds\nautomaton has " << tmp->num_states() << " states\n"; break; @@ -1903,18 +1919,18 @@ namespace spot sw.start(); aut->merge_states(); if (bv) - bv->paritize_time += sw.stop(); + bv->sum_paritize_time += sw.stop(); if (vs) - *vs << "simplification done in " << bv->paritize_time + *vs << "simplification done in " << bv->sum_paritize_time << " seconds\nDPA has " << aut->num_states() << " states\n"; if (bv) sw.start(); dpa = set_split(aut); if (bv) - bv->split_time += sw.stop(); + bv->sum_split_time += sw.stop(); if (vs) - *vs << "split inputs and outputs done in " << bv->split_time + *vs << "split inputs and outputs done in " << bv->sum_split_time << " seconds\nautomaton has " << dpa->num_states() << " states\n"; break; @@ -1924,9 +1940,9 @@ namespace spot sw.start(); auto split = set_split(aut); if (bv) - bv->split_time += sw.stop(); + bv->sum_split_time += sw.stop(); if (vs) - *vs << "split inputs and outputs done in " << bv->split_time + *vs << "split inputs and outputs done in " << bv->sum_split_time << " seconds\nautomaton has " << split->num_states() << " states\n"; if (bv) @@ -1942,12 +1958,12 @@ namespace spot // Merge states knows about players dpa->merge_states(); if (bv) - bv->paritize_time += sw.stop(); + bv->sum_paritize_time += sw.stop(); if (vs) *vs << "simplification done\nDPA has " << dpa->num_states() << " states\n" << "determinization and simplification took " - << bv->paritize_time << " seconds\n"; + << bv->sum_paritize_time << " seconds\n"; break; } case algo::ACD: @@ -1971,10 +1987,10 @@ namespace spot else dpa = acd_transform(aut); if (bv) - bv->paritize_time += sw.stop(); + bv->sum_paritize_time += sw.stop(); if (vs) *vs << (gi.s == algo::ACD ? 
"ACD" : "LAR") - << " construction done in " << bv->paritize_time + << " construction done in " << bv->sum_paritize_time << " seconds\nDPA has " << dpa->num_states() << " states, " << dpa->num_sets() << " colors\n"; @@ -1983,9 +1999,9 @@ namespace spot sw.start(); dpa = set_split(dpa); if (bv) - bv->split_time += sw.stop(); + bv->sum_split_time += sw.stop(); if (vs) - *vs << "split inputs and outputs done in " << bv->split_time + *vs << "split inputs and outputs done in " << bv->sum_split_time << " seconds\nautomaton has " << dpa->num_states() << " states\n"; break; @@ -2045,6 +2061,7 @@ namespace spot if (gi.bv) { + gi.bv->sum_strat2aut_time += sw.stop(); auto sp = get_state_players(m); auto n_s_env = sp.size() - std::accumulate(sp.begin(), sp.end(), @@ -2055,9 +2072,14 @@ namespace spot { n_e_env += sp[e.src]; }); - gi.bv->strat2aut_time += sw.stop(); - gi.bv->nb_strat_states += n_s_env; - gi.bv->nb_strat_edges += n_e_env; + if (std::tie(gi.bv->max_strat_states, gi.bv->max_strat_edges) + < std::tie(n_s_env, n_e_env)) + { + gi.bv->max_strat_states = n_s_env; + gi.bv->max_strat_edges = n_e_env; + } + gi.bv->sum_strat_states += n_s_env; + gi.bv->sum_strat_edges += n_e_env; } assert(is_mealy(m)); @@ -2383,7 +2405,7 @@ namespace spot if (bv) { auto delta = sw.stop(); - bv->trans_time += delta; + bv->sum_trans_time += delta; if (vs) *vs << "translating formula done in " << delta << " seconds...\n... but it gave a " @@ -2445,7 +2467,7 @@ namespace spot if (bv) { auto delta = sw.stop(); - bv->trans_time += delta; + bv->sum_trans_time += delta; if (vs) *vs << "translating formula done in " << delta << " seconds\n"; } @@ -2477,7 +2499,7 @@ namespace spot if (bv) { auto delta = sw.stop(); - bv->trans_time += delta; + bv->sum_trans_time += delta; if (vs) *vs << "translating formula done in " << delta << " seconds\n"; } @@ -2922,10 +2944,10 @@ namespace spot } bool res = solve_game(arena); if (gi.bv) - gi.bv->solve_time += sw.stop(); + gi.bv->sum_solve_time += sw.stop(); if (gi.verbose_stream) *(gi.verbose_stream) << "game solved in " - << gi.bv->solve_time << " seconds\n"; + << gi.bv->sum_solve_time << " seconds\n"; return res; } diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 2c5bdff1b..34bd57ea0 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -32,7 +32,7 @@ namespace spot { enum class algo { - DET_SPLIT=0, + DET_SPLIT = 0, SPLIT_DET, DPA_SPLIT, LAR, @@ -42,30 +42,79 @@ namespace spot enum class splittype { - AUTO=0, // Uses a heuristic to choose + AUTO = 0, // Uses a heuristic to choose EXPL, // Explicit enumerations of inputs SEMISYM, // Works on one bdd per env state FULLYSYM // Works on a fully symbolic version of the automaton }; + // These statistics are recorded by various steps of the synthesis + // process. struct bench_var { + // Number of sub-spefications resulting from the decomposition. + // Updated by ltlsynt. + unsigned sub_specs = 0; + // Total time needed for the synthesis. Computed by ltlsynt. double total_time = 0.0; - double trans_time = 0.0; - double split_time = 0.0; - double paritize_time = 0.0; - double solve_time = 0.0; - double strat2aut_time = 0.0; - double simplify_strat_time = 0.0; + // Time needed to transform the LTL formula(s) into automata, summed + // over all subspecs. The type of automaton constructed depends on + // the "algo" parameter. + double sum_trans_time = 0.0; + // Time needed to split the automata into separate + // environment/controler steps. Summed over all subspecs. 
+ // Splitting may occur before or after paritization depending on + // the "algo" parameter. + double sum_split_time = 0.0; + // Time needed to convert the automaton to deterministic parity + // automata. Summed over all subspecs. Paritization may occur + // before or after splitting depending on the "algo" parameter. + double sum_paritize_time = 0.0; + // Time needed to solve the game. Summed over all subspecs. + double sum_solve_time = 0.0; + // Time needed to convert the winning strategy into an + // automaton. Summed over all subspecs. + double sum_strat2aut_time = 0.0; + // Time needed to simplify the winning strategy. Summed over + // all subspecs. + double sum_simplify_strat_time = 0.0; + // Time needed to encode all the strategies into one AIG. double aig_time = 0.0; - unsigned nb_states_arena = 0; - unsigned nb_states_arena_env = 0; - unsigned nb_strat_states = 0; - unsigned nb_strat_edges = 0; - unsigned nb_simpl_strat_states = 0; - unsigned nb_simpl_strat_edges = 0; - unsigned nb_latches = 0; - unsigned nb_gates = 0; + // Size of the automaton resulting from the main translation. + // If multiple subspecifications are used, only the largest + // (states,edges,colors,aps) triplet is kept. + unsigned max_trans_states = 0; + unsigned max_trans_edges = 0; + unsigned max_trans_colors = 0; + unsigned max_trans_ap = 0; + // Size of the game that should be solved. If multiple + // subspecifications are used, only the maximum states and + // colors are kept (those are compared independently). + unsigned max_game_states = 0; + unsigned max_game_colors = 0; + // Size of the strategy extracted from the game. If multiple + // subspecifications are used, only the maximum pair (states, + // edges) is kept. + unsigned max_strat_states = 0; + unsigned max_strat_edges = 0; + // Size of the strategy extracted from the game, summed over all + // subspecifications. + unsigned sum_strat_states = 0; + unsigned sum_strat_edges = 0; + // Size of the strategy after simplification game. If multiple + // subspecifications are used, only the maximum pair (states, + // edges) is kept. + unsigned max_simpl_strat_states = 0; + unsigned max_simpl_strat_edges = 0; + // Size of the strategy after simplification, summed over all + // subspecifications. + unsigned sum_simpl_strat_states = 0; + unsigned sum_simpl_strat_edges = 0; + // Size of the AIG + unsigned aig_latches = 0; + unsigned aig_gates = 0; + // Whether the (global) specification is realizable. Updated by + // ltlsynt. bool realizable = false; }; diff --git a/tests/core/ltlsynt-pgame.test b/tests/core/ltlsynt-pgame.test index 0de7c9497..bdc117b77 100755 --- a/tests/core/ltlsynt-pgame.test +++ b/tests/core/ltlsynt-pgame.test @@ -145,11 +145,12 @@ ltlsynt --from-pgame starve.ehoa \ --from-pgame UnderapproxDemo2.ehoa \ --from-pgame aut7.hoa --csv-without-formula=out.csv >result || : test 4 = `wc -l < out.csv` -cut -d, -f 9,10,11,12,13 right +cut -d, -f 1,2,7,8,9,10,11,12,13 right +REST=strat_states,max_strat_edges,max_simpl_strat_states,max_simpl_strat_edges cat >expect < /->/g;' out > outx diff outx exp -genltl --lily-patterns | ltlsynt -q > out && exit 2 +genltl --lily-patterns | ltlsynt --realizability > out && exit 2 cat >expected <expected < Xo1),lar,1,3 formulas.ltl:2,F(i1 xor i2) <-> Fo1,lar,1,2 formulas.ltl:3,i1 <-> F(o1 xor o2),lar,1,3 @@ -72,7 +72,7 @@ ltlsynt --ins=i1,i2 -F input.csv/-2 --csv=out.csv -q && exit 2 test $? 
-eq 1 $PYTHON test.py cat >expected < Xo1),lar,1,3 input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 @@ -85,7 +85,7 @@ grep -v 0,0 filtered.csv >input.csv ltlsynt -F input.csv/-2 --csv=out.csv -q $PYTHON test.py cat >expected < Xo1),lar,1,3 input.csv:3,F(i1 xor i2) <-> Fo1,lar,1,2 input.csv:4,i1 <-> F(o1 xor o2),lar,1,3 @@ -94,12 +94,12 @@ EOF diff filtered.csv expected ltlsynt -F input.csv/-2 --csv-without-formula=out.csv -q -cut out.csv -d, -f1,2 >filtered.csv +cut out.csv -d, -f1,2,3 >filtered.csv cat >expected < Date: Mon, 9 Sep 2024 16:45:00 +0200 Subject: [PATCH 492/606] ltlsynt: have --csv exclude the formula column by default * bin/ltlsynt.cc: Add the --csv-with-formula option. * doc/org/ltlsynt.org, tests/core/ltlsynt2.test, NEWS: Adjust. --- NEWS | 16 +++++------ bin/ltlsynt.cc | 17 ++++++------ doc/org/ltlsynt.org | 58 +++++++++++++++++++--------------------- tests/core/ltlsynt2.test | 9 ++++--- 4 files changed, 50 insertions(+), 50 deletions(-) diff --git a/NEWS b/NEWS index 89330adfb..91c1979cb 100644 --- a/NEWS +++ b/NEWS @@ -8,19 +8,19 @@ New in spot 2.12.0.dev (not yet released) - (BACKWARD INCOMPATIBILITY) ltlsynt's CSV output has been largely overhauled, and the output columns have been both renamed (for consistency) and augmented (with new statistics). The new CSV - output should more useful when the input specification was + output should be more useful when the input specification is decomposed, in particular, there is a column giving the number of - sub-specifications obained, and other statistics columns have name - starting with "max_" or "sum_" indicating how said statistics are - updated across sub-speciications. + sub-specifications obained, and other statistics columns have + names starting with "max_" or "sum_" indicating how said + statistics are updated across sub-specifications. See https://spot.lre.epita.fr/ltlsynt.html#csv for an example. Additionally, --csv=FILENAME will now save the source filename for - the processed formulas as the first column of the CSV file. A new - option --csv-without-formula=FILENAME can be used to save - everything but the formula column (as it can be very large, and - can be found from the source filename). + the processed formulas as the first column of the CSV file, and + the "formula" column is no longer output by default to save space. + This "formula" column can be added back with the new option + --csv-with-formula=FILENAME if really needed. - ltlsynt learned a --part-file option, to specify the partition of input/output proposition from a *.part file, as used in several diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 0e846c386..4522e4a88 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -49,8 +49,8 @@ enum { OPT_ALGO = 256, OPT_BYPASS, - OPT_CSV, - OPT_CSV_NO_FORMULA, + OPT_CSV_WITH_FORMULA, + OPT_CSV_WITHOUT_FORMULA, OPT_DECOMPOSE, OPT_DOT, OPT_FROM_PGAME, @@ -150,13 +150,14 @@ static const argp_option options[] = "For games and strategies, standard automata rendering " "options are supported (e.g., see ltl2tgba --dot). 
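Since the formula is no longer saved by default, a post-processing
script that needs it has to recover it from the "source" column (of
the form FILENAME:LINE).  The helper below is only an illustration,
not part of the patch; it assumes a plain one-formula-per-line input
file that is still available.

    # Illustration only: recover the formula behind a CSV row from its
    # "source" column (FILENAME:LINE), for plain one-formula-per-line
    # input files.
    def formula_of(source):
        filename, line = source.rsplit(":", 1)
        if filename == "-":
            raise ValueError("cannot re-read standard input")
        with open(filename) as f:
            return f.readlines()[int(line) - 1].strip()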
For AIG circuit, " "use (h) for horizontal and (v) for vertical layouts.", 0 }, - { "csv", OPT_CSV, "[>>]FILENAME", OPTION_ARG_OPTIONAL, + { "csv", OPT_CSV_WITHOUT_FORMULA, "[>>]FILENAME", OPTION_ARG_OPTIONAL, "output statistics as CSV in FILENAME or on standard output " "(if '>>' is used to request append mode, the header line is " "not output)", 0 }, - { "csv-without-formula", OPT_CSV_NO_FORMULA, "[>>]FILENAME", - OPTION_ARG_OPTIONAL, "like --csv, but without 'fomula' column", 0 }, - { "csv-no-formula", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, + { "csv-without-formula", 0, nullptr, OPTION_ALIAS, nullptr, 0 }, + { "csv-with-formula", OPT_CSV_WITH_FORMULA, "[>>]FILENAME", + OPTION_ARG_OPTIONAL, + "like --csv, but with an additional 'fomula' column", 0 }, { "hide-status", OPT_HIDE, nullptr, 0, "Hide the REALIZABLE or UNREALIZABLE line. (Hint: exit status " "is enough of an indication.)", 0 }, @@ -1051,11 +1052,11 @@ parse_opt(int key, char *arg, struct argp_state *) case OPT_BYPASS: opt_bypass = XARGMATCH("--bypass", arg, bypass_args, bypass_values); break; - case OPT_CSV: + case OPT_CSV_WITH_FORMULA: opt_csv = arg ? arg : "-"; opt_csv_with_formula = true; break; - case OPT_CSV_NO_FORMULA: + case OPT_CSV_WITHOUT_FORMULA: opt_csv = arg ? arg : "-"; opt_csv_with_formula = false; break; diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index 82a3399c6..f9d5554d0 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -292,9 +292,7 @@ ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot :END: For benchmarking purpose, the =--csv= option can be used to record -intermediate statistics about the resolution. The =--csv= option will -also save the formula into the CSV file, which can therefore become -very large. The variant =--csv-without-formula= is usually enough. +intermediate statistics about the resolution. For instance the following command tests the realizability of the 23 demonstration specifications from [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily 1.0.2]] while saving some @@ -303,8 +301,7 @@ keep in mind that Lily uses Moore's semantics, while =ltlsynt= uses Mealy's semantics.) 
#+BEGIN_SRC sh :results verbatim :exports code :epilogue true - genltl --lily-patterns | - ltlsynt --algo=acd -q --csv-without-formula=bench.csv + genltl --lily-patterns | ltlsynt --algo=acd -q --csv=bench.csv #+END_SRC #+RESULTS: #+begin_example @@ -351,29 +348,29 @@ s/,/|/g #+RESULTS: | source | subspecs | algo | split | total_time | sum_trans_time | sum_split_time | sum_todpa_time | sum_solve_time | sum_strat2aut_time | realizable | max_trans_states | max_trans_edges | max_trans_colors | max_trans_ap | max_game_states | max_game_colors | max_strat_states | max_strat_edges | sum_strat_states | sum_strat_edges | max_simpl_strat_states | max_simpl_strat_edges | sum_simpl_strat_states | sum_simpl_strat_edges | |--------+----------+------+-------+-------------+----------------+----------------+----------------+----------------+--------------------+------------+------------------+-----------------+------------------+--------------+-----------------+-----------------+------------------+-----------------+------------------+-----------------+------------------------+-----------------------+------------------------+-----------------------| -| -:1 | 2 | acd | auto | 0.000327418 | 0.000135325 | 1.5128e-05 | 1.543e-05 | 6.171e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | -| -:2 | 2 | acd | auto | 0.00020147 | 0.000117201 | 9.227e-06 | 1.2634e-05 | 3.607e-06 | 6.642e-06 | 0 | 5 | 8 | 0 | 1 | 10 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | -| -:3 | 1 | acd | auto | 0.000487781 | 0.000360129 | 2.8053e-05 | 1.1211e-05 | 1.0851e-05 | 5.741e-06 | 1 | 12 | 46 | 1 | 3 | 26 | 1 | 5 | 5 | 5 | 5 | 2 | 4 | 2 | 4 | -| -:4 | 1 | acd | auto | 0.000491777 | 0.000356242 | 3.3032e-05 | 1.5269e-05 | 1.0379e-05 | 6.683e-06 | 1 | 15 | 62 | 1 | 3 | 33 | 1 | 6 | 6 | 6 | 6 | 3 | 7 | 3 | 7 | -| -:5 | 1 | acd | auto | 0.000476078 | 0.000314904 | 4.4103e-05 | 1.5029e-05 | 1.2133e-05 | 1.4237e-05 | 1 | 20 | 88 | 1 | 3 | 47 | 1 | 8 | 9 | 8 | 9 | 6 | 17 | 6 | 17 | -| -:6 | 1 | acd | auto | 0.000486699 | 0.000300587 | 4.4013e-05 | 1.6531e-05 | 1.3766e-05 | 1.57e-05 | 1 | 24 | 111 | 1 | 3 | 55 | 1 | 11 | 12 | 11 | 12 | 7 | 21 | 7 | 21 | -| -:7 | 1 | acd | auto | 0.000394895 | 0.00024404 | 2.7942e-05 | 9.317e-06 | 1.2443e-05 | 7.344e-06 | 1 | 11 | 38 | 1 | 3 | 26 | 1 | 7 | 8 | 7 | 8 | 6 | 14 | 6 | 14 | -| -:8 | 1 | acd | auto | 1.3125e-05 | 1.293e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:9 | 1 | acd | auto | 0.000309784 | 0.000223411 | 1.3456e-05 | 1.9046e-05 | 8.466e-06 | 3.968e-06 | 1 | 6 | 19 | 2 | 2 | 16 | 2 | 2 | 3 | 2 | 3 | 2 | 3 | 2 | 3 | -| -:10 | 1 | acd | auto | 1.4206e-05 | 6.81e-07 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:11 | 1 | acd | auto | 2.8453e-05 | 9.968e-06 | 2.504e-06 | 2.114e-06 | 2.295e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | -| -:12 | 1 | acd | auto | 1.3826e-05 | 8.81e-07 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:13 | 1 | acd | auto | 0.000124284 | 7.6274e-05 | 6.192e-06 | 4.869e-06 | 3.066e-06 | 3.196e-06 | 1 | 2 | 3 | 1 | 2 | 5 | 1 | 2 | 2 | 2 | 2 | 1 | 2 | 1 | 2 | -| -:14 | 1 | acd | auto | 0.000184107 | 0.000122141 | 6.412e-06 | 8.275e-06 | 3.567e-06 | 2.725e-06 | 1 | 1 | 3 | 2 | 2 | 4 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | -| -:15 | 1 | acd | auto | 0.00042922 | 0.000279297 | 3.7861e-05 | 1.9096e-05 | 1.057e-05 | 6.933e-06 | 1 | 8 | 40 | 2 | 4 | 30 | 1 | 7 | 12 | 7 | 12 | 5 | 13 | 5 | 13 | -| -:16 | 1 | acd | auto | 0.0015915 | 
0.00103173 | 0.000202432 | 5.328e-05 | 3.6118e-05 | 1.8925e-05 | 1 | 22 | 225 | 3 | 6 | 103 | 1 | 22 | 40 | 22 | 40 | 17 | 71 | 17 | 71 | -| -:17 | 1 | acd | auto | 0.000271983 | 0.000184298 | 8.636e-06 | 9.919e-06 | 3.988e-06 | 3.046e-06 | 1 | 1 | 4 | 3 | 3 | 6 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | -| -:18 | 1 | acd | auto | 0.000380097 | 0.000274818 | 1.1502e-05 | 1.1481e-05 | 4.098e-06 | 3.366e-06 | 1 | 1 | 5 | 4 | 4 | 8 | 1 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | -| -:19 | 1 | acd | auto | 0.000263427 | 0.000170873 | 1.4458e-05 | 1.6912e-05 | 7.214e-06 | 2.835e-06 | 1 | 4 | 15 | 2 | 3 | 11 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | -| -:20 | 1 | acd | auto | 0.00668276 | 0.00504907 | 0.000916819 | 0.000353497 | 0.000170251 | 9.368e-06 | 1 | 311 | 3488 | 2 | 5 | 1002 | 2 | 10 | 10 | 10 | 10 | 6 | 12 | 6 | 12 | -| -:21 | 1 | acd | auto | 0.0414155 | 0.0392309 | 0.000744745 | 5.4213e-05 | 8.4029e-05 | 7.5533e-05 | 1 | 75 | 546 | 1 | 8 | 371 | 1 | 74 | 228 | 74 | 228 | 71 | 339 | 71 | 339 | -| -:22 | 1 | acd | auto | 0.00126613 | 0.000785011 | 0.000112663 | 3.5397e-05 | 3.5998e-05 | 1.7854e-05 | 1 | 30 | 161 | 2 | 4 | 86 | 1 | 22 | 25 | 22 | 25 | 15 | 67 | 15 | 67 | -| -:23 | 1 | acd | auto | 0.000305085 | 0.000213032 | 1.061e-05 | 1.0009e-05 | 6.752e-06 | 5.23e-06 | 1 | 7 | 16 | 1 | 2 | 17 | 1 | 5 | 6 | 5 | 6 | 3 | 6 | 3 | 6 | +| -:1 | 2 | acd | auto | 0.000645823 | 0.000271534 | 2.6961e-05 | 2.8614e-05 | 1.0931e-05 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:2 | 2 | acd | auto | 0.000560151 | 0.000345114 | 2.4457e-05 | 3.6228e-05 | 9.639e-06 | 1.1773e-05 | 0 | 5 | 8 | 0 | 1 | 10 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | +| -:3 | 1 | acd | auto | 0.0013147 | 0.000951121 | 8.1794e-05 | 3.2291e-05 | 2.9255e-05 | 1.5389e-05 | 1 | 12 | 46 | 1 | 3 | 26 | 1 | 5 | 5 | 5 | 5 | 2 | 4 | 2 | 4 | +| -:4 | 1 | acd | auto | 0.00131727 | 0.000944167 | 9.2395e-05 | 3.6679e-05 | 3.0989e-05 | 1.8685e-05 | 1 | 15 | 62 | 1 | 3 | 33 | 1 | 6 | 6 | 6 | 6 | 3 | 7 | 3 | 7 | +| -:5 | 1 | acd | auto | 0.00137646 | 0.000919601 | 0.000126099 | 4.3342e-05 | 3.4295e-05 | 4.1769e-05 | 1 | 20 | 88 | 1 | 3 | 47 | 1 | 8 | 9 | 8 | 9 | 6 | 17 | 6 | 17 | +| -:6 | 1 | acd | auto | 0.00140189 | 0.000877561 | 0.000128823 | 4.8351e-05 | 3.9455e-05 | 4.5746e-05 | 1 | 24 | 111 | 1 | 3 | 55 | 1 | 11 | 12 | 11 | 12 | 7 | 21 | 7 | 21 | +| -:7 | 1 | acd | auto | 0.00109549 | 0.000722327 | 7.5212e-05 | 2.8033e-05 | 2.3525e-05 | 1.6241e-05 | 1 | 11 | 38 | 1 | 3 | 26 | 1 | 7 | 8 | 7 | 8 | 6 | 14 | 6 | 14 | +| -:8 | 1 | acd | auto | 3.4956e-05 | 2.364e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:9 | 1 | acd | auto | 0.000963203 | 0.00068121 | 4.9574e-05 | 5.1888e-05 | 2.8173e-05 | 1.1412e-05 | 1 | 6 | 19 | 2 | 2 | 16 | 2 | 2 | 3 | 2 | 3 | 2 | 3 | 2 | 3 | +| -:10 | 1 | acd | auto | 5.5886e-05 | 4.098e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:11 | 1 | acd | auto | 0.000113636 | 3.7351e-05 | 1.5379e-05 | 1.1382e-05 | 6.893e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:12 | 1 | acd | auto | 4.6338e-05 | 2.535e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | +| -:13 | 1 | acd | auto | 0.000361985 | 0.000218364 | 1.7153e-05 | 1.3826e-05 | 8.587e-06 | 8.576e-06 | 1 | 2 | 3 | 1 | 2 | 5 | 1 | 2 | 2 | 2 | 2 | 1 | 2 | 1 | 2 | +| -:14 | 1 | acd | auto | 0.000529893 | 0.000350774 | 1.8105e-05 | 2.3745e-05 | 1.0009e-05 | 7.585e-06 | 1 | 1 | 3 | 2 | 2 | 4 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | +| 
-:15 | 1 | acd | auto | 0.0011819 | 0.000769016 | 0.000107514 | 5.5094e-05 | 3.1399e-05 | 1.9367e-05 | 1 | 8 | 40 | 2 | 4 | 30 | 1 | 7 | 12 | 7 | 12 | 5 | 13 | 5 | 13 | +| -:16 | 1 | acd | auto | 0.00427915 | 0.00279572 | 0.000574768 | 0.000156947 | 0.000103396 | 5.4894e-05 | 1 | 22 | 225 | 3 | 6 | 103 | 1 | 22 | 40 | 22 | 40 | 17 | 71 | 17 | 71 | +| -:17 | 1 | acd | auto | 0.000811907 | 0.000552656 | 2.4486e-05 | 2.9295e-05 | 1.1221e-05 | 8.436e-06 | 1 | 1 | 4 | 3 | 3 | 6 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | +| -:18 | 1 | acd | auto | 0.00109892 | 0.000793111 | 3.2972e-05 | 3.3543e-05 | 1.1472e-05 | 9.668e-06 | 1 | 1 | 5 | 4 | 4 | 8 | 1 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | +| -:19 | 1 | acd | auto | 0.000800405 | 0.00051217 | 4.2461e-05 | 5.0525e-05 | 2.4046e-05 | 1.2293e-05 | 1 | 4 | 15 | 2 | 3 | 11 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | +| -:20 | 1 | acd | auto | 0.0196147 | 0.0149021 | 0.00268845 | 0.00104262 | 0.000504074 | 2.5819e-05 | 1 | 311 | 3488 | 2 | 5 | 1002 | 2 | 10 | 10 | 10 | 10 | 6 | 12 | 6 | 12 | +| -:21 | 1 | acd | auto | 0.119126 | 0.113659 | 0.00162097 | 0.000147229 | 0.000243901 | 0.000215418 | 1 | 75 | 546 | 1 | 8 | 371 | 1 | 74 | 228 | 74 | 228 | 71 | 339 | 71 | 339 | +| -:22 | 1 | acd | auto | 0.00351029 | 0.00222552 | 0.000303284 | 0.00010043 | 0.000104838 | 5.3962e-05 | 1 | 30 | 161 | 2 | 4 | 86 | 1 | 22 | 25 | 22 | 25 | 15 | 67 | 15 | 67 | +| -:23 | 1 | acd | auto | 0.000890897 | 0.000617519 | 3.165e-05 | 2.9737e-05 | 2.0017e-05 | 1.5259e-05 | 1 | 7 | 16 | 1 | 2 | 17 | 1 | 5 | 6 | 5 | 6 | 3 | 6 | 3 | 6 | The names of the columns should be mostly self explanatory. The decomposition of the specification into multiple sub-specifications @@ -387,8 +384,9 @@ statistics are gathered: - =source=: location of the specification in the form FILENAME:LINE (FILENAME is =-= when reading from standard input as in the above example). -- =formula= (if requested): the actual LTL formula used for the - specification +- =formula= (if requested with option =--csv-with-formula=): is the + actual LTL formula used for the specification, is usually makes the + CSV file very large - =subspecs=: the number of sub-specifications resulting from the decomposition - =algo=: the name of the approach used to construct game, as diff --git a/tests/core/ltlsynt2.test b/tests/core/ltlsynt2.test index 18a2c9ab3..7fe4e86e3 100755 --- a/tests/core/ltlsynt2.test +++ b/tests/core/ltlsynt2.test @@ -31,7 +31,8 @@ i1 <-> F(o1 xor o2) F(i1) <-> G(o2) EOF -ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q --csv=out.csv &&\ +ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q \ + --csv-with-formula=out.csv &&\ exit 2 test $? -eq 1 || exit 2 @@ -68,7 +69,7 @@ diff filtered.csv expected # ltlfilt should be able to read the second column mv filtered.csv input.csv -ltlsynt --ins=i1,i2 -F input.csv/-2 --csv=out.csv -q && exit 2 +ltlsynt --ins=i1,i2 -F input.csv/-2 --csv-with-formula=out.csv -q && exit 2 test $? -eq 1 $PYTHON test.py cat >expected <input.csv -ltlsynt -F input.csv/-2 --csv=out.csv -q +ltlsynt -F input.csv/-2 --csv-with-formula=out.csv -q $PYTHON test.py cat >expected < o2),lar,1,2 EOF diff filtered.csv expected -ltlsynt -F input.csv/-2 --csv-without-formula=out.csv -q +ltlsynt -F input.csv/-2 --csv=out.csv -q cut out.csv -d, -f1,2,3 >filtered.csv cat >expected < Date: Mon, 9 Sep 2024 17:02:49 +0200 Subject: [PATCH 493/606] ltlsynt: -q should also hide status and AIG output * bin/ltlsynt.cc: Honnor -q properly. * doc/org/ltlsynt.org, tests/core/ltlsynt.test: Adjust. 
* NEWS: Mention this bug. --- NEWS | 4 ++ bin/ltlsynt.cc | 48 ++++++++--------- doc/org/ltlsynt.org | 112 +++++++++++++++++----------------------- tests/core/ltlsynt.test | 2 +- 4 files changed, 77 insertions(+), 89 deletions(-) diff --git a/NEWS b/NEWS index 91c1979cb..5465c6b67 100644 --- a/NEWS +++ b/NEWS @@ -105,6 +105,10 @@ New in spot 2.12.0.dev (not yet released) - "ltlsynt ... --print-game --dot=ARGS" was ignoring ARGS. + - "ltlsynt --aiger -q ..." was still printing the realizability + status and the AIG circuit; it now does the job silently as + requested. + New in spot 2.12 (2024-05-16) Build: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 4522e4a88..691995b7b 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -47,7 +47,8 @@ enum { - OPT_ALGO = 256, + OPT_AIGER = 256, + OPT_ALGO, OPT_BYPASS, OPT_CSV_WITH_FORMULA, OPT_CSV_WITHOUT_FORMULA, @@ -61,7 +62,6 @@ enum OPT_PART_FILE, OPT_POLARITY, OPT_PRINT, - OPT_PRINT_AIGER, OPT_PRINT_HOA, OPT_REAL, OPT_SIMPLIFY, @@ -134,7 +134,7 @@ static const argp_option options[] = "print the parity game in the HOA format, do not solve it", 0 }, { "realizability", OPT_REAL, nullptr, 0, "realizability only, do not compute a winning strategy", 0 }, - { "aiger", OPT_PRINT_AIGER, "ite|isop|both[+ud][+dc]" + { "aiger", OPT_AIGER, "ite|isop|both[+ud][+dc]" "[+sub0|sub1|sub2]", OPTION_ARG_OPTIONAL, "encode the winning strategy as an AIG circuit and print it in AIGER" " format. The first word indicates the encoding to used: \"ite\" for " @@ -193,7 +193,7 @@ static bool opt_print_hoa = false; static const char* opt_print_hoa_args = nullptr; static bool opt_real = false; static bool opt_do_verify = false; -static const char* opt_print_aiger = nullptr; +static const char* opt_aiger = nullptr; static const char* opt_dot_arg = nullptr; static bool opt_dot = false; static spot::synthesis_info* gi; @@ -383,7 +383,7 @@ namespace out << ",sum_solve_time"; if (!opt_real) out << ",sum_strat2aut_time"; - if (opt_print_aiger) + if (opt_aiger) out << ",aig_time"; out << ",realizable"; //-1: Unknown, 0: Unreal, 1: Real } @@ -400,7 +400,7 @@ namespace if (!was_game) out << ",sum_simpl_strat_states,sum_simpl_strat_edges"; } - if (opt_print_aiger) + if (opt_aiger) out << ",aig_latches,aig_gates"; out << '\n'; } @@ -439,7 +439,7 @@ namespace out << ',' << bv->sum_solve_time; if (!opt_real) out << ',' << bv->sum_strat2aut_time; - if (opt_print_aiger) + if (opt_aiger) out << ',' << bv->aig_time; out << ',' << bv->realizable; } @@ -463,7 +463,7 @@ namespace out << ',' << bv->sum_simpl_strat_states << ',' << bv->sum_simpl_strat_edges; } - if (opt_print_aiger) + if (opt_aiger) out << ',' << bv->aig_latches << ',' << bv->aig_gates; out << '\n'; @@ -668,8 +668,7 @@ namespace spot::solved_game_to_mealy(arena, *gi); // Keep the machine split for aiger // else -> separated - spot::simplify_mealy_here(ml.mealy_like, *gi, - opt_print_aiger); + spot::simplify_mealy_here(ml.mealy_like, *gi, opt_aiger); ml.glob_cond = bddfalse; mealy_machines.push_back(ml); } @@ -686,8 +685,7 @@ namespace { // Keep the machine split for aiger // else -> separated - spot::simplify_mealy_here(m_like.mealy_like, *gi, - opt_print_aiger); + spot::simplify_mealy_here(m_like.mealy_like, *gi, opt_aiger); } SPOT_FALLTHROUGH; } @@ -728,14 +726,13 @@ namespace automaton_printer printer; spot::process_timer timer_printer_dummy; - if (opt_print_aiger) + if (opt_aiger) { spot::stopwatch sw2; if (gi->bv) sw2.start(); - saig = spot::mealy_machines_to_aig(mealy_machines, opt_print_aiger, - input_aps, - sub_outs_str, 
rs); + saig = spot::mealy_machines_to_aig(mealy_machines, opt_aiger, + input_aps, sub_outs_str, rs); if (gi->bv) { gi->bv->aig_time = sw2.stop(); @@ -752,7 +749,7 @@ namespace } if (opt_dot) spot::print_dot(std::cout, saig, opt_dot_arg); - else + else if (automaton_format != Quiet) spot::print_aiger(std::cout, saig) << '\n'; } else @@ -966,16 +963,16 @@ namespace spot::twa_graph_ptr mealy_like = spot::solved_game_to_mealy(arena, *gi); // Keep the machine split for aiger otherwise, separate it. - spot::simplify_mealy_here(mealy_like, *gi, opt_print_aiger); + spot::simplify_mealy_here(mealy_like, *gi, opt_aiger); automaton_printer printer; spot::process_timer timer_printer_dummy; - if (opt_print_aiger) + if (opt_aiger) { if (gi->bv) sw_local.start(); spot::aig_ptr saig = - spot::mealy_machine_to_aig(mealy_like, opt_print_aiger); + spot::mealy_machine_to_aig(mealy_like, opt_aiger); if (gi->bv) { gi->bv->aig_time = sw_local.stop(); @@ -990,7 +987,8 @@ namespace << " latches and " << gi->bv->aig_gates << " gates\n"; } - spot::print_aiger(std::cout, saig) << '\n'; + if (automaton_format != Quiet) + spot::print_aiger(std::cout, saig) << '\n'; } else { @@ -1102,8 +1100,8 @@ parse_opt(int key, char *arg, struct argp_state *) opt_print_hoa = true; opt_print_hoa_args = arg; break; - case OPT_PRINT_AIGER: - opt_print_aiger = arg ? arg : "ite"; + case OPT_AIGER: + opt_aiger = arg ? arg : "ite"; break; case OPT_REAL: opt_real = true; @@ -1159,6 +1157,10 @@ main(int argc, char **argv) check_no_formula(); process_io_options(); + // -q implies --hide-status + if (automaton_format == Quiet) + show_status = false; + ltl_processor processor; if (int res = processor.run(); res == 0 || res == 1) { diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index f9d5554d0..8f4876b17 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -294,41 +294,17 @@ ltlsynt -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot For benchmarking purpose, the =--csv= option can be used to record intermediate statistics about the resolution. -For instance the following command tests the realizability of the 23 -demonstration specifications from [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily 1.0.2]] while saving some -statistics in =bench.csv=. (If you compare our results with theirs, -keep in mind that Lily uses Moore's semantics, while =ltlsynt= uses -Mealy's semantics.) +For instance the following command builds controllers (when they +exist) for the 23 demonstration specifications from [[http://www.ist.tugraz.at/staff/jobstmann/lily/][Lily 1.0.2]] while +saving some statistics in =bench.csv=. (If you compare our results +with theirs, keep in mind that Lily uses Moore's semantics, while +=ltlsynt= uses Mealy's semantics.) We use =-q= to hide the +constructed controllers, as we are only interested in the statistics. 
#+BEGIN_SRC sh :results verbatim :exports code :epilogue true - genltl --lily-patterns | ltlsynt --algo=acd -q --csv=bench.csv + genltl --lily-patterns | ltlsynt --algo=acd --aiger -q --csv=bench.csv #+END_SRC #+RESULTS: -#+begin_example -UNREALIZABLE -UNREALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -UNREALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -REALIZABLE -#+end_example After execution, =bench.csv= contains a table like the following: @@ -346,31 +322,37 @@ s/,/|/g #+ATTR_HTML: :class csv-table #+RESULTS: -| source | subspecs | algo | split | total_time | sum_trans_time | sum_split_time | sum_todpa_time | sum_solve_time | sum_strat2aut_time | realizable | max_trans_states | max_trans_edges | max_trans_colors | max_trans_ap | max_game_states | max_game_colors | max_strat_states | max_strat_edges | sum_strat_states | sum_strat_edges | max_simpl_strat_states | max_simpl_strat_edges | sum_simpl_strat_states | sum_simpl_strat_edges | -|--------+----------+------+-------+-------------+----------------+----------------+----------------+----------------+--------------------+------------+------------------+-----------------+------------------+--------------+-----------------+-----------------+------------------+-----------------+------------------+-----------------+------------------------+-----------------------+------------------------+-----------------------| -| -:1 | 2 | acd | auto | 0.000645823 | 0.000271534 | 2.6961e-05 | 2.8614e-05 | 1.0931e-05 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | -| -:2 | 2 | acd | auto | 0.000560151 | 0.000345114 | 2.4457e-05 | 3.6228e-05 | 9.639e-06 | 1.1773e-05 | 0 | 5 | 8 | 0 | 1 | 10 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | -| -:3 | 1 | acd | auto | 0.0013147 | 0.000951121 | 8.1794e-05 | 3.2291e-05 | 2.9255e-05 | 1.5389e-05 | 1 | 12 | 46 | 1 | 3 | 26 | 1 | 5 | 5 | 5 | 5 | 2 | 4 | 2 | 4 | -| -:4 | 1 | acd | auto | 0.00131727 | 0.000944167 | 9.2395e-05 | 3.6679e-05 | 3.0989e-05 | 1.8685e-05 | 1 | 15 | 62 | 1 | 3 | 33 | 1 | 6 | 6 | 6 | 6 | 3 | 7 | 3 | 7 | -| -:5 | 1 | acd | auto | 0.00137646 | 0.000919601 | 0.000126099 | 4.3342e-05 | 3.4295e-05 | 4.1769e-05 | 1 | 20 | 88 | 1 | 3 | 47 | 1 | 8 | 9 | 8 | 9 | 6 | 17 | 6 | 17 | -| -:6 | 1 | acd | auto | 0.00140189 | 0.000877561 | 0.000128823 | 4.8351e-05 | 3.9455e-05 | 4.5746e-05 | 1 | 24 | 111 | 1 | 3 | 55 | 1 | 11 | 12 | 11 | 12 | 7 | 21 | 7 | 21 | -| -:7 | 1 | acd | auto | 0.00109549 | 0.000722327 | 7.5212e-05 | 2.8033e-05 | 2.3525e-05 | 1.6241e-05 | 1 | 11 | 38 | 1 | 3 | 26 | 1 | 7 | 8 | 7 | 8 | 6 | 14 | 6 | 14 | -| -:8 | 1 | acd | auto | 3.4956e-05 | 2.364e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:9 | 1 | acd | auto | 0.000963203 | 0.00068121 | 4.9574e-05 | 5.1888e-05 | 2.8173e-05 | 1.1412e-05 | 1 | 6 | 19 | 2 | 2 | 16 | 2 | 2 | 3 | 2 | 3 | 2 | 3 | 2 | 3 | -| -:10 | 1 | acd | auto | 5.5886e-05 | 4.098e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:11 | 1 | acd | auto | 0.000113636 | 3.7351e-05 | 1.5379e-05 | 1.1382e-05 | 6.893e-06 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | -| -:12 | 1 | acd | auto | 4.6338e-05 | 2.535e-06 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | -| -:13 | 1 | acd | auto | 0.000361985 | 0.000218364 | 1.7153e-05 | 1.3826e-05 | 8.587e-06 | 8.576e-06 | 1 | 2 | 3 | 1 | 2 | 5 | 1 | 2 | 
2 | 2 | 2 | 1 | 2 | 1 | 2 | -| -:14 | 1 | acd | auto | 0.000529893 | 0.000350774 | 1.8105e-05 | 2.3745e-05 | 1.0009e-05 | 7.585e-06 | 1 | 1 | 3 | 2 | 2 | 4 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | -| -:15 | 1 | acd | auto | 0.0011819 | 0.000769016 | 0.000107514 | 5.5094e-05 | 3.1399e-05 | 1.9367e-05 | 1 | 8 | 40 | 2 | 4 | 30 | 1 | 7 | 12 | 7 | 12 | 5 | 13 | 5 | 13 | -| -:16 | 1 | acd | auto | 0.00427915 | 0.00279572 | 0.000574768 | 0.000156947 | 0.000103396 | 5.4894e-05 | 1 | 22 | 225 | 3 | 6 | 103 | 1 | 22 | 40 | 22 | 40 | 17 | 71 | 17 | 71 | -| -:17 | 1 | acd | auto | 0.000811907 | 0.000552656 | 2.4486e-05 | 2.9295e-05 | 1.1221e-05 | 8.436e-06 | 1 | 1 | 4 | 3 | 3 | 6 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | -| -:18 | 1 | acd | auto | 0.00109892 | 0.000793111 | 3.2972e-05 | 3.3543e-05 | 1.1472e-05 | 9.668e-06 | 1 | 1 | 5 | 4 | 4 | 8 | 1 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | -| -:19 | 1 | acd | auto | 0.000800405 | 0.00051217 | 4.2461e-05 | 5.0525e-05 | 2.4046e-05 | 1.2293e-05 | 1 | 4 | 15 | 2 | 3 | 11 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | -| -:20 | 1 | acd | auto | 0.0196147 | 0.0149021 | 0.00268845 | 0.00104262 | 0.000504074 | 2.5819e-05 | 1 | 311 | 3488 | 2 | 5 | 1002 | 2 | 10 | 10 | 10 | 10 | 6 | 12 | 6 | 12 | -| -:21 | 1 | acd | auto | 0.119126 | 0.113659 | 0.00162097 | 0.000147229 | 0.000243901 | 0.000215418 | 1 | 75 | 546 | 1 | 8 | 371 | 1 | 74 | 228 | 74 | 228 | 71 | 339 | 71 | 339 | -| -:22 | 1 | acd | auto | 0.00351029 | 0.00222552 | 0.000303284 | 0.00010043 | 0.000104838 | 5.3962e-05 | 1 | 30 | 161 | 2 | 4 | 86 | 1 | 22 | 25 | 22 | 25 | 15 | 67 | 15 | 67 | -| -:23 | 1 | acd | auto | 0.000890897 | 0.000617519 | 3.165e-05 | 2.9737e-05 | 2.0017e-05 | 1.5259e-05 | 1 | 7 | 16 | 1 | 2 | 17 | 1 | 5 | 6 | 5 | 6 | 3 | 6 | 3 | 6 | +| source | subspecs | algo | split | total_time | sum_trans_time | sum_split_time | sum_todpa_time | sum_solve_time | sum_strat2aut_time | aig_time | realizable | max_trans_states | max_trans_edges | max_trans_colors | max_trans_ap | max_game_states | max_game_colors | max_strat_states | max_strat_edges | sum_strat_states | sum_strat_edges | max_simpl_strat_states | max_simpl_strat_edges | sum_simpl_strat_states | sum_simpl_strat_edges | aig_latches | aig_gates | +|--------+----------+------+-------+-------------+----------------+----------------+----------------+----------------+--------------------+-------------+------------+------------------+-----------------+------------------+--------------+-----------------+-----------------+------------------+-----------------+------------------+-----------------+------------------------+-----------------------+------------------------+-----------------------+-------------+-----------| +| -:1 | 2 | acd | auto | 0.000408974 | 0.000183978 | 1.9226e-05 | 1.8736e-05 | 7.654e-06 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:2 | 2 | acd | auto | 0.000297303 | 0.000176515 | 1.3345e-05 | 1.7834e-05 | 5.431e-06 | 8.075e-06 | 0 | 0 | 5 | 8 | 0 | 1 | 10 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 0 | 0 | +| -:3 | 1 | acd | auto | 0.000791278 | 0.000549861 | 4.4484e-05 | 1.7753e-05 | 1.6862e-05 | 8.436e-06 | 5.246e-05 | 1 | 12 | 46 | 1 | 3 | 26 | 1 | 5 | 5 | 5 | 5 | 2 | 2 | 2 | 2 | 1 | 0 | +| -:4 | 1 | acd | auto | 0.00078738 | 0.000526687 | 6.6005e-05 | 2.0329e-05 | 1.7083e-05 | 1.028e-05 | 3.678e-05 | 1 | 15 | 62 | 1 | 3 | 33 | 1 | 6 | 6 | 6 | 6 | 3 | 3 | 3 | 3 | 2 | 8 | +| -:5 | 1 | acd | auto | 0.000835672 | 0.000494376 | 6.9522e-05 | 2.3264e-05 | 1.8816e-05 | 2.2202e-05 | 8.9289e-05 | 1 | 20 | 88 | 1 | 3 | 
47 | 1 | 8 | 9 | 8 | 9 | 6 | 7 | 6 | 7 | 3 | 46 | +| -:6 | 1 | acd | auto | 0.000872972 | 0.000479157 | 7.4541e-05 | 2.64e-05 | 2.1601e-05 | 2.5038e-05 | 8.8497e-05 | 1 | 24 | 111 | 1 | 3 | 55 | 1 | 11 | 12 | 11 | 12 | 7 | 9 | 7 | 9 | 3 | 46 | +| -:7 | 1 | acd | auto | 0.000787119 | 0.000382314 | 4.238e-05 | 1.4988e-05 | 1.3055e-05 | 8.546e-06 | 0.000155214 | 1 | 11 | 38 | 1 | 3 | 26 | 1 | 7 | 8 | 7 | 8 | 6 | 7 | 6 | 7 | 3 | 29 | +| -:8 | 1 | acd | auto | 3.2521e-05 | 1.794e-06 | 0 | 0 | 0 | 0 | 1.052e-05 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | +| -:9 | 1 | acd | auto | 0.000505467 | 0.000354952 | 2.106e-05 | 2.7602e-05 | 1.3225e-05 | 6.282e-06 | 1.8395e-05 | 1 | 6 | 19 | 2 | 2 | 16 | 2 | 2 | 3 | 2 | 3 | 2 | 3 | 2 | 3 | 1 | 1 | +| -:10 | 1 | acd | auto | 3.2231e-05 | 1.092e-06 | 0 | 0 | 0 | 0 | 1.0921e-05 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | +| -:11 | 1 | acd | auto | 4.1779e-05 | 1.5269e-05 | 3.847e-06 | 3.436e-06 | 3.737e-06 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| -:12 | 1 | acd | auto | 3.4015e-05 | 1.352e-06 | 0 | 0 | 0 | 0 | 1.2925e-05 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | +| -:13 | 1 | acd | auto | 0.000229304 | 0.000135867 | 9.848e-06 | 7.855e-06 | 4.819e-06 | 4.749e-06 | 1.2514e-05 | 1 | 2 | 3 | 1 | 2 | 5 | 1 | 2 | 2 | 2 | 2 | 1 | 2 | 1 | 2 | 0 | 0 | +| -:14 | 1 | acd | auto | 0.000310568 | 0.000199397 | 1.0069e-05 | 1.2905e-05 | 5.571e-06 | 3.888e-06 | 1.7733e-05 | 1 | 1 | 3 | 2 | 2 | 4 | 1 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 1 | 0 | +| -:15 | 1 | acd | auto | 0.00075019 | 0.000435724 | 6.0634e-05 | 2.9616e-05 | 1.6872e-05 | 1.075e-05 | 8.2957e-05 | 1 | 8 | 40 | 2 | 4 | 30 | 1 | 7 | 12 | 7 | 12 | 5 | 10 | 5 | 10 | 3 | 38 | +| -:16 | 1 | acd | auto | 0.00297522 | 0.00156048 | 0.000310197 | 8.474e-05 | 5.5796e-05 | 2.9766e-05 | 0.000558948 | 1 | 22 | 225 | 3 | 6 | 103 | 1 | 22 | 40 | 22 | 40 | 17 | 36 | 17 | 36 | 5 | 326 | +| -:17 | 1 | acd | auto | 0.000468838 | 0.000296471 | 1.3916e-05 | 1.5699e-05 | 6.362e-06 | 4.568e-06 | 2.9867e-05 | 1 | 1 | 4 | 3 | 3 | 6 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 2 | 3 | +| -:18 | 1 | acd | auto | 0.000630864 | 0.000437838 | 1.8946e-05 | 1.7714e-05 | 6.342e-06 | 5.139e-06 | 3.2902e-05 | 1 | 1 | 5 | 4 | 4 | 8 | 1 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 2 | 5 | +| -:19 | 1 | acd | auto | 0.000424403 | 0.00026955 | 2.2813e-05 | 2.641e-05 | 1.1281e-05 | 4.309e-06 | 1.5469e-05 | 1 | 4 | 15 | 2 | 3 | 11 | 2 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | +| -:20 | 1 | acd | auto | 0.0109 | 0.00806685 | 0.0014951 | 0.000575409 | 0.000282314 | 1.8305e-05 | 0.000173057 | 1 | 311 | 3488 | 2 | 5 | 1002 | 2 | 10 | 10 | 10 | 10 | 6 | 8 | 6 | 8 | 3 | 30 | +| -:21 | 1 | acd | auto | 0.0674067 | 0.0618205 | 0.00108741 | 8.1494e-05 | 0.000130147 | 0.00011593 | 0.00220405 | 1 | 75 | 546 | 1 | 8 | 371 | 1 | 74 | 228 | 74 | 228 | 71 | 213 | 71 | 213 | 7 | 1299 | +| -:22 | 1 | acd | auto | 0.00230346 | 0.00124588 | 0.000171444 | 5.5064e-05 | 5.4483e-05 | 2.7612e-05 | 0.000318924 | 1 | 30 | 161 | 2 | 4 | 86 | 1 | 22 | 25 | 22 | 25 | 15 | 19 | 15 | 19 | 4 | 194 | +| -:23 | 1 | acd | auto | 0.000509164 | 0.000336807 | 1.6551e-05 | 1.5599e-05 | 1.046e-05 | 8.035e-06 | 3.3092e-05 | 1 | 7 | 16 | 1 | 2 | 17 | 1 | 5 | 6 | 5 | 6 | 3 | 4 | 3 | 4 | 2 | 10 | + +The subset of columns output is adjusted according to the task +performed by =ltlsynt=. 
For instance with =--realizability=, the CSV +file will not include statistics about the winning strategies or the +AIG circuits. When reading a game with =--from-pgame=, columns giving +statistics about LTL translation will be omitted. The names of the columns should be mostly self explanatory. The decomposition of the specification into multiple sub-specifications @@ -396,14 +378,15 @@ statistics are gathered: - =total_time=: total time measured by =ltlsynt= to solve the problem once the problem has been loaded (parsing of the formula, conversion from TSLF, or parsing of a parity game are all excluded) -- =sum_trans_time=: sum of the translation time needed to obtain an +- =sum_trans_time=: sum of the translation time used to obtain an automaton from each subspecification. -- =sum_split_time=: sum of the times needed to split the automata -- =sum_todpa_time=: sum of the times needed to paritize the automata -- =sum_solve_time=: sum of the times needed to solve the game for each +- =sum_split_time=: sum of the times used to split the automata +- =sum_todpa_time=: sum of the times used to paritize the automata +- =sum_solve_time=: sum of the times used to solve the game for each subspecification -- =sum_strat2aut_time= sum of the time needed to extract the +- =sum_strat2aut_time=: sum of the time needed to extract the strategies +- =aig_time=: time used to encode all strategies into one AIG circuit - =realizable=: whether the specification is realizable - =max_trans_states,max_trans_edges,max_trans_colors,max_trans_ap=: Size of the largest automaton constructed for a subspecification. @@ -425,10 +408,9 @@ statistics are gathered: - =aig_gates,aig_latches=: Size of the AIG circuit, if requested. -In the table from the previous section some of the intermediate -processing times are listed as =0= (e.g., for input 8, 10, 12) because -the specifications can be found to be realizable trivially without -building any game. +In this example table, some of the intermediate processing times are +listed as =0= (e.g., for input 8, 10, 12) because the specifications +can be found to be realizable trivially without building any game. ** Verifying the output diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 4ac16a39a..f0cb242d2 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -267,7 +267,7 @@ for r in '' '--real'; do done) done for a in sd ds lar lar.old; do - test 1 = `grep -c ",.$a.," FILE` || exit 1 + test 1 = `grep -c ",$a," FILE` || exit 1 done # ltlsynt --algo=lar --ins=a --outs=b -f 'FGa <-> GF(c&a)' --print-pg --csv >out From 90fb7b1cd9ce4fa8a0095fdebc291482bbb0d764 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 11 Sep 2024 11:38:12 +0200 Subject: [PATCH 494/606] * doc/org/ltlmix.org: Fix example. --- doc/org/ltlmix.org | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index c62a649e5..d68cc937b 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -476,7 +476,7 @@ conflict. Therefore, increasing the number of atomic propositions to chose from may help to get more realizable formulas. 
#+BEGIN_SRC sh :exports both :epilogue true - genltl --lily=8..11 | ltlmix -A50,50 -C3 -n6 | ltlsynt -q + genltl --lily=8..11 | ltlmix -A50,50 -C3 -n6 | ltlsynt --realizability #+END_SRC #+RESULTS: From 99a622059c31d16fb294657b02186406523570f4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 12 Sep 2024 14:00:08 +0200 Subject: [PATCH 495/606] synthesis: fix handling of deadstates * spot/twaalgos/synthesis.cc: Remove a debuging print from the semisym code, and add an additional case in the fullysym code. * tests/core/ltlsynt.test: Add a some test case, and remove some bashism. --- spot/twaalgos/synthesis.cc | 14 +++++--------- tests/core/ltlsynt.test | 10 +++++----- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 5231f98ad..c064ccc93 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1386,6 +1386,8 @@ namespace spot { assert(std::holds_alternative(ccondin)); const bdd& ccond = std::get(ccondin); + if (ccond == bddfalse) + return; int clvl = ccond == bddtrue ? inIdx : bdd_var(ccond); if (clvl >= inIdx) { @@ -1468,17 +1470,11 @@ namespace spot for (const auto &e: aut->out(s)) enc_out_s |= encode_edge(e); // Switch to new ins and outs - // Can only be false if there is no outgoing edge - // In this case: Nothing to do - assert(enc_out_s != bddfalse - || (!(aut->out(s).begin()))); - if (enc_out_s == bddfalse) { - std::cerr << "Dead end state: " << s << '\n'; -#ifndef NDEBUG - print_hoa(std::cerr, aut); -#endif + // Can only be false if there is no outgoing edge + // In this case: Nothing to do + assert(!(aut->out(s).begin())); continue; } diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index f0cb242d2..d89ecd292 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -494,12 +494,12 @@ diff out exp for splitt in expl semisym fullysym auto do - res=$(ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" \ + # REALIZABLE + ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" \ --outs="o0,o1" --aiger=isop+ud --algo=lar --decompose=no \ - --simpl=no --splittype="$splitt" --realizability) - if [[ "$res" != "REALIZABLE" ]]; then - echo "Expected realizable" - fi + --simpl=no --splittype="$splitt" --realizability || exit 2 + # UNREALIZABLE + ltlsynt -f "Gi & Fo" --splittype="$splitt" --realizability && exit 2 done From 4ccdcb4a5bb64021833db0e9aab2c0d9083299a2 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 12 Sep 2024 17:28:17 +0200 Subject: [PATCH 496/606] tests: rewrite the syntimpl test * tests/core/syntimpl.cc: Rewrite to test multiple formulas at once, and test them with three different implication checks. * tests/core/syntimpl.test: Adjust the test to execute syntimpl only once. 
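
For reference, the rewritten driver takes a single file argument
instead of two formulas.  Each non-comment line holds two
comma-separated formulas (a comma that belongs to a formula can be
escaped by ending the field with a backslash, and lines starting with
'#' are skipped); anything after the second formula is ignored, so
the input file can double as the expected output.  For each pair, the
driver prints both formulas in negative normal form followed by the
0/1 results of syntactic_implication() and of the two
syntactic_implication_neg() variants.  An illustrative input file
(names and formulas chosen only as an example) could be:

  # pairs of formulas to check
  a,a | b
  F(a),F(a | b)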
--- tests/core/syntimpl.cc | 113 ++++++++++++++++------------------ tests/core/syntimpl.test | 127 ++++++++++++++++++--------------------- 2 files changed, 109 insertions(+), 131 deletions(-) diff --git a/tests/core/syntimpl.cc b/tests/core/syntimpl.cc index 65995898a..329c3e01d 100644 --- a/tests/core/syntimpl.cc +++ b/tests/core/syntimpl.cc @@ -18,85 +18,74 @@ #include "config.h" #include +#include +#include #include #include +#include #include -#include -#include #include +#include +#include static void syntax(char* prog) { - std::cerr << prog << " formula1 formula2?\n"; + std::cerr << prog << " file\n"; exit(2); } int main(int argc, char** argv) { - if (argc < 4) + if (argc != 2) syntax(argv[0]); - - int opt = atoi(argv[1]); - int exit_return = 0; - - { - auto ftmp1 = spot::parse_infix_psl(argv[2]); - - if (ftmp1.format_errors(std::cerr)) + std::ifstream input(argv[1]); + if (!input) + { + std::cerr << "failed to open " << argv[1] << '\n'; return 2; + } - auto ftmp2 = spot::parse_infix_psl(argv[3]); + spot::tl_simplifier* c = new spot::tl_simplifier; + std::string s; + unsigned line = 0; + while (std::getline(input, s)) + { + ++line; + std::cerr << line << ": " << s << '\n'; + if (s[0] == '#') // Skip comments + continue; - if (ftmp2.format_errors(std::cerr)) - return 2; - - spot::formula f1 = spot::negative_normal_form(ftmp1.f); - spot::formula f2 = spot::negative_normal_form(ftmp2.f); - - std::string f1s = spot::str_psl(f1); - std::string f2s = spot::str_psl(f2); - - spot::tl_simplifier* c = new spot::tl_simplifier; - - switch (opt) - { - case 0: - std::cout << "Test f1 < f2" << std::endl; - if (c->syntactic_implication(f1, f2)) - { - std::cout << f1s << " < " << f2s << '\n'; - exit_return = 1; - } - break; - - case 1: - std::cout << "Test !f1 < f2" << std::endl; - if (c->syntactic_implication_neg(f1, f2, false)) - { - std::cout << "!(" << f1s << ") < " << f2s << '\n'; - exit_return = 1; - } - break; - - case 2: - std::cout << "Test f1 < !f2" << std::endl; - if (c->syntactic_implication_neg(f1, f2, true)) - { - std::cout << f1s << " < !(" << f2s << ")\n"; - exit_return = 1; - } - break; - default: - break; - } - - f1.dump(std::cout) << '\n'; - f2.dump(std::cout) << '\n'; - - delete c; - } + spot::formula f[2]; + std::istringstream ss(s); + for (unsigned i = 0; i < 2; ++i) + { + std::string form; + if (!std::getline(ss, form, ',')) + { + std::cerr << "missing first formula\n"; + exit(2); + } + std::string tmp; + while (form.size() > 0 && form.back() == '\\' + && std::getline(ss, tmp, ',')) + { + form.back() = ','; + form += tmp; + } + auto pf = spot::parse_infix_psl(form); + if (pf.format_errors(std::cerr)) + return 2; + f[i] = spot::negative_normal_form(pf.f); + } + // ignore the rest of the line + std::cout << spot::str_psl(f[0]) << ',' << spot::str_psl(f[1]) << ',' + << c->syntactic_implication(f[0], f[1]) << ',' + << c->syntactic_implication_neg(f[0], f[1], false) << ',' + << c->syntactic_implication_neg(f[0], f[1], true) << '\n'; + } + delete c; assert(spot::fnode::instances_check()); - return exit_return; + return 0; } diff --git a/tests/core/syntimpl.test b/tests/core/syntimpl.test index 663672c4c..413b01802 100755 --- a/tests/core/syntimpl.test +++ b/tests/core/syntimpl.test @@ -22,74 +22,63 @@ . 
./defs || exit 1 -# -#GFa && GFb && FG(!a && !b) -run 1 ../syntimpl 0 'a' 'a | b' -run 1 ../syntimpl 0 'F(a)' 'F(a | b)' -run 1 ../syntimpl 0 'G(a)' 'G(a | b)' -run 1 ../syntimpl 0 'GF(a)' 'GF(a | b)' -run 1 ../syntimpl 0 'GF(a)' '!FG(!a && !b)' -run 1 ../syntimpl 0 'Xa' 'X(b U a)' -run 1 ../syntimpl 0 'XXa' 'XX(b U a)' +cat >input < output +diff input output From 77a17881a3222bd8604e5e33974a542351490638 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 17 Sep 2024 17:10:27 +0200 Subject: [PATCH 497/606] defrag_states: allow a permutation of state numbers * spot/misc/permute.hh: New file. * spot/misc/Makefile.am: Add it. * spot/graph/graph.hh, spot/twa/twagraph.cc, spot/twaalgos/randomize.cc: Use the new permute_vector() function. * spot/twa/twagraph.hh: Update documentation. * NEWS: Update. --- NEWS | 3 ++ spot/graph/graph.hh | 31 ++++++--------- spot/misc/Makefile.am | 1 + spot/misc/permute.hh | 81 ++++++++++++++++++++++++++++++++++++++ spot/twa/twagraph.cc | 37 +++-------------- spot/twa/twagraph.hh | 3 +- spot/twaalgos/randomize.cc | 8 ++-- 7 files changed, 108 insertions(+), 56 deletions(-) create mode 100644 spot/misc/permute.hh diff --git a/NEWS b/NEWS index 5465c6b67..0da353568 100644 --- a/NEWS +++ b/NEWS @@ -93,6 +93,9 @@ New in spot 2.12.0.dev (not yet released) any word accepted by A from state x satisfies V[x]. Related to Issue #591. + - twa_graph::defrag_states(num) no longer require num[i]≤i; num + can now describe a permutation of the state numbers. + Bug fixes: - Generating random formulas without any unary opertor would very diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 04d0a8421..3b43f751b 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -1446,8 +1447,7 @@ namespace spot /// dangling. /// /// \param newst A vector indicating how each state should be - /// renumbered. Use -1U to erase an unreachable state. All other - /// numbers are expected to satisfy newst[i] ≤ i for all i. + /// renumbered. Use -1U to erase an unreachable state. /// /// \param used_states the number of states used (after /// renumbering) @@ -1461,24 +1461,19 @@ namespace spot //std::cerr << "\nbefore defrag\n"; //dump_storage(std::cerr); - // Shift all states in states_, as indicated by newst. + // Permute all states in states_, as indicated by newst. + // This will put erased states after used_states. + permute_vector(states_, newst); unsigned send = states_.size(); - for (state s = 0; s < send; ++s) + for (state s = used_states; s < send; ++s) { - state dst = newst[s]; - if (dst == s) - continue; - if (dst == -1U) - { - // This is an erased state. Mark all its edges as - // dead (i.e., t.next_succ should point to t for each of - // them). - auto t = states_[s].succ; - while (t) - std::swap(t, edges_[t].next_succ); - continue; - } - states_[dst] = std::move(states_[s]); + // This is an erased state. Mark all its edges as + // dead (i.e., t.next_succ should point to t for each of + // them). 
+ auto t = states_[s].succ; + while (t) + std::swap(t, edges_[t].next_succ); + continue; } states_.resize(used_states); diff --git a/spot/misc/Makefile.am b/spot/misc/Makefile.am index 747153500..085ac55a0 100644 --- a/spot/misc/Makefile.am +++ b/spot/misc/Makefile.am @@ -46,6 +46,7 @@ misc_HEADERS = \ memusage.hh \ mspool.hh \ optionmap.hh \ + permute.hh \ position.hh \ random.hh \ satsolver.hh \ diff --git a/spot/misc/permute.hh b/spot/misc/permute.hh new file mode 100644 index 000000000..1bcd39d86 --- /dev/null +++ b/spot/misc/permute.hh @@ -0,0 +1,81 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include +#include + +namespace spot +{ + + // Reorder `data` according the permutation in `indices` by + // following the cycles in the permutation. Additionally, if an + // index is -1, the corresponding value is moved to the end of the + // data. + // + // After running this algorithm, data[i] should be moved to + // data[indices[i]] or to the end of the data if indices[i] == -1U. + // + // If indices.size() != data.size(), the minimum of both size is + // used, and indices are expected to stay in this range. + template + void permute_vector(std::vector& data, + const std::vector& indices) + { + unsigned n = std::min(data.size(), indices.size()); + if (n == 0) + return; + std::vector done(n, false); + unsigned end_of_data = n - 1; // index for the first -1 + for (unsigned i = 0; i < n; ++i) + { + if (done[i] || indices[i] == i) + continue; // already done or identity + unsigned next = indices[i]; + if (next == -1U) + { + next = end_of_data--; + if (next == i) + continue; + } + values tmp = std::move(data[i]); + while (next != i) + { + SPOT_ASSERT(next < n); + if (done[next]) + throw std::invalid_argument + ("permute_vector: invalid permutation"); + // this is a swap, but std::swap will not work + // when data[next] is a bool_reference. + values tmp2 = std::move(data[next]); + data[next] = std::move(tmp); + tmp = std::move(tmp2); + done[next] = true; + + next = indices[next]; + if (next == -1U) + next = end_of_data--; + } + data[i] = std::move(tmp); + done[i] = true; + } + } + +} diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 2ccc411a9..882714ab2 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -1257,18 +1257,10 @@ namespace spot for (auto& e: edges()) fixup(e.dst); } - + // Update properties... 
if (auto* names = get_named_prop>("state-names")) { - unsigned size = names->size(); - for (unsigned s = 0; s < size; ++s) - { - unsigned dst = newst[s]; - if (dst == s || dst == -1U) - continue; - assert(dst < s); - (*names)[dst] = std::move((*names)[s]); - } + permute_vector(*names, newst); names->resize(used_states); } if (auto hs = get_named_prop> @@ -1325,15 +1317,7 @@ namespace spot "degen-levels"}) if (auto os = get_named_prop>(prop)) { - unsigned size = os->size(); - for (unsigned s = 0; s < size; ++s) - { - unsigned dst = newst[s]; - if (dst == s || dst == -1U) - continue; - assert(dst < s); - (*os)[dst] = (*os)[s]; - } + permute_vector(*os, newst); os->resize(used_states); } if (auto ss = get_named_prop>("simulated-states")) @@ -1349,19 +1333,10 @@ namespace spot // Reassign the state-players if (auto sp = get_named_prop>("state-player")) { - const auto ns = (unsigned) used_states; - const auto sps = (unsigned) sp->size(); - assert(ns <= sps); - assert(sps == newst.size()); - - for (unsigned i = 0; i < sps; ++i) - { - if (newst[i] == -1u) - continue; - (*sp)[newst[i]] = (*sp)[i]; - } - sp->resize(ns); + permute_vector(*sp, newst); + sp->resize(used_states); } + // Finally, update all states and edges. init_number_ = newst[init_number_]; g_.defrag_states(newst, used_states); } diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index 36fa31836..614aeb9d9 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -734,8 +734,7 @@ namespace spot /// /// \param newst A vector indicating how each state should be /// renumbered. Use -1U to mark an unreachable state for removal. - /// Ignoring the occurrences of -1U, the renumbering is expected - /// to satisfy newst[i] ≤ i for all i. If the automaton contains + /// Ignoring the occurrences of -1U. If the automaton contains /// universal branching, this vector is likely to be modified by /// this function, so do not reuse it afterwards. /// diff --git a/spot/twaalgos/randomize.cc b/spot/twaalgos/randomize.cc index 19c32bea0..c7975a82a 100644 --- a/spot/twaalgos/randomize.cc +++ b/spot/twaalgos/randomize.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include namespace spot @@ -47,11 +48,8 @@ namespace spot if (auto sn = aut->get_named_prop>("state-names")) { - unsigned sns = sn->size(); // Might be != n. - auto nn = new std::vector(n); - for (unsigned i = 0; i < sns && i < n; ++i) - (*nn)[nums[i]] = (*sn)[i]; - aut->set_named_prop("state-names", nn); + sn->resize(n); + permute_vector(*sn, nums); } if (auto hs = aut->get_named_prop> ("highlight-states")) From 7b0e15a7fb5eed76fb8cbd3ad766e87800b5b4bf Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 18 Sep 2024 12:04:23 +0200 Subject: [PATCH 498/606] implement maximum cardinality search * spot/twaalgos/mcs.cc, spot/twaalgos/mcs.hh: New files. * spot/twaalgos/Makefile.am: Add them. * python/spot/impl.i: Include mcs.hh. * bin/autfilt.cc: Add --mcs option. * NEWS: Mention it. * doc/spot.bib: Add reference. * tests/core/mcs.test: New file. * tests/Makefile.am: Add it. 
--- NEWS | 9 ++ bin/autfilt.cc | 35 ++++++- doc/spot.bib | 14 +++ python/spot/impl.i | 2 + spot/twaalgos/Makefile.am | 6 +- spot/twaalgos/mcs.cc | 196 ++++++++++++++++++++++++++++++++++++++ spot/twaalgos/mcs.hh | 63 ++++++++++++ tests/Makefile.am | 1 + tests/core/mcs.test | 180 ++++++++++++++++++++++++++++++++++ 9 files changed, 501 insertions(+), 5 deletions(-) create mode 100644 spot/twaalgos/mcs.cc create mode 100644 spot/twaalgos/mcs.hh create mode 100755 tests/core/mcs.test diff --git a/NEWS b/NEWS index 0da353568..9a5c0a1c7 100644 --- a/NEWS +++ b/NEWS @@ -51,6 +51,10 @@ New in spot 2.12.0.dev (not yet released) - autfilt learned --track-formula=F to label states with formulas derived from F. (This is more precise on deterministic automata.) + - autfilt learned --mcs[=any|scc] to reorder states according to a + maximum cardinality search. The argument specifies how to break + ties. + - ltlfilt learned --pi1, --sigma1, --delta1, --pi2, --sigma2, and --delta2 to filter according to classes Π₁,Σ₁,Δ₁,Π₂,Σ₂, and Δ₂. @@ -96,6 +100,11 @@ New in spot 2.12.0.dev (not yet released) - twa_graph::defrag_states(num) no longer require num[i]≤i; num can now describe a permutation of the state numbers. + - spot::maximum_cardinality_search() and + spot::maximum_cardinality_search_reorder_here() are new function + to compute (and apply) an ordering of state based on a maximum + cardinality search. + Bug fixes: - Generating random formulas without any unary opertor would very diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 0252a0562..a6ef46d57 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ enum { OPT_KEEP_STATES, OPT_KILL_STATES, OPT_MASK_ACC, + OPT_MCS, OPT_MERGE, OPT_NONDET_STATES, OPT_PARTIAL_DEGEN, @@ -296,6 +298,9 @@ static const argp_option options[] = WORD_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Transformations:", 7 }, + { "mcs-order", OPT_MCS, "any|scc", OPTION_ARG_OPTIONAL, + "reorder states using a maximum cardinality search; use option to" + " specify how to break ties", 0 }, { "merge-transitions", OPT_MERGE, nullptr, 0, "merge transitions with same destination and acceptance", 0 }, { "product", OPT_PRODUCT_AND, "FILENAME", 0, @@ -522,6 +527,21 @@ static bool const aliases_types[] = }; ARGMATCH_VERIFY(aliases_args, aliases_types); +spot::mcs_tie_break opt_mcs_tie = spot::MCS_TIE_ANY; +static char const* const mcs_args[] = +{ + "any", + "scc", + nullptr, +}; +static spot::mcs_tie_break const mcs_types[] = +{ + spot::MCS_TIE_ANY, + spot::MCS_TIE_SCC, +}; +ARGMATCH_VERIFY(mcs_args, mcs_types); + + enum acc_type { ACC_Any = 0, ACC_Given, @@ -653,6 +673,7 @@ static struct opt_t std::vector> hl_words; }* opt; +static bool opt_mcs = false; static bool opt_merge = false; static bool opt_has_univ_branching = false; static bool opt_has_exist_branching = false; @@ -1107,9 +1128,6 @@ parse_opt(int key, char* arg, struct argp_state*) opt_rem_dead = true; break; } - case OPT_MERGE: - opt_merge = true; - break; case OPT_MASK_ACC: { for (auto res : to_longs(arg)) @@ -1125,6 +1143,14 @@ parse_opt(int key, char* arg, struct argp_state*) } break; } + case OPT_MCS: + opt_mcs = true; + if (arg) + opt_mcs_tie = XARGMATCH("--mcs", arg, mcs_args, mcs_types); + break; + case OPT_MERGE: + opt_merge = true; + break; case OPT_NONDET_STATES: opt_nondet_states = parse_range(arg, 0, std::numeric_limits::max()); opt_nondet_states_set = true; @@ -1744,6 +1770,9 @@ namespace 
if (auto run = spot::product(aut, word_aut)->accepting_run()) run->project(aut)->highlight(color); + if (opt_mcs) + spot::maximum_cardinality_search_reorder_here(aut, opt_mcs_tie); + timer.stop(); if (opt->uniq) { diff --git a/doc/spot.bib b/doc/spot.bib index bc2b39a1f..ba25b5070 100644 --- a/doc/spot.bib +++ b/doc/spot.bib @@ -1085,6 +1085,20 @@ doi = {10.1007/978-3-642-16612-9_33} } +@Article{ tarjan.84.sicomp, + author = {Robert E. Tarjan and Mihalis Yannakakis}, + title = {Simple linear-time algorithms to test chordality of + graphs, test acyclicity of hypergraphs, and selectively + reduce acyclic hypergraphs}, + journal = {SIAM Journal on Computing}, + year = {1984}, + volume = {13}, + number = {3}, + pages = {566--579}, + month = aug, + doi = {10.1137/0213035} +} + @TechReport{ tauriainen.00.tr, author = {Heikki Tauriainen}, title = {Automated Testing of {B\"u}chi Automata Translators for diff --git a/python/spot/impl.i b/python/spot/impl.i index 1f9c11fde..2291730ed 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -133,6 +133,7 @@ #include #include #include +#include #include #include #include @@ -742,6 +743,7 @@ def state_is_accepting(self, src) -> "bool": %include %include %include +%include %include %include %include diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index b5993aaef..8e5b929d0 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -37,6 +37,7 @@ twaalgos_HEADERS = \ compsusp.hh \ contains.hh \ copy.hh \ + couvreurnew.hh \ cycles.hh \ dbranch.hh \ deadends.hh \ @@ -65,9 +66,9 @@ twaalgos_HEADERS = \ magic.hh \ mask.hh \ matchstates.hh \ + mcs.hh \ minimize.hh \ mealy_machine.hh \ - couvreurnew.hh \ neverclaim.hh \ parity.hh \ postproc.hh \ @@ -114,6 +115,7 @@ libtwaalgos_la_SOURCES = \ complement.cc \ compsusp.cc \ contains.cc \ + couvreurnew.cc \ cycles.cc \ dbranch.cc \ deadends.cc \ @@ -141,9 +143,9 @@ libtwaalgos_la_SOURCES = \ magic.cc \ mask.cc \ matchstates.cc \ + mcs.cc \ minimize.cc \ mealy_machine.cc \ - couvreurnew.cc \ ndfs_result.hxx \ neverclaim.cc \ parity.cc \ diff --git a/spot/twaalgos/mcs.cc b/spot/twaalgos/mcs.cc new file mode 100644 index 000000000..b73016a15 --- /dev/null +++ b/spot/twaalgos/mcs.cc @@ -0,0 +1,196 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include + +namespace spot +{ + namespace + { + struct mcs_vertex + { + // Each vertex is part of a circular doubly-linked list. + mcs_vertex* prev = nullptr; + mcs_vertex* next = nullptr; + // The weight of a vertex, initially 0, is the number of + // neighbors that already have an MCS-number. The weight is -1 + // if the vertex itself has an MCS-number. 
+ int weight = 0; + }; + + struct mcs_data + { + std::vector vertex; + // set is an array of doubly-linked list built using vertex elements + std::vector set; + + // Initialy, all n vertices are in set[0] + mcs_data(unsigned n) + : vertex(n), set(n, nullptr) + { + // make a circular list of everything in vertex + vertex[0].prev = &vertex[n - 1]; + for (unsigned i = 0; i < n - 1; i++) + { + vertex[i].next = &vertex[i + 1]; + vertex[i + 1].prev = &vertex[i]; + } + vertex[n - 1].next = &vertex[0]; + set[0] = &vertex[0]; + } + + void remove_vertex(unsigned vertex_i, unsigned from_set) + { + mcs_vertex* v = &vertex[vertex_i]; + mcs_vertex* prev = v->prev; + mcs_vertex* next = v->next; + prev->next = next; + next->prev = prev; + if (v == set[from_set]) + set[from_set] = (v == next) ? nullptr : next; + } + + void insert_vertex(unsigned vertex_i, unsigned to_set) + { + mcs_vertex* v = &vertex[vertex_i]; + mcs_vertex* next = set[to_set]; + if (next == nullptr) + { + v->prev = v; + v->next = v; + set[to_set] = v; + } + else + { + mcs_vertex* prev = next->prev; + v->prev = prev; + v->next = next; + prev->next = v; + next->prev = v; + } + } + + void increase_vertex_weight(unsigned vertex_i) + { + mcs_vertex* v = &vertex[vertex_i]; + if (v->weight >= 0) + { + remove_vertex(vertex_i, v->weight); + ++v->weight; + insert_vertex(vertex_i, v->weight); + } + } + + unsigned select_any_vertex(unsigned from_set) + { + mcs_vertex* start = set[from_set]; + assert(start); + return start - &vertex[0]; + } + + scc_info* si; + unsigned select_best_vertex_scc(unsigned from_set) + { + mcs_vertex* start = set[from_set]; + assert(start); + assert(si); + unsigned best = start - &vertex[0]; + unsigned best_scc = si->scc_of(best); + mcs_vertex* v = start->next; + while (v != start) + { + unsigned i = v - &vertex[0]; + if (si->scc_of(i) > best_scc) + { + best = i; + best_scc = si->scc_of(i); + } + v = v->next; + } + return best; + } + }; + } + + /// \brief Return an ordering of the vertices computed by + /// a maximum cardinality search. + /// + /// Unlike Tarjan's paper \cite tarjan.84.sicomp , where states are + /// numbered from N to 1, this number the states from 0 to N-1, + /// starting from the initial state. The next number is assigned to + /// a state that maximizes the number of already-numbered neighbors. + std::vector + maximum_cardinality_search(const const_twa_graph_ptr& a, mcs_tie_break tie) + { + unsigned n = a->num_states(); + mcs_data data(n); + + // We need to compute the neighbors of each state independently of + // the orientation of the edges. + std::vector> neighbors(n); + for (auto& e: a->edges()) + { + neighbors[e.src].insert(e.dst); + neighbors[e.dst].insert(e.src); + } + + // How to break ties when selecting the next vertex? 
+ unsigned (mcs_data::* pick_state)(unsigned) = &mcs_data::select_any_vertex; + if (tie == MCS_TIE_SCC) + { + data.si = new scc_info(a, scc_info_options::NONE); + pick_state = &mcs_data::select_best_vertex_scc; + } + + std::vector order(n, 0U); // order is α in Tarjan's paper + unsigned index = 0; // index is n-i in Tarjan's paper + int max_weight = 0; // max_weight is j in Tarjan's paper + auto number_state = [&](unsigned i) + { + order[i] = index++; + int& w = data.vertex[i].weight; + data.remove_vertex(i, w); + w = -1; + for (unsigned j: neighbors[i]) + data.increase_vertex_weight(j); + ++max_weight; + }; + + unsigned init = a->get_init_state_number(); + number_state(init); + + while (index < n) + { + while (max_weight > 0 && data.set[max_weight] == nullptr) + --max_weight; + number_state((data.*pick_state)(max_weight)); + } + return order; + } + + twa_graph_ptr + maximum_cardinality_search_reorder_here(twa_graph_ptr a, mcs_tie_break tie) + { + std::vector order = maximum_cardinality_search(a, tie); + a->defrag_states(order, order.size()); + return a; + } +} diff --git a/spot/twaalgos/mcs.hh b/spot/twaalgos/mcs.hh new file mode 100644 index 000000000..76a67b74a --- /dev/null +++ b/spot/twaalgos/mcs.hh @@ -0,0 +1,63 @@ +// -*- coding: utf-8 -*- +// Copyright (C) by the Spot authors, see the AUTHORS file for details. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include +#include + +namespace spot +{ + enum mcs_tie_break + { + ///\brief Break ties by picking the first possible state. + MCS_TIE_ANY = 0, + /// \brief Break ties by picking states from the "highest" SCCs + /// + /// This is based on the topological ordering of SCCs computed + /// by scc_info. The initial state is always in the highest + /// SCC, and the smallest SCC has no exit. + MCS_TIE_SCC, + }; + + + /// \brief Return an ordering of the vertices computed by + /// a maximum cardinality search. + /// + /// Unlike Tarjan's paper \cite tarjan.84.sicomp , where states are + /// numbered from N to 1, this numbers the states from 0 to N-1, + /// starting from the initial state. The next number is assigned to + /// a state that maximizes the number of already-numbered neighbors. + /// + /// This version returns a vector such that RESULTS[I] is the rank + /// of state I in the computed order. + /// + /// \param tie specify how to break ties. + SPOT_API std::vector + maximum_cardinality_search(const const_twa_graph_ptr& a, + mcs_tie_break tie = MCS_TIE_ANY); + + /// \brief Reorder the state of \a a according to the order + /// computed by maximum_cardinality_search(). + /// + /// This works in place and return the same automaton. 
+ SPOT_API twa_graph_ptr + maximum_cardinality_search_reorder_here(twa_graph_ptr a, + mcs_tie_break tie = MCS_TIE_ANY); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 898bcccd8..6f7abf994 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -280,6 +280,7 @@ TESTS_twa = \ core/randomize.test \ core/lbttparse.test \ core/ltlf.test \ + core/mcs.test \ core/scc.test \ core/sccdot.test \ core/sccif.test \ diff --git a/tests/core/mcs.test b/tests/core/mcs.test new file mode 100755 index 000000000..da654a547 --- /dev/null +++ b/tests/core/mcs.test @@ -0,0 +1,180 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) by the Spot authors, see the AUTHORS file for details. +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs +set -e + +cat >in.hoa < out1.hoa +autfilt --mcs=scc in.hoa > out2.hoa + +cat >expected1.hoa <expected2.hoa < Date: Thu, 19 Sep 2024 08:50:41 +0200 Subject: [PATCH 499/606] * doc/org/ltlmix.org: Typos. --- doc/org/ltlmix.org | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/org/ltlmix.org b/doc/org/ltlmix.org index d68cc937b..57371b18f 100644 --- a/doc/org/ltlmix.org +++ b/doc/org/ltlmix.org @@ -198,7 +198,7 @@ or 1 Note that in the LTL case, =false= and =true= can be generated by default: when building leave, =alse= and =true= have the same probability to be selected as any input formula. -example). He + * Randomizing atomic propositions with =-A= or =-P= @@ -207,7 +207,7 @@ used in the input formulas. This works as follows: if option =-A N= was given, every time an input formula φ is selected, its atomic propositions are replaced by atomic propositions randomly selected in a set of size $N$. If φ uses $i$ atomic propositions and $i\ge N$, -then those $i$ atomic proposition will be remapped to $i$ distinct +then those $i$ atomic propositions will be remapped to $i$ distinct atomic propositions chosen randomly in that set. If $i>N$, some of the new atomic propositions may replace several of the original atomic propositions. @@ -231,7 +231,7 @@ These options solve two problems: Here is that same example with a single formula, =GFa=, whose atomic -proposition will be randomly replaced by one of +propositions will be randomly replaced by one of $\{p_0,p_1,p_2,p_3,p_4\}$. #+BEGIN_SRC sh :exports both @@ -397,7 +397,7 @@ because we have to deal with two sets of atomic proposition: one set for input, and one set for output. [[https://www.ijcai.org/proceedings/2017/0189.pdf][Zhu et al. (IJCAI'17)]] generate their benchmark for LTL synthesis using -a setup similar to the above, except that when atomic proposition are +a setup similar to the above, except that when atomic propositions are randomized, we must make sure not to change their input or output nature. @@ -435,7 +435,7 @@ When randomizing the atomic propositions in these formulas before combining them, we want to replace each input (resp. 
output) proposition by a random input (resp. output) proposition. This is achieved by passing two numbers to =-A= or =-P=. In the case of [[https://www.ijcai.org/proceedings/2017/0189.pdf][Zhu -et al.]], they do not change the polarity of the proposition during +et al.]], they do not change the polarity of the propositions during their generation, so we would use =-A= to mimic their setup. Here are 6 random conjunctions of the above four patterns, in which @@ -489,6 +489,6 @@ chose from may help to get more realizable formulas. When the original LTL synthesis specification formulas have atomic -proposition that do not start with =i= or =o=, options =--ins=, -=--outs=, or =--part-file= can be used to specify the nature of the +propositions that do not start with =i= or =o=, options =--ins=, +=--outs=, or =--part-file= can be used to specify the nature of these atomic propositions. These options work as [[file:ltlsynt.org::#input-options][=ltlsynt='s input options]]. From b9cb4022cf1b788fa7b71e0254e6fad0bcb9309e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 19 Sep 2024 11:40:53 +0200 Subject: [PATCH 500/606] gen: rename pps-arbiter's APs * bin/genltl.cc, spot/gen/formulas.cc: Adjust the name of the AP produced by pps-arbiter. * NEWS: Mention the change. --- NEWS | 8 +++++++- bin/genltl.cc | 8 ++++---- spot/gen/formulas.cc | 4 ++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 9a5c0a1c7..fe5355e49 100644 --- a/NEWS +++ b/NEWS @@ -10,7 +10,7 @@ New in spot 2.12.0.dev (not yet released) consistency) and augmented (with new statistics). The new CSV output should be more useful when the input specification is decomposed, in particular, there is a column giving the number of - sub-specifications obained, and other statistics columns have + sub-specifications obtained, and other statistics columns have names starting with "max_" or "sum_" indicating how said statistics are updated across sub-specifications. @@ -44,6 +44,12 @@ New in spot 2.12.0.dev (not yet released) % genltl --lily-patterns | ltlsynt -q + - genltl's --pps-arbiter-standard and --pps-arbiter-strict have been + changed to rename variables {r1,r2,...,g1,g2...} as + {i1,i1,...,o1,o2,...} so that these formula can be fed directly to + ltlsynt. + + - autfilt learned --restrict-dead-end-edges, to restricts labels of edges leading to dead-ends. See the description of restrict_dead_end_edges_here() below. diff --git a/bin/genltl.cc b/bin/genltl.cc index 5a3ab3539..80789fc9d 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -140,11 +140,11 @@ static const argp_option options[] = OPT_ALIAS(beem-patterns), OPT_ALIAS(p), { "pps-arbiter-standard", gen::LTL_PPS_ARBITER_STANDARD, "RANGE", 0, - "Arbiter with n clients that sent requests (ri) and " - "receive grants (gi). Standard semantics.", 0 }, + "Arbiter with n clients that sent requests (iN) and " + "receive grants (oN). Standard semantics.", 0 }, { "pps-arbiter-strict", gen::LTL_PPS_ARBITER_STRICT, "RANGE", 0, - "Arbiter with n clients that sent requests (ri) and " - "receive grants (gi). Strict semantics.", 0 }, + "Arbiter with n clients that sent requests (iN) and " + "receive grants (oN). Strict semantics.", 0 }, { "r-left", gen::LTL_R_LEFT, "RANGE", 0, "(((p1 R p2) R p3) ... R pn)", 0 }, { "r-right", gen::LTL_R_RIGHT, "RANGE", 0, "(p1 R (p2 R (... 
R pn)))", 0 }, { "rv-counter", gen::LTL_RV_COUNTER, "RANGE", 0, "n-bit counter", 0 }, diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc index ad7f2e8d4..f8314151b 100644 --- a/spot/gen/formulas.cc +++ b/spot/gen/formulas.cc @@ -1428,9 +1428,9 @@ namespace spot case LTL_P_PATTERNS: return p_pattern(n); case LTL_PPS_ARBITER_STANDARD: - return pps_arbiter("r", "g", n, false); + return pps_arbiter("i", "o", n, false); case LTL_PPS_ARBITER_STRICT: - return pps_arbiter("r", "g", n, true); + return pps_arbiter("i", "o", n, true); case LTL_R_LEFT: return bin_n("p", n, op::R, false); case LTL_R_RIGHT: From e13deeb1434f599d5487898a0016f531c09142bb Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Sep 2024 11:49:26 +0200 Subject: [PATCH 501/606] * .gitlab-ci.yml (publish-stable): Add scp for LRE's dload host. --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8aabfb8fa..a48b23ae8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -436,11 +436,12 @@ publish-stable: - cd .. - ls -l - tgz=`ls spot-*.tar.* | head -n 1` - - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac + - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/; scp $tgz doc@dload:/var/www/html/spot/;; esac - rm -rf ./* - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/" - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" + - curl "https://web.archive.org/save/https://www.lre.epita.fr/dload/spot/$tgz" publish-unstable: only: From e48506f5484c216a085ea9108ed8d35685e166e5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 4 Jul 2024 12:11:10 +0200 Subject: [PATCH 502/606] improve some comments * spot/twaalgos/complement.hh, spot/twaalgos/complement.cc: Here. --- spot/twaalgos/complement.cc | 7 ++++--- spot/twaalgos/complement.hh | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/complement.cc b/spot/twaalgos/complement.cc index 00e9cb0ce..9fad4eac4 100644 --- a/spot/twaalgos/complement.cc +++ b/spot/twaalgos/complement.cc @@ -515,7 +515,7 @@ namespace spot twa_graph_ptr res = dualize(aut); // There are cases with "t" acceptance that get converted to // Büchi during completion, then dualized to co-Büchi, but the - // acceptance is still not used. To try to clean it up in this + // acceptance is still not used. Try to clean it up in this // case. if (aut->num_sets() == 0 || // Also dualize removes sink states, but doesn't simplify @@ -525,8 +525,9 @@ namespace spot return res; } if (is_very_weak_automaton(aut)) - // removing alternation may need more acceptance sets than we support. - // in this case res==nullptr and we try the other determinization. + // Removing alternation may need more acceptance sets than Spot + // supports. When this happens res==nullptr and we fall back to + // determinization-based complementation. if (twa_graph_ptr res = remove_alternation(dualize(aut), false, aborter, false)) return res; diff --git a/spot/twaalgos/complement.hh b/spot/twaalgos/complement.hh index 4b74f27b8..6bb8ff1d9 100644 --- a/spot/twaalgos/complement.hh +++ b/spot/twaalgos/complement.hh @@ -73,7 +73,8 @@ namespace spot /// If an output_aborter is supplied, it is used to /// abort the construction of larger automata. 
/// - /// complement_semidet() is not yet used. + /// complement_semidet() is not yet used, as it is not always better + /// when the input is semi-deterministic. SPOT_API twa_graph_ptr complement(const const_twa_graph_ptr& aut, const output_aborter* aborter = nullptr); From 783efa2fe800089ee80505bf1580ceb20deffab7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 25 Jul 2024 18:01:42 +0200 Subject: [PATCH 503/606] * doc/tl/tl.tex: Some typos. --- doc/tl/tl.tex | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 141daa7b8..80c47e973 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -1473,10 +1473,10 @@ The goals in most of these simplification are to: \end{itemize} Rewritings defined with $\equivEU$ are applied only when -\verb|tl_simplifier_options::favor_event_univ|' is \texttt{true}: +`\verb|tl_simplifier_options::favor_event_univ|' is \texttt{true}: they try to lift subformulas that are both eventual and universal \emph{higher} in the syntax tree. Conversely, rules defined with $\equivNeu$ -are applied only when \verb|favor_event_univ|' is \texttt{false}: they +are applied only when `\verb|favor_event_univ|' is \texttt{false}: they try to \textit{lower} subformulas that are both eventual and universal. Currently all these simplifications assume LTL semantics, so they make @@ -1486,10 +1486,10 @@ only listed with $\X$. \subsection{Basic Simplifications}\label{sec:basic-simp} These simplifications are enabled with -\verb|tl_simplifier_options::reduce_basics|'. A couple of them may +`\verb|tl_simplifier_options::reduce_basics|'. A couple of them may enlarge the size of the formula: they are denoted using $\equiV$ instead of $\equiv$, and they can be disabled by setting the -\verb|tl_simplifier_options::reduce_size_strictly|' option to +`\verb|tl_simplifier_options::reduce_size_strictly|' option to \texttt{true}. \subsubsection{Basic Simplifications for Temporal Operators} @@ -1715,7 +1715,7 @@ $\Esuffix$. They assume that $b$, denote a Boolean formula. As noted at the beginning for section~\ref{sec:basic-simp}, rewritings denoted with $\equiV$ can be disabled by setting the -\verb|tl_simplifier_options::reduce_size_strictly|' option to +`\verb|tl_simplifier_options::reduce_size_strictly|' option to \texttt{true}. 
\begin{align*} @@ -1818,7 +1818,7 @@ $q,\,q_i$ & a pure eventuality that is also purely universal \\ \G(f_1\AND\ldots\AND f_n \AND q_1 \AND \ldots \AND q_p)&\equivEU \G(f_1\AND\ldots\AND f_n)\AND q_1 \AND \ldots \AND q_p \\ \G\F(f_1\AND\ldots\AND f_n \AND q_1 \AND \ldots \AND q_p)&\equiv \G(\F(f_1\AND\ldots\AND f_n)\AND q_1 \AND \ldots \AND q_p) \\ \G(f_1\AND\ldots\AND f_n \AND e_1 \AND \ldots \AND e_m \AND \G(e_{m+1}) \AND \ldots\AND \G(e_p))&\equivEU \G(f_1\AND\ldots\AND f_n)\AND \G(e_1 \AND \ldots \AND e_p) \\ - \G(f_1\AND\ldots\AND f_n \AND \G(g_1) \AND \ldots \AND \G(g_m) &\equiv \G(f_1\AND\ldots\AND f_n\AND g_1 \AND \ldots \AND g_m) \\ + \G(f_1\AND\ldots\AND f_n \AND \G(g_1) \AND \ldots \AND \G(g_m)) &\equiv \G(f_1\AND\ldots\AND f_n\AND g_1 \AND \ldots \AND g_m) \\ \F(f_1 \OR \ldots \OR f_n \OR u_1 \OR \ldots \OR u_m \OR \F(u_{m+1})\OR\ldots\OR \F(u_p)) &\equivEU \F(f_1\OR \ldots\OR f_n) \OR \F(u_1 \OR \ldots \OR u_p)\\ \F(f_1 \OR \ldots \OR f_n \OR \F(g_1) \OR \ldots \OR \G(g_m)) &\equiv \F(f_1\OR \ldots\OR f_n \OR g_1 \OR \ldots \OR g_m)\\ \G(f_1)\AND\ldots\AND \G(f_n) \AND \G(e_1) \AND \ldots\AND \G(e_p)&\equivEU \G(f_1\AND\ldots\AND f_n)\AND \G(e_1 \AND \ldots \AND e_p) \\ @@ -1837,19 +1837,19 @@ implication can be done in two ways: \begin{description} \item[Syntactic Implication Checks] were initially proposed by~\citet{somenzi.00.cav}. This detection is enabled by the - ``\verb|tl_simplifier_options::synt_impl|'' option. This is a + `\verb|tl_simplifier_options::synt_impl|' option. This is a cheap way to detect implications, but it may miss some. The rules we implement are described in Appendix~\ref{ann:syntimpl}. \item[Language Containment Checks] were initially proposed by~\citet{tauriainen.03.tr}. This detection is enabled by the - ``\verb|tl_simplifier_options::containment_checks|'' option. + `\verb|tl_simplifier_options::containment_checks|' option. \end{description} In the following rewritings rules, $f\simp g$ means that $g$ was proved to be implied by $f$ using either of the above two methods. Additionally, implications denoted by $f\Simp g$ are only checked if -the ``\verb|tl_simplifier_options::containment_checks_stronger|'' +the `\verb|tl_simplifier_options::containment_checks_stronger|' option is set (otherwise the rewriting rule is not applied). We write $f\simpe g$ iff $f\simp g$ and $g\simp f$. @@ -1936,7 +1936,7 @@ The first six rules, about n-ary operators $\AND$ and $\OR$, are implemented for $n$ operands by testing each operand against all other. To prevent the complexity to escalate, this is only performed with up to 16 operands. That value can be changed in -``\verb|tl_simplifier_options::containment_max_ops|''. +`\verb|tl_simplifier_options::containment_max_ops|'. The following rules mix implication-based checks with formulas that are pure eventualities ($e$) or that are purely universal ($u$). From bdc63db9f041b753134d5342965edee60e58be20 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Aug 2024 11:18:14 +0200 Subject: [PATCH 504/606] ltlgrind: improve error message when formulas are missing The error message, inherited from ltl2tgba, used to say "No formula to translate", but "translate" isn't appropriate here. * bin/common_finput.cc, bin/common_finput.hh (check_no_formula): Allow "translate" to be changed. * bin/ltlgrind.cc: Change it. * tests/core/ltlgrind.test: Test it. 
--- bin/common_finput.cc | 6 +++--- bin/common_finput.hh | 2 +- bin/ltlgrind.cc | 2 +- tests/core/ltlgrind.test | 8 ++++++++ 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/bin/common_finput.cc b/bin/common_finput.cc index df0343dd1..14cd06b36 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -388,14 +388,14 @@ job_processor::run() return error; } -void check_no_formula() +void check_no_formula(const char* action) { if (!jobs.empty()) return; if (isatty(STDIN_FILENO)) - error(2, 0, "No formula to translate? Run '%s --help' for help.\n" + error(2, 0, "No formula to %s? Run '%s --help' for help.\n" "Use '%s -' to force reading formulas from the standard " - "input.", program_name, program_name); + "input.", action, program_name, program_name); jobs.emplace_back("-", job_type::LTL_FILENAME); } diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 30b7f333c..491364d19 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -93,5 +93,5 @@ public: // Report and error message or add a default job depending on whether // the input is a tty. -void check_no_formula(); +void check_no_formula(const char* action = "translate"); void check_no_automaton(); diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index 626211adc..61ffc3cd5 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -199,7 +199,7 @@ main(int argc, char* argv[]) mut_opts |= opt_all; - check_no_formula(); + check_no_formula("mutate"); mutate_processor processor; if (processor.run()) diff --git a/tests/core/ltlgrind.test b/tests/core/ltlgrind.test index f508c4826..bfceef88a 100755 --- a/tests/core/ltlgrind.test +++ b/tests/core/ltlgrind.test @@ -200,3 +200,11 @@ EOF ltlgrind -f 'a U b' -m 999999999999999999999999999 2>err && exit 1 grep 'too large' err + +# The following message appears only if run from a tty. +if (: > /dev/tty) >/dev/null 2>&1 ; then + ltlgrind err && exit 1 + grep 'No formula to mutate' err +fi + +: From 97832af321e1081c07a1473e2f0feefece750fbf Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 21 Aug 2024 21:36:54 +0200 Subject: [PATCH 505/606] randltl: fix generation without unary operators * spot/tl/randomltl.hh (has_unary_ops): New method. * spot/tl/randomltl.cc: Avoid creating subformulas of even size when we do not have unary operators. * tests/core/randpsl.test: Test it. * NEWS: Mention it. --- NEWS | 5 ++++- spot/tl/randomltl.cc | 38 +++++++++++++++++++++++++++++++++----- spot/tl/randomltl.hh | 6 ++++++ tests/core/randpsl.test | 18 ++++++++++++++++++ 4 files changed, 61 insertions(+), 6 deletions(-) diff --git a/NEWS b/NEWS index c0ab55c77..47cd487cc 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,9 @@ New in spot 2.12.0.dev (not yet released) - Nothing yet. + Bug fixes: + + - Generating random formula without any unary opertors would very + often create formulas much smaller than asked. New in spot 2.12 (2024-05-16) diff --git a/spot/tl/randomltl.cc b/spot/tl/randomltl.cc index 9aa604ee2..e415535b2 100644 --- a/spot/tl/randomltl.cc +++ b/spot/tl/randomltl.cc @@ -92,14 +92,20 @@ namespace spot { assert(n >= 3); --n; - int l = rrand(1, n - 1); + int l; // size of left + if ((n & 1) | rl->has_unary_ops()) + l = rrand(1, n - 1); + else + // if we do not have unary ops, we must split n in two odd sizes + l = rrand(0, n/2 - 1)*2 + 1; + // Force the order of generation of operands to be right, then // left. 
This is historical, because gcc evaluates argument // from right to left and we used to make the two calls to // generate() inside of the call to instance() before // discovering that clang would perform the nested calls from // left to right. - auto right = rl->generate(n - l); + formula right = rl->generate(n - l); return formula::binop(Op, rl->generate(l), right); } @@ -110,7 +116,25 @@ namespace spot assert(n >= 3); --n; const random_psl* rp = static_cast(rl); - int l = rrand(1, n - 1); + int l; // size of left + bool left_must_be_odd = !rp->rs.has_unary_ops(); + bool right_must_be_odd = !rl->has_unary_ops(); + if (n & 1) + { + if (left_must_be_odd && !right_must_be_odd) + l = rrand(0, n/2 - 1) * 2 + 1; + else if (!left_must_be_odd && right_must_be_odd) + l = rrand(1, n/2) * 2; + else + l = rrand(1, n - 1); + } + else + { + if (left_must_be_odd || right_must_be_odd) + l = rrand(0, n/2 - 1) * 2 + 1; + else + l = rrand(1, n - 1); + } // See comment in binop_builder. auto right = rl->generate(n - l); return formula::binop(Op, rp->rs.generate(l), right); @@ -152,9 +176,13 @@ namespace spot { assert(n >= 3); --n; - int l = rrand(1, n - 1); // See comment in binop_builder. - auto right = rl->generate(n - l); + int l; // size of left + if ((n & 1) | rl->has_unary_ops()) + l = rrand(1, n - 1); + else + l = rrand(0, n/2 - 1)*2 + 1; + formula right = rl->generate(n - l); return formula::multop(Op, {rl->generate(l), right}); } diff --git a/spot/tl/randomltl.hh b/spot/tl/randomltl.hh index d4c52debf..a7ea3561c 100644 --- a/spot/tl/randomltl.hh +++ b/spot/tl/randomltl.hh @@ -72,6 +72,12 @@ namespace spot /// occurrences of the \c F operator. const char* parse_options(char* options); + /// \brief whether we can use unary operators + bool has_unary_ops() const + { + return total_2_ > 0.0; + } + protected: void update_sums(); diff --git a/tests/core/randpsl.test b/tests/core/randpsl.test index 5e7192894..9d4f825aa 100755 --- a/tests/core/randpsl.test +++ b/tests/core/randpsl.test @@ -36,3 +36,21 @@ test `wc -l < formulas` = 50 randltl --psl --sere-priorities=first_match=10 -n 100 2 | grep first_match + +# the random generator had trouble generating formulas of the proper size when +# unary operators were disabled +P=true=0,false=0,not=0 +randltl --tree-size=19 -B --boolean-prio=$P 1000 -n10 --stats=%a >out +cat >expected < Date: Fri, 30 Aug 2024 16:05:50 +0200 Subject: [PATCH 506/606] game: fix solving "parity min" games with multi-colored edges * spot/twaalgos/game.cc: If the original acceptance is "parity min", use min_set(), not max_set(), to read edge priorities. * tests/python/game.py: Add a test case. * NEWS: Mention the bug. --- NEWS | 5 +++++ spot/twaalgos/game.cc | 3 ++- tests/python/game.py | 25 ++++++++++++++++++++++++- 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 47cd487cc..343c36543 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,11 @@ New in spot 2.12.0.dev (not yet released) - Generating random formula without any unary opertors would very often create formulas much smaller than asked. + - The parity game solver, which internally works on "parity max + odd", but actually accept any type of parity acceptance, could be + confused by games with "parity min" acceptance using transition + with several colors (a rather uncommon situation). 
+ New in spot 2.12 (2024-05-16) Build: diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 404fa4778..e2c550531 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -357,7 +357,8 @@ namespace spot // Takes an edge and returns the "equivalent" max odd parity auto equiv_par = [max, odd, next_max_par, inv = 2*max-1](const auto& e) { - par_t e_par = e.acc.max_set() - 1; // -1 for empty + par_t e_par = + (max ? e.acc.max_set() : e.acc.min_set()) - 1; // -1 for empty // If "min" and empty -> set to n if (!max & (e_par == -1)) e_par = next_max_par; diff --git a/tests/python/game.py b/tests/python/game.py index e0d880647..3eba9bf41 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -351,6 +351,21 @@ State: 17 --END--""" ) +def maximize_colors(aut, is_max): + ns = aut.num_sets() + v = [] + if is_max: + for c in range(ns+1): + v.append(spot.mark_t(list(range(c)))) + for e in aut.edges(): + e.acc = v[e.acc.max_set()] + else: + for c in range(ns+1): + v.append(spot.mark_t(list(range(c, ns)))) + v.insert(0, spot.mark_t([])) + for e in aut.edges(): + e.acc = v[e.acc.min_set()] + # Test the different parity conditions gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), False, True, True, False) @@ -370,6 +385,14 @@ for kind in [spot.parity_kind_min, spot.parity_kind_max]: tc.assertTrue(spot.solve_parity_game(g_test_split1)) c_strat1 = spot.get_strategy(g_test_split1) tc.assertTrue(c_strat == c_strat1) + # Same test, but adding a lot of useless colors in the game + g_test_split2 = spot.change_parity(g_test_split, kind, style) + maximize_colors(g_test_split2, kind == spot.parity_kind_max) + spot.set_state_players(g_test_split2, sp) + tc.assertTrue(spot.solve_parity_game(g_test_split2)) + c_strat2 = spot.get_strategy(g_test_split2) + tc.assertTrue(c_strat == c_strat2) + # Test that strategies are not appended # if solve is called multiple times @@ -520,4 +543,4 @@ f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ && (! (F (G ((request_1) && (X (! (grant_1)))))))))" outs = ["grant_0", "grant1"] -tc.assertEqual(synt_ltlf(f1, outs)[0], False) \ No newline at end of file +tc.assertEqual(synt_ltlf(f1, outs)[0], False) From f5ab5678b5f6145f21af0f9da2f258073d5a9f40 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 13:50:36 +0200 Subject: [PATCH 507/606] python: improve support of spot-extra, and recent swig I could not run "make check" in a copy of seminator 2.0 regenerated with swig 4.0, because of changes in the way Swig imports its shared libraries. * python/spot/__init__.py: If sys.path contains "/spot-extra" directory, add it to spot.__path__ as well. This helps situations where a plugin use libtool and the development tree has the shared libraries in .../spot-extra/.libs/ --- python/spot/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 1c6133390..6f571f932 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -38,11 +38,17 @@ if 'SPOT_UNINSTALLED' in os.environ: # We may have third-party plugins that want to be loaded as "spot.xxx", but # that are installed in a different $prefix. This sets things so that any # file that looks like spot-extra/xxx.py can be loaded with "import spot.xxx". +# When libtool is used in a development build, it is likely that PYTHONPATH +# is already set up to contains something like .../spot-extra/.libs, so we +# want to copy those as well. 
for path in sys.path: if path not in __path__: - path += "/spot-extra" - if os.path.isdir(path): + if "/spot-extra" in path: __path__.append(path) + else: + path += "/spot-extra" + if os.path.isdir(path): + __path__.append(path) from spot.impl import * @@ -58,7 +64,7 @@ from spot.aux import \ ostream_to_svg as _ostream_to_svg -# The parrameters used by default when show() is called on an automaton. +# The parameters used by default when show() is called on an automaton. _show_default = None From 514209e80f9097de974da2cc31c6dbc6db95de7f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 13:54:36 +0200 Subject: [PATCH 508/606] * configure.ac: Typo. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 4ec7633e4..e85feb8cf 100644 --- a/configure.ac +++ b/configure.ac @@ -220,7 +220,7 @@ AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate]) AX_CHECK_VALGRIND # Debian used to reserve the name 'swig' for swig-2.0. So prefer # swig4.0 (available in Debian bullseye) to swig3.0 (available in Debian buster) -# ti swig. +# to swig. AC_CHECK_PROGS([SWIG], [swig4.0 swig3.0 swig], [swig]) AC_SUBST([CROSS_COMPILING], [$cross_compiling]) From 1a36ea6ce4248ad50fb5169d37489b77ae29b283 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Sep 2024 17:26:42 +0200 Subject: [PATCH 509/606] ltlsynt: fix usage for --dot's argument * bin/ltlsynt.cc (dispatch_print_hoa): Pass the right argument to print_dot. * tests/core/ltlsynt.test: Test it. * NEWS: Mention the bug. --- NEWS | 2 ++ bin/ltlsynt.cc | 2 +- tests/core/ltlsynt.test | 6 +++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 343c36543..c1045dcca 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,8 @@ New in spot 2.12.0.dev (not yet released) confused by games with "parity min" acceptance using transition with several colors (a rather uncommon situation). + - "ltlsynt ... --print-game --dot=ARGS" was ignoring ARGS. + New in spot 2.12 (2024-05-16) Build: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 24f4af16a..a85dbc3a9 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -322,7 +322,7 @@ namespace rs->patch_game(game); if (opt_dot) - spot::print_dot(std::cout, game, opt_print_hoa_args); + spot::print_dot(std::cout, game, opt_dot_arg); else if (opt_print_pg) spot::print_pg(std::cout, game); else diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index 1e0397a5f..dd3e4152c 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1085,7 +1085,11 @@ diff outx exp # Test --dot and --hide-status ltlsynt -f 'i <-> Fo' --ins=i --aiger --dot | grep arrowhead=dot -ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot | grep 'shape="diamond"' +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot > out +grep 'shape="diamond"' out +grep 'Inf(0)' out +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot=bar > out +grep 'label= Fo' --ins=i --dot --hide-status > res cat >exp < Date: Mon, 2 Sep 2024 17:28:28 +0200 Subject: [PATCH 510/606] fix spurious g++-14 warning * spot/twaalgos/mealy_machine.cc (mm_sat_prob_t::get_sol): Here. 
--- spot/twaalgos/mealy_machine.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index e63193cdc..5021f2a94 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -2604,9 +2604,14 @@ namespace return {}; case PICOSAT_SATISFIABLE: { - std::vector - res(1 + (unsigned) picosat_variables(lm.psat_), -1); - SPOT_ASSUME(res.data()); // g++ 11 believes data might be nullptr + unsigned nvar = 1 + (unsigned) picosat_variables(lm.psat_); + // Asssuming res.data() non-null was enough to prevent g++ + // 11 from issuing a spurious "potential null pointer + // dereference" on the res[0] assignment. Since g++14 we + // also need to assume nvar>0. + SPOT_ASSUME(nvar > 0); + std::vector res(nvar, -1); + SPOT_ASSUME(res.data()); res[0] = 0; // Convention for (int lit : lm.all_lits) res[lit] = picosat_deref(lm.psat_, lit); From 376755dbd46eec1b12ca111c3a2ea94fa0d27519 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 3 Sep 2024 17:37:59 +0200 Subject: [PATCH 511/606] game: avoid a spurious g++14 warning * spot/twaalgos/game.cc, spot/twaalgos/game.hh (get_state_winners): Declare a non-const version as well to avoid a "possibly dangling reference" error in code show by tut40.org. --- spot/twaalgos/game.cc | 33 +++++++++++++++++++++++---------- spot/twaalgos/game.hh | 26 ++++++++++++++++---------- 2 files changed, 39 insertions(+), 20 deletions(-) diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index e2c550531..9f739d423 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1085,12 +1085,12 @@ namespace spot return aut; } - void set_state_players(twa_graph_ptr arena, const region_t& owners) + void set_state_players(twa_graph_ptr& arena, const region_t& owners) { set_state_players(arena, region_t(owners)); } - void set_state_players(twa_graph_ptr arena, region_t&& owners) + void set_state_players(twa_graph_ptr& arena, region_t&& owners) { if (owners.size() != arena->num_states()) throw std::runtime_error @@ -1100,7 +1100,7 @@ namespace spot new region_t(std::move(owners))); } - void set_state_player(twa_graph_ptr arena, unsigned state, bool owner) + void set_state_player(twa_graph_ptr& arena, unsigned state, bool owner) { if (state >= arena->num_states()) throw std::runtime_error("set_state_player(): invalid state number"); @@ -1141,7 +1141,7 @@ namespace spot return *owners; } - bool get_state_player(const_twa_graph_ptr arena, unsigned state) + bool get_state_player(const const_twa_graph_ptr& arena, unsigned state) { if (state >= arena->num_states()) throw std::runtime_error("get_state_player(): invalid state number"); @@ -1165,11 +1165,11 @@ namespace spot return *strat_ptr; } - void set_strategy(twa_graph_ptr arena, const strategy_t& strat) + void set_strategy(twa_graph_ptr& arena, const strategy_t& strat) { set_strategy(arena, strategy_t(strat)); } - void set_strategy(twa_graph_ptr arena, strategy_t&& strat) + void set_strategy(twa_graph_ptr& arena, strategy_t&& strat) { if (arena->num_states() != strat.size()) throw std::runtime_error("set_strategy(): strategies need to have " @@ -1214,12 +1214,12 @@ namespace spot } - void set_state_winners(twa_graph_ptr arena, const region_t& winners) + void set_state_winners(twa_graph_ptr& arena, const region_t& winners) { set_state_winners(arena, region_t(winners)); } - void set_state_winners(twa_graph_ptr arena, region_t&& winners) + void set_state_winners(twa_graph_ptr& arena, region_t&& winners) { if 
(winners.size() != arena->num_states()) throw std::runtime_error @@ -1229,7 +1229,7 @@ namespace spot new region_t(std::move(winners))); } - void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner) + void set_state_winner(twa_graph_ptr& arena, unsigned state, bool winner) { if (state >= arena->num_states()) throw std::runtime_error("set_state_winner(): invalid state number"); @@ -1258,7 +1258,20 @@ namespace spot return *winners; } - bool get_state_winner(const_twa_graph_ptr arena, unsigned state) + // This second version should not be needed, but g++14 emits + // "possibly dangling reference" warnings when it sees that the + // first function is called with a temporary const_twa_graph_ptr to + // return a reference. + const region_t& get_state_winners(twa_graph_ptr& arena) + { + region_t *winners = arena->get_named_prop("state-winner"); + if (!winners) + throw std::runtime_error + ("get_state_winners(): state-winner property not defined, not a game?"); + return *winners; + } + + bool get_state_winner(const const_twa_graph_ptr& arena, unsigned state) { if (state >= arena->num_states()) throw std::runtime_error("get_state_winner(): invalid state number"); diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index c376304be..737a50d78 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -151,20 +151,20 @@ namespace spot /// \brief Set the owner for all the states. /// @{ SPOT_API - void set_state_players(twa_graph_ptr arena, const region_t& owners); + void set_state_players(twa_graph_ptr& arena, const region_t& owners); SPOT_API - void set_state_players(twa_graph_ptr arena, region_t&& owners); + void set_state_players(twa_graph_ptr& arena, region_t&& owners); /// @} /// \ingroup games /// \brief Set the owner of a state. SPOT_API - void set_state_player(twa_graph_ptr arena, unsigned state, bool owner); + void set_state_player(twa_graph_ptr& arena, unsigned state, bool owner); /// \ingroup games /// \brief Get the owner of a state. SPOT_API - bool get_state_player(const_twa_graph_ptr arena, unsigned state); + bool get_state_player(const const_twa_graph_ptr& arena, unsigned state); /// \ingroup games /// \brief Get the owner of all states @@ -181,9 +181,9 @@ namespace spot SPOT_API const strategy_t& get_strategy(const const_twa_graph_ptr& arena); SPOT_API - void set_strategy(twa_graph_ptr arena, const strategy_t& strat); + void set_strategy(twa_graph_ptr& arena, const strategy_t& strat); SPOT_API - void set_strategy(twa_graph_ptr arena, strategy_t&& strat); + void set_strategy(twa_graph_ptr& arena, strategy_t&& strat); /// @} /// \ingroup games @@ -205,23 +205,29 @@ namespace spot /// \brief Set the winner for all the states. /// @{ SPOT_API - void set_state_winners(twa_graph_ptr arena, const region_t& winners); + void set_state_winners(twa_graph_ptr& arena, const region_t& winners); SPOT_API - void set_state_winners(twa_graph_ptr arena, region_t&& winners); + void set_state_winners(twa_graph_ptr& arena, region_t&& winners); /// @} /// \ingroup games /// \brief Set the winner of a state. SPOT_API - void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner); + void set_state_winner(twa_graph_ptr& arena, unsigned state, bool winner); /// \ingroup games /// \brief Get the winner of a state. 
SPOT_API - bool get_state_winner(const_twa_graph_ptr arena, unsigned state); + bool get_state_winner(const const_twa_graph_ptr& arena, unsigned state); /// \ingroup games /// \brief Get the winner of all states + /// @{ SPOT_API const region_t& get_state_winners(const const_twa_graph_ptr& arena); +#ifndef SWIG + SPOT_API + const region_t& get_state_winners(twa_graph_ptr& arena); +#endif + /// @} } From c92418b51c7606f27197061970b070fcb047f531 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Sep 2024 11:49:26 +0200 Subject: [PATCH 512/606] * .gitlab-ci.yml (publish-stable): Add scp for LRE's dload host. --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8aabfb8fa..a48b23ae8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -436,11 +436,12 @@ publish-stable: - cd .. - ls -l - tgz=`ls spot-*.tar.* | head -n 1` - - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac + - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/; scp $tgz doc@dload:/var/www/html/spot/;; esac - rm -rf ./* - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/" - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" + - curl "https://web.archive.org/save/https://www.lre.epita.fr/dload/spot/$tgz" publish-unstable: only: From b63f16060ab9c9437bcd15cb704fecb71eec8978 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Sep 2024 12:04:28 +0200 Subject: [PATCH 513/606] release Spot 2.12.1 * NEWS, configure.ac, doc/org/setup.org: Update. --- NEWS | 7 ++++++- configure.ac | 2 +- doc/org/setup.org | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index c1045dcca..329cc51ad 100644 --- a/NEWS +++ b/NEWS @@ -1,4 +1,4 @@ -New in spot 2.12.0.dev (not yet released) +New in spot 2.12.1 (2024-09-23) Bug fixes: @@ -12,6 +12,11 @@ New in spot 2.12.0.dev (not yet released) - "ltlsynt ... --print-game --dot=ARGS" was ignoring ARGS. + - Work around various warnings from g++14. + + - Improved handling of spot-extra/ directory with newer Swig + versions. Necessary to recompile Seminator 2 with Swig 4. + New in spot 2.12 (2024-05-16) Build: diff --git a/configure.ac b/configure.ac index e85feb8cf..9f229582b 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.12.0.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12.1], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/doc/org/setup.org b/doc/org/setup.org index 440dcfa00..d8f8f2a3e 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: LASTDATE 2024-05-14 +#+MACRO: LASTDATE 2024-09-23 #+NAME: SPOT_VERSION #+BEGIN_SRC python :exports none :results value :wrap org -return "2.12" +return "2.12.1" #+END_SRC #+NAME: TARBALL_LINK From b05c90cd87caa5183706a463e3e0095a23b661c5 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 23 Sep 2024 13:33:08 +0200 Subject: [PATCH 514/606] bump version to 2.12.1.dev * NEWS, configure.ac: Here. 
--- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 329cc51ad..68457a365 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.12.1.dev (not yet released) + + Nothing yet. + New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/configure.ac b/configure.ac index 9f229582b..6f64eab68 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.12.1], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12.1.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 4a33f0fe6502da6034ae9103c6b3d68e94ed2229 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 30 Oct 2024 12:07:55 +0100 Subject: [PATCH 515/606] hierarchy: improve error message * spot/tl/hierarchy.cc (mp_class): Fix type of o so that it is displayed as an character in error messages. --- spot/tl/hierarchy.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/tl/hierarchy.cc b/spot/tl/hierarchy.cc index 3ab6f5930..5112c9c12 100644 --- a/spot/tl/hierarchy.cc +++ b/spot/tl/hierarchy.cc @@ -312,7 +312,7 @@ namespace spot bool wide = false; if (opt) for (;;) - switch (int o = *opt++) + switch (char o = *opt++) { case 'v': verbose = true; From 82401b3254c4c9fa49d4e60cc7b4c0ff0b156254 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Dec 2024 23:24:30 +0100 Subject: [PATCH 516/606] correct to_finite This fixes issue #596. * spot/twaalgos/remprop.cc: Rewrite main loop. * tests/core/ltlf.test: Add test case. * tests/python/game.py: Remove a test that appears to make incorrect assumptions about to_finite. * NEWS: Mention the bug. --- NEWS | 3 + spot/twaalgos/remprop.cc | 21 +++--- tests/core/ltlf.test | 37 +++++++---- tests/python/game.py | 137 --------------------------------------- 4 files changed, 39 insertions(+), 159 deletions(-) diff --git a/NEWS b/NEWS index 9730a59d5..655914f1d 100644 --- a/NEWS +++ b/NEWS @@ -117,6 +117,9 @@ New in spot 2.12.1.dev (not yet released) status and the AIG circuit; it now does the job silently as requested. + - to_finite() was dealing incorrectly with edges that were + both alive and dead. (Issue #596.) 
+ New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 14570f148..eb7c54dd0 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -19,6 +19,7 @@ #include "config.h" #include #include +#include #include #include @@ -180,6 +181,8 @@ namespace spot make_twa_graph(aut, { false, false, true, false, false, false }); + scc_info si(aut, scc_info_options::TRACK_SUCCS); + bdd rem = bddtrue; bdd neg = bddfalse; int v = res->get_dict()-> @@ -194,18 +197,18 @@ namespace spot unsigned ns = res->num_states(); std::vector isacc(ns, false); for (unsigned s = 0; s < ns; ++s) - { - for (auto& e: res->out(s)) - if (bdd_implies(e.cond, neg)) + for (auto& e: res->out(s)) + { + if (!si.is_useful_state(e.dst)) { - isacc[e.src] = true; e.cond = bddfalse; + continue; } - else - { - e.cond = bdd_restrict(e.cond, rem); - } - } + if (bdd_have_common_assignment(e.cond, neg)) + isacc[e.src] = true; + e.cond = bdd_restrict(e.cond, rem); + } + res->set_buchi(); res->prop_state_acc(true); diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index b8691adc8..77e68d8d8 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -173,51 +173,62 @@ cmp out3 out4 && exit 1 # make sure we did remove something autfilt out3 > out4 diff out4 expected3 -# Issue #526 -ltlfilt -f '(i->XXo)|G(i<->Xo2)' --from-ltlf | ltl2tgba -D |\ +# Issue #526 and Issue #596 +ltlfilt -f '(i->XXo)|G(i<->Xo2)' -f XXXo --from-ltlf | ltl2tgba -D |\ autfilt -C --to-finite > out cat >exp < selfloop - # 2 Prune - acc_state = set() - sp = list(spot.get_state_players(auts)) - for e in auts.edges(): - if e.acc: - acc_state.add(e.src) - for s in acc_state: - e_kill = auts.out_iteraser(s) - while (e_kill): - e_kill.erase() - for s in acc_state: - sprime = auts.new_state() - sp.append(not sp[s]) - auts.new_edge(s, sprime, buddy.bddtrue, [0]) - auts.new_edge(sprime, s, buddy.bddtrue, [0]) - spot.set_state_players(auts, sp) - auts.purge_dead_states() - spot.alternate_players(auts, False, False) - return auts - -def is_input_complete(auts): - sp = spot.get_state_players(auts) - for s in range(auts.num_states()): - if sp[s]: - continue # Player - cumul = buddy.bddfalse - for e in auts.out(s): - cumul |= e.cond - if cumul != buddy.bddtrue: - return False - - return True - -def synt_from_ltlf(f:str, outs): - ff = spot.from_ltlf(f, alive) - aut = ff.translate("buchi", "sbacc") - outbdd = buddy.bddtrue - for out in outs: - outbdd &= buddy.bdd_ithvar(aut.register_ap(out)) - alive_bdd = buddy.bdd_ithvar(aut.register_ap(alive)) - auts = spot.split_2step(aut, outbdd & alive_bdd, False) - auts = spot.to_finite(auts, alive) - spot.alternate_players(auts, False, False) - spot.set_synthesis_outputs(auts, outbdd) - if not is_input_complete(auts): - print("Not synthesizable") - return None - auts = finite_existential(auts) - - return auts - -def synt_ltlf(f:str, outs, res:str = "aut"): - auts = synt_from_ltlf(f, outs) - - succ = spot.solve_parity_game(auts) - if not succ: - if res == "aut": - return False, auts - else: - return False, None - - mealy_cc = spot.solved_game_to_split_mealy(auts) - - if res == "aut": - return True, mealy_cc - elif res == "aig": - return True, spot.mealy_machine_to_aig(mealy_cc, "isop") - else: - raise RuntimeError("Unknown option") - - -sink_player = None - -def negate_ltlf(f:str, outs, opt = "buchi"): - - global sink_player - sink_player = None - - aut = synt_from_ltlf(f, outs) - # Implies input completeness - # We need output completeness - acc = [] - - sp = 
list(spot.get_state_players(aut)) - - def get_sink(): - global sink_player - if sink_player is None: - sink_player = aut.new_states(2) - aut.new_edge(sink_player, sink_player + 1, buddy.bddtrue, acc) - aut.new_edge(sink_player + 1, sink_player, buddy.bddtrue, acc) - sp.append(False) - sp.append(True) - spot.set_state_players(aut, sp) - return sink_player - - for s in range(aut.num_states()): - if not sp[s]: - continue - rem = buddy.bddtrue - for e in aut.out(s): - rem -= e.cond - if rem != buddy.bddfalse: - aut.new_edge(s, get_sink(), rem) - - # Better to invert colors or condition? - if opt == "buchi": - for e in aut.edges(): - if e.acc: - e.acc = spot.mark_t() - else: - e.acc = spot.mark_t([0]) - elif opt == "cobuchi": - aut.set_co_buchi() - else: - raise RuntimeError("Unknown opt") - return aut - -# Game where the edge_vector is larger -# than the number of transitions -f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ - && (! (grant_1))))) -> (X (idle))))) && (G ((X (! (grant_0))) \ - || (X (((! (request_0)) && (! (idle))) U ((! (request_0)) \ - && (idle))))))) -> (((G (((((X (((! (grant_0)) && (true)) \ - || ((true) && (! (grant_1))))) && ((X (grant_0)) -> (request_0))) \ - && ((X (grant_1)) -> (request_1))) && ((request_0) -> (grant_1))) \ - && ((! (idle)) -> (X ((! (grant_0)) && (! (grant_1))))))) \ - && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ - && (! (F (G ((request_1) && (X (! (grant_1)))))))))" -outs = ["grant_0", "grant1"] -tc.assertEqual(synt_ltlf(f1, outs)[0], False) From c971ce57a6b85d591b0c447d3924ca268c818e4c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 9 Dec 2024 12:09:15 +0100 Subject: [PATCH 517/606] Fix LaTeX rendering of strong next Fix #597. * spot/tl/print.cc: Fix rendering of X[!]. * doc/tl/spotltl.sty: Add a \StrongX definition. * tests/core/latex.test: Add a test case. * NEWS: Mention the issue. --- NEWS | 3 +++ doc/tl/spotltl.sty | 1 + spot/tl/print.cc | 4 ++-- tests/core/latex.test | 1 + 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 655914f1d..99687e173 100644 --- a/NEWS +++ b/NEWS @@ -120,6 +120,9 @@ New in spot 2.12.1.dev (not yet released) - to_finite() was dealing incorrectly with edges that were both alive and dead. (Issue #596.) + - LaTeX output of the X[!] operator with broken in both + LaTeX and self-contained LaTeX mode. 
(Issue #597) + New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/doc/tl/spotltl.sty b/doc/tl/spotltl.sty index b8178ee6b..e4b612b20 100644 --- a/doc/tl/spotltl.sty +++ b/doc/tl/spotltl.sty @@ -12,6 +12,7 @@ \newcommand{\F}{\mathsf{F}} % eventually \newcommand{\G}{\mathsf{G}} % always \newcommand{\X}{\mathsf{X}} % next +\newcommand{\StrongX}{\mathsf{X^{[!]}}} % strong next % The \mathbin tells TeX to adjust spacing for binary operators \newcommand{\M}{\mathbin{\mathsf{M}}} % strong release \newcommand{\R}{\mathbin{\mathsf{R}}} % release diff --git a/spot/tl/print.cc b/spot/tl/print.cc index ef998ba86..bb5c5c068 100644 --- a/spot/tl/print.cc +++ b/spot/tl/print.cc @@ -273,7 +273,7 @@ namespace spot "\\SereEqual{", "\\SereGoto{", "\\FirstMatch", - "\\StrongX", + "\\StrongX ", }; const char* sclatex_kw[] = { @@ -318,7 +318,7 @@ namespace spot "^{=", "^{\\to", "\\mathsf{first\\_match}", - "\\textcircled{\\mathsf{X}}", + "\\mathsf{X^{[!]}}", }; static bool diff --git a/tests/core/latex.test b/tests/core/latex.test index 4101e25bb..c2218b4d3 100755 --- a/tests/core/latex.test +++ b/tests/core/latex.test @@ -37,6 +37,7 @@ a U b W c R (d & e) M f {a*;(b;c)[:*3..4];(c;d)[:+];d}! G(uglyname->Fuglierlongname42) "#foo/$bar$" U "baz~yes^no" +X[!]XX[!]a | G[2:4!]b EOF ( From 461dc842e9ae5d9a7ba6c5e2ad3ead2be91bbbd7 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 1 Jan 2025 21:46:35 +0100 Subject: [PATCH 518/606] work around a change in python 3.13 * python/spot/__init__.py: Unindent the docstring for formula.__format__. Because Python 3.13 strips the indentation but previous version didn't, the following test case failed with Python 3.13. * tests/python/formulas.ipynb: Adjust to unindented docstring. --- python/spot/__init__.py | 67 ++++++++++++++++++++----------------- tests/python/formulas.ipynb | 66 ++++++++++++++++++------------------ 2 files changed, 69 insertions(+), 64 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 6f571f932..126d7e625 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -360,45 +360,50 @@ class formula: raise ValueError("unknown string format: " + format) def __format__(self, spec): + # Some test case print this docstring, and different + # Python version will handled indentation differently. + # (Python versions before 3.13 don't strip indentation.) + # So we cannot indent this until Python 3.13 is thee + # smallest version we support. """Format the formula according to `spec`. - Parameters - ---------- - spec : str, optional - a list of letters that specify how the formula - should be formatted. +Parameters +---------- +spec : str, optional + a list of letters that specify how the formula + should be formatted. 
- Supported specifiers - -------------------- +Supported specifiers +-------------------- - - 'f': use Spot's syntax (default) - - '8': use Spot's syntax in UTF-8 mode - - 's': use Spin's syntax - - 'l': use LBT's syntax - - 'w': use Wring's syntax - - 'x': use LaTeX output - - 'X': use self-contained LaTeX output - - 'j': use self-contained LaTeX output, adjusted for MathJax +- 'f': use Spot's syntax (default) +- '8': use Spot's syntax in UTF-8 mode +- 's': use Spin's syntax +- 'l': use LBT's syntax +- 'w': use Wring's syntax +- 'x': use LaTeX output +- 'X': use self-contained LaTeX output +- 'j': use self-contained LaTeX output, adjusted for MathJax - Add some of those letters for additional options: +Add some of those letters for additional options: - - 'p': use full parentheses - - 'c': escape the formula for CSV output (this will - enclose the formula in double quotes, and escape - any included double quotes) - - 'h': escape the formula for HTML output - - 'd': escape double quotes and backslash, - for use in C-strings (the outermost double - quotes are *not* added) - - 'q': quote and escape for shell output, using single - quotes or double quotes depending on the contents. - - '[...]': rewrite away all the operators specified in brackets, - using spot.unabbreviate(). +- 'p': use full parentheses +- 'c': escape the formula for CSV output (this will + enclose the formula in double quotes, and escape + any included double quotes) +- 'h': escape the formula for HTML output +- 'd': escape double quotes and backslash, + for use in C-strings (the outermost double + quotes are *not* added) +- 'q': quote and escape for shell output, using single + quotes or double quotes depending on the contents. +- '[...]': rewrite away all the operators specified in brackets, + using spot.unabbreviate(). - - ':spec': pass the remaining specification to the - formating function for strings. +- ':spec': pass the remaining specification to the + formating function for strings. 
- """ +""" syntax = 'f' parent = False diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 2882ac7ac..c7e894b09 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -246,43 +246,43 @@ "text": [ "Format the formula according to `spec`.\n", "\n", - " Parameters\n", - " ----------\n", - " spec : str, optional\n", - " a list of letters that specify how the formula\n", - " should be formatted.\n", + "Parameters\n", + "----------\n", + "spec : str, optional\n", + " a list of letters that specify how the formula\n", + " should be formatted.\n", "\n", - " Supported specifiers\n", - " --------------------\n", + "Supported specifiers\n", + "--------------------\n", "\n", - " - 'f': use Spot's syntax (default)\n", - " - '8': use Spot's syntax in UTF-8 mode\n", - " - 's': use Spin's syntax\n", - " - 'l': use LBT's syntax\n", - " - 'w': use Wring's syntax\n", - " - 'x': use LaTeX output\n", - " - 'X': use self-contained LaTeX output\n", - " - 'j': use self-contained LaTeX output, adjusted for MathJax\n", + "- 'f': use Spot's syntax (default)\n", + "- '8': use Spot's syntax in UTF-8 mode\n", + "- 's': use Spin's syntax\n", + "- 'l': use LBT's syntax\n", + "- 'w': use Wring's syntax\n", + "- 'x': use LaTeX output\n", + "- 'X': use self-contained LaTeX output\n", + "- 'j': use self-contained LaTeX output, adjusted for MathJax\n", "\n", - " Add some of those letters for additional options:\n", + "Add some of those letters for additional options:\n", "\n", - " - 'p': use full parentheses\n", - " - 'c': escape the formula for CSV output (this will\n", - " enclose the formula in double quotes, and escape\n", - " any included double quotes)\n", - " - 'h': escape the formula for HTML output\n", - " - 'd': escape double quotes and backslash,\n", - " for use in C-strings (the outermost double\n", - " quotes are *not* added)\n", - " - 'q': quote and escape for shell output, using single\n", - " quotes or double quotes depending on the contents.\n", - " - '[...]': rewrite away all the operators specified in brackets,\n", - " using spot.unabbreviate().\n", + "- 'p': use full parentheses\n", + "- 'c': escape the formula for CSV output (this will\n", + " enclose the formula in double quotes, and escape\n", + " any included double quotes)\n", + "- 'h': escape the formula for HTML output\n", + "- 'd': escape double quotes and backslash,\n", + " for use in C-strings (the outermost double\n", + " quotes are *not* added)\n", + "- 'q': quote and escape for shell output, using single\n", + " quotes or double quotes depending on the contents.\n", + "- '[...]': rewrite away all the operators specified in brackets,\n", + " using spot.unabbreviate().\n", "\n", - " - ':spec': pass the remaining specification to the\n", - " formating function for strings.\n", + "- ':spec': pass the remaining specification to the\n", + " formating function for strings.\n", "\n", - " \n" + "\n" ] } ], @@ -505,7 +505,7 @@ "\n", "\n", - "\n", "\n", " Date: Sun, 5 Jan 2025 13:18:35 +0100 Subject: [PATCH 519/606] Fix slight error in aiger The negation of global equivalences for outputs contained a slight error when the output corresponded to a negated gate. 
* spot/twaalgos/aiger.cc: Fix * tests/core/ltlsynt.test: Test --- spot/twaalgos/aiger.cc | 5 ++++- tests/core/ltlsynt.test | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 95c758ede..8c29dfe41 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -2061,7 +2061,10 @@ namespace assert(it2 != output_names_all.end()); unsigned outnum = it2 - output_names_all.begin(); unsigned outvar = circuit.output(outnum); - circuit.set_output(i, outvar + neg_repr); + assert(outvar != -1u); + if (neg_repr) + outvar = circuit.aig_not(outvar); + circuit.set_output(i, outvar); } } } diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index d89ecd292..6f54cd425 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1360,3 +1360,16 @@ REALIZABLE REALIZABLE EOF diff out expected + +# Additional test for global equivalences +# Specifically if the output is set to the negation of another output + +ltlsynt -f "((G((p0)<->(!(p1))))&&(((F(a))||(G(b)))<->(G(F(p0)))))" \ + --outs "p1, p0" \ + --verify --aiger | tail -n 1 > out + +cat > expected < Date: Wed, 30 Oct 2024 12:07:55 +0100 Subject: [PATCH 520/606] hierarchy: improve error message * spot/tl/hierarchy.cc (mp_class): Fix type of o so that it is displayed as an character in error messages. --- spot/tl/hierarchy.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/tl/hierarchy.cc b/spot/tl/hierarchy.cc index 3ab6f5930..5112c9c12 100644 --- a/spot/tl/hierarchy.cc +++ b/spot/tl/hierarchy.cc @@ -312,7 +312,7 @@ namespace spot bool wide = false; if (opt) for (;;) - switch (int o = *opt++) + switch (char o = *opt++) { case 'v': verbose = true; From 6e6219af54ad332f932d267fb3e31415b58ca57d Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 2 Dec 2024 23:24:30 +0100 Subject: [PATCH 521/606] correct to_finite This fixes issue #596. * spot/twaalgos/remprop.cc: Rewrite main loop. * tests/core/ltlf.test: Add test case. * tests/python/game.py: Remove a test that appears to make incorrect assumptions about to_finite. * NEWS: Mention the bug. --- NEWS | 3 + spot/twaalgos/remprop.cc | 21 +++--- tests/core/ltlf.test | 37 +++++++---- tests/python/game.py | 137 --------------------------------------- 4 files changed, 39 insertions(+), 159 deletions(-) diff --git a/NEWS b/NEWS index 68457a365..0afe8a878 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,9 @@ New in spot 2.12.1.dev (not yet released) Nothing yet. + - to_finite() was dealing incorrectly with edges that were + both alive and dead. (Issue #596.) 
+ New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 14570f148..eb7c54dd0 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -19,6 +19,7 @@ #include "config.h" #include #include +#include #include #include @@ -180,6 +181,8 @@ namespace spot make_twa_graph(aut, { false, false, true, false, false, false }); + scc_info si(aut, scc_info_options::TRACK_SUCCS); + bdd rem = bddtrue; bdd neg = bddfalse; int v = res->get_dict()-> @@ -194,18 +197,18 @@ namespace spot unsigned ns = res->num_states(); std::vector isacc(ns, false); for (unsigned s = 0; s < ns; ++s) - { - for (auto& e: res->out(s)) - if (bdd_implies(e.cond, neg)) + for (auto& e: res->out(s)) + { + if (!si.is_useful_state(e.dst)) { - isacc[e.src] = true; e.cond = bddfalse; + continue; } - else - { - e.cond = bdd_restrict(e.cond, rem); - } - } + if (bdd_have_common_assignment(e.cond, neg)) + isacc[e.src] = true; + e.cond = bdd_restrict(e.cond, rem); + } + res->set_buchi(); res->prop_state_acc(true); diff --git a/tests/core/ltlf.test b/tests/core/ltlf.test index b8691adc8..77e68d8d8 100755 --- a/tests/core/ltlf.test +++ b/tests/core/ltlf.test @@ -173,51 +173,62 @@ cmp out3 out4 && exit 1 # make sure we did remove something autfilt out3 > out4 diff out4 expected3 -# Issue #526 -ltlfilt -f '(i->XXo)|G(i<->Xo2)' --from-ltlf | ltl2tgba -D |\ +# Issue #526 and Issue #596 +ltlfilt -f '(i->XXo)|G(i<->Xo2)' -f XXXo --from-ltlf | ltl2tgba -D |\ autfilt -C --to-finite > out cat >exp < selfloop - # 2 Prune - acc_state = set() - sp = list(spot.get_state_players(auts)) - for e in auts.edges(): - if e.acc: - acc_state.add(e.src) - for s in acc_state: - e_kill = auts.out_iteraser(s) - while (e_kill): - e_kill.erase() - for s in acc_state: - sprime = auts.new_state() - sp.append(not sp[s]) - auts.new_edge(s, sprime, buddy.bddtrue, [0]) - auts.new_edge(sprime, s, buddy.bddtrue, [0]) - spot.set_state_players(auts, sp) - auts.purge_dead_states() - spot.alternate_players(auts, False, False) - return auts - -def is_input_complete(auts): - sp = spot.get_state_players(auts) - for s in range(auts.num_states()): - if sp[s]: - continue # Player - cumul = buddy.bddfalse - for e in auts.out(s): - cumul |= e.cond - if cumul != buddy.bddtrue: - return False - - return True - -def synt_from_ltlf(f:str, outs): - ff = spot.from_ltlf(f, alive) - aut = ff.translate("buchi", "sbacc") - outbdd = buddy.bddtrue - for out in outs: - outbdd &= buddy.bdd_ithvar(aut.register_ap(out)) - alive_bdd = buddy.bdd_ithvar(aut.register_ap(alive)) - auts = spot.split_2step(aut, outbdd & alive_bdd, False) - auts = spot.to_finite(auts, alive) - spot.alternate_players(auts, False, False) - spot.set_synthesis_outputs(auts, outbdd) - if not is_input_complete(auts): - print("Not synthesizable") - return None - auts = finite_existential(auts) - - return auts - -def synt_ltlf(f:str, outs, res:str = "aut"): - auts = synt_from_ltlf(f, outs) - - succ = spot.solve_parity_game(auts) - if not succ: - if res == "aut": - return False, auts - else: - return False, None - - mealy_cc = spot.solved_game_to_split_mealy(auts) - - if res == "aut": - return True, mealy_cc - elif res == "aig": - return True, spot.mealy_machine_to_aig(mealy_cc, "isop") - else: - raise RuntimeError("Unknown option") - - -sink_player = None - -def negate_ltlf(f:str, outs, opt = "buchi"): - - global sink_player - sink_player = None - - aut = synt_from_ltlf(f, outs) - # Implies input completeness - # We need output completeness - acc = [] - - sp = 
list(spot.get_state_players(aut)) - - def get_sink(): - global sink_player - if sink_player is None: - sink_player = aut.new_states(2) - aut.new_edge(sink_player, sink_player + 1, buddy.bddtrue, acc) - aut.new_edge(sink_player + 1, sink_player, buddy.bddtrue, acc) - sp.append(False) - sp.append(True) - spot.set_state_players(aut, sp) - return sink_player - - for s in range(aut.num_states()): - if not sp[s]: - continue - rem = buddy.bddtrue - for e in aut.out(s): - rem -= e.cond - if rem != buddy.bddfalse: - aut.new_edge(s, get_sink(), rem) - - # Better to invert colors or condition? - if opt == "buchi": - for e in aut.edges(): - if e.acc: - e.acc = spot.mark_t() - else: - e.acc = spot.mark_t([0]) - elif opt == "cobuchi": - aut.set_co_buchi() - else: - raise RuntimeError("Unknown opt") - return aut - -# Game where the edge_vector is larger -# than the number of transitions -f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ - && (! (grant_1))))) -> (X (idle))))) && (G ((X (! (grant_0))) \ - || (X (((! (request_0)) && (! (idle))) U ((! (request_0)) \ - && (idle))))))) -> (((G (((((X (((! (grant_0)) && (true)) \ - || ((true) && (! (grant_1))))) && ((X (grant_0)) -> (request_0))) \ - && ((X (grant_1)) -> (request_1))) && ((request_0) -> (grant_1))) \ - && ((! (idle)) -> (X ((! (grant_0)) && (! (grant_1))))))) \ - && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ - && (! (F (G ((request_1) && (X (! (grant_1)))))))))" -outs = ["grant_0", "grant1"] -tc.assertEqual(synt_ltlf(f1, outs)[0], False) From c67332f8250010bd5ea19b3927561fd87d77e91c Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Mon, 9 Dec 2024 12:09:15 +0100 Subject: [PATCH 522/606] Fix LaTeX rendering of strong next Fix #597. * spot/tl/print.cc: Fix rendering of X[!]. * doc/tl/spotltl.sty: Add a \StrongX definition. * tests/core/latex.test: Add a test case. * NEWS: Mention the issue. --- NEWS | 3 +++ doc/tl/spotltl.sty | 1 + spot/tl/print.cc | 4 ++-- tests/core/latex.test | 1 + 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/NEWS b/NEWS index 0afe8a878..e0aba4488 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,9 @@ New in spot 2.12.1.dev (not yet released) - to_finite() was dealing incorrectly with edges that were both alive and dead. (Issue #596.) + - LaTeX output of the X[!] operator with broken in both + LaTeX and self-contained LaTeX mode. 
(Issue #597) + New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/doc/tl/spotltl.sty b/doc/tl/spotltl.sty index b8178ee6b..e4b612b20 100644 --- a/doc/tl/spotltl.sty +++ b/doc/tl/spotltl.sty @@ -12,6 +12,7 @@ \newcommand{\F}{\mathsf{F}} % eventually \newcommand{\G}{\mathsf{G}} % always \newcommand{\X}{\mathsf{X}} % next +\newcommand{\StrongX}{\mathsf{X^{[!]}}} % strong next % The \mathbin tells TeX to adjust spacing for binary operators \newcommand{\M}{\mathbin{\mathsf{M}}} % strong release \newcommand{\R}{\mathbin{\mathsf{R}}} % release diff --git a/spot/tl/print.cc b/spot/tl/print.cc index ef998ba86..bb5c5c068 100644 --- a/spot/tl/print.cc +++ b/spot/tl/print.cc @@ -273,7 +273,7 @@ namespace spot "\\SereEqual{", "\\SereGoto{", "\\FirstMatch", - "\\StrongX", + "\\StrongX ", }; const char* sclatex_kw[] = { @@ -318,7 +318,7 @@ namespace spot "^{=", "^{\\to", "\\mathsf{first\\_match}", - "\\textcircled{\\mathsf{X}}", + "\\mathsf{X^{[!]}}", }; static bool diff --git a/tests/core/latex.test b/tests/core/latex.test index 4101e25bb..c2218b4d3 100755 --- a/tests/core/latex.test +++ b/tests/core/latex.test @@ -37,6 +37,7 @@ a U b W c R (d & e) M f {a*;(b;c)[:*3..4];(c;d)[:+];d}! G(uglyname->Fuglierlongname42) "#foo/$bar$" U "baz~yes^no" +X[!]XX[!]a | G[2:4!]b EOF ( From e4a49cda02491249af49ff4ded9a598571da8a12 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Wed, 1 Jan 2025 21:46:35 +0100 Subject: [PATCH 523/606] work around a change in python 3.13 * python/spot/__init__.py: Unindent the docstring for formula.__format__. Because Python 3.13 strips the indentation but previous version didn't, the following test case failed with Python 3.13. * tests/python/formulas.ipynb: Adjust to unindented docstring. --- python/spot/__init__.py | 67 ++++++++++++++++++++----------------- tests/python/formulas.ipynb | 66 ++++++++++++++++++------------------ 2 files changed, 69 insertions(+), 64 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 6f571f932..126d7e625 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -360,45 +360,50 @@ class formula: raise ValueError("unknown string format: " + format) def __format__(self, spec): + # Some test case print this docstring, and different + # Python version will handled indentation differently. + # (Python versions before 3.13 don't strip indentation.) + # So we cannot indent this until Python 3.13 is thee + # smallest version we support. """Format the formula according to `spec`. - Parameters - ---------- - spec : str, optional - a list of letters that specify how the formula - should be formatted. +Parameters +---------- +spec : str, optional + a list of letters that specify how the formula + should be formatted. 
- Supported specifiers - -------------------- +Supported specifiers +-------------------- - - 'f': use Spot's syntax (default) - - '8': use Spot's syntax in UTF-8 mode - - 's': use Spin's syntax - - 'l': use LBT's syntax - - 'w': use Wring's syntax - - 'x': use LaTeX output - - 'X': use self-contained LaTeX output - - 'j': use self-contained LaTeX output, adjusted for MathJax +- 'f': use Spot's syntax (default) +- '8': use Spot's syntax in UTF-8 mode +- 's': use Spin's syntax +- 'l': use LBT's syntax +- 'w': use Wring's syntax +- 'x': use LaTeX output +- 'X': use self-contained LaTeX output +- 'j': use self-contained LaTeX output, adjusted for MathJax - Add some of those letters for additional options: +Add some of those letters for additional options: - - 'p': use full parentheses - - 'c': escape the formula for CSV output (this will - enclose the formula in double quotes, and escape - any included double quotes) - - 'h': escape the formula for HTML output - - 'd': escape double quotes and backslash, - for use in C-strings (the outermost double - quotes are *not* added) - - 'q': quote and escape for shell output, using single - quotes or double quotes depending on the contents. - - '[...]': rewrite away all the operators specified in brackets, - using spot.unabbreviate(). +- 'p': use full parentheses +- 'c': escape the formula for CSV output (this will + enclose the formula in double quotes, and escape + any included double quotes) +- 'h': escape the formula for HTML output +- 'd': escape double quotes and backslash, + for use in C-strings (the outermost double + quotes are *not* added) +- 'q': quote and escape for shell output, using single + quotes or double quotes depending on the contents. +- '[...]': rewrite away all the operators specified in brackets, + using spot.unabbreviate(). - - ':spec': pass the remaining specification to the - formating function for strings. +- ':spec': pass the remaining specification to the + formating function for strings. 
- """ +""" syntax = 'f' parent = False diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 2882ac7ac..c7e894b09 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -246,43 +246,43 @@ "text": [ "Format the formula according to `spec`.\n", "\n", - " Parameters\n", - " ----------\n", - " spec : str, optional\n", - " a list of letters that specify how the formula\n", - " should be formatted.\n", + "Parameters\n", + "----------\n", + "spec : str, optional\n", + " a list of letters that specify how the formula\n", + " should be formatted.\n", "\n", - " Supported specifiers\n", - " --------------------\n", + "Supported specifiers\n", + "--------------------\n", "\n", - " - 'f': use Spot's syntax (default)\n", - " - '8': use Spot's syntax in UTF-8 mode\n", - " - 's': use Spin's syntax\n", - " - 'l': use LBT's syntax\n", - " - 'w': use Wring's syntax\n", - " - 'x': use LaTeX output\n", - " - 'X': use self-contained LaTeX output\n", - " - 'j': use self-contained LaTeX output, adjusted for MathJax\n", + "- 'f': use Spot's syntax (default)\n", + "- '8': use Spot's syntax in UTF-8 mode\n", + "- 's': use Spin's syntax\n", + "- 'l': use LBT's syntax\n", + "- 'w': use Wring's syntax\n", + "- 'x': use LaTeX output\n", + "- 'X': use self-contained LaTeX output\n", + "- 'j': use self-contained LaTeX output, adjusted for MathJax\n", "\n", - " Add some of those letters for additional options:\n", + "Add some of those letters for additional options:\n", "\n", - " - 'p': use full parentheses\n", - " - 'c': escape the formula for CSV output (this will\n", - " enclose the formula in double quotes, and escape\n", - " any included double quotes)\n", - " - 'h': escape the formula for HTML output\n", - " - 'd': escape double quotes and backslash,\n", - " for use in C-strings (the outermost double\n", - " quotes are *not* added)\n", - " - 'q': quote and escape for shell output, using single\n", - " quotes or double quotes depending on the contents.\n", - " - '[...]': rewrite away all the operators specified in brackets,\n", - " using spot.unabbreviate().\n", + "- 'p': use full parentheses\n", + "- 'c': escape the formula for CSV output (this will\n", + " enclose the formula in double quotes, and escape\n", + " any included double quotes)\n", + "- 'h': escape the formula for HTML output\n", + "- 'd': escape double quotes and backslash,\n", + " for use in C-strings (the outermost double\n", + " quotes are *not* added)\n", + "- 'q': quote and escape for shell output, using single\n", + " quotes or double quotes depending on the contents.\n", + "- '[...]': rewrite away all the operators specified in brackets,\n", + " using spot.unabbreviate().\n", "\n", - " - ':spec': pass the remaining specification to the\n", - " formating function for strings.\n", + "- ':spec': pass the remaining specification to the\n", + " formating function for strings.\n", "\n", - " \n" + "\n" ] } ], @@ -505,7 +505,7 @@ "\n", "\n", - "\n", "\n", " Date: Fri, 17 Jan 2025 21:46:43 +0100 Subject: [PATCH 524/606] python: improve ACD's CSS Some colleagues complained that the highlighting of edges and nodes in the ACD display where not very readable, especially when sharing screen during some video call. This should improve it. * python/spot/__init__.py (acd): Fill the contents of the nodes when they are highlighted. Add some glowing effect the the highlighted edges. * tests/python/zlktree.ipynb: Adjust. 
--- python/spot/__init__.py | 6 +- tests/python/zlktree.ipynb | 276 +++++++++++++++++++------------------ 2 files changed, 148 insertions(+), 134 deletions(-) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index 126d7e625..91b77c7e9 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -517,8 +517,10 @@ class acd: num = _acdnum _acdnum += 1 style = ''' -.acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;} -.acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;} +.acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);} +.acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);} +.acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);} +.acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);} .acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;} .acdrej polygon{fill:red;} .acdacc polygon{fill:green;} diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index 3eb6f013f..5cd797fc7 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -48,7 +48,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -640,7 +640,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d01decab0> >" + " *' at 0x7f718149d980> >" ] }, "execution_count": 10, @@ -1010,12 +1010,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -1411,12 +1411,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -1809,7 +1809,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d01decd50> >" + " *' at 0x7f718149f750> >" ] }, "execution_count": 11, @@ -1876,7 +1876,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d01ded4d0> >" + " *' at 0x7f718149fc90> >" ] }, "execution_count": 13, @@ -2156,12 +2156,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -2579,12 +2579,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -2999,7 +2999,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d01dedc50> >" + " *' at 0x7f71814bc870> >" ] }, "execution_count": 14, @@ -3082,7 +3082,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -3728,7 +3728,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4374,7 +4374,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4530,7 +4530,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4736,7 +4736,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4796,7 +4796,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -4911,7 +4911,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 18, @@ -5298,7 +5298,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej 
ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "(FGp0 & ((XFp0 & F!p1) | F(Gp1 &\\nXG!p0))) | G(F!p0 &\\n(XFp0 | F!p1) & F(Gp1 | G!p0))\n", "\n", "(Fin(\n", @@ -6106,7 +6108,7 @@ "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 20, @@ -6789,12 +6791,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -7388,12 +7390,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -7984,7 +7986,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d005206c0> >" + " *' at 0x7f71814d4a80> >" ] }, "execution_count": 29, @@ -8047,12 +8049,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -8626,12 +8628,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -9202,7 +9204,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d005207b0> >" + " *' at 0x7f71814d68b0> >" ] }, "execution_count": 31, @@ -9412,7 +9414,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "(FGp0 & ((XFp0 & F!p1) | F(Gp1 &\\nXG!p0))) | G(F!p0 &\\n(XFp0 | F!p1) & F(Gp1 | G!p0))\n", "\n", "(Fin(\n", @@ -10220,7 +10224,7 @@ "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 40, @@ -10749,12 +10753,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -11540,12 +11544,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Fin(\n", "\n", @@ -12328,7 +12332,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d00521bc0> >" + " *' at 0x7f71814e9b60> >" ] }, "execution_count": 45, @@ -12392,7 +12396,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 47, @@ -12716,7 +12722,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d005214d0> >" + " *' at 0x7f71814ea370> >" ] }, "execution_count": 48, @@ -13123,7 +13129,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 49, @@ -13442,7 +13450,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00522310> >" + " *' at 0x7f71814eac70> >" ] }, "execution_count": 50, @@ -13801,7 +13809,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00522970> >" + " *' at 0x7f71814ebba0> >" ] }, "execution_count": 51, @@ -14080,7 +14088,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 52, @@ -14243,7 +14251,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00522ac0> >" + " *' at 0x7f7180d08360> >" ] }, "execution_count": 53, @@ -14564,7 +14572,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 55, @@ -14897,7 +14907,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00523150> >" + " *' at 0x7f71814ebd80> >" ] }, "execution_count": 57, @@ -15118,7 +15128,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00523240> >" + " *' at 0x7f7180d08ab0> >" ] }, "execution_count": 58, @@ -15411,7 +15421,7 @@ "\n", "\n", - "\n", "\n", "\n", - ".acdhigh ellipse,.acdacc ellipse,.acdacc path,.acdacc polygon{stroke:green;}\n", - ".acdhigh polygon,.acdrej ellipse,.acdrej path,.acdrej polygon{stroke:red;}\n", + ".acdhigh ellipse,.acdacc ellipse{stroke:green;fill:rgb(220,255,220);}\n", + ".acdhigh polygon,.acdrej ellipse{stroke:red;fill:rgb(255,220,220);}\n", + ".acdacc polygon,.acdacc path{stroke:green;filter:drop-shadow(green 0 0 2px);}\n", + ".acdrej polygon,.acdrej path{stroke:red;filter:drop-shadow(red 0 0 2px);}\n", ".acdbold ellipse,.acdbold polygon,.acdbold path{stroke-width:2;}\n", ".acdrej polygon{fill:red;}\n", ".acdacc polygon{fill:green;}\n", "
\n", "\n", - "\n", "\n", "
\n", "\n", - "\n", "\n", "" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 60, @@ -15769,7 +15781,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d005239c0> >" + " *' at 0x7f7180d0a160> >" ] }, "execution_count": 61, @@ -15990,7 +16002,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d005238a0> >" + " *' at 0x7f7180d0a580> >" ] }, "execution_count": 62, @@ -16229,12 +16241,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Inf(\n", "\n", @@ -16488,12 +16500,12 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", + "\n", "\n", "Inf(\n", "\n", @@ -16744,7 +16756,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f2d00523720> >" + " *' at 0x7f7180d0aa60> >" ] }, "execution_count": 63, @@ -16778,7 +16790,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00523fc0> >" + " *' at 0x7f71814e94d0> >" ] }, "execution_count": 64, @@ -16959,7 +16971,7 @@ "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f2d00550090> >" + " *' at 0x7f7180d0b720> >" ] }, "execution_count": 66, @@ -17126,7 +17138,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.12.7" } }, "nbformat": 4, From 480e5e999b2079e0f0477ca299fe81b47b300972 Mon Sep 17 00:00:00 2001 From: Philipp Schlehuber-Caissier Date: Sun, 5 Jan 2025 13:18:35 +0100 Subject: [PATCH 525/606] Fix slight error in aiger The negation of global equivalences for outputs contained a slight error when the output corresponded to a negated gate. * spot/twaalgos/aiger.cc: Fix * tests/core/ltlsynt.test: Test --- spot/twaalgos/aiger.cc | 5 ++++- tests/core/ltlsynt.test | 12 ++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 95c758ede..8c29dfe41 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -2061,7 +2061,10 @@ namespace assert(it2 != output_names_all.end()); unsigned outnum = it2 - output_names_all.begin(); unsigned outvar = circuit.output(outnum); - circuit.set_output(i, outvar + neg_repr); + assert(outvar != -1u); + if (neg_repr) + outvar = circuit.aig_not(outvar); + circuit.set_output(i, outvar); } } } diff --git a/tests/core/ltlsynt.test b/tests/core/ltlsynt.test index dd3e4152c..d8a4c2f44 100644 --- a/tests/core/ltlsynt.test +++ b/tests/core/ltlsynt.test @@ -1304,3 +1304,15 @@ f2='G(i1->(o1|!o2)) & G(!i1->(o3|!o4)) & G(i2->(!o1|o2)) & G(!i2->(!o3|o4))&Go5' ltlsynt -f "$f2" --polarity=before-decom --verbose 2>out 1>&2 sed 's/ [0-9.e-]* seconds/ X seconds/g;s/ -> /->/g;' out > outx diff outx exp + +# Additional test for global equivalences +# Specifically if the output is set to the negation of another output + +ltlsynt -f "((G((p0)<->(!(p1))))&&(((F(a))||(G(b)))<->(G(F(p0)))))" \ + --outs "p1, p0" \ + --verify --aiger | tail -n 1 > out + +cat > expected < Date: Fri, 17 Jan 2025 22:50:20 +0100 Subject: [PATCH 526/606] release Spot 2.12.2 * configure.ac, doc/org/setup.org: Bump version to 2.12.2. * bin/common_setup.cc, debian/copyright: Bump copyright year to 2025. * NEWS: Update. 
--- NEWS | 9 +++++++-- bin/common_setup.cc | 2 +- configure.ac | 2 +- debian/copyright | 2 +- doc/org/setup.org | 4 ++-- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index e0aba4488..7de69604b 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,6 @@ -New in spot 2.12.1.dev (not yet released) +New in spot 2.12.2 (2025-01-18) - Nothing yet. + Bug fixes: - to_finite() was dealing incorrectly with edges that were both alive and dead. (Issue #596.) @@ -8,6 +8,11 @@ New in spot 2.12.1.dev (not yet released) - LaTeX output of the X[!] operator with broken in both LaTeX and self-contained LaTeX mode. (Issue #597) + - Fix a bug in the AIGER encoding with certain forms of + global-equivalence. + + - Work around a spurious test failure with Python 3.13. + New in spot 2.12.1 (2024-09-23) Bug fixes: diff --git a/bin/common_setup.cc b/bin/common_setup.cc index 1b23833df..7a25a77e9 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -35,7 +35,7 @@ display_version(FILE *stream, struct argp_state*) fputs(program_name, stream); fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\ \n\ -Copyright (C) 2024 by the Spot authors, see the AUTHORS File for details.\n\ +Copyright (C) 2025 by the Spot authors, see the AUTHORS File for details.\n\ License GPLv3+: \ GNU GPL version 3 or later .\n\ This is free software: you are free to change and redistribute it.\n\ diff --git a/configure.ac b/configure.ac index 6f64eab68..69bc4faae 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . AC_PREREQ([2.69]) -AC_INIT([spot], [2.12.1.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12.2], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) diff --git a/debian/copyright b/debian/copyright index 66fdb75c2..ae0fbe68d 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,7 +3,7 @@ Upstream-Name: spot Source: http://www.lrde.epita.fr/dload/spot/ Files: * -Copyright: 2003-2024 the Spot authors +Copyright: 2003-2025 the Spot authors License: GPL-3+ Spot is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/doc/org/setup.org b/doc/org/setup.org index d8f8f2a3e..bf1a21760 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,11 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: LASTDATE 2024-09-23 +#+MACRO: LASTDATE 2025-01-18 #+NAME: SPOT_VERSION #+BEGIN_SRC python :exports none :results value :wrap org -return "2.12.1" +return "2.12.2" #+END_SRC #+NAME: TARBALL_LINK From 5cbc28897e16f565d2085d2993f70ee0020334f4 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Sat, 18 Jan 2025 11:12:22 +0100 Subject: [PATCH 527/606] * configure.ac, NEWS: Bump version to 2.12.2.dev. --- NEWS | 4 ++++ configure.ac | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 7de69604b..643483799 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +New in spot 2.12.2.dev (not yet released) + + Nothing yet. + New in spot 2.12.2 (2025-01-18) Bug fixes: diff --git a/configure.ac b/configure.ac index 69bc4faae..e361d79c6 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ # along with this program. If not, see . 
AC_PREREQ([2.69]) -AC_INIT([spot], [2.12.2], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.12.2.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) From 602aad013f6ee16f3ec0112a4a2bb3f93a43d932 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 4 Feb 2025 21:08:01 +0100 Subject: [PATCH 528/606] aiger: never use state names for encoding * spot/twaalgos/aiger.cc (mealy_machine_to_aig): Remove the code that attempted to convert state names to integer, throwing exceptions on failure. That code was not exercised anywhere, but it caused failure in the implementation of an LTLf->AIG pipeline in which LTLf formulas that label states are preserved. --- spot/twaalgos/aiger.cc | 59 ++++++++++++++---------------------------- 1 file changed, 20 insertions(+), 39 deletions(-) diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index 8c29dfe41..c3359fe72 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1503,7 +1503,7 @@ namespace state_to_vec(std::vector& v, unsigned s, unsigned offset) { - assert(s != -1u && "-1u is not a valid sstate"); + assert(s != -1u && "-1u is not a valid state"); v.clear(); unsigned i = offset; while (s > 0) @@ -1553,7 +1553,7 @@ namespace const std::vector& unused_outs = {}, const realizability_simplifier* rs = nullptr) { - // The aiger circuit can currently noly encode separated mealy machines + // The aiger circuit can currently only encode separated mealy machines for (const auto& astrat : strat_vec) if (!astrat.first->acc().is_t()) @@ -1681,49 +1681,30 @@ namespace state_numbers.emplace_back(N, -1u); auto& sn = state_numbers.back(); unsigned max_index = 0; - // Check if named - auto sp_ptr = astrat.first->get_named_prop("state-player"); - if (const auto* s_names = - astrat.first-> - get_named_prop>("state-names")) + if (auto sp_ptr = + astrat.first->get_named_prop("state-player")) { + const region_t& sp = *sp_ptr; + // Split + unsigned n_next = 0; + // Player -1u + // Env: Succesively numbered according to appearance for (unsigned n = 0; n < N; ++n) - { - if (sp_ptr && (*sp_ptr)[n]) - continue; - // Remains -1u - unsigned su = std::stoul((*s_names)[n]); - max_index = std::max(max_index, su); - sn[n] = su; - } - ++max_index; + if (!sp[n]) + sn[n] = n_next++; + max_index = n_next; } else { - if (sp_ptr) - { - auto sp = *sp_ptr; - // Split - unsigned n_next = 0; - // Player -1u - // Env: Succesively numbered according to appearance - for (unsigned n = 0; n < N; ++n) - if (!sp[n]) - sn[n] = n_next++; - max_index = n_next; - } - else - { - std::iota(state_numbers.back().begin(), - state_numbers.back().end(), 0); - max_index = N; - } - - // Ensure 0 <-> init state - std::swap(state_numbers.back()[0], - state_numbers.back()[astrat.first-> - get_init_state_number()]); + std::iota(state_numbers.back().begin(), + state_numbers.back().end(), 0); + max_index = N; } + // Ensure 0 <-> init state + std::swap(state_numbers.back()[0], + state_numbers.back()[astrat.first-> + get_init_state_number()]); + // Largest index to encode -> num_states()-1 log2n.push_back(std::ceil(std::log2(max_index))); latch_offset.push_back(n_latches); From aba0e8dd24e60ba63651fa28bba556d30e3569c1 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 4 Feb 2025 21:12:13 +0100 Subject: [PATCH 529/606] * spot/twaalgos/mealy_machine.hh: Typo in documentation. 
--- spot/twaalgos/mealy_machine.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index baeca6c4f..74cef93bf 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -60,7 +60,7 @@ namespace spot /// \brief Checks whether or not the automaton is a split mealy machine /// /// A split mealy machine is a mealy machine machine that has - /// be converted into a game. It should have the named property + /// been converted into a game. It should have the named property /// `"state-player"`, moreover the game should be alternating /// between the two players. Transitions leaving states owned by /// player 0 (the environment) should use only input propositions, From 27fb175276a1d14a8663a25dc18462108405952f Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 6 Feb 2025 17:15:48 +0100 Subject: [PATCH 530/606] ltlsynt: fix a memory leak * bin/ltlsynt.cc: Declare the realizability_simplifier as a unique_ptr so that it gets deleted after use. * NEWS: Mention the bug. --- NEWS | 2 ++ bin/ltlsynt.cc | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/NEWS b/NEWS index 17fb1a573..2e4d094f2 100644 --- a/NEWS +++ b/NEWS @@ -117,6 +117,8 @@ New in spot 2.12.2.dev (not yet released) status and the AIG circuit; it now does the job silently as requested. + - ltlsynt had a minor memory leak + New in spot 2.12.2 (2025-01-18) Bug fixes: diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 691995b7b..b5e909cb1 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -489,7 +489,7 @@ namespace }; // Attempt to remove superfluous atomic propositions - spot::realizability_simplifier* rs = nullptr; + std::unique_ptr rs = nullptr; if (opt_polarity != pol_no || opt_gequiv != pol_no) { unsigned opt = 0; @@ -502,9 +502,9 @@ namespace else opt |= spot::realizability_simplifier::global_equiv; } - rs = - new spot::realizability_simplifier(original_f, input_aps, opt, - gi ? gi->verbose_stream : nullptr); + rs.reset(new spot::realizability_simplifier(original_f, input_aps, opt, + gi ? gi->verbose_stream + : nullptr)); f = rs->simplified_formula(); } @@ -646,7 +646,7 @@ namespace } if (want_game()) { - dispatch_print_hoa(arena, rs); + dispatch_print_hoa(arena, rs.get()); continue; } if (!spot::solve_game(arena, *gi)) @@ -732,7 +732,7 @@ namespace if (gi->bv) sw2.start(); saig = spot::mealy_machines_to_aig(mealy_machines, opt_aiger, - input_aps, sub_outs_str, rs); + input_aps, sub_outs_str, rs.get()); if (gi->bv) { gi->bv->aig_time = sw2.stop(); From d0e404fec012e7a1367dfdd88bab47254023bb1e Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Thu, 6 Feb 2025 21:51:09 +0100 Subject: [PATCH 531/606] gnulib: fix argp --help formatting This is a patch that was sent by Simon Reinhardt to gnulib and has never been applied. It fixes a several formatting issues in --help. https://lists.gnu.org/archive/html/bug-gnulib/2016-02/msg00013.html * lib/argp-fmtstream.c (__argp_fmtstream_update): Flush output as soon as possible. * lib/argp-fmtstream.h (struct argp_fmtstream): Member point_offs is no longer needed. * lib/argp-help.c (indent_to): Flush output to avoid a spurious newline before an overlong word. 
--- lib/argp-fmtstream.c | 198 +++++++++++-------------------------------- lib/argp-fmtstream.h | 12 ++- lib/argp-help.c | 3 + 3 files changed, 56 insertions(+), 157 deletions(-) diff --git a/lib/argp-fmtstream.c b/lib/argp-fmtstream.c index 870c21cfe..f0d0c8290 100644 --- a/lib/argp-fmtstream.c +++ b/lib/argp-fmtstream.c @@ -68,7 +68,6 @@ __argp_make_fmtstream (FILE *stream, fs->rmargin = rmargin; fs->wmargin = wmargin; fs->point_col = 0; - fs->point_offs = 0; fs->buf = (char *) malloc (INIT_BUF_SIZE); if (! fs->buf) @@ -115,8 +114,19 @@ weak_alias (__argp_fmtstream_free, argp_fmtstream_free) #endif #endif -/* Process FS's buffer so that line wrapping is done from POINT_OFFS to the - end of its buffer. This code is mostly from glibc stdio/linewrap.c. */ + +static void +write_block (argp_fmtstream_t fs, char *buf, int len) +{ +#ifdef _LIBC + __fxprintf (fs->stream, "%.*s", len, buf); +#else + fwrite_unlocked (buf, 1, len, fs->stream); +#endif +} + +/* Process FS's buffer so that line wrapping is done and flush all of it. So + after return fs->p will be set to fb->buf. */ void __argp_fmtstream_update (argp_fmtstream_t fs) { @@ -124,7 +134,7 @@ __argp_fmtstream_update (argp_fmtstream_t fs) size_t len; /* Scan the buffer for newlines. */ - buf = fs->buf + fs->point_offs; + buf = fs->buf; while (buf < fs->p) { size_t r; @@ -132,31 +142,10 @@ __argp_fmtstream_update (argp_fmtstream_t fs) if (fs->point_col == 0 && fs->lmargin != 0) { /* We are starting a new line. Print spaces to the left margin. */ - const size_t pad = fs->lmargin; - if (fs->p + pad < fs->end) - { - /* We can fit in them in the buffer by moving the - buffer text up and filling in the beginning. */ - memmove (buf + pad, buf, fs->p - buf); - fs->p += pad; /* Compensate for bigger buffer. */ - memset (buf, ' ', pad); /* Fill in the spaces. */ - buf += pad; /* Don't bother searching them. */ - } - else - { - /* No buffer space for spaces. Must flush. */ - size_t i; - for (i = 0; i < pad; i++) - { -#ifdef _LIBC - if (_IO_fwide (fs->stream, 0) > 0) - putwc_unlocked (L' ', fs->stream); - else -#endif - putc_unlocked (' ', fs->stream); - } - } - fs->point_col = pad; + size_t i; + for (i = 0; i < fs->lmargin; i++) + write_block(fs, " ", 1); + fs->point_col = fs->lmargin; } len = fs->p - buf; @@ -172,8 +161,9 @@ __argp_fmtstream_update (argp_fmtstream_t fs) if (fs->point_col + len < fs->rmargin) { /* The remaining buffer text is a partial line and fits - within the maximum line width. Advance point for the - characters to be written and stop scanning. */ + within the maximum line width. Output the line and increment + point. */ + write_block(fs, buf, len); fs->point_col += len; break; } @@ -185,7 +175,9 @@ __argp_fmtstream_update (argp_fmtstream_t fs) else if (fs->point_col + (nl - buf) < (ssize_t) fs->rmargin) { /* The buffer contains a full line that fits within the maximum - line width. Reset point and scan the next line. */ + line width. Output the line, reset point and scan the next + line. */ + write_block(fs, buf, nl + 1 - buf); fs->point_col = 0; buf = nl + 1; continue; @@ -196,23 +188,23 @@ __argp_fmtstream_update (argp_fmtstream_t fs) if (fs->wmargin < 0) { - /* Truncate the line by overwriting the excess with the - newline and anything after it in the buffer. */ + /* Truncated everything past the right margin. 
*/ if (nl < fs->p) { - memmove (buf + (r - fs->point_col), nl, fs->p - nl); - fs->p -= buf + (r - fs->point_col) - nl; + write_block(fs, buf, r - fs->point_col); + write_block(fs, "\n", 1); /* Reset point for the next line and start scanning it. */ fs->point_col = 0; - buf += r + 1; /* Skip full line plus \n. */ + buf = nl + 1; /* Skip full line plus \n. */ + continue; } else { /* The buffer ends with a partial line that is beyond the maximum line width. Advance point for the characters written, and discard those past the max from the buffer. */ - fs->point_col += len; - fs->p -= fs->point_col - r; + write_block(fs, buf, r - fs->point_col); + fs->point_col += len; break; } } @@ -249,99 +241,26 @@ __argp_fmtstream_update (argp_fmtstream_t fs) do ++p; while (p < nl && !isblank ((unsigned char) *p)); - if (p == nl) - { - /* It already ends a line. No fussing required. */ - fs->point_col = 0; - buf = nl + 1; - continue; - } - /* We will move the newline to replace the first blank. */ - nl = p; - /* Swallow separating blanks. */ - do - ++p; - while (isblank ((unsigned char) *p)); - /* The next line will start here. */ - nextline = p; - } + nl = p; + nextline = nl + 1; + } - /* Note: There are a bunch of tests below for - NEXTLINE == BUF + LEN + 1; this case is where NL happens to fall - at the end of the buffer, and NEXTLINE is in fact empty (and so - we need not be careful to maintain its contents). */ - - if ((nextline == buf + len + 1 - ? fs->end - nl < fs->wmargin + 1 - : nextline - (nl + 1) < fs->wmargin) - && fs->p > nextline) - { - /* The margin needs more blanks than we removed. */ - if (fs->end - fs->p > fs->wmargin + 1) - /* Make some space for them. */ - { - size_t mv = fs->p - nextline; - memmove (nl + 1 + fs->wmargin, nextline, mv); - nextline = nl + 1 + fs->wmargin; - len = nextline + mv - buf; - *nl++ = '\n'; - } - else - /* Output the first line so we can use the space. */ - { -#ifdef _LIBC - __fxprintf (fs->stream, "%.*s\n", - (int) (nl - fs->buf), fs->buf); -#else - if (nl > fs->buf) - fwrite_unlocked (fs->buf, 1, nl - fs->buf, fs->stream); - putc_unlocked ('\n', fs->stream); -#endif - - len += buf - fs->buf; - nl = buf = fs->buf; - } - } - else - /* We can fit the newline and blanks in before - the next word. */ - *nl++ = '\n'; - - if (nextline - nl >= fs->wmargin - || (nextline == buf + len + 1 && fs->end - nextline >= fs->wmargin)) - /* Add blanks up to the wrap margin column. */ - for (i = 0; i < fs->wmargin; ++i) - *nl++ = ' '; - else - for (i = 0; i < fs->wmargin; ++i) -#ifdef _LIBC - if (_IO_fwide (fs->stream, 0) > 0) - putwc_unlocked (L' ', fs->stream); - else -#endif - putc_unlocked (' ', fs->stream); - - /* Copy the tail of the original buffer into the current buffer - position. */ - if (nl < nextline) - memmove (nl, nextline, buf + len - nextline); - len -= nextline - buf; - - /* Continue the scan on the remaining lines in the buffer. */ - buf = nl; - - /* Restore bufp to include all the remaining text. */ - fs->p = nl + len; - - /* Reset the counter of what has been output this line. If wmargin - is 0, we want to avoid the lmargin getting added, so we set - point_col to a magic value of -1 in that case. */ - fs->point_col = fs->wmargin ? fs->wmargin : -1; - } + write_block(fs, buf, nl - buf); + if (nextline < fs->p) + { + /* There are more lines to process. Do line break and print + blanks up to the wrap margin. 
*/ + write_block(fs, "\n", 1); + for (i = 0; i < fs->wmargin; ++i) + write_block(fs, " ", 1); + fs->point_col = fs->wmargin; + } + buf = nextline; + } } - /* Remember that we've scanned as far as the end of the buffer. */ - fs->point_offs = fs->p - fs->buf; + /* Remember that we've flushed everything. */ + fs->p = fs->buf; } /* Ensure that FS has space for AMOUNT more bytes in its buffer, either by @@ -351,30 +270,9 @@ __argp_fmtstream_ensure (struct argp_fmtstream *fs, size_t amount) { if ((size_t) (fs->end - fs->p) < amount) { - ssize_t wrote; - /* Flush FS's buffer. */ __argp_fmtstream_update (fs); -#ifdef _LIBC - __fxprintf (fs->stream, "%.*s", (int) (fs->p - fs->buf), fs->buf); - wrote = fs->p - fs->buf; -#else - wrote = fwrite_unlocked (fs->buf, 1, fs->p - fs->buf, fs->stream); -#endif - if (wrote == fs->p - fs->buf) - { - fs->p = fs->buf; - fs->point_offs = 0; - } - else - { - fs->p -= wrote; - fs->point_offs -= wrote; - memmove (fs->buf, fs->buf + wrote, fs->p - fs->buf); - return 0; - } - if ((size_t) (fs->end - fs->buf) < amount) /* Gotta grow the buffer. */ { diff --git a/lib/argp-fmtstream.h b/lib/argp-fmtstream.h index e7713c4fd..e43f7bb47 100644 --- a/lib/argp-fmtstream.h +++ b/lib/argp-fmtstream.h @@ -95,9 +95,7 @@ struct argp_fmtstream size_t lmargin, rmargin; /* Left and right margins. */ ssize_t wmargin; /* Margin to wrap to, or -1 to truncate. */ - /* Point in buffer to which we've processed for wrapping, but not output. */ - size_t point_offs; - /* Output column at POINT_OFFS, or -1 meaning 0 but don't add lmargin. */ + /* Output column at buf, or -1 meaning 0 but don't add lmargin. */ ssize_t point_col; char *buf; /* Output buffer. */ @@ -250,7 +248,7 @@ ARGP_FS_EI size_t __argp_fmtstream_set_lmargin (argp_fmtstream_t __fs, size_t __lmargin) { size_t __old; - if ((size_t) (__fs->p - __fs->buf) > __fs->point_offs) + if (__fs->p > __fs->buf) __argp_fmtstream_update (__fs); __old = __fs->lmargin; __fs->lmargin = __lmargin; @@ -262,7 +260,7 @@ ARGP_FS_EI size_t __argp_fmtstream_set_rmargin (argp_fmtstream_t __fs, size_t __rmargin) { size_t __old; - if ((size_t) (__fs->p - __fs->buf) > __fs->point_offs) + if (__fs->p > __fs->buf) __argp_fmtstream_update (__fs); __old = __fs->rmargin; __fs->rmargin = __rmargin; @@ -274,7 +272,7 @@ ARGP_FS_EI size_t __argp_fmtstream_set_wmargin (argp_fmtstream_t __fs, size_t __wmargin) { size_t __old; - if ((size_t) (__fs->p - __fs->buf) > __fs->point_offs) + if (__fs->p > __fs->buf) __argp_fmtstream_update (__fs); __old = __fs->wmargin; __fs->wmargin = __wmargin; @@ -285,7 +283,7 @@ __argp_fmtstream_set_wmargin (argp_fmtstream_t __fs, size_t __wmargin) ARGP_FS_EI size_t __argp_fmtstream_point (argp_fmtstream_t __fs) { - if ((size_t) (__fs->p - __fs->buf) > __fs->point_offs) + if (__fs->p > __fs->buf) __argp_fmtstream_update (__fs); return __fs->point_col >= 0 ? __fs->point_col : 0; } diff --git a/lib/argp-help.c b/lib/argp-help.c index 550dfd98d..2f4b81000 100644 --- a/lib/argp-help.c +++ b/lib/argp-help.c @@ -931,6 +931,9 @@ indent_to (argp_fmtstream_t stream, unsigned col) int needed = col - __argp_fmtstream_point (stream); while (needed-- > 0) __argp_fmtstream_putc (stream, ' '); + /* Flush stream to avoid spurious newline before overlong word + (see argp-test.c). 
*/ + __argp_fmtstream_update(stream); } /* Output to STREAM either a space, or a newline if there isn't room for at From b1b06ef7bd2a1b3fc15e9ec4a699efc5eb4d3709 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 7 Feb 2025 21:26:08 +0100 Subject: [PATCH 532/606] ltlsynt: remove superfluous output options No point in having options such as --spin, --lbtt, --check, etc. Also --dot was documented twice... * bin/ltlsynt.cc (children): Remove aoutput_argp. (options): Add explicit support for -d, -H, -q. * bin/common_aoutput.cc, bin/common_aoutput.hh: Share the HOA help text. --- bin/common_aoutput.cc | 26 +++++++++++++++----------- bin/common_aoutput.hh | 4 ++++ bin/ltlsynt.cc | 23 ++++++++++++++--------- 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index f5ba8d625..b465dde27 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -91,6 +91,19 @@ enum { OPT_CHECK, }; +const char* hoa_option_doc_short = "1.1|b|i|k|l|m|s|t|v"; +const char* hoa_option_doc_long = + "Output the automaton in HOA format (default). Add letters to select " + "(1.1) version 1.1 of the format, " + "(b) create an alias basis if >=2 AP are used, " + "(i) use implicit labels for complete deterministic automata, " + "(s) prefer state-based acceptance when possible [default], " + "(t) force transition-based acceptance, " + "(m) mix state and transition-based acceptance, " + "(k) use state labels when possible, " + "(l) single-line output, " + "(v) verbose properties"; + static const argp_option options[] = { /**************************************************/ @@ -129,17 +142,8 @@ static const argp_option options[] = "(+INT) add INT to all set numbers, " "(=2 AP are used, " - "(i) use implicit labels for complete deterministic automata, " - "(s) prefer state-based acceptance when possible [default], " - "(t) force transition-based acceptance, " - "(m) mix state and transition-based acceptance, " - "(k) use state labels when possible, " - "(l) single-line output, " - "(v) verbose properties", 0 }, + { "hoaf", 'H', hoa_option_doc_short, OPTION_ARG_OPTIONAL, + hoa_option_doc_long, 0 }, { "lbtt", OPT_LBTT, "t", OPTION_ARG_OPTIONAL, "LBTT's format (add =t to force transition-based acceptance even" " on Büchi automata)", 0 }, diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index 24066699a..96dc31100 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -52,6 +52,10 @@ extern const char* opt_name; // Output options extern const struct argp aoutput_argp; +// help text for --hoaf +extern const char* hoa_option_doc_short; +extern const char* hoa_option_doc_long; + // help text for %F and %L extern char F_doc[32]; extern char L_doc[32]; diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index b5e909cb1..4b5858c01 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -53,7 +53,6 @@ enum OPT_CSV_WITH_FORMULA, OPT_CSV_WITHOUT_FORMULA, OPT_DECOMPOSE, - OPT_DOT, OPT_FROM_PGAME, OPT_GEQUIV, OPT_HIDE, @@ -143,13 +142,15 @@ static const argp_option options[] = "\"both\" tries both and keeps the smaller one. " "Other options further " "refine the encoding, see aiger::encode_bdd. Defaults to \"ite\".", 0 }, - { "dot", OPT_DOT, "options", OPTION_ARG_OPTIONAL, + { "dot", 'd', "options", OPTION_ARG_OPTIONAL, "Use dot format when printing the result (game, strategy, or " "AIG circuit, depending on other options). The options that " "may be passed to --dot depend on the nature of what is printed. 
" "For games and strategies, standard automata rendering " "options are supported (e.g., see ltl2tgba --dot). For AIG circuit, " "use (h) for horizontal and (v) for vertical layouts.", 0 }, + { "hoaf", 'H', hoa_option_doc_short, OPTION_ARG_OPTIONAL, + hoa_option_doc_long, 0 }, { "csv", OPT_CSV_WITHOUT_FORMULA, "[>>]FILENAME", OPTION_ARG_OPTIONAL, "output statistics as CSV in FILENAME or on standard output " "(if '>>' is used to request append mode, the header line is " @@ -159,8 +160,9 @@ static const argp_option options[] = OPTION_ARG_OPTIONAL, "like --csv, but with an additional 'fomula' column", 0 }, { "hide-status", OPT_HIDE, nullptr, 0, - "Hide the REALIZABLE or UNREALIZABLE line. (Hint: exit status " + "hide the REALIZABLE or UNREALIZABLE line (The exit status " "is enough of an indication.)", 0 }, + { "quiet", 'q', nullptr, 0, "suppress all normal output", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, { "extra-options", 'x', "OPTS", 0, @@ -174,7 +176,6 @@ static const argp_option options[] = static const struct argp_child children[] = { { &finput_argp_headless, 0, nullptr, 0 }, - { &aoutput_argp, 0, nullptr, 0 }, { &misc_argp, 0, nullptr, 0 }, { nullptr, 0, nullptr, 0 } }; @@ -1044,6 +1045,15 @@ parse_opt(int key, char *arg, struct argp_state *) BEGIN_EXCEPTION_PROTECT; switch (key) { + case 'd': + opt_dot = true; + automaton_format_opt = opt_dot_arg = arg; + automaton_format = Dot; + break; + case 'q': + automaton_format = Quiet; + show_status = false; + break; case OPT_ALGO: gi->s = XARGMATCH("--algo", arg, algo_args, algo_types); break; @@ -1062,11 +1072,6 @@ parse_opt(int key, char *arg, struct argp_state *) opt_decompose_ltl = XARGMATCH("--decompose", arg, decompose_args, decompose_values); break; - case OPT_DOT: - opt_dot = true; - automaton_format_opt = opt_dot_arg = arg; - automaton_format = Dot; - break; case OPT_FROM_PGAME: jobs.emplace_back(arg, job_type::AUT_FILENAME); break; From 00456e5211c992460bc9301230f6ccb97f73c914 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Fri, 14 Feb 2025 14:27:48 +0100 Subject: [PATCH 533/606] ltlfilt: add a --save-part-file option * bin/ltlfilt.cc: Add support for --save-part-file. * NEWS, doc/org/ltlfilt.org: Mention it. * tests/core/ltlfilt.test: Test it. --- NEWS | 4 ++++ bin/ltlfilt.cc | 50 ++++++++++++++++++++++++++++++++++++++--- doc/org/ltlfilt.org | 28 +++++++++++++++++++---- tests/core/ltlfilt.test | 16 +++++++++++++ 4 files changed, 91 insertions(+), 7 deletions(-) diff --git a/NEWS b/NEWS index 2e4d094f2..d95157cdd 100644 --- a/NEWS +++ b/NEWS @@ -37,6 +37,10 @@ New in spot 2.12.2.dev (not yet released) be inferred from their name. (This suspports a --part-file option as well.) + - ltlfilt learned --save-part-file[=FILENAME] option that can be + used to create partition files suitable for many synthesis tools + (including ltlsynt). + - genltl learned --lily-patterns to generate the example LTL synthesis specifications from Lily 1.0.2. 
Those come with input and output atomic proposition rewriten in the form "iNN" or "oNN", diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index 4c17f2214..1669a707d 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -103,6 +103,7 @@ enum { OPT_REMOVE_WM, OPT_REMOVE_X, OPT_SAFETY, + OPT_SAVE_PART_FILE, OPT_SIGMA2, OPT_SIZE, OPT_SIZE_MAX, @@ -184,13 +185,18 @@ static const argp_option options[] = " proposition", 0 }, { "ins", OPT_INS, "PROPS", 0, "comma-separated list of input atomic propositions to use with " - "--relabel=io, interpreted as a regex if enclosed in slashes", 0 }, + "--relabel=io or --save-part-file, interpreted as a regex if enclosed " + "in slashes", 0 }, { "outs", OPT_OUTS, "PROPS", 0, "comma-separated list of output atomic propositions to use with " - "--relabel=io, interpreted as a regex if enclosed in slashes", 0 }, + "--relabel=io or --save-part-file, interpreted as a regex if " + "enclosed in slashes", 0 }, { "part-file", OPT_PART_FILE, "FILENAME", 0, "file containing the partition of atomic propositions to use with " "--relabel=io", 0 }, + { "save-part-file", OPT_SAVE_PART_FILE, "FILENAME", OPTION_ARG_OPTIONAL, + "file containing the partition of atomic propositions, " + "readable by --part-file", 0 }, DECLARE_OPT_R, LEVEL_DOC(4), /**************************************************/ @@ -377,6 +383,7 @@ static struct opt_t { spot::bdd_dict_ptr dict = spot::make_bdd_dict(); spot::exclusive_ap excl_ap; + std::unique_ptr output_part = nullptr; std::unique_ptr output_define = nullptr; std::unique_ptr output_sonf = nullptr; spot::formula implied_by = nullptr; @@ -596,6 +603,9 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_SAFETY: safety = obligation = true; break; + case OPT_SAVE_PART_FILE: + opt->output_part.reset(new output_file(arg ? arg : "-")); + break; case OPT_SIZE: size = parse_range(arg, 0, std::numeric_limits::max()); break; @@ -954,6 +964,40 @@ namespace oldname, filename, std::to_string(linenum).c_str()) << ")\n"; } + if (opt->output_part + && output_format != count_output + && output_format != quiet_output) + { + std::vector ins; + std::vector outs; + spot::atomic_prop_set* s = atomic_prop_collect(f); + for (spot::formula ap: *s) + { + spot::formula apo = ap; + if (auto it = relmap.find(ap); it != relmap.end()) + apo = it->second; + if (is_output(apo.ap_name(), filename, linenum)) + outs.push_back(ap); + else + ins.push_back(ap); + } + delete s; + auto& os = opt->output_part->ostream(); + if (!ins.empty()) + { + os << ".inputs"; + for (const auto& ap: ins) + os << ' ' << str_psl(ap); + os << '\n'; + } + if (!outs.empty()) + { + os << ".outputs"; + for (const auto& ap: outs) + os << ' ' << str_psl(ap); + os << '\n'; + } + } one_match = true; output_formula_checked(f, &timer, filename, linenum, match_count, prefix, suffix); @@ -982,7 +1026,7 @@ main(int argc, char** argv) if (jobs.empty()) jobs.emplace_back("-", job_type::LTL_FILENAME); - if (relabeling == IOApRelabeling) + if (relabeling == IOApRelabeling || opt->output_part) process_io_options(); if (boolean_to_isop && simplification_level == 0) diff --git a/doc/org/ltlfilt.org b/doc/org/ltlfilt.org index d28d265eb..fad49bcf5 100644 --- a/doc/org/ltlfilt.org +++ b/doc/org/ltlfilt.org @@ -301,14 +301,18 @@ ltldo ltl3ba -f '"proc@loc1" U "proc@loc2"' --spin This case also relabels the formula before calling =ltl3ba=, and it then renames all the atomic propositions in the output. +An example showing how to use the =--from-ltlf= option is on [[file:tut12.org][a +separate page]]. 
+ +* I/O-partitioned formulas A special relabeling mode related to LTL synthesis is =--relabel=io=. In LTL synthesis (see [[file:ltlsynt.org][=ltlsynt=]]), atomic propositions are partitioned in two sets: the /input/ propositions represent choices from the -environment, while /output/ proposition represent choices by the +environment, while /output/ propositions represent choices by the controller to be synthesized. For instance =G(req -> Fack) & G(go -> Fgrant)= -represents could be a specification where =req= and =go= are inputs, +could be a specification where =req= and =go= are inputs, while =ack= and =grant= are outputs. Tool such as =ltlsynt= need to be told using options such as =--ins= or =--outs= which atomic propositions are input or output. Often these atomic propositions @@ -331,8 +335,24 @@ when these two options are missing the convention is that anything starting with =i= is an input, and anything starting with =o= is an output. -An example showing how to use the =--from-ltlf= option is on [[file:tut12.org][a -separate page]]. +=ltlfilt= can also be instructed to create a partition file (usually +named =*.part=) that can be used by synthesis tools. + +#+BEGIN_SRC sh +ltlfilt -f 'G(req -> Fack) & G(go -> Fgrant)' --relabel=io \ + --ins=req,go --save-part=out.part +#+END_SRC +#+RESULTS: +: G(i1 -> Fo0) & G(i0 -> Fo1) + +In addition to the relabeling, this also created a file =out.part= +containing the following: +#+BEGIN_SRC sh :exports results +cat out.part +#+END_SRC +#+RESULTS: +: .inputs i0 i1 +: .outputs o0 o1 * Filtering diff --git a/tests/core/ltlfilt.test b/tests/core/ltlfilt.test index 2019151ca..2c456878a 100755 --- a/tests/core/ltlfilt.test +++ b/tests/core/ltlfilt.test @@ -872,3 +872,19 @@ diff expected out # would stick to the rest of the file unless overridden. run 2 ltlfilt -Ffile/1 -Ffile : + + +run 0 ltlfilt -f 'i1 U o2 U i3' --save-part=p.part +cat > exp.part<out +cat > exp< Date: Wed, 26 Feb 2025 10:44:59 +0100 Subject: [PATCH 534/606] * spot/twaalgos/gtec/gtec.cc: Work around spurious warning. --- spot/twaalgos/gtec/gtec.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/spot/twaalgos/gtec/gtec.cc b/spot/twaalgos/gtec/gtec.cc index e50e0b7ad..af7f0b64b 100644 --- a/spot/twaalgos/gtec/gtec.cc +++ b/spot/twaalgos/gtec/gtec.cc @@ -459,6 +459,9 @@ namespace spot auto i = ecs_->h.find(curr); assert(i != ecs_->h.end()); assert(i->first == curr); + // work around g++-14.2.0 spurious "potential null + // pointer dereference" warning. + SPOT_ASSUME(i != ecs_->h.end()); ecs_->root.rem().push_front(i->first); inc_depth(); } From c4e3509d18ed9eb95edc93e091b1acf468ff2cd4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 7 Mar 2025 10:28:43 +0100 Subject: [PATCH 535/606] * bin/.gitignore: Add ltlmix to gitignore --- bin/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/.gitignore b/bin/.gitignore index 935ab53b1..cc3b3e67b 100644 --- a/bin/.gitignore +++ b/bin/.gitignore @@ -9,6 +9,7 @@ ltlcross ltldo ltlfilt ltlgrind +ltlmix ltlsynt randaut randltl From 1dd2ce3ae24d9324e1c521ff0e4fa020afc4c9be Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Mar 2025 09:29:31 +0100 Subject: [PATCH 536/606] sanity: improve bin.test * tests/sanity/bin.test: Add missing exit status on error, and report manpage and binaries missing from spot.spec.in. * spot.spec.in: Add ltlmix and ltlmix.1. * bin/ltlsynt.cc: Fix formating for --algo. 
--- bin/ltlsynt.cc | 20 +++++++++----------- spot.spec.in | 4 +++- tests/sanity/bin.test | 20 +++++++++++++++++++- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 4b5858c01..1b741fa9a 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -91,17 +91,15 @@ static const argp_option options[] = /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old|acd", 0, - "choose the algorithm for synthesis:" - " \"sd\": translate to tgba, split, then determinize;" - " \"ds\": translate to tgba, determinize, then split;" - " \"ps\": translate to dpa, then split;" - " \"lar\": translate to a deterministic automaton with arbitrary" - " acceptance condition, then use LAR to turn to parity," - " then split (default);" - " \"lar.old\": old version of LAR, for benchmarking;" - " \"acd\": translate to a deterministic automaton with arbitrary" - " acceptance condition, then use ACD to turn to parity," - " then split.\n", 0 }, + "choose the algorithm for synthesis:\n" + " sd: translate to TGBA, split, determinize\n" + " ds: translate to TGBA, determinize, split\n" + " ps: translate to DPA, split\n" + " lar: translate to a deterministic TELA, convert to DPA" + " with LAR, split (default)\n" + " lar.old: old version of LAR, for benchmarking;\n" + " acd: translate to a deterministic TELA, convert to DPA" + " with ACD, split", 0 }, { "bypass", OPT_BYPASS, "yes|no", 0, "whether to try to avoid to construct a parity game " "(enabled by default)", 0}, diff --git a/spot.spec.in b/spot.spec.in index bd465fd62..a2630d2de 100755 --- a/spot.spec.in +++ b/spot.spec.in @@ -38,6 +38,7 @@ logic (LTL & PSL). %{_bindir}/ltldo %{_bindir}/ltlfilt %{_bindir}/ltlgrind +%{_bindir}/ltlmix %{_bindir}/ltlsynt %{_bindir}/randaut %{_bindir}/randltl @@ -52,11 +53,12 @@ logic (LTL & PSL). %{_mandir}/man1/ltldo.1* %{_mandir}/man1/ltlfilt.1* %{_mandir}/man1/ltlgrind.1* +%{_mandir}/man1/ltlmix.1* %{_mandir}/man1/ltlsynt.1* %{_mandir}/man1/randaut.1* %{_mandir}/man1/randltl.1* -%{_mandir}/man7/spot-x.7* %{_mandir}/man7/spot.7* +%{_mandir}/man7/spot-x.7* %license COPYING %doc AUTHORS COPYING NEWS README THANKS diff --git a/tests/sanity/bin.test b/tests/sanity/bin.test index 6e9e5eafb..0ad836daa 100644 --- a/tests/sanity/bin.test +++ b/tests/sanity/bin.test @@ -52,6 +52,11 @@ do echo "bin/man/$manpage is not listed in man/Makefile.am" exit_status=2 fi + if ! grep -q "%{_mandir}/man./$manpage\*\$" $top_srcdir/spot.spec.in; + then + echo "$manpage is not listed in spot.spec.in" + exit_status=2 + fi fi case $binary in @@ -74,7 +79,7 @@ do ;; esac - # All man pages + # All tools case $manpage in *.1) if ! test -f $top_srcdir/doc/org/$binary.org; then @@ -94,8 +99,20 @@ do echo "$binary does not occur in doc/org/arch.tex" exit_status=2 fi + if ! grep -q "%{_bindir}/$binary\$" $top_srcdir/spot.spec.in; then + echo "$binary does is not listed in spot.spec.in"; + exit_status=2 + fi + esac + if test -f $top_srcdir/bin/.gitignore; then + if ! grep -q "^$binary\$" $top_srcdir/bin/.gitignore; then + echo "$binary is not listed in bin/.gitignore" + exit_status=2 + fi + fi + # Check --help text. Set a high rmargin to workaround some # bug in argp where an extra line it sometimes added if the end # of the document string fall right into the rmargin. @@ -107,6 +124,7 @@ do echo "bin/$binary --help has options after blank line;" \ "missing section header?" 
cat help-err + exit_status=2 fi rm -f help-$binary.tmp help-err From b6e782589e2672aba4deab7f5068c11afd357952 Mon Sep 17 00:00:00 2001 From: Alexandre Duret-Lutz Date: Tue, 11 Mar 2025 10:14:46 +0100 Subject: [PATCH 537/606] bin: handle '--parity=X --colored-parity' like '--colored-parity=X' Fixes #602. * bin/common_post.cc (-P, -p): Do not overwrite an existing parity specification if none were given. * tests/core/parity2.test: Test this. --- bin/common_post.cc | 9 +++++- tests/core/parity2.test | 66 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/bin/common_post.cc b/bin/common_post.cc index fdb1dc903..7bead8135 100644 --- a/bin/common_post.cc +++ b/bin/common_post.cc @@ -231,7 +231,14 @@ parse_opt_post(int key, char* arg, struct argp_state*) if (arg) type = XARGMATCH(key == 'P' ? "--parity" : "--colored-parity", arg, parity_args, parity_types); - else + else if (!(type & spot::postprocessor::Parity)) + // If no argument was given, we just require Parity. + // However, if a Parity condition was already set before, + // don't overwrite it. This way if someone mistakenly write + // `--parity='max even' --colored` without realizing that + // `--colored` is just the abbreviation for + // `--colored-parity=...` with the default argument, we + // won't reset the 'max even' setting. type = spot::postprocessor::Parity; if (key == 'p') colored = spot::postprocessor::Colored; diff --git a/tests/core/parity2.test b/tests/core/parity2.test index 8fb35e365..b2b95f790 100755 --- a/tests/core/parity2.test +++ b/tests/core/parity2.test @@ -26,7 +26,19 @@ for x in P 'Pmin odd' 'Pmax even' p 'pmin odd' 'pmax even'; do autfilt --name=%M --high "-$x" >>res2 ltl2tgba -D "-$x" FGa 'GFa & GFb' '(p0 W XXGp0) & GFp1 & FGp2' >>res3 ltl2tgba FGa 'GFa & GFb' '(p0 W XXGp0) & GFp1 & FGp2' | - autfilt -D --name=%M --high "-$x" >>res4 + autfilt -D --name=%M --high "-$x" >>res4 + ( + ## --colored is normally short for --colored-parity=any + ## But in case someone types something like + ## --parity='max odd' --colored + ## let's make sure we handle it like --colored-parity='max odd'. + echo "=== -$x ===" + ltl2tgba FGa 'GFa & GFb' '(p0 W XXGp0) & GFp1 & FGp2' \ + "-$x" --stats='%[]g' + echo "=== -$x --colored ===" + ltl2tgba FGa 'GFa & GFb' '(p0 W XXGp0) & GFp1 & FGp2' \ + "-$x" --colored --stats='%[]g' + ) >> res5 done cat >expected<expected5 < XXXb)' -f '(p0 W XXGp0) & GFp1 & FGp2' From 2ae9da1bc6922e9013ad7d10175e61d8dca4f208 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 10 Mar 2022 12:16:18 +0100 Subject: [PATCH 538/606] twagraph: merge_edges supports finite automata * spot/twa/twagraph.cc: don't remove false-labeled edges if the automaton uses state-based acceptance and the edge is a self loop --- spot/twa/twagraph.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 882714ab2..d422becb0 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -230,11 +230,15 @@ namespace spot // them. }); + bool is_state_acc = this->prop_state_acc().is_true(); + unsigned out = 0; unsigned in = 1; // Skip any leading false edge. 
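      // (A false-labeled self-loop is deliberately kept when acceptance is
      // state-based: the finite-word encodings used later in this series,
      // such as derive_finite_automaton() or sere_to_tgba(), mark an
      // accepting state with no real successor by a bddfalse accepting
      // self-loop, and dropping that edge here would lose the state's
      // acceptance information.)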
- while (in < tend && trans[in].cond == bddfalse) + while (in < tend + && trans[in].cond == bddfalse + && (!is_state_acc || trans[in].src != trans[in].dst)) ++in; if (in < tend) { @@ -243,7 +247,9 @@ namespace spot trans[out] = trans[in]; for (++in; in < tend; ++in) { - if (trans[in].cond == bddfalse) // Unusable edge + if (trans[in].cond == bddfalse + && (!is_state_acc + || trans[in].src != trans[in].dst)) // Unusable edge continue; // Merge edges with the same source, destination, and // colors. (We test the source last, because this is the From 4a646e5aa0eeeb9527db0076a30b45877661226a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 14 Jan 2022 08:56:28 +0100 Subject: [PATCH 539/606] tl: implement SERE derivation --- python/spot/impl.i | 2 + spot/tl/Makefile.am | 2 + spot/tl/derive.cc | 383 ++++++++++++++++++++++++++++++++++++++++++++ spot/tl/derive.hh | 43 +++++ 4 files changed, 430 insertions(+) create mode 100644 spot/tl/derive.cc create mode 100644 spot/tl/derive.hh diff --git a/python/spot/impl.i b/python/spot/impl.i index 2291730ed..09e29f6e9 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -86,6 +86,7 @@ #include #include +#include #include #include #include @@ -631,6 +632,7 @@ namespace std { %include %include +%include %include %include %include diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index 6c7650875..1e5a68363 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -28,6 +28,7 @@ tl_HEADERS = \ declenv.hh \ defaultenv.hh \ delta2.hh \ + derive.hh \ dot.hh \ environment.hh \ exclusive.hh \ @@ -54,6 +55,7 @@ libtl_la_SOURCES = \ declenv.cc \ defaultenv.cc \ delta2.cc \ + derive.cc \ dot.cc \ exclusive.cc \ formula.cc \ diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc new file mode 100644 index 000000000..cec4f3bcd --- /dev/null +++ b/spot/tl/derive.cc @@ -0,0 +1,383 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
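// This file computes partial derivatives of SERE formulas, in the spirit
// of Antimirov's partial derivatives of regular expressions:
// partial_derivation(f, l) returns a formula (possibly an OrRat of
// alternatives) recognizing the words w such that l.w is recognized by f.
// derive_finite_automaton() iterates this construction over all letters to
// build an automaton for the finite words of f (OrRat derivatives are
// split into separate successors when a non-deterministic automaton is
// requested), and derive_automaton() encodes the same language over
// infinite words with the help of an extra "alive" proposition.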
+ +#include "config.h" +#include +#include +#include +#include + +namespace spot +{ + namespace + { + static std::vector + formula_aps(formula f) + { + auto res = std::unordered_set(); + + f.traverse([&res](formula f) + { + if (f.is(op::ap)) + { + res.insert(f.ap_name()); + return true; + } + + return false; + }); + + return std::vector(res.begin(), res.end()); + } + } + + twa_graph_ptr + derive_finite_automaton(formula f, bool deterministic) + { + auto bdd_dict = make_bdd_dict(); + auto aut = make_twa_graph(bdd_dict); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + bdd all_aps = aut->ap_vars(); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula derivative) -> unsigned + { + unsigned dst; + auto it = formula2state.find(derivative); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({derivative, dst}); + formula2state.insert({derivative, dst}); + std::ostringstream ss; + ss << derivative; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark = curr_f.accepts_eword() + ? acc_mark + : acc_cond::mark_t(); + + for (const bdd one : minterms_of(bddtrue, all_aps)) + { + formula derivative = + partial_derivation(curr_f, one, bdd_dict, aut.get()); + + // no transition possible from this letter + if (derivative.is(op::ff)) + continue; + + // either the formula isn't an OrRat, or if it is we consider it as + // as a whole to get a deterministic automaton + if (deterministic || !derivative.is(op::OrRat)) + { + auto dst = find_dst(derivative); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + continue; + } + + // formula is an OrRat and we want a non deterministic automaton, + // so consider each child as a destination + for (const auto& subformula : derivative) + { + auto dst = find_dst(subformula); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + } + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + + aut->merge_edges(); + + return aut; + } + + twa_graph_ptr + derive_automaton(formula f, bool deterministic) + { + auto bdd_dict = make_bdd_dict(); + auto aut = make_twa_graph(bdd_dict); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + bdd all_aps = aut->ap_vars(); + bdd alive = bdd_ithvar(aut->register_ap("alive")); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula 
derivative) -> unsigned + { + unsigned dst; + auto it = formula2state.find(derivative); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({derivative, dst}); + formula2state.insert({derivative, dst}); + std::ostringstream ss; + ss << derivative; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + for (const bdd one : minterms_of(bddtrue, all_aps)) + { + formula derivative = + partial_derivation(curr_f, one, bdd_dict, aut.get()); + + // no transition possible from this letter + if (derivative.is(op::ff)) + continue; + + // either the formula isn't an OrRat, or if it is we consider it as + // a whole to get a deterministic automaton + if (deterministic || !derivative.is(op::OrRat)) + { + auto dst = find_dst(derivative); + aut->new_edge(curr_state, dst, one & alive); + continue; + } + + // formula is an OrRat and we want a non deterministic automaton, + // so consider each child as a destination + for (const auto& subformula : derivative) + { + auto dst = find_dst(subformula); + aut->new_edge(curr_state, dst, one & alive); + } + } + } + + unsigned end_state = aut->new_state(); + state_names->push_back("end"); + + for (const auto& [state_formula, state] : formula2state) + { + if (!state_formula.accepts_eword()) + continue; + + aut->new_edge(state, end_state, !alive); + } + + aut->new_edge(end_state, end_state, !alive, acc_mark); + + aut->set_named_prop("state-names", state_names); + + return aut; + } + + formula + partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, + void* owner) + { + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (bdd_implies(var, f_bdd)) + return formula::eword(); + + return formula::ff(); + } + + switch (f.kind()) + { + // handled by is_boolean above + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + return formula::ff(); + + // d(E.F) = { d(E).F } U { c(E).d(F) } + case op::Concat: + { + formula E = f[0]; + formula F = f.all_but(0); + + auto res = + formula::Concat({ partial_derivation(E, var, d, owner), F }); + + if (E.accepts_eword()) + res = formula::OrRat( + { res, partial_derivation(F, var, d, owner) }); + + return res; + } + + // d(E*) = d(E).E* + // d(E[*i..j]) = d(E).E[*(i-1)..(j-1)] + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + formula d_E = partial_derivation(f[0], var, d, owner); + + return formula::Concat({ d_E, formula::Star(f[0], min, max) }); + } + + // d(E[:*i..j]) = E:E[:*(i-1)..(j-1)] + (eword if i == 0 or c(d(E))) + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return formula::tt(); + + auto d_E = partial_derivation(E, var, d, owner); + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? 
formula::unbounded() + : (f.max() - 1); + + auto results = std::vector(); + + auto E_i_j_minus = formula::FStar(E, min, max); + results.push_back(formula::Fusion({ d_E, E_i_j_minus })); + + if (d_E.accepts_eword()) + results.push_back(d_E); + + if (f.min() == 0) + results.push_back(formula::eword()); + + return formula::OrRat(std::move(results)); + } + + // d(E && F) = d(E) && d(F) + // d(E + F) = {d(E)} U {d(F)} + case op::AndRat: + case op::OrRat: + { + std::vector subderivations; + for (auto subformula : f) + { + auto subderivation = + partial_derivation(subformula, var, d, owner); + subderivations.push_back(subderivation); + } + return formula::multop(f.kind(), std::move(subderivations)); + } + + // d(E:F) = {d(E):F} U {c(d(E)).d(F)} + case op::Fusion: + { + formula E = f[0]; + formula F = f.all_but(0); + + auto d_E = partial_derivation(E, var, d, owner); + auto res = formula::Fusion({ d_E, F }); + + if (d_E.accepts_eword()) + res = + formula::OrRat({ res, partial_derivation(F, var, d, owner) }); + + return res; + } + + case op::first_match: + { + formula E = f[0]; + auto d_E = partial_derivation(E, var, d, owner); + // if d_E.accepts_eword(), first_match(d_E) will return eword + return formula::first_match(d_E); + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + return formula::ff(); + } +} diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh new file mode 100644 index 000000000..410a24e37 --- /dev/null +++ b/spot/tl/derive.hh @@ -0,0 +1,43 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
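// A minimal usage sketch (every name used below is either declared in this
// header or in existing Spot headers; the SERE is built programmatically
// here, but any other way of obtaining a SERE formula works as well):
//
//   spot::formula seq =
//     spot::formula::Concat({spot::formula::ap("a"),
//                            spot::formula::one_star(),
//                            spot::formula::ap("b")});
//   spot::twa_graph_ptr aut = spot::derive_finite_automaton(seq);
//   spot::print_hoa(std::cout, aut);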
+ +#pragma once + +#include + +#include + +#include +#include +#include + +namespace spot +{ + /// \ingroup tl_misc + /// \brief Produce a SERE formula's partial derivative + SPOT_API formula + partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, + void* owner); + + SPOT_API twa_graph_ptr + derive_automaton(formula f, bool deterministic = true); + + SPOT_API twa_graph_ptr + derive_finite_automaton(formula f, bool deterministic = true); +} From 175012b919f38c89b8275671578537b8acafbda4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 10 Mar 2022 15:45:50 +0100 Subject: [PATCH 540/606] twaalgos: extract internal sere2dfa --- spot/twaalgos/ltl2tgba_fm.cc | 56 ++++++++++++++++++++++++++++++++++++ spot/twaalgos/ltl2tgba_fm.hh | 3 ++ 2 files changed, 59 insertions(+) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 09e88acae..a3f0f2aa3 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -2226,4 +2227,59 @@ namespace spot return a; } + twa_graph_ptr + sere_to_tgba(formula f, const bdd_dict_ptr& dict) + { + f = negative_normal_form(f); + + tl_simplifier* s = new tl_simplifier(dict); + twa_graph_ptr a = make_twa_graph(dict); + + translate_dict d(a, s, false, false, false); + ratexp_to_dfa sere2dfa(d); + + auto [dfa, namer, state] = sere2dfa.succ(f); + + // language was empty, build an automaton with one non accepting state + if (dfa == nullptr) + { + auto res = make_twa_graph(dict); + res->set_init_state(res->new_state()); + res->prop_universal(true); + res->prop_complete(false); + res->prop_stutter_invariant(true); + res->prop_terminal(true); + res->prop_state_acc(true); + return res; + } + + auto res = make_twa_graph(dfa, {false, false, true, false, false, false}); + + // HACK: translate_dict registers the atomic propositions in the "final" + // automaton that would be produced by a full translation, not in the + // intermediate automaton we're interested in. We can copy them from the + // resulting automaton. 
+ res->copy_ap_of(a); + + res->prop_state_acc(true); + const auto acc_mark = res->set_buchi(); + + size_t sn = namer->state_to_name.size(); + for (size_t i = 0; i < sn; ++i) + { + formula g = namer->state_to_name[i]; + if (g.accepts_eword()) + { + if (res->get_graph().state_storage(i).succ == 0) + res->new_edge(i, i, bddfalse, acc_mark); + else + { + for (auto& e : res->out(i)) + e.acc = acc_mark; + } + } + } + + return res; + } } diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 717ae6b1b..7dba4aee0 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -89,4 +89,7 @@ namespace spot bool unambiguous = false, const output_aborter* aborter = nullptr, bool label_with_ltl = false); + + SPOT_API twa_graph_ptr + sere_to_tgba(formula f, const bdd_dict_ptr& dict); } From 3b3ec16b20c343eb00b3053d0ff308f7410cd854 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 14 Mar 2022 15:17:51 +0100 Subject: [PATCH 541/606] twaalgos: add from_finite * spot/twaalgos/remprop.cc, spot/twaalgos/remprop.hh: add a from_finite function to perform the opposite operation to to_finite --- spot/twaalgos/remprop.cc | 47 ++++++++++++++++++++++++++++++++++++++++ spot/twaalgos/remprop.hh | 3 +++ 2 files changed, 50 insertions(+) diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index eb7c54dd0..092c1f50c 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -245,4 +245,51 @@ namespace spot } + twa_graph_ptr from_finite(const_twa_graph_ptr aut, const char* alive) + { + twa_graph_ptr res = + make_twa_graph(aut, + { true, false, true, false, false, false }); + + if (aut->get_named_prop>("state-names")) + res->copy_state_names_from(aut); + auto* names = res->get_named_prop>("state-names"); + + unsigned alive_sink = res->new_state(); + if (names != nullptr) + names->push_back("sink"); + auto acc = res->acc().all_sets(); + auto alive_bdd = bdd_ithvar(res->register_ap(alive)); + res->new_edge(alive_sink, alive_sink, !alive_bdd, acc); + + unsigned ns = res->num_states(); + for (unsigned s = 0; s < ns; ++s) + { + if (s == alive_sink) + continue; + + bool was_acc = res->state_is_accepting(s); + + // erase accepting marks, require alive on non-accepting transition, + // and remove self-loop edges used to mark acceptance + auto i = res->out_iteraser(s); + while (i) + { + if (i->src == i->dst && i->cond == bddfalse) + { + i.erase(); + continue; + } + + i->cond &= alive_bdd; + i->acc = {}; + ++i; + } + + if (was_acc) + res->new_edge(s, alive_sink, !alive_bdd); + } + + return res; + } } diff --git a/spot/twaalgos/remprop.hh b/spot/twaalgos/remprop.hh index ab234fed9..4121d6554 100644 --- a/spot/twaalgos/remprop.hh +++ b/spot/twaalgos/remprop.hh @@ -53,5 +53,8 @@ namespace spot SPOT_API twa_graph_ptr to_finite(const_twa_graph_ptr aut, const char* alive = "alive"); + /// \brief The opposite of the to_finite operation + SPOT_API twa_graph_ptr + from_finite(const_twa_graph_ptr aut, const char* alive = "alive"); } From 2df8e200d8bcd19584bb997aa72a4206a83de485 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 15 Mar 2022 17:06:05 +0100 Subject: [PATCH 542/606] derive: use from_finite --- spot/tl/derive.cc | 106 +++++----------------------------------------- 1 file changed, 11 insertions(+), 95 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index cec4f3bcd..9def9e2eb 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -22,6 +22,7 @@ #include #include #include +#include namespace spot { @@ -140,6 +141,12 @@ namespace spot 
aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); } + // if we only have an initial state with no transitions, then our language + // is empty + if (aut->num_states() == 1 + && aut->get_graph().state_storage(aut->get_init_state_number()).succ == 0) + return nullptr; + aut->set_named_prop("state-names", state_names); aut->merge_edges(); @@ -150,103 +157,12 @@ namespace spot twa_graph_ptr derive_automaton(formula f, bool deterministic) { - auto bdd_dict = make_bdd_dict(); - auto aut = make_twa_graph(bdd_dict); + auto finite = derive_finite_automaton(f, deterministic); - aut->prop_state_acc(true); - const auto acc_mark = aut->set_buchi(); + if (finite == nullptr) + return nullptr; - auto formula2state = robin_hood::unordered_map(); - - unsigned init_state = aut->new_state(); - aut->set_init_state(init_state); - - formula2state.insert({ f, init_state }); - - auto f_aps = formula_aps(f); - for (auto& ap : f_aps) - aut->register_ap(ap); - bdd all_aps = aut->ap_vars(); - bdd alive = bdd_ithvar(aut->register_ap("alive")); - - auto todo = std::vector>(); - todo.push_back({f, init_state}); - - auto state_names = new std::vector(); - std::ostringstream ss; - ss << f; - state_names->push_back(ss.str()); - - auto find_dst = [&](formula derivative) -> unsigned - { - unsigned dst; - auto it = formula2state.find(derivative); - if (it != formula2state.end()) - { - dst = it->second; - } - else - { - dst = aut->new_state(); - todo.push_back({derivative, dst}); - formula2state.insert({derivative, dst}); - std::ostringstream ss; - ss << derivative; - state_names->push_back(ss.str()); - } - - return dst; - }; - - while (!todo.empty()) - { - auto [curr_f, curr_state] = todo[todo.size() - 1]; - todo.pop_back(); - - for (const bdd one : minterms_of(bddtrue, all_aps)) - { - formula derivative = - partial_derivation(curr_f, one, bdd_dict, aut.get()); - - // no transition possible from this letter - if (derivative.is(op::ff)) - continue; - - // either the formula isn't an OrRat, or if it is we consider it as - // a whole to get a deterministic automaton - if (deterministic || !derivative.is(op::OrRat)) - { - auto dst = find_dst(derivative); - aut->new_edge(curr_state, dst, one & alive); - continue; - } - - // formula is an OrRat and we want a non deterministic automaton, - // so consider each child as a destination - for (const auto& subformula : derivative) - { - auto dst = find_dst(subformula); - aut->new_edge(curr_state, dst, one & alive); - } - } - } - - unsigned end_state = aut->new_state(); - state_names->push_back("end"); - - for (const auto& [state_formula, state] : formula2state) - { - if (!state_formula.accepts_eword()) - continue; - - aut->new_edge(state, end_state, !alive); - } - - aut->new_edge(end_state, end_state, !alive, acc_mark); - - aut->set_named_prop("state-names", state_names); - - return aut; + return from_finite(finite); } formula From 5b3b292b1064beb211a48bbbb1c2de932b7f7266 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 18 Mar 2022 18:05:53 +0100 Subject: [PATCH 543/606] derive: no nullptr handling --- spot/tl/derive.cc | 9 --------- 1 file changed, 9 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index 9def9e2eb..699c8634f 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -141,12 +141,6 @@ namespace spot aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); } - // if we only have an initial state with no transitions, then our language - // is empty - if (aut->num_states() == 1 - && 
aut->get_graph().state_storage(aut->get_init_state_number()).succ == 0) - return nullptr; - aut->set_named_prop("state-names", state_names); aut->merge_edges(); @@ -159,9 +153,6 @@ namespace spot { auto finite = derive_finite_automaton(f, deterministic); - if (finite == nullptr) - return nullptr; - return from_finite(finite); } From eea35cdb31df47e565cbb8a675cc0afa441ad6eb Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 18 Mar 2022 19:27:19 +0100 Subject: [PATCH 544/606] derive: extract AndNLM rewriting --- spot/tl/derive.cc | 55 ++++++++++++++++++++++++++++++++++++ spot/tl/derive.hh | 3 ++ spot/twaalgos/ltl2tgba_fm.cc | 51 ++------------------------------- 3 files changed, 60 insertions(+), 49 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index 699c8634f..a24fbac53 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -48,6 +48,61 @@ namespace spot } } + formula + rewrite_and_nlm(formula f) + { + unsigned s = f.size(); + std::vector final; + std::vector non_final; + + for (auto g: f) + if (g.accepts_eword()) + final.emplace_back(g); + else + non_final.emplace_back(g); + + if (non_final.empty()) + // (a* & b*);c = (a*|b*);c + return formula::OrRat(std::move(final)); + if (!final.empty()) + { + // let F_i be final formulae + // N_i be non final formula + // (F_1 & ... & F_n & N_1 & ... & N_m) + // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) + // | (F_1 | ... | F_n) && (N_1 & ... & N_m);[*] + formula f = formula::OrRat(std::move(final)); + formula n = formula::AndNLM(std::move(non_final)); + formula t = formula::one_star(); + formula ft = formula::Concat({f, t}); + formula nt = formula::Concat({n, t}); + formula ftn = formula::AndRat({ft, n}); + formula fnt = formula::AndRat({f, nt}); + return formula::OrRat({ftn, fnt}); + } + // No final formula. + // Translate N_1 & N_2 & ... & N_n into + // N_1 && (N_2;[*]) && ... && (N_n;[*]) + // | (N_1;[*]) && N_2 && ... && (N_n;[*]) + // | (N_1;[*]) && (N_2;[*]) && ... && N_n + formula star = formula::one_star(); + std::vector disj; + for (unsigned n = 0; n < s; ++n) + { + std::vector conj; + for (unsigned m = 0; m < s; ++m) + { + formula g = f[m]; + if (n != m) + g = formula::Concat({g, star}); + conj.emplace_back(g); + } + disj.emplace_back(formula::AndRat(std::move(conj))); + } + return formula::OrRat(std::move(disj)); + } + + twa_graph_ptr derive_finite_automaton(formula f, bool deterministic) { diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh index 410a24e37..247f85b59 100644 --- a/spot/tl/derive.hh +++ b/spot/tl/derive.hh @@ -40,4 +40,7 @@ namespace spot SPOT_API twa_graph_ptr derive_finite_automaton(formula f, bool deterministic = true); + + SPOT_API formula + rewrite_and_nlm(formula f); } diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index a3f0f2aa3..9c7674b0f 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -19,6 +19,7 @@ #include "config.h" #include #include +#include #include #include #include @@ -748,55 +749,7 @@ namespace spot SPOT_UNREACHABLE(); case op::AndNLM: { - unsigned s = f.size(); - vec final; - vec non_final; - - for (auto g: f) - if (g.accepts_eword()) - final.emplace_back(g); - else - non_final.emplace_back(g); - - if (non_final.empty()) - // (a* & b*);c = (a*|b*);c - return recurse_and_concat(formula::OrRat(std::move(final))); - if (!final.empty()) - { - // let F_i be final formulae - // N_i be non final formula - // (F_1 & ... & F_n & N_1 & ... & N_m) - // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) - // | (F_1 | ... 
| F_n) && (N_1 & ... & N_m);[*] - formula f = formula::OrRat(std::move(final)); - formula n = formula::AndNLM(std::move(non_final)); - formula t = formula::one_star(); - formula ft = formula::Concat({f, t}); - formula nt = formula::Concat({n, t}); - formula ftn = formula::AndRat({ft, n}); - formula fnt = formula::AndRat({f, nt}); - return recurse_and_concat(formula::OrRat({ftn, fnt})); - } - // No final formula. - // Translate N_1 & N_2 & ... & N_n into - // N_1 && (N_2;[*]) && ... && (N_n;[*]) - // | (N_1;[*]) && N_2 && ... && (N_n;[*]) - // | (N_1;[*]) && (N_2;[*]) && ... && N_n - formula star = formula::one_star(); - vec disj; - for (unsigned n = 0; n < s; ++n) - { - vec conj; - for (unsigned m = 0; m < s; ++m) - { - formula g = f[m]; - if (n != m) - g = formula::Concat({g, star}); - conj.emplace_back(g); - } - disj.emplace_back(formula::AndRat(std::move(conj))); - } - return recurse_and_concat(formula::OrRat(std::move(disj))); + return recurse_and_concat(rewrite_and_nlm(f)); } case op::AndRat: { From 1925910f4a6d4b2d88c76a4d3fb207f84dc24aaa Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 18 Mar 2022 19:27:37 +0100 Subject: [PATCH 545/606] derive: handle AndNLM --- spot/tl/derive.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index a24fbac53..3180d4815 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -310,6 +310,12 @@ namespace spot return formula::multop(f.kind(), std::move(subderivations)); } + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + return partial_derivation(rewrite, var, d, owner); + } + // d(E:F) = {d(E):F} U {c(d(E)).d(F)} case op::Fusion: { From a046a4983cf56842b6eb733e7e6847ad8cfdd068 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 30 Mar 2022 21:52:34 +0200 Subject: [PATCH 546/606] derive: use first --- spot/tl/derive.cc | 207 ++++++++++++++++++++++++++++++++++++++++++++++ spot/tl/derive.hh | 6 ++ 2 files changed, 213 insertions(+) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index 3180d4815..2b1873ed2 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -28,6 +28,106 @@ namespace spot { namespace { + static std::pair + first(formula f, const bdd_dict_ptr& d, void* owner) + { + if (f.is_boolean()) + { + bdd res = formula_to_bdd(f, d, owner); + return { res, bdd_support(res) }; + } + + switch (f.kind()) + { + // handled by is_boolean above + case op::ff: + case op::tt: + case op::ap: + case op::And: + case op::Or: + SPOT_UNREACHABLE(); + + case op::eword: + return { bddfalse, bddtrue }; + + case op::OrRat: + { + bdd res = bddfalse; + bdd support = bddtrue; + for (auto subformula : f) + { + auto [r, sup] = first(subformula, d, owner); + res |= r; + support &= sup; + } + return { res, support }; + } + + case op::AndRat: + { + bdd res = bddtrue; + bdd support = bddtrue; + for (auto subformula : f) + { + auto [r, sup] = first(subformula, d, owner); + res &= r; + support &= sup; + } + return { res, support }; + } + + case op::AndNLM: + return first(rewrite_and_nlm(f), d, owner); + + case op::Concat: + { + auto [res, support] = first(f[0], d, owner); + + if (f[0].accepts_eword()) + { + auto [r, sup] = first(f.all_but(0), d, owner); + res |= r; + support &= sup; + } + + return { res, support }; + } + + case op::Fusion: + { + auto [res, support] = first(f[0], d, owner); + + // this should be computed only if f[0] recognizes words of size 1 + // or accepts eword ? 
+ auto p = first(f.all_but(0), d, owner); + + return { res, support & p.second }; + } + + case op::Star: + case op::first_match: + return first(f[0], d, owner); + + case op::FStar: + { + auto [res, support] = first(f[0], d, owner); + + if (f.min() == 0) + res = bddtrue; + + return { res, support }; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return { bddfalse, bddtrue }; + } + static std::vector formula_aps(formula f) { @@ -102,6 +202,105 @@ namespace spot return formula::OrRat(std::move(disj)); } + twa_graph_ptr + derive_finite_automaton_with_first(formula f, bool deterministic) + { + auto bdd_dict = make_bdd_dict(); + auto aut = make_twa_graph(bdd_dict); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula derivative) -> unsigned + { + unsigned dst; + auto it = formula2state.find(derivative); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({derivative, dst}); + formula2state.insert({derivative, dst}); + std::ostringstream ss; + ss << derivative; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark = curr_f.accepts_eword() + ? 
acc_mark + : acc_cond::mark_t(); + + auto [firsts, firsts_support] = first(curr_f, bdd_dict, aut.get()); + for (const bdd one : minterms_of(firsts, firsts_support)) + { + formula derivative = + partial_derivation(curr_f, one, bdd_dict, aut.get()); + + // no transition possible from this letter + if (derivative.is(op::ff)) + continue; + + // either the formula isn't an OrRat, or if it is we consider it as + // as a whole to get a deterministic automaton + if (deterministic || !derivative.is(op::OrRat)) + { + auto dst = find_dst(derivative); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + continue; + } + + // formula is an OrRat and we want a non deterministic automaton, + // so consider each child as a destination + for (const auto& subformula : derivative) + { + auto dst = find_dst(subformula); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + } + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + + aut->merge_edges(); + + return aut; + } twa_graph_ptr derive_finite_automaton(formula f, bool deterministic) @@ -203,6 +402,14 @@ namespace spot return aut; } + twa_graph_ptr + derive_automaton_with_first(formula f, bool deterministic) + { + auto finite = derive_finite_automaton_with_first(f, deterministic); + + return from_finite(finite); + } + twa_graph_ptr derive_automaton(formula f, bool deterministic) { diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh index 247f85b59..1947951ed 100644 --- a/spot/tl/derive.hh +++ b/spot/tl/derive.hh @@ -38,9 +38,15 @@ namespace spot SPOT_API twa_graph_ptr derive_automaton(formula f, bool deterministic = true); + SPOT_API twa_graph_ptr + derive_automaton_with_first(formula f, bool deterministic = true); + SPOT_API twa_graph_ptr derive_finite_automaton(formula f, bool deterministic = true); + SPOT_API twa_graph_ptr + derive_finite_automaton_with_first(formula f, bool deterministic = true); + SPOT_API formula rewrite_and_nlm(formula f); } From 00ad02070b0d410b4f4bb858d961c7edadc03b1c Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 7 Jul 2022 16:38:33 +0200 Subject: [PATCH 547/606] graph: filter accepting sinks in univ_dest_mapper --- spot/graph/graph.hh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 3b43f751b..16d117ffc 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -557,10 +557,11 @@ namespace spot { std::map, unsigned> uniq_; G& g_; + unsigned acc_sink_; public: - univ_dest_mapper(G& graph) - : g_(graph) + univ_dest_mapper(G& graph, unsigned sink = -1u) + : g_(graph), acc_sink_(sink) { } @@ -570,6 +571,9 @@ namespace spot std::vector tmp(begin, end); std::sort(tmp.begin(), tmp.end()); tmp.erase(std::unique(tmp.begin(), tmp.end()), tmp.end()); + if (acc_sink_ != -1u && tmp.size() > 1) + tmp.erase(std::remove(tmp.begin(), tmp.end(), acc_sink_), + tmp.end()); auto p = uniq_.emplace(tmp, 0); if (p.second) p.first->second = g_.new_univ_dests(tmp.begin(), tmp.end()); From 6ebbb930240e5411c43ec5a0665d52d80235def9 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 7 Jul 2022 16:40:41 +0200 Subject: [PATCH 548/606] twaalgos: filter accepting sinks in oe combiner --- spot/twaalgos/alternation.cc | 21 ++++++++++++++++++--- spot/twaalgos/alternation.hh | 3 ++- 2 files changed, 20 insertions(+), 4 
deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index c7b2a17d5..d5d59a961 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -28,8 +28,8 @@ namespace spot { - outedge_combiner::outedge_combiner(const twa_graph_ptr& aut) - : aut_(aut), vars_(bddtrue) + outedge_combiner::outedge_combiner(const twa_graph_ptr& aut, unsigned sink) + : aut_(aut), vars_(bddtrue), acc_sink_(sink) { } @@ -50,6 +50,9 @@ namespace spot bdd out = bddtrue; for (unsigned d: aut_->univ_dests(e.dst)) { + if (d == acc_sink_) + continue; + auto p = state_to_var.emplace(d, 0); if (p.second) { @@ -78,7 +81,17 @@ namespace spot { bdd cond = bdd_exist(cube, vars_); bdd dest = bdd_existcomp(cube, vars_); - while (dest != bddtrue) + + if (dest == bddtrue) + { + // if dest is bddtrue then the accepting sink is the only + // destination for this edge, in that case don't filter it out + assert(acc_sink_ != -1u); + aut_->new_edge(st, acc_sink_, cond); + continue; + } + + do { assert(bdd_low(dest) == bddfalse); auto it = var_to_state.find(bdd_var(dest)); @@ -86,6 +99,8 @@ namespace spot univ_dest.push_back(it->second); dest = bdd_high(dest); } + while (dest != bddtrue); + std::sort(univ_dest.begin(), univ_dest.end()); aut_->new_univ_edge(st, univ_dest.begin(), univ_dest.end(), cond); univ_dest.clear(); diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index a03ddc121..9490272a1 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -49,8 +49,9 @@ namespace spot std::map state_to_var; std::map var_to_state; bdd vars_; + unsigned acc_sink_; public: - outedge_combiner(const twa_graph_ptr& aut); + outedge_combiner(const twa_graph_ptr& aut, unsigned sink = -1u); ~outedge_combiner(); bdd operator()(unsigned st); void new_dests(unsigned st, bdd out) const; From e4bfebf36f5ce6b9d44ad1fe03d389d63ebc62de Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 21 Jun 2022 13:54:32 +0200 Subject: [PATCH 549/606] twaalgos: add LTL to AA translation --- python/spot/impl.i | 2 + spot/twaalgos/Makefile.am | 2 + spot/twaalgos/translate_aa.cc | 320 ++++++++++++++++++++++++++++++++++ spot/twaalgos/translate_aa.hh | 32 ++++ 4 files changed, 356 insertions(+) create mode 100644 spot/twaalgos/translate_aa.cc create mode 100644 spot/twaalgos/translate_aa.hh diff --git a/python/spot/impl.i b/python/spot/impl.i index 09e29f6e9..471b85cbb 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -163,6 +163,7 @@ #include #include #include +#include #include #include #include @@ -790,6 +791,7 @@ def state_is_accepting(self, src) -> "bool": %include %include %include +%include %include %include %include diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index 8e5b929d0..432f1a85d 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -99,6 +99,7 @@ twaalgos_HEADERS = \ totgba.hh \ toweak.hh \ translate.hh \ + translate_aa.hh \ word.hh \ zlktree.hh @@ -177,6 +178,7 @@ libtwaalgos_la_SOURCES = \ totgba.cc \ toweak.cc \ translate.cc \ + translate_aa.cc \ word.cc \ zlktree.cc diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc new file mode 100644 index 000000000..0663651de --- /dev/null +++ b/spot/twaalgos/translate_aa.cc @@ -0,0 +1,320 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2013-2018, 2020-2021 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. 
+// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include + +#include + +namespace spot +{ + namespace + { + struct ltl_to_aa_builder + { + ltl_to_aa_builder(twa_graph_ptr aut, unsigned accepting_sink) + : aut_(aut) + , accepting_sink_(accepting_sink) + , uniq_(aut_->get_graph(), accepting_sink) + , oe_(aut_, accepting_sink) + { + } + + twa_graph_ptr aut_; + unsigned accepting_sink_; + internal::univ_dest_mapper uniq_; + outedge_combiner oe_; + + unsigned recurse(formula f) + { + switch (f.kind()) + { + case op::ff: + return aut_->new_state(); + + case op::tt: + { + unsigned init_state = aut_->new_state(); + aut_->new_edge(init_state, accepting_sink_, bddtrue, {}); + return init_state; + } + + case op::ap: + case op::Not: + { + unsigned init_state = aut_->new_state(); + + bdd ap; + if (f.kind() == op::ap) + ap = bdd_ithvar(aut_->register_ap(f.ap_name())); + else + ap = bdd_nithvar(aut_->register_ap(f[0].ap_name())); + + aut_->new_edge(init_state, accepting_sink_, ap, {}); + return init_state; + } + + // FIXME: is this right for LTLf? + case op::strong_X: + case op::X: + { + unsigned sub_init_state = recurse(f[0]); + unsigned new_init_state = aut_->new_state(); + aut_->new_edge(new_init_state, sub_init_state, bddtrue, {}); + return new_init_state; + } + + case op::Or: + { + unsigned init_state = aut_->new_state(); + + for (const auto& sub_formula : f) + { + unsigned sub_init = recurse(sub_formula); + for (auto& e : aut_->out(sub_init)) + { + unsigned dst = e.dst; + if (aut_->is_univ_dest(e.dst)) + { + auto dests = aut_->univ_dests(e); + dst = uniq_.new_univ_dests(dests.begin(), dests.end()); + } + aut_->new_edge(init_state, dst, e.cond, {}); + } + } + + return init_state; + } + + case op::And: + { + unsigned init_state = aut_->new_state(); + + outedge_combiner oe(aut_, accepting_sink_); + bdd comb = bddtrue; + for (const auto& sub_formula : f) + { + unsigned sub_init = recurse(sub_formula); + comb &= oe_(sub_init); + } + oe_.new_dests(init_state, comb); + + return init_state; + } + + case op::U: + case op::W: + { + auto acc = f.kind() == op::U + ? 
acc_cond::mark_t{0} + : acc_cond::mark_t{}; + + unsigned init_state = aut_->new_state(); + + unsigned lhs_init = recurse(f[0]); + unsigned rhs_init = recurse(f[1]); + + std::vector new_dests; + for (auto& e : aut_->out(lhs_init)) + { + auto dests = aut_->univ_dests(e); + std::copy(dests.begin(), dests.end(), + std::back_inserter(new_dests)); + new_dests.push_back(init_state); + + unsigned dest = uniq_.new_univ_dests(new_dests.begin(), + new_dests.end()); + aut_->new_edge(init_state, dest, e.cond, acc); + + new_dests.clear(); + } + + for (auto& e : aut_->out(rhs_init)) + { + unsigned dst = e.dst; + if (aut_->is_univ_dest(e.dst)) + { + auto dests = aut_->univ_dests(e); + dst = uniq_.new_univ_dests(dests.begin(), dests.end()); + } + aut_->new_edge(init_state, dst, e.cond, {}); + } + + return init_state; + } + + case op::R: + case op::M: + { + auto acc = f.kind() == op::M + ? acc_cond::mark_t{0} + : acc_cond::mark_t{}; + + unsigned init_state = aut_->new_state(); + + unsigned lhs_init = recurse(f[0]); + unsigned rhs_init = recurse(f[1]); + + std::vector new_dests; + for (auto& e : aut_->out(rhs_init)) + { + auto dests = aut_->univ_dests(e); + std::copy(dests.begin(), dests.end(), + std::back_inserter(new_dests)); + new_dests.push_back(init_state); + + unsigned dst = uniq_.new_univ_dests(new_dests.begin(), + new_dests.end()); + aut_->new_edge(init_state, dst, e.cond, acc); + + new_dests.clear(); + } + + std::vector dsts; + for (const auto& lhs_e : aut_->out(lhs_init)) + { + const auto& lhs_dsts = aut_->univ_dests(lhs_e); + std::copy(lhs_dsts.begin(), lhs_dsts.end(), + std::back_inserter(dsts)); + size_t lhs_dest_num = dsts.size(); + + for (const auto& rhs_e : aut_->out(rhs_init)) + { + const auto& rhs_dsts = aut_->univ_dests(rhs_e); + std::copy(rhs_dsts.begin(), rhs_dsts.end(), + std::back_inserter(dsts)); + + bdd cond = lhs_e.cond & rhs_e.cond; + + unsigned dst = uniq_.new_univ_dests(dsts.begin(), + dsts.end()); + aut_->new_edge(init_state, dst, cond, {}); + + // reset to only lhs' current edge destinations + dsts.resize(lhs_dest_num); + } + dsts.clear(); + } + + return init_state; + } + + // F(phi) = tt U phi + case op::F: + { + auto acc = acc_cond::mark_t{0}; + + // if phi is boolean then we can reuse its initial state (otherwise + // we can't because of potential self loops) + if (f[0].is_boolean()) + { + unsigned init_state = recurse(f[0]); + aut_->new_edge(init_state, init_state, bddtrue, acc); + return init_state; + } + + unsigned init_state = aut_->new_state(); + unsigned sub_init = recurse(f[0]); + + aut_->new_edge(init_state, init_state, bddtrue, acc); + + for (auto& e : aut_->out(sub_init)) + aut_->new_edge(init_state, e.dst, e.cond, {}); + + return init_state; + } + + // G phi = ff R phi + case op::G: + { + unsigned init_state = aut_->new_state(); + + unsigned sub_init = recurse(f[0]); + + // translate like R, but only the self loop part; `ff` cancels out + // the product of edges + std::vector new_dests; + for (auto& e : aut_->out(sub_init)) + { + auto dests = aut_->univ_dests(e); + std::copy(dests.begin(), dests.end(), + std::back_inserter(new_dests)); + new_dests.push_back(init_state); + + unsigned dst = uniq_.new_univ_dests(new_dests.begin(), + new_dests.end()); + aut_->new_edge(init_state, dst, e.cond, {}); + + new_dests.clear(); + } + + return init_state; + } + + case op::eword: + case op::Xor: + case op::Implies: + case op::Equiv: + case op::Closure: + case op::NegClosure: + case op::NegClosureMarked: + case op::EConcat: + case op::EConcatMarked: + case op::UConcat: + case 
op::OrRat: + case op::AndRat: + case op::AndNLM: + case op::Concat: + case op::Fusion: + case op::Star: + case op::FStar: + case op::first_match: + SPOT_UNREACHABLE(); + return -1; + } + + SPOT_UNREACHABLE(); + } + }; + } + + twa_graph_ptr + ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states) + { + SPOT_ASSERT(f.is_ltl_formula()); + f = negative_normal_form(f); + + auto aut = make_twa_graph(dict); + aut->set_co_buchi(); + + unsigned accepting_sink = aut->new_state(); + aut->new_edge(accepting_sink, accepting_sink, bddtrue, {}); + auto builder = ltl_to_aa_builder(aut, accepting_sink); + + unsigned init_state = builder.recurse(f); + aut->set_init_state(init_state); + + if (purge_dead_states) + aut->purge_dead_states(); + + return aut; + } +} diff --git a/spot/twaalgos/translate_aa.hh b/spot/twaalgos/translate_aa.hh new file mode 100644 index 000000000..9a8760072 --- /dev/null +++ b/spot/twaalgos/translate_aa.hh @@ -0,0 +1,32 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2010-2015, 2017, 2019-2020 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de +// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), +// Université Pierre et Marie Curie. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
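// ltl_to_aa() rewrites its argument into negative normal form and
// translates it into an alternating automaton with co-Büchi acceptance.
// A minimal usage sketch (mirroring the small driver added in
// tests/core/ltl2aa.cc later in this series):
//
//   spot::formula f = spot::parse_formula("a U Gb");
//   spot::bdd_dict_ptr d = spot::make_bdd_dict();
//   spot::twa_graph_ptr aut = spot::ltl_to_aa(f, d, true);
//   spot::print_hoa(std::cout, aut);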
+ +#pragma once + +#include +#include + +namespace spot +{ + SPOT_API twa_graph_ptr + ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states = false); +} From 43ed07d283240509ea7a45dda57841542b61f1a6 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 7 Jul 2022 17:57:05 +0200 Subject: [PATCH 550/606] ltl2aa: factorize self-loop creation --- spot/twaalgos/translate_aa.cc | 63 +++++++++++++++-------------------- 1 file changed, 26 insertions(+), 37 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 0663651de..531196442 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -43,6 +43,29 @@ namespace spot internal::univ_dest_mapper uniq_; outedge_combiner oe_; + void add_self_loop(twa_graph::edge_storage_t const& e, + unsigned init_state, + acc_cond::mark_t acc) + { + if (e.dst == accepting_sink_) + { + // avoid creating a univ_dests vector if the only dest is an + // accepting sink, which can be simplified, only keeping the self + // loop + aut_->new_edge(init_state, init_state, e.cond, acc); + return; + } + + auto dests = aut_->univ_dests(e); + std::vector new_dests(dests.begin(), dests.end()); + new_dests.push_back(init_state); + + unsigned dst = uniq_.new_univ_dests(new_dests.begin(), + new_dests.end()); + aut_->new_edge(init_state, dst, e.cond, acc); + } + + unsigned recurse(formula f) { switch (f.kind()) @@ -134,18 +157,7 @@ namespace spot std::vector new_dests; for (auto& e : aut_->out(lhs_init)) - { - auto dests = aut_->univ_dests(e); - std::copy(dests.begin(), dests.end(), - std::back_inserter(new_dests)); - new_dests.push_back(init_state); - - unsigned dest = uniq_.new_univ_dests(new_dests.begin(), - new_dests.end()); - aut_->new_edge(init_state, dest, e.cond, acc); - - new_dests.clear(); - } + add_self_loop(e, init_state, acc); for (auto& e : aut_->out(rhs_init)) { @@ -173,20 +185,8 @@ namespace spot unsigned lhs_init = recurse(f[0]); unsigned rhs_init = recurse(f[1]); - std::vector new_dests; for (auto& e : aut_->out(rhs_init)) - { - auto dests = aut_->univ_dests(e); - std::copy(dests.begin(), dests.end(), - std::back_inserter(new_dests)); - new_dests.push_back(init_state); - - unsigned dst = uniq_.new_univ_dests(new_dests.begin(), - new_dests.end()); - aut_->new_edge(init_state, dst, e.cond, acc); - - new_dests.clear(); - } + add_self_loop(e, init_state, acc); std::vector dsts; for (const auto& lhs_e : aut_->out(lhs_init)) @@ -253,18 +253,7 @@ namespace spot // the product of edges std::vector new_dests; for (auto& e : aut_->out(sub_init)) - { - auto dests = aut_->univ_dests(e); - std::copy(dests.begin(), dests.end(), - std::back_inserter(new_dests)); - new_dests.push_back(init_state); - - unsigned dst = uniq_.new_univ_dests(new_dests.begin(), - new_dests.end()); - aut_->new_edge(init_state, dst, e.cond, {}); - - new_dests.clear(); - } + add_self_loop(e, init_state, {}); return init_state; } From ffd60219b5216c572405de97d3843da0ae89c1c6 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 13 Jul 2022 16:11:54 +0200 Subject: [PATCH 551/606] psl not working --- spot/twaalgos/translate_aa.cc | 78 ++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 531196442..c68b30268 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -20,6 +20,7 @@ #include "config.h" #include #include +#include #include #include @@ -258,6 +259,81 @@ namespace spot return init_state; } 
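      // Work-in-progress handling of r []-> phi (UConcat): the SERE r is
      // first translated into a finite automaton by iterated derivation
      // (derive_finite_automaton_with_first), and that automaton's
      // structure is then re-encoded here with one anonymous BDD variable
      // per SERE state, so that for each letter the set of successor
      // states can be read off a signature BDD and turned into a universal
      // destination.  The right-hand-side obligation phi is not yet
      // combined with those edges (see the FIXME just below).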
+ case op::UConcat: + { + // FIXME: combine out edges with rhs ! + //unsigned rhs_init = recurse(f[1]); + twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0]); + + const auto& dict = sere_aut->get_dict(); + + std::map old_to_new; + std::map state_to_var; + std::map var_to_state; + bdd vars = bddtrue; + bdd aps = sere_aut->ap_vars(); + std::vector univ_dest; + + // registers a state in various maps and returns the index of the + // anonymous bdd var representing that state + auto register_state = [&](unsigned st) -> int { + auto p = state_to_var.emplace(st, 0); + if (p.second) + { + int v = dict->register_anonymous_variables(1, this); + p.first->second = v; + var_to_state.emplace(v, st); + + unsigned new_st = aut_->new_state(); + old_to_new.emplace(st, new_st); + + vars &= bdd_ithvar(v); + } + + return p.first->second; + }; + + unsigned ns = sere_aut->num_states(); + for (unsigned st = 0; st < ns; ++st) + { + register_state(st); + + bdd sig = bddfalse; + for (const auto& e : sere_aut->out(st)) + { + int st_bddi = register_state(e.dst); + sig |= e.cond & bdd_ithvar(st_bddi); + } + + for (bdd cond : minterms_of(bddtrue, aps)) + { + bdd dest = bdd_appex(sig, cond, bddop_and, aps); + while (dest != bddtrue) + { + assert(bdd_low(dest) == bddfalse); + auto it = var_to_state.find(bdd_var(dest)); + assert(it != var_to_state.end()); + auto it2 = old_to_new.find(it->second); + assert(it2 != old_to_new.end()); + univ_dest.push_back(it2->second); + dest = bdd_high(dest); + } + + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned src = it->second; + unsigned dst = uniq_.new_univ_dests(univ_dest.begin(), + univ_dest.end()); + aut_->new_edge(src, dst, cond, {}); + } + } + + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + + return it->second; + } + case op::eword: case op::Xor: case op::Implies: @@ -267,7 +343,6 @@ namespace spot case op::NegClosureMarked: case op::EConcat: case op::EConcatMarked: - case op::UConcat: case op::OrRat: case op::AndRat: case op::AndNLM: @@ -288,7 +363,6 @@ namespace spot twa_graph_ptr ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states) { - SPOT_ASSERT(f.is_ltl_formula()); f = negative_normal_form(f); auto aut = make_twa_graph(dict); From 7b376a212c6e0e3762ff1ea50568f0ad4533b091 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 31 Aug 2022 10:59:39 +0200 Subject: [PATCH 552/606] Add ltl2aa binary to tests/core --- tests/Makefile.am | 2 ++ tests/core/.gitignore | 1 + tests/core/ltl2aa.cc | 22 ++++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 tests/core/ltl2aa.cc diff --git a/tests/Makefile.am b/tests/Makefile.am index 6f7abf994..9bf0cef73 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -76,6 +76,7 @@ check_PROGRAMS = \ core/intvcomp \ core/intvcmp2 \ core/kripkecat \ + core/ltl2aa \ core/ltl2dot \ core/ltl2text \ core/ltlrel \ @@ -125,6 +126,7 @@ core_cube_SOURCES = core/cube.cc core_equals_SOURCES = core/equalsf.cc core_kind_SOURCES = core/kind.cc core_length_SOURCES = core/length.cc +core_ltl2aa_SOURCES = core/ltl2aa.cc core_ltl2dot_SOURCES = core/readltl.cc core_ltl2dot_CPPFLAGS = $(AM_CPPFLAGS) -DDOTTY core_ltl2text_SOURCES = core/readltl.cc diff --git a/tests/core/.gitignore b/tests/core/.gitignore index d4ebfae45..fdee02715 100644 --- a/tests/core/.gitignore +++ b/tests/core/.gitignore @@ -33,6 +33,7 @@ kripkecat length .libs ikwiad +ltl2aa ltl2dot ltl2text ltlmagic diff --git a/tests/core/ltl2aa.cc b/tests/core/ltl2aa.cc new file 
mode 100644 index 000000000..82b4b9c7e --- /dev/null +++ b/tests/core/ltl2aa.cc @@ -0,0 +1,22 @@ +#include "config.h" + +#include + +#include +#include +#include +#include + +int main(int argc, char * argv[]) +{ + if (argc < 3) + return 1; + + spot::formula f = spot::parse_formula(argv[1]); + spot::bdd_dict_ptr d = spot::make_bdd_dict(); + auto aut = ltl_to_aa(f, d, true); + + std::ofstream out(argv[2]); + spot::print_hoa(out, aut); + return 0; +} From 8e8e44c5f9222882b5f0e3aa9f2a9c6a9ecd50fc Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 9 Aug 2022 12:24:37 +0200 Subject: [PATCH 553/606] ltl2aa: fix R & M operators handling --- spot/twaalgos/translate_aa.cc | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index c68b30268..490ffd7a7 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -189,31 +189,9 @@ namespace spot for (auto& e : aut_->out(rhs_init)) add_self_loop(e, init_state, acc); - std::vector dsts; - for (const auto& lhs_e : aut_->out(lhs_init)) - { - const auto& lhs_dsts = aut_->univ_dests(lhs_e); - std::copy(lhs_dsts.begin(), lhs_dsts.end(), - std::back_inserter(dsts)); - size_t lhs_dest_num = dsts.size(); - - for (const auto& rhs_e : aut_->out(rhs_init)) - { - const auto& rhs_dsts = aut_->univ_dests(rhs_e); - std::copy(rhs_dsts.begin(), rhs_dsts.end(), - std::back_inserter(dsts)); - - bdd cond = lhs_e.cond & rhs_e.cond; - - unsigned dst = uniq_.new_univ_dests(dsts.begin(), - dsts.end()); - aut_->new_edge(init_state, dst, cond, {}); - - // reset to only lhs' current edge destinations - dsts.resize(lhs_dest_num); - } - dsts.clear(); - } + bdd comb = oe_(lhs_init); + comb &= oe_(rhs_init); + oe_.new_dests(init_state, comb); return init_state; } From 0c76e6dd211476ef79e5c0146719bfeb4ba76b03 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 31 Aug 2022 13:59:05 +0200 Subject: [PATCH 554/606] ltl2aa: fix bdd manipulation in UConcat --- spot/twaalgos/translate_aa.cc | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 490ffd7a7..b15dfc279 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -286,22 +286,26 @@ namespace spot for (bdd cond : minterms_of(bddtrue, aps)) { bdd dest = bdd_appex(sig, cond, bddop_and, aps); - while (dest != bddtrue) + while (dest != bddfalse) { - assert(bdd_low(dest) == bddfalse); + assert(bdd_high(dest) == bddtrue); auto it = var_to_state.find(bdd_var(dest)); assert(it != var_to_state.end()); auto it2 = old_to_new.find(it->second); assert(it2 != old_to_new.end()); univ_dest.push_back(it2->second); - dest = bdd_high(dest); + dest = bdd_low(dest); } auto it = old_to_new.find(st); assert(it != old_to_new.end()); unsigned src = it->second; - unsigned dst = uniq_.new_univ_dests(univ_dest.begin(), - univ_dest.end()); + + unsigned dst = univ_dest.empty() + ? 
accepting_sink_ + : (uniq_.new_univ_dests(univ_dest.begin(), + univ_dest.end())); + aut_->new_edge(src, dst, cond, {}); } } From 4153ce0655ba34eccb94a8374b442382fdf5e105 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 6 Sep 2022 16:07:28 +0200 Subject: [PATCH 555/606] ltl2aa: share dict between sere and final aut --- spot/tl/derive.cc | 10 ++++++---- spot/tl/derive.hh | 6 ++++-- spot/twaalgos/translate_aa.cc | 10 ++++++++-- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index 2b1873ed2..c6c328786 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -203,9 +203,9 @@ namespace spot } twa_graph_ptr - derive_finite_automaton_with_first(formula f, bool deterministic) + derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic) { - auto bdd_dict = make_bdd_dict(); auto aut = make_twa_graph(bdd_dict); aut->prop_state_acc(true); @@ -403,9 +403,11 @@ namespace spot } twa_graph_ptr - derive_automaton_with_first(formula f, bool deterministic) + derive_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic) { - auto finite = derive_finite_automaton_with_first(f, deterministic); + auto finite = derive_finite_automaton_with_first(f, bdd_dict, + deterministic); return from_finite(finite); } diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh index 1947951ed..9e094c7b6 100644 --- a/spot/tl/derive.hh +++ b/spot/tl/derive.hh @@ -39,13 +39,15 @@ namespace spot derive_automaton(formula f, bool deterministic = true); SPOT_API twa_graph_ptr - derive_automaton_with_first(formula f, bool deterministic = true); + derive_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic = true); SPOT_API twa_graph_ptr derive_finite_automaton(formula f, bool deterministic = true); SPOT_API twa_graph_ptr - derive_finite_automaton_with_first(formula f, bool deterministic = true); + derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic = true); SPOT_API formula rewrite_and_nlm(formula f); diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index b15dfc279..0a29a0671 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -39,6 +39,11 @@ namespace spot { } + ~ltl_to_aa_builder() + { + aut_->get_dict()->unregister_all_my_variables(this); + } + twa_graph_ptr aut_; unsigned accepting_sink_; internal::univ_dest_mapper uniq_; @@ -241,9 +246,9 @@ namespace spot { // FIXME: combine out edges with rhs ! 
//unsigned rhs_init = recurse(f[1]); - twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0]); + const auto& dict = aut_->get_dict(); + twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); - const auto& dict = sere_aut->get_dict(); std::map old_to_new; std::map state_to_var; @@ -271,6 +276,7 @@ namespace spot return p.first->second; }; + aut_->copy_ap_of(sere_aut); unsigned ns = sere_aut->num_states(); for (unsigned st = 0; st < ns; ++st) { From 58965475fb32c5415a5fc009ac739b3c18b606de Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 6 Sep 2022 16:31:17 +0200 Subject: [PATCH 556/606] ltl2aa: implem closure --- spot/twaalgos/translate_aa.cc | 41 ++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 0a29a0671..5330c787d 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -71,6 +71,35 @@ namespace spot aut_->new_edge(init_state, dst, e.cond, acc); } + unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut) + { + std::map old_to_new; + auto register_state = [&](unsigned st) -> unsigned { + auto p = old_to_new.emplace(st, 0); + if (p.second) + { + unsigned new_st = aut_->new_state(); + p.first->second = new_st; + } + return p.first->second; + }; + + unsigned ns = sere_aut->num_states(); + for (unsigned st = 0; st < ns; ++st) + { + unsigned new_st = register_state(st); + for (const auto& e : sere_aut->out(st)) + { + if (sere_aut->state_is_accepting(e.dst)) + aut_->new_edge(new_st, accepting_sink_, e.cond); + else + aut_->new_edge(new_st, register_state(e.dst), e.cond); + } + } + + return register_state(sere_aut->get_init_state_number()); + } + unsigned recurse(formula f) { @@ -322,13 +351,19 @@ namespace spot return it->second; } + case op::Closure: + { + twa_graph_ptr sere_aut = + derive_finite_automaton_with_first(f[0], aut_->get_dict()); + return copy_sere_aut_to_res(sere_aut); + } + + case op::NegClosure: + case op::NegClosureMarked: case op::eword: case op::Xor: case op::Implies: case op::Equiv: - case op::Closure: - case op::NegClosure: - case op::NegClosureMarked: case op::EConcat: case op::EConcatMarked: case op::OrRat: From 93c50e1610158a3ace711b58929197a53fc7dc7a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 6 Sep 2022 16:31:50 +0200 Subject: [PATCH 557/606] ltl2aa: place new state in var_to_state map --- spot/twaalgos/translate_aa.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 5330c787d..c82b81564 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -294,10 +294,10 @@ namespace spot { int v = dict->register_anonymous_variables(1, this); p.first->second = v; - var_to_state.emplace(v, st); unsigned new_st = aut_->new_state(); old_to_new.emplace(st, new_st); + var_to_state.emplace(v, new_st); vars &= bdd_ithvar(v); } @@ -326,9 +326,7 @@ namespace spot assert(bdd_high(dest) == bddtrue); auto it = var_to_state.find(bdd_var(dest)); assert(it != var_to_state.end()); - auto it2 = old_to_new.find(it->second); - assert(it2 != old_to_new.end()); - univ_dest.push_back(it2->second); + univ_dest.push_back(it->second); dest = bdd_low(dest); } From eca0bd459036a13a88ab7b4a594d5ef5f0a5fdb6 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 16 Sep 2022 03:40:22 +0200 Subject: [PATCH 558/606] ltl2aa: fix two bugs in SERE aut merge --- spot/twaalgos/translate_aa.cc | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index c82b81564..daf9126cb 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -73,6 +73,7 @@ namespace spot unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut) { + aut_->copy_ap_of(sere_aut); std::map old_to_new; auto register_state = [&](unsigned st) -> unsigned { auto p = old_to_new.emplace(st, 0); @@ -340,6 +341,7 @@ namespace spot univ_dest.end())); aut_->new_edge(src, dst, cond, {}); + univ_dest.clear(); } } From 0957c11c9429f34e9558c03b65db6b3ba1f35e12 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 16 Sep 2022 03:41:30 +0200 Subject: [PATCH 559/606] ltl2aa: finish SERE aut merging with rhs outedges --- spot/twaalgos/alternation.cc | 21 +++++++++++++++++++-- spot/twaalgos/alternation.hh | 2 +- spot/twaalgos/translate_aa.cc | 20 ++++++++++++++++++-- 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index d5d59a961..03b8d5c2a 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -38,7 +38,7 @@ namespace spot aut_->get_dict()->unregister_all_my_variables(this); } - bdd outedge_combiner::operator()(unsigned st) + bdd outedge_combiner::operator()(unsigned st, const std::vector& dst_filter) { const auto& dict = aut_->get_dict(); bdd res = bddtrue; @@ -47,6 +47,21 @@ namespace spot bdd res2 = bddfalse; for (auto& e: aut_->out(d1)) { + // handle edge filtering + if (!dst_filter.empty()) + { + // if any edge destination is an accepting state in the SERE + // automaton, handle the edge, otherwise skip it + auto univ_dests = aut_->univ_dests(e.dst); + if (std::all_of(univ_dests.begin(), univ_dests.end(), + [&](unsigned dst) + { + return std::find(dst_filter.begin(), dst_filter.end(), dst) + == dst_filter.end(); + })) + continue; + } + bdd out = bddtrue; for (unsigned d: aut_->univ_dests(e.dst)) { @@ -65,7 +80,9 @@ namespace spot } res2 |= e.cond & out; } - res &= res2; + + if (res2 != bddfalse) + res &= res2; } return res; } diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index 9490272a1..4949006f2 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -53,7 +53,7 @@ namespace spot public: outedge_combiner(const twa_graph_ptr& aut, unsigned sink = -1u); ~outedge_combiner(); - bdd operator()(unsigned st); + bdd operator()(unsigned st, const std::vector& dst_filter = std::vector()); void new_dests(unsigned st, bdd out) const; }; diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index daf9126cb..11b783691 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -274,8 +274,7 @@ namespace spot case op::UConcat: { - // FIXME: combine out edges with rhs ! 
- //unsigned rhs_init = recurse(f[1]); + unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); @@ -286,6 +285,7 @@ namespace spot bdd vars = bddtrue; bdd aps = sere_aut->ap_vars(); std::vector univ_dest; + std::vector acc_states; // registers a state in various maps and returns the index of the // anonymous bdd var representing that state @@ -300,6 +300,9 @@ namespace spot old_to_new.emplace(st, new_st); var_to_state.emplace(v, new_st); + if (sere_aut->state_is_accepting(st)) + acc_states.push_back(new_st); + vars &= bdd_ithvar(v); } @@ -345,9 +348,22 @@ namespace spot } } + for (unsigned st = 0; st < ns; ++st) + { + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned new_st = it->second; + + bdd comb = bddtrue; + comb &= oe_(new_st, acc_states); + comb &= oe_(rhs_init); + oe_.new_dests(new_st, comb); + } + auto it = old_to_new.find(sere_aut->get_init_state_number()); assert(it != old_to_new.end()); + //aut_->merge_edges(); return it->second; } From dec854ee079763fbd25f64453ac54795846a4be3 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 16 Sep 2022 15:48:07 +0200 Subject: [PATCH 560/606] ltl2aa: finalize UConcat --- spot/twaalgos/alternation.cc | 6 +++++- spot/twaalgos/alternation.hh | 3 ++- spot/twaalgos/translate_aa.cc | 11 +++++++---- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 03b8d5c2a..205979f92 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -38,7 +38,8 @@ namespace spot aut_->get_dict()->unregister_all_my_variables(this); } - bdd outedge_combiner::operator()(unsigned st, const std::vector& dst_filter) + bdd outedge_combiner::operator()(unsigned st, const std::vector& dst_filter, + bool remove_original_edges) { const auto& dict = aut_->get_dict(); bdd res = bddtrue; @@ -79,6 +80,9 @@ namespace spot out &= bdd_ithvar(p.first->second); } res2 |= e.cond & out; + + if (remove_original_edges) + e.cond = bddfalse; } if (res2 != bddfalse) diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index 4949006f2..8d1027e8b 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -53,7 +53,8 @@ namespace spot public: outedge_combiner(const twa_graph_ptr& aut, unsigned sink = -1u); ~outedge_combiner(); - bdd operator()(unsigned st, const std::vector& dst_filter = std::vector()); + bdd operator()(unsigned st, const std::vector& dst_filter = std::vector(), + bool remove_original_edges = false); void new_dests(unsigned st, bdd out) const; }; diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 11b783691..bd1a1d3de 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -355,15 +355,18 @@ namespace spot unsigned new_st = it->second; bdd comb = bddtrue; - comb &= oe_(new_st, acc_states); - comb &= oe_(rhs_init); - oe_.new_dests(new_st, comb); + comb &= oe_(new_st, acc_states, true); + if (comb != bddtrue) + { + comb &= oe_(rhs_init); + oe_.new_dests(new_st, comb); + } } auto it = old_to_new.find(sere_aut->get_init_state_number()); assert(it != old_to_new.end()); - //aut_->merge_edges(); + aut_->merge_edges(); return it->second; } From e5d7ba9e22afb9b412e036a7a43f495552481fcc Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 16 Sep 2022 15:49:56 +0200 Subject: [PATCH 561/606] ltl2aa: comment --- spot/twaalgos/translate_aa.cc | 1 + 1 file changed, 1 insertion(+) 
diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index bd1a1d3de..1fd6e03df 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -285,6 +285,7 @@ namespace spot bdd vars = bddtrue; bdd aps = sere_aut->ap_vars(); std::vector univ_dest; + // TODO: this should be a std::vector ! std::vector acc_states; // registers a state in various maps and returns the index of the From 465b135f44839b624648cbd355f14231051ad2e4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 20 Sep 2022 22:42:40 +0200 Subject: [PATCH 562/606] ltl2aa: implement EConcat --- spot/twaalgos/translate_aa.cc | 69 +++++++++++++++++++++++++++++++---- 1 file changed, 61 insertions(+), 8 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 1fd6e03df..d4128ed4f 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -71,34 +71,43 @@ namespace spot aut_->new_edge(init_state, dst, e.cond, acc); } - unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut) + unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut, + std::map& old_to_new, + std::vector* acc_states = nullptr, + bool use_accepting_sink = true) { + unsigned ns = sere_aut->num_states(); + + // TODO: create all new states at once, keeping an initial offset (the + // number of states already present in aut_) aut_->copy_ap_of(sere_aut); - std::map old_to_new; auto register_state = [&](unsigned st) -> unsigned { auto p = old_to_new.emplace(st, 0); if (p.second) { unsigned new_st = aut_->new_state(); p.first->second = new_st; + if (acc_states != nullptr && sere_aut->state_is_accepting(st)) + acc_states->push_back(new_st); } return p.first->second; }; - unsigned ns = sere_aut->num_states(); for (unsigned st = 0; st < ns; ++st) { unsigned new_st = register_state(st); for (const auto& e : sere_aut->out(st)) { - if (sere_aut->state_is_accepting(e.dst)) + if (use_accepting_sink && sere_aut->state_is_accepting(e.dst)) aut_->new_edge(new_st, accepting_sink_, e.cond); else aut_->new_edge(new_st, register_state(e.dst), e.cond); } } - return register_state(sere_aut->get_init_state_number()); + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + return it->second; } @@ -272,6 +281,47 @@ namespace spot return init_state; } + case op::EConcat: + { + unsigned rhs_init = recurse(f[1]); + const auto& dict = aut_->get_dict(); + twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + + // TODO: this should be a std::vector ! 
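+          // acc_states remembers which copied states correspond to
+          // accepting states of the SERE automaton.  Each edge reaching
+          // such a state is then duplicated towards the successors of
+          // rhs_init, with both conditions conjoined, so the right-hand
+          // side starts on the letter that completes a match of the SERE.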
+ std::vector acc_states; + std::map old_to_new; + copy_sere_aut_to_res(sere_aut, old_to_new, &acc_states, false); + + std::vector acc_edges; + unsigned ns = sere_aut->num_states(); + for (unsigned st = 0; st < ns; ++st) + { + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned new_st = it->second; + + for (auto& e : aut_->out(new_st)) + { + e.acc = acc_cond::mark_t{0}; + if (std::find(acc_states.begin(), acc_states.end(), e.dst) + != acc_states.end()) + acc_edges.push_back(aut_->edge_number(e)); + } + } + + for (unsigned i : acc_edges) + { + auto& e1 = aut_->edge_storage(i); + for (const auto& e2 : aut_->out(rhs_init)) + aut_->new_edge(e1.src, e2.dst, e1.cond & e2.cond); + } + + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + + return it->second; + } + case op::UConcat: { unsigned rhs_init = recurse(f[1]); @@ -375,7 +425,8 @@ namespace spot { twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], aut_->get_dict()); - return copy_sere_aut_to_res(sere_aut); + std::map old_to_new; + return copy_sere_aut_to_res(sere_aut, old_to_new); } case op::NegClosure: @@ -384,7 +435,6 @@ namespace spot case op::Xor: case op::Implies: case op::Equiv: - case op::EConcat: case op::EConcatMarked: case op::OrRat: case op::AndRat: @@ -419,7 +469,10 @@ namespace spot aut->set_init_state(init_state); if (purge_dead_states) - aut->purge_dead_states(); + { + aut->purge_dead_states(); + aut->merge_edges(); + } return aut; } From 07a283498ff985147f9a66797ffd809cafe63451 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 30 Sep 2022 01:25:45 +0200 Subject: [PATCH 563/606] alternation: fix bug introduced in oe_combiner turns out sometimes we want to account for bddfalse --- spot/twaalgos/alternation.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index 205979f92..fb95fd6f3 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -85,8 +85,7 @@ namespace spot e.cond = bddfalse; } - if (res2 != bddfalse) - res &= res2; + res &= res2; } return res; } From 7b936819cc1bb7847dda4d61cb3b8ef77b550b68 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 30 Sep 2022 01:32:01 +0200 Subject: [PATCH 564/606] ltl2aa: handle edge case in UConcat If SERE recognizes false, then combined with UConcat the property is always true. 
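For example, {0} []-> p holds on every word: a SERE with an empty language
has no matching finite prefix, so the obligation p is never triggered.  The
translation now checks whether the derived automaton has at least one
accepting state, and returns the accepting sink when it does not.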
--- spot/twaalgos/translate_aa.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index d4128ed4f..c18570d41 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -328,6 +328,13 @@ namespace spot const auto& dict = aut_->get_dict(); twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + // DFA recognizes the empty language, so {0} []-> rhs is always true + unsigned ns = sere_aut->num_states(); + bool has_accepting_state = false; + for (unsigned st = 0; st < ns && !has_accepting_state; ++st) + has_accepting_state = sere_aut->state_is_accepting(st); + if (!has_accepting_state) + return accepting_sink_; std::map old_to_new; std::map state_to_var; @@ -361,7 +368,6 @@ namespace spot }; aut_->copy_ap_of(sere_aut); - unsigned ns = sere_aut->num_states(); for (unsigned st = 0; st < ns; ++st) { register_state(st); From e80c98751d679d736520a1fe0af17c7e80d97782 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 3 Nov 2022 06:58:21 +0100 Subject: [PATCH 565/606] sere_to_tgba: produce state-names --- spot/twaalgos/ltl2tgba_fm.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 9c7674b0f..838db28be 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -2218,9 +2218,11 @@ namespace spot const auto acc_mark = res->set_buchi(); size_t sn = namer->state_to_name.size(); + auto names = new std::vector(sn); for (size_t i = 0; i < sn; ++i) { formula g = namer->state_to_name[i]; + (*names)[i] = str_psl(g); if (g.accepts_eword()) { if (res->get_graph().state_storage(i).succ == 0) @@ -2233,6 +2235,8 @@ namespace spot } } + res->set_named_prop("state-names", names); + return res; } } From 89543e6a73881efc5e4cfdd9dbec1bed13af603b Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 10 Nov 2022 07:18:29 +0100 Subject: [PATCH 566/606] derive: option for some optimisations --- spot/tl/derive.cc | 17 ++++++++++++++--- spot/tl/derive.hh | 10 ++++++++-- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index c6c328786..f55998660 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -204,7 +204,7 @@ namespace spot twa_graph_ptr derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, - bool deterministic) + bool deterministic, derive_opts options) { auto aut = make_twa_graph(bdd_dict); @@ -264,7 +264,7 @@ namespace spot for (const bdd one : minterms_of(firsts, firsts_support)) { formula derivative = - partial_derivation(curr_f, one, bdd_dict, aut.get()); + partial_derivation(curr_f, one, bdd_dict, aut.get(), options); // no transition possible from this letter if (derivative.is(op::ff)) @@ -422,7 +422,7 @@ namespace spot formula partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, - void* owner) + void* owner, derive_opts options) { if (f.is_boolean()) { @@ -472,6 +472,17 @@ namespace spot formula d_E = partial_derivation(f[0], var, d, owner); + if (options.concat_star_distribute && !f[0].is_finite() && d_E.is(op::OrRat)) + { + std::vector distributed; + for (auto g : d_E) + { + distributed.push_back(formula::Concat({g, formula::Star(f[0], min, max)})); + } + + return formula::OrRat(distributed); + } + return formula::Concat({ d_E, formula::Star(f[0], min, max) }); } diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh index 9e094c7b6..993db2ed2 100644 --- a/spot/tl/derive.hh +++ b/spot/tl/derive.hh @@ 
-29,11 +29,17 @@ namespace spot { + + struct derive_opts + { + bool concat_star_distribute = true; + }; + /// \ingroup tl_misc /// \brief Produce a SERE formula's partial derivative SPOT_API formula partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, - void* owner); + void* owner, derive_opts options = {}); SPOT_API twa_graph_ptr derive_automaton(formula f, bool deterministic = true); @@ -47,7 +53,7 @@ namespace spot SPOT_API twa_graph_ptr derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, - bool deterministic = true); + bool deterministic = true, derive_opts options = {}); SPOT_API formula rewrite_and_nlm(formula f); From 0fdd3c31f4b7b6840947e9f73f207e640b78eed3 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 21 Nov 2022 10:37:14 +0100 Subject: [PATCH 567/606] derive: add options to control distribution --- spot/tl/derive.cc | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc index f55998660..5e8526eec 100644 --- a/spot/tl/derive.cc +++ b/spot/tl/derive.cc @@ -451,12 +451,30 @@ namespace spot formula E = f[0]; formula F = f.all_but(0); - auto res = - formula::Concat({ partial_derivation(E, var, d, owner), F }); + formula d_E = partial_derivation(E, var, d, owner, options); + + formula res; + + if (options.concat_star_distribute && d_E.is(op::OrRat)) + { + std::vector distributed; + for (auto g : d_E) + { + distributed.push_back(formula::Concat({g, F})); + } + + res = formula::OrRat(distributed); + } + else + { + res = + formula::Concat({ partial_derivation(E, var, d, owner, options), F }); + } + if (E.accepts_eword()) res = formula::OrRat( - { res, partial_derivation(F, var, d, owner) }); + { res, partial_derivation(F, var, d, owner, options) }); return res; } @@ -470,7 +488,7 @@ namespace spot ? formula::unbounded() : (f.max() - 1); - formula d_E = partial_derivation(f[0], var, d, owner); + formula d_E = partial_derivation(f[0], var, d, owner, options); if (options.concat_star_distribute && !f[0].is_finite() && d_E.is(op::OrRat)) { @@ -494,7 +512,7 @@ namespace spot if (f.min() == 0 && f.max() == 0) return formula::tt(); - auto d_E = partial_derivation(E, var, d, owner); + auto d_E = partial_derivation(E, var, d, owner, options); auto min = f.min() == 0 ? 
0 : (f.min() - 1); auto max = f.max() == formula::unbounded() @@ -524,7 +542,7 @@ namespace spot for (auto subformula : f) { auto subderivation = - partial_derivation(subformula, var, d, owner); + partial_derivation(subformula, var, d, owner, options); subderivations.push_back(subderivation); } return formula::multop(f.kind(), std::move(subderivations)); @@ -533,7 +551,7 @@ namespace spot case op::AndNLM: { formula rewrite = rewrite_and_nlm(f); - return partial_derivation(rewrite, var, d, owner); + return partial_derivation(rewrite, var, d, owner, options); } // d(E:F) = {d(E):F} U {c(d(E)).d(F)} @@ -542,12 +560,12 @@ namespace spot formula E = f[0]; formula F = f.all_but(0); - auto d_E = partial_derivation(E, var, d, owner); + auto d_E = partial_derivation(E, var, d, owner, options); auto res = formula::Fusion({ d_E, F }); if (d_E.accepts_eword()) res = - formula::OrRat({ res, partial_derivation(F, var, d, owner) }); + formula::OrRat({ res, partial_derivation(F, var, d, owner, options) }); return res; } @@ -555,7 +573,7 @@ namespace spot case op::first_match: { formula E = f[0]; - auto d_E = partial_derivation(E, var, d, owner); + auto d_E = partial_derivation(E, var, d, owner, options); // if d_E.accepts_eword(), first_match(d_E) will return eword return formula::first_match(d_E); } From b9f461c0257bf8eb69eab9d35729f916b301945a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 30 Nov 2022 15:28:49 +0100 Subject: [PATCH 568/606] expansions: draft --- python/spot/impl.i | 2 + spot/tl/Makefile.am | 2 + spot/tl/expansions.cc | 418 ++++++++++++++++++++++++++++++++++++++++++ spot/tl/expansions.hh | 46 +++++ tests/Makefile.am | 2 + tests/core/expand.cc | 25 +++ 6 files changed, 495 insertions(+) create mode 100644 spot/tl/expansions.cc create mode 100644 spot/tl/expansions.hh create mode 100644 tests/core/expand.cc diff --git a/python/spot/impl.i b/python/spot/impl.i index 471b85cbb..725655c08 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -88,6 +88,7 @@ #include #include #include +#include #include #include #include @@ -634,6 +635,7 @@ namespace std { %include %include %include +%include %include %include %include diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index 1e5a68363..abb431267 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -32,6 +32,7 @@ tl_HEADERS = \ dot.hh \ environment.hh \ exclusive.hh \ + expansions.hh \ formula.hh \ hierarchy.hh \ length.hh \ @@ -58,6 +59,7 @@ libtl_la_SOURCES = \ derive.cc \ dot.cc \ exclusive.cc \ + expansions.cc \ formula.cc \ hierarchy.cc \ length.cc \ diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc new file mode 100644 index 000000000..4fb2c91c3 --- /dev/null +++ b/spot/tl/expansions.cc @@ -0,0 +1,418 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
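+// This file computes "expansions" of SERE formulas: an expansion_t maps
+// letters (BDDs over the atomic propositions) to suffix formulas, and
+// expansion_to_formula() rebuilds the corresponding SERE as a disjunction
+// of letter;suffix terms.  expand_finite_automaton() applies expansion()
+// repeatedly, creating one automaton state per distinct suffix formula
+// encountered.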
+ +#include "config.h" +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + static void + insert_or_merge(expansion_t& exp, bdd letter, formula suffix) + { + auto res = exp.insert({letter, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + // FIXME: could probably just return a map directly + static std::vector + formula_aps(formula f) + { + auto res = std::unordered_set(); + + f.traverse([&res](formula f) + { + if (f.is(op::ap)) + { + res.insert(f.ap_name()); + return true; + } + + return false; + }); + + return std::vector(res.begin(), res.end()); + } + formula + rewrite_and_nlm(formula f) + { + unsigned s = f.size(); + std::vector final; + std::vector non_final; + + for (auto g: f) + if (g.accepts_eword()) + final.emplace_back(g); + else + non_final.emplace_back(g); + + if (non_final.empty()) + // (a* & b*);c = (a*|b*);c + return formula::OrRat(std::move(final)); + if (!final.empty()) + { + // let F_i be final formulae + // N_i be non final formula + // (F_1 & ... & F_n & N_1 & ... & N_m) + // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) + // | (F_1 | ... | F_n) && (N_1 & ... & N_m);[*] + formula f = formula::OrRat(std::move(final)); + formula n = formula::AndNLM(std::move(non_final)); + formula t = formula::one_star(); + formula ft = formula::Concat({f, t}); + formula nt = formula::Concat({n, t}); + formula ftn = formula::AndRat({ft, n}); + formula fnt = formula::AndRat({f, nt}); + return formula::OrRat({ftn, fnt}); + } + // No final formula. + // Translate N_1 & N_2 & ... & N_n into + // N_1 && (N_2;[*]) && ... && (N_n;[*]) + // | (N_1;[*]) && N_2 && ... && (N_n;[*]) + // | (N_1;[*]) && (N_2;[*]) && ... && N_n + formula star = formula::one_star(); + std::vector disj; + for (unsigned n = 0; n < s; ++n) + { + std::vector conj; + for (unsigned m = 0; m < s; ++m) + { + formula g = f[m]; + if (n != m) + g = formula::Concat({g, star}); + conj.emplace_back(g); + } + disj.emplace_back(formula::AndRat(std::move(conj))); + } + return formula::OrRat(std::move(disj)); + } + } + + formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d) + { + std::vector res; + + for (const auto& [key, val] : e) + { + formula prefix = bdd_to_formula(key, d); + res.push_back(formula::Concat({prefix, val})); + } + + return formula::OrRat(res); + } + + expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner) + { + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (f_bdd == bddfalse) + return {}; + + return {{f_bdd, formula::eword()}}; + } + + + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + return {{bddfalse, formula::ff()}}; + + case op::Concat: + { + auto exps = expansion(f[0], d, owner); + + expansion_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); + } + + if (f[0].accepts_eword()) + { + auto exps_rest = expansion(f.all_but(0), d, owner); + for (const auto& [bdd_l, form] : exps_rest) + { + insert_or_merge(res, bdd_l, form); + } + } + return res; + } + + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return {{bddtrue, formula::eword()}}; + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? 
formula::unbounded() + : (f.max() - 1); + + auto E_i_j_minus = formula::FStar(E, min, max); + + auto exp = expansion(E, d, owner); + expansion_t res; + for (const auto& [li, ei] : exp) + { + insert_or_merge(res, li, formula::Fusion({ei, E_i_j_minus})); + + if (ei.accepts_eword() && f.min() != 0) + { + for (const auto& [ki, fi] : expansion(E_i_j_minus, d, owner)) + { + // FIXME: build bdd once + if ((li & ki) != bddfalse) + insert_or_merge(res, li & ki, fi); + } + } + } + if (f.min() == 0) + insert_or_merge(res, bddtrue, formula::eword()); + + return res; + } + + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto exps = expansion(f[0], d, owner); + + expansion_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + } + + return res; + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + return expansion(rewrite, d, owner); + } + + case op::first_match: + { + auto exps = expansion(f[0], d, owner); + + expansion_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::first_match(form)}); + } + + return res; + } + + case op::Fusion: + { + expansion_t res; + formula E = f[0]; + formula F = f.all_but(0); + + expansion_t Ei = expansion(E, d, owner); + // TODO: std::option + expansion_t Fj = expansion(F, d, owner); + + for (const auto& [li, ei] : Ei) + { + if (ei.accepts_eword()) + { + for (const auto& [kj, fj] : Fj) + if ((li & kj) != bddfalse) + insert_or_merge(res, li & kj, fj); + } + insert_or_merge(res, li, formula::Fusion({ei, F})); + } + + return res; + } + + case op::AndRat: + case op::OrRat: + { + expansion_t res; + for (const auto& sub_f : f) + { + auto exps = expansion(sub_f, d, owner); + + if (exps.empty()) + { + if (f.kind() == op::OrRat) + continue; + + // op::AndRat: one of the expansions was empty (the only + // edge was `false`), so the AndRat is empty as + // well + res.clear(); + break; + } + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + expansion_t new_res; + for (const auto& [l_key, l_val] : exps) + { + for (const auto& [r_key, r_val] : res) + { + if ((l_key & r_key) != bddfalse) + insert_or_merge(new_res, l_key & r_key, formula::multop(f.kind(), {l_val, r_val})); + + if (f.is(op::OrRat)) + { + if ((l_key & !r_key) != bddfalse) + insert_or_merge(new_res, l_key & !r_key, l_val); + + if ((!l_key & r_key) != bddfalse) + insert_or_merge(new_res, !l_key & r_key, r_val); + } + } + } + + res = std::move(new_res); + } + + return res; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return {}; + } + + twa_graph_ptr + expand_automaton(formula f, bdd_dict_ptr d) + { + auto finite = expand_finite_automaton(f, d); + return from_finite(finite); + } + + twa_graph_ptr + expand_finite_automaton(formula f, bdd_dict_ptr d) + { + auto aut = make_twa_graph(d); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + 
auto find_dst = [&](formula suffix) -> unsigned + { + unsigned dst; + auto it = formula2state.find(suffix); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({suffix, dst}); + formula2state.insert({suffix, dst}); + std::ostringstream ss; + ss << suffix; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark= curr_f.accepts_eword() + ? acc_mark + : acc_cond::mark_t(); + + auto exp = expansion(curr_f, d, aut.get()); + + for (const auto& [letter, suffix] : exp) + { + if (suffix.is(op::ff)) + continue; + + auto dst = find_dst(suffix); + aut->new_edge(curr_state, dst, letter, curr_acc_mark); + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + aut->merge_edges(); + return aut; + } +} diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh new file mode 100644 index 000000000..af80d7e8b --- /dev/null +++ b/spot/tl/expansions.hh @@ -0,0 +1,46 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
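+// Minimal usage sketch (mirroring tests/core/expand.cc; `f` is assumed to
+// be a SERE obtained from parse_infix_sere()):
+//
+//   spot::bdd_dict_ptr d = spot::make_bdd_dict();
+//   spot::expansion_t m = spot::expansion(f, d, nullptr);
+//   for (const auto& [letter, suffix] : m)
+//     std::cout << spot::bdd_to_formula(letter, d) << ": " << suffix << '\n';
+//   d->unregister_all_my_variables(nullptr);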
+ +#pragma once + +#include + +#include + +#include +#include +#include +#include + +namespace spot +{ + using expansion_t = std::map; + + SPOT_API expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner); + + SPOT_API formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d); + + SPOT_API twa_graph_ptr + expand_automaton(formula f, bdd_dict_ptr d); + + SPOT_API twa_graph_ptr + expand_finite_automaton(formula f, bdd_dict_ptr d); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index 9bf0cef73..a061ba23d 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -69,6 +69,7 @@ check_PROGRAMS = \ core/cube \ core/emptchk \ core/equals \ + core/expand \ core/graph \ core/kind \ core/length \ @@ -111,6 +112,7 @@ core_bricks_SOURCES = core/bricks.cc core_checkpsl_SOURCES = core/checkpsl.cc core_checkta_SOURCES = core/checkta.cc core_emptchk_SOURCES = core/emptchk.cc +core_expand_SOURCES = core/expand.cc core_graph_SOURCES = core/graph.cc core_ikwiad_SOURCES = core/ikwiad.cc core_intvcomp_SOURCES = core/intvcomp.cc diff --git a/tests/core/expand.cc b/tests/core/expand.cc new file mode 100644 index 000000000..a589d6370 --- /dev/null +++ b/tests/core/expand.cc @@ -0,0 +1,25 @@ +#include "config.h" + +#include +#include +#include +#include + +int main(int argc, char** argv) +{ + if (argc != 2) + return 1; + + spot::formula f = spot::parse_infix_sere(argv[1]).f; + auto d = spot::make_bdd_dict(); + + auto m = spot::expansion(f, d, nullptr); + + for (const auto& [bdd_l, form] : m) + std::cout << '[' << bdd_to_formula(bdd_l, d) << ']' << ": " << form << std::endl; + std::cout << "formula: " << expansion_to_formula(m, d) << std::endl; + + d->unregister_all_my_variables(nullptr); + + return 0; +} From 1240fec39beeb851933bf54c0cf223f91b58892c Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 15 Dec 2022 08:39:13 +0100 Subject: [PATCH 569/606] expansions: first_match deterministic --- spot/tl/expansions.cc | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 4fb2c91c3..689e90a82 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -236,10 +236,45 @@ namespace spot { auto exps = expansion(f[0], d, owner); - expansion_t res; + expansion_t ndet_res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, formula::first_match(form)}); + ndet_res.insert({bdd_l, form}); + } + + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : ndet_res) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + // we don't need to determinize the expansion, it's already + // deterministic + for (auto& [_, dest] : ndet_res) + dest = formula::first_match(dest); + return ndet_res; + } + + expansion_t res; + // TODO: extraire en fonction indépendante + lambda choix wrapper + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : ndet_res) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res.insert({l, formula::first_match(or_dests)}); + dests.clear(); } return res; From 3c6929829d8c70f993442d03f8412ccf8eab78d1 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 15 Dec 2022 10:44:37 +0100 Subject: [PATCH 570/606] expansions: split-off OrRat case --- spot/tl/expansions.cc | 35 ++++++++++++++++++++++------------- 1 
file changed, 22 insertions(+), 13 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 689e90a82..b3f6eed9b 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -305,7 +305,6 @@ namespace spot } case op::AndRat: - case op::OrRat: { expansion_t res; for (const auto& sub_f : f) @@ -314,9 +313,6 @@ namespace spot if (exps.empty()) { - if (f.kind() == op::OrRat) - continue; - // op::AndRat: one of the expansions was empty (the only // edge was `false`), so the AndRat is empty as // well @@ -337,15 +333,6 @@ namespace spot { if ((l_key & r_key) != bddfalse) insert_or_merge(new_res, l_key & r_key, formula::multop(f.kind(), {l_val, r_val})); - - if (f.is(op::OrRat)) - { - if ((l_key & !r_key) != bddfalse) - insert_or_merge(new_res, l_key & !r_key, l_val); - - if ((!l_key & r_key) != bddfalse) - insert_or_merge(new_res, !l_key & r_key, r_val); - } } } @@ -355,6 +342,28 @@ namespace spot return res; } + case op::OrRat: + { + expansion_t res; + for (const auto& sub_f : f) + { + auto exps = expansion(sub_f, d, owner); + if (exps.empty()) + continue; + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + for (const auto& [label, dest] : exps) + insert_or_merge(res, label, dest); + } + + return res; + } + default: std::cerr << "unimplemented kind " << static_cast(f.kind()) From 9361116431a0a39aabce1418906a2d51306881a5 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 21 Dec 2022 11:05:16 +0100 Subject: [PATCH 571/606] expansions: multiple implementations --- spot/tl/expansions.cc | 214 ++++++++++++++++++++++++++++++++++-------- spot/tl/expansions.hh | 12 +++ 2 files changed, 188 insertions(+), 38 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index b3f6eed9b..8a7e3d8ad 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -28,10 +28,47 @@ namespace spot { namespace { - static void - insert_or_merge(expansion_t& exp, bdd letter, formula suffix) + class expansion_basic final : expansion_builder { - auto res = exp.insert({letter, suffix}); + public: + using exp_map = expansion_builder::exp_map; + + expansion_basic() + {} + + expansion_basic(exp_map&& m) + : bdd2formula_(m) + , formula2bdd_() + {} + + void insert(bdd letter, formula suffix) final; + + void finalize() final + {} + + exp_map& result() final + { + return bdd2formula_; + } + + bool empty() final + { + return bdd2formula_.empty(); + } + + void clear() final + { + bdd2formula_.clear(); + } + + private: + exp_map bdd2formula_; + std::map formula2bdd_; + }; + + void expansion_basic::insert(bdd letter, formula suffix) + { + auto res = bdd2formula_.insert({letter, suffix}); if (!res.second) { auto it = res.first; @@ -39,6 +76,93 @@ namespace spot } } + class expansion_merge_formulas final : expansion_builder + { + public: + using exp_map = expansion_builder::exp_map; + + expansion_merge_formulas() + {} + + expansion_merge_formulas(exp_map&& m) + : res_() + , terms_(m.begin(), m.end()) + {} + + void insert(bdd letter, formula suffix) final; + + void finalize() final; + + exp_map& result() final + { + return res_; + } + + bool empty() final + { + return terms_.empty(); + } + + void clear() final + { + terms_.clear(); + res_.clear(); + } + + private: + std::vector> terms_; + exp_map res_; + }; + + void expansion_merge_formulas::insert(bdd letter, formula suffix) + { + terms_.push_back({letter, suffix}); + } + + void expansion_merge_formulas::finalize() + { + res_.clear(); + + // Given such terms: + // + // - a . ϕ1 + // - a . ϕ2 + // - b . 
ϕ1 + // + // Merge them by suffix: + // + // - (a ∨ b) . ϕ1 + // - a . ϕ2 + std::map suffix2letter; + for (const auto& [letter, suffix]: terms_) + { + auto res = suffix2letter.insert({suffix, letter}); + if (!res.second) + { + auto it = res.first; + it->second |= letter; + } + } + + // Given such terms: + // + // - a . ϕ1 + // - a . ϕ2 + // + // Merge them by letter: + // + // - a . (ϕ1 ∨ ϕ2) + for (const auto& [suffix, letter]: suffix2letter) + { + auto res = res_.insert({letter, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + } + // FIXME: could probably just return a map directly static std::vector formula_aps(formula f) @@ -58,6 +182,7 @@ namespace spot return std::vector(res.begin(), res.end()); } + formula rewrite_and_nlm(formula f) { @@ -130,6 +255,8 @@ namespace spot expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner) { + using expansion_type = expansion_merge_formulas; + if (f.is_boolean()) { auto f_bdd = formula_to_bdd(f, d, owner); @@ -155,10 +282,10 @@ namespace spot { auto exps = expansion(f[0], d, owner); - expansion_t res; + expansion_type res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); + res.insert(bdd_l, formula::Concat({form, f.all_but(0)})); } if (f[0].accepts_eword()) @@ -166,10 +293,12 @@ namespace spot auto exps_rest = expansion(f.all_but(0), d, owner); for (const auto& [bdd_l, form] : exps_rest) { - insert_or_merge(res, bdd_l, form); + res.insert(bdd_l, form); } } - return res; + + res.finalize(); + return res.result(); } case op::FStar: @@ -187,10 +316,10 @@ namespace spot auto E_i_j_minus = formula::FStar(E, min, max); auto exp = expansion(E, d, owner); - expansion_t res; + expansion_type res; for (const auto& [li, ei] : exp) { - insert_or_merge(res, li, formula::Fusion({ei, E_i_j_minus})); + res.insert(li, formula::Fusion({ei, E_i_j_minus})); if (ei.accepts_eword() && f.min() != 0) { @@ -198,14 +327,15 @@ namespace spot { // FIXME: build bdd once if ((li & ki) != bddfalse) - insert_or_merge(res, li & ki, fi); + res.insert(li & ki, fi); } } } if (f.min() == 0) - insert_or_merge(res, bddtrue, formula::eword()); + res.insert(bddtrue, formula::eword()); - return res; + res.finalize(); + return res.result(); } case op::Star: @@ -217,13 +347,14 @@ namespace spot auto exps = expansion(f[0], d, owner); - expansion_t res; + expansion_type res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + res.insert(bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})); } - return res; + res.finalize(); + return res.result(); } case op::AndNLM: @@ -236,16 +367,17 @@ namespace spot { auto exps = expansion(f[0], d, owner); - expansion_t ndet_res; + expansion_type ndet_res; for (const auto& [bdd_l, form] : exps) { - ndet_res.insert({bdd_l, form}); + ndet_res.insert(bdd_l, form); } bdd or_labels = bddfalse; bdd support = bddtrue; bool is_det = true; - for (const auto& [l, _] : ndet_res) + ndet_res.finalize(); + for (const auto& [l, _] : ndet_res.result()) { support &= bdd_support(l); if (is_det) @@ -257,32 +389,33 @@ namespace spot { // we don't need to determinize the expansion, it's already // deterministic - for (auto& [_, dest] : ndet_res) + for (auto& [_, dest] : ndet_res.result()) dest = formula::first_match(dest); - return ndet_res; + return ndet_res.result(); } - expansion_t res; + expansion_type res; // TODO: extraire en fonction indépendante + lambda 
choix wrapper std::vector dests; for (bdd l: minterms_of(or_labels, support)) { - for (const auto& [ndet_label, ndet_dest] : ndet_res) + for (const auto& [ndet_label, ndet_dest] : ndet_res.result()) { if (bdd_implies(l, ndet_label)) dests.push_back(ndet_dest); } formula or_dests = formula::OrRat(dests); - res.insert({l, formula::first_match(or_dests)}); + res.insert(l, formula::first_match(or_dests)); dests.clear(); } - return res; + res.finalize(); + return res.result(); } case op::Fusion: { - expansion_t res; + expansion_type res; formula E = f[0]; formula F = f.all_but(0); @@ -296,17 +429,18 @@ namespace spot { for (const auto& [kj, fj] : Fj) if ((li & kj) != bddfalse) - insert_or_merge(res, li & kj, fj); + res.insert(li & kj, fj); } - insert_or_merge(res, li, formula::Fusion({ei, F})); + res.insert(li, formula::Fusion({ei, F})); } - return res; + res.finalize(); + return res.result(); } case op::AndRat: { - expansion_t res; + expansion_type res; for (const auto& sub_f : f) { auto exps = expansion(sub_f, d, owner); @@ -322,29 +456,32 @@ namespace spot if (res.empty()) { - res = std::move(exps); + res = expansion_type(std::move(exps)); + res.finalize(); continue; } - expansion_t new_res; + expansion_type new_res; for (const auto& [l_key, l_val] : exps) { - for (const auto& [r_key, r_val] : res) + for (const auto& [r_key, r_val] : res.result()) { if ((l_key & r_key) != bddfalse) - insert_or_merge(new_res, l_key & r_key, formula::multop(f.kind(), {l_val, r_val})); + new_res.insert(l_key & r_key, formula::multop(f.kind(), {l_val, r_val})); } } res = std::move(new_res); + res.finalize(); } - return res; + res.finalize(); + return res.result(); } case op::OrRat: { - expansion_t res; + expansion_type res; for (const auto& sub_f : f) { auto exps = expansion(sub_f, d, owner); @@ -353,15 +490,16 @@ namespace spot if (res.empty()) { - res = std::move(exps); + res = expansion_type(std::move(exps)); continue; } for (const auto& [label, dest] : exps) - insert_or_merge(res, label, dest); + res.insert(label, dest); } - return res; + res.finalize(); + return res.result(); } default: diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index af80d7e8b..8a5e3cb07 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -32,6 +32,18 @@ namespace spot { using expansion_t = std::map; + class expansion_builder + { + public: + using exp_map = std::map; + + virtual void insert(bdd letter, formula suffix) = 0; + virtual void finalize() = 0; + virtual exp_map& result() = 0; + virtual bool empty() = 0; + virtual void clear() = 0; + }; + SPOT_API expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner); From 12a8d5382dd8d157b8b4e80b4dd4615184fafd48 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 20 Jan 2023 14:28:35 +0100 Subject: [PATCH 572/606] expansions: add BDD method --- spot/tl/expansions.cc | 260 ++++++++++++++++++++++++++++++++++++------ spot/tl/expansions.hh | 12 +- 2 files changed, 233 insertions(+), 39 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 8a7e3d8ad..593bd0c4d 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -18,6 +18,7 @@ // along with this program. If not, see . 
#include "config.h" +#include #include #include #include @@ -33,10 +34,10 @@ namespace spot public: using exp_map = expansion_builder::exp_map; - expansion_basic() + expansion_basic(bdd_dict_ptr d) {} - expansion_basic(exp_map&& m) + expansion_basic(exp_map&& m, bdd_dict_ptr d) : bdd2formula_(m) , formula2bdd_() {} @@ -81,10 +82,10 @@ namespace spot public: using exp_map = expansion_builder::exp_map; - expansion_merge_formulas() + expansion_merge_formulas(bdd_dict_ptr d) {} - expansion_merge_formulas(exp_map&& m) + expansion_merge_formulas(exp_map&& m, bdd_dict_ptr d) : res_() , terms_(m.begin(), m.end()) {} @@ -163,6 +164,182 @@ namespace spot } } + class expansion_bdd final : expansion_builder + { + public: + using exp_map = expansion_builder::exp_map; + + expansion_bdd(bdd_dict_ptr d) + : anon_set_(bddtrue) + , d_(d) + {} + + expansion_bdd(exp_map&& m, bdd_dict_ptr d) + : anon_set_(bddtrue) + , d_(d) + { + for (const auto& [letter, suffix] : m) + { + insert(letter, suffix); + } + } + + expansion_bdd(const expansion_bdd&) = delete; + + expansion_bdd& + operator=(const expansion_bdd& other) = delete; + + expansion_bdd& + operator=(const expansion_bdd&& other) + { + d_->unregister_all_my_variables(this); + + anon_set_ = std::move(other.anon_set_); + exp_ = std::move(other.exp_); + res_ = std::move(other.res_); + formula2bdd_ = std::move(other.formula2bdd_); + bdd2formula_ = std::move(other.bdd2formula_); + + d_ = other.d_; + d_->register_all_variables_of(&other, this); + + return *this; + } + + ~expansion_bdd() + { + d_->unregister_all_my_variables(this); + } + + void insert(bdd letter, formula suffix) final; + + void finalize() final; + + exp_map& result() final + { + return res_; + } + + bool empty() final + { + return formula2bdd_.empty(); + } + + void clear() final + { + formula2bdd_.clear(); + bdd2formula_.clear(); + exp_ = bddfalse; + anon_set_ = bddtrue; + res_.clear(); + } + + private: + bdd exp_; + bdd anon_set_; + std::map formula2bdd_; + std::map bdd2formula_; + exp_map res_; + bdd_dict_ptr d_; + + formula var_to_formula(int var); + formula conj_bdd_to_sere(bdd b); + }; + + formula + expansion_bdd::var_to_formula(int var) + { + formula f = bdd2formula_[var]; + assert(f); + return f; + } + + formula + expansion_bdd::conj_bdd_to_sere(bdd b) + { + if (b == bddtrue) + return formula::tt(); + if (b == bddfalse) + return formula::ff(); + + // Unroll the first loop of the next do/while loop so that we + // do not have to create v when b is not a conjunction. 
+ formula res = var_to_formula(bdd_var(b)); + bdd high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + if (b == bddtrue) + return res; + std::vector v{std::move(res)}; + do + { + res = var_to_formula(bdd_var(b)); + high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + assert(b != bddfalse); + v.emplace_back(std::move(res)); + } + while (b != bddtrue); + return formula::multop(op::AndRat, std::move(v)); + } + + void expansion_bdd::insert(bdd letter, formula suffix) + { + + int anon_var_num; + auto it = formula2bdd_.find(suffix); + if (it != formula2bdd_.end()) + { + anon_var_num = it->second; + } + else + { + anon_var_num = d_->register_anonymous_variables(1, this); + formula2bdd_.insert({suffix, anon_var_num}); + bdd2formula_.insert({anon_var_num, suffix}); + } + + bdd var = bdd_ithvar(anon_var_num); + anon_set_ &= var; + exp_ |= letter & var; + } + + void expansion_bdd::finalize() + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + auto it = res_.insert({letter, dest}); + if (!it.second) + { + auto it2 = it.first; + it2->second = formula::OrRat({it2->second, dest}); + } + } + } + // FIXME: could probably just return a map directly static std::vector formula_aps(formula f) @@ -252,11 +429,11 @@ namespace spot return formula::OrRat(res); } - expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner) - { - using expansion_type = expansion_merge_formulas; + template + expansion_t + expansion_impl(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts) + { if (f.is_boolean()) { auto f_bdd = formula_to_bdd(f, d, owner); @@ -280,9 +457,9 @@ namespace spot case op::Concat: { - auto exps = expansion(f[0], d, owner); + auto exps = expansion(f[0], d, owner, opts); - expansion_type res; + ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) { res.insert(bdd_l, formula::Concat({form, f.all_but(0)})); @@ -290,7 +467,7 @@ namespace spot if (f[0].accepts_eword()) { - auto exps_rest = expansion(f.all_but(0), d, owner); + auto exps_rest = expansion(f.all_but(0), d, owner, opts); for (const auto& [bdd_l, form] : exps_rest) { res.insert(bdd_l, form); @@ -315,15 +492,15 @@ namespace spot auto E_i_j_minus = formula::FStar(E, min, max); - auto exp = expansion(E, d, owner); - expansion_type res; + auto exp = expansion(E, d, owner, opts); + ExpansionBuilder res(d); for (const auto& [li, ei] : exp) { res.insert(li, formula::Fusion({ei, E_i_j_minus})); if (ei.accepts_eword() && f.min() != 0) { - for (const auto& [ki, fi] : expansion(E_i_j_minus, d, owner)) + for (const auto& [ki, fi] : expansion(E_i_j_minus, d, owner, opts)) { // FIXME: build bdd once if ((li & ki) != bddfalse) @@ -345,9 +522,9 @@ namespace spot ? 
formula::unbounded() : (f.max() - 1); - auto exps = expansion(f[0], d, owner); + auto exps = expansion(f[0], d, owner, opts); - expansion_type res; + ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) { res.insert(bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})); @@ -360,14 +537,14 @@ namespace spot case op::AndNLM: { formula rewrite = rewrite_and_nlm(f); - return expansion(rewrite, d, owner); + return expansion(rewrite, d, owner, opts); } case op::first_match: { - auto exps = expansion(f[0], d, owner); + auto exps = expansion(f[0], d, owner, opts); - expansion_type ndet_res; + ExpansionBuilder ndet_res(d); for (const auto& [bdd_l, form] : exps) { ndet_res.insert(bdd_l, form); @@ -394,7 +571,7 @@ namespace spot return ndet_res.result(); } - expansion_type res; + ExpansionBuilder res(d); // TODO: extraire en fonction indépendante + lambda choix wrapper std::vector dests; for (bdd l: minterms_of(or_labels, support)) @@ -415,13 +592,13 @@ namespace spot case op::Fusion: { - expansion_type res; + ExpansionBuilder res(d); formula E = f[0]; formula F = f.all_but(0); - expansion_t Ei = expansion(E, d, owner); + expansion_t Ei = expansion(E, d, owner, opts); // TODO: std::option - expansion_t Fj = expansion(F, d, owner); + expansion_t Fj = expansion(F, d, owner, opts); for (const auto& [li, ei] : Ei) { @@ -440,10 +617,10 @@ namespace spot case op::AndRat: { - expansion_type res; + ExpansionBuilder res(d); for (const auto& sub_f : f) { - auto exps = expansion(sub_f, d, owner); + auto exps = expansion(sub_f, d, owner, opts); if (exps.empty()) { @@ -456,12 +633,12 @@ namespace spot if (res.empty()) { - res = expansion_type(std::move(exps)); + res = ExpansionBuilder(std::move(exps), d); res.finalize(); continue; } - expansion_type new_res; + ExpansionBuilder new_res(d); for (const auto& [l_key, l_val] : exps) { for (const auto& [r_key, r_val] : res.result()) @@ -481,16 +658,16 @@ namespace spot case op::OrRat: { - expansion_type res; + ExpansionBuilder res(d); for (const auto& sub_f : f) { - auto exps = expansion(sub_f, d, owner); + auto exps = expansion(sub_f, d, owner, opts); if (exps.empty()) continue; if (res.empty()) { - res = expansion_type(std::move(exps)); + res = ExpansionBuilder(std::move(exps), d); continue; } @@ -509,18 +686,29 @@ namespace spot SPOT_UNIMPLEMENTED(); } - return {}; - } + return {}; + } + + expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts) + { + if (opts & expansion_builder::Basic) + return expansion_impl(f, d, owner, opts); + else if (opts & expansion_builder::MergeSuffix) + return expansion_impl(f, d, owner, opts); + else // expansion_builder::Bdd + return expansion_impl(f, d, owner, opts); + } twa_graph_ptr - expand_automaton(formula f, bdd_dict_ptr d) + expand_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts) { - auto finite = expand_finite_automaton(f, d); + auto finite = expand_finite_automaton(f, d, opts); return from_finite(finite); } twa_graph_ptr - expand_finite_automaton(formula f, bdd_dict_ptr d) + expand_finite_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts) { auto aut = make_twa_graph(d); @@ -575,7 +763,7 @@ namespace spot ? 
acc_mark : acc_cond::mark_t(); - auto exp = expansion(curr_f, d, aut.get()); + auto exp = expansion(curr_f, d, aut.get(), opts); for (const auto& [letter, suffix] : exp) { diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 8a5e3cb07..eb6d6e60f 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -42,17 +42,23 @@ namespace spot virtual exp_map& result() = 0; virtual bool empty() = 0; virtual void clear() = 0; + enum expand_opt { + Deterministic = 1, + Basic = 2, + MergeSuffix = 4, + Bdd = 8, + }; }; SPOT_API expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner); + expansion(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts); SPOT_API formula expansion_to_formula(expansion_t e, bdd_dict_ptr& d); SPOT_API twa_graph_ptr - expand_automaton(formula f, bdd_dict_ptr d); + expand_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts); SPOT_API twa_graph_ptr - expand_finite_automaton(formula f, bdd_dict_ptr d); + expand_finite_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts); } From faaefa74244fe3bc9bdfd3ab783c690e1d7c12d2 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 1 Feb 2023 17:31:22 +0100 Subject: [PATCH 573/606] expansions: fix bdd method --- spot/tl/expansions.cc | 231 +++++++++++++++++++++++++++++------------- spot/tl/expansions.hh | 19 ++-- 2 files changed, 168 insertions(+), 82 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 593bd0c4d..1a966936a 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -29,6 +29,18 @@ namespace spot { namespace { + class expansion_builder + { + public: + using exp_map = std::map; + + virtual void insert(bdd letter, formula suffix) = 0; + virtual void finalize(bool deterministic, std::function wrap = formula_identity) = 0; + virtual exp_map& result() = 0; + virtual bool empty() = 0; + virtual void clear() = 0; + }; + class expansion_basic final : expansion_builder { public: @@ -44,8 +56,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize() final - {} + void finalize(bool deterministic, std::function wrap = formula_identity) final; exp_map& result() final { @@ -77,6 +88,48 @@ namespace spot } } + void expansion_basic::finalize(bool deterministic, std::function wrap) + { + if (!deterministic) + return; + + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : bdd2formula_) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + // we don't need to determinize the expansion, it's already + // deterministic + for (auto& [_, dest] : bdd2formula_) + dest = wrap(dest); + return; + } + + exp_map res; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : bdd2formula_) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res.insert({l, wrap(or_dests)}); + dests.clear(); + } + + bdd2formula_ = std::move(res); + } + class expansion_merge_formulas final : expansion_builder { public: @@ -92,7 +145,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize() final; + void finalize(bool deterministic, std::function wrap = formula_identity) final; exp_map& result() final { @@ -120,7 +173,7 @@ namespace spot terms_.push_back({letter, suffix}); } - void expansion_merge_formulas::finalize() + void 
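+    // (editorial illustration, not part of the original patch)
+    // When `deterministic` is requested, overlapping labels are split on
+    // the minterms of their union.  E.g. the non-deterministic terms
+    //   a -> phi1   and   b -> phi2
+    // become
+    //   a&b -> phi1|phi2,   a&!b -> phi1,   !a&b -> phi2,
+    // each destination being passed through `wrap` (identity by default).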
expansion_merge_formulas::finalize(bool deterministic, std::function wrap) { res_.clear(); @@ -162,6 +215,45 @@ namespace spot it->second = formula::OrRat({it->second, suffix}); } } + + if (!deterministic) + return; + + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : res_) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + // we don't need to determinize the expansion, it's already + // deterministic + for (auto& [_, dest] : res_) + dest = wrap(dest); + return; + } + + exp_map res; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : res_) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res.insert({l, wrap(or_dests)}); + dests.clear(); + } + + res_ = std::move(res); } class expansion_bdd final : expansion_builder @@ -213,7 +305,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize() final; + void finalize(bool deterministic, std::function wrap = formula_identity) final; exp_map& result() final { @@ -244,6 +336,7 @@ namespace spot formula var_to_formula(int var); formula conj_bdd_to_sere(bdd b); + formula bdd_to_sere(bdd b); }; formula @@ -320,22 +413,52 @@ namespace spot anon_set_ &= var; exp_ |= letter & var; } + formula + expansion_bdd::bdd_to_sere(bdd f) + { + if (f == bddfalse) + return formula::ff(); - void expansion_bdd::finalize() + std::vector v; + minato_isop isop(f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_bdd_to_sere(cube)); + return formula::OrRat(std::move(v)); + } + + void expansion_bdd::finalize(bool deterministic, std::function wrap) { - minato_isop isop(exp_); - bdd cube; - while ((cube = isop.next()) != bddfalse) + if (deterministic) { - bdd letter = bdd_exist(cube, anon_set_); - bdd suffix = bdd_existcomp(cube, anon_set_); - formula dest = conj_bdd_to_sere(suffix); - - auto it = res_.insert({letter, dest}); - if (!it.second) + bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); + bdd or_labels = bdd_exist(exp_, anon_set_); + for (bdd letter: minterms_of(exp_, prop_set)) { - auto it2 = it.first; - it2->second = formula::OrRat({it2->second, dest}); + bdd dest_bdd = bdd_restrict(exp_, letter); + formula dest = wrap(bdd_to_sere(dest_bdd)); + + auto it = res_.insert({letter, dest}); + assert(it.second); + (void) it; + } + } + else + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + auto it = res_.insert({letter, dest}); + if (!it.second) + { + auto it2 = it.first; + it2->second = formula::OrRat({it2->second, dest}); + } } } } @@ -432,7 +555,7 @@ namespace spot template expansion_t - expansion_impl(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts) + expansion_impl(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) { if (f.is_boolean()) { @@ -474,7 +597,7 @@ namespace spot } } - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -511,7 +634,7 @@ namespace spot if (f.min() == 0) res.insert(bddtrue, formula::eword()); - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -530,7 +653,7 @@ namespace spot res.insert(bdd_l, 
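 	      // (illustration added by the editor, not in the patch)
 	      // e.g. for f = a[*2..3]: exp(a) = { a -> [*0] }, so the pair
 	      // inserted here is a -> [*0];a[*1..2] (essentially a[*1..2]):
 	      // read one `a`, then a[*1..2] remains to be matched.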
formula::Concat({form, formula::Star(f[0], min, max)})); } - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -544,49 +667,15 @@ namespace spot { auto exps = expansion(f[0], d, owner, opts); - ExpansionBuilder ndet_res(d); + ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) { - ndet_res.insert(bdd_l, form); + res.insert(bdd_l, form); } - bdd or_labels = bddfalse; - bdd support = bddtrue; - bool is_det = true; - ndet_res.finalize(); - for (const auto& [l, _] : ndet_res.result()) - { - support &= bdd_support(l); - if (is_det) - is_det = !bdd_have_common_assignment(l, or_labels); - or_labels |= l; - } - - if (is_det) - { - // we don't need to determinize the expansion, it's already - // deterministic - for (auto& [_, dest] : ndet_res.result()) - dest = formula::first_match(dest); - return ndet_res.result(); - } - - ExpansionBuilder res(d); - // TODO: extraire en fonction indépendante + lambda choix wrapper - std::vector dests; - for (bdd l: minterms_of(or_labels, support)) - { - for (const auto& [ndet_label, ndet_dest] : ndet_res.result()) - { - if (bdd_implies(l, ndet_label)) - dests.push_back(ndet_dest); - } - formula or_dests = formula::OrRat(dests); - res.insert(l, formula::first_match(or_dests)); - dests.clear(); - } - - res.finalize(); + res.finalize(true, [](formula f){ + return formula::first_match(f); + }); return res.result(); } @@ -611,7 +700,7 @@ namespace spot res.insert(li, formula::Fusion({ei, F})); } - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -634,7 +723,7 @@ namespace spot if (res.empty()) { res = ExpansionBuilder(std::move(exps), d); - res.finalize(); + res.finalize(false); continue; } @@ -649,10 +738,10 @@ namespace spot } res = std::move(new_res); - res.finalize(); + res.finalize(false); } - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -675,7 +764,7 @@ namespace spot res.insert(label, dest); } - res.finalize(); + res.finalize(opts & exp_opts::Deterministic); return res.result(); } @@ -690,25 +779,25 @@ namespace spot } expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts) + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) { - if (opts & expansion_builder::Basic) + if (opts & exp_opts::Basic) return expansion_impl(f, d, owner, opts); - else if (opts & expansion_builder::MergeSuffix) + else if (opts & exp_opts::MergeSuffix) return expansion_impl(f, d, owner, opts); - else // expansion_builder::Bdd + else // exp_opts::Bdd return expansion_impl(f, d, owner, opts); } twa_graph_ptr - expand_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts) + expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) { auto finite = expand_finite_automaton(f, d, opts); return from_finite(finite); } twa_graph_ptr - expand_finite_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts) + expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) { auto aut = make_twa_graph(d); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index eb6d6e60f..4ca15b174 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -32,16 +32,13 @@ namespace spot { using expansion_t = std::map; - class expansion_builder + formula formula_identity(formula f) { - public: - using exp_map = std::map; + return f; + } - virtual void insert(bdd letter, formula suffix) = 0; - virtual void finalize() = 0; - 
virtual exp_map& result() = 0; - virtual bool empty() = 0; - virtual void clear() = 0; + struct exp_opts + { enum expand_opt { Deterministic = 1, Basic = 2, @@ -51,14 +48,14 @@ namespace spot }; SPOT_API expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner, expansion_builder::expand_opt opts); + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts); SPOT_API formula expansion_to_formula(expansion_t e, bdd_dict_ptr& d); SPOT_API twa_graph_ptr - expand_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts); + expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); SPOT_API twa_graph_ptr - expand_finite_automaton(formula f, bdd_dict_ptr d, expansion_builder::expand_opt opts); + expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); } From ce9a94f224cf16c31979ea1c78ef3746106ddee6 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 6 Feb 2023 11:04:47 +0100 Subject: [PATCH 574/606] expansions: determinize only once per state --- spot/tl/expansions.cc | 59 ++++++++++++++++++++++--------------------- spot/tl/expansions.hh | 6 +---- 2 files changed, 31 insertions(+), 34 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 1a966936a..40fe68415 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -35,7 +35,7 @@ namespace spot using exp_map = std::map; virtual void insert(bdd letter, formula suffix) = 0; - virtual void finalize(bool deterministic, std::function wrap = formula_identity) = 0; + virtual void finalize(bool deterministic) = 0; virtual exp_map& result() = 0; virtual bool empty() = 0; virtual void clear() = 0; @@ -56,7 +56,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize(bool deterministic, std::function wrap = formula_identity) final; + void finalize(bool deterministic) final; exp_map& result() final { @@ -88,7 +88,7 @@ namespace spot } } - void expansion_basic::finalize(bool deterministic, std::function wrap) + void expansion_basic::finalize(bool deterministic) { if (!deterministic) return; @@ -108,8 +108,6 @@ namespace spot { // we don't need to determinize the expansion, it's already // deterministic - for (auto& [_, dest] : bdd2formula_) - dest = wrap(dest); return; } @@ -123,7 +121,7 @@ namespace spot dests.push_back(ndet_dest); } formula or_dests = formula::OrRat(dests); - res.insert({l, wrap(or_dests)}); + res.insert({l, or_dests}); dests.clear(); } @@ -145,7 +143,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize(bool deterministic, std::function wrap = formula_identity) final; + void finalize(bool deterministic) final; exp_map& result() final { @@ -173,7 +171,7 @@ namespace spot terms_.push_back({letter, suffix}); } - void expansion_merge_formulas::finalize(bool deterministic, std::function wrap) + void expansion_merge_formulas::finalize(bool deterministic) { res_.clear(); @@ -234,8 +232,6 @@ namespace spot { // we don't need to determinize the expansion, it's already // deterministic - for (auto& [_, dest] : res_) - dest = wrap(dest); return; } @@ -249,7 +245,7 @@ namespace spot dests.push_back(ndet_dest); } formula or_dests = formula::OrRat(dests); - res.insert({l, wrap(or_dests)}); + res.insert({l, or_dests}); dests.clear(); } @@ -305,7 +301,7 @@ namespace spot void insert(bdd letter, formula suffix) final; - void finalize(bool deterministic, std::function wrap = formula_identity) final; + void finalize(bool deterministic) final; exp_map& result() final { @@ -427,7 
+423,7 @@ namespace spot return formula::OrRat(std::move(v)); } - void expansion_bdd::finalize(bool deterministic, std::function wrap) + void expansion_bdd::finalize(bool deterministic) { if (deterministic) { @@ -436,7 +432,7 @@ namespace spot for (bdd letter: minterms_of(exp_, prop_set)) { bdd dest_bdd = bdd_restrict(exp_, letter); - formula dest = wrap(bdd_to_sere(dest_bdd)); + formula dest = bdd_to_sere(dest_bdd); auto it = res_.insert({letter, dest}); assert(it.second); @@ -567,6 +563,10 @@ namespace spot return {{f_bdd, formula::eword()}}; } + auto rec = [&d, owner](formula f){ + return expansion_impl(f, d, owner, exp_opts::None); + }; + switch (f.kind()) { @@ -580,7 +580,7 @@ namespace spot case op::Concat: { - auto exps = expansion(f[0], d, owner, opts); + auto exps = rec(f[0]); ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) @@ -590,7 +590,7 @@ namespace spot if (f[0].accepts_eword()) { - auto exps_rest = expansion(f.all_but(0), d, owner, opts); + auto exps_rest = rec(f.all_but(0)); for (const auto& [bdd_l, form] : exps_rest) { res.insert(bdd_l, form); @@ -615,7 +615,7 @@ namespace spot auto E_i_j_minus = formula::FStar(E, min, max); - auto exp = expansion(E, d, owner, opts); + auto exp = rec(E); ExpansionBuilder res(d); for (const auto& [li, ei] : exp) { @@ -623,7 +623,7 @@ namespace spot if (ei.accepts_eword() && f.min() != 0) { - for (const auto& [ki, fi] : expansion(E_i_j_minus, d, owner, opts)) + for (const auto& [ki, fi] : rec(E_i_j_minus)) { // FIXME: build bdd once if ((li & ki) != bddfalse) @@ -645,7 +645,7 @@ namespace spot ? formula::unbounded() : (f.max() - 1); - auto exps = expansion(f[0], d, owner, opts); + auto exps = rec(f[0]); ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) @@ -660,12 +660,12 @@ namespace spot case op::AndNLM: { formula rewrite = rewrite_and_nlm(f); - return expansion(rewrite, d, owner, opts); + return rec(rewrite); } case op::first_match: { - auto exps = expansion(f[0], d, owner, opts); + auto exps = rec(f[0]); ExpansionBuilder res(d); for (const auto& [bdd_l, form] : exps) @@ -673,10 +673,11 @@ namespace spot res.insert(bdd_l, form); } - res.finalize(true, [](formula f){ - return formula::first_match(f); - }); - return res.result(); + res.finalize(true); + auto res2 = res.result(); + for (auto& [_, dest] : res2) + dest = formula::first_match(dest); + return res2; } case op::Fusion: @@ -685,9 +686,9 @@ namespace spot formula E = f[0]; formula F = f.all_but(0); - expansion_t Ei = expansion(E, d, owner, opts); + expansion_t Ei = rec(E); // TODO: std::option - expansion_t Fj = expansion(F, d, owner, opts); + expansion_t Fj = rec(F); for (const auto& [li, ei] : Ei) { @@ -709,7 +710,7 @@ namespace spot ExpansionBuilder res(d); for (const auto& sub_f : f) { - auto exps = expansion(sub_f, d, owner, opts); + auto exps = rec(sub_f); if (exps.empty()) { @@ -750,7 +751,7 @@ namespace spot ExpansionBuilder res(d); for (const auto& sub_f : f) { - auto exps = expansion(sub_f, d, owner, opts); + auto exps = rec(sub_f); if (exps.empty()) continue; diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 4ca15b174..c8046a7a4 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -32,14 +32,10 @@ namespace spot { using expansion_t = std::map; - formula formula_identity(formula f) - { - return f; - } - struct exp_opts { enum expand_opt { + None = 0, Deterministic = 1, Basic = 2, MergeSuffix = 4, From 003230ed19d6a977c3144b9529c34786b6a11da4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 13 Feb 2023 
14:35:47 +0100 Subject: [PATCH 575/606] expansions: multimap version --- spot/tl/expansions.cc | 335 ++++++++++++++++++++++++++++++++++++++++++ spot/tl/expansions.hh | 9 ++ 2 files changed, 344 insertions(+) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 40fe68415..20efe4eb2 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -548,6 +548,255 @@ namespace spot return formula::OrRat(res); } + std::multimap + expansion_simple(formula f, const bdd_dict_ptr& d, void *owner) + { + using exp_t = std::multimap; + + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (f_bdd == bddfalse) + return {}; + + return {{f_bdd, formula::eword()}}; + } + + auto rec = [&d, owner](formula f){ + return expansion_simple(f, d, owner); + }; + + + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + return {{bddfalse, formula::ff()}}; + + case op::Concat: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); + } + + if (f[0].accepts_eword()) + { + auto exps_rest = rec(f.all_but(0)); + for (const auto& [bdd_l, form] : exps_rest) + { + res.insert({bdd_l, form}); + } + } + + return res; + } + + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return {{bddtrue, formula::eword()}}; + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto E_i_j_minus = formula::FStar(E, min, max); + + auto exp = rec(E); + exp_t res; + for (const auto& [li, ei] : exp) + { + res.insert({li, formula::Fusion({ei, E_i_j_minus})}); + + if (ei.accepts_eword() && f.min() != 0) + { + for (const auto& [ki, fi] : rec(E_i_j_minus)) + { + // FIXME: build bdd once + if ((li & ki) != bddfalse) + res.insert({li & ki, fi}); + } + } + } + if (f.min() == 0) + res.insert({bddtrue, formula::eword()}); + + return res; + } + + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? 
formula::unbounded() + : (f.max() - 1); + + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + } + + return res; + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + return rec(rewrite); + } + + case op::first_match: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, form}); + } + + // determinize + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : res) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + return res; + + exp_t res_det; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : res) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res_det.insert({l, or_dests}); + dests.clear(); + } + + for (auto& [_, dest] : res_det) + dest = formula::first_match(dest); + return res_det; + } + + case op::Fusion: + { + exp_t res; + formula E = f[0]; + formula F = f.all_but(0); + + exp_t Ei = rec(E); + // TODO: std::option + exp_t Fj = rec(F); + + for (const auto& [li, ei] : Ei) + { + if (ei.accepts_eword()) + { + for (const auto& [kj, fj] : Fj) + if ((li & kj) != bddfalse) + res.insert({li & kj, fj}); + } + res.insert({li, formula::Fusion({ei, F})}); + } + + return res; + } + + case op::AndRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + + if (exps.empty()) + { + // op::AndRat: one of the expansions was empty (the only + // edge was `false`), so the AndRat is empty as + // well + res.clear(); + break; + } + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + exp_t new_res; + for (const auto& [l_key, l_val] : exps) + { + for (const auto& [r_key, r_val] : res) + { + if ((l_key & r_key) != bddfalse) + new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + } + } + + res = std::move(new_res); + } + + return res; + } + + case op::OrRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + if (exps.empty()) + continue; + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + for (const auto& [label, dest] : exps) + res.insert({label, dest}); + } + + return res; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return {}; + } template expansion_t @@ -875,4 +1124,90 @@ namespace spot aut->merge_edges(); return aut; } + + twa_graph_ptr + expand_simple_automaton(formula f, bdd_dict_ptr d) + { + auto finite = expand_simple_finite_automaton(f, d); + return from_finite(finite); + } + + twa_graph_ptr + expand_simple_finite_automaton(formula f, bdd_dict_ptr d) + { + auto aut = make_twa_graph(d); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula suffix) -> unsigned + { + unsigned dst; + 
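+        // Worked example (editorial, not part of the original patch):
+        // translating a;b starts in a state for the residual "a;b"; its
+        // expansion { a -> b } leads to a state "b", whose expansion
+        // { b -> [*0] } leads to the accepting state "[*0]".  find_dst
+        // reuses the state if this residual formula was already seen,
+        // otherwise it creates one and queues it on `todo`.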
auto it = formula2state.find(suffix); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({suffix, dst}); + formula2state.insert({suffix, dst}); + std::ostringstream ss; + ss << suffix; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark= curr_f.accepts_eword() + ? acc_mark + : acc_cond::mark_t(); + + auto exp = expansion_simple(curr_f, d, aut.get()); + + for (const auto& [letter, suffix] : exp) + { + if (suffix.is(op::ff)) + continue; + + auto dst = find_dst(suffix); + aut->new_edge(curr_state, dst, letter, curr_acc_mark); + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + aut->merge_edges(); + return aut; + } } diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index c8046a7a4..ff6977f9b 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -43,6 +43,15 @@ namespace spot }; }; + SPOT_API twa_graph_ptr + expand_simple_automaton(formula f, bdd_dict_ptr d); + + SPOT_API twa_graph_ptr + expand_simple_finite_automaton(formula f, bdd_dict_ptr d); + + SPOT_API std::multimap + expansion_simple(formula f, const bdd_dict_ptr& d, void *owner); + SPOT_API expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts); From 518c58fe5245b801439c0a7a2bdc88ec3b8e7afb Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 6 Mar 2023 18:37:12 +0100 Subject: [PATCH 576/606] expansions: latest implementation --- spot/tl/expansions.cc | 560 +++++++++++++++++++++++++++++++++++++++++- spot/tl/expansions.hh | 20 ++ 2 files changed, 576 insertions(+), 4 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 20efe4eb2..1d225b603 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -548,6 +548,471 @@ namespace spot return formula::OrRat(res); } + class bdd_finalizer + { + public: + bdd_finalizer(std::multimap& exp, bdd_dict_ptr d) + : anon_set_(bddtrue) + , d_(d) + { + for (const auto& [prefix, suffix] : exp) + { + int anon_var_num; + auto it = formula2bdd_.find(suffix); + if (it != formula2bdd_.end()) + { + anon_var_num = it->second; + } + else + { + anon_var_num = d_->register_anonymous_variables(1, this); + formula2bdd_.insert({suffix, anon_var_num}); + bdd2formula_.insert({anon_var_num, suffix}); + } + + bdd var = bdd_ithvar(anon_var_num); + anon_set_ &= var; + exp_ |= prefix & var; + } + } + + ~bdd_finalizer() + { + d_->unregister_all_my_variables(this); + } + + std::multimap + simplify(exp_opts_new::expand_opt_new opts); + + private: + bdd exp_; + bdd anon_set_; + std::map formula2bdd_; + std::map bdd2formula_; + bdd_dict_ptr d_; + + formula var_to_formula(int var); + formula conj_bdd_to_sere(bdd b); + formula bdd_to_sere(bdd b); + }; + + formula + bdd_finalizer::var_to_formula(int var) + { + formula f = bdd2formula_[var]; + assert(f); + return f; + } + + formula + bdd_finalizer::bdd_to_sere(bdd f) + { + if (f == bddfalse) + return formula::ff(); + + std::vector v; + minato_isop isop(f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_bdd_to_sere(cube)); + return formula::OrRat(std::move(v)); + } + + formula + bdd_finalizer::conj_bdd_to_sere(bdd 
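+    // (editorial note, not in the original patch) the argument is a cube
+    // over the anonymous variables registered in the constructor: each
+    // positive variable is mapped back to the suffix formula it encodes,
+    // a negated variable yields the negation of that formula, and the
+    // results are conjoined with AndRat.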
b) + { + if (b == bddtrue) + return formula::tt(); + if (b == bddfalse) + return formula::ff(); + + // Unroll the first loop of the next do/while loop so that we + // do not have to create v when b is not a conjunction. + formula res = var_to_formula(bdd_var(b)); + bdd high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + if (b == bddtrue) + return res; + std::vector v{std::move(res)}; + do + { + res = var_to_formula(bdd_var(b)); + high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + assert(b != bddfalse); + v.emplace_back(std::move(res)); + } + while (b != bddtrue); + return formula::multop(op::AndRat, std::move(v)); + } + + std::multimap + bdd_finalizer::simplify(exp_opts_new::expand_opt_new opts) + { + std::multimap res; + + if (opts & exp_opts_new::expand_opt_new::BddMinterm) + { + bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); + bdd or_labels = bdd_exist(exp_, anon_set_); + for (bdd letter: minterms_of(exp_, prop_set)) + { + bdd dest_bdd = bdd_restrict(exp_, letter); + formula dest = bdd_to_sere(dest_bdd); + + auto it = res.insert({letter, dest}); + assert(it.second); + (void) it; + } + } + else // BddIsop + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + res.insert({letter, dest}); + } + } + + return res; + } + + void + finalize_new(std::multimap& exp, exp_opts_new::expand_opt_new opts, bdd_dict_ptr d) + { + if (opts & (exp_opts_new::expand_opt_new::BddIsop + | exp_opts_new::expand_opt_new::BddMinterm)) + { + bdd_finalizer bddf(exp, d); + exp = bddf.simplify(opts); + } + + if (opts & exp_opts_new::expand_opt_new::UniqueSuffix) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto [suffix, prefix] : unique_map) + { + exp.insert({prefix, suffix}); + } + } + + if (opts & exp_opts_new::expand_opt_new::UniquePrefix) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + exp.clear(); + for (const auto [prefix, suffix] : unique_map) + { + exp.insert({prefix, suffix}); + } + } + } + + std::multimap + expansion_new(formula f, const bdd_dict_ptr& d, void *owner, exp_opts_new::expand_opt_new opts) + { + using exp_t = std::multimap; + + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (f_bdd == bddfalse) + return {}; + + return {{f_bdd, formula::eword()}}; + } + + auto rec = [&d, owner, opts](formula f){ + return expansion_new(f, d, owner, exp_opts_new::None); + }; + + + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + return {{bddfalse, formula::ff()}}; + + case op::Concat: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); + } + + if (f[0].accepts_eword()) + { + auto exps_rest = rec(f.all_but(0)); + for (const auto& [bdd_l, form] : exps_rest) + { + 
res.insert({bdd_l, form}); + } + } + + finalize_new(res, opts, d); + return res; + } + + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return {{bddtrue, formula::eword()}}; + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto E_i_j_minus = formula::FStar(E, min, max); + + auto exp = rec(E); + exp_t res; + for (const auto& [li, ei] : exp) + { + res.insert({li, formula::Fusion({ei, E_i_j_minus})}); + + if (ei.accepts_eword() && f.min() != 0) + { + for (const auto& [ki, fi] : rec(E_i_j_minus)) + { + // FIXME: build bdd once + if ((li & ki) != bddfalse) + res.insert({li & ki, fi}); + } + } + } + if (f.min() == 0) + res.insert({bddtrue, formula::eword()}); + + finalize_new(res, opts, d); + return res; + } + + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + } + + finalize_new(res, opts, d); + return res; + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + auto res = rec(rewrite); + finalize_new(res, opts, d); + return res; + } + + case op::first_match: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.insert({bdd_l, form}); + } + + // determinize + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : res) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + finalize_new(res, opts, d); + return res; + } + + exp_t res_det; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : res) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res_det.insert({l, or_dests}); + dests.clear(); + } + + for (auto& [_, dest] : res_det) + dest = formula::first_match(dest); + finalize_new(res_det, opts, d); + return res_det; + } + + case op::Fusion: + { + exp_t res; + formula E = f[0]; + formula F = f.all_but(0); + + exp_t Ei = rec(E); + // TODO: std::option + exp_t Fj = rec(F); + + for (const auto& [li, ei] : Ei) + { + if (ei.accepts_eword()) + { + for (const auto& [kj, fj] : Fj) + if ((li & kj) != bddfalse) + res.insert({li & kj, fj}); + } + res.insert({li, formula::Fusion({ei, F})}); + } + + finalize_new(res, opts, d); + return res; + } + + case op::AndRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + + if (exps.empty()) + { + // op::AndRat: one of the expansions was empty (the only + // edge was `false`), so the AndRat is empty as + // well + res.clear(); + break; + } + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + exp_t new_res; + for (const auto& [l_key, l_val] : exps) + { + for (const auto& [r_key, r_val] : res) + { + if ((l_key & r_key) != bddfalse) + new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + } + } + + res = std::move(new_res); + } + + finalize_new(res, opts, d); + return res; + } + + case op::OrRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + if (exps.empty()) + continue; + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + for (const auto& 
[label, dest] : exps) + res.insert({label, dest}); + } + + finalize_new(res, opts, d); + return res; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return {}; + } + std::multimap expansion_simple(formula f, const bdd_dict_ptr& d, void *owner) { @@ -1031,12 +1496,13 @@ namespace spot expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) { - if (opts & exp_opts::Basic) - return expansion_impl(f, d, owner, opts); + + if (opts & exp_opts::Bdd) + return expansion_impl(f, d, owner, opts); else if (opts & exp_opts::MergeSuffix) return expansion_impl(f, d, owner, opts); - else // exp_opts::Bdd - return expansion_impl(f, d, owner, opts); + else // exp_opts::Basic + return expansion_impl(f, d, owner, opts); } twa_graph_ptr @@ -1210,4 +1676,90 @@ namespace spot aut->merge_edges(); return aut; } + + twa_graph_ptr + expand_new_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts) + { + auto finite = expand_new_finite_automaton(f, d, opts); + return from_finite(finite); + } + + twa_graph_ptr + expand_new_finite_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts) + { + auto aut = make_twa_graph(d); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula suffix) -> unsigned + { + unsigned dst; + auto it = formula2state.find(suffix); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({suffix, dst}); + formula2state.insert({suffix, dst}); + std::ostringstream ss; + ss << suffix; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark= curr_f.accepts_eword() + ? 
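+          // (editorial note, not part of the patch) a residual formula
+          // that accepts the empty word makes its state accepting; the
+          // automaton was declared state-based above, so the mark is put
+          // on every outgoing edge of that state.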
acc_mark + : acc_cond::mark_t(); + + auto exp = expansion_new(curr_f, d, aut.get(), opts); + + for (const auto& [letter, suffix] : exp) + { + if (suffix.is(op::ff)) + continue; + + auto dst = find_dst(suffix); + aut->new_edge(curr_state, dst, letter, curr_acc_mark); + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + aut->merge_edges(); + return aut; + } } diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index ff6977f9b..4286e8fd6 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -43,6 +43,26 @@ namespace spot }; }; + struct exp_opts_new + { + enum expand_opt_new { + None = 0, + UniqueSuffix = 1, + UniquePrefix = 2, + BddIsop = 4, + BddMinterm = 8, + }; + }; + + SPOT_API std::multimap + expansion_new(formula f, const bdd_dict_ptr& d, void *owner, exp_opts_new::expand_opt_new opts); + + SPOT_API twa_graph_ptr + expand_new_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts); + + SPOT_API twa_graph_ptr + expand_new_finite_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts); + SPOT_API twa_graph_ptr expand_simple_automaton(formula f, bdd_dict_ptr d); From b5f11f7366bf92816cbac5cf6c7b39dbfce17786 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 6 Mar 2023 18:37:28 +0100 Subject: [PATCH 577/606] expansions: allow toggling merge_edges off --- spot/tl/expansions.cc | 6 +++++- spot/tl/expansions.hh | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 1d225b603..f68fb2d9d 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -1759,7 +1759,11 @@ namespace spot } aut->set_named_prop("state-names", state_names); - aut->merge_edges(); + + if ((opts & exp_opts_new::MergeEdges) + && !(opts & exp_opts_new::UniqueSuffix)) + aut->merge_edges(); + return aut; } } diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 4286e8fd6..52e83917f 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -51,6 +51,7 @@ namespace spot UniquePrefix = 2, BddIsop = 4, BddMinterm = 8, + MergeEdges = 16, }; }; From 382c57923c8dfcd4ca00e15d5e830f4d1a3a36e4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Mon, 6 Mar 2023 18:37:44 +0100 Subject: [PATCH 578/606] twaalgos: ltl2tgba_fm: allow disabling SCC trim --- spot/twaalgos/ltl2tgba_fm.cc | 16 ++++++++++++---- spot/twaalgos/ltl2tgba_fm.hh | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 838db28be..dd7bb9182 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -102,7 +102,7 @@ namespace spot { typedef twa_graph::namer namer; public: - ratexp_to_dfa(translate_dict& dict); + ratexp_to_dfa(translate_dict& dict, bool disable_scc_trimming = false); std::vector> succ(formula f); ~ratexp_to_dfa(); @@ -117,6 +117,7 @@ namespace spot typedef robin_hood::unordered_node_map f2a_t; std::vector automata_; f2a_t f2a_; + bool disable_scc_trimming_; }; // Helper dictionary. 
We represent formulae using BDDs to @@ -902,8 +903,9 @@ namespace spot } - ratexp_to_dfa::ratexp_to_dfa(translate_dict& dict) + ratexp_to_dfa::ratexp_to_dfa(translate_dict& dict, bool disable_scc_trimming) : dict_(dict) + , disable_scc_trimming_(disable_scc_trimming) { } @@ -956,6 +958,12 @@ namespace spot } } + if (disable_scc_trimming_) + { + automata_.emplace_back(a, namer); + return labelled_aut(a, namer); + } + // The following code trims the automaton in a crude way by // eliminating SCCs that are not coaccessible. It does not // actually remove the states, it simply marks the corresponding @@ -2181,7 +2189,7 @@ namespace spot } twa_graph_ptr - sere_to_tgba(formula f, const bdd_dict_ptr& dict) + sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming) { f = negative_normal_form(f); @@ -2189,7 +2197,7 @@ namespace spot twa_graph_ptr a = make_twa_graph(dict); translate_dict d(a, s, false, false, false); - ratexp_to_dfa sere2dfa(d); + ratexp_to_dfa sere2dfa(d, disable_scc_trimming); auto [dfa, namer, state] = sere2dfa.succ(f); diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 7dba4aee0..51de038e1 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -91,5 +91,5 @@ namespace spot bool label_with_ltl = false); SPOT_API twa_graph_ptr - sere_to_tgba(formula f, const bdd_dict_ptr& dict); + sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming = false); } From 77d25d87a1b1404d5a60eba3d89c1bef73c9968e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 9 Mar 2023 10:26:50 +0100 Subject: [PATCH 579/606] expansions: fix first_match case --- spot/tl/expansions.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index f68fb2d9d..e92a74621 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -892,6 +892,8 @@ namespace spot if (is_det) { + for (auto& [_, dest] : res) + dest = formula::first_match(dest); finalize_new(res, opts, d); return res; } From a4091ffc371dceed08f73f218cb2c4e250539309 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 16 Mar 2023 07:39:34 +0100 Subject: [PATCH 580/606] expansions: remove multiple old implementations --- spot/tl/expansions.cc | 1419 +++++------------------------------------ spot/tl/expansions.hh | 37 +- 2 files changed, 167 insertions(+), 1289 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index e92a74621..4a6e6ca0c 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -29,436 +29,6 @@ namespace spot { namespace { - class expansion_builder - { - public: - using exp_map = std::map; - - virtual void insert(bdd letter, formula suffix) = 0; - virtual void finalize(bool deterministic) = 0; - virtual exp_map& result() = 0; - virtual bool empty() = 0; - virtual void clear() = 0; - }; - - class expansion_basic final : expansion_builder - { - public: - using exp_map = expansion_builder::exp_map; - - expansion_basic(bdd_dict_ptr d) - {} - - expansion_basic(exp_map&& m, bdd_dict_ptr d) - : bdd2formula_(m) - , formula2bdd_() - {} - - void insert(bdd letter, formula suffix) final; - - void finalize(bool deterministic) final; - - exp_map& result() final - { - return bdd2formula_; - } - - bool empty() final - { - return bdd2formula_.empty(); - } - - void clear() final - { - bdd2formula_.clear(); - } - - private: - exp_map bdd2formula_; - std::map formula2bdd_; - }; - - void expansion_basic::insert(bdd letter, formula suffix) - { - auto res = bdd2formula_.insert({letter, 
suffix}); - if (!res.second) - { - auto it = res.first; - it->second = formula::OrRat({it->second, suffix}); - } - } - - void expansion_basic::finalize(bool deterministic) - { - if (!deterministic) - return; - - bdd or_labels = bddfalse; - bdd support = bddtrue; - bool is_det = true; - for (const auto& [l, _] : bdd2formula_) - { - support &= bdd_support(l); - if (is_det) - is_det = !bdd_have_common_assignment(l, or_labels); - or_labels |= l; - } - - if (is_det) - { - // we don't need to determinize the expansion, it's already - // deterministic - return; - } - - exp_map res; - std::vector dests; - for (bdd l: minterms_of(or_labels, support)) - { - for (const auto& [ndet_label, ndet_dest] : bdd2formula_) - { - if (bdd_implies(l, ndet_label)) - dests.push_back(ndet_dest); - } - formula or_dests = formula::OrRat(dests); - res.insert({l, or_dests}); - dests.clear(); - } - - bdd2formula_ = std::move(res); - } - - class expansion_merge_formulas final : expansion_builder - { - public: - using exp_map = expansion_builder::exp_map; - - expansion_merge_formulas(bdd_dict_ptr d) - {} - - expansion_merge_formulas(exp_map&& m, bdd_dict_ptr d) - : res_() - , terms_(m.begin(), m.end()) - {} - - void insert(bdd letter, formula suffix) final; - - void finalize(bool deterministic) final; - - exp_map& result() final - { - return res_; - } - - bool empty() final - { - return terms_.empty(); - } - - void clear() final - { - terms_.clear(); - res_.clear(); - } - - private: - std::vector> terms_; - exp_map res_; - }; - - void expansion_merge_formulas::insert(bdd letter, formula suffix) - { - terms_.push_back({letter, suffix}); - } - - void expansion_merge_formulas::finalize(bool deterministic) - { - res_.clear(); - - // Given such terms: - // - // - a . ϕ1 - // - a . ϕ2 - // - b . ϕ1 - // - // Merge them by suffix: - // - // - (a ∨ b) . ϕ1 - // - a . ϕ2 - std::map suffix2letter; - for (const auto& [letter, suffix]: terms_) - { - auto res = suffix2letter.insert({suffix, letter}); - if (!res.second) - { - auto it = res.first; - it->second |= letter; - } - } - - // Given such terms: - // - // - a . ϕ1 - // - a . ϕ2 - // - // Merge them by letter: - // - // - a . 
(ϕ1 ∨ ϕ2) - for (const auto& [suffix, letter]: suffix2letter) - { - auto res = res_.insert({letter, suffix}); - if (!res.second) - { - auto it = res.first; - it->second = formula::OrRat({it->second, suffix}); - } - } - - if (!deterministic) - return; - - bdd or_labels = bddfalse; - bdd support = bddtrue; - bool is_det = true; - for (const auto& [l, _] : res_) - { - support &= bdd_support(l); - if (is_det) - is_det = !bdd_have_common_assignment(l, or_labels); - or_labels |= l; - } - - if (is_det) - { - // we don't need to determinize the expansion, it's already - // deterministic - return; - } - - exp_map res; - std::vector dests; - for (bdd l: minterms_of(or_labels, support)) - { - for (const auto& [ndet_label, ndet_dest] : res_) - { - if (bdd_implies(l, ndet_label)) - dests.push_back(ndet_dest); - } - formula or_dests = formula::OrRat(dests); - res.insert({l, or_dests}); - dests.clear(); - } - - res_ = std::move(res); - } - - class expansion_bdd final : expansion_builder - { - public: - using exp_map = expansion_builder::exp_map; - - expansion_bdd(bdd_dict_ptr d) - : anon_set_(bddtrue) - , d_(d) - {} - - expansion_bdd(exp_map&& m, bdd_dict_ptr d) - : anon_set_(bddtrue) - , d_(d) - { - for (const auto& [letter, suffix] : m) - { - insert(letter, suffix); - } - } - - expansion_bdd(const expansion_bdd&) = delete; - - expansion_bdd& - operator=(const expansion_bdd& other) = delete; - - expansion_bdd& - operator=(const expansion_bdd&& other) - { - d_->unregister_all_my_variables(this); - - anon_set_ = std::move(other.anon_set_); - exp_ = std::move(other.exp_); - res_ = std::move(other.res_); - formula2bdd_ = std::move(other.formula2bdd_); - bdd2formula_ = std::move(other.bdd2formula_); - - d_ = other.d_; - d_->register_all_variables_of(&other, this); - - return *this; - } - - ~expansion_bdd() - { - d_->unregister_all_my_variables(this); - } - - void insert(bdd letter, formula suffix) final; - - void finalize(bool deterministic) final; - - exp_map& result() final - { - return res_; - } - - bool empty() final - { - return formula2bdd_.empty(); - } - - void clear() final - { - formula2bdd_.clear(); - bdd2formula_.clear(); - exp_ = bddfalse; - anon_set_ = bddtrue; - res_.clear(); - } - - private: - bdd exp_; - bdd anon_set_; - std::map formula2bdd_; - std::map bdd2formula_; - exp_map res_; - bdd_dict_ptr d_; - - formula var_to_formula(int var); - formula conj_bdd_to_sere(bdd b); - formula bdd_to_sere(bdd b); - }; - - formula - expansion_bdd::var_to_formula(int var) - { - formula f = bdd2formula_[var]; - assert(f); - return f; - } - - formula - expansion_bdd::conj_bdd_to_sere(bdd b) - { - if (b == bddtrue) - return formula::tt(); - if (b == bddfalse) - return formula::ff(); - - // Unroll the first loop of the next do/while loop so that we - // do not have to create v when b is not a conjunction. 
- formula res = var_to_formula(bdd_var(b)); - bdd high = bdd_high(b); - if (high == bddfalse) - { - res = formula::Not(res); - b = bdd_low(b); - } - else - { - assert(bdd_low(b) == bddfalse); - b = high; - } - if (b == bddtrue) - return res; - std::vector v{std::move(res)}; - do - { - res = var_to_formula(bdd_var(b)); - high = bdd_high(b); - if (high == bddfalse) - { - res = formula::Not(res); - b = bdd_low(b); - } - else - { - assert(bdd_low(b) == bddfalse); - b = high; - } - assert(b != bddfalse); - v.emplace_back(std::move(res)); - } - while (b != bddtrue); - return formula::multop(op::AndRat, std::move(v)); - } - - void expansion_bdd::insert(bdd letter, formula suffix) - { - - int anon_var_num; - auto it = formula2bdd_.find(suffix); - if (it != formula2bdd_.end()) - { - anon_var_num = it->second; - } - else - { - anon_var_num = d_->register_anonymous_variables(1, this); - formula2bdd_.insert({suffix, anon_var_num}); - bdd2formula_.insert({anon_var_num, suffix}); - } - - bdd var = bdd_ithvar(anon_var_num); - anon_set_ &= var; - exp_ |= letter & var; - } - formula - expansion_bdd::bdd_to_sere(bdd f) - { - if (f == bddfalse) - return formula::ff(); - - std::vector v; - minato_isop isop(f); - bdd cube; - while ((cube = isop.next()) != bddfalse) - v.emplace_back(conj_bdd_to_sere(cube)); - return formula::OrRat(std::move(v)); - } - - void expansion_bdd::finalize(bool deterministic) - { - if (deterministic) - { - bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); - bdd or_labels = bdd_exist(exp_, anon_set_); - for (bdd letter: minterms_of(exp_, prop_set)) - { - bdd dest_bdd = bdd_restrict(exp_, letter); - formula dest = bdd_to_sere(dest_bdd); - - auto it = res_.insert({letter, dest}); - assert(it.second); - (void) it; - } - } - else - { - minato_isop isop(exp_); - bdd cube; - while ((cube = isop.next()) != bddfalse) - { - bdd letter = bdd_exist(cube, anon_set_); - bdd suffix = bdd_existcomp(cube, anon_set_); - formula dest = conj_bdd_to_sere(suffix); - - auto it = res_.insert({letter, dest}); - if (!it.second) - { - auto it2 = it.first; - it2->second = formula::OrRat({it2->second, dest}); - } - } - } - } - // FIXME: could probably just return a map directly static std::vector formula_aps(formula f) @@ -479,8 +49,8 @@ namespace spot return std::vector(res.begin(), res.end()); } - formula - rewrite_and_nlm(formula f) + formula + rewrite_and_nlm(formula f) { unsigned s = f.size(); std::vector final; @@ -532,30 +102,15 @@ namespace spot } return formula::OrRat(std::move(disj)); } - } - formula - expansion_to_formula(expansion_t e, bdd_dict_ptr& d) - { - std::vector res; - - for (const auto& [key, val] : e) - { - formula prefix = bdd_to_formula(key, d); - res.push_back(formula::Concat({prefix, val})); - } - - return formula::OrRat(res); - } - - class bdd_finalizer - { - public: - bdd_finalizer(std::multimap& exp, bdd_dict_ptr d) - : anon_set_(bddtrue) - , d_(d) - { - for (const auto& [prefix, suffix] : exp) + class bdd_finalizer + { + public: + bdd_finalizer(std::multimap& exp, bdd_dict_ptr d) + : anon_set_(bddtrue) + , d_(d) + { + for (const auto& [prefix, suffix] : exp) { int anon_var_num; auto it = formula2bdd_.find(suffix); @@ -574,185 +129,201 @@ namespace spot anon_set_ &= var; exp_ |= prefix & var; } - } + } - ~bdd_finalizer() - { - d_->unregister_all_my_variables(this); - } + ~bdd_finalizer() + { + d_->unregister_all_my_variables(this); + } - std::multimap - simplify(exp_opts_new::expand_opt_new opts); + expansion_t + simplify(exp_opts::expand_opt opts); - private: - bdd exp_; - bdd 
anon_set_; - std::map formula2bdd_; - std::map bdd2formula_; - bdd_dict_ptr d_; + private: + bdd exp_; + bdd anon_set_; + std::map formula2bdd_; + std::map bdd2formula_; + bdd_dict_ptr d_; - formula var_to_formula(int var); - formula conj_bdd_to_sere(bdd b); - formula bdd_to_sere(bdd b); - }; + formula var_to_formula(int var); + formula conj_bdd_to_sere(bdd b); + formula bdd_to_sere(bdd b); + }; - formula - bdd_finalizer::var_to_formula(int var) - { - formula f = bdd2formula_[var]; - assert(f); - return f; - } + formula + bdd_finalizer::var_to_formula(int var) + { + formula f = bdd2formula_[var]; + assert(f); + return f; + } - formula - bdd_finalizer::bdd_to_sere(bdd f) - { - if (f == bddfalse) - return formula::ff(); + formula + bdd_finalizer::bdd_to_sere(bdd f) + { + if (f == bddfalse) + return formula::ff(); - std::vector v; - minato_isop isop(f); - bdd cube; - while ((cube = isop.next()) != bddfalse) - v.emplace_back(conj_bdd_to_sere(cube)); - return formula::OrRat(std::move(v)); - } + std::vector v; + minato_isop isop(f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_bdd_to_sere(cube)); + return formula::OrRat(std::move(v)); + } - formula - bdd_finalizer::conj_bdd_to_sere(bdd b) - { - if (b == bddtrue) - return formula::tt(); - if (b == bddfalse) - return formula::ff(); + formula + bdd_finalizer::conj_bdd_to_sere(bdd b) + { + if (b == bddtrue) + return formula::tt(); + if (b == bddfalse) + return formula::ff(); - // Unroll the first loop of the next do/while loop so that we - // do not have to create v when b is not a conjunction. - formula res = var_to_formula(bdd_var(b)); - bdd high = bdd_high(b); - if (high == bddfalse) + // Unroll the first loop of the next do/while loop so that we + // do not have to create v when b is not a conjunction. 
+ formula res = var_to_formula(bdd_var(b)); + bdd high = bdd_high(b); + if (high == bddfalse) { res = formula::Not(res); b = bdd_low(b); } - else + else { assert(bdd_low(b) == bddfalse); b = high; } - if (b == bddtrue) - return res; - std::vector v{std::move(res)}; - do + if (b == bddtrue) + return res; + std::vector v{std::move(res)}; + do { res = var_to_formula(bdd_var(b)); high = bdd_high(b); if (high == bddfalse) - { - res = formula::Not(res); - b = bdd_low(b); - } + { + res = formula::Not(res); + b = bdd_low(b); + } else - { - assert(bdd_low(b) == bddfalse); - b = high; - } + { + assert(bdd_low(b) == bddfalse); + b = high; + } assert(b != bddfalse); v.emplace_back(std::move(res)); } - while (b != bddtrue); - return formula::multop(op::AndRat, std::move(v)); - } - - std::multimap - bdd_finalizer::simplify(exp_opts_new::expand_opt_new opts) - { - std::multimap res; - - if (opts & exp_opts_new::expand_opt_new::BddMinterm) - { - bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); - bdd or_labels = bdd_exist(exp_, anon_set_); - for (bdd letter: minterms_of(exp_, prop_set)) - { - bdd dest_bdd = bdd_restrict(exp_, letter); - formula dest = bdd_to_sere(dest_bdd); - - auto it = res.insert({letter, dest}); - assert(it.second); - (void) it; - } - } - else // BddIsop - { - minato_isop isop(exp_); - bdd cube; - while ((cube = isop.next()) != bddfalse) - { - bdd letter = bdd_exist(cube, anon_set_); - bdd suffix = bdd_existcomp(cube, anon_set_); - formula dest = conj_bdd_to_sere(suffix); - - res.insert({letter, dest}); - } + while (b != bddtrue); + return formula::multop(op::AndRat, std::move(v)); } - return res; - } + expansion_t + bdd_finalizer::simplify(exp_opts::expand_opt opts) + { + expansion_t res; - void - finalize_new(std::multimap& exp, exp_opts_new::expand_opt_new opts, bdd_dict_ptr d) - { - if (opts & (exp_opts_new::expand_opt_new::BddIsop - | exp_opts_new::expand_opt_new::BddMinterm)) + if (opts & exp_opts::expand_opt::BddMinterm) + { + bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); + bdd or_labels = bdd_exist(exp_, anon_set_); + for (bdd letter: minterms_of(exp_, prop_set)) + { + bdd dest_bdd = bdd_restrict(exp_, letter); + formula dest = bdd_to_sere(dest_bdd); + + auto it = res.insert({letter, dest}); + assert(it.second); + (void) it; + } + } + else // BddIsop + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + res.insert({letter, dest}); + } + } + + return res; + } + + void + finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d) + { + if (opts & (exp_opts::expand_opt::BddIsop + | exp_opts::expand_opt::BddMinterm)) { bdd_finalizer bddf(exp, d); exp = bddf.simplify(opts); } - if (opts & exp_opts_new::expand_opt_new::UniqueSuffix) + if (opts & exp_opts::expand_opt::UniqueSuffix) { std::map unique_map; for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) { - auto res = unique_map.insert({suffix, prefix}); - if (!res.second) - { - auto it = res.first; - it->second |= prefix; - } + auto it = res.first; + it->second |= prefix; } + } exp.clear(); for (const auto [suffix, prefix] : unique_map) - { - exp.insert({prefix, suffix}); - } + { + exp.insert({prefix, suffix}); + } } - if (opts & exp_opts_new::expand_opt_new::UniquePrefix) + if (opts & exp_opts::expand_opt::UniquePrefix) { std::map unique_map; for (const auto& [prefix, suffix] 
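+        // (editorial illustration, not in the original patch)
+        // UniquePrefix merges entries sharing a label, a -> phi1 and
+        // a -> phi2 becoming a -> phi1|phi2, just as UniqueSuffix above
+        // merged entries sharing a destination, a -> phi and b -> phi
+        // becoming (a|b) -> phi.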
: exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) { - auto res = unique_map.insert({prefix, suffix}); - if (!res.second) - { - auto it = res.first; - it->second = formula::OrRat({it->second, suffix}); - } + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); } + } exp.clear(); for (const auto [prefix, suffix] : unique_map) - { - exp.insert({prefix, suffix}); - } + { + exp.insert({prefix, suffix}); + } } + } } - std::multimap - expansion_new(formula f, const bdd_dict_ptr& d, void *owner, exp_opts_new::expand_opt_new opts) + formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d) + { + std::vector res; + + for (const auto& [key, val] : e) + { + formula prefix = bdd_to_formula(key, d); + res.push_back(formula::Concat({prefix, val})); + } + + return formula::OrRat(res); + } + + + expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) { using exp_t = std::multimap; @@ -767,7 +338,7 @@ namespace spot } auto rec = [&d, owner, opts](formula f){ - return expansion_new(f, d, owner, exp_opts_new::None); + return expansion(f, d, owner, exp_opts::None); }; @@ -800,7 +371,7 @@ namespace spot } } - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -837,7 +408,7 @@ namespace spot if (f.min() == 0) res.insert({bddtrue, formula::eword()}); - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -856,7 +427,7 @@ namespace spot res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); } - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -864,7 +435,7 @@ namespace spot { formula rewrite = rewrite_and_nlm(f); auto res = rec(rewrite); - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -894,7 +465,7 @@ namespace spot { for (auto& [_, dest] : res) dest = formula::first_match(dest); - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -914,7 +485,7 @@ namespace spot for (auto& [_, dest] : res_det) dest = formula::first_match(dest); - finalize_new(res_det, opts, d); + finalize(res_det, opts, d); return res_det; } @@ -939,7 +510,7 @@ namespace spot res.insert({li, formula::Fusion({ei, F})}); } - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -978,7 +549,7 @@ namespace spot res = std::move(new_res); } - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -1001,7 +572,7 @@ namespace spot res.insert({label, dest}); } - finalize_new(res, opts, d); + finalize(res, opts, d); return res; } @@ -1015,498 +586,6 @@ namespace spot return {}; } - std::multimap - expansion_simple(formula f, const bdd_dict_ptr& d, void *owner) - { - using exp_t = std::multimap; - - if (f.is_boolean()) - { - auto f_bdd = formula_to_bdd(f, d, owner); - - if (f_bdd == bddfalse) - return {}; - - return {{f_bdd, formula::eword()}}; - } - - auto rec = [&d, owner](formula f){ - return expansion_simple(f, d, owner); - }; - - - switch (f.kind()) - { - case op::ff: - case op::tt: - case op::ap: - SPOT_UNREACHABLE(); - - case op::eword: - return {{bddfalse, formula::ff()}}; - - case op::Concat: - { - auto exps = rec(f[0]); - - exp_t res; - for (const auto& [bdd_l, form] : exps) - { - res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); - } - - if (f[0].accepts_eword()) - { - auto exps_rest = rec(f.all_but(0)); - for (const auto& [bdd_l, form] : exps_rest) - { - res.insert({bdd_l, form}); - } - } - - return res; - } - - case op::FStar: - { - formula E = f[0]; - - if (f.min() == 0 && f.max() 
== 0) - return {{bddtrue, formula::eword()}}; - - auto min = f.min() == 0 ? 0 : (f.min() - 1); - auto max = f.max() == formula::unbounded() - ? formula::unbounded() - : (f.max() - 1); - - auto E_i_j_minus = formula::FStar(E, min, max); - - auto exp = rec(E); - exp_t res; - for (const auto& [li, ei] : exp) - { - res.insert({li, formula::Fusion({ei, E_i_j_minus})}); - - if (ei.accepts_eword() && f.min() != 0) - { - for (const auto& [ki, fi] : rec(E_i_j_minus)) - { - // FIXME: build bdd once - if ((li & ki) != bddfalse) - res.insert({li & ki, fi}); - } - } - } - if (f.min() == 0) - res.insert({bddtrue, formula::eword()}); - - return res; - } - - case op::Star: - { - auto min = f.min() == 0 ? 0 : (f.min() - 1); - auto max = f.max() == formula::unbounded() - ? formula::unbounded() - : (f.max() - 1); - - auto exps = rec(f[0]); - - exp_t res; - for (const auto& [bdd_l, form] : exps) - { - res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); - } - - return res; - } - - case op::AndNLM: - { - formula rewrite = rewrite_and_nlm(f); - return rec(rewrite); - } - - case op::first_match: - { - auto exps = rec(f[0]); - - exp_t res; - for (const auto& [bdd_l, form] : exps) - { - res.insert({bdd_l, form}); - } - - // determinize - bdd or_labels = bddfalse; - bdd support = bddtrue; - bool is_det = true; - for (const auto& [l, _] : res) - { - support &= bdd_support(l); - if (is_det) - is_det = !bdd_have_common_assignment(l, or_labels); - or_labels |= l; - } - - if (is_det) - return res; - - exp_t res_det; - std::vector dests; - for (bdd l: minterms_of(or_labels, support)) - { - for (const auto& [ndet_label, ndet_dest] : res) - { - if (bdd_implies(l, ndet_label)) - dests.push_back(ndet_dest); - } - formula or_dests = formula::OrRat(dests); - res_det.insert({l, or_dests}); - dests.clear(); - } - - for (auto& [_, dest] : res_det) - dest = formula::first_match(dest); - return res_det; - } - - case op::Fusion: - { - exp_t res; - formula E = f[0]; - formula F = f.all_but(0); - - exp_t Ei = rec(E); - // TODO: std::option - exp_t Fj = rec(F); - - for (const auto& [li, ei] : Ei) - { - if (ei.accepts_eword()) - { - for (const auto& [kj, fj] : Fj) - if ((li & kj) != bddfalse) - res.insert({li & kj, fj}); - } - res.insert({li, formula::Fusion({ei, F})}); - } - - return res; - } - - case op::AndRat: - { - exp_t res; - for (const auto& sub_f : f) - { - auto exps = rec(sub_f); - - if (exps.empty()) - { - // op::AndRat: one of the expansions was empty (the only - // edge was `false`), so the AndRat is empty as - // well - res.clear(); - break; - } - - if (res.empty()) - { - res = std::move(exps); - continue; - } - - exp_t new_res; - for (const auto& [l_key, l_val] : exps) - { - for (const auto& [r_key, r_val] : res) - { - if ((l_key & r_key) != bddfalse) - new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); - } - } - - res = std::move(new_res); - } - - return res; - } - - case op::OrRat: - { - exp_t res; - for (const auto& sub_f : f) - { - auto exps = rec(sub_f); - if (exps.empty()) - continue; - - if (res.empty()) - { - res = std::move(exps); - continue; - } - - for (const auto& [label, dest] : exps) - res.insert({label, dest}); - } - - return res; - } - - default: - std::cerr << "unimplemented kind " - << static_cast(f.kind()) - << std::endl; - SPOT_UNIMPLEMENTED(); - } - - return {}; - } - - template - expansion_t - expansion_impl(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) - { - if (f.is_boolean()) - { - auto f_bdd = formula_to_bdd(f, d, 
owner); - - if (f_bdd == bddfalse) - return {}; - - return {{f_bdd, formula::eword()}}; - } - - auto rec = [&d, owner](formula f){ - return expansion_impl(f, d, owner, exp_opts::None); - }; - - - switch (f.kind()) - { - case op::ff: - case op::tt: - case op::ap: - SPOT_UNREACHABLE(); - - case op::eword: - return {{bddfalse, formula::ff()}}; - - case op::Concat: - { - auto exps = rec(f[0]); - - ExpansionBuilder res(d); - for (const auto& [bdd_l, form] : exps) - { - res.insert(bdd_l, formula::Concat({form, f.all_but(0)})); - } - - if (f[0].accepts_eword()) - { - auto exps_rest = rec(f.all_but(0)); - for (const auto& [bdd_l, form] : exps_rest) - { - res.insert(bdd_l, form); - } - } - - res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - case op::FStar: - { - formula E = f[0]; - - if (f.min() == 0 && f.max() == 0) - return {{bddtrue, formula::eword()}}; - - auto min = f.min() == 0 ? 0 : (f.min() - 1); - auto max = f.max() == formula::unbounded() - ? formula::unbounded() - : (f.max() - 1); - - auto E_i_j_minus = formula::FStar(E, min, max); - - auto exp = rec(E); - ExpansionBuilder res(d); - for (const auto& [li, ei] : exp) - { - res.insert(li, formula::Fusion({ei, E_i_j_minus})); - - if (ei.accepts_eword() && f.min() != 0) - { - for (const auto& [ki, fi] : rec(E_i_j_minus)) - { - // FIXME: build bdd once - if ((li & ki) != bddfalse) - res.insert(li & ki, fi); - } - } - } - if (f.min() == 0) - res.insert(bddtrue, formula::eword()); - - res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - case op::Star: - { - auto min = f.min() == 0 ? 0 : (f.min() - 1); - auto max = f.max() == formula::unbounded() - ? formula::unbounded() - : (f.max() - 1); - - auto exps = rec(f[0]); - - ExpansionBuilder res(d); - for (const auto& [bdd_l, form] : exps) - { - res.insert(bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})); - } - - res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - case op::AndNLM: - { - formula rewrite = rewrite_and_nlm(f); - return rec(rewrite); - } - - case op::first_match: - { - auto exps = rec(f[0]); - - ExpansionBuilder res(d); - for (const auto& [bdd_l, form] : exps) - { - res.insert(bdd_l, form); - } - - res.finalize(true); - auto res2 = res.result(); - for (auto& [_, dest] : res2) - dest = formula::first_match(dest); - return res2; - } - - case op::Fusion: - { - ExpansionBuilder res(d); - formula E = f[0]; - formula F = f.all_but(0); - - expansion_t Ei = rec(E); - // TODO: std::option - expansion_t Fj = rec(F); - - for (const auto& [li, ei] : Ei) - { - if (ei.accepts_eword()) - { - for (const auto& [kj, fj] : Fj) - if ((li & kj) != bddfalse) - res.insert(li & kj, fj); - } - res.insert(li, formula::Fusion({ei, F})); - } - - res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - case op::AndRat: - { - ExpansionBuilder res(d); - for (const auto& sub_f : f) - { - auto exps = rec(sub_f); - - if (exps.empty()) - { - // op::AndRat: one of the expansions was empty (the only - // edge was `false`), so the AndRat is empty as - // well - res.clear(); - break; - } - - if (res.empty()) - { - res = ExpansionBuilder(std::move(exps), d); - res.finalize(false); - continue; - } - - ExpansionBuilder new_res(d); - for (const auto& [l_key, l_val] : exps) - { - for (const auto& [r_key, r_val] : res.result()) - { - if ((l_key & r_key) != bddfalse) - new_res.insert(l_key & r_key, formula::multop(f.kind(), {l_val, r_val})); - } - } - - res = std::move(new_res); - res.finalize(false); - } - - 
res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - case op::OrRat: - { - ExpansionBuilder res(d); - for (const auto& sub_f : f) - { - auto exps = rec(sub_f); - if (exps.empty()) - continue; - - if (res.empty()) - { - res = ExpansionBuilder(std::move(exps), d); - continue; - } - - for (const auto& [label, dest] : exps) - res.insert(label, dest); - } - - res.finalize(opts & exp_opts::Deterministic); - return res.result(); - } - - default: - std::cerr << "unimplemented kind " - << static_cast(f.kind()) - << std::endl; - SPOT_UNIMPLEMENTED(); - } - - return {}; - } - - expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) - { - - if (opts & exp_opts::Bdd) - return expansion_impl(f, d, owner, opts); - else if (opts & exp_opts::MergeSuffix) - return expansion_impl(f, d, owner, opts); - else // exp_opts::Basic - return expansion_impl(f, d, owner, opts); - } - twa_graph_ptr expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) { @@ -1589,181 +668,9 @@ namespace spot } aut->set_named_prop("state-names", state_names); - aut->merge_edges(); - return aut; - } - twa_graph_ptr - expand_simple_automaton(formula f, bdd_dict_ptr d) - { - auto finite = expand_simple_finite_automaton(f, d); - return from_finite(finite); - } - - twa_graph_ptr - expand_simple_finite_automaton(formula f, bdd_dict_ptr d) - { - auto aut = make_twa_graph(d); - - aut->prop_state_acc(true); - const auto acc_mark = aut->set_buchi(); - - auto formula2state = robin_hood::unordered_map(); - - unsigned init_state = aut->new_state(); - aut->set_init_state(init_state); - formula2state.insert({ f, init_state }); - - auto f_aps = formula_aps(f); - for (auto& ap : f_aps) - aut->register_ap(ap); - - auto todo = std::vector>(); - todo.push_back({f, init_state}); - - auto state_names = new std::vector(); - std::ostringstream ss; - ss << f; - state_names->push_back(ss.str()); - - auto find_dst = [&](formula suffix) -> unsigned - { - unsigned dst; - auto it = formula2state.find(suffix); - if (it != formula2state.end()) - { - dst = it->second; - } - else - { - dst = aut->new_state(); - todo.push_back({suffix, dst}); - formula2state.insert({suffix, dst}); - std::ostringstream ss; - ss << suffix; - state_names->push_back(ss.str()); - } - - return dst; - }; - - while (!todo.empty()) - { - auto [curr_f, curr_state] = todo[todo.size() - 1]; - todo.pop_back(); - - auto curr_acc_mark= curr_f.accepts_eword() - ? 
acc_mark - : acc_cond::mark_t(); - - auto exp = expansion_simple(curr_f, d, aut.get()); - - for (const auto& [letter, suffix] : exp) - { - if (suffix.is(op::ff)) - continue; - - auto dst = find_dst(suffix); - aut->new_edge(curr_state, dst, letter, curr_acc_mark); - } - - // if state has no transitions and should be accepting, create - // artificial transition - if (aut->get_graph().state_storage(curr_state).succ == 0 - && curr_f.accepts_eword()) - aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); - } - - aut->set_named_prop("state-names", state_names); - aut->merge_edges(); - return aut; - } - - twa_graph_ptr - expand_new_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts) - { - auto finite = expand_new_finite_automaton(f, d, opts); - return from_finite(finite); - } - - twa_graph_ptr - expand_new_finite_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts) - { - auto aut = make_twa_graph(d); - - aut->prop_state_acc(true); - const auto acc_mark = aut->set_buchi(); - - auto formula2state = robin_hood::unordered_map(); - - unsigned init_state = aut->new_state(); - aut->set_init_state(init_state); - formula2state.insert({ f, init_state }); - - auto f_aps = formula_aps(f); - for (auto& ap : f_aps) - aut->register_ap(ap); - - auto todo = std::vector>(); - todo.push_back({f, init_state}); - - auto state_names = new std::vector(); - std::ostringstream ss; - ss << f; - state_names->push_back(ss.str()); - - auto find_dst = [&](formula suffix) -> unsigned - { - unsigned dst; - auto it = formula2state.find(suffix); - if (it != formula2state.end()) - { - dst = it->second; - } - else - { - dst = aut->new_state(); - todo.push_back({suffix, dst}); - formula2state.insert({suffix, dst}); - std::ostringstream ss; - ss << suffix; - state_names->push_back(ss.str()); - } - - return dst; - }; - - while (!todo.empty()) - { - auto [curr_f, curr_state] = todo[todo.size() - 1]; - todo.pop_back(); - - auto curr_acc_mark= curr_f.accepts_eword() - ? 
acc_mark - : acc_cond::mark_t(); - - auto exp = expansion_new(curr_f, d, aut.get(), opts); - - for (const auto& [letter, suffix] : exp) - { - if (suffix.is(op::ff)) - continue; - - auto dst = find_dst(suffix); - aut->new_edge(curr_state, dst, letter, curr_acc_mark); - } - - // if state has no transitions and should be accepting, create - // artificial transition - if (aut->get_graph().state_storage(curr_state).succ == 0 - && curr_f.accepts_eword()) - aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); - } - - aut->set_named_prop("state-names", state_names); - - if ((opts & exp_opts_new::MergeEdges) - && !(opts & exp_opts_new::UniqueSuffix)) + if ((opts & exp_opts::MergeEdges) + && !(opts & exp_opts::UniqueSuffix)) aut->merge_edges(); return aut; diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 52e83917f..43a51e721 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -30,22 +30,11 @@ namespace spot { - using expansion_t = std::map; + using expansion_t = std::multimap; struct exp_opts { enum expand_opt { - None = 0, - Deterministic = 1, - Basic = 2, - MergeSuffix = 4, - Bdd = 8, - }; - }; - - struct exp_opts_new - { - enum expand_opt_new { None = 0, UniqueSuffix = 1, UniquePrefix = 2, @@ -55,33 +44,15 @@ namespace spot }; }; - SPOT_API std::multimap - expansion_new(formula f, const bdd_dict_ptr& d, void *owner, exp_opts_new::expand_opt_new opts); - - SPOT_API twa_graph_ptr - expand_new_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts); - - SPOT_API twa_graph_ptr - expand_new_finite_automaton(formula f, bdd_dict_ptr d, exp_opts_new::expand_opt_new opts); - - SPOT_API twa_graph_ptr - expand_simple_automaton(formula f, bdd_dict_ptr d); - - SPOT_API twa_graph_ptr - expand_simple_finite_automaton(formula f, bdd_dict_ptr d); - - SPOT_API std::multimap - expansion_simple(formula f, const bdd_dict_ptr& d, void *owner); - SPOT_API expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts); - SPOT_API formula - expansion_to_formula(expansion_t e, bdd_dict_ptr& d); - SPOT_API twa_graph_ptr expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); SPOT_API twa_graph_ptr expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); + + SPOT_API formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d); } From bbbcdc331af136253cc1574620063ad92ec200ad Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 4 Apr 2023 14:52:14 +0200 Subject: [PATCH 581/606] expansions: optimize sigma star encoding --- spot/tl/expansions.cc | 31 ++++++++++++++++++++++++++----- spot/tl/expansions.hh | 3 ++- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 4a6e6ca0c..39e7b0303 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -106,9 +106,10 @@ namespace spot class bdd_finalizer { public: - bdd_finalizer(std::multimap& exp, bdd_dict_ptr d) + bdd_finalizer(std::multimap& exp, bdd_dict_ptr d, bool opt_sigma_star) : anon_set_(bddtrue) , d_(d) + , opt_sigma_star_(opt_sigma_star) { for (const auto& [prefix, suffix] : exp) { @@ -120,12 +121,25 @@ namespace spot } else { - anon_var_num = d_->register_anonymous_variables(1, this); + if (opt_sigma_star_ && (suffix.is(op::Star) + && suffix[0].is(op::tt) + && suffix.min() == 0 + && suffix.max() == formula::unbounded())) + { + anon_var_num = -1; + } + else + { + anon_var_num = d_->register_anonymous_variables(1, this); + } + formula2bdd_.insert({suffix, anon_var_num}); 
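(Illustration only, not part of the patch: the test guarded by opt_sigma_star_ above recognizes the suffix [*], i.e. the SERE 1[*0..], and encodes it as bddtrue instead of allocating a fresh anonymous variable.  Since prefix & bddtrue == prefix, the ubiquitous [*] destination then disappears from the conjunction, and conj_bdd_to_sere decodes a bddtrue suffix back to 1[*0..] rather than to 1.  A minimal stand-alone sketch of that test, using a hypothetical helper name that does not exist in the patch:

    // Sketch only: mirrors the condition guarded by opt_sigma_star_.
    static bool is_sigma_star(const spot::formula& f)
    {
      return f.is(spot::op::Star)                   // f = g[*i..j]
          && f[0].is(spot::op::tt)                  // g = 1 (true)
          && f.min() == 0                           // i = 0
          && f.max() == spot::formula::unbounded(); // j = unbounded
    }
)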
bdd2formula_.insert({anon_var_num, suffix}); } - bdd var = bdd_ithvar(anon_var_num); + bdd var = bddtrue; + if (anon_var_num != -1) + var = bdd_ithvar(anon_var_num); anon_set_ &= var; exp_ |= prefix & var; } @@ -145,6 +159,7 @@ namespace spot std::map formula2bdd_; std::map bdd2formula_; bdd_dict_ptr d_; + bool opt_sigma_star_; formula var_to_formula(int var); formula conj_bdd_to_sere(bdd b); @@ -177,7 +192,13 @@ namespace spot bdd_finalizer::conj_bdd_to_sere(bdd b) { if (b == bddtrue) - return formula::tt(); + { + if (opt_sigma_star_){ + return formula::Star(formula::tt(), 0, formula::unbounded()); + } else { + return formula::tt(); + } + } if (b == bddfalse) return formula::ff(); @@ -261,7 +282,7 @@ namespace spot if (opts & (exp_opts::expand_opt::BddIsop | exp_opts::expand_opt::BddMinterm)) { - bdd_finalizer bddf(exp, d); + bdd_finalizer bddf(exp, d, opts & exp_opts::expand_opt::BddSigmaStar); exp = bddf.simplify(opts); } diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 43a51e721..1d2fbedba 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -40,7 +40,8 @@ namespace spot UniquePrefix = 2, BddIsop = 4, BddMinterm = 8, - MergeEdges = 16, + BddSigmaStar = 16, + MergeEdges = 32, }; }; From 931d39e73998a40a618abb9bbca4562b3de95d50 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 12 Apr 2023 15:15:36 +0200 Subject: [PATCH 582/606] expansions: signature merge impl --- spot/tl/expansions.cc | 40 ++++++++++++++++++++++++++++++++++++++++ spot/tl/expansions.hh | 1 + 2 files changed, 41 insertions(+) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 39e7b0303..dc0e09182 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -614,20 +614,44 @@ namespace spot return from_finite(finite); } + struct signature_hash + { + std::size_t + operator() (const std::pair>& sig) const + { + size_t hash = std::hash()(sig.first); + + for (const auto& keyvalue : sig.second) + { + hash ^= (bdd_hash()(keyvalue.first) ^ std::hash()(keyvalue.second)) + + 0x9e3779b9 + (hash << 6) + (hash >> 2); + } + + return hash; + } + }; + twa_graph_ptr expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) { + bool signature_merge = opts & exp_opts::expand_opt::SignatureMerge; + auto aut = make_twa_graph(d); aut->prop_state_acc(true); const auto acc_mark = aut->set_buchi(); auto formula2state = robin_hood::unordered_map(); + auto signature2state = std::unordered_map, unsigned, signature_hash>(); unsigned init_state = aut->new_state(); aut->set_init_state(init_state); formula2state.insert({ f, init_state }); + if (signature_merge) + signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts)}, init_state}); + + auto f_aps = formula_aps(f); for (auto& ap : f_aps) aut->register_ap(ap); @@ -650,9 +674,25 @@ namespace spot } else { + if (signature_merge) + { + auto exp = expansion(suffix, d, aut.get(), opts); + bool accepting = suffix.accepts_eword(); + auto it2 = signature2state.find({accepting, exp}); + if (it2 != signature2state.end()) + { + formula2state.insert({suffix, it2->second}); + return it2->second; + } + } + dst = aut->new_state(); todo.push_back({suffix, dst}); + formula2state.insert({suffix, dst}); + if (signature_merge) + signature2state.insert({{suffix.accepts_eword(), expansion(suffix, d, aut.get(), opts)}, dst}); + std::ostringstream ss; ss << suffix; state_names->push_back(ss.str()); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 1d2fbedba..0aec0a106 100644 --- a/spot/tl/expansions.hh +++ 
b/spot/tl/expansions.hh @@ -42,6 +42,7 @@ namespace spot BddMinterm = 8, BddSigmaStar = 16, MergeEdges = 32, + SignatureMerge = 64, }; }; From f09c1dd7f32e018be75e636889c5d92868eb4059 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 12 May 2023 08:45:59 +0200 Subject: [PATCH 583/606] expansions: simple determinization --- spot/tl/expansions.cc | 23 +++++++++++++++++++++++ spot/tl/expansions.hh | 6 ++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index dc0e09182..5fb13c0de 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -325,6 +325,29 @@ namespace spot exp.insert({prefix, suffix}); } } + + if (opts & exp_opts::expand_opt::Determinize) + { + std::multimap exp_new; + + bdd props = bddtrue; + for (const auto& [prefix, _] : exp) + props &= bdd_support(prefix); + + std::vector dests; + for (bdd letter : minterms_of(bddtrue, props)) + { + for (const auto& [prefix, suffix] : exp) + { + if (bdd_implies(letter, prefix)) + dests.push_back(suffix); + } + formula or_dests = formula::OrRat(dests); + exp_new.insert({letter, or_dests}); + dests.clear(); + } + exp = exp_new; + } } } diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 0aec0a106..949b25e29 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -41,8 +41,10 @@ namespace spot BddIsop = 4, BddMinterm = 8, BddSigmaStar = 16, - MergeEdges = 32, - SignatureMerge = 64, + BddEncode = 32, + MergeEdges = 64, + SignatureMerge = 128, + Determinize = 256, }; }; From 29e0b22c2a898522346f74ffedb3ccfba1ac32f1 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 4 Jul 2023 07:21:20 +0200 Subject: [PATCH 584/606] expansions: fixes + BDD encode changes + printer --- spot/tl/expansions.cc | 98 +++++++++++++++++++++++++++++++------------ spot/tl/expansions.hh | 3 ++ 2 files changed, 74 insertions(+), 27 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 5fb13c0de..1086b0f67 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -106,41 +106,63 @@ namespace spot class bdd_finalizer { public: - bdd_finalizer(std::multimap& exp, bdd_dict_ptr d, bool opt_sigma_star) - : anon_set_(bddtrue) - , d_(d) - , opt_sigma_star_(opt_sigma_star) - { - for (const auto& [prefix, suffix] : exp) + int encode(formula f) { - int anon_var_num; - auto it = formula2bdd_.find(suffix); + bool is_anon = false; + int var_num; + auto it = formula2bdd_.find(f); if (it != formula2bdd_.end()) { - anon_var_num = it->second; + var_num = it->second; } else { - if (opt_sigma_star_ && (suffix.is(op::Star) - && suffix[0].is(op::tt) - && suffix.min() == 0 - && suffix.max() == formula::unbounded())) + if (opt_sigma_star_ && (f.is(op::Star) + && f[0].is(op::tt) + && f.min() == 0 + && f.max() == formula::unbounded())) { - anon_var_num = -1; + var_num = bddtrue.id(); + } + else if (opt_bdd_encode_ && (f.is(op::AndRat) || f.is(op::OrRat))) + { + bdd var = f.is(op::AndRat) ? bdd(bddtrue) : bdd(bddfalse); + for (const auto& sub_f : f) + { + int bddid = encode(sub_f); + bdd subvar = bdd_ithvar(bddid); + var = f.is(op::AndRat) ? 
var & subvar : var | subvar; + } + var_num = var.id(); } else { - anon_var_num = d_->register_anonymous_variables(1, this); + var_num = d_->register_anonymous_variables(1, this); + is_anon = true; } - formula2bdd_.insert({suffix, anon_var_num}); - bdd2formula_.insert({anon_var_num, suffix}); + formula2bdd_.insert({f, var_num}); + bdd2formula_.insert({var_num, f}); } - bdd var = bddtrue; - if (anon_var_num != -1) - var = bdd_ithvar(anon_var_num); - anon_set_ &= var; + bdd var = bdd_ithvar(var_num); + + if (is_anon) + anon_set_ &= var; + + return var_num; + } + + bdd_finalizer(std::multimap& exp, bdd_dict_ptr d, bool opt_sigma_star, bool opt_bdd_encode) + : anon_set_(bddtrue) + , d_(d) + , opt_sigma_star_(opt_sigma_star) + , opt_bdd_encode_(opt_bdd_encode) + { + for (const auto& [prefix, suffix] : exp) + { + int var_num = encode(suffix); + bdd var = bdd_ithvar(var_num); exp_ |= prefix & var; } } @@ -160,6 +182,7 @@ namespace spot std::map bdd2formula_; bdd_dict_ptr d_; bool opt_sigma_star_; + bool opt_bdd_encode_; formula var_to_formula(int var); formula conj_bdd_to_sere(bdd b); @@ -249,7 +272,8 @@ namespace spot { bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); bdd or_labels = bdd_exist(exp_, anon_set_); - for (bdd letter: minterms_of(exp_, prop_set)) + // TODO: check are_equivalent avec or_labels/exp_ en premier argument + for (bdd letter: minterms_of(or_labels, prop_set)) { bdd dest_bdd = bdd_restrict(exp_, letter); formula dest = bdd_to_sere(dest_bdd); @@ -282,7 +306,7 @@ namespace spot if (opts & (exp_opts::expand_opt::BddIsop | exp_opts::expand_opt::BddMinterm)) { - bdd_finalizer bddf(exp, d, opts & exp_opts::expand_opt::BddSigmaStar); + bdd_finalizer bddf(exp, d, opts & exp_opts::expand_opt::BddSigmaStar, opts & exp_opts::expand_opt::BddEncode); exp = bddf.simplify(opts); } @@ -365,6 +389,14 @@ namespace spot return formula::OrRat(res); } + void print_expansion(const expansion_t& exp, const bdd_dict_ptr& dict) + { + for (const auto& [prefix, suffix] : exp) + { + std::cout << bdd_to_formula(prefix, dict) << ": " << suffix << std::endl; + } + } + expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) @@ -581,15 +613,26 @@ namespace spot } exp_t new_res; + bool inserted = false; for (const auto& [l_key, l_val] : exps) { for (const auto& [r_key, r_val] : res) { if ((l_key & r_key) != bddfalse) - new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + { + new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + inserted = true; + } } } + if (!inserted) + { + // all prefix conjuctions led to bddfalse, And is empty + res.clear(); + break; + } + res = std::move(new_res); } @@ -671,14 +714,15 @@ namespace spot aut->set_init_state(init_state); formula2state.insert({ f, init_state }); - if (signature_merge) - signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts)}, init_state}); - auto f_aps = formula_aps(f); for (auto& ap : f_aps) aut->register_ap(ap); + if (signature_merge) + signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts)}, init_state}); + + auto todo = std::vector>(); todo.push_back({f, init_state}); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 949b25e29..eba71db9e 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -59,4 +59,7 @@ namespace spot SPOT_API formula expansion_to_formula(expansion_t e, bdd_dict_ptr& d); + + SPOT_API void + print_expansion(const expansion_t& exp, const bdd_dict_ptr& dict); } From 
e50be0692d65f9fbb59c33e9729789001f12f0c6 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 20 Sep 2023 17:52:18 +0200 Subject: [PATCH 585/606] expansions: UniquePrefixSeenOpt --- spot/tl/expansions.cc | 71 +++++++++++++++++++++++++++++++------------ spot/tl/expansions.hh | 3 +- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 1086b0f67..8b0309246 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -301,7 +301,7 @@ namespace spot } void - finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d) + finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d, std::unordered_set* seen) { if (opts & (exp_opts::expand_opt::BddIsop | exp_opts::expand_opt::BddMinterm)) @@ -344,9 +344,37 @@ namespace spot } exp.clear(); + for (const auto [prefix, suffix] : unique_map) { - exp.insert({prefix, suffix}); + if ((opts & exp_opts::expand_opt::UniquePrefixSeenOpt) + && suffix.is(op::OrRat)) + { + std::vector merge; + std::vector single; + + for (const auto& sub_f : suffix) + { + if (seen->find(sub_f) != seen->end()) + { + single.push_back(sub_f); + } + else + { + merge.push_back(sub_f); + } + } + + for (const auto& sub_f : single) + exp.insert({prefix, sub_f}); + + if (!merge.empty()) + exp.insert({prefix, formula::OrRat(merge)}); + } + else + { + exp.insert({prefix, suffix}); + } } } @@ -399,7 +427,7 @@ namespace spot expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts) + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen) { using exp_t = std::multimap; @@ -413,8 +441,8 @@ namespace spot return {{f_bdd, formula::eword()}}; } - auto rec = [&d, owner, opts](formula f){ - return expansion(f, d, owner, exp_opts::None); + auto rec = [&d, owner, seen](formula f){ + return expansion(f, d, owner, exp_opts::None, seen); }; @@ -426,7 +454,8 @@ namespace spot SPOT_UNREACHABLE(); case op::eword: - return {{bddfalse, formula::ff()}}; + // return {{bddfalse, formula::ff()}}; + return {}; case op::Concat: { @@ -447,7 +476,7 @@ namespace spot } } - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -484,7 +513,7 @@ namespace spot if (f.min() == 0) res.insert({bddtrue, formula::eword()}); - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -503,7 +532,7 @@ namespace spot res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); } - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -511,7 +540,7 @@ namespace spot { formula rewrite = rewrite_and_nlm(f); auto res = rec(rewrite); - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -541,7 +570,7 @@ namespace spot { for (auto& [_, dest] : res) dest = formula::first_match(dest); - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -561,7 +590,7 @@ namespace spot for (auto& [_, dest] : res_det) dest = formula::first_match(dest); - finalize(res_det, opts, d); + finalize(res_det, opts, d, seen); return res_det; } @@ -586,7 +615,7 @@ namespace spot res.insert({li, formula::Fusion({ei, F})}); } - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -636,7 +665,7 @@ namespace spot res = std::move(new_res); } - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -659,7 +688,7 @@ namespace spot res.insert({label, dest}); } - finalize(res, opts, d); + finalize(res, opts, d, seen); return res; } @@ -709,6 
+738,8 @@ namespace spot auto formula2state = robin_hood::unordered_map(); auto signature2state = std::unordered_map, unsigned, signature_hash>(); + auto seen = std::unordered_set(); + seen.insert(f); unsigned init_state = aut->new_state(); aut->set_init_state(init_state); @@ -720,7 +751,7 @@ namespace spot aut->register_ap(ap); if (signature_merge) - signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts)}, init_state}); + signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts, &seen)}, init_state}); auto todo = std::vector>(); @@ -743,7 +774,7 @@ namespace spot { if (signature_merge) { - auto exp = expansion(suffix, d, aut.get(), opts); + auto exp = expansion(suffix, d, aut.get(), opts, &seen); bool accepting = suffix.accepts_eword(); auto it2 = signature2state.find({accepting, exp}); if (it2 != signature2state.end()) @@ -755,10 +786,11 @@ namespace spot dst = aut->new_state(); todo.push_back({suffix, dst}); + seen.insert(suffix); formula2state.insert({suffix, dst}); if (signature_merge) - signature2state.insert({{suffix.accepts_eword(), expansion(suffix, d, aut.get(), opts)}, dst}); + signature2state.insert({{suffix.accepts_eword(), expansion(suffix, d, aut.get(), opts, &seen)}, dst}); std::ostringstream ss; ss << suffix; @@ -777,11 +809,12 @@ namespace spot ? acc_mark : acc_cond::mark_t(); - auto exp = expansion(curr_f, d, aut.get(), opts); + auto exp = expansion(curr_f, d, aut.get(), opts, &seen); for (const auto& [letter, suffix] : exp) { if (suffix.is(op::ff)) + // TODO ASSERT NOT continue; auto dst = find_dst(suffix); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index eba71db9e..36476bd31 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -45,11 +45,12 @@ namespace spot MergeEdges = 64, SignatureMerge = 128, Determinize = 256, + UniquePrefixSeenOpt = 512, }; }; SPOT_API expansion_t - expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts); + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen = nullptr); SPOT_API twa_graph_ptr expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); From d760d2cb3b65f094fb507585ad32255a2845a151 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 27 Sep 2023 11:36:17 +0200 Subject: [PATCH 586/606] expansions: US order in pipeline configurable --- spot/tl/expansions.cc | 24 ++++++++++++++++++++++-- spot/tl/expansions.hh | 3 ++- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 8b0309246..564468452 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -310,7 +310,7 @@ namespace spot exp = bddf.simplify(opts); } - if (opts & exp_opts::expand_opt::UniqueSuffix) + if (opts & exp_opts::expand_opt::UniqueSuffixPre) { std::map unique_map; for (const auto& [prefix, suffix] : exp) @@ -378,6 +378,26 @@ namespace spot } } + if (opts & exp_opts::expand_opt::UniqueSuffixPost) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto [suffix, prefix] : unique_map) + { + exp.insert({prefix, suffix}); + } + } + if (opts & exp_opts::expand_opt::Determinize) { std::multimap exp_new; @@ -831,7 +851,7 @@ namespace spot aut->set_named_prop("state-names", state_names); if ((opts & exp_opts::MergeEdges) - && !(opts & exp_opts::UniqueSuffix)) + && 
!(opts & exp_opts::UniqueSuffixPre || opts & exp_opts::UniqueSuffixPost)) aut->merge_edges(); return aut; diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 36476bd31..2418b1103 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -36,7 +36,7 @@ namespace spot { enum expand_opt { None = 0, - UniqueSuffix = 1, + UniqueSuffixPre = 1, UniquePrefix = 2, BddIsop = 4, BddMinterm = 8, @@ -46,6 +46,7 @@ namespace spot SignatureMerge = 128, Determinize = 256, UniquePrefixSeenOpt = 512, + UniqueSuffixPost = 1024, }; }; From 90ea02d42a59b8b6edb24538dcad05bf2f718032 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 12 Oct 2023 15:04:06 +0200 Subject: [PATCH 587/606] expansions: store as vector of pairs --- spot/tl/expansions.cc | 56 +++++++++++++++++++++++-------------------- spot/tl/expansions.hh | 2 +- 2 files changed, 31 insertions(+), 27 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 564468452..ea1ec4b95 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -153,7 +153,7 @@ namespace spot return var_num; } - bdd_finalizer(std::multimap& exp, bdd_dict_ptr d, bool opt_sigma_star, bool opt_bdd_encode) + bdd_finalizer(expansion_t& exp, bdd_dict_ptr d, bool opt_sigma_star, bool opt_bdd_encode) : anon_set_(bddtrue) , d_(d) , opt_sigma_star_(opt_sigma_star) @@ -278,9 +278,13 @@ namespace spot bdd dest_bdd = bdd_restrict(exp_, letter); formula dest = bdd_to_sere(dest_bdd); - auto it = res.insert({letter, dest}); - assert(it.second); - (void) it; + #ifndef NDEBUG + // make sure it didn't exist before + auto it = std::find(res.begin(), res.end(), {letter, dest}); + SPOT_ASSERT(it == res.end()); + #endif + + res.push_back({letter, dest}); } } else // BddIsop @@ -293,7 +297,7 @@ namespace spot bdd suffix = bdd_existcomp(cube, anon_set_); formula dest = conj_bdd_to_sere(suffix); - res.insert({letter, dest}); + res.push_back({letter, dest}); } } @@ -326,7 +330,7 @@ namespace spot exp.clear(); for (const auto [suffix, prefix] : unique_map) { - exp.insert({prefix, suffix}); + exp.push_back({prefix, suffix}); } } @@ -366,14 +370,14 @@ namespace spot } for (const auto& sub_f : single) - exp.insert({prefix, sub_f}); + exp.push_back({prefix, sub_f}); if (!merge.empty()) - exp.insert({prefix, formula::OrRat(merge)}); + exp.push_back({prefix, formula::OrRat(merge)}); } else { - exp.insert({prefix, suffix}); + exp.push_back({prefix, suffix}); } } } @@ -394,13 +398,13 @@ namespace spot exp.clear(); for (const auto [suffix, prefix] : unique_map) { - exp.insert({prefix, suffix}); + exp.push_back({prefix, suffix}); } } if (opts & exp_opts::expand_opt::Determinize) { - std::multimap exp_new; + expansion_t exp_new; bdd props = bddtrue; for (const auto& [prefix, _] : exp) @@ -415,7 +419,7 @@ namespace spot dests.push_back(suffix); } formula or_dests = formula::OrRat(dests); - exp_new.insert({letter, or_dests}); + exp_new.push_back({letter, or_dests}); dests.clear(); } exp = exp_new; @@ -449,7 +453,7 @@ namespace spot expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen) { - using exp_t = std::multimap; + using exp_t = expansion_t; if (f.is_boolean()) { @@ -484,7 +488,7 @@ namespace spot exp_t res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, formula::Concat({form, f.all_but(0)})}); + res.push_back({bdd_l, formula::Concat({form, f.all_but(0)})}); } if (f[0].accepts_eword()) @@ -492,7 +496,7 @@ namespace spot auto exps_rest = rec(f.all_but(0)); for (const auto& [bdd_l, 
form] : exps_rest) { - res.insert({bdd_l, form}); + res.push_back({bdd_l, form}); } } @@ -518,7 +522,7 @@ namespace spot exp_t res; for (const auto& [li, ei] : exp) { - res.insert({li, formula::Fusion({ei, E_i_j_minus})}); + res.push_back({li, formula::Fusion({ei, E_i_j_minus})}); if (ei.accepts_eword() && f.min() != 0) { @@ -526,12 +530,12 @@ namespace spot { // FIXME: build bdd once if ((li & ki) != bddfalse) - res.insert({li & ki, fi}); + res.push_back({li & ki, fi}); } } } if (f.min() == 0) - res.insert({bddtrue, formula::eword()}); + res.push_back({bddtrue, formula::eword()}); finalize(res, opts, d, seen); return res; @@ -549,7 +553,7 @@ namespace spot exp_t res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + res.push_back({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); } finalize(res, opts, d, seen); @@ -571,7 +575,7 @@ namespace spot exp_t res; for (const auto& [bdd_l, form] : exps) { - res.insert({bdd_l, form}); + res.push_back({bdd_l, form}); } // determinize @@ -604,7 +608,7 @@ namespace spot dests.push_back(ndet_dest); } formula or_dests = formula::OrRat(dests); - res_det.insert({l, or_dests}); + res_det.push_back({l, or_dests}); dests.clear(); } @@ -630,9 +634,9 @@ namespace spot { for (const auto& [kj, fj] : Fj) if ((li & kj) != bddfalse) - res.insert({li & kj, fj}); + res.push_back({li & kj, fj}); } - res.insert({li, formula::Fusion({ei, F})}); + res.push_back({li, formula::Fusion({ei, F})}); } finalize(res, opts, d, seen); @@ -669,7 +673,7 @@ namespace spot { if ((l_key & r_key) != bddfalse) { - new_res.insert({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + new_res.push_back({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); inserted = true; } } @@ -705,7 +709,7 @@ namespace spot } for (const auto& [label, dest] : exps) - res.insert({label, dest}); + res.push_back({label, dest}); } finalize(res, opts, d, seen); @@ -732,7 +736,7 @@ namespace spot struct signature_hash { std::size_t - operator() (const std::pair>& sig) const + operator() (const std::pair& sig) const { size_t hash = std::hash()(sig.first); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 2418b1103..9a350dbb8 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -30,7 +30,7 @@ namespace spot { - using expansion_t = std::multimap; + using expansion_t = std::vector>; struct exp_opts { From ed3d1ef4aab0abde62f25ba4a0225afffd087164 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 12 Oct 2023 15:04:40 +0200 Subject: [PATCH 588/606] expansions: expose easy expansion in python --- python/spot/impl.i | 2 ++ spot/tl/expansions.cc | 15 +++++++++++++++ spot/tl/expansions.hh | 3 +++ 3 files changed, 20 insertions(+) diff --git a/python/spot/impl.i b/python/spot/impl.i index 725655c08..ee2c6a81e 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -547,6 +547,8 @@ namespace std { %template(vectorofvectorofformulas) vector>; %template(setunsigned) set; %template(relabeling_map) map; + %template(pair_formula) pair; + %template(vector_pair_formula) vector>; } %include diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index ea1ec4b95..263f9a182 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -449,6 +449,21 @@ namespace spot } } + std::vector> + expansion_simple(formula f) + { + int owner = 42; + auto d = make_bdd_dict(); + + auto exp = expansion(f, d, &owner, exp_opts::None); + + std::vector> res; + for (const auto& [bdd, f] : exp) + 
res.push_back({bdd_to_formula(bdd, d), f}); + + d->unregister_all_my_variables(&owner); + return res; + } expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen) diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 9a350dbb8..036ac945a 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -50,6 +50,9 @@ namespace spot }; }; + SPOT_API std::vector> + expansion_simple(formula f); + SPOT_API expansion_t expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen = nullptr); From b15c0818c533087fd9cb3c55fa7d3f74dd018fb2 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 8 Feb 2024 13:39:11 +0100 Subject: [PATCH 589/606] expansions: up variants --- spot/tl/expansions.cc | 154 ++++++++++++++++++++++++++++++------------ 1 file changed, 110 insertions(+), 44 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 263f9a182..756af4082 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -304,6 +304,113 @@ namespace spot return res; } + expansion_t + unique_prefix(const expansion_t& exp) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + expansion_t res(unique_map.begin(), unique_map.end()); + return res; + } + + expansion_t + unique_prefix_seen(const expansion_t& exp, std::unordered_set* seen) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + expansion_t res; + + for (const auto [prefix, suffix] : unique_map) + { + if (!suffix.is(op::OrRat)) + { + res.push_back({prefix, suffix}); + continue; + } + + std::vector merge; + std::vector single; + + for (const auto& sub_f : suffix) + { + if (seen->find(sub_f) != seen->end()) + { + single.push_back(sub_f); + } + else + { + merge.push_back(sub_f); + } + } + + for (const auto& sub_f : single) + res.push_back({prefix, sub_f}); + + if (!merge.empty()) + res.push_back({prefix, formula::OrRat(merge)}); + } + + return res; + } + + size_t count_new(const expansion_t& exp, std::unordered_set* seen) + { + size_t count = 0; + for (const auto& [_, suffix] : exp) + { + if (seen->find(suffix) == seen->end()) + count++; + } + return count; + } + + const expansion_t& + find_smallest(const expansion_t& left, + const expansion_t& right, + std::unordered_set* seen) + { + size_t left_new = count_new(left, seen); + size_t right_new = count_new(right, seen); + + if (left_new < right_new) + return left; + + if (left_new == right_new && left.size() > right.size()) + return right; + + return right; + } + + expansion_t + unique_prefix_count(const expansion_t& exp, std::unordered_set* seen) + { + expansion_t up = unique_prefix(exp); + expansion_t up_seen = unique_prefix_seen(exp, seen); + + const expansion_t& maybe_smallest = find_smallest(exp, up, seen); + const expansion_t& smallest = find_smallest(maybe_smallest, up_seen, seen); + + return smallest; + } + void finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d, std::unordered_set* seen) { @@ -336,50 +443,9 @@ namespace spot if (opts & exp_opts::expand_opt::UniquePrefix) { - std::map unique_map; - for (const auto& [prefix, suffix] : exp) - { - auto res = 
unique_map.insert({prefix, suffix}); - if (!res.second) - { - auto it = res.first; - it->second = formula::OrRat({it->second, suffix}); - } - } - - exp.clear(); - - for (const auto [prefix, suffix] : unique_map) - { - if ((opts & exp_opts::expand_opt::UniquePrefixSeenOpt) - && suffix.is(op::OrRat)) - { - std::vector merge; - std::vector single; - - for (const auto& sub_f : suffix) - { - if (seen->find(sub_f) != seen->end()) - { - single.push_back(sub_f); - } - else - { - merge.push_back(sub_f); - } - } - - for (const auto& sub_f : single) - exp.push_back({prefix, sub_f}); - - if (!merge.empty()) - exp.push_back({prefix, formula::OrRat(merge)}); - } - else - { - exp.push_back({prefix, suffix}); - } - } + exp = unique_prefix(exp); + //exp = unique_prefix_seen(exp, seen); + //exp = unique_prefix_count(exp, seen); } if (opts & exp_opts::expand_opt::UniqueSuffixPost) From 7cbf544d3384ff1167dfc06e74ad42ac350b3161 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 19 Sep 2024 08:41:04 +0200 Subject: [PATCH 590/606] expansions: split --- python/spot/impl.i | 2 + spot/tl/Makefile.am | 3 +- spot/tl/expansions.cc | 31 +- spot/tl/expansions.hh | 2 + spot/tl/expansions2.cc | 930 +++++++++++++++++++++++++++++++++++++++++ spot/tl/expansions2.hh | 45 ++ tests/Makefile.am | 2 - 7 files changed, 999 insertions(+), 16 deletions(-) create mode 100644 spot/tl/expansions2.cc create mode 100644 spot/tl/expansions2.hh diff --git a/python/spot/impl.i b/python/spot/impl.i index ee2c6a81e..a435758f6 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -89,6 +89,7 @@ #include #include #include +#include #include #include #include @@ -638,6 +639,7 @@ namespace std { %include %include %include +%include %include %include %include diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index abb431267..f2ff0fcad 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -32,7 +32,7 @@ tl_HEADERS = \ dot.hh \ environment.hh \ exclusive.hh \ - expansions.hh \ + expansions2.hh \ formula.hh \ hierarchy.hh \ length.hh \ @@ -60,6 +60,7 @@ libtl_la_SOURCES = \ dot.cc \ exclusive.cc \ expansions.cc \ + expansions2.cc \ formula.cc \ hierarchy.cc \ length.cc \ diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 756af4082..c09aec083 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -278,11 +278,11 @@ namespace spot bdd dest_bdd = bdd_restrict(exp_, letter); formula dest = bdd_to_sere(dest_bdd); - #ifndef NDEBUG - // make sure it didn't exist before - auto it = std::find(res.begin(), res.end(), {letter, dest}); - SPOT_ASSERT(it == res.end()); - #endif + // #ifndef NDEBUG + // // make sure it didn't exist before + // auto it = std::find(res.begin(), res.end(), {letter, dest}); + // SPOT_ASSERT(it == res.end()); + // #endif res.push_back({letter, dest}); } @@ -338,7 +338,7 @@ namespace spot expansion_t res; - for (const auto [prefix, suffix] : unique_map) + for (const auto& [prefix, suffix] : unique_map) { if (!suffix.is(op::OrRat)) { @@ -435,17 +435,22 @@ namespace spot } exp.clear(); - for (const auto [suffix, prefix] : unique_map) + for (const auto& [suffix, prefix] : unique_map) { exp.push_back({prefix, suffix}); } } - if (opts & exp_opts::expand_opt::UniquePrefix) + if (opts & exp_opts::expand_opt::UniquePrefix + || opts & exp_opts::expand_opt::UniquePrefixSeenOpt + || opts & exp_opts::expand_opt::UniquePrefixSeenCountOpt) { - exp = unique_prefix(exp); - //exp = unique_prefix_seen(exp, seen); - //exp = unique_prefix_count(exp, seen); + if (opts & 
exp_opts::expand_opt::UniquePrefixSeenOpt) + exp = unique_prefix_seen(exp, seen); + else if (opts & exp_opts::expand_opt::UniquePrefixSeenCountOpt) + exp = unique_prefix_count(exp, seen); + else + exp = unique_prefix(exp); } if (opts & exp_opts::expand_opt::UniqueSuffixPost) @@ -462,7 +467,7 @@ namespace spot } exp.clear(); - for (const auto [suffix, prefix] : unique_map) + for (const auto& [suffix, prefix] : unique_map) { exp.push_back({prefix, suffix}); } @@ -817,7 +822,7 @@ namespace spot struct signature_hash { std::size_t - operator() (const std::pair& sig) const + operator() (const std::pair& sig) const noexcept { size_t hash = std::hash()(sig.first); diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh index 036ac945a..9db8d2c8d 100644 --- a/spot/tl/expansions.hh +++ b/spot/tl/expansions.hh @@ -47,6 +47,8 @@ namespace spot Determinize = 256, UniquePrefixSeenOpt = 512, UniqueSuffixPost = 1024, + UniquePrefixSeenCountOpt = 2048, + TransitionBased = 4096, }; }; diff --git a/spot/tl/expansions2.cc b/spot/tl/expansions2.cc new file mode 100644 index 000000000..012ac11e8 --- /dev/null +++ b/spot/tl/expansions2.cc @@ -0,0 +1,930 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + // FIXME: could probably just return a map directly + static std::vector + formula_aps(formula f) + { + auto res = std::unordered_set(); + + f.traverse([&res](formula f) + { + if (f.is(op::ap)) + { + res.insert(f.ap_name()); + return true; + } + + return false; + }); + + return std::vector(res.begin(), res.end()); + } + + formula + rewrite_and_nlm(formula f) + { + unsigned s = f.size(); + std::vector final; + std::vector non_final; + + for (auto g: f) + if (g.accepts_eword()) + final.emplace_back(g); + else + non_final.emplace_back(g); + + if (non_final.empty()) + // (a* & b*);c = (a*|b*);c + return formula::OrRat(std::move(final)); + if (!final.empty()) + { + // let F_i be final formulae + // N_i be non final formula + // (F_1 & ... & F_n & N_1 & ... & N_m) + // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) + // | (F_1 | ... | F_n) && (N_1 & ... & N_m);[*] + formula f = formula::OrRat(std::move(final)); + formula n = formula::AndNLM(std::move(non_final)); + formula t = formula::one_star(); + formula ft = formula::Concat({f, t}); + formula nt = formula::Concat({n, t}); + formula ftn = formula::AndRat({ft, n}); + formula fnt = formula::AndRat({f, nt}); + return formula::OrRat({ftn, fnt}); + } + // No final formula. + // Translate N_1 & N_2 & ... & N_n into + // N_1 && (N_2;[*]) && ... && (N_n;[*]) + // | (N_1;[*]) && N_2 && ... && (N_n;[*]) + // | (N_1;[*]) && (N_2;[*]) && ... 
&& N_n + formula star = formula::one_star(); + std::vector disj; + for (unsigned n = 0; n < s; ++n) + { + std::vector conj; + for (unsigned m = 0; m < s; ++m) + { + formula g = f[m]; + if (n != m) + g = formula::Concat({g, star}); + conj.emplace_back(g); + } + disj.emplace_back(formula::AndRat(std::move(conj))); + } + return formula::OrRat(std::move(disj)); + } + + class bdd_finalizer + { + public: + int encode(formula f) + { + bool is_anon = false; + int var_num; + auto it = formula2bdd_.find(f); + if (it != formula2bdd_.end()) + { + var_num = it->second; + } + else + { + if (opt_sigma_star_ && (f.is(op::Star) + && f[0].is(op::tt) + && f.min() == 0 + && f.max() == formula::unbounded())) + { + var_num = bddtrue.id(); + } + else if (opt_bdd_encode_ && (f.is(op::AndRat) || f.is(op::OrRat))) + { + bdd var = f.is(op::AndRat) ? bdd(bddtrue) : bdd(bddfalse); + for (const auto& sub_f : f) + { + int bddid = encode(sub_f); + bdd subvar = bdd_ithvar(bddid); + var = f.is(op::AndRat) ? var & subvar : var | subvar; + } + var_num = var.id(); + } + else + { + var_num = d_->register_anonymous_variables(1, this); + is_anon = true; + } + + formula2bdd_.insert({f, var_num}); + bdd2formula_.insert({var_num, f}); + } + + bdd var = bdd_ithvar(var_num); + + if (is_anon) + anon_set_ &= var; + + return var_num; + } + + bdd_finalizer(expansion_t& exp, bdd_dict_ptr d, bool opt_sigma_star, bool opt_bdd_encode) + : anon_set_(bddtrue) + , d_(d) + , opt_sigma_star_(opt_sigma_star) + , opt_bdd_encode_(opt_bdd_encode) + { + for (const auto& [prefix, suffix] : exp) + { + int var_num = encode(suffix); + bdd var = bdd_ithvar(var_num); + exp_ |= prefix & var; + } + } + + ~bdd_finalizer() + { + d_->unregister_all_my_variables(this); + } + + expansion_t + simplify(exp_opts::expand_opt opts); + + private: + bdd exp_; + bdd anon_set_; + std::map formula2bdd_; + std::map bdd2formula_; + bdd_dict_ptr d_; + bool opt_sigma_star_; + bool opt_bdd_encode_; + + formula var_to_formula(int var); + formula conj_bdd_to_sere(bdd b); + formula bdd_to_sere(bdd b); + }; + + formula + bdd_finalizer::var_to_formula(int var) + { + formula f = bdd2formula_[var]; + assert(f); + return f; + } + + formula + bdd_finalizer::bdd_to_sere(bdd f) + { + if (f == bddfalse) + return formula::ff(); + + std::vector v; + minato_isop isop(f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_bdd_to_sere(cube)); + return formula::OrRat(std::move(v)); + } + + formula + bdd_finalizer::conj_bdd_to_sere(bdd b) + { + if (b == bddtrue) + { + if (opt_sigma_star_){ + return formula::Star(formula::tt(), 0, formula::unbounded()); + } else { + return formula::tt(); + } + } + if (b == bddfalse) + return formula::ff(); + + // Unroll the first loop of the next do/while loop so that we + // do not have to create v when b is not a conjunction. 
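(Illustration only, not part of the patch: the unrolling mentioned in the comment above merely avoids building the vector v when the cube contains a single literal.  For reference, a non-unrolled sketch of the same conversion, assuming b is an ISOP cube that is neither bddtrue nor bddfalse (those cases are handled earlier) and that var_to_formula is the member defined above:

    // Each level of a conjunction cube has exactly one bddfalse branch:
    // the low branch for a positive literal, the high one for a negative.
    std::vector<spot::formula> v;
    while (b != bddtrue)
      {
        spot::formula lit = var_to_formula(bdd_var(b));
        if (bdd_high(b) == bddfalse)        // negative literal
          {
            lit = spot::formula::Not(lit);
            b = bdd_low(b);
          }
        else                                // positive literal
          {
            assert(bdd_low(b) == bddfalse);
            b = bdd_high(b);
          }
        v.emplace_back(std::move(lit));
      }
    // formula::multop collapses the single-literal case to the literal itself.
    return spot::formula::multop(spot::op::AndRat, std::move(v));
)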
+ formula res = var_to_formula(bdd_var(b)); + bdd high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + if (b == bddtrue) + return res; + std::vector v{std::move(res)}; + do + { + res = var_to_formula(bdd_var(b)); + high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + assert(b != bddfalse); + v.emplace_back(std::move(res)); + } + while (b != bddtrue); + return formula::multop(op::AndRat, std::move(v)); + } + + expansion_t + bdd_finalizer::simplify(exp_opts::expand_opt opts) + { + expansion_t res; + + if (opts & exp_opts::expand_opt::BddMinterm) + { + bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); + bdd or_labels = bdd_exist(exp_, anon_set_); + // TODO: check are_equivalent avec or_labels/exp_ en premier argument + for (bdd letter: minterms_of(or_labels, prop_set)) + { + bdd dest_bdd = bdd_restrict(exp_, letter); + formula dest = bdd_to_sere(dest_bdd); + + // #ifndef NDEBUG + // // make sure it didn't exist before + // auto it = std::find(res.begin(), res.end(), {letter, dest}); + // SPOT_ASSERT(it == res.end()); + // #endif + + res.push_back({letter, dest}); + } + } + else // BddIsop + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + res.push_back({letter, dest}); + } + } + + return res; + } + + expansion_t + unique_prefix(const expansion_t& exp) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + expansion_t res(unique_map.begin(), unique_map.end()); + return res; + } + + expansion_t + unique_prefix_seen(const expansion_t& exp, std::unordered_set* seen) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + expansion_t res; + + for (const auto& [prefix, suffix] : unique_map) + { + if (!suffix.is(op::OrRat)) + { + res.push_back({prefix, suffix}); + continue; + } + + std::vector merge; + std::vector single; + + for (const auto& sub_f : suffix) + { + if (seen->find(sub_f) != seen->end()) + { + single.push_back(sub_f); + } + else + { + merge.push_back(sub_f); + } + } + + for (const auto& sub_f : single) + res.push_back({prefix, sub_f}); + + if (!merge.empty()) + res.push_back({prefix, formula::OrRat(merge)}); + } + + return res; + } + + size_t count_new(const expansion_t& exp, std::unordered_set* seen) + { + size_t count = 0; + for (const auto& [_, suffix] : exp) + { + if (seen->find(suffix) == seen->end()) + count++; + } + return count; + } + + const expansion_t& + find_smallest(const expansion_t& left, + const expansion_t& right, + std::unordered_set* seen) + { + size_t left_new = count_new(left, seen); + size_t right_new = count_new(right, seen); + + if (left_new < right_new) + return left; + + if (left_new == right_new && left.size() > right.size()) + return right; + + return right; + } + + expansion_t + unique_prefix_count(const expansion_t& exp, std::unordered_set* seen) + { + expansion_t up = unique_prefix(exp); + expansion_t up_seen = 
unique_prefix_seen(exp, seen); + + const expansion_t& maybe_smallest = find_smallest(exp, up, seen); + const expansion_t& smallest = find_smallest(maybe_smallest, up_seen, seen); + + return smallest; + } + + void + finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d, std::unordered_set* seen) + { + if (opts & (exp_opts::expand_opt::BddIsop + | exp_opts::expand_opt::BddMinterm)) + { + bdd_finalizer bddf(exp, d, opts & exp_opts::expand_opt::BddSigmaStar, opts & exp_opts::expand_opt::BddEncode); + exp = bddf.simplify(opts); + } + + if (opts & exp_opts::expand_opt::UniqueSuffixPre) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto& [suffix, prefix] : unique_map) + { + exp.push_back({prefix, suffix}); + } + } + + if (opts & exp_opts::expand_opt::UniquePrefix + || opts & exp_opts::expand_opt::UniquePrefixSeenOpt + || opts & exp_opts::expand_opt::UniquePrefixSeenCountOpt) + { + if (opts & exp_opts::expand_opt::UniquePrefixSeenOpt) + exp = unique_prefix_seen(exp, seen); + else if (opts & exp_opts::expand_opt::UniquePrefixSeenCountOpt) + exp = unique_prefix_count(exp, seen); + else + exp = unique_prefix(exp); + } + + if (opts & exp_opts::expand_opt::UniqueSuffixPost) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto& [suffix, prefix] : unique_map) + { + exp.push_back({prefix, suffix}); + } + } + + if (opts & exp_opts::expand_opt::Determinize) + { + expansion_t exp_new; + + bdd props = bddtrue; + for (const auto& [prefix, _] : exp) + props &= bdd_support(prefix); + + std::vector dests; + for (bdd letter : minterms_of(bddtrue, props)) + { + for (const auto& [prefix, suffix] : exp) + { + if (bdd_implies(letter, prefix)) + dests.push_back(suffix); + } + formula or_dests = formula::OrRat(dests); + exp_new.push_back({letter, or_dests}); + dests.clear(); + } + exp = exp_new; + } + } + } + + formula + expansion_to_formula2(expansion_t e, bdd_dict_ptr& d) + { + std::vector res; + + for (const auto& [key, val] : e) + { + formula prefix = bdd_to_formula(key, d); + res.push_back(formula::Concat({prefix, val})); + } + + return formula::OrRat(res); + } + + expansion_t + expansion2(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen) + { + using exp_t = expansion_t; + + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (f_bdd == bddfalse) + return {}; + + return {{f_bdd, formula::eword()}}; + } + + auto rec = [&d, owner, opts, seen](formula f){ + return expansion2(f, d, owner, exp_opts::None, seen); + }; + + + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + // return {{bddfalse, formula::ff()}}; + return {}; + + case op::Concat: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, formula::Concat({form, f.all_but(0)})}); + } + + if (f[0].accepts_eword()) + { + auto exps_rest = rec(f.all_but(0)); + for (const auto& [bdd_l, form] : exps_rest) + { + res.push_back({bdd_l, form}); + } + } + + finalize(res, opts, d, seen); + return res; + } + + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return {{bddtrue, 
formula::eword()}}; + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto E_i_j_minus = formula::FStar(E, min, max); + + auto exp = rec(E); + exp_t res; + for (const auto& [li, ei] : exp) + { + res.push_back({li, formula::Fusion({ei, E_i_j_minus})}); + + if (ei.accepts_eword() && f.min() != 0) + { + for (const auto& [ki, fi] : rec(E_i_j_minus)) + { + // FIXME: build bdd once + if ((li & ki) != bddfalse) + res.push_back({li & ki, fi}); + } + } + } + if (f.min() == 0) + res.push_back({bddtrue, formula::eword()}); + + finalize(res, opts, d, seen); + return res; + } + + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + auto res = rec(rewrite); + finalize(res, opts, d, seen); + return res; + } + + case op::first_match: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, form}); + } + + // determinize + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : res) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + for (auto& [_, dest] : res) + dest = formula::first_match(dest); + finalize(res, opts, d, seen); + return res; + } + + exp_t res_det; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : res) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res_det.push_back({l, or_dests}); + dests.clear(); + } + + for (auto& [_, dest] : res_det) + dest = formula::first_match(dest); + finalize(res_det, opts, d, seen); + return res_det; + } + + case op::Fusion: + { + exp_t res; + formula E = f[0]; + formula F = f.all_but(0); + + exp_t Ei = rec(E); + // TODO: std::option + exp_t Fj = rec(F); + + for (const auto& [li, ei] : Ei) + { + if (ei.accepts_eword()) + { + for (const auto& [kj, fj] : Fj) + if ((li & kj) != bddfalse) + res.push_back({li & kj, fj}); + } + res.push_back({li, formula::Fusion({ei, F})}); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::AndRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + + if (exps.empty()) + { + // op::AndRat: one of the expansions was empty (the only + // edge was `false`), so the AndRat is empty as + // well + res.clear(); + break; + } + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + exp_t new_res; + bool inserted = false; + for (const auto& [l_key, l_val] : exps) + { + for (const auto& [r_key, r_val] : res) + { + if ((l_key & r_key) != bddfalse) + { + new_res.push_back({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + inserted = true; + } + } + } + + if (!inserted) + { + // all prefix conjuctions led to bddfalse, And is empty + res.clear(); + break; + } + + res = std::move(new_res); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::OrRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + if (exps.empty()) + continue; + + if 
(res.empty()) + { + res = std::move(exps); + continue; + } + + for (const auto& [label, dest] : exps) + res.push_back({label, dest}); + } + + finalize(res, opts, d, seen); + return res; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return {}; + } + + twa_graph_ptr + expand_automaton2(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) + { + auto finite = expand_finite_automaton2(f, d, opts); + return from_finite(finite); + } + + struct signature_hash + { + std::size_t + operator() (const expansion_t& sig) const noexcept + { + size_t hash = 0; + + for (const auto& keyvalue : sig) + { + hash ^= (bdd_hash()(keyvalue.first) ^ std::hash()(keyvalue.second)) + + 0x9e3779b9 + (hash << 6) + (hash >> 2); + } + + return hash; + } + }; + + twa_graph_ptr + expand_finite_automaton2(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) + { + bool signature_merge = opts & exp_opts::expand_opt::SignatureMerge; + + auto aut = make_twa_graph(d); + + aut->prop_state_acc(false); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + auto signature2state = std::unordered_map(); + auto seen = std::unordered_set(); + seen.insert(f); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + formula2state.insert({ f, init_state }); + + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto formula2signature = robin_hood::unordered_map(); + auto get_signature = [&](const formula& form) -> expansion_t + { + auto it = formula2signature.find(form); + if (it != formula2signature.end()) + { + return it->second; + } + auto exp = expansion2(form, d, aut.get(), opts, &seen); + formula2signature.insert({form, exp}); + return exp; + }; + + if (signature_merge) + signature2state.insert({ get_signature(f), init_state}); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula suffix) -> unsigned + { + unsigned dst; + auto it = formula2state.find(suffix); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + if (signature_merge) + { + auto exp = get_signature(suffix); + + auto it2 = signature2state.find(exp); + if (it2 != signature2state.end()) + { + formula2state.insert({suffix, it2->second}); + return it2->second; + } + } + + dst = aut->new_state(); + todo.push_back({suffix, dst}); + seen.insert(suffix); + + formula2state.insert({suffix, dst}); + if (signature_merge) + signature2state.insert({get_signature(suffix), dst}); + + std::ostringstream ss; + ss << suffix; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + + auto exp = get_signature(curr_f); + + for (const auto& [letter, suffix] : exp) + { + if (suffix.is(op::ff)) + // TODO ASSERT NOT + continue; + + auto dst = find_dst(suffix); + + auto curr_acc_mark = suffix.accepts_eword() ? 
acc_mark : acc_cond::mark_t(); + aut->new_edge(curr_state, dst, letter, curr_acc_mark); + } + } + + aut->set_named_prop("state-names", state_names); + + if ((opts & exp_opts::MergeEdges) + && !(opts & exp_opts::UniqueSuffixPre || opts & exp_opts::UniqueSuffixPost)) + aut->merge_edges(); + + return aut; + } +} diff --git a/spot/tl/expansions2.hh b/spot/tl/expansions2.hh new file mode 100644 index 000000000..e517bb87a --- /dev/null +++ b/spot/tl/expansions2.hh @@ -0,0 +1,45 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include + +namespace spot +{ + SPOT_API expansion_t + expansion2(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen = nullptr); + + SPOT_API twa_graph_ptr + expand_automaton2(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); + + SPOT_API twa_graph_ptr + expand_finite_automaton2(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); + + SPOT_API formula + expansion_to_formula2(expansion_t e, bdd_dict_ptr& d); +} diff --git a/tests/Makefile.am b/tests/Makefile.am index a061ba23d..9bf0cef73 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -69,7 +69,6 @@ check_PROGRAMS = \ core/cube \ core/emptchk \ core/equals \ - core/expand \ core/graph \ core/kind \ core/length \ @@ -112,7 +111,6 @@ core_bricks_SOURCES = core/bricks.cc core_checkpsl_SOURCES = core/checkpsl.cc core_checkta_SOURCES = core/checkta.cc core_emptchk_SOURCES = core/emptchk.cc -core_expand_SOURCES = core/expand.cc core_graph_SOURCES = core/graph.cc core_ikwiad_SOURCES = core/ikwiad.cc core_intvcomp_SOURCES = core/intvcomp.cc From f687ef7bbb07d0c3600e40a91f22815007e2c3d7 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 24 Sep 2024 11:36:17 +0200 Subject: [PATCH 591/606] ltl2tgba_fm: switch for expansions --- spot/twaalgos/ltl2tgba_fm.cc | 59 ++++++++++++++++++++++++++++++++---- spot/twaalgos/ltl2tgba_fm.hh | 2 ++ 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index dd7bb9182..fc296c919 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -101,15 +102,14 @@ namespace spot class ratexp_to_dfa final { typedef twa_graph::namer namer; + // Use robin_hood::pair because std::pair is not no-throw constructible + typedef robin_hood::pair labelled_aut; public: ratexp_to_dfa(translate_dict& dict, bool disable_scc_trimming = false); std::vector> succ(formula f); ~ratexp_to_dfa(); - protected: - // Use robin_hood::pair because std::pair is not no-throw constructible - typedef robin_hood::pair labelled_aut; labelled_aut translate(formula f); private: @@ -887,8 +887,25 
@@ namespace spot bdd res; if (!f.is_boolean()) { - ratexp_trad_visitor v(dict, to_concat); - res = v.visit(f); + if (sere_translation_options() == 0) + { + ratexp_trad_visitor v(dict, to_concat); + res = v.visit(f); + } + else // version expansion + { + // auto d = make_bdd_dict(); + res = bddfalse; + for (auto [label, succ]: expansion(f, dict.dict, nullptr, exp_opts::expand_opt::None, nullptr)) + { + // std::cout << label << ' ' << succ << std::endl; + if (to_concat) + succ = formula::Concat({succ, to_concat}); + // std::cout << succ << std::endl; + int x = dict.register_next_variable(succ); + res |= label & bdd_ithvar(x); + } + } } else { @@ -899,6 +916,7 @@ namespace spot int x = dict.register_next_variable(to_concat); res &= bdd_ithvar(x); } + // std::cout << res << std::endl; return res; } @@ -2199,7 +2217,8 @@ namespace spot translate_dict d(a, s, false, false, false); ratexp_to_dfa sere2dfa(d, disable_scc_trimming); - auto [dfa, namer, state] = sere2dfa.succ(f); + auto [dfa, namer] = sere2dfa.translate(f); + auto state = dfa->state_from_number(namer->get_state(f)); // language was empty, build an automaton with one non accepting state if (dfa == nullptr) @@ -2247,4 +2266,32 @@ namespace spot return res; } + + int sere_translation_options(const char* version) + { + static int pref = -1; + const char *env = nullptr; + if (!version && pref < 0) + version = env = getenv("SPOT_SERE_TRANSLATE_OPT"); + if (version) + { + if (!strcasecmp(version, "bdd")) + pref = 0; + else if (!strcasecmp(version, "expansion")) + pref = 1; + else + { + const char* err = ("sere_translation_options(): argument" + " should be one of {bdd,expansion}"); + if (env) + err = "SPOT_SERE_TRANSLATE_OPT should be one of {bdd,expansion}"; + throw std::runtime_error(err); + } + } + else if (pref < 0) + { + pref = 0; + } + return pref; + } } diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 51de038e1..7b536d6fe 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -92,4 +92,6 @@ namespace spot SPOT_API twa_graph_ptr sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming = false); + + SPOT_API int sere_translation_options(const char* version = nullptr); } From dfa828739b0374b083d78a107ecea066dc478b8e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 23 Oct 2024 13:56:00 +0200 Subject: [PATCH 592/606] translate_aa: setup translation choice --- spot/twaalgos/translate_aa.cc | 70 +++++++++++++++++++++++++++++++++-- spot/twaalgos/translate_aa.hh | 2 + 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index c18570d41..095f92c5a 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -18,9 +18,14 @@ // along with this program. If not, see . 
#include "config.h" + +#include + #include +#include #include #include +#include #include #include @@ -176,7 +181,6 @@ namespace spot { unsigned init_state = aut_->new_state(); - outedge_combiner oe(aut_, accepting_sink_); bdd comb = bddtrue; for (const auto& sub_formula : f) { @@ -285,7 +289,22 @@ namespace spot { unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); - twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + twa_graph_ptr sere_aut; + if (sere_aa_translation_options() == 0) + { + // old bdd method + sere_aut = sere_to_tgba(f[0], dict, true); + } + else if (sere_aa_translation_options() == 1) + { + // derivation + sere_aut = derive_finite_automaton_with_first(f[0], dict); + } + else + { + // linear form + sere_aut = expand_finite_automaton(f[0], dict, exp_opts::expand_opt::None); + } // TODO: this should be a std::vector ! std::vector acc_states; @@ -326,7 +345,22 @@ namespace spot { unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); - twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + twa_graph_ptr sere_aut; + if (sere_aa_translation_options() == 0) + { + // old bdd method + sere_aut = sere_to_tgba(f[0], dict, true); + } + else if (sere_aa_translation_options() == 1) + { + // derivation + sere_aut = derive_finite_automaton_with_first(f[0], dict); + } + else + { + // linear form + sere_aut = expand_finite_automaton(f[0], dict, exp_opts::expand_opt::None); + } // DFA recognizes the empty language, so {0} []-> rhs is always true unsigned ns = sere_aut->num_states(); @@ -482,4 +516,34 @@ namespace spot return aut; } + + int sere_aa_translation_options(const char* version) + { + static int pref = -1; + const char *env = nullptr; + if (!version && pref < 0) + version = env = getenv("SPOT_SERE_AA_TRANSLATE_OPT"); + if (version) + { + if (!strcasecmp(version, "bdd")) + pref = 0; + else if (!strcasecmp(version, "derive")) + pref = 1; + else if (!strcasecmp(version, "expansion")) + pref = 2; + else + { + const char* err = ("sere_aa_translation_options(): argument" + " should be one of {bdd,derive,expansion}"); + if (env) + err = "SPOT_SERE_AA_TRANSLATE_OPT should be one of {bdd,derive,expansion}"; + throw std::runtime_error(err); + } + } + else if (pref < 0) + { + pref = 0; + } + return pref; + } } diff --git a/spot/twaalgos/translate_aa.hh b/spot/twaalgos/translate_aa.hh index 9a8760072..aedcf07d4 100644 --- a/spot/twaalgos/translate_aa.hh +++ b/spot/twaalgos/translate_aa.hh @@ -29,4 +29,6 @@ namespace spot { SPOT_API twa_graph_ptr ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states = false); + + SPOT_API int sere_aa_translation_options(const char* version = nullptr); } From a32431c34152debdb4865cc931fecea59981a1e4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 23 Oct 2024 13:59:51 +0200 Subject: [PATCH 593/606] ltl2tgba_fm: setup switch between bdd and exp --- spot/twaalgos/ltl2tgba_fm.cc | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index fc296c919..c5a8c9d02 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -224,6 +224,7 @@ namespace spot int register_proposition(formula f) { + // TODO: call this in expansions int num = dict->register_proposition(f, this); var_set &= bdd_ithvar(num); return num; @@ -894,14 +895,11 @@ namespace spot } else // version expansion { - // auto d = make_bdd_dict(); res = bddfalse; - for (auto [label, succ]: expansion(f, 
dict.dict, nullptr, exp_opts::expand_opt::None, nullptr)) + for (auto [label, succ]: expansion(f, dict.dict, &dict, exp_opts::expand_opt::None, nullptr)) { - // std::cout << label << ' ' << succ << std::endl; if (to_concat) succ = formula::Concat({succ, to_concat}); - // std::cout << succ << std::endl; int x = dict.register_next_variable(succ); res |= label & bdd_ithvar(x); } @@ -916,7 +914,6 @@ namespace spot int x = dict.register_next_variable(to_concat); res &= bdd_ithvar(x); } - // std::cout << res << std::endl; return res; } @@ -2209,6 +2206,10 @@ namespace spot twa_graph_ptr sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming) { + // make sure we use bdd translation in this case + auto old_opt = sere_translation_options(); + sere_translation_options("bdd"); + f = negative_normal_form(f); tl_simplifier* s = new tl_simplifier(dict); @@ -2218,7 +2219,6 @@ namespace spot ratexp_to_dfa sere2dfa(d, disable_scc_trimming); auto [dfa, namer] = sere2dfa.translate(f); - auto state = dfa->state_from_number(namer->get_state(f)); // language was empty, build an automaton with one non accepting state if (dfa == nullptr) @@ -2264,6 +2264,10 @@ namespace spot res->set_named_prop("state-names", names); + // restore previous option + if (old_opt != 0) + sere_translation_options("expansion"); + return res; } From 7fa19736136956cc3e0c9ca830e1750b740b832d Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 23 Oct 2024 14:00:15 +0200 Subject: [PATCH 594/606] expansions: fusion can produce false let's discard the result if it's false --- spot/tl/expansions.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index c09aec083..9978d925d 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -722,7 +722,10 @@ namespace spot if ((li & kj) != bddfalse) res.push_back({li & kj, fj}); } - res.push_back({li, formula::Fusion({ei, F})}); + + formula ei_fusion_F = formula::Fusion({ei, F}); + if (!ei_fusion_F.is(op::ff)) + res.push_back({li, ei_fusion_F}); } finalize(res, opts, d, seen); From 37b814c75092b34580154769a32c409bbc93fb48 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Tue, 4 Mar 2025 19:20:50 +0100 Subject: [PATCH 595/606] expansions: make signature canonical Linear forms are now sorted and duplicates are removed --- spot/tl/expansions.cc | 18 ++++++++++++++++++ spot/tl/expansions2.cc | 17 +++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 9978d925d..0896dd6b5 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -18,9 +18,11 @@ // along with this program. If not, see . #include "config.h" +#include #include #include #include +#include #include #include #include @@ -495,6 +497,22 @@ namespace spot } exp = exp_new; } + + // sort and remove duplicates from expansion to canonicalize it for + // eventual signature use + if (exp.size() >= 2) + { + std::sort(exp.begin(), exp.end(), + [](const auto& lhs, const auto& rhs) { + bdd_less_than_stable blt; + // first sort by label, then by suffix + if (blt(lhs.first, rhs.first)) + return true; + formula_ptr_less_than_bool_first flt; + return flt(lhs.second, rhs.second); + }); + exp.erase(std::unique(exp.begin(), exp.end()), exp.end()); + } } } diff --git a/spot/tl/expansions2.cc b/spot/tl/expansions2.cc index 012ac11e8..ecd98f5cb 100644 --- a/spot/tl/expansions2.cc +++ b/spot/tl/expansions2.cc @@ -18,9 +18,11 @@ // along with this program. If not, see . 
#include "config.h" +#include #include #include #include +#include #include #include #include @@ -495,6 +497,21 @@ namespace spot } exp = exp_new; } + + // sort expansion to canonicalize it for eventual signature use + if (exp.size() >= 2) + { + std::sort(exp.begin(), exp.end(), + [](const auto& lhs, const auto& rhs) { + bdd_less_than_stable blt; + // first sort by label, then by suffix + if (blt(lhs.first, rhs.first)) + return true; + formula_ptr_less_than_bool_first flt; + return flt(lhs.second, rhs.second); + }); + exp.erase(std::unique(exp.begin(), exp.end()), exp.end()); + } } } From 939942af304846192902e94ba9eb067d05667fb4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 7 Mar 2025 11:24:00 +0100 Subject: [PATCH 596/606] expansions: fix sort behavior The previous implementation was wrong and led to segfaults when sorting large expansions --- spot/tl/expansions.cc | 10 +++------- spot/tl/expansions2.cc | 10 +++------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc index 0896dd6b5..402236ea6 100644 --- a/spot/tl/expansions.cc +++ b/spot/tl/expansions.cc @@ -504,13 +504,9 @@ namespace spot { std::sort(exp.begin(), exp.end(), [](const auto& lhs, const auto& rhs) { - bdd_less_than_stable blt; - // first sort by label, then by suffix - if (blt(lhs.first, rhs.first)) - return true; - formula_ptr_less_than_bool_first flt; - return flt(lhs.second, rhs.second); - }); + return std::make_pair(lhs.first.id(), lhs.second.id()) + < std::make_pair(rhs.first.id(), rhs.second.id()); + }); exp.erase(std::unique(exp.begin(), exp.end()), exp.end()); } } diff --git a/spot/tl/expansions2.cc b/spot/tl/expansions2.cc index ecd98f5cb..d80e5ffa3 100644 --- a/spot/tl/expansions2.cc +++ b/spot/tl/expansions2.cc @@ -503,13 +503,9 @@ namespace spot { std::sort(exp.begin(), exp.end(), [](const auto& lhs, const auto& rhs) { - bdd_less_than_stable blt; - // first sort by label, then by suffix - if (blt(lhs.first, rhs.first)) - return true; - formula_ptr_less_than_bool_first flt; - return flt(lhs.second, rhs.second); - }); + return std::make_pair(lhs.first.id(), lhs.second.id()) + < std::make_pair(rhs.first.id(), rhs.second.id()); + }); exp.erase(std::unique(exp.begin(), exp.end()), exp.end()); } } From 3d3f311733bbb83b6cf90ec6e13288cdee182860 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 13 Mar 2025 08:47:41 +0100 Subject: [PATCH 597/606] expansions: remove unused lambda capture --- spot/tl/expansions2.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/tl/expansions2.cc b/spot/tl/expansions2.cc index d80e5ffa3..96f970a94 100644 --- a/spot/tl/expansions2.cc +++ b/spot/tl/expansions2.cc @@ -540,7 +540,7 @@ namespace spot return {{f_bdd, formula::eword()}}; } - auto rec = [&d, owner, opts, seen](formula f){ + auto rec = [&d, owner, seen](formula f){ return expansion2(f, d, owner, exp_opts::None, seen); }; From 42221100dda39ccd032161637b75c6e4aae5436e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 20 Oct 2021 11:54:16 +0200 Subject: [PATCH 598/606] nix: setup Nix Flake file * flake.nix, flake.lock: here --- flake.lock | 61 +++++++++++++++ flake.nix | 214 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 275 insertions(+) create mode 100644 flake.lock create mode 100644 flake.nix diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..dd215f1c6 --- /dev/null +++ b/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + 
"locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1741196730, + "narHash": "sha256-0Sj6ZKjCpQMfWnN0NURqRCQn2ob7YtXTAOTwCuz7fkA=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "48913d8f9127ea6530a2a2f1bd4daa1b8685d8a3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-24.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..f0f3f95b5 --- /dev/null +++ b/flake.nix @@ -0,0 +1,214 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; + flake-utils.url = "github:numtide/flake-utils"; + }; + outputs = { self, nixpkgs, flake-utils, ... }: + flake-utils.lib.eachSystem + [ + "x86_64-linux" + ] + + (system: + let + pkgs = import nixpkgs { inherit system; }; + lib = pkgs.lib; + + mkSpotApps = appNames: + pkgs.lib.genAttrs appNames + (name: flake-utils.lib.mkApp { + drv = self.packages.${system}.spot; + name = name; + }); + + spotPackage = + let + inherit (builtins) + filter + head + isString + match + readFile + split + ; + + # NOTE: Maintaining the version separately would be a pain, and we + # can't have a flake.nix.in with a @VERSION@ because it would make + # the flake unusable without running autoconf first, defeating some + # of its purpose. + # + # So let's get it the hard way instead :) + extractVersionRegex = ''^AC_INIT\(\[spot], \[([^]]+)], \[spot@lrde\.epita\.fr]\)$''; + getLines = (fileContent: + filter isString (split "\n" fileContent) + ); + findVersionLine = (lines: + lib.lists.findFirst + (l: lib.strings.hasPrefix "AC_INIT(" l) + null + lines + ); + getVersion = (file: + let + lines = getLines (readFile file); + versionLine = findVersionLine lines; + version = head (match extractVersionRegex versionLine); + in + version + ); + in + { + lib, + pkgs, + stdenv, + # FIXME: do we want this flag? + buildOrgDoc ? false, + # Whether to enable Spot's Python 3 bindings + enablePython ? false + }: + stdenv.mkDerivation { + pname = "spot"; + version = getVersion ./configure.ac; + + src = self; + + enableParallelBuilding = true; + + # NOTE: Nix enables a lot of hardening flags by default, some of + # these probably harm performance so I've disabled everything + # (haven't benchmarked with vs without these, though). 
+ hardeningDisable = [ "all" ]; + + # NOTE: mktexpk fails without a HOME set + preBuild = '' + export HOME=$TMPDIR + patchShebangs tools + '' + (if buildOrgDoc then '' + ln -s ${pkgs.plantuml}/lib/plantuml.jar doc/org/plantuml.jar + '' else '' + touch doc/org-stamp + ''); + + configureFlags = [ + "--disable-devel" + "--enable-optimizations" + ] ++ lib.optional (!enablePython) [ + "--disable-python" + ]; + + nativeBuildInputs = with pkgs; [ + autoreconfHook + + autoconf + automake + bison + flex + libtool + perl + ] ++ lib.optional buildOrgDoc [ + graphviz + groff + plantuml + pdf2svg + R + ] ++ lib.optional enablePython [ + python3 + swig4 + ]; + + buildInputs = with pkgs; [ + # should provide the minimum amount of packages necessary for + # building tl.pdf + (texlive.combine { + inherit (texlive) + scheme-basic + latexmk + + booktabs + cm-super + doi + doublestroke + etoolbox + koma-script + mathabx-type1 + mathpazo + metafont + microtype + nag + pgf + standalone + stmaryrd + tabulary + todonotes + wasy-type1 + wasysym + ; + }) + ]; + }; + in + { + defaultPackage = self.packages.${system}.spot; + + packages = { + # binaries + library only + spot = pkgs.callPackage spotPackage {}; + + # NOTE: clang build is broken on Nix when linking to stdlib++, using + # libcxx instead. See: + # https://github.com/NixOS/nixpkgs/issues/91285 + spotClang = pkgs.callPackage spotPackage { + stdenv = pkgs.llvmPackages.libcxxStdenv; + }; + + spotWithOrgDoc = pkgs.callPackage spotPackage { + buildOrgDoc = true; + }; + + spotWithPython = pkgs.python3Packages.toPythonModule ( + pkgs.callPackage spotPackage { + enablePython = true; + } + ); + + spotFull = pkgs.python3Packages.toPythonModule ( + pkgs.callPackage spotPackage { + buildOrgDoc = true; enablePython = true; + } + ); + }; + + apps = mkSpotApps [ + "autcross" + "autfilt" + "dstar2tgba" + "genaut" + "genltl" + "ltl2tgba" + "ltl2tgta" + "ltlcross" + "ltldo" + "ltlfilt" + "ltlgrind" + "ltlmix" + "ltlsynt" + "randaut" + "randltl" + ]; + + devShell = pkgs.mkShell { + name = "spot-dev"; + inputsFrom = [ self.packages.${system}.spotFull ]; + buildInputs = [ + pkgs.gdb + pkgs.clang-tools # for clangd + pkgs.bear + + (pkgs.python3.withPackages (p: [ + p.jupyter + p.ipython # otherwise ipython module isn't found when running ipynb tests + ])) + ]; + }; + }); +} From c5746ef5cf1d9d939b5ddd1c987550c22c1c23df Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 13 Mar 2025 08:47:41 +0100 Subject: [PATCH 599/606] expansions: fix bogus false pairs in linear forms --- spot/tl/expansions2.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/spot/tl/expansions2.cc b/spot/tl/expansions2.cc index 96f970a94..c40298d7f 100644 --- a/spot/tl/expansions2.cc +++ b/spot/tl/expansions2.cc @@ -711,7 +711,10 @@ namespace spot if ((li & kj) != bddfalse) res.push_back({li & kj, fj}); } - res.push_back({li, formula::Fusion({ei, F})}); + + formula ei_fusion_F = formula::Fusion({ei, F}); + if (!ei_fusion_F.is(op::ff)) + res.push_back({li, ei_fusion_F}); } finalize(res, opts, d, seen); From 059c5072df4be153449a2f34477c4221720da41a Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Thu, 3 Mar 2022 11:31:03 +0100 Subject: [PATCH 600/606] nix: provide package in release tarballs --- .gitignore | 1 + Makefile.am | 6 +++++- default.nix.in | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 default.nix.in diff --git a/.gitignore b/.gitignore index 155a9b5e7..73745a48f 100644 --- a/.gitignore +++ b/.gitignore @@ -82,3 
+82,4 @@ GTAGS *.dsc *.gcov spot.spec +default.nix diff --git a/Makefile.am b/Makefile.am index e198a977c..a5d842b4c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -65,7 +65,8 @@ EXTRA_DIST = HACKING ChangeLog.1 tools/gitlog-to-changelog \ tools/help2man tools/man2html.pl \ tools/test-driver-teamcity $(UTF8) $(DEBIAN) \ m4/gnulib-cache.m4 .dir-locals.el \ - spot.spec spot.spec.in README.ltsmin + spot.spec spot.spec.in README.ltsmin \ + default.nix default.nix.in dist-hook: gen-ChangeLog @@ -111,3 +112,6 @@ deb: dist spot.spec: configure.ac spot.spec.in sed 's/[@]VERSION[@]/$(VERSION)/;s/[@]GITPATCH[@]/@@@$(GITPATCH)/;s/@@@\.//' spot.spec.in > $@.tmp && mv $@.tmp $@ + +default.nix: configure.ac default.nix.in + sed 's/[@]VERSION[@]/$(VERSION)/' default.nix.in > $@.tmp && mv $@.tmp $@ diff --git a/default.nix.in b/default.nix.in new file mode 100644 index 000000000..8101e4f74 --- /dev/null +++ b/default.nix.in @@ -0,0 +1,35 @@ +# -*- mode: nix; coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita +# (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +{ pkgs ? import {} }: +let + version = "@VERSION@"; +in +pkgs.stdenv.mkDerivation { + inherit version; + pname = "spot"; + + buildInputs = [ + pkgs.python3 + ]; + + src = ./.; + + enableParallelBuilding = true; +} From 59cfd6ed17a18c3371b23dd07b7aad1b5bc9752e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 10 Oct 2025 16:49:34 +0200 Subject: [PATCH 601/606] sonf: fix recursion of rewriting, was only called on operand * spot/tl/sonf.cc: Here. --- spot/tl/sonf.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spot/tl/sonf.cc b/spot/tl/sonf.cc index 29b613eaa..0b70ab5e3 100644 --- a/spot/tl/sonf.cc +++ b/spot/tl/sonf.cc @@ -131,7 +131,7 @@ namespace spot { // recurse into rhs first (_ []-> rhs) formula rhs = - f[1].map(extractor, extracted, extractor, false, false); + extractor(f[1], extracted, extractor, false, false); f = formula::binop(kind, f[0], rhs); formula ap = formula::ap(new_ap_name()); From be3597cb467ff41c789754e1dbbb3c67dbac20cc Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 10 Oct 2025 16:49:34 +0200 Subject: [PATCH 602/606] translate_aa: rename expansion option to lf * spot/twaalgos/translate_aa.cc: Here. 
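As an aside on this rename: a minimal sketch of how the selector might be driven programmatically, assuming the declarations added to spot/twaalgos/translate_aa.hh earlier in this series; the formula, the example's layout, and its variable names are illustrative only and are not part of any patch.

// Sketch only: select the linear-form ("lf") backend for the
// SERE-to-automaton step of the alternating translation.
#include <spot/tl/parse.hh>
#include <spot/twa/bdddict.hh>
#include <spot/twaalgos/translate_aa.hh>

int main()
{
  spot::bdd_dict_ptr dict = spot::make_bdd_dict();
  spot::formula f = spot::parse_formula("{a;b*} []-> c");
  // After this commit the accepted values are "bdd", "derive", and "lf".
  // Same effect as SPOT_SERE_AA_TRANSLATE_OPT=lf; the environment variable
  // is only consulted on the first call made without an argument.
  spot::sere_aa_translation_options("lf");
  spot::twa_graph_ptr aut = spot::ltl_to_aa(f, dict);
  (void) aut;
  return 0;
}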
--- spot/twaalgos/translate_aa.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 095f92c5a..15633b4ec 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -529,12 +529,12 @@ namespace spot pref = 0; else if (!strcasecmp(version, "derive")) pref = 1; - else if (!strcasecmp(version, "expansion")) + else if (!strcasecmp(version, "lf")) pref = 2; else { const char* err = ("sere_aa_translation_options(): argument" - " should be one of {bdd,derive,expansion}"); + " should be one of {bdd,derive,lf}"); if (env) err = "SPOT_SERE_AA_TRANSLATE_OPT should be one of {bdd,derive,expansion}"; throw std::runtime_error(err); From 5156ac12865b9bd5b52691fdee63294a9991ae80 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 10 Oct 2025 16:49:34 +0200 Subject: [PATCH 603/606] translate_aa: Factorize sere translation choice * spot/twaalgos/translate_aa.cc: Here. --- spot/twaalgos/translate_aa.cc | 48 ++++++++++++----------------------- 1 file changed, 16 insertions(+), 32 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 15633b4ec..799a7e6f3 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -34,6 +34,20 @@ namespace spot { namespace { + twa_graph_ptr sere_aa_translate(formula f, const bdd_dict_ptr& dict) + { + // old bdd method + if (sere_aa_translation_options() == 0) + return sere_to_tgba(f, dict, true); + + // derivation + if (sere_aa_translation_options() == 1) + return derive_finite_automaton_with_first(f, dict); + + // linear form + return expand_finite_automaton(f, dict, exp_opts::expand_opt::None); + } + struct ltl_to_aa_builder { ltl_to_aa_builder(twa_graph_ptr aut, unsigned accepting_sink) @@ -289,22 +303,7 @@ namespace spot { unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); - twa_graph_ptr sere_aut; - if (sere_aa_translation_options() == 0) - { - // old bdd method - sere_aut = sere_to_tgba(f[0], dict, true); - } - else if (sere_aa_translation_options() == 1) - { - // derivation - sere_aut = derive_finite_automaton_with_first(f[0], dict); - } - else - { - // linear form - sere_aut = expand_finite_automaton(f[0], dict, exp_opts::expand_opt::None); - } + twa_graph_ptr sere_aut = sere_aa_translate(f[0], dict); // TODO: this should be a std::vector ! 
std::vector acc_states; @@ -345,22 +344,7 @@ namespace spot { unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); - twa_graph_ptr sere_aut; - if (sere_aa_translation_options() == 0) - { - // old bdd method - sere_aut = sere_to_tgba(f[0], dict, true); - } - else if (sere_aa_translation_options() == 1) - { - // derivation - sere_aut = derive_finite_automaton_with_first(f[0], dict); - } - else - { - // linear form - sere_aut = expand_finite_automaton(f[0], dict, exp_opts::expand_opt::None); - } + twa_graph_ptr sere_aut = sere_aa_translate(f[0], dict); // DFA recognizes the empty language, so {0} []-> rhs is always true unsigned ns = sere_aut->num_states(); From 8c6b1d90c62e5beec976505a2f927300f16bcb98 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 15 Oct 2025 14:24:08 +0200 Subject: [PATCH 604/606] translate_aa: expose lf-trans option * spot/twaalgos/translate_aa.cc: --- spot/twaalgos/translate_aa.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 799a7e6f3..163afaf3b 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -45,7 +46,11 @@ namespace spot return derive_finite_automaton_with_first(f, dict); // linear form - return expand_finite_automaton(f, dict, exp_opts::expand_opt::None); + if (sere_aa_translation_options() == 2) + return expand_finite_automaton(f, dict, exp_opts::expand_opt::None); + + // linear form - trans-based + return expand_finite_automaton2(f, dict, exp_opts::expand_opt::None); } struct ltl_to_aa_builder @@ -515,12 +520,14 @@ namespace spot pref = 1; else if (!strcasecmp(version, "lf")) pref = 2; + else if (!strcasecmp(version, "lft")) + pref = 3; else { const char* err = ("sere_aa_translation_options(): argument" - " should be one of {bdd,derive,lf}"); + " should be one of {bdd,derive,lf,lft}"); if (env) - err = "SPOT_SERE_AA_TRANSLATE_OPT should be one of {bdd,derive,expansion}"; + err = "SPOT_SERE_AA_TRANSLATE_OPT should be one of {bdd,derive,lf,lft}"; throw std::runtime_error(err); } } From bb33c5120fdcb778253627df6bf72ff96794ac5e Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Wed, 15 Oct 2025 14:24:08 +0200 Subject: [PATCH 605/606] translate_aa: fix construction for transition based acc * spot/twaalgos/translate_aa.cc: Here. 
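One clarification before the diff: with transition-based acceptance the Büchi marks are carried by the edges rather than by the states, and that is exactly the case this commit teaches the copy of the SERE automaton to handle. A tiny sketch of that view follows; it is not part of the patch and the helper name is made up.

// Sketch only: enumerate accepting edges of a transition-based automaton.
#include <iostream>
#include <spot/twa/twagraph.hh>

static void print_accepting_edges(const spot::twa_graph_ptr& aut)
{
  for (unsigned s = 0; s < aut->num_states(); ++s)
    for (const auto& e : aut->out(s))
      if (e.acc)   // the acceptance mark sits on the edge, not on e.dst
        std::cout << e.src << " -> " << e.dst << '\n';
}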
--- spot/twaalgos/alternation.cc | 19 +++++++- spot/twaalgos/alternation.hh | 1 + spot/twaalgos/translate_aa.cc | 86 +++++++++++++++++++++++++---------- 3 files changed, 81 insertions(+), 25 deletions(-) diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index fb95fd6f3..8de30af15 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -39,6 +39,7 @@ namespace spot } bdd outedge_combiner::operator()(unsigned st, const std::vector& dst_filter, + const std::vector>& edge_filter, bool remove_original_edges) { const auto& dict = aut_->get_dict(); @@ -49,7 +50,23 @@ namespace spot for (auto& e: aut_->out(d1)) { // handle edge filtering - if (!dst_filter.empty()) + if (!edge_filter.empty()) + { + // Trying all univ dests for e, find if there was at least one + // compatible edge that was accepting in the original TFA + auto univ_dests = aut_->univ_dests(e.dst); + if (std::all_of(univ_dests.begin(), univ_dests.end(), + [&](unsigned dst) { + for (const auto& acc_e : edge_filter) + if(std::get<0>(acc_e) == e.src + && std::get<2>(acc_e) == dst + && bdd_implies(e.cond, std::get<1>(acc_e))) + return false; // false because we don't want to skip it + return true; + })) + continue; + } + else if (!dst_filter.empty()) // same for state-based acc { // if any edge destination is an accepting state in the SERE // automaton, handle the edge, otherwise skip it diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index 8d1027e8b..e2e719e1d 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -54,6 +54,7 @@ namespace spot outedge_combiner(const twa_graph_ptr& aut, unsigned sink = -1u); ~outedge_combiner(); bdd operator()(unsigned st, const std::vector& dst_filter = std::vector(), + const std::vector>& edge_filter = std::vector>(), bool remove_original_edges = false); void new_dests(unsigned st, bdd out) const; }; diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 163afaf3b..6be7c4f85 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -97,10 +97,11 @@ namespace spot unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut, std::map& old_to_new, - std::vector* acc_states = nullptr, + std::vector* acc_edges = nullptr, bool use_accepting_sink = true) { unsigned ns = sere_aut->num_states(); + bool trans_based = sere_aut->prop_state_acc().is_false(); // TODO: create all new states at once, keeping an initial offset (the // number of states already present in aut_) @@ -111,8 +112,6 @@ namespace spot { unsigned new_st = aut_->new_state(); p.first->second = new_st; - if (acc_states != nullptr && sere_aut->state_is_accepting(st)) - acc_states->push_back(new_st); } return p.first->second; }; @@ -122,10 +121,22 @@ namespace spot unsigned new_st = register_state(st); for (const auto& e : sere_aut->out(st)) { - if (use_accepting_sink && sere_aut->state_is_accepting(e.dst)) - aut_->new_edge(new_st, accepting_sink_, e.cond); - else - aut_->new_edge(new_st, register_state(e.dst), e.cond); + bool edge_is_acc = ((trans_based && e.acc) + || (!trans_based && sere_aut->state_is_accepting(e.dst))); + + if (edge_is_acc) + { + // point to accepting sink instead of original dst if asked + if (use_accepting_sink) + aut_->new_edge(new_st, accepting_sink_, e.cond); + else + { + unsigned new_e = aut_->new_edge(new_st, register_state(e.dst), e.cond); + // remember if old edges were accepting + if (acc_edges != nullptr) + acc_edges->push_back(new_e); + } + } } } @@ -311,25 +322,19 @@ namespace 
spot twa_graph_ptr sere_aut = sere_aa_translate(f[0], dict); // TODO: this should be a std::vector ! - std::vector acc_states; - std::map old_to_new; - copy_sere_aut_to_res(sere_aut, old_to_new, &acc_states, false); - std::vector acc_edges; + std::map old_to_new; + copy_sere_aut_to_res(sere_aut, old_to_new, &acc_edges, false); + + // mark all edges from NFA in new automaton unsigned ns = sere_aut->num_states(); for (unsigned st = 0; st < ns; ++st) { auto it = old_to_new.find(st); assert(it != old_to_new.end()); unsigned new_st = it->second; - for (auto& e : aut_->out(new_st)) - { - e.acc = acc_cond::mark_t{0}; - if (std::find(acc_states.begin(), acc_states.end(), e.dst) - != acc_states.end()) - acc_edges.push_back(aut_->edge_number(e)); - } + e.acc = acc_cond::mark_t{0}; } for (unsigned i : acc_edges) @@ -350,13 +355,26 @@ namespace spot unsigned rhs_init = recurse(f[1]); const auto& dict = aut_->get_dict(); twa_graph_ptr sere_aut = sere_aa_translate(f[0], dict); + bool trans_based = sere_aut->prop_state_acc().is_false(); // DFA recognizes the empty language, so {0} []-> rhs is always true unsigned ns = sere_aut->num_states(); - bool has_accepting_state = false; - for (unsigned st = 0; st < ns && !has_accepting_state; ++st) - has_accepting_state = sere_aut->state_is_accepting(st); - if (!has_accepting_state) + bool accepts = false; + for (unsigned st = 0; st < ns && !accepts; ++st) + { + if (trans_based) + { + for (const auto& e : sere_aut->out(st)) + if (e.acc) + { + accepts = true; + break; + } + } + else + accepts = sere_aut->state_is_accepting(st); + } + if (!accepts) return accepting_sink_; std::map old_to_new; @@ -367,6 +385,8 @@ namespace spot std::vector univ_dest; // TODO: this should be a std::vector ! std::vector acc_states; + // any edge compatible with that should be considered accepting + std::vector> acc_edges; // registers a state in various maps and returns the index of the // anonymous bdd var representing that state @@ -381,7 +401,7 @@ namespace spot old_to_new.emplace(st, new_st); var_to_state.emplace(v, new_st); - if (sere_aut->state_is_accepting(st)) + if (!trans_based && sere_aut->state_is_accepting(st)) acc_states.push_back(new_st); vars &= bdd_ithvar(v); @@ -390,6 +410,15 @@ namespace spot return p.first->second; }; + // FIXME: this code handles dualization, but we cannot dualize if + // this situation arises: + // + // State: 0 + // [a] 1 + // [a] 2 {0} + // + // The quick fix is to simply determinize the NFA before dualizing, + // which removes any existentialism. aut_->copy_ap_of(sere_aut); for (unsigned st = 0; st < ns; ++st) { @@ -400,6 +429,15 @@ namespace spot { int st_bddi = register_state(e.dst); sig |= e.cond & bdd_ithvar(st_bddi); + + // register edge that was accepting in TFA + if (trans_based && e.acc) + { + unsigned new_src = old_to_new[e.src]; + unsigned new_dst = old_to_new[e.dst]; + + acc_edges.push_back({new_src, e.cond, new_dst}); + } } for (bdd cond : minterms_of(bddtrue, aps)) @@ -435,7 +473,7 @@ namespace spot unsigned new_st = it->second; bdd comb = bddtrue; - comb &= oe_(new_st, acc_states, true); + comb &= oe_(new_st, acc_states, acc_edges, true); if (comb != bddtrue) { comb &= oe_(rhs_init); From c63412ab947ce0e16d2ec637da128048c25f37b4 Mon Sep 17 00:00:00 2001 From: Antoine Martin Date: Fri, 17 Oct 2025 17:36:40 +0200 Subject: [PATCH 606/606] translate_aa: fix sere copy When adapting the code to handle transition-based acceptance, a bug was introduced so that only accepting edges were copied to the result automaton. 
The self-loop labeled as false edge-case is here as prevention, I am not sure it happens in practice. * spot/twaalgos/translate_aa.cc: Here. --- spot/twaalgos/translate_aa.cc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc index 6be7c4f85..50096a2c3 100644 --- a/spot/twaalgos/translate_aa.cc +++ b/spot/twaalgos/translate_aa.cc @@ -124,19 +124,16 @@ namespace spot bool edge_is_acc = ((trans_based && e.acc) || (!trans_based && sere_aut->state_is_accepting(e.dst))); - if (edge_is_acc) - { - // point to accepting sink instead of original dst if asked - if (use_accepting_sink) - aut_->new_edge(new_st, accepting_sink_, e.cond); - else - { - unsigned new_e = aut_->new_edge(new_st, register_state(e.dst), e.cond); - // remember if old edges were accepting - if (acc_edges != nullptr) - acc_edges->push_back(new_e); - } - } + unsigned new_edge; + // point to accepting sink instead of original dst if asked + if(use_accepting_sink && edge_is_acc) + new_edge = aut_->new_edge(new_st, accepting_sink_, e.cond); + else + new_edge = aut_->new_edge(new_st, register_state(e.dst), e.cond); + + // remember if old edges were accepting + if (acc_edges != nullptr && edge_is_acc) + acc_edges->push_back(new_edge); } } @@ -340,6 +337,9 @@ namespace spot for (unsigned i : acc_edges) { auto& e1 = aut_->edge_storage(i); + // self loop used to mark accepting state, skip it! + if (e1.cond == bddfalse) + continue; for (const auto& e2 : aut_->out(rhs_init)) aut_->new_edge(e1.src, e2.dst, e1.cond & e2.cond); }
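To close the series, here is a minimal sketch of how the other switch, the one added to the FM translator in patch 591, might be exercised. The formula and the use of ltl_to_tgba_fm with all remaining arguments defaulted are assumptions made for brevity; note also that patch 593 forces the historical "bdd" path inside sere_to_tgba itself, so the effect of the switch is observed through the translation of PSL formulas.

// Sketch only: choose the expansion-based handling of SERE subformulas in
// the FM translation (same effect as SPOT_SERE_TRANSLATE_OPT=expansion,
// which is read only on the first call made without an argument).
#include <spot/tl/parse.hh>
#include <spot/twa/bdddict.hh>
#include <spot/twaalgos/ltl2tgba_fm.hh>

int main()
{
  spot::bdd_dict_ptr dict = spot::make_bdd_dict();
  spot::formula f = spot::parse_formula("G({a;b} |-> c)");
  spot::sere_translation_options("expansion");  // "bdd" restores the old path
  spot::twa_graph_ptr aut = spot::ltl_to_tgba_fm(f, dict);
  (void) aut;
  return 0;
}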