diff --git a/.dir-locals.el b/.dir-locals.el index 7bc423371..5de24fdfc 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -3,9 +3,9 @@ (require-final-newline . t) (mode . global-whitespace) (bug-reference-bug-regexp - . "\\(?:[Ff]ix\\(es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)") + . "\\(?1:\\(?:[Ff]ix\\(?:es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)\\)") (bug-reference-url-format - . "https://gitlab.lrde.epita.fr/spot/spot/issues/%s") + . "https://gitlab.lre.epita.fr/spot/spot/issues/%s") (mode . bug-reference) (magit-branch-adjust-remote-upstream-alist ("origin/next" . "/")))) (c++-mode . ((c-default-style . "gnu") diff --git a/.gitignore b/.gitignore index 7392a79db..73745a48f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ configure config.log config.status aclocal.m4 +ltargz.m4 autom4te.cache libtool auto @@ -81,3 +82,4 @@ GTAGS *.dsc *.gcov spot.spec +default.nix diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3c66af0b7..348bacba1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,12 +19,12 @@ debian-stable-gcc: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable script: - autoreconf -vfi - - ./configure --enable-max-accsets=256 + - ./configure --enable-max-accsets=256 --enable-pthread - make - - make distcheck + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-max-accsets=256 --enable-pthread' artifacts: when: always paths: @@ -32,22 +32,47 @@ debian-stable-gcc: - ./*.log - ./*.tar.gz +# We build on Debian unstable because we want an up-to-date Automake. +# (See issue #512.) We do not run distcheck here to speedup this build +# that several other builds depend upon. Other builds will run distcheck. +make-dist: + stage: build + only: + - branches + except: + - /wip/ + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian + script: + - autoreconf -vfi + - ./configure --disable-static --enable-doxygen + - make + - make dist + - autoconf --trace='AC_INIT:$2' > VERSION + artifacts: + when: always + paths: + - spot-*/_build/sub/tests/*/*.log + - ./*.log + - ./*.tar.gz + - VERSION + +# We --disable-devel for coverage, because debug mode replaces +# SPOT_UNREACHABLE by an assertion wich is never reachable, lowering +# our coverage. debian-unstable-gcc-coverage: stage: build only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - autoreconf -vfi - - ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen + - ./configure CXX='g++ --coverage' --disable-devel --enable-warnings --disable-static --enable-doxygen - make - make check - - lcov --capture --directory . 
--no-external --output spot.info - - lcov --remove spot.info '*/bin/spot.cc' '*/bin/spot-x.cc' '*/spot/parse*/scan*.cc' '*/spot/parse*/parse*.cc' '*/utf8/*' '*/python/*' '*/buddy/*' '*/doc/org/tmp/*' --output spot2.info - - lcov --summary spot2.info - - genhtml --legend --demangle-cpp --output-directory coverage spot2.info + - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 --fail-under-line 90.7 + coverage: /^\s*lines:\s*\d+.\d+\%/ artifacts: when: always paths: @@ -55,50 +80,66 @@ debian-unstable-gcc-coverage: - ./*.log - doc/spot.html/ - doc/userdoc/ - - coverage/ + - coverage*.html + - coverage*.css - ./*.tar.gz - - spot2.info + reports: + coverage_report: + coverage_format: cobertura + path: coverage.xml debian-unstable-gcc-pypy: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - - autoreconf -vfi + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - cd spot-$VERSION - ./configure PYTHON=/usr/bin/pypy3 --disable-static - make - make check TESTS='$(TESTS_python) $(TESTS_ipython)' artifacts: when: always paths: - - tests/*/*.log - - ./*.log + - spot-*/tests/*/*.log + - spot-*/*.log +# With emacs now using gcc for on-the-fly compilation, +# we cannot rebuild the documentation using gcc-snapshot. So we start +# from the tarball instead. debian-gcc-snapshot: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - export PATH="/usr/lib/gcc-snapshot/bin:$PATH" LD_LIBRARY_PATH="/usr/lib/gcc-snapshot/lib:$LD_LIBRARY_PATH" - - autoreconf -vfi + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - cd spot-$VERSION - ./configure --with-included-ltdl CXX='g++' - make - - make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl' + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl' allow_failure: true artifacts: when: always paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log - - doc/spot.html/ - - doc/userdoc/ - - ./*.tar.gz + - spot-*/tests/*/*.log + - spot-*/*.log alpine-gcc: stage: build @@ -106,12 +147,12 @@ alpine-gcc: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/alpine + image: gitlab-registry.lre.epita.fr/spot/buildenv/alpine script: - autoreconf -vfi - ./configure - make - - make distcheck || { chmod -R u+w ./spot-*; false; } + - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-pthread' || { chmod -R u+w ./spot-*; false; } artifacts: when: always paths: @@ -125,7 +166,7 @@ arch-clang: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch + image: gitlab-registry.lre.epita.fr/spot/buildenv/arch script: - autoreconf -vfi - ./configure --prefix ~/install_dir CC='clang -Qunused-arguments' CXX='clang++ -Qunused-arguments' --enable-devel --enable-c++20 --enable-doxygen @@ -138,22 +179,30 @@ arch-clang: - ./*.log arch-gcc-glibcxxdebug: - stage: build + stage: build2 + needs: + - job: make-dist + artifacts: true + variables: + 
GIT_STRATEGY: none only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch + image: gitlab-registry.lre.epita.fr/spot/buildenv/arch script: - - autoreconf -vfi - - ./configure --enable-devel --enable-c++20 --enable-glibcxx-debug + - VERSION=`cat VERSION` + - tar xvf spot-$VERSION.tar.gz + - mkdir build-$VERSION + - cd build-$VERSION + - ../spot-$VERSION/configure --enable-devel --enable-c++20 --enable-glibcxx-debug - make - make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-devel --enable-c++20 --enable-glibcxx-debug' artifacts: when: on_failure paths: - - ./spot-*/_build/sub/tests/*/*.log - - ./*.log + - build-*/spot-*/_build/sub/tests/*/*.log + - build-*/*.log mingw-shared: stage: build2 @@ -161,15 +210,17 @@ mingw-shared: # We start from the tarball generated from a non-cross-compiling # job, so that all generated files are included, especially those # built from the executables. - - job: debian-stable-gcc + - job: make-dist artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - - VERSION=`autoconf --trace='AC_INIT:$2'` + - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz - cd spot-$VERSION - ./configure CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++-posix --host i686-w64-mingw32 --disable-python @@ -186,15 +237,17 @@ mingw-static: # We start from the tarball generated from a non-cross-compiling # job, so that all generated files are included, especially those # built from the executables. - - job: debian-stable-gcc + - job: make-dist artifacts: true + variables: + GIT_STRATEGY: none only: - branches except: - /wip/ - image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian + image: gitlab-registry.lre.epita.fr/spot/buildenv/debian script: - - VERSION=`autoconf --trace='AC_INIT:$2'` + - VERSION=`cat VERSION` - tar xvf spot-$VERSION.tar.gz - cd spot-$VERSION - mkdir install_dir @@ -214,17 +267,19 @@ mingw-static: debpkg-stable: stage: build + variables: + GIT_STRATEGY: none only: - /-deb$/ - master - next - stable script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable - - vol=spot-stable-$CI_COMMIT_SHA + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? + - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? - docker cp helper-$vol:/build/result _build_stable || exitcode=$? - docker rm helper-$vol || exitcode=$? - docker volume rm $vol || exitcode=$? 
@@ -238,6 +293,8 @@ debpkg-stable: debpkg-stable-i386: stage: build2 + variables: + GIT_STRATEGY: none only: - /-deb$/ - master @@ -246,11 +303,11 @@ debpkg-stable-i386: tags: ["x86"] needs: ["debpkg-stable"] script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable - - vol=spot-stable-$CI_COMMIT_SHA + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable + - vol=spot-stable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? + - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$? - docker cp _build_stable/. helper-$vol:/build/result || exitcode=$? - rm -rf _build_stable - docker start -a helper-$vol || exitcode=$? @@ -267,15 +324,17 @@ debpkg-stable-i386: debpkg-unstable: stage: build + variables: + GIT_STRATEGY: none only: - /-deb$/ - next script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian - - vol=spot-unstable-$CI_COMMIT_SHA + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? + - docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$? - docker cp helper-$vol:/build/result _build_unstable || exitcode=$? - docker rm helper-$vol || exitcode=$? - docker volume rm $vol || exitcode=$? @@ -289,17 +348,19 @@ debpkg-unstable: debpkg-unstable-i386: stage: build2 + variables: + GIT_STRATEGY: none only: - /-deb$/ - next tags: ["x86"] needs: ["debpkg-unstable"] script: - - docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386 - - vol=spot-unstable-$CI_COMMIT_SHA + - docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 + - vol=spot-unstable-$CI_COMMIT_SHA-$CI_PIPELINE_ID - docker volume create $vol - exitcode=0 - - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? + - docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$? - docker cp _build_unstable/. helper-$vol:/build/result || exitcode=$? - rm -rf _build_unstable - docker start -a helper-$vol || exitcode=$? 
@@ -321,7 +382,7 @@ rpm-pkg: - master - next - stable - image: gitlab-registry.lrde.epita.fr/spot/buildenv/fedora + image: gitlab-registry.lre.epita.fr/spot/buildenv/fedora script: - autoreconf -vfi - ./configure @@ -340,6 +401,8 @@ rpm-pkg: publish-rpm: stage: publish + variables: + GIT_STRATEGY: none only: - /-rpm$/ - next @@ -350,6 +413,7 @@ publish-rpm: - rpm-pkg script: - case $CI_COMMIT_REF_NAME in stable) rput fedora stable *.rpm;; next) rput fedora unstable *.rpm;; esac + - rm -rf ./* publish-stable: only: @@ -357,15 +421,23 @@ publish-stable: tags: - dput stage: publish + variables: + GIT_STRATEGY: none dependencies: - debpkg-stable-i386 + - make-dist script: - cd _build_stable - ls -l - - dput lrde *.changes + - dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1` + - cd .. + - ls -l - tgz=`ls spot-*.tar.* | head -n 1` - case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac - - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline + - rm -rf ./* + - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline + - curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/" + - curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz" publish-unstable: only: @@ -373,14 +445,18 @@ publish-unstable: tags: - dput stage: publish + variables: + GIT_STRATEGY: none dependencies: - debpkg-unstable-i386 script: - cd _build_unstable - ls -l - - dput lrde *.changes - - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline - - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lrde.epita.fr/api/v4/projects/181/trigger/pipeline + - dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1` + - cd .. + - rm -rf _build_unstable + - curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline + - curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline raspbian: stage: build diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..41bc60980 --- /dev/null +++ b/.mailmap @@ -0,0 +1,20 @@ +Ala-Eddine Ben-Salem +Ala-Eddine Ben-Salem +Ala-Eddine Ben-Salem +Antoine Martin +Arthur Remaud +Arthur Remaud +Damien Lefortier +Felix Abecassis +Felix Abecassis +Felix Abecassis +Guillaume Sadegh +Guillaume Sadegh +Henrich Lauko +Henrich Lauko +Jerome Dubois Jérôme Dubois +Philipp Schlehuber-Caissier +Thibaud Michaud +Thomas Badie +Rachid Rebiha +Thomas Martinez diff --git a/HACKING b/HACKING index 8841b033c..f2cf27e8c 100644 --- a/HACKING +++ b/HACKING @@ -5,11 +5,11 @@ Bootstraping from the GIT repository Spot's gitlab page is at - https://gitlab.lrde.epita.fr/spot/spot + https://gitlab.lre.epita.fr/spot/spot The GIT repository can be cloned with - git clone https://gitlab.lrde.epita.fr/spot/spot.git + git clone https://gitlab.lre.epita.fr/spot/spot.git Some files in SPOT's source tree are generated. They are distributed so that users do not need to install tools to rebuild them, but we @@ -25,7 +25,7 @@ since the generated files they produce are distributed.) 
GNU Automake >= 1.11
    GNU Libtool >= 2.4
    GNU Flex >= 2.6
-   GNU Bison >= 3.0
+   GNU Bison >= 3.3
    GNU Emacs (preferably >= 24 but it may work with older versions)
    org-mode >= 9.1 (the version that comes bundled with your emacs
       version is likely out-of-date; but distribution often have
@@ -290,8 +290,8 @@ would understand with:

  make check LOG_DRIVER=$PWD/tools/test-driver-teamcity

-Coding conventions
-==================
+C++ Coding conventions
+======================

Here some of the conventions we follow in Spot, so that the code
looks homogeneous. Please follow these strictly. Since this is free
@@ -682,3 +682,43 @@ Other style recommandations

* Always code as if the person who ends up maintaining your code is
  a violent psychopath who knows where you live.
+
+
+Coding conventions for Python Tests
+===================================
+
+Unless you have some specific reason to write test cases in C++ (for
+instance to test specific C++ constructions, or to use valgrind),
+prefer writing test cases in Python. Writing test cases in C++
+requires some compilation, which slows down the test suite. Doing the
+same test in Python is therefore faster, and it has the added benefit
+of ensuring that the Python bindings work.
+
+We have two types of Python tests: Python scripts or Jupyter
+notebooks. Jupyter notebooks are usually used for a sequence of
+examples and comments that can also serve as part of the
+documentation. Such Jupyter notebooks should be added to the list of
+code examples in doc/org/tut.org. Testing a notebook is done by the
+tests/python/ipnbdoctest.py script, which evaluates each cell and
+checks that the obtained result is equivalent to the result saved in
+the notebook. The process is a bit slow, so plain Python scripts
+should be preferred for most tests.
+
+If you do need a notebook to test Jupyter-specific code, but this
+notebook should not be shown in the documentation, use a filename
+starting with '_'.
+
+Tests written as Python scripts should follow the same convention as
+shell scripts: exit 0 for PASS, exit 77 for SKIP, and any other exit
+code for FAIL.
+
+Do not use assert() in those scripts, as (1) asserts can be disabled,
+and (2) they provide poor insights in case of failures. Instead do
+
+  from unittest import TestCase
+  tc = TestCase()
+
+and then use tc.assertTrue(...), tc.assertEqual(..., ...),
+tc.assertIn(..., ...), etc. In case of failures, those will print
+useful messages in the trace of the tests. For instance multiline
+strings that should have been equal will be presented with a diff.
diff --git a/Makefile.am b/Makefile.am
index a0dc9a316..f1751c2a2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,5 +1,5 @@
## -*- coding: utf-8 -*-
-## Copyright (C) 2011-2017, 2020 Laboratoire de Recherche et Développement
+## Copyright (C) 2011-2017, 2020, 2022 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
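To make the Python test conventions above concrete, here is a minimal sketch of what such a plain-Python test script could look like (illustrative only; the formula and the assertions are invented for the example):

  #!/usr/bin/env python3
  # Minimal test skeleton following the HACKING conventions above.
  import sys
  from unittest import TestCase

  try:
      import spot
  except ImportError:
      sys.exit(77)          # SKIP when the Python bindings are unavailable

  tc = TestCase()

  f = spot.formula('a U b')
  tc.assertEqual(str(f), 'a U b')      # failures print a readable diff

  aut = spot.translate(f)
  tc.assertTrue(aut.num_states() >= 1)
  # Falling off the end exits 0, which the test suite reports as PASS.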
## Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), ## département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -36,8 +36,9 @@ DOC_SUBDIR = doc SUBDIRS = picosat buddy lib ltdl spot bin tests $(PYTHON_SUBDIR) $(DOC_SUBDIR) \ $(NEVER_SUBDIRS) -UTF8 = utf8/README.md utf8/utf8.h \ - utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h +UTF8 = utf8/README.md utf8/LICENSE utf8/utf8.h \ + utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h \ + utf8/utf8/cpp11.h utf8/utf8/cpp17.h DEBIAN = \ debian/changelog \ @@ -68,7 +69,8 @@ EXTRA_DIST = HACKING ChangeLog.1 tools/gitlog-to-changelog \ tools/help2man tools/man2html.pl \ tools/test-driver-teamcity $(UTF8) $(DEBIAN) \ m4/gnulib-cache.m4 .dir-locals.el \ - spot.spec spot.spec.in + spot.spec spot.spec.in \ + default.nix default.nix.in dist-hook: gen-ChangeLog @@ -114,3 +116,6 @@ deb: dist spot.spec: configure.ac spot.spec.in sed 's/[@]VERSION[@]/$(VERSION)/;s/[@]GITPATCH[@]/@@@$(GITPATCH)/;s/@@@\.//' spot.spec.in > $@.tmp && mv $@.tmp $@ + +default.nix: configure.ac default.nix.in + sed 's/[@]VERSION[@]/$(VERSION)/' default.nix.in > $@.tmp && mv $@.tmp $@ diff --git a/NEWS b/NEWS index 928a25da2..9a2b7ccbd 100644 --- a/NEWS +++ b/NEWS @@ -1,16 +1,203 @@ -New in spot 2.10.4.dev (net yet released) +New in spot 2.11.5.dev (not yet released) + + Library: + + - The following new trivial simplifications have been implemented for SEREs: + - f|[+] = [+] if f rejects [*0] + - f|[*] = [*] if f accepts [*0] + - f&&[+] = f if f rejects [*0] + - b:b[*i..j] = b[*max(i,1)..j] + - b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1, j+l-1] + + - The HOA parser is a bit smarter when merging multiple initial + states into a single initial state (Spot's automaton class + supports only one): it now reuse the edges leaving initial states + without incoming transitions. + + - spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula() + that converts a BDD into a CNF instead of a DNF. + +New in spot 2.11.5 (2023-04-20) + + Bug fixes: + + - Fix spurious failure of ltlsynt2.test when Python is not installed + (issue #530). + + - Building from the git repository would fail to report a missing + emacs (issue #528). + + - Fix exception raised by aut1.intersecting_run(aut2).as_twa() + because the run did not match transitions present in aut1 + verbatim. We also changed the behavior of as_twa() to not merge + identical states. + + - Fix segfaults occuring in determinization of 1-state terminal + automata. + + - Fix incorrect assertion in game solver when the edge vector + contains deleted transitions. + +New in spot 2.11.4 (2023-02-10) + + Python: + + - spot.acd() no longer depends on jQuery for interactivity. + + Bug fixes: + + - When merging initial states from state-based automata with + multiple initial states (because Spot supports only one), the HOA + parser could break state-based acceptance. (Issue #522.) + + - autfilt --highlight-word refused to work on automata with Fin + acceptance for historical reasons, however the code has been + perfectly able to handle this for a while. (Issue #523.) + + - delay_branching_here(), a new optimization of Spot 2.11 had an + incorrect handling of states without successors, causing some + segfaults. (Issue #524.) + + - Running delay_branching_here() on state-based automata (this was not + done in Spot so far) may require the output to use transition-based + acceptance. (Issue #525.) 
+
+  - to_finite(), introduced in 2.11, had a bug that could break the
+    completeness of automata and trigger an exception from the HOA
+    printer. (Issue #526.)
+
+New in spot 2.11.3 (2022-12-09)
+
+  Bug fixes:
+
+  - Automata-based implication checks, used to simplify formulas, were
+    slower than necessary because the translator was configured to
+    favor determinism unnecessarily. (Issue #521.)
+
+  - Automata-based implication checks for f&g and f|g could be
+    very slow when those n-ary operators had too many arguments.
+    They have been limited to 16 operands, but this value can be changed
+    with option -x tls-max-ops=N. (Issue #521 too.)
+
+  - Running ltl_to_tgba_fm() with an output_aborter (which is done
+    during automata-based implication checks) would leak memory on
+    abort.
+
+  - configure --with-pythondir should also redefine pyexecdir,
+    otherwise, libraries get installed in the wrong place on Debian.
+    (Issue #512.)
+
+  - The HOA parser used to silently declare unused and undefined states
+    (e.g., when the State: header declares many more states than the body
+    of the file). It now warns about those.
+
+  - 'autfilt -c ...' should display a match count even in the presence
+    of parse errors.
+
+  - Calling solve_parity_game() multiple times on the same automaton
+    used to append the new strategy to the existing one instead of
+    overwriting it.
+
+New in spot 2.11.2 (2022-10-26)
+
+  Command-line tools:
+
+  - The --stats specifications %s, %e, %t for printing the number of
+    (reachable) states, edges, and transitions, learned to support
+    options [r], [u], [a] to indicate whether only reachable, unreachable,
+    or all elements should be counted.
+
+  Library:
+
+  - spot::reduce_parity() now has a "layered" option to force all
+    transitions in the same parity layer to receive the same color,
+    as acd_transform() would do.
+
+  Bugs fixed:
+
+  - Fix pkg-config files containing @LIBSPOT_PTHREAD@ (issue #520).
+  - spot::relabel_bse() was incorrectly relabeling some dependent
+    Boolean subexpressions in SERE. (Note that this had no
+    consequence on automata translated from those SERE.)
+
+New in spot 2.11.1 (2022-10-10)
+
+  Bugs fixed:
+
+  - Fix a build issue preventing the update of the website (issue #516).
+  - Fix a compilation error with clang-14 on FreeBSD (issue #515).
+
+New in spot 2.11 (2022-10-08)
+
+  Build:
+
+  - configure will now diagnose situations where Python bindings will
+    be installed in a directory that is not part of Python's search
+    path. A new configure option --with-pythondir can be used to
+    modify this installation path.
+
+  - A new configure option --enable-pthread enables the compilation of
+    Spot with -pthread, and makes the parallel versions of some
+    algorithms available. If Spot is compiled with -pthread enabled, any
+    user linking with Spot should also link with the pthread library.
+    In order not to break existing build setups using Spot, this
+    option is currently disabled by default in this release. We plan
+    to turn it on by default in some future release. Third-party
+    projects using Spot may want to start linking with -pthread in
+    anticipation of this change.

  Command-line tools:

  - autfilt has a new options --aliases=drop|keep to specify
-    if the output code should attempt to preserve aliases
+    if the HOA printer should attempt to preserve aliases
    present in the HOA input. This defaults to "keep".

  - autfilt has a new --to-finite option, illustrated on
    https://spot.lrde.epita.fr/tut12.html

+  - ltlfilt has a new --sonf option to produce a formula's Suffix
+    Operator Normal Form, described in [cimatti.06.fmcad]. The
+    associated option --sonf-aps allows listing the newly introduced
+    atomic propositions.
+
+  - autcross learned a --language-complemented option to assist in the
+    case one is testing tools that complement automata. (issue #504).
+
+  - ltlsynt has a new option --tlsf that takes the filename of a TLSF
+    specification and calls syfco (which must be installed) to convert
+    it into an LTL formula.
+
+  - ltlsynt has a new option --from-pgame that takes a parity game in
+    extended HOA format, as used in the Synthesis Competition.
+
+  - ltlsynt has a new option --hide-status to hide the REALIZABLE or
+    UNREALIZABLE output expected by SYNTCOMP. (This line is
+    superfluous, because the exit status of ltlsynt already indicates
+    whether the formula is realizable or not.)
+
+  - ltlsynt has a new option --dot to request GraphViz output instead
+    of its usual output. This works for displaying Mealy machines, games,
+    or AIG circuits. See https://spot.lrde.epita.fr/ltlsynt.html for
+    examples.
+
+  - genaut learned the --cyclist-trace-nba and --cyclist-proof-dba
+    options. Those are used to generate pairs of automata that should
+    include each other, and are used to show a regression (in speed)
+    present in Spot 2.10.x and fixed in 2.11.
+
+  - genltl learned --eil-gsi to generate a family of formulas whose
+    translation and simplification used to be very slow. In particular
+
+      genltl --eil-gsi=23 | ltlfilt --from-ltlf | ltl2tgba
+
+    was reported as taking 9 days. This is now instantaneous.
+
  Library:

+  - The new function suffix_operator_normal_form() implements the
+    transformation of formulas to Suffix Operator Normal Form,
+    described in [cimatti.06.fmcad].
+
  - "original-classes" is a new named property similar to
    "original-states". It maps an each state to an unsigned integer
    such that if two classes are in the same class, they are expected
@@ -21,16 +208,20 @@
  - tgba_determinize() learned to fill the "original-classes"
    property. States of the determinized automaton that correspond to
    the same subset of states of the original automaton belong to the same
-    class. Filling this property is only done on demand has it inccurs
-    on small overhead.
+    class. Filling this property is only done on demand as it incurs
+    a small overhead.

  - sbacc() learned to take the "original-classes" property into
-    account and preserve it.
+    account and to preserve it.

  - The HOA parser and printer learned to map the synthesis-outputs
    property of Spot to the controllable-AP header for the Extended
    HOA format used in SyntComp. https://arxiv.org/abs/1912.05793

+  - The automaton parser learned to parse games in the PGSolver format.
+    See the bottom of https://spot.lrde.epita.fr/ipynb/games.html for
+    an example.
+
  - "aliases" is a new named property that is filled by the HOA parser
    using the list of aliases declared in the HOA file, and then used
    by the HOA printer on a best-effort basis. Aliases can be used to
@@ -57,14 +248,147 @@

  - purge_dead_states() will now also remove edges labeled by false
    (except self-loops).

+  - When parsing formulas with a huge number of operands for an n-ary
+    operator (for instance 'p1 | p2 | ... | p1000') the LTL parser
+    would construct that formula two operands at a time, and the
+    formula constructor for that operator would be responsible for
+    inlining, sorting, deduplicating, ... all operands at each step.
+    This resulted in a worse-than-quadratic slowdown. This is now
+    averted in the parser by delaying the construction of such n-ary
+    nodes until all children are known.
+
+  - complement() used to always turn tautological acceptance conditions
+    into Büchi. It now only does that if the automaton is modified.
+
+  - The zielonka_tree construction was optimized using the same
+    memoization trick that is used in ACD. Additionally, it can now be
+    run with extra options to abort when the tree has an unwanted
+    shape, or to turn the tree into a DAG.
+
+  - contains() can now take a twa as a second argument, not just a
+    twa_graph. This makes it possible, for instance, to do
+    contains(ltl, kripke) to obtain a simple model checker (that
+    returns true or false, without counterexample).
+
+  - degeneralize() and degeneralize_tba() learned to work on
+    generalized-co-Büchi as well.
+
+  - product() learned that the product of two co-Büchi automata
+    is a co-Büchi automaton. And product_or() learned that the
+    "or"-product of two Büchi automata is a Büchi automaton.
+
+  - spot::postprocessor has a new extra option "merge-states-min" that
+    indicates the number of states above which twa_graph::merge_states()
+    (which performs a very cheap pass to fuse states with identical
+    successors) should be called before running simulation-based
+    reductions.
+
+  - A new function delay_branching_here(aut) can be used to simplify
+    some non-deterministic branching. If two transitions (q₁,ℓ,M,q₂)
+    and (q₁,ℓ,M,q₃) differ only by their destination state, and are
+    the only incoming transitions of their destination states, then q₂
+    and q₃ can be merged (taking the union of their outgoing
+    transitions). This cheap function is automatically called by
+    spot::translate() after translation of a formula to GBA, before
+    further simplification. This was introduced to help with automata
+    produced from formulas output by "genltl --eil-gsi" (see above).
+
+  - spot::postprocessor has a new configuration variable, branch-post,
+    that can be used to control the use of branching-postponement
+    (disabled by default) or delayed-branching (see above, enabled by
+    default). See the spot-x(7) man page for details.
+
+  - spot::postprocessor is now using acd_transform() by default when
+    building parity automata. Setting option "acd=0" will revert
+    to using "to_parity()" instead.
+
+  - to_parity() has been almost entirely rewritten and is a bit
+    faster.
+
+  - When asked to build parity automata, spot::translator now uses LTL
+    decomposition more aggressively, as done in the Generic
+    acceptance case, before paritizing the result. This results in
+    much smaller automata in many cases.
+
+  - spot::parallel_policy is an object that can be passed to some
+    algorithms to specify how many threads can be used if Spot has been
+    compiled with --enable-pthread. Currently, only
+    twa_graph::merge_states() supports it.
+
+  Python bindings:
+
+  - The to_str() method of automata can now export a parity game into
+    the PG-Solver format by passing option 'pg'. See
+    https://spot.lrde.epita.fr/ipynb/games.html for an example.
+
+  Deprecation notice:
+
+  - spot::pg_print() has been deprecated in favor of spot::print_pg()
+    for consistency with the rest of the API.
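As a quick illustration of the contains() entry above, here is a minimal sketch using the spot.contains() Python binding, whose first argument is the containing language (the formulas are invented for the example):

  import spot

  spec = spot.formula('G(req -> F ack)')
  impl = spot.formula('G(req -> X ack)')

  # contains(a, b) holds iff the language of b is included in that of a;
  # this is the "simple model checker" pattern mentioned above when b is
  # (an automaton for) the model and a is the specification.
  print(spot.contains(spec, impl))   # True: X ack implies F ack
  print(spot.contains(impl, spec))   # False: the converse does not hold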
+
+  Bugs fixed:
+
+  - calling twa_graph::new_univ_edge(src, begin, end, cond, acc) could
+    produce unexpected results if begin and end were already pointing
+    into the universal edge vector, since the latter can be
+    reallocated during that process.
+
+  - Printing an alternating automaton with print_dot() using 'u' to
+    hide the true state could produce some incorrect GraphViz output if
+    the automaton has a true state as part of a universal group.
+
+  - Due to an optimization introduced in 2.10 to parse HOA labels more
+    efficiently, the automaton parser could crash when parsing random
+    input (not HOA) containing '[' (issue #509).
+
+New in spot 2.10.6 (2022-05-18)
+
+  Bugs fixed:
+
+  - Fix compilation error on MacOS X.
+
+  - Using -Ffile/N to read column N of a CSV file would not reset the
+    /N specification for the next file.
+
+  - make_twa_graph() will now preserve state numbers when copying a
+    kripke_graph object. As a consequence, print_dot() and
+    print_hoa() will now use state numbers matching those of the
+    kripke_graph (issue #505).
+
+  - Fix several compilation warnings introduced by newer versions
+    of GCC and Clang.
+
+New in spot 2.10.5 (2022-05-03)
+
  Bugs fixed:

  - reduce_parity() produced incorrect results when applied to
    automata with deleted edges.

-  - work around a portability issue in Flex 2.6.4 preventing
+  - An optimization of Zielonka could result in incorrect results
+    in some cases.
+
+  - ltlsynt --print-pg incorrectly solved the game in addition to
+    printing it.
+
+  - ltlsynt would fail if only one of --ins or --outs was set, and
+    if it was set empty.
+
+  - Work around a portability issue in Flex 2.6.4 preventing
    compilation on OpenBSD.

+  - Do not use the seq command in test cases, as it is not available
+    everywhere.
+
+  - Do not erase the previous contents of the PYTHONPATH environment
+    variable when running tests; prepend to it instead.
+
+  - Simplify Debian instructions for LTO builds to work around newer
+    libtool versions.
+
+  - Fix invalid read in digraph::sort_edges_of_(), currently unused in
+    Spot.
+
New in spot 2.10.4 (2022-02-01)

  Bug fixed:
@@ -1424,7 +1748,7 @@ New in spot 2.6.2 (2018-09-28)

  - We no longer distribute the Python-based CGI script + javascript
    code for the online translator. Its replacement has its own
-    repository: https://gitlab.lrde.epita.fr/spot/spot-web-app/
+    repository: https://gitlab.lre.epita.fr/spot/spot-web-app/

  Library:

diff --git a/README b/README
index 836bc51b0..458da2d99 100644
--- a/README
+++ b/README
@@ -110,16 +110,16 @@ Spot follows the traditional `./configure && make && make check &&
make install' process. People unfamiliar with the GNU Build System
should read the file INSTALL for generic instructions.

-If you plan to use the Python binding, we recommend you use one
-of the following --prefix options when calling configure:
+If you plan to use the Python bindings, we recommend you use the
+following --prefix option when calling configure:

-  --prefix /usr
-  --prefix /usr/local   (the default)
-  --prefix ~/.local     (if you do not have root permissions)
+  --prefix ~/.local

-The reason is that all these locations are usually automatically
-searched by Python. If you use a different prefix directory, you may
-have to tune the PYTHONPATH environment variable.
+The reason is that ~/.local/lib/python3.X/site-packages, where Spot's
+Python bindings will be installed, is automatically searched by
+Python.
If you use a different prefix directory, you may have to tune +the PYTHONPATH environment variable, or use the --with-pythondir +option to specify different installation paths. In addition to its usual options, ./configure will accept some flags specific to Spot: @@ -173,6 +173,12 @@ flags specific to Spot: client code should be compiled with -D_GLIBCXX_DEBUG as well. This options should normally only be useful to run Spot's test-suite. + --enable-pthread + Build and link with the -pthread option, and activate a few + parallel variants of the algorithms. This is currently disabled + by default, as it require all third-party tools using Spot to + build with -pthread as well. + --enable-c++20 Build everything in C++20 mode. We use that in our build farm to ensure that Spot can be used in C++20 projects as well. @@ -244,17 +250,31 @@ To test the Python bindings, try running >>> import spot >>> print(spot.version()) -If you installed Spot with a prefix that is not one of those suggested -in the "Building and installing" section, it is likely that the above -import statement will fail to locate the spot package. You can show -the list of directories that are searched by Python using: +If you installed Spot with a prefix that is not searched by Python by +default it is likely that the above import statement will fail to +locate the spot package. You can show the list of directories that +are searched by Python using: % python3 >>> import sys >>> print(sys.path) And you can modify that list of searched directories using the -PYTHONPATH environment variable. +PYTHONPATH environment variable. Alternatively, you can instruct Spot +to install its Python files in one of those directory using the +--with-pythondir configure option. As an example, an issue in +distributions derived from Debian is that if you run + + ./configure && make && make install + +Python files get installed in /usr/local/lib/python3.X/site-packages +while Debian's version of Python only looks for them into +/usr/local/lib/python3.X/dist-packages instead. You can fix that by +instructing configure that you want packages installed into the right +directory instead: + + ./configure --with-pythondir=/usr/local/lib/python3.X/dist-packages \ + && make && make install To test if man pages can be found, simply try: @@ -319,13 +339,13 @@ bench/ Benchmarks for ... wdba/ ... WDBA minimization (for obligation properties). python/ Python bindings for Spot and BuDDy -Third party software +Third-party software -------------------- buddy/ A customized version of BuDDy 2.3 (a BDD library). ltdl/ Libtool's portable dlopen() wrapper library. lib/ Gnulib's portability modules. -utf8/ Nemanja Trifunovic's utf-8 routines. +utf8/ Trifunovic's utf-8 routines. https://github.com/nemtrif/utfcpp elisp/ Related emacs modes, used for building the documentation. picosat/ A distribution of PicoSAT 965 (a satsolver library). spot/bricks/ A collection of useful C++ code provided by DiVinE diff --git a/THANKS b/THANKS index 9eb566483..93155f9d1 100644 --- a/THANKS +++ b/THANKS @@ -11,9 +11,12 @@ Christian Dax Christopher Ziegler Clément Tamines David Müller +Dávid Smolka +Edmond Irani Liu Ernesto Posse Étienne Renault Fabrice Kordon +Fangyi Zhou Felix Klaedtke Florian Perlié-Long František Blahoudek @@ -21,6 +24,7 @@ Gerard J. 
Holzmann Hashim Ali Heikki Tauriainen Henrich Lauko +Jacopo Binchi Jan Strejček Jean-Michel Couvreur Jean-Michel Ilié @@ -41,14 +45,18 @@ Michael Weber Mikuláš Klokočka Ming-Hsien Tsai Nikos Gorogiannis +Ondřej Lengál Paul Guénézan +Pierre Ganty +Raven Beutner Reuben Rowe Roei Nahum Rüdiger Ehlers -Silien Hong -Simon Jantsch +Shachar Itzhaky Shengping Shaw Shufang Zhu +Silien Hong +Simon Jantsch Sonali Dutta Tereza Šťastná Tobias Meggendorfer. diff --git a/bench/dtgbasat/gen.py b/bench/dtgbasat/gen.py index e96bf2825..dabf77971 100755 --- a/bench/dtgbasat/gen.py +++ b/bench/dtgbasat/gen.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (C) 2016-2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2016-2018, 2023 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -55,12 +55,12 @@ class BenchConfig(object): if line[0] == '#' or line.isspace(): continue elif line[0:2] == "sh": - sh = re.search('sh (.+?)$', line).group(1) + sh = re.search('sh (.+)$', line).group(1) continue else: name = re.search('(.+?):', line).group(1) code = re.search(':(.+?)>', line).group(1) - xoptions = re.search('>(.+?)$', line).group(1) + xoptions = re.search('>(.+)$', line).group(1) b = Bench(name=name, code=code, xoptions=xoptions) self.l.append(b) self.sh.append(sh) diff --git a/bench/stutter/stutter_invariance_formulas.cc b/bench/stutter/stutter_invariance_formulas.cc index 2007891af..32bc45083 100644 --- a/bench/stutter/stutter_invariance_formulas.cc +++ b/bench/stutter/stutter_invariance_formulas.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016, 2017 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2014, 2015, 2016, 2017, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -28,7 +28,7 @@ #include #include -const char argp_program_doc[] =""; +static const char argp_program_doc[] = ""; const struct argp_child children[] = { diff --git a/bin/autcross.cc b/bin/autcross.cc index 81b6bcef5..b3e504bb3 100644 --- a/bin/autcross.cc +++ b/bin/autcross.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2020 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2017-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -51,7 +51,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Call several tools that process automata and cross-compare their output \ to detect bugs, or to gather statistics. 
The list of automata to use \ should be supplied on standard input, or using the -F option.\v\ @@ -64,6 +64,7 @@ Exit status:\n\ enum { OPT_BOGUS = 256, + OPT_COMPLEMENTED, OPT_CSV, OPT_HIGH, OPT_FAIL_ON_TIMEOUT, @@ -94,6 +95,8 @@ static const argp_option options[] = "consider timeouts as errors", 0 }, { "language-preserved", OPT_LANG, nullptr, 0, "expect that each tool preserves the input language", 0 }, + { "language-complemented", OPT_COMPLEMENTED, nullptr, 0, + "expect that each tool complements the input language", 0 }, { "no-checks", OPT_NOCHECKS, nullptr, 0, "do not perform any sanity checks", 0 }, /**************************************************/ @@ -144,6 +147,7 @@ static bool fail_on_timeout = false; static bool stop_on_error = false; static bool no_checks = false; static bool opt_language_preserved = false; +static bool opt_language_complemented = false; static bool opt_omit = false; static const char* csv_output = nullptr; static unsigned round_num = 0; @@ -158,7 +162,7 @@ parse_opt(int key, char* arg, struct argp_state*) switch (key) { case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'q': quiet = true; @@ -170,6 +174,9 @@ parse_opt(int key, char* arg, struct argp_state*) bogus_output_filename = arg; break; } + case OPT_COMPLEMENTED: + opt_language_complemented = true; + break; case OPT_CSV: csv_output = arg ? arg : "-"; break; @@ -209,7 +216,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); else tools_push_autproc(arg); break; @@ -338,7 +345,6 @@ struct in_statistics struct out_statistics { - // If OK is false, output statistics are not available. 
bool ok; const char* status_str; @@ -346,7 +352,7 @@ struct out_statistics double time; aut_statistics output; - out_statistics() + out_statistics() noexcept : ok(false), status_str(nullptr), status_code(0), @@ -533,25 +539,32 @@ namespace const spot::const_twa_graph_ptr& aut_j, size_t i, size_t j) { + auto is_really_comp = [lc = opt_language_complemented, + ts = tools.size()](unsigned i) { + return lc && i == ts; + }; + if (aut_i->num_sets() + aut_j->num_sets() > spot::acc_cond::mark_t::max_accsets()) { if (!quiet) - std::cerr << "info: building " << autname(i) - << '*' << autname(j, true) + std::cerr << "info: building " << autname(i, is_really_comp(i)) + << '*' << autname(j, !is_really_comp(j)) << " requires more acceptance sets than supported\n"; return false; } if (verbose) std::cerr << "info: check_empty " - << autname(i) << '*' << autname(j, true) << '\n'; + << autname(i, is_really_comp(i)) + << '*' << autname(j, !is_really_comp(j)) << '\n'; auto w = aut_i->intersecting_word(aut_j); if (w) { std::ostream& err = global_error(); - err << "error: " << autname(i) << '*' << autname(j, true) + err << "error: " << autname(i, is_really_comp(i)) + << '*' << autname(j, !is_really_comp(j)) << (" is nonempty; both automata accept the infinite word:\n" " "); example() << *w << '\n'; @@ -600,7 +613,7 @@ namespace return src.str(); }(); - input_statistics.push_back(in_statistics()); + input_statistics.emplace_back(in_statistics()); input_statistics[round_num].input_source = std::move(source); if (auto name = input->get_named_prop("automaton-name")) @@ -621,12 +634,15 @@ namespace int problems = 0; size_t m = tools.size(); - size_t mi = m + opt_language_preserved; + size_t mi = m + opt_language_preserved + opt_language_complemented; std::vector pos(mi); std::vector neg(mi); vector_tool_statistics stats(m); - if (opt_language_preserved) + // For --language-complemented, we store the input automata in + // pos and will compute its complement in neg. Before running + // checks we will swap both automata. + if (opt_language_preserved || opt_language_complemented) pos[mi - 1] = input; if (verbose) @@ -642,7 +658,7 @@ namespace problems += prob; } spot::cleanup_tmpfiles(); - output_statistics.push_back(std::move(stats)); + output_statistics.emplace_back(std::move(stats)); if (verbose) { @@ -718,6 +734,9 @@ namespace }; } + if (opt_language_complemented) + std::swap(pos[mi - 1], neg[mi - 1]); + // Just make a circular implication check // A0 <= A1, A1 <= A2, ..., AN <= A0 unsigned ok = 0; @@ -824,10 +843,15 @@ main(int argc, char** argv) check_no_automaton(); - if (s == 1 && !opt_language_preserved && !no_checks) - error(2, 0, "Since --language-preserved is not used, you need " - "at least two tools to compare."); + if (s == 1 && !no_checks + && !opt_language_preserved + && !opt_language_complemented) + error(2, 0, "Since --language-preserved and --language-complemented " + "are not used, you need at least two tools to compare."); + if (opt_language_preserved && opt_language_complemented) + error(2, 0, "Options --language-preserved and --language-complemented " + "are incompatible."); setup_color(); setup_sig_handler(); diff --git a/bin/autfilt.cc b/bin/autfilt.cc index 8fe95c396..4487fad8b 100644 --- a/bin/autfilt.cc +++ b/bin/autfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
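The --language-complemented cross-checks added to autcross above boil down to emptiness checks between an automaton and the complement produced by the tool under test. A rough sketch of the same idea through the Python bindings (not autcross's exact algorithm; the formula is invented for the example):

  import spot

  aut = spot.translate('GFa -> GFb')
  out = spot.complement(aut)        # stands in for the tool's output

  # A correct complement never intersects the original language...
  print(aut.intersecting_word(out))                            # expect None
  # ...and together the two must cover every word, i.e. the complement
  # of their union is empty.
  print(spot.complement(spot.product_or(aut, out)).is_empty())  # expect True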
@@ -75,7 +75,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Convert, transform, and filter omega-automata.\v\ Exit status:\n\ 0 if some automata were output\n\ @@ -448,7 +448,7 @@ struct canon_aut std::vector edges; std::string acc; - canon_aut(const spot::const_twa_graph_ptr& aut) + explicit canon_aut(const spot::const_twa_graph_ptr& aut) : num_states(aut->num_states()) , edges(aut->edge_vector().begin() + 1, aut->edge_vector().end()) @@ -713,10 +713,12 @@ ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false) return p.run(aut); } -static spot::twa_graph_ptr ensure_tba(spot::twa_graph_ptr aut) +static spot::twa_graph_ptr +ensure_tba(spot::twa_graph_ptr aut, + spot::postprocessor::output_type type = spot::postprocessor::Buchi) { spot::postprocessor p; - p.set_type(spot::postprocessor::Buchi); + p.set_type(type); p.set_pref(spot::postprocessor::Any); p.set_level(spot::postprocessor::Low); return p.run(aut); @@ -726,12 +728,14 @@ static spot::twa_graph_ptr ensure_tba(spot::twa_graph_ptr aut) static spot::twa_graph_ptr product(spot::twa_graph_ptr left, spot::twa_graph_ptr right) { - if ((type == spot::postprocessor::Buchi) - && (left->num_sets() + right->num_sets() > - spot::acc_cond::mark_t::max_accsets())) + // Are we likely to fail because of too many colors? + if ((left->num_sets() + right->num_sets() > + spot::acc_cond::mark_t::max_accsets()) + && (type == spot::postprocessor::Buchi + || type == spot::postprocessor::CoBuchi)) { - left = ensure_tba(left); - right = ensure_tba(right); + left = ensure_tba(left, type); + right = ensure_tba(right, type); } return spot::product(left, right); } @@ -739,16 +743,34 @@ product(spot::twa_graph_ptr left, spot::twa_graph_ptr right) static spot::twa_graph_ptr product_or(spot::twa_graph_ptr left, spot::twa_graph_ptr right) { - if ((type == spot::postprocessor::Buchi) - && (left->num_sets() + right->num_sets() > - spot::acc_cond::mark_t::max_accsets())) + // Are we likely to fail because of too many colors? 
+ if ((left->num_sets() + right->num_sets() > + spot::acc_cond::mark_t::max_accsets()) + && (type == spot::postprocessor::Buchi + || type == spot::postprocessor::CoBuchi)) { - left = ensure_tba(left); - right = ensure_tba(right); + left = ensure_tba(left, type); + right = ensure_tba(right, type); } return spot::product_or(left, right); } +static spot::twa_graph_ptr +word_to_aut(const char* word, const char *argname) +{ + try + { + return spot::parse_word(word, opt->dict)->as_automaton(); + } + catch (const spot::parse_error& e) + { + error(2, 0, "failed to parse the argument of --%s:\n%s", + argname, e.what()); + } + SPOT_UNREACHABLE(); + return nullptr; +} + static int parse_opt(int key, char* arg, struct argp_state*) { @@ -761,7 +783,7 @@ parse_opt(int key, char* arg, struct argp_state*) automaton_format = Count; break; case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'n': opt_max_count = to_pos_int(arg, "-n/--max-count"); @@ -770,17 +792,14 @@ parse_opt(int key, char* arg, struct argp_state*) opt_nth = parse_range(arg, 0, std::numeric_limits::max()); break; case 'u': - opt->uniq = std::unique_ptr(new std::set()); + opt->uniq = std::make_unique(); break; case 'v': opt_invert = true; break; case 'x': - { - const char* opt = extra_options.parse_options(arg); - if (opt) - error(2, 0, "failed to parse --options near '%s'", opt); - } + if (const char* opt = extra_options.parse_options(arg)) + error(2, 0, "failed to parse --options near '%s'", opt); break; case OPT_ALIASES: opt_aliases = XARGMATCH("--aliases", arg, aliases_args, aliases_types); @@ -796,16 +815,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_ACCEPT_WORD: - try - { - opt->acc_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --accept-word:\n%s", - e.what()); - } + opt->acc_words.emplace_back(word_to_aut(arg, "accept-word")); break; case OPT_ACCEPTANCE_IS: { @@ -958,16 +968,7 @@ parse_opt(int key, char* arg, struct argp_state*) "%d should be followed by a comma and WORD", res); arg = endptr + 1; } - try - { - opt->hl_words.emplace_back(spot::parse_word(arg, opt->dict) - ->as_automaton(), res); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --highlight-word:\n%s", - e.what()); - } + opt->hl_words.emplace_back(word_to_aut(arg, "highlight-word"), res); } break; case OPT_HIGHLIGHT_LANGUAGES: @@ -988,7 +989,7 @@ parse_opt(int key, char* arg, struct argp_state*) if (!opt->included_in) opt->included_in = aut; else - opt->included_in = spot::product_or(opt->included_in, aut); + opt->included_in = ::product_or(opt->included_in, aut); } break; case OPT_INHERENTLY_WEAK_SCCS: @@ -1151,16 +1152,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case OPT_REJECT_WORD: - try - { - opt->rej_words.push_back(spot::parse_word(arg, opt->dict) - ->as_automaton()); - } - catch (const spot::parse_error& e) - { - error(2, 0, "failed to parse the argument of --reject-word:\n%s", - e.what()); - } + opt->rej_words.emplace_back(word_to_aut(arg, "reject-word")); break; case OPT_REM_AP: opt->rem_ap.add_ap(arg); @@ -1252,7 +1244,7 @@ parse_opt(int key, char* arg, struct argp_state*) opt_art_sccs_set = true; break; case ARGP_KEY_ARG: - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; default: @@ -1285,7 +1277,7 @@ namespace static 
bool match_acceptance(spot::twa_graph_ptr aut) { - auto& acc = aut->acc(); + const spot::acc_cond& acc = aut->acc(); switch (opt_acceptance_is) { case ACC_Any: @@ -1340,8 +1332,7 @@ namespace { bool max; bool odd; - bool is_p = acc.is_parity(max, odd, true); - if (!is_p) + if (!acc.is_parity(max, odd, true)) return false; switch (opt_acceptance_is) { @@ -1454,7 +1445,7 @@ namespace if (matched && opt_acceptance_is) matched = match_acceptance(aut); - if (matched && (opt_sccs_set | opt_art_sccs_set)) + if (matched && (opt_sccs_set || opt_art_sccs_set)) { spot::scc_info si(aut); unsigned n = si.scc_count(); @@ -1534,14 +1525,14 @@ namespace && spot::contains(aut, opt->equivalent_pos); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; break; } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -1675,14 +1666,9 @@ namespace aut->accepting_run()->highlight(opt_highlight_accepting_run); if (!opt->hl_words.empty()) - for (auto& word_aut: opt->hl_words) - { - if (aut->acc().uses_fin_acceptance()) - error(2, 0, - "--highlight-word does not yet work with Fin acceptance"); - if (auto run = spot::product(aut, word_aut.first)->accepting_run()) - run->project(aut)->highlight(word_aut.second); - } + for (auto& [word_aut, color]: opt->hl_words) + if (auto run = spot::product(aut, word_aut)->accepting_run()) + run->project(aut)->highlight(color); timer.stop(); if (opt->uniq) @@ -1757,15 +1743,17 @@ main(int argc, char** argv) post.set_level(level); autfilt_processor processor(post, o.dict); - if (processor.run()) - return 2; - - // Diagnose unused -x options - extra_options.report_unused_options(); + int err = processor.run(); if (automaton_format == Count) std::cout << match_count << std::endl; + // Diagnose unused -x options + if (!err) + extra_options.report_unused_options(); + else + return 2; + check_cout(); return match_count ? 0 : 1; }); diff --git a/bin/common_aoutput.cc b/bin/common_aoutput.cc index 665fafc67..60f83289e 100644 --- a/bin/common_aoutput.cc +++ b/bin/common_aoutput.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
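The rewritten --highlight-word loop above (product, accepting_run, project, highlight) has a direct counterpart in the Python bindings. A hedged sketch, assuming twa_run's project() and highlight() are exposed as in C++ and using an invented formula and word:

  import spot

  aut = spot.translate('GFa & GFb')
  word = spot.parse_word('cycle{a&b}', aut.get_dict()).as_automaton()

  # Same steps as the C++ loop: find a run of the word inside aut and
  # highlight it with color 1; per the change above this also works on
  # automata whose acceptance uses Fin.
  run = spot.product(aut, word).accepting_run()
  if run is not None:
      run.project(aut).highlight(1)

  # Highlighting is only visible in HOA 1.1 output (autfilt --hoa=1.1).
  print(aut.to_str('hoa', '1.1'))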
@@ -41,7 +41,7 @@ #include automaton_format_t automaton_format = Hoa; -static const char* automaton_format_opt = nullptr; +const char* automaton_format_opt = nullptr; const char* opt_name = nullptr; static const char* opt_output = nullptr; static const char* stats = ""; @@ -203,12 +203,18 @@ static const argp_option io_options[] = "to specify additional options as in --hoa=opt)", 0 }, { "%M, %m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "name of the automaton", 0 }, - { "%S, %s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable states", 0 }, - { "%E, %e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable edges", 0 }, - { "%T, %t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable transitions", 0 }, + { "%S, %s, %[LETTER]S, %[LETTER]s", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of states (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%E, %e, %[LETTER]E, %[LETTER]e", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of edges (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%T, %t, %[LETTER]E, %[LETTER]e", + 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of transitions (add one LETTER to select (r) reachable " + "[default], (u) unreachable, (a) all).", 0 }, { "%A, %a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of acceptance sets", 0 }, { "%G, %g, %[LETTERS]G, %[LETTERS]g", 0, nullptr, @@ -221,7 +227,7 @@ static const argp_option io_options[] = "(iw) inherently weak. Use uppercase letters to negate them.", 0 }, { "%R, %[LETTERS]R", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to" + "CPU time (excluding parsing), in seconds; Add LETTERS to restrict to " "(u) user time, (s) system time, (p) parent process, " "or (c) children processes.", 0 }, { "%N, %n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, @@ -268,12 +274,15 @@ static const argp_option o_options[] = "to specify additional options as in --hoa=opt)", 0 }, { "%m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "name of the automaton", 0 }, - { "%s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable states", 0 }, - { "%e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable edges", 0 }, - { "%t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, - "number of reachable transitions", 0 }, + { "%s, %[LETTER]s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of states (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%e, %[LETTER]e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of edges (add one LETTER to select (r) reachable [default], " + "(u) unreachable, (a) all).", 0 }, + { "%t, %[LETTER]t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, + "number of transitions (add one LETTER to select (r) reachable " + "[default], (u) unreachable, (a) all).", 0 }, { "%a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, "number of acceptance sets", 0 }, { "%g, %[LETTERS]g", 0, nullptr, @@ -444,7 +453,7 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, const char* filename, int loc, - spot::process_timer& ptimer, + const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix) { timer_ = ptimer; @@ -472,15 +481,15 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut, if (has('T')) { spot::twa_sub_statistics s = sub_stats_reachable(haut->aut); - haut_states_ = 
s.states; - haut_edges_ = s.edges; - haut_trans_ = s.transitions; + haut_states_.set(s.states, haut->aut->num_states()); + haut_edges_.set(s.edges, haut->aut->num_edges()); + haut_trans_.set(s.transitions, count_all_transitions(haut->aut)); } else if (has('E') || has('S')) { spot::twa_statistics s = stats_reachable(haut->aut); - haut_states_ = s.states; - haut_edges_ = s.edges; + haut_states_.set(s.states, haut->aut->num_states()); + haut_edges_.set(s.edges, haut->aut->num_edges()); } if (has('M')) { @@ -624,10 +633,10 @@ automaton_printer::print(const spot::twa_graph_ptr& aut, outputnamer.print(haut, aut, f, filename, loc, ptimer, csv_prefix, csv_suffix); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } // Output it. diff --git a/bin/common_aoutput.hh b/bin/common_aoutput.hh index 1b2e7ae41..f57beae84 100644 --- a/bin/common_aoutput.hh +++ b/bin/common_aoutput.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2014-2018, 2020, 2022, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -47,6 +47,7 @@ enum automaton_format_t { // The format to use in output_automaton() extern automaton_format_t automaton_format; +extern const char* automaton_format_opt; // Set to the argument of --name, else nullptr. extern const char* opt_name; // Output options @@ -154,7 +155,7 @@ public: print(const spot::const_parsed_aut_ptr& haut, const spot::const_twa_graph_ptr& aut, spot::formula f, - const char* filename, int loc, spot::process_timer& ptimer, + const char* filename, int loc, const spot::process_timer& ptimer, const char* csv_prefix, const char* csv_suffix); private: @@ -165,9 +166,9 @@ private: spot::printable_value aut_word_; spot::printable_value haut_word_; spot::printable_acc_cond haut_gen_acc_; - spot::printable_value haut_states_; - spot::printable_value haut_edges_; - spot::printable_value haut_trans_; + spot::printable_size haut_states_; + spot::printable_size haut_edges_; + spot::printable_long_size haut_trans_; spot::printable_value haut_acc_; printable_varset haut_ap_; printable_varset aut_ap_; @@ -195,7 +196,7 @@ class automaton_printer std::map> outputfiles; public: - automaton_printer(stat_style input = no_input); + explicit automaton_printer(stat_style input = no_input); ~automaton_printer(); void diff --git a/bin/common_conv.cc b/bin/common_conv.cc index e63969b16..b23a67c51 100644 --- a/bin/common_conv.cc +++ b/bin/common_conv.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement +// Copyright (C) 2015, 2018, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
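The printable_size members introduced above carry two numbers, the reachable count and the total count, which is what lets %s, %e, %t and their %[LETTER] variants report reachable (r, the default), unreachable (u), or all (a) elements. A hedged sketch of where those two numbers come from, using the same stats_reachable() helper as the patch; the function name show_state_counts is invented for the example.

#include <iostream>
#include <spot/twa/twagraph.hh>
#include <spot/twaalgos/stats.hh>

void show_state_counts(const spot::const_twa_graph_ptr& aut)
{
  // Count only the states reachable from the initial state,
  // i.e., what plain %s (or %rs) reports.
  unsigned reachable = spot::stats_reachable(aut).states;
  // All states stored in the graph, i.e., what %as reports.
  unsigned all = aut->num_states();
  // The difference is what %us reports.
  std::cout << reachable << " reachable, "
            << (all - reachable) << " unreachable, "
            << all << " total\n";
}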
@@ -25,10 +25,15 @@ int to_int(const char* s, const char* where) { char* endptr; - int res = strtol(s, &endptr, 10); + errno = 0; + long int lres = strtol(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an integer (in argument of %s).", s, where); + int res = lres; + if (res != lres || errno == ERANGE) + error(2, 0, "value '%s' is too large for an int (in argument of %s).", + s, where); return res; } @@ -45,11 +50,17 @@ unsigned to_unsigned (const char *s, const char* where) { char* endptr; - unsigned res = strtoul(s, &endptr, 10); + errno = 0; + unsigned long lres = strtoul(s, &endptr, 10); if (*endptr) error(2, 0, "failed to parse '%s' as an unsigned integer (in argument of %s).", s, where); + unsigned res = lres; + if (res != lres || errno == ERANGE) + error(2, 0, + "value '%s' is too large for a unsigned int (in argument of %s).", + s, where); return res; } @@ -57,8 +68,9 @@ float to_float(const char* s, const char* where) { char* endptr; + errno = 0; float res = strtof(s, &endptr); - if (*endptr) + if (*endptr || errno == ERANGE) error(2, 0, "failed to parse '%s' as a float (in argument of %s)", s, where); return res; @@ -80,8 +92,9 @@ to_longs(const char* arg) while (*arg) { char* endptr; + errno = 0; long value = strtol(arg, &endptr, 10); - if (endptr == arg) + if (endptr == arg || errno) error(2, 0, "failed to parse '%s' as an integer.", arg); res.push_back(value); while (*endptr == ' ' || *endptr == ',') diff --git a/bin/common_file.cc b/bin/common_file.cc index ab89fbfe4..4e56c6d54 100644 --- a/bin/common_file.cc +++ b/bin/common_file.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2016, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -21,25 +21,27 @@ #include #include - -output_file::output_file(const char* name) +output_file::output_file(const char* name, bool force_append) { std::ios_base::openmode mode = std::ios_base::trunc; if (name[0] == '>' && name[1] == '>') { - mode = std::ios_base::app; append_ = true; name += 2; } + if (force_append) + append_ = true; + if (append_) + mode = std::ios_base::app; if (name[0] == '-' && name[1] == 0) { os_ = &std::cout; return; } - of_ = new std::ofstream(name, mode); + of_ = std::make_unique(name, mode); if (!*of_) error(2, errno, "cannot open '%s'", name); - os_ = of_; + os_ = of_.get(); } diff --git a/bin/common_file.hh b/bin/common_file.hh index fba62dec0..b6aa0bec3 100644 --- a/bin/common_file.hh +++ b/bin/common_file.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015-2016, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -21,27 +21,22 @@ #include "common_sys.hh" #include +#include #include -#include class output_file { std::ostream* os_; - std::ofstream* of_ = nullptr; + std::unique_ptr of_; bool append_ = false; public: // Open a file for output. "-" is interpreted as stdout. // Names that start with ">>" are opened for append. // The function calls error() on... error. 
- output_file(const char* name); + output_file(const char* name, bool force_append = false); void close(const std::string& name); - ~output_file() - { - delete of_; - } - bool append() const { return append_; diff --git a/bin/common_finput.cc b/bin/common_finput.cc index 8df1fb028..dbcdb3849 100644 --- a/bin/common_finput.cc +++ b/bin/common_finput.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017, 2019, 2021 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2017, 2019, 2021-2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -68,10 +68,10 @@ parse_opt_finput(int key, char* arg, struct argp_state*) switch (key) { case 'f': - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, job_type::LTL_STRING); break; case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_LBT: lbt_input = true; @@ -96,12 +96,6 @@ parse_formula(const std::string& s) (s, spot::default_environment::instance(), false, lenient); } -job_processor::job_processor() - : abort_run(false), real_filename(nullptr), - col_to_read(0), prefix(nullptr), suffix(nullptr) -{ -} - job_processor::~job_processor() { if (real_filename) @@ -303,8 +297,22 @@ job_processor::process_stream(std::istream& is, } int -job_processor::process_file(const char* filename) +job_processor::process_aut_file(const char*) { + throw std::runtime_error("process_aut_file not defined for this tool"); +} + +int +job_processor::process_tlsf_file(const char*) +{ + throw std::runtime_error("process_tlsf_file not defined for this tool"); +} + +int +job_processor::process_ltl_file(const char* filename) +{ + col_to_read = 0; + // Special case for stdin. if (filename[0] == '-' && filename[1] == 0) return process_stream(std::cin, filename); @@ -356,12 +364,25 @@ int job_processor::run() { int error = 0; - for (auto& j: jobs) + for (const auto& j: jobs) { - if (!j.file_p) - error |= process_string(j.str); - else - error |= process_file(j.str); + switch (j.type) + { + case job_type::LTL_STRING: + error |= process_string(j.str); + break; + case job_type::LTL_FILENAME: + error |= process_ltl_file(j.str); + break; + case job_type::AUT_FILENAME: + error |= process_aut_file(j.str); + break; + case job_type::TLSF_FILENAME: + error |= process_tlsf_file(j.str); + break; + default: + throw std::runtime_error("unexpected job type"); + } if (abort_run) break; } @@ -376,7 +397,7 @@ void check_no_formula() error(2, 0, "No formula to translate? Run '%s --help' for help.\n" "Use '%s -' to force reading formulas from the standard " "input.", program_name, program_name); - jobs.emplace_back("-", true); + jobs.emplace_back("-", job_type::LTL_FILENAME); } void check_no_automaton() @@ -387,5 +408,5 @@ void check_no_automaton() error(2, 0, "No automaton to process? Run '%s --help' for help.\n" "Use '%s -' to force reading automata from the standard " "input.", program_name, program_name); - jobs.emplace_back("-", true); + jobs.emplace_back("-", job_type::AUT_FILENAME); } diff --git a/bin/common_finput.hh b/bin/common_finput.hh index 5d8feb3ed..9ecb5b025 100644 --- a/bin/common_finput.hh +++ b/bin/common_finput.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2017, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). 
// // This file is part of Spot, a model checking library. // @@ -25,13 +25,18 @@ #include #include +enum class job_type : char { LTL_STRING, + LTL_FILENAME, + AUT_FILENAME, + TLSF_FILENAME }; + struct job { const char* str; - bool file_p; // true if str is a filename, false if it is a formula + job_type type; - job(const char* str, bool file_p) noexcept - : str(str), file_p(file_p) + job(const char* str, job_type type) noexcept + : str(str), type(type) { } }; @@ -51,9 +56,11 @@ spot::parsed_formula parse_formula(const std::string& s); class job_processor { protected: - bool abort_run; // Set to true in process_formula() to abort run(). + bool abort_run = false; // Set to true in process_formula() to abort run(). public: - job_processor(); + job_processor() = default; + job_processor(const job_processor&) = delete; + job_processor& operator=(const job_processor&) = delete; virtual ~job_processor(); @@ -68,15 +75,21 @@ public: process_stream(std::istream& is, const char* filename); virtual int - process_file(const char* filename); + process_ltl_file(const char* filename); + + virtual int + process_aut_file(const char* filename); + + virtual int + process_tlsf_file(const char* filename); virtual int run(); - char* real_filename; - long int col_to_read; - char* prefix; - char* suffix; + char* real_filename = nullptr; + long int col_to_read = 0; + char* prefix = nullptr; + char* suffix = nullptr; }; // Report and error message or add a default job depending on whether diff --git a/bin/common_hoaread.hh b/bin/common_hoaread.hh index b3cc912a5..e66967393 100644 --- a/bin/common_hoaread.hh +++ b/bin/common_hoaread.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2017, 2018 Laboratoire de Recherche et Développement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2017, 2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -80,8 +80,10 @@ public: } int - process_file(const char* filename) override + process_aut_file(const char* filename) override { + col_to_read = 0; + // If we have a filename like "foo/NN" such // that: // ① foo/NN is not a file, diff --git a/bin/common_output.cc b/bin/common_output.cc index e9c61a513..93cb2dfaf 100644 --- a/bin/common_output.cc +++ b/bin/common_output.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2019, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
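The output_file changes above are easiest to see in use. A short usage sketch follows (it assumes compilation inside Spot's bin/ directory, since common_file.hh is an internal header): "-" still means standard output, a ">>" prefix still requests append mode, and the new force_append argument is what ltlsynt's --csv code uses later in this patch to keep appending after the first formula.

#include "common_file.hh"

void output_file_demo()
{
  output_file to_stdout("-");              // "-" writes to std::cout
  to_stdout.ostream() << "hello\n";

  output_file log1(">>stats.csv");         // ">>" prefix: append mode
  log1.ostream() << "1,2,3\n";

  // New in this patch: append can also be forced programmatically,
  // without rewriting the file name.
  output_file log2("stats.csv", /*force_append=*/true);
  log2.ostream() << "4,5,6\n";
}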
@@ -23,6 +23,7 @@ #include "common_setup.hh" #include #include +#include #include #include #include @@ -297,9 +298,9 @@ namespace }; } -static formula_printer* format = nullptr; +static std::unique_ptr format; static std::ostringstream outputname; -static formula_printer* outputnamer = nullptr; +static std::unique_ptr outputnamer; static std::map> outputfiles; int @@ -320,7 +321,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = lbt_output; break; case 'o': - outputnamer = new formula_printer(outputname, arg); + outputnamer = std::make_unique(outputname, arg); break; case 'p': full_parenth = true; @@ -341,8 +342,7 @@ parse_opt_output(int key, char* arg, struct argp_state*) output_format = wring_output; break; case OPT_FORMAT: - delete format; - format = new formula_printer(std::cout, arg); + format = std::make_unique(std::cout, arg); break; default: return ARGP_ERR_UNKNOWN; @@ -417,10 +417,10 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer, formula_with_location fl = { f, filename, linenum, prefix, suffix }; outputnamer->print(fl, ptimer); std::string fname = outputname.str(); - auto p = outputfiles.emplace(fname, nullptr); - if (p.second) - p.first->second.reset(new output_file(fname.c_str())); - out = &p.first->second->ostream(); + auto [it, b] = outputfiles.try_emplace(fname, nullptr); + if (b) + it->second.reset(new output_file(fname.c_str())); + out = &it->second->ostream(); } output_formula(*out, f, ptimer, filename, linenum, prefix, suffix); *out << output_terminator; diff --git a/bin/common_range.cc b/bin/common_range.cc index 8909a26c0..98e568b41 100644 --- a/bin/common_range.cc +++ b/bin/common_range.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2014, 2016 Laboratoire de Recherche et +// Copyright (C) 2012, 2014, 2016, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -36,13 +36,17 @@ parse_range(const char* str, int missing_left, int missing_right) { range res; char* end; - res.min = strtol(str, &end, 10); + errno = 0; + long lres = strtol(str, &end, 10); + res.min = lres; + if (res.min != lres || errno == ERANGE) + error(2, 0, "start of range '%s' is too large for an int.", str); if (end == str) { // No leading number. It's OK as long as the string is not // empty. if (!*end) - error(1, 0, "invalid empty range"); + error(2, 0, "invalid empty range"); res.min = missing_left; } if (!*end) @@ -66,19 +70,23 @@ parse_range(const char* str, int missing_left, int missing_right) { // Parse the next integer. 
char* end2; - res.max = strtol(end, &end2, 10); + errno = 0; + lres = strtol(end, &end2, 10); + res.max = lres; + if (res.max != lres || errno == ERANGE) + error(2, 0, "end of range '%s' is too large for an int.", str); if (str == end2) - error(1, 0, "invalid range '%s' " + error(2, 0, "invalid range '%s' " "(should start with digits, dots, or colon)", str); if (end == end2) - error(1, 0, "invalid range '%s' (missing end?)", str); + error(2, 0, "invalid range '%s' (missing end?)", str); if (*end2) - error(1, 0, "invalid range '%s' (trailing garbage?)", str); + error(2, 0, "invalid range '%s' (trailing garbage?)", str); } } if (res.min < 0 || res.max < 0) - error(1, 0, "invalid range '%s': values must be positive", str); + error(2, 0, "invalid range '%s': values must be positive", str); return res; } diff --git a/bin/common_setup.cc b/bin/common_setup.cc index 24cacae85..c59ec0695 100644 --- a/bin/common_setup.cc +++ b/bin/common_setup.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -20,13 +20,14 @@ #include "common_setup.hh" #include "common_aoutput.hh" -#include "argp.h" -#include "closeout.h" +#include +#include #include #include #include #include #include +#include #include static void @@ -35,7 +36,7 @@ display_version(FILE *stream, struct argp_state*) fputs(program_name, stream); fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\ \n\ -Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita.\n\ +Copyright (C) 2023 Laboratoire de Recherche de l'Epita (LRE)\n\ License GPLv3+: \ GNU GPL version 3 or later .\n\ This is free software: you are free to change and redistribute it.\n\ diff --git a/bin/common_setup.hh b/bin/common_setup.hh index e2fce84e0..94cd16f4f 100644 --- a/bin/common_setup.hh +++ b/bin/common_setup.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2013, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -34,5 +34,5 @@ int protected_main(char** progname, std::function mainfun); // Diagnose exceptions. [[noreturn]] void handle_any_exception(); -#define BEGIN_EXCEPTION_PROTECT try { (void)0; +#define BEGIN_EXCEPTION_PROTECT try { (void)0 #define END_EXCEPTION_PROTECT } catch (...) { handle_any_exception(); } diff --git a/bin/common_trans.cc b/bin/common_trans.cc index a9b823ff4..b93535173 100644 --- a/bin/common_trans.cc +++ b/bin/common_trans.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
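to_int(), to_unsigned(), and parse_range() now share the same strtol() idiom: clear errno before the call, parse into a long, then reject both ERANGE overflows and values that do not survive the narrowing conversion. A stand-alone illustration of that idiom; checked_to_int() and its error reporting are invented for the example.

#include <cerrno>
#include <cstdio>
#include <cstdlib>

static int checked_to_int(const char* s)
{
  char* endptr;
  errno = 0;                           // strtol() only sets errno on error
  long lres = std::strtol(s, &endptr, 10);
  if (*endptr)                         // trailing garbage: not an integer
    {
      std::fprintf(stderr, "failed to parse '%s' as an integer\n", s);
      return 0;
    }
  int res = lres;                      // may lose bits on LP64 systems
  if (res != lres || errno == ERANGE)  // catch both kinds of overflow
    {
      std::fprintf(stderr, "value '%s' is too large for an int\n", s);
      return 0;
    }
  return res;
}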
@@ -27,6 +27,7 @@ #include #include #include +#include #if __has_include() #define HAVE_SPAWN_H 1 #include @@ -52,7 +53,7 @@ struct shorthands_t }; #define SHORTHAND(PRE, POST) { PRE, std::regex("^" PRE), POST } -static shorthands_t shorthands_ltl[] = { +static const shorthands_t shorthands_ltl[] = { SHORTHAND("delag", " %f>%O"), SHORTHAND("lbt", " <%L>%O"), SHORTHAND("ltl2ba", " -f %s>%O"), @@ -72,7 +73,7 @@ static shorthands_t shorthands_ltl[] = { SHORTHAND("owl.* ltl-utilities\\b", " -f %f"), }; -static shorthands_t shorthands_autproc[] = { +static const shorthands_t shorthands_autproc[] = { SHORTHAND("autfilt", " %H>%O"), SHORTHAND("dra2dpa", " <%H>%O"), SHORTHAND("dstar2tgba", " %H>%O"), @@ -84,7 +85,7 @@ static shorthands_t shorthands_autproc[] = { " <%H>%O"), }; -static void show_shorthands(shorthands_t* begin, shorthands_t* end) +static void show_shorthands(const shorthands_t* begin, const shorthands_t* end) { std::cout << ("If a COMMANDFMT does not use any %-sequence, and starts with one of\n" @@ -99,7 +100,8 @@ static void show_shorthands(shorthands_t* begin, shorthands_t* end) } -tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, +tool_spec::tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept : spec(spec), cmd(spec), name(spec), reference(is_ref) { @@ -112,15 +114,15 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, { if (*pos == '{') ++count; - else if (*pos == '}') - if (!--count) - { - name = strndup(cmd + 1, pos - cmd - 1); - cmd = pos + 1; - while (*cmd == ' ' || *cmd == '\t') - ++cmd; - break; - } + else if (*pos == '}' && --count == 0) + { + name = strndup(cmd + 1, pos - cmd - 1); + cmd = pos + 1; + // skip leading whitespace + while (*cmd == ' ' || *cmd == '\t') + ++cmd; + break; + } } } // If there is no % in the string, look for a known @@ -146,11 +148,11 @@ tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, auto& p = *begin++; if (std::regex_search(basename, p.rprefix)) { - int m = strlen(p.suffix); - int q = strlen(cmd); + size_t m = strlen(p.suffix); + size_t q = strlen(cmd); char* tmp = static_cast(malloc(q + m + 1)); - strcpy(tmp, cmd); - strcpy(tmp + q, p.suffix); + memcpy(tmp, cmd, q); + memcpy(tmp + q, p.suffix, m + 1); cmd = tmp; allocated = true; break; @@ -461,6 +463,92 @@ autproc_runner::round_automaton(spot::const_twa_graph_ptr aut, unsigned serial) filename_automaton.new_round(aut, serial); } + +std::string +read_stdout_of_command(char* const* args) +{ +#if HAVE_SPAWN_H + int cout_pipe[2]; + if (int err = pipe(cout_pipe)) + error(2, err, "pipe() failed"); + + posix_spawn_file_actions_t actions; + if (int err = posix_spawn_file_actions_init(&actions)) + error(2, err, "posix_spawn_file_actions_init() failed"); + + posix_spawn_file_actions_addclose(&actions, STDIN_FILENO); + posix_spawn_file_actions_addclose(&actions, cout_pipe[0]); + posix_spawn_file_actions_adddup2(&actions, cout_pipe[1], STDOUT_FILENO); + posix_spawn_file_actions_addclose(&actions, cout_pipe[1]); + + pid_t pid; + if (int err = posix_spawnp(&pid, args[0], &actions, nullptr, args, environ)) + error(2, err, "failed to run '%s'", args[0]); + + if (int err = posix_spawn_file_actions_destroy(&actions)) + error(2, err, "posix_spawn_file_actions_destroy() failed"); + + if (close(cout_pipe[1]) < 0) + error(2, errno, "closing write-side of pipe failed"); + + std::string results; + ssize_t bytes_read; + for (;;) + { + static char buffer[512]; + bytes_read = 
read(cout_pipe[0], buffer, sizeof(buffer)); + if (bytes_read > 0) + results.insert(results.end(), buffer, buffer + bytes_read); + else + break; + } + if (bytes_read < 0) + error(2, errno, "failed to read from pipe"); + + if (close(cout_pipe[0]) < 0) + error(2, errno, "closing read-side of pipe failed"); + + int exit_code = 0; + if (waitpid(pid, &exit_code, 0) == -1) + error(2, errno, "waitpid() failed"); + + if (exit_code) + error(2, 0, "'%s' exited with status %d", args[0], exit_code); + + return results; +#else + // We could provide a pipe+fork+exec alternative implementation, but + // systems without posix_spawn() might also not have fork and exec. + // For instance MinGW does not. So let's fallback to system+tmpfile + // instead for maximum portability. + char prefix[30]; + snprintf(prefix, sizeof prefix, "spot-tmp"); + spot::temporary_file* tmpfile = spot::create_tmpfile(prefix); + std::string tmpname = tmpfile->name(); + std::ostringstream cmd; + for (auto t = args; *t != nullptr; ++t) + spot::quote_shell_string(cmd, *t) << ' '; + cmd << '>'; + spot::quote_shell_string(cmd, tmpfile->name()); + std::string cmdstr = cmd.str(); + int exit_code = system(cmdstr.c_str()); + if (exit_code < 0) + error(2, errno, "failed to execute %s", cmdstr.c_str()); + if (exit_code > 0) + error(2, 0, "'%s' exited with status %d", args[0], exit_code); + + std::ifstream ifs(tmpname, std::ifstream::in); + if (!ifs) + error(2, 0, "failed to open %s (output of %s)", tmpname.c_str(), args[0]); + ifs.exceptions(std::ifstream::failbit | std::ifstream::badbit); + std::stringstream buffer; + buffer << ifs.rdbuf(); + delete tmpfile; + return buffer.str(); +#endif +} + + std::atomic timed_out{false}; unsigned timeout_count = 0; @@ -524,7 +612,7 @@ get_arg(const char*& cmd) { const char* start = cmd; std::string arg; - while (int c = *cmd) + while (char c = *cmd) { switch (c) { @@ -554,14 +642,14 @@ get_arg(const char*& cmd) goto end_loop; case '\'': { - int d = 0; + char d = '\0'; while ((d = *++cmd)) { if (d == '\'') break; arg.push_back(d); } - if (d == 0) + if (d == '\0') return nullptr; } break; @@ -706,6 +794,7 @@ parse_simple_command(const char* cmd) return res; } + #ifndef HAVE_SPAWN_H static void exec_command(const char* cmd) @@ -751,8 +840,6 @@ exec_command(const char* cmd) SPOT_UNREACHABLE(); return; } -#else -extern char **environ; #endif int diff --git a/bin/common_trans.hh b/bin/common_trans.hh index e01131350..0ebe59e8c 100644 --- a/bin/common_trans.hh +++ b/bin/common_trans.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -51,7 +51,8 @@ struct tool_spec // Whether the tool is a reference. 
bool reference; - tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end, + tool_spec(const char* spec, + const shorthands_t* begin, const shorthands_t* end, bool is_ref) noexcept; tool_spec(const tool_spec& other) noexcept; tool_spec& operator=(const tool_spec& other); @@ -71,7 +72,7 @@ struct quoted_formula final: public spot::printable_value struct filed_formula final: public spot::printable { - filed_formula(const quoted_formula& ltl) : f_(ltl) + explicit filed_formula(const quoted_formula& ltl) : f_(ltl) { } @@ -89,9 +90,7 @@ struct filed_formula final: public spot::printable struct filed_automaton final: public spot::printable { - filed_automaton() - { - } + filed_automaton() = default; void print(std::ostream& os, const char* pos) const override; @@ -112,7 +111,7 @@ struct printable_result_filename final: unsigned translator_num; printable_result_filename(); - ~printable_result_filename(); + ~printable_result_filename() override; void reset(unsigned n); void cleanup(); @@ -126,7 +125,7 @@ protected: spot::bdd_dict_ptr dict; // Round-specific variables quoted_formula ltl_formula; - filed_formula filename_formula = ltl_formula; + filed_formula filename_formula{ltl_formula}; // Run-specific variables printable_result_filename output; public: @@ -151,9 +150,9 @@ protected: public: using spot::formater::has; - autproc_runner(// whether we accept the absence of output - // specifier - bool no_output_allowed = false); + explicit autproc_runner(// whether we accept the absence of output + // specifier + bool no_output_allowed = false); void round_automaton(spot::const_twa_graph_ptr aut, unsigned serial); }; @@ -175,3 +174,9 @@ int exec_with_timeout(const char* cmd); #define exec_with_timeout(cmd) system(cmd) #define setup_sig_handler() while (0); #endif // !ENABLE_TIMEOUT + +// Run a command (whose args[0], args[1], etc. are given by args), and +// return its captured stdout. Stderr is not captured. Will abort +// with an error message if the command is not found, or if it exit +// with a non-zero status code. +std::string read_stdout_of_command(char* const* args); diff --git a/bin/dstar2tgba.cc b/bin/dstar2tgba.cc index 3bf5b9393..4b2ec9662 100644 --- a/bin/dstar2tgba.cc +++ b/bin/dstar2tgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
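A minimal usage sketch of the read_stdout_of_command() helper declared just above, mirroring the nullptr-terminated argv array that ltlsynt builds for syfco later in this patch. The echo command and the main() wrapper are only illustrative, and the program has to be linked with the bin/ object that defines the helper.

#include <iostream>
#include <string>
#include "common_trans.hh"

int main()
{
  // The helper wants a mutable, nullptr-terminated argv-style array,
  // hence the static char buffers (string literals would be const).
  static char a0[] = "echo";
  static char a1[] = "hello";
  char* const args[] = { a0, a1, nullptr };
  std::string out = read_stdout_of_command(args);  // exits with an error on failure
  std::cout << out;                                // prints "hello"
}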
@@ -48,7 +48,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Convert automata with any acceptance condition into variants of \ Büchi automata.\n\nThis reads automata into any supported format \ (HOA, LBTT, ltl2dstar, never claim) and outputs a \ @@ -89,7 +89,7 @@ parse_opt(int key, char* arg, struct argp_state*) switch (key) { case 'F': - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; case 'x': { @@ -99,7 +99,7 @@ parse_opt(int key, char* arg, struct argp_state*) } break; case ARGP_KEY_ARG: - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::AUT_FILENAME); break; default: return ARGP_ERR_UNKNOWN; @@ -117,7 +117,7 @@ namespace spot::postprocessor& post; automaton_printer printer; - dstar_processor(spot::postprocessor& post) + explicit dstar_processor(spot::postprocessor& post) : hoa_processor(spot::make_bdd_dict()), post(post), printer(aut_input) { } diff --git a/bin/genaut.cc b/bin/genaut.cc index eb2163cab..f8d6b93ff 100644 --- a/bin/genaut.cc +++ b/bin/genaut.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017-2019, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -43,7 +43,8 @@ using namespace spot; -const char argp_program_doc[] ="Generate ω-automata from predefined patterns."; +static const char argp_program_doc[] = + "Generate ω-automata from predefined patterns."; static const argp_option options[] = { @@ -62,6 +63,11 @@ static const argp_option options[] = { "m-nba", gen::AUT_M_NBA, "RANGE", 0, "An NBA with N+1 states whose determinization needs at least " "N! states", 0}, + { "cyclist-trace-nba", gen::AUT_CYCLIST_TRACE_NBA, "RANGE", 0, + "An NBA with N+2 states that should include cyclist-proof-dba=B.", 0}, + { "cyclist-proof-dba", gen::AUT_CYCLIST_PROOF_DBA, "RANGE", 0, + "A DBA with N+2 states that should be included " + "in cyclist-trace-nba=B.", 0}, RANGE_DOC, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, @@ -122,7 +128,7 @@ output_pattern(gen::aut_pattern_id pattern, int n) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/genltl.cc b/bin/genltl.cc index 6c632de7a..ef8049171 100644 --- a/bin/genltl.cc +++ b/bin/genltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2015-2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012, 2013, 2015-2019, 2022-2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -45,7 +45,7 @@ using namespace spot; -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Generate temporal logic formulas from predefined patterns."; // We reuse the values from gen::ltl_pattern_id as option keys. @@ -84,6 +84,8 @@ static const argp_option options[] = { "eh-patterns", gen::LTL_EH_PATTERNS, "RANGE", OPTION_ARG_OPTIONAL, "Etessami and Holzmann [Concur'00] patterns " "(range should be included in 1..12)", 0 }, + { "eil-gsi", gen::LTL_EIL_GSI, "RANGE", 0, + "G[0..n]((a S b) -> c) rewritten using future operators", 0 }, { "fxg-or", gen::LTL_FXG_OR, "RANGE", 0, "F(p0 | XG(p1 | XG(p2 | ... 
XG(pn))))", 0}, { "gf-equiv", gen::LTL_GF_EQUIV, "RANGE", 0, @@ -315,7 +317,7 @@ output_pattern(gen::ltl_pattern_id pattern, int n, int n2) static void run_jobs() { - for (auto& j: jobs) + for (const auto& j: jobs) { int inc = (j.range.max < j.range.min) ? -1 : 1; int n = j.range.min; diff --git a/bin/ltl2tgba.cc b/bin/ltl2tgba.cc index f3de65a56..73a9a23c6 100644 --- a/bin/ltl2tgba.cc +++ b/bin/ltl2tgba.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2019, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -39,7 +39,7 @@ #include #include -static const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Translate linear-time formulas (LTL/PSL) into various types of automata.\n\n\ By default it will apply all available optimizations to output \ the smallest Transition-based Generalized Büchi Automata, \ @@ -105,10 +105,9 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - if (*arg == '-' && !arg[1]) - jobs.emplace_back(arg, true); - else - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, ((*arg == '-' && !arg[1]) + ? job_type::LTL_FILENAME + : job_type::LTL_STRING)); break; default: @@ -125,10 +124,10 @@ namespace { public: spot::translator& trans; - automaton_printer printer; + automaton_printer printer{ltl_input}; - trans_processor(spot::translator& trans) - : trans(trans), printer(ltl_input) + explicit trans_processor(spot::translator& trans) + : trans(trans) { } diff --git a/bin/ltl2tgta.cc b/bin/ltl2tgta.cc index ad3a64299..60afcf9e8 100644 --- a/bin/ltl2tgta.cc +++ b/bin/ltl2tgta.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -46,7 +46,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Translate linear-time formulas (LTL/PSL) into Testing Automata.\n\n\ By default it outputs a transition-based generalized Testing Automaton \ the smallest Transition-based Generalized Büchi Automata, \ @@ -148,10 +148,9 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - if (*arg == '-' && !arg[1]) - jobs.emplace_back(arg, true); - else - jobs.emplace_back(arg, false); + jobs.emplace_back(arg, ((*arg == '-' && !arg[1]) + ? job_type::LTL_FILENAME + : job_type::LTL_STRING)); break; default: @@ -169,7 +168,7 @@ namespace public: spot::translator& trans; - trans_processor(spot::translator& trans) + explicit trans_processor(spot::translator& trans) : trans(trans) { } diff --git a/bin/ltlcross.cc b/bin/ltlcross.cc index d36478837..3219beb75 100644 --- a/bin/ltlcross.cc +++ b/bin/ltlcross.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -69,7 +69,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Call several LTL/PSL translators and cross-compare their output to detect \ bugs, or to gather statistics. The list of formulas to use should be \ supplied on standard input, or using the -f or -F options.\v\ @@ -264,55 +264,32 @@ end_error() struct statistics { - statistics() - : ok(false), - alternating(false), - status_str(nullptr), - status_code(0), - time(0), - states(0), - edges(0), - transitions(0), - acc(0), - scc(0), - nonacc_scc(0), - terminal_scc(0), - weak_scc(0), - strong_scc(0), - nondetstates(0), - nondeterministic(false), - terminal_aut(false), - weak_aut(false), - strong_aut(false) - { - } - // If OK is false, only the status_str, status_code, and time fields // should be valid. - bool ok; - bool alternating; - const char* status_str; - int status_code; - double time; - unsigned states; - unsigned edges; - unsigned long long transitions; - unsigned acc; - unsigned scc; - unsigned nonacc_scc; - unsigned terminal_scc; - unsigned weak_scc; - unsigned strong_scc; - unsigned nondetstates; - bool nondeterministic; - bool terminal_aut; - bool weak_aut; - bool strong_aut; + bool ok = false; + bool alternating = false; + const char* status_str = nullptr; + int status_code = 0; + double time = 0.0; + unsigned states = 0; + unsigned edges = 0; + unsigned long long transitions = 0; + unsigned acc = 0; + unsigned scc = 0; + unsigned nonacc_scc = 0; + unsigned terminal_scc = 0; + unsigned weak_scc = 0; + unsigned strong_scc = 0; + unsigned nondetstates = 0; + bool nondeterministic = false; + bool terminal_aut = false; + bool weak_aut = false; + bool strong_aut = false; std::vector product_states; std::vector product_transitions; std::vector product_scc; - bool ambiguous; - bool complete; + bool ambiguous = false; + bool complete = false; std::string hoa_str; static void @@ -484,7 +461,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); else tools_push_trans(arg); break; @@ -581,7 +558,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict) { } @@ -1095,17 +1072,14 @@ namespace } // Make sure we do not translate the same formula twice. - if (!allow_dups) + if (!allow_dups && !unique_set.insert(f).second) { - if (!unique_set.insert(f).second) - { - if (!quiet) - std::cerr - << ("warning: This formula or its negation has already" - " been checked.\n Use --allow-dups if it " - "should not be ignored.\n\n"); - return 0; - } + if (!quiet) + std::cerr + << ("warning: This formula or its negation has already" + " been checked.\n Use --allow-dups if it " + "should not be ignored.\n\n"); + return 0; } int problems = 0; diff --git a/bin/ltldo.cc b/bin/ltldo.cc index f57a528b2..6e7bf5ec7 100644 --- a/bin/ltldo.cc +++ b/bin/ltldo.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2020 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2020, 2022-2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -47,7 +47,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Run LTL/PSL formulas through another program, performing conversion\n\ of input and output as required."; @@ -193,7 +193,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: if (arg[0] == '-' && !arg[1]) - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); else tools_push_trans(arg); break; @@ -209,7 +209,7 @@ namespace class xtranslator_runner final: public translator_runner { public: - xtranslator_runner(spot::bdd_dict_ptr dict) + explicit xtranslator_runner(spot::bdd_dict_ptr dict) : translator_runner(dict, true) { } @@ -224,8 +224,6 @@ namespace format(command, tools[translator_num].cmd); std::string cmd = command.str(); - //std::cerr << "Running [" << l << translator_num << "]: " - // << cmd << std::endl; timer.start(); int es = exec_with_timeout(cmd.c_str()); timer.stop(); @@ -312,7 +310,7 @@ namespace spot::printable_value inputf; public: - processor(spot::postprocessor& post) + explicit processor(spot::postprocessor& post) : runner(dict), best_printer(best_stream, best_format), post(post) { printer.add_stat('T', &cmdname); @@ -323,9 +321,7 @@ namespace best_printer.declare('f', &inputf); } - ~processor() - { - } + ~processor() override = default; int process_string(const std::string& input, diff --git a/bin/ltlfilt.cc b/bin/ltlfilt.cc index cc9e0f02b..81e895d42 100644 --- a/bin/ltlfilt.cc +++ b/bin/ltlfilt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -58,7 +59,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Read a list of formulas and output them back after some optional processing.\v\ Exit status:\n\ 0 if some formulas were output (skipped syntax errors do not count)\n\ @@ -100,6 +101,8 @@ enum { OPT_SIZE_MAX, OPT_SIZE_MIN, OPT_SKIP_ERRORS, + OPT_SONF, + OPT_SONF_APS, OPT_STUTTER_INSENSITIVE, OPT_SUSPENDABLE, OPT_SYNTACTIC_GUARANTEE, @@ -127,6 +130,11 @@ static const argp_option options[] = { "negate", OPT_NEGATE, nullptr, 0, "negate each formula", 0 }, { "nnf", OPT_NNF, nullptr, 0, "rewrite formulas in negative normal form", 0 }, + { "sonf", OPT_SONF, "PREFIX", OPTION_ARG_OPTIONAL, + "rewrite formulas in suffix operator normal form", 0 }, + { "sonf-aps", OPT_SONF_APS, "FILENAME", OPTION_ARG_OPTIONAL, + "when used with --sonf, output the newly introduced atomic " + "propositions", 0 }, { "relabel", OPT_RELABEL, "abc|pnn", OPTION_ARG_OPTIONAL, "relabel all atomic propositions, alphabetically unless " \ "specified otherwise", 0 }, @@ -316,6 +324,7 @@ static range opt_nth = { 0, std::numeric_limits::max() }; static int opt_max_count = -1; static long int match_count = 0; static const char* from_ltlf = nullptr; +static const char* sonf = nullptr; // We want all these variables to be destroyed when we exit main, to @@ -327,6 +336,7 @@ static struct opt_t spot::bdd_dict_ptr dict = spot::make_bdd_dict(); spot::exclusive_ap excl_ap; std::unique_ptr output_define = nullptr; + std::unique_ptr output_sonf = nullptr; spot::formula implied_by = nullptr; spot::formula imply = nullptr; spot::formula equivalent_to = nullptr; @@ -377,7 +387,7 @@ parse_opt(int key, char* arg, 
struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? - jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_ACCEPT_WORD: try @@ -460,6 +470,12 @@ parse_opt(int key, char* arg, struct argp_state*) case OPT_NNF: nnf = true; break; + case OPT_SONF: + sonf = arg ? arg : "sonf_"; + break; + case OPT_SONF_APS: + opt->output_sonf.reset(new output_file(arg ? arg : "-")); + break; case OPT_OBLIGATION: obligation = true; break; @@ -570,7 +586,7 @@ namespace fset_t unique_set; spot::relabeling_map relmap; - ltl_processor(spot::tl_simplifier& simpl) + explicit ltl_processor(spot::tl_simplifier& simpl) : simpl(simpl) { } @@ -650,6 +666,25 @@ namespace if (nnf) f = simpl.negative_normal_form(f); + if (sonf != nullptr) + { + std::vector new_aps; + std::tie(f, new_aps) = suffix_operator_normal_form(f, sonf); + + if (opt->output_sonf + && output_format != count_output + && output_format != quiet_output) + { + for (size_t i = 0; i < new_aps.size(); ++i) + { + if (i > 0) + opt->output_sonf->ostream() << ' '; + opt->output_sonf->ostream() << new_aps[i]; + } + opt->output_sonf->ostream() << '\n'; + } + } + switch (relabeling) { case ApRelabeling: @@ -687,7 +722,7 @@ namespace matched &= !syntactic_si || f.is_syntactic_stutter_invariant(); if (matched && (ap_n.min > 0 || ap_n.max >= 0)) { - auto s = atomic_prop_collect(f); + spot::atomic_prop_set* s = atomic_prop_collect(f); int n = s->size(); delete s; matched &= (ap_n.min <= 0) || (n >= ap_n.min); @@ -726,7 +761,7 @@ namespace aut = ltl_to_tgba_fm(f, simpl.get_dict(), true); if (matched && !opt->acc_words.empty()) - for (auto& word_aut: opt->acc_words) + for (const spot::twa_graph_ptr& word_aut: opt->acc_words) if (spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -734,7 +769,7 @@ namespace } if (matched && !opt->rej_words.empty()) - for (auto& word_aut: opt->rej_words) + for (const spot::twa_graph_ptr& word_aut: opt->rej_words) if (!spot::product(aut, word_aut)->is_empty()) { matched = false; @@ -808,12 +843,12 @@ namespace { // Sort the formulas alphabetically. std::map m; - for (auto& p: relmap) - m.emplace(str_psl(p.first), p.second); - for (auto& p: m) + for (const auto& [newformula, oldname]: relmap) + m.emplace(str_psl(newformula), oldname); + for (const auto& [newname, oldname]: m) stream_formula(opt->output_define->ostream() - << "#define " << p.first << " (", - p.second, filename, + << "#define " << newname << " (", + oldname, filename, std::to_string(linenum).c_str()) << ")\n"; } one_match = true; @@ -841,7 +876,7 @@ main(int argc, char** argv) exit(err); if (jobs.empty()) - jobs.emplace_back("-", 1); + jobs.emplace_back("-", job_type::LTL_FILENAME); if (boolean_to_isop && simplification_level == 0) simplification_level = 1; diff --git a/bin/ltlgrind.cc b/bin/ltlgrind.cc index 393656b00..b59569a59 100644 --- a/bin/ltlgrind.cc +++ b/bin/ltlgrind.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014, 2015, 2016, 2017, 2018, 2019 Laboratoire de Recherche et +// Copyright (C) 2014-2019, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -143,7 +143,7 @@ parse_opt(int key, char* arg, struct argp_state*) break; case ARGP_KEY_ARG: // FIXME: use stat() to distinguish filename from string? 
- jobs.emplace_back(arg, true); + jobs.emplace_back(arg, job_type::LTL_FILENAME); break; case OPT_AP2CONST: opt_all = 0; diff --git a/bin/ltlsynt.cc b/bin/ltlsynt.cc index 45fd3b159..35ac4194b 100644 --- a/bin/ltlsynt.cc +++ b/bin/ltlsynt.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -23,8 +23,10 @@ #include "common_aoutput.hh" #include "common_finput.hh" +#include "common_hoaread.hh" #include "common_setup.hh" #include "common_sys.hh" +#include "common_trans.hh" #include #include @@ -35,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -44,8 +47,12 @@ enum { OPT_ALGO = 256, + OPT_BYPASS, OPT_CSV, OPT_DECOMPOSE, + OPT_DOT, + OPT_FROM_PGAME, + OPT_HIDE, OPT_INPUT, OPT_OUTPUT, OPT_PRINT, @@ -53,6 +60,7 @@ enum OPT_PRINT_HOA, OPT_REAL, OPT_SIMPLIFY, + OPT_TLSF, OPT_VERBOSE, OPT_VERIFY }; @@ -63,13 +71,19 @@ static const argp_option options[] = { nullptr, 0, nullptr, 0, "Input options:", 1 }, { "outs", OPT_OUTPUT, "PROPS", 0, "comma-separated list of controllable (a.k.a. output) atomic" - " propositions", 0}, + " propositions", 0 }, { "ins", OPT_INPUT, "PROPS", 0, - "comma-separated list of controllable (a.k.a. output) atomic" - " propositions", 0}, + "comma-separated list of uncontrollable (a.k.a. input) atomic" + " propositions", 0 }, + { "tlsf", OPT_TLSF, "FILENAME", 0, + "Read a TLSF specification from FILENAME, and call syfco to " + "convert it into LTL", 0 }, + { "from-pgame", OPT_FROM_PGAME, "FILENAME", 0, + "Read a parity game in Extended HOA format instead of building it.", + 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Fine tuning:", 10 }, - { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old", 0, + { "algo", OPT_ALGO, "sd|ds|ps|lar|lar.old|acd", 0, "choose the algorithm for synthesis:" " \"sd\": translate to tgba, split, then determinize;" " \"ds\": translate to tgba, determinize, then split;" @@ -77,45 +91,60 @@ static const argp_option options[] = " \"lar\": translate to a deterministic automaton with arbitrary" " acceptance condition, then use LAR to turn to parity," " then split (default);" - " \"lar.old\": old version of LAR, for benchmarking.\n", 0 }, + " \"lar.old\": old version of LAR, for benchmarking;" + " \"acd\": translate to a deterministic automaton with arbitrary" + " acceptance condition, then use ACD to turn to parity," + " then split.\n", 0 }, + { "bypass", OPT_BYPASS, "yes|no", 0, + "whether to try to avoid to construct a parity game " + "(enabled by default)", 0}, { "decompose", OPT_DECOMPOSE, "yes|no", 0, "whether to decompose the specification as multiple output-disjoint " "problems to solve independently (enabled by default)", 0 }, { "simplify", OPT_SIMPLIFY, "no|bisim|bwoa|sat|bisim-sat|bwoa-sat", 0, - "simplification to apply to the controler (no) nothing, " - "(bisim) bisimulation-based reduction, (bwoa) bissimulation-based " + "simplification to apply to the controller (no) nothing, " + "(bisim) bisimulation-based reduction, (bwoa) bisimulation-based " "reduction with output assignment, (sat) SAT-based minimization, " "(bisim-sat) SAT after bisim, (bwoa-sat) SAT after bwoa. 
Defaults " "to 'bwoa'.", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Output options:", 20 }, { "print-pg", OPT_PRINT, nullptr, 0, - "print the parity game in the pgsolver format, do not solve it", 0}, + "print the parity game in the pgsolver format, do not solve it", 0 }, { "print-game-hoa", OPT_PRINT_HOA, "options", OPTION_ARG_OPTIONAL, - "print the parity game in the HOA format, do not solve it", 0}, + "print the parity game in the HOA format, do not solve it", 0 }, { "realizability", OPT_REAL, nullptr, 0, - "realizability only, do not compute a winning strategy", 0}, + "realizability only, do not compute a winning strategy", 0 }, { "aiger", OPT_PRINT_AIGER, "ite|isop|both[+ud][+dc]" "[+sub0|sub1|sub2]", OPTION_ARG_OPTIONAL, - "prints a winning strategy as an AIGER circuit. The first, and only " - "mandatory option defines the method to be used. \"ite\" for " - "If-then-else normal form; " - "\"isop\" for irreducible sum of producs; " - "\"both\" tries both encodings and keeps the smaller one. " - "The other options further " - "refine the encoding, see aiger::encode_bdd.", 0}, - { "verbose", OPT_VERBOSE, nullptr, 0, - "verbose mode", -1 }, - { "verify", OPT_VERIFY, nullptr, 0, - "verifies the strategy or (if demanded) aiger against the spec.", -1 }, + "encode the winning strategy as an AIG circuit and print it in AIGER" + " format. The first word indicates the encoding to used: \"ite\" for " + "If-Then-Else normal form; " + "\"isop\" for irreducible sum of products; " + "\"both\" tries both and keeps the smaller one. " + "Other options further " + "refine the encoding, see aiger::encode_bdd. Defaults to \"ite\".", 0 }, + { "dot", OPT_DOT, "options", OPTION_ARG_OPTIONAL, + "Use dot format when printing the result (game, strategy, or " + "AIG circuit, depending on other options). The options that " + "may be passed to --dot depend on the nature of what is printed. " + "For games and strategies, standard automata rendering " + "options are supported (e.g., see ltl2tgba --dot). For AIG circuit, " + "use (h) for horizontal and (v) for vertical layouts.", 0 }, { "csv", OPT_CSV, "[>>]FILENAME", OPTION_ARG_OPTIONAL, "output statistics as CSV in FILENAME or on standard output " "(if '>>' is used to request append mode, the header line is " "not output)", 0 }, + { "hide-status", OPT_HIDE, nullptr, 0, + "Hide the REALIZABLE or UNREALIZABLE line. 
(Hint: exit status " + "is enough of an indication.)", 0 }, /**************************************************/ { nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 }, { "extra-options", 'x', "OPTS", 0, "fine-tuning options (see spot-x (7))", 0 }, + { "verbose", OPT_VERBOSE, nullptr, 0, "verbose mode", 0 }, + { "verify", OPT_VERIFY, nullptr, 0, + "verify the strategy or (if demanded) AIG against the formula", 0 }, { nullptr, 0, nullptr, 0, nullptr, 0 }, }; @@ -123,20 +152,19 @@ static const struct argp_child children[] = { { &finput_argp_headless, 0, nullptr, 0 }, { &aoutput_argp, 0, nullptr, 0 }, - //{ &aoutput_o_format_argp, 0, nullptr, 0 }, { &misc_argp, 0, nullptr, 0 }, { nullptr, 0, nullptr, 0 } }; -const char argp_program_doc[] = "\ +static const char argp_program_doc[] = "\ Synthesize a controller from its LTL specification.\v\ Exit status:\n\ - 0 if the input problem is realizable\n\ - 1 if the input problem is not realizable\n\ + 0 if all input problems were realizable\n\ + 1 if at least one input problem was not realizable\n\ 2 if any error has been reported"; -static std::vector all_output_aps; -static std::vector all_input_aps; +static std::optional> all_output_aps; +static std::optional> all_input_aps; static const char* opt_csv = nullptr; static bool opt_print_pg = false; @@ -145,8 +173,10 @@ static const char* opt_print_hoa_args = nullptr; static bool opt_real = false; static bool opt_do_verify = false; static const char* opt_print_aiger = nullptr; - +static const char* opt_dot_arg = nullptr; +static bool opt_dot = false; static spot::synthesis_info* gi; +static bool show_status = true; static char const *const algo_names[] = { @@ -154,7 +184,8 @@ static char const *const algo_names[] = "sd", "ps", "lar", - "lar.old" + "lar.old", + "acd", }; static char const *const algo_args[] = @@ -164,6 +195,7 @@ static char const *const algo_args[] = "dpasplit", "ps", "lar", "lar.old", + "acd", nullptr }; static spot::synthesis_info::algo const algo_types[] = @@ -173,9 +205,24 @@ static spot::synthesis_info::algo const algo_types[] = spot::synthesis_info::algo::DPA_SPLIT, spot::synthesis_info::algo::DPA_SPLIT, spot::synthesis_info::algo::LAR, spot::synthesis_info::algo::LAR_OLD, + spot::synthesis_info::algo::ACD, }; ARGMATCH_VERIFY(algo_args, algo_types); +static const char* const bypass_args[] = + { + "yes", "true", "enabled", "1", + "no", "false", "disabled", "0", + nullptr + }; +static bool bypass_values[] = + { + true, true, true, true, + false, false, false, false, + }; +ARGMATCH_VERIFY(bypass_args, bypass_values); +bool opt_bypass = true; + static const char* const decompose_args[] = { "yes", "true", "enabled", "1", @@ -221,7 +268,22 @@ namespace }; static void - print_csv(const spot::formula& f) + dispatch_print_hoa(const spot::const_twa_graph_ptr& game) + { + if (opt_dot) + spot::print_dot(std::cout, game, opt_print_hoa_args); + else if (opt_print_pg) + spot::print_pg(std::cout, game); + else + spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; + } + + // If filename is passed, it is printed instead of the formula. We + // use that when processing games since we have no formula to print. + // It would be cleaner to have two columns: one for location (that's + // filename + line number if known), and one for formula (if known). 
+ static void + print_csv(const spot::formula& f, const char* filename = nullptr) { auto& vs = gi->verbose_stream; auto& bv = gi->bv; @@ -230,7 +292,9 @@ namespace if (vs) *vs << "writing CSV to " << opt_csv << '\n'; - output_file outf(opt_csv); + static bool not_first_time = false; + output_file outf(opt_csv, not_first_time); + not_first_time = true; // force append on next print. std::ostream& out = outf.ostream(); // Do not output the header line if we append to a file. @@ -255,10 +319,15 @@ namespace out << '\n'; } std::ostringstream os; - os << f; - spot::escape_rfc4180(out << '"', os.str()); - out << "\",\"" << algo_names[(int) gi->s] - << "\"," << bv->total_time + if (filename) + os << filename; + else + os << f; + spot::escape_rfc4180(out << '"', os.str()) << "\","; + // if a filename was given, assume the game has been read directly + if (!filename) + out << '"' << algo_names[(int) gi->s] << '"'; + out << ',' << bv->total_time << ',' << bv->trans_time << ',' << bv->split_time << ',' << bv->paritize_time; @@ -285,11 +354,13 @@ namespace outf.close(opt_csv); } - int + static int solve_formula(const spot::formula& f, const std::vector& input_aps, const std::vector& output_aps) { + if (opt_csv) // reset benchmark data + gi->bv = spot::synthesis_info::bench_var(); spot::stopwatch sw; if (gi->bv) sw.start(); @@ -305,6 +376,12 @@ namespace if (opt_decompose_ltl) { auto subs = split_independant_formulas(f, output_aps); + if (gi->verbose_stream) + { + *gi->verbose_stream << "there are " + << subs.first.size() + << " subformulas\n"; + } if (subs.first.size() > 1) { sub_form = subs.first; @@ -347,17 +424,6 @@ namespace auto sub_o = sub_outs_str.begin(); std::vector mealy_machines; - auto print_game = want_game ? - [](const spot::twa_graph_ptr& game)->void - { - if (opt_print_pg) - pg_print(std::cout, game); - else - spot::print_hoa(std::cout, game, opt_print_hoa_args) << '\n'; - } - : - [](const spot::twa_graph_ptr&)->void{}; - for (; sub_f != sub_form.end(); ++sub_f, ++sub_o) { spot::mealy_like m_like @@ -368,15 +434,16 @@ namespace }; // If we want to print a game, // we never use the direct approach - if (!want_game) + if (!want_game && opt_bypass) m_like = - spot::try_create_direct_strategy(*sub_f, *sub_o, *gi); + spot::try_create_direct_strategy(*sub_f, *sub_o, *gi, !opt_real); switch (m_like.success) { case spot::mealy_like::realizability_code::UNREALIZABLE: { - std::cout << "UNREALIZABLE" << std::endl; + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; safe_tot_time(); return 1; } @@ -394,13 +461,20 @@ namespace assert((spptr->at(arena->get_init_state_number()) == false) && "Env needs first turn"); } - print_game(arena); + if (want_game) + { + dispatch_print_hoa(arena); + continue; + } if (!spot::solve_game(arena, *gi)) { - std::cout << "UNREALIZABLE" << std::endl; + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; safe_tot_time(); return 1; } + if (gi->bv) + gi->bv->realizable = true; // Create the (partial) strategy // only if we need it if (!opt_real) @@ -408,14 +482,13 @@ namespace spot::mealy_like ml; ml.success = spot::mealy_like::realizability_code::REALIZABLE_REGULAR; - if (opt_print_aiger) - // we do not care about the type, - // machine to aiger can handle it - ml.mealy_like = - spot::solved_game_to_mealy(arena, *gi); - else - ml.mealy_like = - spot::solved_game_to_separated_mealy(arena, *gi); + // By default this produces a split machine + ml.mealy_like = + spot::solved_game_to_mealy(arena, *gi); + // Keep the machine split for aiger + // else -> 
separated + spot::simplify_mealy_here(ml.mealy_like, *gi, + opt_print_aiger); ml.glob_cond = bddfalse; mealy_machines.push_back(ml); } @@ -426,54 +499,14 @@ namespace // the direct approach yielded a strategy // which can now be minimized // We minimize only if we need it - assert(m_like.mealy_like && "Expected success but found no mealy!"); + assert(opt_real || + (m_like.mealy_like && "Expected success but found no mealy!")); if (!opt_real) { - spot::stopwatch sw_direct; - sw_direct.start(); - - if ((0 < gi->minimize_lvl) && (gi->minimize_lvl < 3)) - // Uses reduction or not, - // both work with mealy machines (non-separated) - reduce_mealy_here(m_like.mealy_like, gi->minimize_lvl == 2); - - auto delta = sw_direct.stop(); - - sw_direct.start(); - // todo better algo here? - m_like.mealy_like = - split_2step(m_like.mealy_like, - spot::get_synthesis_outputs(m_like.mealy_like), - false); - if (gi->bv) - gi->bv->split_time += sw_direct.stop(); - - sw_direct.start(); - if (gi->minimize_lvl >= 3) - { - sw_direct.start(); - // actual minimization, works on split mealy - m_like.mealy_like = minimize_mealy(m_like.mealy_like, - gi->minimize_lvl - 4); - delta = sw_direct.stop(); - } - - // If our goal is to have an aiger, - // we can use split or separated machines - if (!opt_print_aiger) - // Unsplit to have separated mealy - m_like.mealy_like = unsplit_mealy(m_like.mealy_like); - - if (gi->bv) - gi->bv->strat2aut_time += delta; - if (gi->verbose_stream) - *gi->verbose_stream << "final strategy has " - << m_like.mealy_like->num_states() - << " states and " - << m_like.mealy_like->num_edges() - << " edges\n" - << "minimization took " << delta - << " seconds\n"; + // Keep the machine split for aiger + // else -> separated + spot::simplify_mealy_here(m_like.mealy_like, *gi, + opt_print_aiger); } SPOT_FALLTHROUGH; } @@ -494,7 +527,8 @@ namespace return 0; } - std::cout << "REALIZABLE" << std::endl; + if (show_status) + std::cout << "REALIZABLE" << std::endl; if (opt_real) { safe_tot_time(); @@ -503,8 +537,8 @@ namespace // If we reach this line // a strategy was found for each subformula assert(mealy_machines.size() == sub_form.size() - && "There are subformula for which no mealy like object" - "has been created."); + && ("There are subformula for which no mealy like object" + " has been created.")); spot::aig_ptr saig = nullptr; spot::twa_graph_ptr tot_strat = nullptr; @@ -533,7 +567,10 @@ namespace << " latches and " << saig->num_gates() << " gates\n"; } - spot::print_aiger(std::cout, saig) << '\n'; + if (opt_dot) + spot::print_dot(std::cout, saig, opt_dot_arg); + else + spot::print_aiger(std::cout, saig) << '\n'; } else { @@ -545,7 +582,8 @@ namespace && "ltlsynt: Cannot handle TGBA as strategy."); tot_strat = mealy_machines.front().mealy_like; for (size_t i = 1; i < mealy_machines.size(); ++i) - tot_strat = spot::product(tot_strat, mealy_machines[i].mealy_like); + tot_strat = spot::mealy_product(tot_strat, + mealy_machines[i].mealy_like); printer.print(tot_strat, timer_printer_dummy); } @@ -583,15 +621,27 @@ namespace return 0; } + static void + split_aps(const std::string& arg, std::vector& where) + { + std::istringstream aps(arg); + std::string ap; + while (std::getline(aps, ap, ',')) + { + ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); + where.push_back(str_tolower(ap)); + } + } + class ltl_processor final : public job_processor { private: - std::vector input_aps_; - std::vector output_aps_; + std::optional> input_aps_; + std::optional> output_aps_; public: - 
ltl_processor(std::vector input_aps_, - std::vector output_aps_) + ltl_processor(std::optional> input_aps_, + std::optional> output_aps_) : input_aps_(std::move(input_aps_)), output_aps_(std::move(output_aps_)) { @@ -601,11 +651,13 @@ namespace const char* filename, int linenum) override { auto unknown_aps = [](spot::formula f, - const std::vector& known, - const std::vector* known2 = nullptr) + const std::optional>& known, + const std::optional>& known2 = {}) { std::vector unknown; std::set seen; + // If we don't have --ins and --outs, we must not find an AP. + bool can_have_ap = known.has_value(); f.traverse([&](const spot::formula& s) { if (s.is(spot::op::ap)) @@ -613,10 +665,11 @@ namespace if (!seen.insert(s).second) return false; const std::string& a = s.ap_name(); - if (std::find(known.begin(), known.end(), a) == known.end() - && (!known2 + if (!can_have_ap + || (std::find(known->begin(), known->end(), a) == known->end() + && (!known2.has_value() || std::find(known2->begin(), - known2->end(), a) == known2->end())) + known2->end(), a) == known2->end()))) unknown.push_back(a); } return false; @@ -626,36 +679,233 @@ namespace // Decide which atomic propositions are input or output. int res; - if (input_aps_.empty() && !output_aps_.empty()) + if (!input_aps_.has_value() && output_aps_.has_value()) { - res = solve_formula(f, unknown_aps(f, output_aps_), output_aps_); + res = solve_formula(f, unknown_aps(f, output_aps_), *output_aps_); } - else if (output_aps_.empty() && !input_aps_.empty()) + else if (!output_aps_.has_value() && input_aps_.has_value()) { - res = solve_formula(f, input_aps_, unknown_aps(f, input_aps_)); + res = solve_formula(f, *input_aps_, unknown_aps(f, input_aps_)); } - else if (output_aps_.empty() && input_aps_.empty()) + else if (!output_aps_.has_value() && !input_aps_.has_value()) { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "one of --ins or --outs should list '%s'", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } else { - for (const std::string& ap: unknown_aps(f, input_aps_, &output_aps_)) + for (const std::string& ap: unknown_aps(f, input_aps_, output_aps_)) error_at_line(2, 0, filename, linenum, "both --ins and --outs are specified, " "but '%s' is unlisted", ap.c_str()); - res = solve_formula(f, input_aps_, output_aps_); + res = solve_formula(f, *input_aps_, *output_aps_); } if (opt_csv) print_csv(f); return res; } + + int + process_tlsf_file(const char* filename) override + { + static char arg0[] = "syfco"; + static char arg1[] = "-f"; + static char arg2[] = "ltlxba"; + static char arg3[] = "-m"; + static char arg4[] = "fully"; + char* command[] = { arg0, arg1, arg2, arg3, arg4, + const_cast(filename), nullptr }; + std::string tlsf_string = read_stdout_of_command(command); + + // The set of atomic proposition will be temporary set to those + // given by syfco, unless they were forced from the command-line. 
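The unknown_aps lambda above walks each formula with formula::traverse and collects any atomic proposition not declared via --ins/--outs. Stripped of the error handling, the same traversal pattern looks roughly like this against the public spot::formula API; the sample formula and the chosen header are illustrative assumptions, not taken from this patch.

#include <iostream>
#include <set>
#include <string>
#include <spot/tl/parse.hh>

int main()
{
  spot::formula f = spot::parse_formula("G(req -> F grant) & G !err");
  std::set<std::string> aps;
  // Returning false lets traverse() keep descending into subformulas.
  f.traverse([&](const spot::formula& s)
             {
               if (s.is(spot::op::ap))
                 aps.insert(s.ap_name());
               return false;
             });
  for (const std::string& a : aps)
    std::cout << a << '\n';    // err, grant, req (std::set sorts them)
}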
+ bool reset_aps = false; + if (!input_aps_.has_value() && !output_aps_.has_value()) + { + reset_aps = true; + static char arg5[] = "--print-output-signals"; + char* command[] = { arg0, arg5, + const_cast(filename), nullptr }; + std::string res = read_stdout_of_command(command); + + output_aps_.emplace(std::vector{}); + split_aps(res, *output_aps_); + } + int res = process_string(tlsf_string, filename); + if (reset_aps) + output_aps_.reset(); + return res; + } + + int process_pgame(spot::twa_graph_ptr arena, + const std::string& location) + { + if (opt_csv) // reset benchmark data + gi->bv = spot::synthesis_info::bench_var(); + spot::stopwatch sw_global; + spot::stopwatch sw_local; + if (gi->bv) + { + sw_global.start(); + sw_local.start(); + } + if (!arena->get_named_prop("synthesis-outputs")) + { + std::cerr << location << ": controllable-AP is not specified\n"; + return 2; + } + if (!arena->get_named_prop>("state-player")) + arena = spot::split_2step(arena, true); + else + { + // Check if the game is alternating and fix trivial cases + const unsigned N = arena->num_states(); + // Can not use get_state_players because we need a non-const version + auto spptr = + arena->get_named_prop>("state-player"); + assert(spptr); + const bdd& outs = get_synthesis_outputs(arena); + for (unsigned n = 0; n < N; ++n) + { + const bool p = (*spptr)[n]; + for (auto& e : arena->out(n)) + { + if (p != (*spptr)[e.dst]) + continue; // All good + // Check if the condition is a simply conjunction of input and + // output. If so insert an intermediate state + // This also covers trivial self-loops + bdd cond = e.cond; + bdd i_cond = bdd_exist(cond, outs); + bdd o_cond = bdd_existcomp(cond, outs); + if ((i_cond & o_cond) == cond) + { + unsigned inter = arena->new_state(); + spptr->push_back(!p); + e.cond = p ? o_cond : i_cond; + e.dst = inter; + arena->new_edge(inter, e.dst, !p ? o_cond : i_cond); + } + else + throw std::runtime_error("ltlsynt: given parity game is not" + "alternating and not trivially fixable!"); + } + } + } + if (gi->bv) + { + gi->bv->split_time += sw_local.stop(); + gi->bv->nb_states_arena += arena->num_states(); + auto spptr = + arena->get_named_prop>("state-player"); + assert(spptr); + gi->bv->nb_states_arena_env += + std::count(spptr->cbegin(), spptr->cend(), false); + } + if (opt_print_pg || opt_print_hoa) + { + dispatch_print_hoa(arena); + return 0; + } + auto safe_tot_time = [&]() { + if (gi->bv) + gi->bv->total_time = sw_global.stop(); + }; + if (!spot::solve_game(arena, *gi)) + { + if (show_status) + std::cout << "UNREALIZABLE" << std::endl; + safe_tot_time(); + return 1; + } + if (gi->bv) + gi->bv->realizable = true; + if (show_status) + std::cout << "REALIZABLE" << std::endl; + if (opt_real) + { + safe_tot_time(); + return 0; + } + sw_local.start(); + spot::twa_graph_ptr mealy_like = + spot::solved_game_to_mealy(arena, *gi); + // Keep the machine split for aiger otherwise, separate it. 
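process_pgame above chains the library calls that ltlsynt otherwise reaches through an LTL formula: solve the parity game, turn the winning strategy into a Mealy machine, simplify it, and optionally encode it as an and-inverter graph. The sketch below shows that tail of the pipeline on a game read from a file. It is a rough illustration only: the header locations, the parse_aut entry point, and the "ite" encoding mode are assumptions not shown in this patch, and a real caller must also provide the synthesis-outputs/state-player properties that process_pgame checks for.

// Header locations are approximate.
#include <iostream>
#include <spot/parseaut/public.hh>     // spot::parse_aut (assumed entry point)
#include <spot/twaalgos/game.hh>       // spot::solve_game
#include <spot/twaalgos/synthesis.hh>  // synthesis_info, solved_game_to_mealy, ...
#include <spot/twaalgos/aiger.hh>      // mealy_machine_to_aig, print_aiger

int main()
{
  // A parity game in HOA format, with controllable-AP already set.
  auto parsed = spot::parse_aut("game.hoa", spot::make_bdd_dict());
  if (!parsed->aut || !parsed->errors.empty())
    return 2;
  spot::twa_graph_ptr arena = parsed->aut;

  spot::synthesis_info gi;
  if (!spot::solve_game(arena, gi))
    {
      std::cout << "UNREALIZABLE\n";
      return 1;
    }
  // Winning strategy -> (split) Mealy machine, then in-place simplification.
  spot::twa_graph_ptr m = spot::solved_game_to_mealy(arena, gi);
  spot::simplify_mealy_here(m, gi, true /* keep it split for the AIG encoder */);
  // Encode as an and-inverter graph, as ltlsynt --aiger would.
  spot::aig_ptr circuit = spot::mealy_machine_to_aig(m, "ite");
  spot::print_aiger(std::cout, circuit) << '\n';
  return 0;
}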
+ spot::simplify_mealy_here(mealy_like, *gi, opt_print_aiger); + + automaton_printer printer; + spot::process_timer timer_printer_dummy; + if (opt_print_aiger) + { + if (gi->bv) + sw_local.start(); + spot::aig_ptr saig = + spot::mealy_machine_to_aig(mealy_like, opt_print_aiger); + if (gi->bv) + { + gi->bv->aig_time = sw_local.stop(); + gi->bv->nb_latches = saig->num_latches(); + gi->bv->nb_gates = saig->num_gates(); + } + if (gi->verbose_stream) + { + *gi->verbose_stream << "AIG circuit was created in " + << gi->bv->aig_time + << " seconds and has " << saig->num_latches() + << " latches and " + << saig->num_gates() << " gates\n"; + } + spot::print_aiger(std::cout, saig) << '\n'; + } + else + { + printer.print(mealy_like, timer_printer_dummy); + } + safe_tot_time(); + return 0; + } + + int + process_aut_file(const char* filename) override + { + spot::automaton_stream_parser hp(filename); + int err = 0; + while (!abort_run) + { + spot::parsed_aut_ptr haut = hp.parse(spot::make_bdd_dict()); + if (!haut->aut && haut->errors.empty()) + break; + if (haut->format_errors(std::cerr)) + err = 2; + if (!haut->aut /*|| (err && abort_on_error_)*/) + { + error(2, 0, "failed to read automaton from %s", + haut->filename.c_str()); + } + else if (haut->aborted) + { + std::cerr << haut->filename << ':' << haut->loc + << ": aborted input automaton\n"; + err = std::max(err, 2); + } + else + { + std::ostringstream os; + os << haut->filename << ':' << haut->loc; + std::string loc = os.str(); + int res = process_pgame(haut->aut, loc); + if (res < 2 && opt_csv) + print_csv(nullptr, loc.c_str()); + err = std::max(err, res); + } + } + return err; + } }; } @@ -669,35 +919,37 @@ parse_opt(int key, char *arg, struct argp_state *) case OPT_ALGO: gi->s = XARGMATCH("--algo", arg, algo_args, algo_types); break; + case OPT_BYPASS: + opt_bypass = XARGMATCH("--bypass", arg, bypass_args, bypass_values); + break; case OPT_CSV: opt_csv = arg ? 
arg : "-"; - if (not gi->bv) - gi->bv = spot::synthesis_info::bench_var(); break; case OPT_DECOMPOSE: opt_decompose_ltl = XARGMATCH("--decompose", arg, decompose_args, decompose_values); break; + case OPT_DOT: + opt_dot = true; + automaton_format_opt = opt_dot_arg = arg; + automaton_format = Dot; + break; + case OPT_FROM_PGAME: + jobs.emplace_back(arg, job_type::AUT_FILENAME); + break; + case OPT_HIDE: + show_status = false; + break; case OPT_INPUT: { - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) - { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_input_aps.push_back(str_tolower(ap)); - } + all_input_aps.emplace(std::vector{}); + split_aps(arg, *all_input_aps); break; } case OPT_OUTPUT: { - std::istringstream aps(arg); - std::string ap; - while (std::getline(aps, ap, ',')) - { - ap.erase(remove_if(ap.begin(), ap.end(), isspace), ap.end()); - all_output_aps.push_back(str_tolower(ap)); - } + all_output_aps.emplace(std::vector{}); + split_aps(arg, *all_output_aps); break; } case OPT_PRINT: @@ -718,6 +970,9 @@ parse_opt(int key, char *arg, struct argp_state *) gi->minimize_lvl = XARGMATCH("--simplify", arg, simplify_args, simplify_values); break; + case OPT_TLSF: + jobs.emplace_back(arg, job_type::TLSF_FILENAME); + break; case OPT_VERBOSE: gi->verbose_stream = &std::cerr; if (not gi->bv) @@ -754,13 +1009,15 @@ main(int argc, char **argv) argp_program_doc, children, nullptr, nullptr }; if (int err = argp_parse(&ap, argc, argv, ARGP_NO_HELP, nullptr, nullptr)) exit(err); + check_no_formula(); // Check if inputs and outputs are distinct - for (const std::string& ai : all_input_aps) - if (std::find(all_output_aps.begin(), all_output_aps.end(), ai) - != all_output_aps.end()) - error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); + if (all_input_aps.has_value() && all_output_aps.has_value()) + for (const std::string& ai : *all_input_aps) + if (std::find(all_output_aps->begin(), all_output_aps->end(), ai) + != all_output_aps->end()) + error(2, 0, "'%s' appears both in --ins and --outs", ai.c_str()); ltl_processor processor(all_input_aps, all_output_aps); if (int res = processor.run(); res == 0 || res == 1) diff --git a/bin/randaut.cc b/bin/randaut.cc index 27512c9ce..1ceb82ee0 100644 --- a/bin/randaut.cc +++ b/bin/randaut.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -42,7 +42,7 @@ #include -const char argp_program_doc[] = "\ +static const char argp_program_doc[] = "\ Generate random connected automata.\n\n\ The automata are built over the atomic propositions named by PROPS...\n\ or, if N is a nonnegative number, using N arbitrary names.\n\ diff --git a/bin/randltl.cc b/bin/randltl.cc index cded77171..749fcf373 100644 --- a/bin/randltl.cc +++ b/bin/randltl.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2019 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) 2012-2016, 2018-2019, 2022, 2023 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -40,7 +40,7 @@ #include #include -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Generate random temporal logic formulas.\n\n\ The formulas are built over the atomic propositions named by PROPS...\n\ or, if N is a nonnegative number, using N arbitrary names.\v\ @@ -65,7 +65,6 @@ enum { OPT_DUMP_PRIORITIES, OPT_DUPS, OPT_LTL_PRIORITIES, - OPT_PSL_PRIORITIES, OPT_SEED, OPT_SERE_PRIORITIES, OPT_TREE_SIZE, @@ -194,7 +193,6 @@ parse_opt(int key, char* arg, struct argp_state* as) case OPT_DUMP_PRIORITIES: opt_dump_priorities = true; break; - // case OPT_PSL_PRIORITIES: break; case OPT_SERE_PRIORITIES: opt_pS = arg; break; diff --git a/bin/spot-x.cc b/bin/spot-x.cc index c4905c2e9..964710dc1 100644 --- a/bin/spot-x.cc +++ b/bin/spot-x.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -24,7 +24,7 @@ #include #include "common_setup.hh" -const char argp_program_doc[] ="\ +static const char argp_program_doc[] = "\ Common fine-tuning options for programs installed with Spot.\n\ \n\ The argument of -x or --extra-options is a comma-separated list of KEY=INT \ @@ -47,9 +47,17 @@ depends on the --low, --medium, or --high settings.") }, { DOC("tls-max-states", "Maximum number of states of automata involved in automata-based \ implication checks for formula simplifications. Defaults to 64.") }, + { DOC("tls-max-ops", + "Maximum number of operands in n-ary operators (or, and) on which \ +implication-based simplifications are attempted. Defaults to 16.") }, { nullptr, 0, nullptr, 0, "Translation options:", 0 }, { DOC("ltl-split", "Set to 0 to disable the translation of automata \ as product or sum of subformulas.") }, + { DOC("branch-post", "Set to 0 to disable branching-postponement \ +(done during translation, may create more states) and delayed-branching \ +(almost similar, but done after translation to only remove states). \ +Set to 1 to force branching-postponement, and to 2 \ +to force delayed-branching. By default delayed-branching is used.") }, { DOC("comp-susp", "Set to 1 to enable compositional suspension, \ as described in our SPIN'13 paper (see Bibliography below). Set to 2, \ to build only the skeleton TGBA without composing it. Set to 0 (the \ @@ -75,6 +83,9 @@ only if it is smaller than the original skeleton. This option is only \ used when comp-susp=1 and default to 1 or 2 depending on whether --small \ or --deterministic is specified.") }, { nullptr, 0, nullptr, 0, "Postprocessing options:", 0 }, + { DOC("acd", "Set to 1 (the default) to paritize automata using \ +the alternating cycle decomposition. Set to 0 to use paritization based \ +on latest appearance record variants.") }, { DOC("scc-filter", "Set to 1 (the default) to enable \ SCC-pruning and acceptance simplification at the beginning of \ post-processing. Transitions that are outside of accepting SCC are \ @@ -164,6 +175,10 @@ Set to 1 to use only direct simulation. Set to 2 to use only reverse \ simulation. Set to 3 to iterate both direct and reverse simulations. \ The default is the value of parameter \"simul\" in --high mode, and 0 \ therwise.") }, + { DOC("merge-states-min", "Number of states above which states are \ +merged using a cheap approximation of a bisimulation quotient before \ +attempting simulation-based reductions. Defaults to 128.
Set to 0 to \ +never merge states.") }, { DOC("simul-max", "Number of states above which simulation-based \ reductions are skipped. Defaults to 4096. Set to 0 to disable. This \ applies to all simulation-based optimization, including thoses of the \ diff --git a/bin/spot.cc b/bin/spot.cc index 95ce7063a..c6bad3c70 100644 --- a/bin/spot.cc +++ b/bin/spot.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -24,7 +24,8 @@ #include #include "common_setup.hh" -const char argp_program_doc[] ="Command-line tools installed by Spot."; +static const char argp_program_doc[] = + "Command-line tools installed by Spot."; #define DOC(NAME, TXT) NAME, 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, TXT, 0 diff --git a/buddy/src/bddx.h b/buddy/src/bddx.h index 0efd3a0a9..b3cb377a1 100644 --- a/buddy/src/bddx.h +++ b/buddy/src/bddx.h @@ -501,6 +501,7 @@ BUDDY_API_VAR const BDD bddtrue; *************************************************************************/ #ifdef CPLUSPLUS #include +#include /*=== User BDD class ===================================================*/ @@ -1092,6 +1093,11 @@ inline bddxfalse bdd_false(void) { return bddxfalse(); } +template<> +struct std::default_delete { + void operator()(bddPair *p) const { bdd_freepair(p); }; +}; + /*=== Iostream printing ================================================*/ class BUDDY_API bdd_ioformat diff --git a/buddy/src/reorder.c b/buddy/src/reorder.c index d61630801..b107c8b6a 100644 --- a/buddy/src/reorder.c +++ b/buddy/src/reorder.c @@ -210,7 +210,6 @@ static BddTree *reorder_win2ite(BddTree *t) { BddTree *this, *first=t; int lastsize; - int c=1; if (t == NULL) return t; @@ -246,7 +245,6 @@ static BddTree *reorder_win2ite(BddTree *t) if (verbose > 1) printf(" %d nodes\n", reorder_nodenum()); - c++; } while (reorder_nodenum() != lastsize); diff --git a/configure.ac b/configure.ac index 81d5a24e3..09fe45364 100644 --- a/configure.ac +++ b/configure.ac @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2008-2022, Laboratoire de Recherche et Développement +# Copyright (C) 2008-2023, Laboratoire de Recherche et Développement # de l'Epita (LRDE). # Copyright (C) 2003-2007 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -21,7 +21,7 @@ # along with this program. If not, see . 
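The std::default_delete specialization added to buddy/src/bddx.h above lets a bddPair live in a std::unique_ptr, with bdd_freepair() invoked automatically when the pointer goes out of scope. A small illustration using standard BuDDy calls follows; the table/cache sizes and the variable numbering are arbitrary choices for the demo.

#include <memory>
#include <bddx.h>

int main()
{
  bdd_init(100000, 10000);   // node-table and cache sizes, arbitrary here
  bdd_setvarnum(4);
  {
    // The pair is released by bdd_freepair() thanks to the new specialization.
    std::unique_ptr<bddPair> renames(bdd_newpair());
    bdd_setpair(renames.get(), 0, 2);   // rename variable 0 into variable 2
    bdd_setpair(renames.get(), 1, 3);   // and variable 1 into variable 3

    bdd f = bdd_ithvar(0) & bdd_ithvar(1);
    bdd g = bdd_replace(f, renames.get());   // g is var2 & var3
    (void) g;
  }  // f, g, and the pair are all released before the kernel shuts down
  bdd_done();
}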
AC_PREREQ([2.69]) -AC_INIT([spot], [2.10.4.dev], [spot@lrde.epita.fr]) +AC_INIT([spot], [2.11.5.dev], [spot@lrde.epita.fr]) AC_CONFIG_AUX_DIR([tools]) AC_CONFIG_MACRO_DIR([m4]) AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests]) @@ -53,6 +53,15 @@ AC_ARG_ENABLE([c++20], [Compile in C++20 mode.])], [enable_20=$enableval], [enable_20=no]) +AC_ARG_ENABLE([pthread], + [AS_HELP_STRING([--enable-pthread], + [Allow libspot to use POSIX threads.])], + [enable_pthread=$enableval], [enable_pthread=no]) +if test "$enable_pthread" = yes; then + AC_DEFINE([ENABLE_PTHREAD], [1], [Whether Spot is compiled with -pthread.]) + AC_SUBST([LIBSPOT_PTHREAD], [-pthread]) +fi + AC_ARG_ENABLE([doxygen], [AS_HELP_STRING([--enable-doxygen], [enable generation of Doxygen documentation (requires Doxygen)])], @@ -70,6 +79,7 @@ if test 0 -eq `expr $enable_max_accsets % $default_max_accsets` then AC_DEFINE_UNQUOTED([MAX_ACCSETS], [$enable_max_accsets], [The maximal number of acceptance sets supported (also known as acceptance marks)]) + AC_SUBST([MAX_ACCSETS], [$enable_max_accsets]) else AC_MSG_ERROR([The argument of --enable-max-accsets must be a multiple of $default_max_accsets]) fi @@ -150,7 +160,7 @@ AX_CHECK_BUDDY AC_CHECK_FUNCS([times kill alarm sigaction sched_getcpu]) oLIBS=$LIBS -LIBS="$LIBS -lpthread" +LIBS="$LIBS -pthread" AC_CHECK_FUNCS([pthread_setaffinity_np]) LIBS=$oLIBS @@ -179,9 +189,14 @@ if test "x${enable_python:-yes}" = xyes; then AC_MSG_NOTICE([You may configure with --disable-python ]dnl [if you do not need Python bindings.]) adl_CHECK_PYTHON + + AC_ARG_WITH([pythondir], + [AS_HELP_STRING([--with-pythondir], [override the computed pythondir])], + [pythondir=$withval pyexecdir=$withval], []) fi + adl_ENABLE_DEBUG ad_GCC_OPTIM adl_NDEBUG @@ -202,7 +217,7 @@ AC_CHECK_PROG([LTL3BA], [ltl3ba], [ltl3ba]) AC_CHECK_PROG([PERL], [perl], [perl]) AC_CHECK_PROG([SPIN], [spin], [spin]) AC_CHECK_PROG([LBTT], [lbtt], [lbtt]) -AC_CHECK_PROG([EMACS], [emacs], [emacs]) +AM_MISSING_PROG([EMACS], [emacs]) AC_CHECK_PROGS([IPYTHON], [ipython3 ipython], [ipython]) AC_CHECK_PROGS([JUPYTER], [jupyter], [jupyter]) AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate]) @@ -280,3 +295,23 @@ case $VERSION:$enable_devel in echo '===================================================================' ;; esac + +case $enable_python in + yes) + pd=$pythondir + eval pd=$pd + eval pd=$pd + $PYTHON -c " +import sys +if '$pd' in sys.path: + exit() +else: + print('\nWARNING: Python bindings will be installed in $pd') + print(' however this path is not searched by default by $PYTHON.') + print('\n$PYTHON\'s sys.path contains the following paths:\n', + '\n'.join(sys.path)) + print('\nUse --with-pythondir=... 
if you wish ' + 'to change this installation path.') +" + ;; +esac diff --git a/debian/control b/debian/control index d1f9c652c..e29454c54 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: spot Section: science Priority: optional Maintainer: Alexandre Duret-Lutz -Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python +Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen Standards-Version: 4.5.1 Homepage: http://spot.lrde.epita.fr/ diff --git a/debian/copyright b/debian/copyright index 9c4653c28..792afcec1 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,6 +1,6 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: spot -Source: http://spot.lrde.epita.fr/dload/spot/ +Source: http://www.lrde.epita.fr/dload/spot/ Files: * Copyright: 2003-2007 Laboratoire d'Informatique de Paris 6 (LIP6) diff --git a/debian/rules b/debian/rules index 0193e9a62..51daf21ed 100755 --- a/debian/rules +++ b/debian/rules @@ -20,20 +20,16 @@ include /usr/share/dpkg/default.mk %: dh $@ --with=python3 -# Find the LTO plugin, which we need to pass to ar, nm, and ranlib. -LTOPLUG := $(shell gcc -v 2>&1 | \ - sed -n 's:COLLECT_LTO_WRAPPER=\(/.*/\)[^/]*:\1:p')liblto_plugin.so - # ARFLAGS is for Automake -# AR_FLAGS is for Libtool -# These activate the LTO pluggin, but also remove the 'u' option -# from ar, since its now ignored with Debian's default to 'D'. -LTOSETUP = \ - LDFLAGS='-fuse-linker-plugin' \ - NM='nm --plugin $(LTOPLUG)' \ - ARFLAGS='cr --plugin $(LTOPLUG)' \ - AR_FLAGS='cr --plugin $(LTOPLUG)' \ - RANLIB='ranlib --plugin $(LTOPLUG)' \ +# AR_FLAGS is for Libtool, (but libtool 2.4.7 will now use ARFLAGS as well) +# The gcc-tools activate the LTO plugin. +LTOSETUP = \ + LDFLAGS='-fuse-linker-plugin' \ + NM='gcc-nm' \ + AR='gcc-ar' \ + ARFLAGS='cr' \ + AR_FLAGS='cr' \ + RANLIB='gcc-ranlib' \ VALGRIND=false GCDADIR := $(shell pwd)/gcda diff --git a/default.nix.in b/default.nix.in new file mode 100644 index 000000000..8101e4f74 --- /dev/null +++ b/default.nix.in @@ -0,0 +1,35 @@ +# -*- mode: nix; coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita +# (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +{ pkgs ? 
import {} }: +let + version = "@VERSION@"; +in +pkgs.stdenv.mkDerivation { + inherit version; + pname = "spot"; + + buildInputs = [ + pkgs.python3 + ]; + + src = ./.; + + enableParallelBuilding = true; +} diff --git a/doc/org/.dir-locals.el.in b/doc/org/.dir-locals.el.in index cba9892fb..80c0a1385 100644 --- a/doc/org/.dir-locals.el.in +++ b/doc/org/.dir-locals.el.in @@ -27,6 +27,9 @@ (setenv "SPOT_DOTEXTRA" "node[fontsize=12] fontsize=12 stylesheet=\"spot.css\" edge[arrowhead=vee, arrowsize=.7, fontsize=12]") (setq org-babel-temporary-directory "@abs_top_builddir@/doc/org/tmp") (make-directory org-babel-temporary-directory t) + ; has to be set globally, not buffer-local + (setq ess-ask-for-ess-directory nil) + (setq ess-startup-directory 'default-directory) (org-babel-do-load-languages 'org-babel-load-languages `((,(if (version< org-version "8.3") 'sh 'shell) . t) (python . t) @@ -39,7 +42,6 @@ (org-babel-python-command . "@PYTHON@") (org-babel-C++-compiler . "./g++wrap") (shell-file-name . "@SHELL@") - (ess-ask-for-ess-directory . nil) (org-export-html-postamble . nil) (org-html-table-header-tags "
" . "
") diff --git a/doc/org/autcross.org b/doc/org/autcross.org index 90a268b44..9e4972cf6 100644 --- a/doc/org/autcross.org +++ b/doc/org/autcross.org @@ -249,7 +249,7 @@ EOF | -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | AF | ok | 0 | 0.0211636 | 2 | 21 | 66 | 84 | 2 | 4 | 0 | 0 | 0 | | -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | L2D | ok | 0 | 0.0028508 | 2 | 24 | 74 | 96 | 2 | 4 | 0 | 0 | 0 | -* Language preserving transformation +* Transformation that preserve or complement languages By default =autcross= assumes that for a given input the automata produced by all tools should be equivalent. However it does not @@ -261,6 +261,13 @@ automaton, it is worth to pass the =--language-preserved= option to =autfilt=. Doing so a bit like adding =cat %H>%O= as another tool: it will also ensure that the output is equivalent to the input. +Similarly, if the tools being tested implement complementation +algorithm, adding the =--language-complemented= will additionally +compare the outputs using this own complementation algorithm. Using +this option is more efficient than passing =autfilt --complement= as a +tool, since =autcross= can save on complementation by using the input +automaton. + * Detecting problems :PROPERTIES: :CUSTOM_ID: checks diff --git a/doc/org/autfilt.org b/doc/org/autfilt.org index 4ccf09f07..5c8a8f1e5 100644 --- a/doc/org/autfilt.org +++ b/doc/org/autfilt.org @@ -145,7 +145,8 @@ ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' (iw) inherently weak. Use uppercase letters to negate them. %d 1 if the output is deterministic, 0 otherwise - %e number of reachable edges + %e, %[LETTER]e number of edges (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). %f the formula, in Spot's syntax %F name of the input file %g, %[LETTERS]g acceptance condition (in HOA syntax); add brackets @@ -170,8 +171,11 @@ ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d' LETTERS to restrict to(u) user time, (s) system time, (p) parent process, or (c) children processes. - %s number of reachable states - %t number of reachable transitions + %s, %[LETTER]s number of states (add one LETTER to select (r) + reachable [default], (u) unreachable, (a) all). + %t, %[LETTER]t number of transitions (add one LETTER to select + (r) reachable [default], (u) unreachable, (a) + all). %u, %[e]u number of states (or [e]dges) with universal branching %u, %[LETTER]u 1 if the automaton contains some universal diff --git a/doc/org/citing.org b/doc/org/citing.org index 0704eb886..8d669ae69 100644 --- a/doc/org/citing.org +++ b/doc/org/citing.org @@ -6,18 +6,22 @@ * Generic reference -If you need to cite the Spot project in some academic paper, please -use the following reference: +If you need to cite the Spot project, the latest tool paper about +it is the following reference: -- *Spot 2.0 — a framework for LTL and ω-automata manipulation*, - /Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/, - /Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc. - of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016. 
- ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]]) +- *From Spot 2.0 to Spot 2.10: What's new?*, /Alexandre Duret-Lutz/, + /Etienne Renault/, /Maximilien Colange/, /Florian Renkin/, + /Alexandre Gbaguidi Aisse/, /Philipp Schlehuber-Caissier/, /Thomas + Medioni/, /Antoine Martin/, /Jérôme Dubois/, /Clément Gillard/, and + Henrich Lauko/. In Proc. of CAV'22, LNCS 13372, pp. 174--187. + Haifa, Israel, Aug. 2022. + ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.22.cav][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.22.cav.pdf][pdf]]) - This provides a quick overview of the entire project (the features - of the library, [[file:tools.org][the tools]], the Python bindings), and provides many - references detailing more specific aspects. +#+begin_note + Tools evolve while published papers don't. Please always specify + the version of Spot (or any other tool) you are using when citing it + in a paper. Future versions might have different behaviors. +#+end_note * Other, more specific, references @@ -76,12 +80,28 @@ be more specific about a particular aspect of Spot. - *Generic Emptiness Check for Fun and Profit*, /Christel Baier/, /František Blahoudek/, /Alexandre Duret-Lutz/, /Joachim Klein/, /David Müller/, and /Jan Strejček/. - In. Proc. of ATVA'19, LNCS 11781, pp. 11781, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] | + In. Proc. of ATVA'19, LNCS 11781, pp. 445--461, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.mefosyloma.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.pdf][slides2]]) Presents the generic emptiness-check implemented in Spot. -* Obsolete reference +- *Practical Applications of the Alternating Cycle Decomposition*, + /Antonio Casares/, /Alexandre Duret-Lutz/, /Klara J. Meyer/, /Florian Renkin/, + and /Salomon Sickert/. + In. Proc. of TACAS'22, LNCS 13244, pp. 99--117, Apr 2022. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#casares.22.tacas][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.pdf][pdf]] | + [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides2.pdf][slides2]]) + +* Obsolete references + +- *Spot 2.0 — a framework for LTL and ω-automata manipulation*, + /Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/, + /Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc. + of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016. + ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]]) + + This provides a quick overview of the entire project (the features + of the library, [[file:tools.org][the tools]], the Python bindings), and provides many + references detailing more specific aspects. - *Spot: an extensible model checking library using transition-based generalized Büchi automata*, /Alexandre Duret-Lutz/ and /Denis diff --git a/doc/org/compile.org b/doc/org/compile.org index 8d4a3b1ca..6c2f8e6c6 100644 --- a/doc/org/compile.org +++ b/doc/org/compile.org @@ -210,11 +210,14 @@ one library requiring another, you will need to link with the =bddx= library. 
This should be as simple as adding =-lbddx= after =-lspot= in the first three cases. +Similarly, if Spot has been configured with =--enable-pthread=, you +will need to add =-pthread= to the compiler flags. + In the fourth case where =libtool= is used to link against =libspot.la= linking against =libbddx.la= should not be necessary because Libtool already handles such dependencies. However the version of =libtool= distributed with Debian is patched to ignore those dependencies, so in this -case you 2 +case you have to list all dependencies. * Additional suggestions diff --git a/doc/org/concepts.org b/doc/org/concepts.org index d5641e42f..64f982eb8 100644 --- a/doc/org/concepts.org +++ b/doc/org/concepts.org @@ -381,13 +381,14 @@ When /transition-based acceptance/ is used, acceptance sets are now sets of /edges/ (or set of /transitions/ if you prefer), and runs are accepting if the edges they visit satisfy the acceptance condition. -Here is an example of Transition-based Generalized Büchi Automaton -(TGBA). +Here is an example of Transition-based Büchi Automaton +(TBA). #+NAME: tgba-example1 #+BEGIN_SRC sh ltl2tgba 'GF(a & X(a U b))' -d #+END_SRC + #+BEGIN_SRC dot :file concept-tgba1.svg :var txt=tgba-example1 :exports results $txt #+END_SRC @@ -399,27 +400,13 @@ This automaton accept all ω-words that infinitely often match the pattern $a^+;b$ (that is: a positive number of letters where $a$ is true are followed by one letter where $b$ is true). -Using transition-based acceptance allows for more compact automata. -The typical example is the LTL formula =GFa= (infinitely often $a$) -that can be represented using a one-state transition-based Büchi -automaton: -#+NAME: tgba-example2 -#+BEGIN_SRC sh -ltl2tgba 'GFa' -d -#+END_SRC -#+BEGIN_SRC dot :file concept-tgba2.svg :var txt=tgba-example2 :exports results -$txt -#+END_SRC - -#+RESULTS: -[[file:concept-tgba2.svg]] - -While the same property require a 2-state Büchi automaton using +Using transition-based acceptance often allows for more compact automata. +For instance the above automaton would need at least 3 states with state-based acceptance: #+NAME: tgba-example3 #+BEGIN_SRC sh -ltl2tgba 'GFa' -B -d +ltl2tgba 'GF(a & X(a U b))' -B -d #+END_SRC #+BEGIN_SRC dot :file concept-tba-vs-ba.svg :var txt=tgba-example3 :exports results $txt @@ -1035,7 +1022,7 @@ layers. dynamic libraries that [[http://fmt.cs.utwente.nl/tools/ltsmin/][LTSmin]] uses to represent state-spaces. It currently supports libraries generated from Promela models using SpinS or a patched version of DiVinE, but you have to install - those third-party tools first. See [[https://gitlab.lrde.epita.fr/spot/spot/blob/next/tests/ltsmin/README][=tests/ltsmin/README=]] + those third-party tools first. See [[https://gitlab.lre.epita.fr/spot/spot/blob/next/tests/ltsmin/README][=tests/ltsmin/README=]] for details. - In addition to the C++17 API, we also provide Python bindings for =libspotgen=, =libspotltsmin=, =libbddx=, and most of =libspot=. @@ -1047,8 +1034,8 @@ layers. distributed with the rest of Spot, their source-code is publicly available (in case you want to contribute or run a local version). The [[https://spot-sandbox.lrde.epita.fr/][=spot-sandbox=]] website runs from a Docker container whose - configuration can be found in [[https://gitlab.lrde.epita.fr/spot/sandbox/tree/master=][this repository]]. 
The client and - server parts of the [[https://spot.lrde.epita.fr/app/][online LTL translator]] can be found in [[https://gitlab.lrde.epita.fr/spot/spot-web-app/][this + configuration can be found in [[https://gitlab.lre.epita.fr/spot/sandbox/tree/master=][this repository]]. The client and + server parts of the [[https://spot.lrde.epita.fr/app/][online LTL translator]] can be found in [[https://gitlab.lre.epita.fr/spot/spot-web-app/][this repository]]. * Automaton property flags diff --git a/doc/org/g++wrap.in b/doc/org/g++wrap.in index c4a61a39c..b176af28b 100755 --- a/doc/org/g++wrap.in +++ b/doc/org/g++wrap.in @@ -1,6 +1,6 @@ #!/bin/sh # This is a wrapper around the compiler, to ensure that the code -# example run from the org-mode file are all linked with Spot. +# examples run from org-mode files are all linked with Spot. # # Also we save errors to org.errors, so that we can detect issues # after org-mode has exported everything. Otherwise these errors @@ -8,7 +8,7 @@ @top_builddir@/libtool link @CXX@ @CXXFLAGS@ @CPPFLAGS@ -Wall -Werror \ -I@abs_top_builddir@ -I@abs_top_srcdir@ -I@abs_top_srcdir@/buddy/src \ "$@" @abs_top_builddir@/spot/libspot.la \ - @abs_top_builddir@/buddy/src/libbddx.la 2> errors.$$ + @abs_top_builddir@/buddy/src/libbddx.la @LIBSPOT_PTHREAD@ 2> errors.$$ code=$? if test $code -ne 0 && test -s errors.$$; then cat errors.$$ >>org.errors diff --git a/doc/org/hoa.org b/doc/org/hoa.org index 26969e4ed..6994abdc5 100644 --- a/doc/org/hoa.org +++ b/doc/org/hoa.org @@ -66,7 +66,7 @@ the HOA format, the output may not be exactly the same as the input. sets. This hard-coded limit can be augmented at configure time - using option `--enable-max-accsets=N`, but doing so will consume + using option =--enable-max-accsets=N=, but doing so will consume more memory and time. - Multiple (or missing) initial states are emulated. @@ -76,7 +76,8 @@ the HOA format, the output may not be exactly the same as the input. is transformed into an equivalent TωA by merging the initial states into a single one. The merged state can either be one of the original initial states (if one of those has no incoming edge) or a - new state introduced for that purpose. + new state introduced for that purpose. This "conversion" may change + the completeness property of the automaton. Similarly, when an automaton with no initial state is loaded (this includes the case where the automaton has no state), a disconnected diff --git a/doc/org/index.org b/doc/org/index.org index f676b8aa4..08fa16a3d 100644 --- a/doc/org/index.org +++ b/doc/org/index.org @@ -25,7 +25,7 @@ checking. It has the following notable features: weak-DBA, removal of useless SCCs, acceptance-condition transformations, determinization, [[file:satmin.org][SAT-based minimization of deterministic automata]], [[https://spot.lrde.epita.fr/ipynb/zlktree.html][Alternating Cycle Decomposition]], etc. -- Support for [[file:tut40.org][Safety]] and [[https://spot-dev.lrde.epita.fr/ipynb/games.html][parity games]]. +- Support for [[file:tut40.org][Safety]] and [[https://spot.lrde.epita.fr/ipynb/games.html][parity games]]. - Applications to [[file:ltlsynt.org][reactive synthesis]] and [[https://spot.lrde.epita.fr/ipynb/atva16-fig2b.html][model checking]]. - In addition to the C++ interface, most of its algorithms are usable via [[file:tools.org][command-line tools]], and via [[file:tut.org][Python bindings]]. @@ -37,7 +37,7 @@ checking. 
It has the following notable features: * Latest version -The latest version is *{{{LASTRELEASE}}}* and was released on +The latest version is *call_SPOT_VERSION()* and was released on *{{{LASTDATE}}}*. Please see the [[file:install.org][download and installation instructions]]. * Documentation diff --git a/doc/org/init.el.in b/doc/org/init.el.in index e16d097cc..c46363096 100644 --- a/doc/org/init.el.in +++ b/doc/org/init.el.in @@ -51,7 +51,6 @@ (package-install ess))))) (require 'ox-publish) -(require 'org-install) (require 'hoa-mode) ; See https://github.com/emacs-ess/ESS/issues/1052 @@ -89,7 +88,9 @@ (setq org-babel-C++-compiler "./g++wrap") (setq shell-file-name "@SHELL@") (setq ess-ask-for-ess-directory nil) - +; setting ess-startup-directory to 'default-directory is enough with +; newer ESS version (after Fev 2022) but does not work with older ones. +(setq ess-startup-directory "@abs_top_builddir@/doc/org") (setq org-babel-default-header-args:plantuml '((:results . "file") (:exports . "results") @@ -159,7 +160,7 @@ up.html points to index.html, then the result is: (setq body res) (not cmp))) (concat "#+TITLE: " title - "\n#+SETUPFILE: setup.org\n#+HTML_LINK_UP: index.html\n\n" + "\n#+INCLUDE: setup.org\n#+HTML_LINK_UP: index.html\n\n" body))) (setq org-publish-project-alist @@ -184,6 +185,49 @@ up.html points to index.html, then the result is: :publishing-function org-publish-attachment) ("spot-all" :components ("spot-html" "spot-static")))) + + + +;;; Org-mode 9.5 is now using to render SVG images. +;;; Unfortunately, this breaks SVG images that use external style +;;; sheets as are expected to be self-contained. +;;; +;;; Since we do use such external style-sheets and never had +;;; any issue with , we revert +;;; to the previous behavior. +;;; +;;; The following function is based on org-html--svg-image from +;;; Org-mode 9.4.5, with the addition of the SVG extension test. +(defun spot-svg-output-as-object (source attributes info) + "If source is an SVG file, return an \"object\" embedding svg file +SOURCE with given ATTRIBUTES. +INFO is a plist used as a communication channel. Otherwise return nil. + +The special attribute \"fallback\" can be used to specify a +fallback image file to use if the object embedding is not +supported. CSS class \"org-svg\" is assigned as the class of the +object unless a different class is specified with an attribute." + (when (string= "svg" (file-name-extension source)) + (let ((fallback (plist-get attributes :fallback)) + (attrs (org-html--make-attribute-string + (org-combine-plists + ;; Remove fallback attribute, which is not meant to + ;; appear directly in the attributes string, and + ;; provide a default class if none is set. + '(:class "org-svg") attributes '(:fallback nil))))) + (format "\n%s" + source + attrs + (if fallback + (org-html-close-tag + "img" (format "src=\"%s\" %s" fallback attrs) info) + "Sorry, your browser does not support SVG."))))) +;;; Hack org-html--format-image to call the above first. +;;; (The org-html--svg-image function was removed when the formater code +;;; switched to for SVG.) 
+(unless (fboundp 'org-html--svg-image) + (advice-add 'org-html--format-image :before-until 'spot-svg-output-as-object)) + (org-publish-all t) ;;; org-babel-remove-temporary-directory does not correctly remove ;;; nested directories and we have some files in tmp/.libs/ because of diff --git a/doc/org/install.org b/doc/org/install.org index a5759da17..b65c02074 100644 --- a/doc/org/install.org +++ b/doc/org/install.org @@ -9,12 +9,12 @@ :CUSTOM_ID: tar :END: -The latest release of Spot is version {{{LASTRELEASE}}}: +The latest release of Spot is version call_SPOT_VERSION() and was released on {{{LASTDATE}}}: -- {{{LASTTARBALL}}} (see also the {{{LASTNEWS}}}) +- call_TARBALL_LINK() (see also the call_NEWS_LINK()) Past releases can be found [[https://www.lrde.epita.fr/dload/spot/][in the same directory]]. If you are -interested in /future/ releases, you can always peek at the [[https://gitlab.lrde.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=debian-stable-gcc][last +interested in /future/ releases, you can always peek at the [[https://gitlab.lre.epita.fr/spot/spot/-/jobs/artifacts/next/browse?job=make-dist][last successful development build]]. ** Requirements @@ -52,10 +52,13 @@ make make install #+END_SRC +Before running =make install=, you might want to run =make check= to +run our test-suite. + Files =INSTALL= and =README= included in the tarball contains more -explanations about the various options you can use during this -process. Also note that =README= has a section about troubleshooting -installations. +explanations about the various options you can use during the +compilation process. Also note that =README= has a section about +troubleshooting installations. * Installing the Debian packages :PROPERTIES: @@ -88,7 +91,9 @@ apt-get install spot libspot-dev spot-doc python3-spot # Or a subset of those Note that our Debian repository is signed since that is the new Debian policy, and both of the above command blocks start with a download of our [[https://www.lrde.epita.fr/repo/debian.gpg][GPG key]]. Its fingerprint is =209B 7362 CFD6 FECF B41D 717F 03D9 -9E74 44F2 A84A=, if you want to verify it. +9E74 44F2 A84A=, if you want to verify it. If you have an old copy of +the GPG key that expired, please download it again: the current +version should be valid until 2032. The package =spot= contains the [[file:tools.org][command-line tools]]. =libspot-dev= contains the header files if you plan to use Spot in a C++17 @@ -162,11 +167,11 @@ the (working) code that should be part of the next major release. To clone the git repository, use #+BEGIN_SRC sh -git clone https://gitlab.lrde.epita.fr/spot/spot.git +git clone https://gitlab.lre.epita.fr/spot/spot.git #+END_SRC This should put you on the =next= branch by default. From there, read -the [[https://gitlab.lrde.epita.fr/spot/spot/blob/next/HACKING][HACKING]] file that should be at the top of your cloned repository: +the [[https://gitlab.lre.epita.fr/spot/spot/blob/next/HACKING][HACKING]] file that should be at the top of your cloned repository: it lists all the tools you should install before attempting to compile the source tree. diff --git a/doc/org/ltlcross.org b/doc/org/ltlcross.org index 0fdebae1f..36cce5cbb 100644 --- a/doc/org/ltlcross.org +++ b/doc/org/ltlcross.org @@ -924,7 +924,7 @@ compare the number of states produced by the two configurations of =ltl2tgba= for each formula, we just need to plot column =dt2$state.small= against =dt2$state.deter=. 
-#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r.svg library(ggplot2) ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() @@ -937,7 +937,7 @@ ggplot(dt2, aes(x=states.small, y=states.deter)) + We should probably print the formulas for the cases where the two sizes differ. -#+BEGIN_SRC R :results output graphics :width 5 :height 5 :file ltlcross-r2.svg +#+BEGIN_SRC R :results output graphics file :width 5 :height 5 :file ltlcross-r2.svg ggplot(dt2, aes(x=states.small, y=states.deter)) + geom_abline(colour='white') + geom_point() + geom_text(data=subset(dt2, states.small != states.deter), diff --git a/doc/org/ltlsynt.org b/doc/org/ltlsynt.org index 45b4b2b1c..e4fbc66e4 100644 --- a/doc/org/ltlsynt.org +++ b/doc/org/ltlsynt.org @@ -7,19 +7,19 @@ * Basic usage -This tool synthesizes controllers from LTL/PSL formulas. +This tool synthesizes reactive controllers from LTL/PSL formulas. Consider a set $I$ of /input/ atomic propositions, a set $O$ of output atomic propositions, and a PSL formula \phi over the propositions in $I \cup O$. A -=controller= realizing \phi is a function $c: (2^{I})^\star \times 2^I \mapsto +*reactive controller* realizing \phi is a function $c: (2^{I})^\star \times 2^I \mapsto 2^O$ such that, for every \omega-word $(u_i)_{i \in N} \in (2^I)^\omega$ over the input propositions, the word $(u_i \cup c(u_0 \dots u_{i-1}, u_i))_{i \in N}$ satisfies \phi. -If a controller exists, then one with finite memory exists. Such controllers -are easily represented as automata (or more specifically as I/O automata or -transducers). In the automaton representing the controller, the acceptance -condition is irrelevant and trivially true. +If a reactive controller exists, then one with finite memory +exists. Such controllers are easily represented as automata (or more +specifically as Mealy machines). In the automaton representing the +controller, the acceptance condition is irrelevant and trivially true. =ltlsynt= has three mandatory options: - =--ins=: a comma-separated list of input atomic propositions; @@ -27,45 +27,52 @@ condition is irrelevant and trivially true. - =--formula= or =--file=: a specification in LTL or PSL. One of =--ins= or =--outs= may be omitted, as any atomic proposition not listed -as input can be assumed to be an output and vice-versa. +as input can be assumed to be output and vice-versa. -The following example illustrates the synthesis of a controller acting as an -=AND= gate. We have two inputs =a= and =b= and one output =c=, and we want =c= -to always be the =AND= of the two inputs: +The following example illustrates the synthesis of a controller +ensuring that input =i1= and =i2= are both true initially if and only +if eventually output =o1= will go from true to false at some point. +Note that this is an equivalence, not an implication. 
#+NAME: example #+BEGIN_SRC sh :exports both -ltlsynt --ins=a,b -f 'G (a & b <=> c)' +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' #+END_SRC #+RESULTS: example #+begin_example REALIZABLE HOA: v1 -States: 1 +States: 3 Start: 0 -AP: 3 "a" "b" "c" +AP: 3 "i1" "i2" "o1" acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 2 --BODY-- State: 0 -[!0&!2 | !1&!2] 0 -[0&1&2] 0 +[0&1&2] 1 +[!0&2 | !1&2] 2 +State: 1 +[!2] 0 +State: 2 +[2] 2 --END-- #+end_example The output is composed of two parts: -- the first one is a single line =REALIZABLE= or =UNREALIZABLE;= -- the second one, only present in the =REALIZABLE= case is an automaton describing the controller. - In this example, the controller has a single - state, with two loops labeled by =a & b & c= and =(!a | !b) & !c=. +- The first one is a single line =REALIZABLE= or =UNREALIZABLE=; the presence of this + line, required by the [[http://http://www.syntcomp.org/][SyntComp competition]], can be disabled with option =--hide-status=. +- The second one, only present in the =REALIZABLE= case, is an automaton describing the controller. + +The controller contains the line =controllable-AP: 2=, which means that this automaton +should be interpreted as a Mealy machine where =o0= is part of the output. +Using the =--dot= option, makes it easier to visualize this machine. #+NAME: exampledot -#+BEGIN_SRC sh :exports none :noweb yes -sed 1d <> -EOF +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --dot #+END_SRC #+BEGIN_SRC dot :file ltlsyntex.svg :var txt=exampledot :exports results @@ -75,9 +82,6 @@ EOF #+RESULTS: [[file:ltlsyntex.svg]] -The label =a & b & c= should be understood as: "if the input is =a&b=, -the output should be =c=". - The following example illustrates the case of an unrealizable specification. As =a= is an input proposition, there is no way to guarantee that it will eventually hold. @@ -90,11 +94,68 @@ ltlsynt --ins=a -f 'F a' : UNREALIZABLE By default, the controller is output in HOA format, but it can be -output as an [[http://fmv.jku.at/aiger/][AIGER]] circuit thanks to the =--aiger= flag. This is the -output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. +output as an And-Inverter-Graph in [[http://fmv.jku.at/aiger/][AIGER format]] using the =--aiger= +flag. This is the output format required for the [[http://syntcomp.org/][SYNTCOMP]] competition. -The generation of a controller can be disabled with the flag =--realizability=. -In this case, =ltlsynt= output is limited to =REALIZABLE= or =UNREALIZABLE=. +#+NAME: exampleaig +#+BEGIN_SRC sh :exports both +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --aiger +#+END_SRC + +#+RESULTS: exampleaig +#+begin_example +REALIZABLE +aag 14 2 2 1 10 +2 +4 +6 14 +8 29 +7 +10 7 9 +12 4 10 +14 2 12 +16 7 8 +18 4 16 +20 5 7 +22 21 19 +24 2 23 +26 3 7 +28 27 25 +i0 i1 +i1 i2 +o0 o1 +#+end_example + +The above format is not very human friendly. Again, by passing both +=--aiger= and =--dot=, one can display the And-Inverter-Graph representing +the controller: + +#+NAME: exampleaigdot +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --hide-status --aiger --dot +#+END_SRC + +#+BEGIN_SRC dot :file ltlsyntexaig.svg :var txt=exampleaigdot :exports results + $txt +#+END_SRC + +#+RESULTS: +[[file:ltlsyntexaig.svg]] + +In the above diagram, round nodes represent AND gates. 
Small black +circles represent inversions (or negations), colored triangles are +used to represent input signals (at the bottom) and output signals (at +the top), and finally rectangles represent latches. A latch is a one +bit register that delays the signal by one step. Initially, all +latches are assumed to contain =false=, and them emit their value from +the =L0_out= and =L1_out= rectangles at the bottom. Their input value, +to be emitted at the next step, is received via the =L0_in= and =L1_in= +boxes at the top. In =ltlsynt='s encoding, the set of latches is used +to keep track of the current state of the Mealy machine. + +The generation of a controller can be disabled with the flag +=--realizability=. In this case, =ltlsynt='s output is limited to +=REALIZABLE= or =UNREALIZABLE=. * TLSF @@ -104,14 +165,20 @@ specification language created for the purpose of this competition. Fortunately, the SYNTCOMP organizers also provide a tool called [[https://github.com/reactive-systems/syfco][=syfco=]] which can translate a TLSF specification to an LTL formula. -The following four steps show you how a TLSF specification called =FILE= can +The following line shows how a TLSF specification called =FILE= can be synthesized using =syfco= and =ltlsynt=: #+BEGIN_SRC sh :export code -LTL=$(syfco FILE -f ltlxba -m fully) -IN=$(syfco FILE --print-input-signals) -OUT=$(syfco FILE --print-output-signals) -ltlsynt --formula="$LTL" --ins="$IN" --outs="$OUT" +ltlsynt --tlsf FILE +#+END_SRC + +The above =--tlsf= option will call =syfco= to perform the conversion +and extract output signals, as if you had used: + +#+BEGIN_SRC sh :export code +LTL=$(syfco -f ltlxba -m fully FILE) +OUT=$(syfco --print-output-signals FILE) +ltlsynt --formula="$LTL" --outs="$OUT" #+END_SRC * Internal details @@ -171,7 +238,18 @@ be tried by separating them using commas. For instance You can also ask =ltlsynt= to print to obtained parity game into [[https://github.com/tcsprojects/pgsolver][PGSolver]] format, with the flag =--print-pg=, or in the HOA format, using =--print-game-hoa=. These flag deactivate the resolution of the -parity game. +parity game. Note that if any of those flag is used with =--dot=, the game +will be printed in the Dot format instead: + +#+NAME: examplegamedot +#+BEGIN_SRC sh :exports code +ltlsynt --ins=i1,i2 -f '(i1 & i2) <-> F(o1 & X(!o1))' --print-game-hoa --dot +#+END_SRC +#+BEGIN_SRC dot :file ltlsyntexgame.svg :var txt=examplegamedot :exports results + $txt +#+END_SRC +#+RESULTS: +[[file:ltlsyntexgame.svg]] For benchmarking purpose, the =--csv= option can be used to record intermediate statistics about the resolution. @@ -194,6 +272,11 @@ Further improvements are described in the following paper: /Alexandre Duret-Lutz/, and /Adrien Pommellet/. Presented at the SYNT'21 workshop. ([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.21.synt.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.21.synt][bib]]) +Simplification of Mealy machines is discussed in: + +- *Effective reductions of Mealy machines*, /Florian Renkin/, + /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/, and /Adrien Pommellet/. + Presented at FORTE'22. 
([[https://www.lrde.epita.fr/~adl/dl/adl/renkin.22.forte.pdf][pdf]] | [[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.22.forte][bib]]) # LocalWords: utf ltlsynt AIGER html args mapsto SRC acc aiger TLSF # LocalWords: UNREALIZABLE unrealizable SYNTCOMP realizability Proc diff --git a/doc/org/setup.org b/doc/org/setup.org index 3b8b1b404..255a01c3d 100644 --- a/doc/org/setup.org +++ b/doc/org/setup.org @@ -1,11 +1,23 @@ #+OPTIONS: H:2 num:nil toc:t html-postamble:nil ^:nil #+EMAIL: spot@lrde.epita.fr #+HTML_LINK_HOME: index.html -#+MACRO: SPOTVERSION 2.10.4 -#+MACRO: LASTRELEASE 2.10.4 -#+MACRO: LASTTARBALL [[http://www.lrde.epita.fr/dload/spot/spot-2.10.4.tar.gz][=spot-2.10.4.tar.gz=]] -#+MACRO: LASTNEWS [[https://gitlab.lrde.epita.fr/spot/spot/blob/spot-2-10-4/NEWS][summary of the changes]] -#+MACRO: LASTDATE 2022-02-01 +#+MACRO: LASTDATE 2023-04-20 + +#+NAME: SPOT_VERSION +#+BEGIN_SRC python :exports none :results value :wrap org +return "2.11.5" +#+END_SRC + +#+NAME: TARBALL_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + print(f"[[http://www.lrde.epita.fr/dload/spot/spot-{version}.tar.gz][=spot-{version}.tar.gz=]]") +#+END_SRC + +#+NAME: NEWS_LINK +#+BEGIN_SRC python :exports none :var version=SPOT_VERSION :results output :wrap org + version = version.replace('.', '-') + print(f"[[https://gitlab.lre.epita.fr/spot/spot/blob/spot-{version}/NEWS][summary of the changes]]") +#+END_SRC #+ATTR_HTML: :id spotlogo [[file:spot2.svg]] diff --git a/doc/org/spot.css b/doc/org/spot.css index 74cbab5bf..569ca37a9 100644 --- a/doc/org/spot.css +++ b/doc/org/spot.css @@ -16,9 +16,9 @@ h1::before{content:"";position:absolute;z-index:-1;background-color:#ffe35e;left #table-of-contents #text-table-of-contents{text-align:left} #org-div-home-and-up{text-align:center;font-size:100%} .outline-2 h2{display:block;width:100%;position:relative} -.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} +.outline-2 h2::before{content:"";height:100%;width:calc(100% + 2em);position:absolute;z-index:-1;bottom:0em;left:-1em;background:linear-gradient(45deg,#ffe35e 50%,transparent 75%);transform:skew(10deg);border-radius:5px;} .outline-3 h3{display:block;width:auto;position:relative} -.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;;background-color:#ffe35e;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} +.outline-3 h3::before{content:"";position:absolute;z-index:-1;width:calc(100% + 2em);height:100%;left:-1em;bottom:0em;background:linear-gradient(45deg,#ffe35e 25%,transparent 50%);transform:skew(10deg);border-radius:3px} .outline-2 h2:hover::before,.outline-3 h3:hover::before{background-color:#ffe35e} pre{margin:1.2ex} pre.src{padding-top:8px;border-left-style:solid;border-color:#00adad;overflow:auto;margin-top:0;margin-bottom:0} @@ -77,11 +77,13 @@ thead tr{background:#ffe35e} .org-hoa-ap-number{color:#d70079} .implem{background:#fff0a6;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#ffe35e;border-style:solid none} .implem::before{background:#ffe35e;content:"Implementation detail";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} +.note{background:#fff0a6;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#ffe35e;border-style:solid none} 
+.note::before{background:#ffe35e;content:"Note";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} .caveat{background:#ef99c9;padding:0.5ex 1ex 0.5ex 1ex;margin:1ex;border-color:#d70079;border-style:solid none} .caveat::before{background:#d70079;content:"Caveat";padding:.5ex;position:relative;top:0;left:0;font-weight:bold} .spotlogo{transform-origin:50% 50%;animation-duration:2s;animation-name:animspotlogo} g.spotlogobg{transform-origin:50% 50%;animation-duration:2s;animation-name:animspotlogobg} -g#version{transform-origin:50% 50%;animation-duration:3s;animation-name:animspotlogover} +g.spotlogover{transform-origin:50% 50%;animation-duration:3s;animation-name:animspotlogover} @keyframes animspotlogo{ 0%{transform:rotateY(90deg)} 80%{transform:rotateY(0deg)} diff --git a/doc/org/spot2.svg b/doc/org/spot2.svg index 76b76525f..8d68ba9d3 100644 --- a/doc/org/spot2.svg +++ b/doc/org/spot2.svg @@ -14,7 +14,7 @@ - + diff --git a/doc/org/tools.org b/doc/org/tools.org index 5227f1b4e..46ca38ccd 100644 --- a/doc/org/tools.org +++ b/doc/org/tools.org @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- -#+TITLE: Command-line tools installed by Spot {{{SPOTVERSION}}} -#+DESCRIPTION: List of all the command-line tools installed by Spot {{{SPOTVERSION}}} #+INCLUDE: setup.org +#+TITLE: Command-line tools installed by Spot +#+DESCRIPTION: List of all the command-line tools installed by Spot #+HTML_LINK_UP: index.html #+PROPERTY: header-args:sh :results verbatim :exports both This document introduces command-line tools that are installed with -the Spot library. We give some examples to highlight possible +Spot call_SPOT_VERSION(). We give some examples to highlight possible use-cases but shall not attempt to cover all features exhaustively (please check the man pages for further inspiration). diff --git a/doc/org/tut.org b/doc/org/tut.org index 598276f38..8ae701bc5 100644 --- a/doc/org/tut.org +++ b/doc/org/tut.org @@ -89,6 +89,7 @@ real notebooks instead. automata. - [[https://spot.lrde.epita.fr/ipynb/atva16-fig2a.html][=atva16-fig2a.ipynb=]] first example from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][ATVA'16 tool paper]]. - [[https://spot.lrde.epita.fr/ipynb/atva16-fig2b.html][=atva16-fig2b.ipynb=]] second example from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][ATVA'16 tool paper]]. +- [[https://spot.lrde.epita.fr/ipynb/cav22-figs.html][=cav22-figs.ipynb=]] figures from our [[https://www.lrde.epita.fr/~adl/dl/adl/duret.22.cav.pdf][CAV'22 tool paper]]. - [[https://spot.lrde.epita.fr/ipynb/alternation.html][=alternation.ipynb=]] examples of alternating automata. - [[https://spot.lrde.epita.fr/ipynb/stutter-inv.html][=stutter-inv.ipynb=]] working with stutter-invariant formulas properties. - [[https://spot.lrde.epita.fr/ipynb/satmin.html][=satmin.ipynb=]] Python interface for [[file:satmin.org][SAT-based minimization of deterministic ω-automata]]. diff --git a/doc/org/tut03.org b/doc/org/tut03.org index b48366a82..c70a3dab3 100644 --- a/doc/org/tut03.org +++ b/doc/org/tut03.org @@ -81,7 +81,7 @@ simplifications called /trivial identities/. For instance =formula::F(formula::X(formula::tt()))= will return the same formula as =formula::tt()=. These simplifications are those that involve the true and false constants, impotence (=F(F(e))=F(e)=), involutions -(=Not(Not(e)=e=), associativity +(=Not(Not(e))=e=), associativity (=And({And({e1,e2},e3})=And({e1,e2,e3})=). See [[https://spot.lrde.epita.fr/tl.pdf][tl.pdf]] for a list of these /trivial identities/. 
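As an aside for readers following this tutorial change: the /trivial identities/ mentioned just above are easy to observe interactively. The short sketch below is not part of the patch; it only assumes the =spot= Python module and the =spot.formula= constructors already discussed in tut03.org.

#+BEGIN_SRC python
import spot

# Trivial identities are applied when formulas are constructed:
# F(X(1)) collapses to the constant "1" (true).
f = spot.formula.F(spot.formula.X(spot.formula.tt()))
print(f)                       # prints "1"
print(f == spot.formula.tt())  # True

# Involution: a double negation disappears on construction.
g = spot.formula.Not(spot.formula.Not(spot.formula("a")))
print(g)                       # prints "a"
#+END_SRC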
@@ -113,7 +113,7 @@ detail of the top-level operator in the formula.
std::cout << f << '\n';
- // kindstar() prints the name of the operator
+ // kindstr() prints the name of the operator
// size() return the number of operands of the operators
std::cout << f.kindstr() << ", " << f.size() << " children\n";
// operator[] accesses each operand
@@ -157,7 +157,7 @@ The Python equivalent is similar:
print(f)
- # kindstar() prints the name of the operator
+ # kindstr() prints the name of the operator
# size() return the number of operands of the operators
print("{}, {} children".format(f.kindstr(), f.size()))
# [] accesses each operand
diff --git a/doc/org/tut10.org b/doc/org/tut10.org
index 419a33197..d4c45708a 100644
--- a/doc/org/tut10.org
+++ b/doc/org/tut10.org
@@ -139,7 +139,7 @@ automaton. Finally, the output as a never claim is done via the
int main()
{
- spot::parsed_formula pf = spot::parse_infix_psl("[]<>a || <>[]b");
+ spot::parsed_formula pf = spot::parse_infix_psl("GFa -> GFb");
if (pf.format_errors(std::cerr))
return 1;
spot::translator trans;
@@ -158,22 +158,22 @@ never {
T0_init:
if
:: (true) -> goto T0_init
- :: (a) -> goto accept_S1
- :: (b) -> goto accept_S2
+ :: (b) -> goto accept_S1
+ :: (!(a)) -> goto accept_S2
fi;
accept_S1:
if
- :: (a) -> goto accept_S1
- :: (!(a)) -> goto T0_S3
+ :: (b) -> goto accept_S1
+ :: (!(b)) -> goto T0_S3
fi;
accept_S2:
if
- :: (b) -> goto accept_S2
+ :: (!(a)) -> goto accept_S2
fi;
T0_S3:
if
- :: (a) -> goto accept_S1
- :: (!(a)) -> goto T0_S3
+ :: (b) -> goto accept_S1
+ :: (!(b)) -> goto T0_S3
fi;
}
diff --git a/doc/org/tut40.org b/doc/org/tut40.org
index b68efe558..8d9b004da 100644
--- a/doc/org/tut40.org
+++ b/doc/org/tut40.org
@@ -144,9 +144,11 @@ states.
We now look at how to create such a game in Python.
-Essentially, a game in Spot is just an automaton equiped with a
-special property to indicate the owner of each states. So it can be
-created using the usual interface:
+Essentially, a game in Spot is just an automaton equipped with a [[file:concepts.org::#named-properties][named
+property "state-player"]] that holds a Boolean vector indicating the
+owner of each state. The game can be created using the usual
+automaton interface, and the owners are set by calling
+=game.set_state_players()= with a vector of Booleans at the very end.
#+NAME: build_game
#+BEGIN_SRC python :exports code
@@ -173,7 +175,7 @@ created using the usual interface:
todo = []
# Create the state (i, j) for a player if it does not exist yet and
- # returns the state's number in the game.
+ # return the state's number in the game.
def get_game_state(player, i, j):
orig_state = s_orig_states if player else d_orig_states
if (i, j) in orig_state:
diff --git a/doc/spot.bib b/doc/spot.bib
index 9f18ad2a9..6193cb1a2 100644
--- a/doc/spot.bib
+++ b/doc/spot.bib
@@ -1,4 +1,3 @@
-
@InProceedings{ babiak.12.tacas,
author = {Tom{\'a}{\v{s}} Babiak and Mojm{\'i}r K{\v{r}}et{\'i}nsk{\'y} and Vojt{\v{e}}ch {\v{R}}eh{\'a}k
@@ -173,6 +172,22 @@ doi = {10.4230/LIPIcs.ICALP.2021.123}
}
+@InProceedings{ casares.22.tacas,
+ author = {Antonio Casares and Alexandre Duret-Lutz and Klara J.
+ Meyer and Florian Renkin and Salomon Sickert}, + title = {Practical Applications of the {A}lternating {C}ycle + {D}ecomposition}, + booktitle = {Proceedings of the 28th International Conference on Tools + and Algorithms for the Construction and Analysis of + Systems}, + year = {2022}, + series = {Lecture Notes in Computer Science}, + month = apr, + volume = {13244}, + pages = {99--117}, + doi = {10.1007/978-3-030-99527-0_6}, +} + @InProceedings{ cerna.03.mfcs, author = {Ivana {\v{C}}ern{\'a} and Radek Pel{\'a}nek}, title = {Relating Hierarchy of Temporal Properties to Model @@ -214,6 +229,18 @@ doi = {10.1109/DepCoS-RELCOMEX.2009.31} } +@InProceedings{ cimatti.06.fmcad, + author = {Cimatti, Alessandro and Roveri, Marco and Semprini, Simone + and Tonetta, Stefano}, + title = {From {PSL} to {NBA}: a Modular Symbolic Encoding}, + booktitle = {Proceedings of the 6th conference on Formal Methods in + Computer Aided Design (FMCAD'06)}, + pages = {125--133}, + year = {2006}, + publisher = {IEEE Computer Society}, + doi = {10.1109/FMCAD.2006.19} +} + @Article{ cimatti.08.tcad, author = {Alessandro Cimatti and Marco Roveri and Stefano Tonetta}, journal = {IEEE Transactions on Computer Aided Design of Integrated @@ -341,6 +368,41 @@ doi = {10.1504/IJCCBS.2014.059594} } +@InProceedings{ duret.16.atva, + author = {Alexandre Duret-Lutz and Fabrice Kordon and Denis + Poitrenaud and Etienne Renault}, + title = {Heuristics for Checking Liveness Properties with Partial + Order Reductions}, + booktitle = {Proceedings of the 14th International Symposium on + Automated Technology for Verification and Analysis + (ATVA'16)}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer}, + volume = {9938}, + pages = {340--356}, + year = {2016}, + month = oct, + doi = {10.1007/978-3-319-46520-3_22} +} + +@InProceedings{ duret.22.cav, + author = {Alexandre~Duret-Lutz and Etienne Renault and Maximilien + Colange and Florian Renkin and Alexandre Gbaguidi~Aisse and + Philipp Schlehuber-Caissier and Thomas Medioni and Antoine + Martin and J{\'e}r{\^o}me Dubois and Cl{\'e}ment Gillard + and Henrich Lauko}, + title = {From {S}pot 2.0 to {S}pot 2.10: What's New?}, + booktitle = {Proceedings of the 34th International Conference on + Computer Aided Verification (CAV'22)}, + year = 2022, + volume = {13372}, + series = {Lecture Notes in Computer Science}, + pages = {174--187}, + month = aug, + publisher = {Springer}, + doi = {10.1007/978-3-031-13188-2_9} +} + @InProceedings{ dwyer.98.fmsp, author = {Matthew B. Dwyer and George S. Avrunin and James C. Corbett}, @@ -846,6 +908,22 @@ doi = {10.1007/978-3-030-59152-6_7} } +@InProceedings{ renkin.22.forte, + author = {Florian Renkin and Philipp Schlehuber-Caissier and + Alexandre Duret-Lutz and Adrien Pommellet}, + title = {Effective Reductions of {M}ealy Machines}, + year = 2022, + booktitle = {Proceedings of the 42nd International Conference on Formal + Techniques for Distributed Objects, Components, and Systems + (FORTE'22)}, + series = {Lecture Notes in Computer Science}, + volume = 13273, + pages = {170--187}, + month = jun, + publisher = {Springer}, + doi = {10.1007/978-3-031-08679-3_8} +} + @InProceedings{ rozier.07.spin, author = {Kristin Y. Rozier and Moshe Y. 
Vardi}, title = {LTL Satisfiability Checking}, @@ -1008,7 +1086,19 @@ publisher = {Elsevier}, editor = {Rance Cleaveland and Hubert Garavel}, year = {2002}, - month = jul, + month = jul, pdf = {adl/duret.16.atva.pdf}, + abstract = {Checking liveness properties with partial-order reductions + requires a cycle proviso to ensure that an action cannot be + postponed forever. The proviso forces each cycle to contain + at least one fully expanded state. We present new + heuristics to select which state to expand, hoping to + reduce the size of the resulting graph. The choice of the + state to expand is done when encountering a + \emph{dangerous} edge. Almost all existing provisos expand + the source of this edge, while this paper also explores the + expansion of the destination and the use of SCC-based + information.}, + address = {M{\'a}laga, Spain}, doi = {10.1016/S1571-0661(04)80409-2} } diff --git a/doc/tl/tl.tex b/doc/tl/tl.tex index 2c0599f82..e7c283bc6 100644 --- a/doc/tl/tl.tex +++ b/doc/tl/tl.tex @@ -395,7 +395,7 @@ following Boolean operators: (allowing better compatibility with Wring and VIS) may only used in temporal formulas. Boolean expressions that occur inside SERE (see Section~\ref{sec:sere}) may not use this form because the $\STARALT$ - symbol is used as the Kleen star.} + symbol is used as the Kleene star.} Additionally, an atomic proposition $a$ can be negated using the syntax \samp{$a$=0}, which is equivalent to \samp{$\NOT a$}. Also @@ -600,7 +600,7 @@ the source. It can mean either ``\textit{Sequential Extended Regular ``\textit{Semi-Extended Regular Expression}''~\citep{eisner.08.hvc}. In any case, the intent is the same: regular expressions with traditional operations (union `$\OR$', concatenation `$\CONCAT$', -Kleen star `$\STAR{}$') are extended with operators such as +Kleene star `$\STAR{}$') are extended with operators such as intersection `$\ANDALT$', and fusion `$\FUSION$'. Any Boolean formula (section~\ref{def:boolform}) is a SERE. SERE can @@ -638,7 +638,7 @@ denote arbitrary SERE. \end{tabular} \end{center} -\footnotetext{\emph{Non-Length-Matching} interesction.} +\footnotetext{\emph{Non-Length-Matching} intersection.} The character \samp{\$} or the string \samp{inf} can also be used as value for $\mvar{j}$ in the above operators to denote an unbounded @@ -668,20 +668,17 @@ $a$ is an atomic proposition. 
\sigma\VDash f\FUSION g&\iff \exists k\in\N,\,(\sigma^{0..k} \VDash f)\land(\sigma^{k..} \VDash g)\\ \sigma\VDash f\STAR{\mvar{i}..\mvar{j}}& \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma=\varepsilon \\ - \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land (\exists k\in\N,\, - (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} - \VDash f\STAR{\mvar{0}..\mvar{j-1}}))\\ + \text{either} & \mvar{i}=0 \land\mvar{j}=0\land \sigma=\varepsilon \\ + \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land \bigl((\sigma = \varepsilon) \lor (\sigma + \VDash f\STAR{\mvar{1}..\mvar{j}})\bigr)\\ \text{or} & \mvar{i}>0 \land \mvar{j}>0 \land (\exists k\in\N,\, (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} \VDash f\STAR{\mvar{i-1}..\mvar{j-1}}))\\ \end{cases}\\ \sigma\VDash f\STAR{\mvar{i}..} & \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma=\varepsilon \\ - \text{or} & \mvar{i}=0 \land (\exists k\in\N,\, - (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} - \VDash f\STAR{\mvar{0}..}))\\ + \text{either} & \mvar{i}=0 \land \bigl((\sigma=\varepsilon)\lor(\sigma + \VDash f\STAR{\mvar{1}..})\bigr)\\ \text{or} & \mvar{i}>0 \land (\exists k\in\N,\, (\sigma^{0..k-1}\VDash f) \land (\sigma^{k..} \VDash f\STAR{\mvar{i-1}..}))\\ @@ -689,25 +686,22 @@ $a$ is an atomic proposition. \sigma\VDash f\FSTAR{\mvar{i}..\mvar{j}}& \iff \begin{cases} \text{either} & \mvar{i}=0 \land \mvar{j}=0 \land \sigma\VDash\1 \\ - \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land (\exists k\in\N,\, - (\sigma^{0..k}\VDash f) \land (\sigma^{k..} - \VDash f\FSTAR{\mvar{0}..\mvar{j-1}}))\\ + \text{or} & \mvar{i}=0 \land \mvar{j}>0 \land \bigl((\sigma\VDash\1)\lor(\sigma + \VDash f\FSTAR{\mvar{1}..\mvar{j}})\bigr)\\ \text{or} & \mvar{i}>0 \land \mvar{j}>0 \land (\exists k\in\N,\, (\sigma^{0..k}\VDash f) \land (\sigma^{k..} \VDash f\FSTAR{\mvar{i-1}..\mvar{j-1}}))\\ \end{cases}\\ \sigma\VDash f\FSTAR{\mvar{i}..} & \iff \begin{cases} - \text{either} & \mvar{i}=0 \land \sigma\VDash\1 \\ - \text{or} & \mvar{i}=0 \land (\exists k\in\N,\, - (\sigma^{0..k}\VDash f) \land (\sigma^{k..} - \VDash f\FSTAR{\mvar{0}..}))\\ + \text{either} & \mvar{i}=0 \land \bigl((\sigma\VDash\1) + \lor(\sigma \VDash f\FSTAR{\mvar{1}..})\bigr)\\ \text{or} & \mvar{i}>0 \land (\exists k\in\N,\, (\sigma^{0..k}\VDash f) \land (\sigma^{k..} \VDash f\FSTAR{\mvar{i-1}..}))\\ \end{cases}\\ \sigma\VDash \FIRSTMATCH\code(f\code) & \iff - (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k}\nVDash f) + (\sigma\VDash f)\land (\forall k<|\sigma|,\,\sigma^{0..k-1}\nVDash f) \end{align*}} Notes: @@ -859,10 +853,18 @@ The following rules are all valid with the two arguments swapped. \1\OR b &\equiv \1 & \1 \FUSION f & \equiv f\mathrlap{\text{~if~}\varepsilon\nVDash f}\\ && - \STAR{} \AND f &\equiv f & - \STAR{} \OR f &\equiv \1\mathrlap{\STAR{}} & + \STAR{} \ANDALT f &\equiv f & + \STAR{} \OR f &\equiv \mathrlap{\STAR{}} & && - \STAR{} \CONCAT f &\equiv \STAR{}\mathrlap{\text{~if~}\varepsilon\VDash f}& \\ + \STAR{} \CONCAT f &\equiv \STAR{}\text{~if~}\varepsilon\VDash f& \\ + && + \PLUS{} \ANDALT f &\equiv f \text{~if~}\varepsilon\nVDash f& + \PLUS{} \OR f &\equiv \begin{cases} + \mathrlap{\STAR{}\text{~if~} \varepsilon\VDash f} \\ + \mathrlap{\PLUS{}\text{~if~} \varepsilon\nVDash f} \\ + \end{cases} & + && + && \\ \eword\AND f &\equiv f & \eword\ANDALT f &\equiv \begin{cases} @@ -886,7 +888,9 @@ The following rules are all valid with the two arguments swapped. 
f\STAR{\mvar{i}..\mvar{j}}\CONCAT f&\equiv f\STAR{\mvar{i+1}..\mvar{j+1}} & f\STAR{\mvar{i}..\mvar{j}}\CONCAT f\STAR{\mvar{k}..\mvar{l}}&\equiv f\STAR{\mvar{i+k}..\mvar{j+l}}\\ f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f&\equiv f\FSTAR{\mvar{i+1}..\mvar{j+1}} & -f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f\FSTAR{\mvar{k}..\mvar{l}}&\equiv f\FSTAR{\mvar{i+k}..\mvar{j+l}} +f\FSTAR{\mvar{i}..\mvar{j}}\FUSION f\FSTAR{\mvar{k}..\mvar{l}}&\equiv f\FSTAR{\mvar{i+k}..\mvar{j+l}}\\ +b\STAR{\mvar{i}..\mvar{j}}\FUSION b &\equiv b\STAR{\mvar{\max(i,1)}..\mvar{j}} & +b\STAR{\mvar{i}..\mvar{j}}\FUSION b\STAR{\mvar{k}..\mvar{l}} &\equiv b\mathrlap{\STAR{\mvar{\max(i,1)+\max(k,1)-1}..\mvar{j+l-1}}} \end{align*} \section{SERE-LTL Binding Operators} @@ -1069,7 +1073,7 @@ psl2ba, Modella, and NuSMV all have $\U$ and $\R$ as left-associative, while Goal (hence Büchi store), LTL2AUT, and LTL2Büchi (from JavaPathFinder) have $\U$ and $\R$ as right-associative. Vis and LBTT have these two operators as non-associative (parentheses required). -Similarly the tools do not aggree on the associativity of $\IMPLIES$ +Similarly the tools do not agree on the associativity of $\IMPLIES$ and $\EQUIV$: some tools handle both operators as left-associative, or both right-associative, other have only $\IMPLIES$ as right-associative. @@ -1429,7 +1433,7 @@ $\NOT$ operator. \end{align*} Note that the above rules include the ``unabbreviation'' of operators -``$\EQUIV$'', ``$\IMPLIES$'', and ``$\XOR$'', correspondings to the +``$\EQUIV$'', ``$\IMPLIES$'', and ``$\XOR$'', corresponding to the rules \texttt{"ei\^"} of function `\verb=unabbreviate()= as described in Section~\ref{sec:unabbrev}. Therefore it is never necessary to apply these abbreviations before or after @@ -1926,6 +1930,12 @@ Many of the above rules were collected from the literature~\cite{somenzi.00.cav,tauriainen.03.tr,babiak.12.tacas} and sometimes generalized to support operators such as $\M$ and $\W$. +The first six rules, about n-ary operators $\AND$ and $\OR$, are +implemented for $n$ operands by testing each operand against all +other. To prevent the complexity to escalate, this is only performed +with up to 16 operands. That value can be changed in +``\verb|tl_simplifier_options::containment_max_ops|''. + The following rules mix implication-based checks with formulas that are pure eventualities ($e$) or that are purely universal ($u$). @@ -2097,3 +2107,14 @@ $f_1\AND f_2$ & \bor{f_1}{g}{f_2}{g} & & & %%% TeX-master: t %%% coding: utf-8 %%% End: + +% LocalWords: tabu Alexandre Duret Lutz toc subsequence Kripke unary +% LocalWords: LTL GFa INISHED ZX FX cccccrl UTF syntaxes disjunction +% LocalWords: VIS Kleene overline overbar ary cccrl EF sep FB LTLf +% LocalWords: rewritings TSLF NLM iter un SVA PSL SEREs DFA ccccc ba +% LocalWords: SystemVerilog clc ltl psl Modella NuSMV Büchi AUT Vis +% LocalWords: JavaPathFinder LBTT AST subtrees boolean nenoform lbt +% LocalWords: eword nn LBT's automata subformulas ottom unabbreviate +% LocalWords: Unabbreviations ei GRW RW WR unabbreviator simplifier +% LocalWords: tl unabbreviation indeterminism dnf cnf SNF rl iff BDD +% LocalWords: subformula diff --git a/elisp/Makefile.am b/elisp/Makefile.am index efdc604f5..c67a969e8 100644 --- a/elisp/Makefile.am +++ b/elisp/Makefile.am @@ -1,6 +1,6 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2015, 2016, 2017, 2018 Laboratoire de Recherche et Développement -## de l'Epita (LRDE). +## Copyright (C) 2015-2018, 2022 Laboratoire de Recherche et +## Développement de l'Epita (LRDE). 
## ## This file is part of Spot, a model checking library. ## @@ -19,7 +19,7 @@ EXTRA_DIST = hoa-mode.el -GIT = https://gitlab.lrde.epita.fr/spot/emacs-modes/raw/master/ +GIT = https://gitlab.lre.epita.fr/spot/emacs-modes/raw/master/ .PHONY: update-el update-el: diff --git a/elisp/hoa-mode.el b/elisp/hoa-mode.el index 9083b529d..58730b971 100644 --- a/elisp/hoa-mode.el +++ b/elisp/hoa-mode.el @@ -1,10 +1,10 @@ ;;; hoa-mode.el --- Major mode for the HOA format -*- lexical-binding: t -*- -;; Copyright (C) 2015, 2017, 2019 Alexandre Duret-Lutz +;; Copyright (C) 2015, 2017, 2019, 2022 Alexandre Duret-Lutz ;; Author: Alexandre Duret-Lutz ;; Maintainer: Alexandre Duret-Lutz -;; URL: https://gitlab.lrde.epita.fr/spot/emacs-modes +;; URL: https://gitlab.lre.epita.fr/spot/emacs-modes ;; Keywords: major-mode, automata, convenience ;; Created: 2015-11-13 diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..95016b487 --- /dev/null +++ b/flake.lock @@ -0,0 +1,43 @@ +{ + "nodes": { + "flake-utils": { + "locked": { + "lastModified": 1642700792, + "narHash": "sha256-XqHrk7hFb+zBvRg6Ghl+AZDq03ov6OshJLiSWOoX5es=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "846b2ae0fc4cc943637d3d1def4454213e203cba", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1673800717, + "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-22.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..cc29db3fe --- /dev/null +++ b/flake.nix @@ -0,0 +1,211 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.11"; + flake-utils.url = "github:numtide/flake-utils"; + }; + outputs = { self, nixpkgs, flake-utils, ... }: + flake-utils.lib.eachSystem + [ + "x86_64-linux" + ] + + (system: + let + pkgs = import nixpkgs { inherit system; }; + lib = pkgs.lib; + + mkSpotApps = appNames: + pkgs.lib.genAttrs appNames + (name: flake-utils.lib.mkApp { + drv = self.packages.${system}.spot; + name = name; + }); + + spotPackage = + let + inherit (builtins) + filter + head + isString + match + readFile + split + ; + + # NOTE: Maintaining the version separately would be a pain, and we + # can't have a flake.nix.in with a @VERSION@ because it would make + # the flake unusable without running autoconf first, defeating some + # of its purpose. + # + # So let's get it the hard way instead :) + extractVersionRegex = ''^AC_INIT\(\[spot], \[([^]]+)], \[spot@lrde\.epita\.fr]\)$''; + getLines = (fileContent: + filter isString (split "\n" fileContent) + ); + findVersionLine = (lines: + lib.lists.findFirst + (l: lib.strings.hasPrefix "AC_INIT(" l) + null + lines + ); + getVersion = (file: + let + lines = getLines (readFile file); + versionLine = findVersionLine lines; + version = head (match extractVersionRegex versionLine); + in + version + ); + in + { + lib, + pkgs, + stdenv, + # FIXME: do we want this flag? + buildOrgDoc ? false, + # Whether to enable Spot's Python 3 bindings + enablePython ? 
false + }: + stdenv.mkDerivation { + pname = "spot"; + version = getVersion ./configure.ac; + + src = self; + + enableParallelBuilding = true; + + # NOTE: Nix enables a lot of hardening flags by default, some of + # these probably harm performance so I've disabled everything + # (haven't benchmarked with vs without these, though). + hardeningDisable = [ "all" ]; + + # NOTE: mktexpk fails without a HOME set + preBuild = '' + export HOME=$TMPDIR + patchShebangs tools + '' + (if buildOrgDoc then '' + ln -s ${pkgs.plantuml}/lib/plantuml.jar doc/org/plantuml.jar + '' else '' + touch doc/org-stamp + ''); + + configureFlags = [ + "--disable-devel" + "--enable-optimizations" + ] ++ lib.optional (!enablePython) [ + "--disable-python" + ]; + + nativeBuildInputs = with pkgs; [ + autoreconfHook + + autoconf + automake + bison + flex + libtool + perl + ] ++ lib.optional buildOrgDoc [ + graphviz + groff + plantuml + pdf2svg + R + ] ++ lib.optional enablePython [ + python3 + swig4 + ]; + + buildInputs = with pkgs; [ + # should provide the minimum amount of packages necessary for + # building tl.pdf + (texlive.combine { + inherit (texlive) + scheme-basic + latexmk + + booktabs + cm-super + doi + doublestroke + etoolbox + koma-script + mathabx-type1 + mathpazo + metafont + microtype + nag + pgf + standalone + stmaryrd + tabulary + todonotes + wasy-type1 + wasysym + ; + }) + ]; + }; + in + { + defaultPackage = self.packages.${system}.spot; + + packages = { + # binaries + library only + spot = pkgs.callPackage spotPackage {}; + + # NOTE: clang build is broken on Nix when linking to stdlib++, using + # libcxx instead. See: + # https://github.com/NixOS/nixpkgs/issues/91285 + spotClang = pkgs.callPackage spotPackage { + stdenv = pkgs.llvmPackages.libcxxStdenv; + }; + + spotWithOrgDoc = pkgs.callPackage spotPackage { + buildOrgDoc = true; + }; + + spotWithPython = pkgs.python3Packages.toPythonModule ( + pkgs.callPackage spotPackage { + enablePython = true; + } + ); + + spotFull = pkgs.python3Packages.toPythonModule ( + pkgs.callPackage spotPackage { + buildOrgDoc = true; enablePython = true; + } + ); + }; + + apps = mkSpotApps [ + "autcross" + "autfilt" + "dstar2tgba" + "genaut" + "genltl" + "ltl2tgba" + "ltl2tgta" + "ltlcross" + "ltldo" + "ltlfilt" + "ltlgrind" + "ltlsynt" + "randaut" + "randltl" + ]; + + devShell = pkgs.mkShell { + name = "spot-dev"; + inputsFrom = [ self.packages.${system}.spotFull ]; + buildInputs = [ + pkgs.gdb + + (pkgs.python3.withPackages (p: [ + p.jupyter + p.ipython # otherwise ipython module isn't found when running ipynb tests + ])) + ]; + }; + }); +} diff --git a/m4/environ.m4 b/m4/environ.m4 new file mode 100644 index 000000000..ae5329108 --- /dev/null +++ b/m4/environ.m4 @@ -0,0 +1,46 @@ +# environ.m4 serial 8 +dnl Copyright (C) 2001-2004, 2006-2021 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN_ONCE([gl_ENVIRON], +[ + AC_REQUIRE([gl_UNISTD_H_DEFAULTS]) + dnl Persuade glibc to declare environ. + AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) + + AC_CHECK_HEADERS_ONCE([unistd.h]) + gt_CHECK_VAR_DECL( + [#if HAVE_UNISTD_H + #include + #endif + /* mingw, BeOS, Haiku declare environ in , not in . */ + #include + ], + [environ]) + if test $gt_cv_var_environ_declaration != yes; then + HAVE_DECL_ENVIRON=0 + fi +]) + +# Check if a variable is properly declared. 
+# gt_CHECK_VAR_DECL(includes,variable) +AC_DEFUN([gt_CHECK_VAR_DECL], +[ + define([gt_cv_var], [gt_cv_var_]$2[_declaration]) + AC_CACHE_CHECK([if $2 is properly declared], [gt_cv_var], + [AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[$1 + typedef struct { int foo; } foo_t; + extern foo_t $2;]], + [[$2.foo = 1;]])], + [gt_cv_var=no], + [gt_cv_var=yes])]) + if test $gt_cv_var = yes; then + AC_DEFINE([HAVE_]m4_translit($2, [a-z], [A-Z])[_DECL], 1, + [Define if you have the declaration of $2.]) + fi + undefine([gt_cv_var]) +]) diff --git a/m4/gccwarn.m4 b/m4/gccwarn.m4 index 4f719e55f..13f770ccc 100644 --- a/m4/gccwarn.m4 +++ b/m4/gccwarn.m4 @@ -21,6 +21,14 @@ AC_DEFUN([CF_GXX_WARNINGS], cat > conftest.$ac_ext < +#include + +// From GCC bug 106159 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106159 +struct left { virtual ~left() {} }; +struct right { virtual ~right() {} }; +struct both: public left, public right {}; + int main(int argc, char *argv[[]]) { // This string comparison is here to detect superfluous @@ -33,19 +41,26 @@ int main(int argc, char *argv[[]]) std::string a{"foo"}, b{"bar"}; if (b < a) return 1; + // GCC 12 has spurious warnings about ininialized values in regex. + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105562 + // We need -Wno-maybe-uninitialized in this case. + std::regex r{"a"}; + (void)r; return argv[[argc-1]] == nullptr; } EOF cf_save_CXXFLAGS="$CXXFLAGS" - ac_cv_prog_gxx_warn_flags="-W -Wall" + ac_cv_prog_gxx_warn_flags="-W -Werror" +dnl The following list has options of the form OPT:BAD:GOOD +dnl if -OPT fails we try -OPT -BAD. If -OPT succeeds we add -GOOD. for cf_opt in \ - Werror \ + Wall:Wno-maybe-uninitialized:\ Wint-to-void-pointer-cast \ Wzero-as-null-pointer-constant \ Wcast-align \ Wpointer-arith \ Wwrite-strings \ - Wcast-qual \ + Wcast-qual::DXTSTRINGDEFINES \ Wdocumentation \ Wmissing-declarations \ Wnoexcept \ @@ -58,11 +73,26 @@ EOF Wsuggest-override \ Wpedantic do - CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$cf_opt" - if AC_TRY_EVAL(ac_compile); then - ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$cf_opt" - test "$cf_opt" = Wcast-qual && ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -DXTSTRINGDEFINES" - fi + fopt=${cf_opt%%:*} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt" + case $cf_opt in + *:*:);; + *:*:*)ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -${cf_opt##*:}";; + esac + else + case $cf_opt in + *::*);; + *:*:*) + sopt=${cf_opt%:*} + sopt=${sopt#*:} + CXXFLAGS="$cf_save_CXXFLAGS $ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + if AC_TRY_EVAL(ac_compile); then + ac_cv_prog_gxx_warn_flags="$ac_cv_prog_gxx_warn_flags -$fopt -$sopt" + fi;; + esac + fi done rm -f conftest* CXXFLAGS="$cf_save_CXXFLAGS"]) diff --git a/m4/getopt.m4 b/m4/getopt.m4 index 595483d58..e291e0c66 100644 --- a/m4/getopt.m4 +++ b/m4/getopt.m4 @@ -1,5 +1,5 @@ # getopt.m4 serial 47 -dnl Copyright (C) 2002-2006, 2008-2020 Free Software Foundation, Inc. +dnl Copyright (C) 2002-2006, 2008-2020, 2022 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. @@ -363,13 +363,9 @@ dnl is ambiguous with environment values that contain newlines. 
AC_DEFUN([gl_GETOPT_SUBSTITUTE_HEADER], [ - AC_CHECK_HEADERS_ONCE([sys/cdefs.h]) - if test $ac_cv_header_sys_cdefs_h = yes; then - HAVE_SYS_CDEFS_H=1 - else - HAVE_SYS_CDEFS_H=0 - fi - AC_SUBST([HAVE_SYS_CDEFS_H]) + # pretend HAVE_SYS_CDEFS_H is always 0 including isn't + # really necessary and causes warning on Alpine Linux. + AC_SUBST([HAVE_SYS_CDEFS_H], [0]) AC_DEFINE([__GETOPT_PREFIX], [[rpl_]], [Define to rpl_ if the getopt replacement functions and variables diff --git a/m4/gnulib-cache.m4 b/m4/gnulib-cache.m4 index ad3802b82..e7f448f36 100644 --- a/m4/gnulib-cache.m4 +++ b/m4/gnulib-cache.m4 @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2020 Free Software Foundation, Inc. +# Copyright (C) 2002-2020, 2022 Free Software Foundation, Inc. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -57,6 +57,7 @@ gl_MODULES([ argmatch argp closeout + environ error isatty mkstemp diff --git a/m4/gnulib-comp.m4 b/m4/gnulib-comp.m4 index 54215ad69..66d18ca01 100644 --- a/m4/gnulib-comp.m4 +++ b/m4/gnulib-comp.m4 @@ -1,5 +1,5 @@ # DO NOT EDIT! GENERATED AUTOMATICALLY! -# Copyright (C) 2002-2020 Free Software Foundation, Inc. +# Copyright (C) 2002-2020, 2022 Free Software Foundation, Inc. # # This file is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -246,6 +246,8 @@ AC_SUBST([LTALLOCA]) AC_LIBOBJ([lstat]) gl_PREREQ_LSTAT fi + gl_ENVIRON + gl_UNISTD_MODULE_INDICATOR([environ]) gl_SYS_STAT_MODULE_INDICATOR([lstat]) gl_FUNC_MALLOC_GNU if test $REPLACE_MALLOC = 1; then diff --git a/m4/ltargz.m4 b/m4/ltargz.m4 deleted file mode 100644 index 0908d90b9..000000000 --- a/m4/ltargz.m4 +++ /dev/null @@ -1,74 +0,0 @@ -# Portability macros for glibc argz. -*- Autoconf -*- -# -# Copyright (C) 2004-2007, 2011-2015 Free Software Foundation, Inc. -# Written by Gary V. Vaughan -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 1 ltargz.m4 - -AC_DEFUN([LT_FUNC_ARGZ], [ -AC_CHECK_HEADERS([argz.h], [], [], [AC_INCLUDES_DEFAULT]) - -AC_CHECK_TYPES([error_t], - [], - [AC_DEFINE([error_t], [int], - [Define to a type to use for 'error_t' if it is not otherwise available.]) - AC_DEFINE([__error_t_defined], [1], [Define so that glibc/gnulib argp.h - does not typedef error_t.])], - [#if defined(HAVE_ARGZ_H) -# include -#endif]) - -LT_ARGZ_H= -AC_CHECK_FUNCS([argz_add argz_append argz_count argz_create_sep argz_insert \ - argz_next argz_stringify], [], [LT_ARGZ_H=lt__argz.h; AC_LIBOBJ([lt__argz])]) - -dnl if have system argz functions, allow forced use of -dnl libltdl-supplied implementation (and default to do so -dnl on "known bad" systems). Could use a runtime check, but -dnl (a) detecting malloc issues is notoriously unreliable -dnl (b) only known system that declares argz functions, -dnl provides them, yet they are broken, is cygwin -dnl releases prior to 16-Mar-2007 (1.5.24 and earlier) -dnl So, it's more straightforward simply to special case -dnl this for known bad systems. -AS_IF([test -z "$LT_ARGZ_H"], - [AC_CACHE_CHECK( - [if argz actually works], - [lt_cv_sys_argz_works], - [[case $host_os in #( - *cygwin*) - lt_cv_sys_argz_works=no - if test no != "$cross_compiling"; then - lt_cv_sys_argz_works="guessing no" - else - lt_sed_extract_leading_digits='s/^\([0-9\.]*\).*/\1/' - save_IFS=$IFS - IFS=-. 
- set x `uname -r | sed -e "$lt_sed_extract_leading_digits"` - IFS=$save_IFS - lt_os_major=${2-0} - lt_os_minor=${3-0} - lt_os_micro=${4-0} - if test 1 -lt "$lt_os_major" \ - || { test 1 -eq "$lt_os_major" \ - && { test 5 -lt "$lt_os_minor" \ - || { test 5 -eq "$lt_os_minor" \ - && test 24 -lt "$lt_os_micro"; }; }; }; then - lt_cv_sys_argz_works=yes - fi - fi - ;; #( - *) lt_cv_sys_argz_works=yes ;; - esac]]) - AS_IF([test yes = "$lt_cv_sys_argz_works"], - [AC_DEFINE([HAVE_WORKING_ARGZ], 1, - [This value is set to 1 to indicate that the system argz facility works])], - [LT_ARGZ_H=lt__argz.h - AC_LIBOBJ([lt__argz])])]) - -AC_SUBST([LT_ARGZ_H]) -]) diff --git a/python/spot/__init__.py b/python/spot/__init__.py index a351e9c54..02bdcb1f6 100644 --- a/python/spot/__init__.py +++ b/python/spot/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2014-2021 Laboratoire de -# Recherche et Développement de l'Epita (LRDE). +# Copyright (C) 2014-2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -104,19 +104,22 @@ def setup(**kwargs): os.environ['SPOT_DOTDEFAULT'] = d -# In version 3.0.2, Swig puts strongly typed enum in the main -# namespace without prefixing them. Latter versions fix this. So we -# can remove for following hack once 3.0.2 is no longer used in our -# build farm. -if 'op_ff' not in globals(): +# Swig versions prior to 4.1.0 export formula.X as formula_X as well, +# for all operators. Swig 4.1.0 stops doing this, breaking some +# existing code. +if 'formula_ff' not in globals(): for i in ('ff', 'tt', 'eword', 'ap', 'Not', 'X', 'F', 'G', 'Closure', 'NegClosure', 'NegClosureMarked', 'Xor', 'Implies', 'Equiv', 'U', 'R', 'W', 'M', 'EConcat', 'EConcatMarked', 'UConcat', 'Or', 'OrRat', 'And', 'AndRat', 'AndNLM', 'Concat', - 'Fusion', 'Star', 'FStar'): - globals()['op_' + i] = globals()[i] - del globals()[i] + 'Fusion', 'Star', 'FStar', 'nested_unop_range', + 'sugar_goto', 'sugar_equal', 'sugar_delay', 'unop', + 'binop', 'bunop', 'multop', 'first_match', 'unbounded'): + globals()['formula_' + i] = formula.__dict__[i].__func__ +if 'trival_maybe' not in globals(): + for i in ('maybe',): + globals()['trival_' + i] = trival.__dict__[i].__func__ # Global BDD dict so that we do not have to create one in user code. 
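To double-check the compatibility aliases recreated above, a quick session with the rebuilt bindings could look like the following sketch; it assumes that both the =formula.G= static constructor and the historical module-level =formula_G= spelling are exposed, as the loop above intends.

#+BEGIN_SRC python
import spot

a = spot.formula("a")
f1 = spot.formula.G(a)    # preferred spelling: static constructor
f2 = spot.formula_G(a)    # historical alias restored for Swig >= 4.1.0
print(f1, f2, f1 == f2)   # expected: Ga Ga True
#+END_SRC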
@@ -261,6 +264,12 @@ class twa: ostr = ostringstream() print_lbtt(ostr, a, opt) return ostr.str() + if format == 'pg': + if opt is not None: + raise ValueError("print_pg() has no option") + ostr = ostringstream() + print_pg(ostr, a) + return ostr.str() raise ValueError("unknown string format: " + format) def save(a, filename, format='hoa', opt=None, append=False): @@ -493,51 +502,57 @@ class acd: .acdacc polygon{fill:green;} ''' js = ''' -function acd{num}_clear(){{ - $("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge") - .removeClass("acdhigh acdbold acdacc acdrej"); +function acdremclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.remove(...classes)}});}} +function acdaddclasses(sel, classes) {{ +document.querySelectorAll(sel).forEach(n=>{{n.classList.add(...classes)}});}} +function acdonclick(sel, fn) {{ + document.querySelectorAll(sel).forEach(n=> + {{n.addEventListener("click", fn)}}); +}} +function acd{num}_clear() {{ + acdremclasses("#acd{num} .node,#acdaut{num} .node,#acdaut{num} .edge", + ["acdhigh", "acdbold", "acdacc", "acdrej"]); }}; function acd{num}_state(state){{ - acd{num}_clear(); - $("#acd{num} .acdS" + state).addClass("acdhigh acdbold"); - $("#acdaut{num} #S" + state).addClass("acdbold"); + acd{num}_clear(); + acdaddclasses("#acd{num} .acdS" + state, ["acdhigh", "acdbold"]); + acdaddclasses("#acdaut{num} #S" + state, ["acdbold"]); }}; function acd{num}_edge(edge){{ - acd{num}_clear(); - var theedge = $('#acdaut{num} #E' + edge) - var classList = theedge.attr('class').split(/\s+/); - $.each(classList, function(index, item) {{ - if (item.startsWith('acdN')) {{ - $("#acd{num} #" + item.substring(3)).addClass("acdhigh acdbold"); - }} - }}); - theedge.addClass("acdbold"); + acd{num}_clear(); + var theedge = document.querySelector('#acdaut{num} #E' + edge); + theedge.classList.forEach(function(item, index) {{ + if (item.startsWith('acdN')) {{ + acdaddclasses("#acd{num} #" + item.substring(3), ["acdhigh", "acdbold"]); + }} + }}); + theedge.classList.add("acdbold"); }}; function acd{num}_node(node, acc){{ acd{num}_clear(); - $("#acdaut{num} .acdN" + node).addClass(acc - ? "acdacc acdbold" - : "acdrej acdbold"); - $("#acd{num} #N" + node).addClass("acdbold acdhigh"); + acdaddclasses("#acdaut{num} .acdN" + node, + [acc ? "acdacc" : "acdrej", "acdbold"]); + acdaddclasses("#acd{num} #N" + node, ["acdbold", "acdhigh"]); }};'''.format(num=num) me = 0 for n in range(self.node_count()): for e in self.edges_of_node(n): me = max(e, me) - js += '$("#acdaut{num} #E{e}").addClass("acdN{n}");'\ + js += 'acdaddclasses("#acdaut{num} #E{e}", ["acdN{n}"]);\n'\ .format(num=num, e=e, n=n) for e in range(1, me + 1): - js += '$("#acdaut{num} #E{e}")'\ - '.click(function(){{acd{num}_edge({e});}});'\ + js += 'acdonclick("#acdaut{num} #E{e}",'\ + 'function(){{acd{num}_edge({e});}});\n'\ .format(num=num, e=e) for s in range(self.get_aut().num_states()): - js += '$("#acdaut{num} #S{s}")'\ - '.click(function(){{acd{num}_state({s});}});'\ + js += 'acdonclick("#acdaut{num} #S{s}",'\ + 'function(){{acd{num}_state({s});}});\n'\ .format(num=num, s=s) for n in range(self.node_count()): v = int(self.node_acceptance(n)) - js += '$("#acd{num} #N{n}")'\ - '.click(function(){{acd{num}_node({n}, {v});}});'\ + js += 'acdonclick("#acd{num} #N{n}",'\ + 'function(){{acd{num}_node({n}, {v});}});\n'\ .format(num=num, n=n, v=v) html = '
{}
{}
'\ .format(style, @@ -1292,6 +1307,36 @@ def sat_minimize(aut, acc=None, colored=False, else: return sm(aut, args, state_based) +# Adding the inline csv-display option +def minimize_mealy(mm, opt = -1, display_log = False, return_log = False): + from spot.impl import minimize_mealy as minmealy + + try: + lvl = int(opt) + opt = synthesis_info() + opt.minimize_lvl = lvl + 4 + except (ValueError, TypeError) as _: + pass + + if display_log or return_log: + import pandas as pd + with tempfile.NamedTemporaryFile(dir='.', suffix='.minlog') as t: + opt.opt.set_str("satlogcsv", t.name) + resmm = minmealy(mm, opt) + + dfrm = pd.read_csv(t.name, dtype=object) + if display_log: + from IPython.display import display + del dfrm['instance'] + display(dfrm) + if return_log: + return resmm, dfrm + else: + return resmm + else: + return minmealy(mm, opt) + + def parse_word(word, dic=_bdd_dict): from spot.impl import parse_word as pw @@ -1302,6 +1347,10 @@ def bdd_to_formula(b, dic=_bdd_dict): from spot.impl import bdd_to_formula as bf return bf(b, dic) +def bdd_to_cnf_formula(b, dic=_bdd_dict): + from spot.impl import bdd_to_cnf_formula as bf + return bf(b, dic) + def language_containment_checker(dic=_bdd_dict): from spot.impl import language_containment_checker as c diff --git a/python/spot/impl.i b/python/spot/impl.i index 90a38a55a..a5a5bf5df 100644 --- a/python/spot/impl.i +++ b/python/spot/impl.i @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2022 Laboratoire de Recherche et Développement +// Copyright (C) 2009-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -90,10 +90,13 @@ #include #include +#include #include +#include #include #include #include +#include #include #include #include @@ -113,13 +116,14 @@ #include #include #include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include #include #include #include @@ -159,6 +163,7 @@ #include #include #include +#include #include #include #include @@ -485,6 +490,7 @@ static void handle_any_exception() } } +%implicitconv spot::parallel_policy; %include %include %include @@ -517,8 +523,12 @@ namespace std { %template(vectorbdd) vector; %template(aliasvector) vector>; %template(vectorstring) vector; + %template(vectorint) vector; + %template(pair_formula_vectorstring) pair>; %template(atomic_prop_set) set; %template(relabeling_map) map; + %template(pair_formula) pair; + %template(vector_pair_formula) vector>; } %include @@ -531,6 +541,8 @@ namespace std { %include %include %include +%template(formula_to_bdd) spot::formula_to_bdd; + %include /* These operators may raise exceptions, and we do not want Swig4 to convert those exceptions to NotImplemented. 
*/ @@ -547,6 +559,27 @@ namespace std { } %apply std::vector &OUTPUT {std::vector& pairs} %apply std::vector &OUTPUT {std::vector& pairs} +// Must occur before the twa declaration +%typemap(out) SWIGTYPE spot::acc_cond::fin_unit_one_split %{ + { + auto& v = static_cast>($1); + $result = PyTuple_Pack(3, + swig::from(std::get<0>(v)), + swig::from(std::get<1>(v)), + swig::from(std::get<2>(v))); + } +%} +// Must occur before the twa declaration +%typemap(out) SWIGTYPE spot::acc_cond::fin_unit_one_split_improved %{ + { + auto& v = static_cast>($1); + $result = PyTuple_Pack(3, + swig::from(std::get<0>(v)), + swig::from(std::get<1>(v)), + swig::from(std::get<2>(v))); + } +%} + %include %template(pair_bool_mark) std::pair; @@ -575,8 +608,11 @@ namespace std { %include %include +%include +%include %include %include +%include %include %include %include @@ -661,11 +697,14 @@ def state_is_accepting(self, src) -> "bool": %include %include %include -%include -%include %include %include %include +%include +%include +%include +%include +%include %feature("flatnested") spot::twa_run::step; %include %template(list_step) std::list; @@ -677,8 +716,6 @@ def state_is_accepting(self, src) -> "bool": %include %include %include -%include -%include %include %include %include @@ -725,6 +762,7 @@ def state_is_accepting(self, src) -> "bool": %include %include %include +%include %include %include %include diff --git a/spot.spec.in b/spot.spec.in index 238647606..714d8589e 100755 --- a/spot.spec.in +++ b/spot.spec.in @@ -71,16 +71,16 @@ logic (LTL & PSL). %files -n libspot %{_libdir}/libbddx.a -%{_libdir}/libbddx.la +%exclude %{_libdir}/libbddx.la %{_libdir}/libbddx.so* %{_libdir}/libspot.a -%{_libdir}/libspot.la +%exclude %{_libdir}/libspot.la %{_libdir}/libspot.so* %{_libdir}/libspotgen.a -%{_libdir}/libspotgen.la +%exclude %{_libdir}/libspotgen.la %{_libdir}/libspotgen.so* %{_libdir}/libspotltsmin.a -%{_libdir}/libspotltsmin.la +%exclude %{_libdir}/libspotltsmin.la %{_libdir}/libspotltsmin.so* %license COPYING %doc AUTHORS COPYING NEWS README THANKS @@ -121,7 +121,7 @@ temporal logic (LTL & PSL). %dir %{python3_sitearch}/spot %{python3_sitearch}/spot/* %{python3_sitearch}/_buddy.*.a -%{python3_sitearch}/_buddy.*.la +%exclude %{python3_sitearch}/_buddy.*.la %{python3_sitearch}/_buddy.*.so %license COPYING %doc AUTHORS COPYING NEWS README THANKS diff --git a/spot/Makefile.am b/spot/Makefile.am index 821979f1d..806b299ad 100644 --- a/spot/Makefile.am +++ b/spot/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2016, 2017, 2020 +## Copyright (C) 2009, 2010, 2012, 2013, 2014, 2015, 2016, 2017, 2020, 2022 ## Laboratoire de Recherche et Développement de l'Epita (LRDE). 
## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), ## département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -35,7 +35,7 @@ SUBDIRS = misc priv tl graph twa twacube twaalgos ta taalgos kripke \ lib_LTLIBRARIES = libspot.la libspot_la_SOURCES = -libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined $(SYMBOLIC_LDFLAGS) +libspot_la_LDFLAGS = $(BUDDY_LDFLAGS) -no-undefined @LIBSPOT_PTHREAD@ $(SYMBOLIC_LDFLAGS) libspot_la_LIBADD = \ kripke/libkripke.la \ misc/libmisc.la \ @@ -68,6 +68,7 @@ libspot.pc: $(srcdir)/libspot.pc.in Makefile -e 's![@]includedir[@]!$(includedir)!g' \ -e 's![@]libdir[@]!$(libdir)!g' \ -e 's![@]PACKAGE_VERSION[@]!$(PACKAGE_VERSION)!g' \ + -e 's![@]LIBSPOT_PTHREAD[@]!$(LIBSPOT_PTHREAD)!g' \ $(srcdir)/libspot.pc.in > $@.tmp && mv $@.tmp $@ CLEANFILES = libspot.pc diff --git a/spot/bricks/brick-hashset b/spot/bricks/brick-hashset index 1c97c9618..7763d29ae 100644 --- a/spot/bricks/brick-hashset +++ b/spot/bricks/brick-hashset @@ -583,7 +583,7 @@ struct _ConcurrentHashSet : HashSetBase< Cell > return begin() + size(); } - Row() : _data( nullptr ), _size( 0 ) {} + Row() noexcept : _data( nullptr ), _size( 0 ) {} ~Row() { free(); } }; diff --git a/spot/gen/automata.cc b/spot/gen/automata.cc index 165ab8c98..73c057a00 100644 --- a/spot/gen/automata.cc +++ b/spot/gen/automata.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et // Developpement de l'EPITA (LRDE). // // This file is part of Spot, a model checking library. @@ -220,13 +220,48 @@ namespace spot return aut; } + static twa_graph_ptr + cyclist_trace_or_proof(unsigned n, bool trace, bdd_dict_ptr dict) + { + auto aut = make_twa_graph(dict); + acc_cond::mark_t m = aut->set_buchi(); + aut->new_states(n + 2); + aut->set_init_state(0); + if (trace) + m = {}; + aut->prop_state_acc(true); + + // How many AP to we need to represent n letters + unsigned nap = ulog2(n + 1); + std::vector apvars(nap); + for (unsigned a = 0; a < nap; ++a) + apvars[a] = aut->register_ap("p" + std::to_string(a)); + + if (trace) + aut->new_edge(0, 0, bddtrue); // the only non-deterministic edge + else + aut->prop_universal(true); + + bdd zero = bdd_ibuildcube(0, nap, apvars.data()); + aut->new_edge(0, 1, zero, m); + for (unsigned letter = 1; letter <= n; ++letter) + { + bdd cond = bdd_ibuildcube(letter, nap, apvars.data()); + aut->new_acc_edge(1, letter + 1, cond); + aut->new_edge(letter + 1, 1, zero, m); + } + + return aut; + } + + twa_graph_ptr aut_pattern(aut_pattern_id pattern, int n, bdd_dict_ptr dict) { if (n < 0) { std::ostringstream err; err << "pattern argument for " << aut_pattern_name(pattern) - << " should be positive"; + << " should be non-negative"; throw std::runtime_error(err.str()); } @@ -241,6 +276,10 @@ namespace spot return l_dsa(n, dict); case AUT_M_NBA: return m_nba(n, dict); + case AUT_CYCLIST_TRACE_NBA: + return cyclist_trace_or_proof(n, true, dict); + case AUT_CYCLIST_PROOF_DBA: + return cyclist_trace_or_proof(n, false, dict); case AUT_END: break; } @@ -255,6 +294,8 @@ namespace spot "l-nba", "l-dsa", "m-nba", + "cyclist-trace-nba", + "cyclist-proof-dba", }; // Make sure we do not forget to update the above table every // time a new pattern is added. 
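Reviewers who want to exercise the two new =cyclist= patterns can try a sketch along these lines; it assumes that =spot.gen= exposes the new =AUT_CYCLIST_*= enumerators like the existing ones, that =aut_pattern()= still defaults its BDD dictionary, and that =spot.contains(a, b)= tests whether the language of =b= is included in that of =a= (if the argument order differs, swap them).

#+BEGIN_SRC python
import spot
import spot.gen as sg

n = 3
trace = sg.aut_pattern(sg.AUT_CYCLIST_TRACE_NBA, n)
proof = sg.aut_pattern(sg.AUT_CYCLIST_PROOF_DBA, n)
print(trace.num_states(), proof.num_states())  # both should be n+2, i.e., 5 5
# The intended task for this family: the trace NBA contains the proof DBA.
print(spot.contains(trace, proof))             # expected: True
#+END_SRC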
diff --git a/spot/gen/automata.hh b/spot/gen/automata.hh
index d0c43d5f5..a54f75ac1 100644
--- a/spot/gen/automata.hh
+++ b/spot/gen/automata.hh
@@ -1,5 +1,5 @@
// -*- coding: utf-8 -*-
-// Copyright (C) 2017, 2019 Laboratoire de Recherche et Developpement de
+// Copyright (C) 2017, 2019, 2022 Laboratoire de Recherche et Developpement de
// l'EPITA (LRDE).
//
// This file is part of Spot, a model checking library.
@@ -79,6 +79,24 @@ namespace spot
/// propositions to encode the $n+1$ letters used in the
/// original alphabet.
AUT_M_NBA,
+ /// \brief An NBA with (n+2) states derived from a Cyclic test
+ /// case.
+ ///
+ /// This family of automata is derived from a couple of
+ /// examples supplied by Reuben Rowe. The task is to
+ /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA
+ /// for a given n contains the automaton generated with
+ /// AUT_CYCLIST_PROOF_DBA for the same n.
+ AUT_CYCLIST_TRACE_NBA,
+ /// \brief A DBA with (n+2) states derived from a Cyclic test
+ /// case.
+ ///
+ /// This family of automata is derived from a couple of
+ /// examples supplied by Reuben Rowe. The task is to
+ /// check that the automaton generated with AUT_CYCLIST_TRACE_NBA
+ /// for a given n contains the automaton generated with
+ /// AUT_CYCLIST_PROOF_DBA for the same n.
+ AUT_CYCLIST_PROOF_DBA,
AUT_END
};
diff --git a/spot/gen/formulas.cc b/spot/gen/formulas.cc
index a94512970..10841e820 100644
--- a/spot/gen/formulas.cc
+++ b/spot/gen/formulas.cc
@@ -1,5 +1,5 @@
// -*- coding: utf-8 -*-
-// Copyright (C) 2012-2019 Laboratoire de Recherche et Developpement
+// Copyright (C) 2012-2019, 2022 Laboratoire de Recherche et Developpement
// de l'EPITA (LRDE).
//
// This file is part of Spot, a model checking library.
@@ -1198,13 +1198,13 @@ namespace spot
}
static formula
- pps_arbiter(std::string r_, std::string g_, int n, bool strict_)
+ pps_arbiter(std::string r_, std::string g_, unsigned n, bool strict_)
{
formula* r = new formula[n];
formula* g = new formula[n];
std::vector res;
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
{
r[i] = formula::ap(r_ + std::to_string(i + 1));
g[i] = formula::ap(g_ + std::to_string(i + 1));
@@ -1218,17 +1218,17 @@ namespace spot
formula phi_s;
{
std::vector res;
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
res.push_back(formula::Not(r[i]));
theta_e = formula::And(res);
res.clear();
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
res.push_back(formula::Not(g[i]));
theta_s = formula::And(res);
res.clear();
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
{
formula left = formula::Xor(r[i], g[i]);
formula right = formula::Equiv(r[i], formula::X(r[i]));
@@ -1237,9 +1237,9 @@ namespace spot
psi_e = formula::And(res);
res.clear();
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
{
- for (int j = 0; j < i; ++j)
+ for (unsigned j = 0; j < i; ++j)
res.push_back(formula::Not(formula::And({g[i], g[j]})));
formula left = formula::Equiv(r[i], g[i]);
formula right = formula::Equiv(g[i], formula::X(g[i]));
@@ -1248,7 +1248,7 @@ namespace spot
psi_s = formula::And(res);
res.clear();
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
{
formula f = formula::Not(formula::And({r[i], g[i]}));
res.push_back(formula::G(formula::F(f)));
@@ -1256,7 +1256,7 @@ namespace spot
phi_e = formula::And(res);
res.clear();
- for (int i = 0; i < n; ++i)
+ for (unsigned i = 0; i < n; ++i)
{
res.push_back(formula::G(formula::F(formula::Equiv(r[i], g[i]))));
}
@@ -1267,9 +1267,9 @@ namespace
spot if (!strict_) { + formula left = formula::And({formula::G(psi_e), phi_e}); formula imp = - formula::Implies(formula::And({formula::G(psi_e), phi_e}), - formula::And({formula::G(psi_s), phi_s})); + formula::Implies(left, formula::And({formula::G(psi_s), phi_s})); return formula::Implies(theta_e, formula::And({theta_s, imp})); } else @@ -1281,6 +1281,21 @@ namespace spot } } + // G[0..n]((a S b) -> c) rewritten using future operators, + // from Edmond Irani Liu (EIL). GSI stands for "Globally Since Implies." + static formula eil_gsi(int n, std::string a, std::string b, std::string c) + { + formula fa = formula::ap(a); + formula fb = formula::ap(b); + formula res = fb; + for (int i = 1; i <= n; ++i) + { + formula tmp = formula::And({formula::strong_X(i, fa), res}); + res = formula::Or({formula::strong_X(i, fb), tmp}); + } + return formula::Implies(res, formula::strong_X(n, formula::ap(c))); + } + formula ltl_pattern(ltl_pattern_id pattern, int n, int m) { if (n < 0) @@ -1317,6 +1332,8 @@ namespace spot return dac_pattern(n); case LTL_EH_PATTERNS: return eh_pattern(n); + case LTL_EIL_GSI: + return eil_gsi(n, "a", "b", "c"); case LTL_FXG_OR: return FXG_or_n("p", n); case LTL_GF_EQUIV: @@ -1418,6 +1435,7 @@ namespace spot "ccj-beta-prime", "dac-patterns", "eh-patterns", + "eil-gsi", "fxg-or", "gf-equiv", "gf-equiv-xn", @@ -1485,6 +1503,7 @@ namespace spot return 55; case LTL_EH_PATTERNS: return 12; + case LTL_EIL_GSI: case LTL_FXG_OR: case LTL_GF_EQUIV: case LTL_GF_EQUIV_XN: @@ -1554,6 +1573,7 @@ namespace spot case LTL_CCJ_BETA_PRIME: case LTL_DAC_PATTERNS: case LTL_EH_PATTERNS: + case LTL_EIL_GSI: case LTL_FXG_OR: case LTL_GF_EQUIV: case LTL_GF_EQUIV_XN: diff --git a/spot/gen/formulas.hh b/spot/gen/formulas.hh index ac5974e48..ef5a0d850 100644 --- a/spot/gen/formulas.hh +++ b/spot/gen/formulas.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2019 Laboratoire de Recherche et Developpement de -// l'EPITA (LRDE). +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +// Developpement de l'EPITA (LRDE). // // This file is part of Spot, a model checking library. // @@ -56,6 +56,8 @@ namespace spot /// 12 formulas from Etessami and Holzmann. /// \cite etessami.00.concur LTL_EH_PATTERNS, + /// Familly sent by Edmond Irani Liu + LTL_EIL_GSI, /// `F(p0 | XG(p1 | XG(p2 | ... XG(pn))))` LTL_FXG_OR, /// `(GFa1 & GFa2 & ... & GFan) <-> GFz` diff --git a/spot/graph/graph.hh b/spot/graph/graph.hh index 75e0977b7..e6afda738 100644 --- a/spot/graph/graph.hh +++ b/spot/graph/graph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2018, 2020, 2021 Laboratoire de Recherche et +// Copyright (C) 2014-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. 
@@ -20,6 +20,7 @@ #pragma once #include +#include #include #include #include @@ -28,6 +29,9 @@ #include #include #include +#ifdef SPOT_ENABLE_PTHREAD +# include +#endif // SPOT_ENABLE_PTHREAD namespace spot { @@ -553,10 +557,11 @@ namespace spot { std::map, unsigned> uniq_; G& g_; + unsigned acc_sink_; public: - univ_dest_mapper(G& graph) - : g_(graph) + univ_dest_mapper(G& graph, unsigned sink = -1u) + : g_(graph), acc_sink_(sink) { } @@ -566,6 +571,9 @@ namespace spot std::vector tmp(begin, end); std::sort(tmp.begin(), tmp.end()); tmp.erase(std::unique(tmp.begin(), tmp.end()), tmp.end()); + if (acc_sink_ != -1u && tmp.size() > 1) + tmp.erase(std::remove(tmp.begin(), tmp.end(), acc_sink_), + tmp.end()); auto p = uniq_.emplace(tmp, 0); if (p.second) p.first->second = g_.new_univ_dests(tmp.begin(), tmp.end()); @@ -800,8 +808,23 @@ namespace spot return *dst_begin; SPOT_ASSERT(sz > 1); unsigned d = dests_.size(); - dests_.emplace_back(sz); - dests_.insert(dests_.end(), dst_begin, dst_end); + if (!dests_.empty() + && &*dst_begin >= &dests_.front() + && &*dst_begin <= &dests_.back() + && (dests_.capacity() - dests_.size()) < (sz + 1)) + { + // If dst_begin...dst_end points into dests_ and dests_ risks + // being reallocated, we have to save the destination + // states before we lose them. + std::vector tmp(dst_begin, dst_end); + dests_.emplace_back(sz); + dests_.insert(dests_.end(), tmp.begin(), tmp.end()); + } + else + { + dests_.emplace_back(sz); + dests_.insert(dests_.end(), dst_begin, dst_end); + } return ~d; } @@ -1226,6 +1249,78 @@ namespace spot std::stable_sort(edges_.begin() + 1, edges_.end(), p); } + /// \brief Sort all edges by src first, then, within edges of the same + /// source, use the predicate + /// + /// This will invalidate all iterators, and also destroy edge + /// chains. Call chain_edges_() immediately afterwards unless you + /// know what you are doing. + /// \note: for performance this will work in parallel (if enabled) + /// and make a temporary copy of the edges (needs more RAM) + /// \pre This needs the edge_vector to be in a coherent state when called + template> + void sort_edges_srcfirst_(Predicate p = Predicate(), + parallel_policy ppolicy = parallel_policy()) + { + SPOT_ASSERT(!edges_.empty()); + const unsigned ns = num_states(); + std::vector idx_list(ns+1); + edge_vector_t new_edges; + new_edges.reserve(edges_.size()); + new_edges.resize(1); + // This causes edge 0 to be considered as dead. + new_edges[0].next_succ = 0; + // Copy all edges so that they are sorted by src + for (unsigned s = 0; s < ns; ++s) + { + idx_list[s] = new_edges.size(); + for (const auto& e : out(s)) + new_edges.push_back(e); + } + idx_list[ns] = new_edges.size(); + // New edges are now sorted by source. + // If we have few edges or only one thread, sorting sequentially + // is preferable (what counts as "few" remains to be benchmarked). + auto bne = new_edges.begin(); +#ifndef SPOT_ENABLE_PTHREAD + (void) ppolicy; +#else + unsigned nthreads = ppolicy.nthreads(); + if (nthreads <= 1) +#endif + { + for (unsigned s = 0u; s < ns; ++s) + std::stable_sort(bne + idx_list[s], + bne + idx_list[s+1], p); + } +#ifdef SPOT_ENABLE_PTHREAD + else + { + static std::vector tv; + SPOT_ASSERT(tv.empty()); + tv.resize(nthreads); + // FIXME: Due to the way these threads advance into the state + // vector, they access very close memory locations. It would + // seem more cache-friendly to have threads work on blocks + // of contiguous states.
+ for (unsigned id = 0; id < nthreads; ++id) + tv[id] = std::thread( + [bne, id, ns, &idx_list, p, nthreads]() + { + for (unsigned s = id; s < ns; s += nthreads) + std::stable_sort(bne + idx_list[s], + bne + idx_list[s+1], p); + return; + }); + for (auto& t : tv) + t.join(); + tv.clear(); + } +#endif + std::swap(edges_, new_edges); + // Like after normal sort_edges, they need to be chained before usage + } + /// \brief Sort edges of the given states /// /// \tparam Predicate : Comparison type @@ -1243,14 +1338,19 @@ namespace spot //dump_storage(std::cerr); auto pi = [&](unsigned t1, unsigned t2) {return p(edges_[t1], edges_[t2]); }; + + // Sort the outgoing edges of each selected state according + // to predicate p. Do that in place. std::vector sort_idx_; - for (unsigned i = 0; i < num_states(); ++i) + unsigned ns = num_states(); + for (unsigned i = 0; i < ns; ++i) { if (to_sort_ptr && !(*to_sort_ptr)[i]) continue; - - sort_idx_.clear(); unsigned t = states_[i].succ; + if (t == 0) + continue; + sort_idx_.clear(); do { sort_idx_.push_back(t); diff --git a/spot/libspot.pc.in b/spot/libspot.pc.in index 2dac1de5d..9cb877b34 100644 --- a/spot/libspot.pc.in +++ b/spot/libspot.pc.in @@ -8,5 +8,5 @@ Description: A library of LTL and omega-automata algorithms for model checking URL: https://spot.lrde.epita.fr/ Version: @PACKAGE_VERSION@ Cflags: -I${includedir} -Libs: -L${libdir} -lspot +Libs: -L${libdir} -lspot @LIBSPOT_PTHREAD@ Requires: libbddx diff --git a/spot/ltsmin/spins_kripke.hxx b/spot/ltsmin/spins_kripke.hxx index bafb6f641..bdf47fbb6 100644 --- a/spot/ltsmin/spins_kripke.hxx +++ b/spot/ltsmin/spins_kripke.hxx @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement de -// l'Epita (LRDE) +// Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE) // // This file is part of Spot, a model checking library. // @@ -400,10 +400,10 @@ namespace spot } } - // FIXME I think we only need visbles aps, i.e. if the system has - // following variables, i.e. P_0.var1 and P_0.var2 but the property - // automaton only mention P_0.var2, we do not need to capture (in - // the resulting cube) any atomic proposition for P_0.var1 + // FIXME: I think we only need visible aps. E.g., if the system has + // variables P_0.var1 and P_0.var2 but the property automaton only + // mentions P_0.var2, we do not need to capture (in the resulting + // cube) any atomic proposition for P_0.var1 void kripkecube::match_aps(std::vector& aps, diff --git a/spot/mc/bloemen.hh b/spot/mc/bloemen.hh index 1a37a71be..432badb76 100644 --- a/spot/mc/bloemen.hh +++ b/spot/mc/bloemen.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -127,7 +127,7 @@ namespace spot bool b = it.isnew(); // Insertion failed, delete element - // FIXME Should we add a local cache to avoid useless allocations? + // FIXME: Should we add a local cache to avoid useless allocations? 
if (!b) p_.deallocate(v); else diff --git a/spot/mc/bloemen_ec.hh b/spot/mc/bloemen_ec.hh index c31f0231f..6e581a0ac 100644 --- a/spot/mc/bloemen_ec.hh +++ b/spot/mc/bloemen_ec.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -251,7 +251,7 @@ namespace spot uf_element* q; uf_element* r; - while (true) + do { a_root = find(a); b_root = find(b); @@ -261,28 +261,24 @@ namespace spot // Update acceptance condition { std::lock_guard rlock(a_root->acc_mutex_); - a_root->acc |= acc; acc |= a_root->acc; + a_root->acc = acc; } while (a_root->parent.load() != a_root) { a_root = find(a_root); std::lock_guard rlock(a_root->acc_mutex_); - a_root->acc |= acc; acc |= a_root->acc; + a_root->acc = acc; } return acc; } r = std::max(a_root, b_root); q = std::min(a_root, b_root); - - if (!lock_root(q)) - continue; - - break; } + while (!lock_root(q)); uf_element* a_list = lock_list(a); if (a_list == nullptr) @@ -329,9 +325,8 @@ namespace spot { std::lock_guard rlock(r->acc_mutex_); std::lock_guard qlock(q->acc_mutex_); - q->acc |= acc; - r->acc |= q->acc; - acc |= r->acc; + acc |= r->acc | q->acc; + r->acc = q->acc = acc; } while (r->parent.load() != r) { r = find(r); std::lock_guard rlock(r->acc_mutex_); std::lock_guard qlock(q->acc_mutex_); - r->acc |= q->acc; - acc |= r->acc; + acc |= r->acc | q->acc; + r->acc = acc; } unlock_list(a_list); @@ -360,9 +355,7 @@ namespace spot a_status = a->list_status_.load(); if (a_status == list_status::BUSY) - { - return a; - } + return a; if (a_status == list_status::DONE) break; @@ -407,9 +400,7 @@ namespace spot b_status = b->list_status_.load(); if (b_status == list_status::BUSY) - { - return b; - } + return b; if (b_status == list_status::DONE) break; @@ -556,8 +547,8 @@ namespace spot { auto root = uf_.find(w.second); - std::lock_guard lock(w.second->acc_mutex_); - scc_acc = w.second->acc; + std::lock_guard lock(root->acc_mutex_); + scc_acc = root->acc; } // cycle found in SCC and it contains acceptance condition diff --git a/spot/mc/cndfs.hh b/spot/mc/cndfs.hh index 9b414764f..02768144b 100644 --- a/spot/mc/cndfs.hh +++ b/spot/mc/cndfs.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2017, 2018, 2019, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2020, 2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library. @@ -191,9 +191,7 @@ namespace spot { // Try to insert the new state in the shared map. auto it = map_.insert(s); - bool b = it.isnew(); - - SPOT_ASSERT(!b); // should never be new in a red DFS + SPOT_ASSERT(!it.isnew()); // should never be new in a red DFS bool red = ((*it)).colors->red.load(); bool cyan = ((*it)).colors->l[tid_].cyan; bool in_Rp = ((*it)).colors->l[tid_].is_in_Rp; diff --git a/spot/mc/lpar13.hh b/spot/mc/lpar13.hh index 28b71aa4b..77396fb9d 100644 --- a/spot/mc/lpar13.hh +++ b/spot/mc/lpar13.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2016, 2018-2021 Laboratoire de Recherche et +// Copyright (C) 2015-2016, 2018-2022 Laboratoire de Recherche et // Developpement de l'Epita // // This file is part of Spot, a model checking library.
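(Back to the threaded sort_edges_srcfirst_() added to graph.hh above: work is distributed round-robin, thread id handling states id, id+nthreads, id+2*nthreads, ...; the FIXME notes that sorting blocks of contiguous states might be more cache-friendly. The following is a self-contained sketch of that partitioning scheme with illustrative names only, not Spot's actual edge storage.)

#include <algorithm>
#include <thread>
#include <vector>

// Stable-sort each state's edge range in parallel, assigning states to
// threads round-robin as sort_edges_srcfirst_() does.  ranges[s] and
// ranges[s+1] delimit the edges of state s inside 'edges'.
template <typename Edge, typename Pred>
void sort_ranges_round_robin(std::vector<Edge>& edges,
                             const std::vector<unsigned>& ranges,
                             Pred p, unsigned nthreads)
{
  unsigned ns = ranges.size() - 1;
  std::vector<std::thread> tv;
  tv.reserve(nthreads);
  for (unsigned id = 0; id < nthreads; ++id)
    tv.emplace_back([&edges, &ranges, p, id, ns, nthreads]() {
      // Thread 'id' owns states id, id + nthreads, id + 2*nthreads, ...
      for (unsigned s = id; s < ns; s += nthreads)
        std::stable_sort(edges.begin() + ranges[s],
                         edges.begin() + ranges[s + 1], p);
    });
  for (auto& t : tv)
    t.join();
}

With nthreads == 1 this degenerates into the sequential loop that the non-pthread build keeps.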
@@ -32,9 +32,9 @@ namespace spot { /// \brief This class implements the sequential emptiness check as /// presented in "Three SCC-based Emptiness Checks for Generalized - /// B\¨uchi Automata" (Renault et al, LPAR 2013). Among the three - /// emptiness check that has been proposed we opted to implement - /// the Gabow's one. + /// Büchi Automata" (Renault et al, LPAR 2013). Among the three + /// emptiness checks that have been proposed, we opted to implement + /// Gabow's one. template class SPOT_API lpar13 @@ -62,8 +62,8 @@ namespace spot size_t operator()(const product_state that) const noexcept { - // FIXME! wang32_hash(that.st_prop) could have - // been pre-calculated! + // FIXME: wang32_hash(that.st_prop) could have been + // pre-calculated! StateHash hasher; return wang32_hash(that.st_prop) ^ hasher(that.st_kripke); } @@ -135,7 +135,7 @@ namespace spot map[newtop]))) { sys_.recycle(todo.back().it_kripke, tid_); - // FIXME a local storage for twacube iterator? + // FIXME: a local storage for twacube iterator? todo.pop_back(); if (SPOT_UNLIKELY(found_)) { @@ -346,7 +346,7 @@ namespace spot ctrx_element* current = front; while (current != nullptr) { - // FIXME also display acc? + // FIXME: also display acc? res = res + " " + std::to_string(current->prod_st->st_prop) + + "*" + diff --git a/spot/misc/Makefile.am b/spot/misc/Makefile.am index e509dbe87..6b771dbb5 100644 --- a/spot/misc/Makefile.am +++ b/spot/misc/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2011-2014, 2016-2018, 2020-2021 Laboratoire de +## Copyright (C) 2011-2014, 2016-2018, 2020-2022 Laboratoire de ## Recherche et Développement de l'Epita (LRDE). ## Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de ## Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), diff --git a/spot/misc/bitvect.hh b/spot/misc/bitvect.hh index 3588b406e..74ab2bf3f 100644 --- a/spot/misc/bitvect.hh +++ b/spot/misc/bitvect.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2021, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -111,22 +111,22 @@ namespace spot return; if (storage_ == &local_storage_) { - block_t* new_storage_ = static_cast + block_t* new_storage = static_cast (malloc(new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + throw std::bad_alloc(); for (size_t i = 0; i < block_count_; ++i) - new_storage_[i] = storage_[i]; - storage_ = new_storage_; + new_storage[i] = storage_[i]; + storage_ = new_storage; } else { - auto old = storage_; - storage_ = static_cast - (realloc(old, new_block_count * sizeof(block_t))); - if (!storage_) - { - free(old); - throw std::bad_alloc(); - } + block_t* new_storage = static_cast + (realloc(storage_, new_block_count * sizeof(block_t))); + if (SPOT_UNLIKELY(!new_storage)) + // storage_, untouched, will be freed by the destructor.
+ throw std::bad_alloc(); + storage_ = new_storage; } block_count_ = new_block_count; } @@ -134,8 +134,8 @@ namespace spot private: void grow() { - size_t new_block_count_ = (block_count_ + 1) * 7 / 5; - reserve_blocks(new_block_count_); + size_t new_block_count = (block_count_ + 1) * 7 / 5; + reserve_blocks(new_block_count); } public: diff --git a/spot/misc/common.hh b/spot/misc/common.hh index e38f9f15a..8b066b0a5 100644 --- a/spot/misc/common.hh +++ b/spot/misc/common.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -145,6 +145,27 @@ namespace spot { } }; + + /// \brief This class is used to tell parallel algorithms what + /// resources they may use. + /// + /// Currently, this simply stores an integer indicating the number + /// of threads that the algorithm may create, but in the future it + /// will probably do more. + class SPOT_API parallel_policy + { + unsigned nthreads_; + public: + parallel_policy(unsigned nthreads = 1) : nthreads_(nthreads) + { + } + + unsigned nthreads() const + { + return nthreads_; + } + }; + } // This is a workaround for the issue described in GNU GCC bug 89303. diff --git a/spot/misc/optionmap.cc b/spot/misc/optionmap.cc index 3349f0f0d..8be8d1adc 100644 --- a/spot/misc/optionmap.cc +++ b/spot/misc/optionmap.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2013-2016, 2018 Laboratoire de Recherche +// Copyright (C) 2008, 2013-2016, 2018, 2022 Laboratoire de Recherche // et Développement de l'Epita (LRDE). // Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -130,6 +130,7 @@ namespace spot int option_map::get(const char* option, int def) const { + is_used_ = true; unused_.erase(option); auto it = options_.find(option); return (it == options_.end()) ? def : it->second; @@ -138,6 +139,7 @@ namespace spot std::string option_map::get_str(const char* option, std::string def) const { + is_used_ = true; unused_.erase(option); auto it = options_str_.find(option); return (it == options_str_.end()) ? 
def : it->second; @@ -158,17 +160,39 @@ namespace spot int option_map::set(const char* option, int val, int def) { - int old = get(option, def); - set_(option, val); - return old; + if (auto [p, b] = options_.emplace(option, val); b) + { + unused_.insert(option); + return def; + } + else + { + int old = p->second; + p->second = val; + return old; + } + } + + void + option_map::set_if_unset(const char* option, int val) + { + if (options_.emplace(option, val).second) + unused_.insert(option); } std::string option_map::set_str(const char* option, std::string val, std::string def) { - std::string old = get_str(option, def); - set_str_(option, val); - return old; + if (auto [p, b] = options_str_.emplace(option, val); b) + { + unused_.insert(option); + return def; + } + else + { + std::swap(val, p->second); + return val; + } } void @@ -204,21 +228,25 @@ namespace spot void option_map::report_unused_options() const { - auto s = unused_.size(); - if (s == 0U) - return; - std::ostringstream os; - if (s == 1U) - { - os << "option '" << *unused_.begin() - << "' was not used (possible typo?)"; - } - else - { - os << "the following options where not used (possible typos?):"; - for (auto opt: unused_) - os << "\n\t- '" << opt << '\''; - } - throw std::runtime_error(os.str()); + // We don't consider that an unused map has unused options. + if (is_used_) + { + auto s = unused_.size(); + if (s == 0U) + return; + std::ostringstream os; + if (s == 1U) + { + os << "option '" << *unused_.begin() + << "' was not used (possible typo?)"; + } + else + { + os << "the following options where not used (possible typos?):"; + for (auto opt: unused_) + os << "\n\t- '" << opt << '\''; + } + throw std::runtime_error(os.str()); + } } } diff --git a/spot/misc/optionmap.hh b/spot/misc/optionmap.hh index ea06c62f9..11ec8c456 100644 --- a/spot/misc/optionmap.hh +++ b/spot/misc/optionmap.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2015, 2016-2017 Laboratoire de Recherche et -// Developpement de l'Epita (LRDE) +// Copyright (C) 2013, 2015, 2016-2017, 2022 Laboratoire de Recherche +// et Developpement de l'Epita (LRDE) // Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. @@ -80,6 +80,9 @@ namespace spot /// or \a def otherwise. int set(const char* option, int val, int def = 0); + /// \brief Set the value of \a option to \a val if it is unset. + void set_if_unset(const char* option, int val); + /// \brief Set the value of a string \a option to \a val. /// /// \return The previous value associated to \a option if declared, @@ -107,6 +110,7 @@ namespace spot // will be erased as they are used. The resulting set can be used // for diagnosing errors. mutable std::set unused_; + mutable bool is_used_ = false; void set_(const std::string&, int val); void set_str_(const std::string&, const std::string& val); diff --git a/spot/misc/satsolver.hh b/spot/misc/satsolver.hh index 03a75fa02..3b5bedccd 100644 --- a/spot/misc/satsolver.hh +++ b/spot/misc/satsolver.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2017-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita. +// Copyright (C) 2013, 2017-2018, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -88,7 +88,7 @@ namespace spot /// \brief Add a single lit. to the current clause. void add(int v); - /// \breif Get the current number of clauses. 
+ /// \brief Get the current number of clauses. int get_nb_clauses() const; /// \brief Get the current number of variables. diff --git a/spot/parseaut/parseaut.yy b/spot/parseaut/parseaut.yy index 1e3de6781..5b8792e96 100644 --- a/spot/parseaut/parseaut.yy +++ b/spot/parseaut/parseaut.yy @@ -1,5 +1,5 @@ /* -*- coding: utf-8 -*- -** Copyright (C) 2014-2022 Laboratoire de Recherche et Développement +** Copyright (C) 2014-2023 Laboratoire de Recherche et Développement ** de l'Epita (LRDE). ** ** This file is part of Spot, a model checking library. @@ -44,6 +44,7 @@ #include "spot/priv/accmap.hh" #include #include +#include using namespace std::string_literals; @@ -256,6 +257,11 @@ extern "C" int strverscmp(const char *s1, const char *s2); %token ENDOFFILE 0 "end of file" %token '[' +%token LINEDIRECTIVE "#line" +%token BDD + +/**** DSTAR tokens ****/ +%token ENDDSTAR "end of DSTAR automaton" %token DRA "DRA" %token DSA "DSA" %token V2 "v2" @@ -263,14 +269,12 @@ extern "C" int strverscmp(const char *s1, const char *s2); %token ACCPAIRS "Acceptance-Pairs:" %token ACCSIG "Acc-Sig:" %token ENDOFHEADER "---" -%token LINEDIRECTIVE "#line" -%token BDD %left '|' %left '&' %precedence '!' -%type init-state-conj-2 state-conj-2 state-conj-checked +%type init-state-conj-2 state-conj-2 state-conj-checked pgame_succs %type checked-state-num state-num acc-set sign %type label-expr %type acc-sig acc-sets trans-acc_opt state-acc_opt @@ -299,10 +303,14 @@ extern "C" int strverscmp(const char *s1, const char *s2); %type nc-one-ident nc-ident-list %type acceptance-cond +/**** PGAME tokens ****/ +// Also using INT, STRING +%token PGAME "start of PGSolver game" +%token ENDPGAME "end of PGSolver game" + /**** LBTT tokens *****/ - // Also using INT, STRING +// Also using INT, STRING %token ENDAUT "-1" -%token ENDDSTAR "end of DSTAR automaton" %token LBTT "LBTT header" %token INT_S "state acceptance" %token LBTT_EMPTY "acceptance sets for empty automaton" @@ -364,6 +372,7 @@ aut-1: hoa { res.h->type = spot::parsed_aut_type::HOA; } | never { res.h->type = spot::parsed_aut_type::NeverClaim; } | lbtt { res.h->type = spot::parsed_aut_type::LBTT; } | dstar /* will set type as DSA or DRA while parsing first line */ + | pgame { res.h->type = spot::parsed_aut_type::PGAME; } /**********************************************************************/ /* Rules for HOA */ @@ -1222,6 +1231,7 @@ body: states // diagnostic, so let not add another one. if (res.states >= 0) n = res.states; + std::vector unused_undeclared; for (unsigned i = 0; i < n; ++i) { auto& p = res.info_states[i]; @@ -1230,17 +1240,43 @@ body: states if (p.used) error(p.used_loc, "state " + std::to_string(i) + " has no definition"); - if (!p.used && res.complete) - if (auto p = res.prop_is_true("complete")) - { - error(res.states_loc, - "state " + std::to_string(i) + - " has no definition..."); - error(p.loc, "... despite 'properties: complete'"); - } + if (!p.used) + unused_undeclared.push_back(i); res.complete = false; } } + if (!unused_undeclared.empty()) + { + std::ostringstream out; + unsigned uus = unused_undeclared.size(); + int rangestart = -2; + int rangecur = -2; + const char* sep = uus > 1 ? "states " : "state "; + auto print_range = [&]() { + if (rangecur < 0) + return; + out << sep << rangestart; + if (rangecur != rangestart) + out << '-' << rangecur; + sep = ","; + }; + for (unsigned s: unused_undeclared) + { + if ((int)s != rangecur + 1) + { + print_range(); + rangestart = s; + } + rangecur = s; + } + print_range(); + out << (uus > 1 ? 
" are" : " is") << " unused and undefined"; + error(res.states_loc, out.str()); + + if (auto p = res.prop_is_true("complete")) + error(p.loc, "automaton is incomplete because it has " + "undefined states"); + } if (res.complete) if (auto p = res.prop_is_false("complete")) { @@ -1765,7 +1801,7 @@ dstar_header: dstar_sizes if (res.states > 0) { - res.h->aut->new_states(res.states);; + res.h->aut->new_states(res.states); res.info_states.resize(res.states); } res.acc_style = State_Acc; @@ -1908,6 +1944,93 @@ dstar_states: %empty res.h->aut->new_edge(res.cur_state, i.first, i.second, $3); } +/**********************************************************************/ +/* Rules for PGSolver games */ +/**********************************************************************/ + +pgamestart: PGAME + { + if (res.opts.want_kripke) + { + error(@$, + "cannot read a Kripke structure out of a PGSolver game."); + YYABORT; + } + } + +pgame: pgamestart pgame_nodes ENDPGAME + { + unsigned n = res.accset; + auto p = spot::acc_cond::acc_code::parity_max_odd(n); + res.h->aut->set_acceptance(n, p); + res.acc_style = State_Acc; + // Pretend that we have declared all states. + n = res.h->aut->num_states(); + res.info_states.resize(n); + for (auto& p: res.info_states) + p.declared = true; + } + | pgamestart error ENDPGAME + { + error(@$, "failed to parse this as a PGSolver game"); + } + +pgame_nodes: pgame_node ';' + | pgame_nodes pgame_node ';' + +pgame_succs: INT + { $$ = new std::vector{$1}; } + | pgame_succs ',' INT + { + $$ = $1; + $$->emplace_back($3); + } + +pgame_node: INT INT INT pgame_succs string_opt + { + unsigned state = $1; + unsigned owner = $3; + if (owner > 1) + { + error(@3, "node owner should be 0 or 1"); + owner = 0; + } + // Create any missing state + unsigned max_state = state; + for (unsigned s: *$4) + max_state = std::max(max_state, s); + unsigned n = res.h->aut->num_states(); + if (n <= max_state) + res.h->aut->new_states(max_state + 1 - n); + + // assume the source of the first edge is initial + if (res.start.empty()) + res.start.emplace_back(@$, std::vector{state}); + + // Create all edges with priority $2 + spot::acc_cond::mark_t m({$2}); + for (unsigned s: *$4) + res.h->aut->new_edge(state, s, bddtrue, m); + res.accset = std::max(res.accset, 1 + (int) $2); + + n = res.h->aut->num_states(); + if (!res.state_player) + res.state_player = new std::vector(n); + else if (res.state_player->size() < n) + res.state_player->resize(n); + (*res.state_player)[state] = owner; + + if (std::string* name = $5) + { + if (!res.state_names) + res.state_names = new std::vector(n); + else if (res.state_names->size() < n) + res.state_names->resize(n); + (*res.state_names)[state] = std::move(*name); + delete name; + } + } + /**********************************************************************/ /* Rules for neverclaims */ /**********************************************************************/ @@ -2504,13 +2627,13 @@ static void fix_initial_state(result_& r) "a single initial state"); return; } + auto& aut = r.h->aut; // Fiddling with initial state may turn an incomplete automaton // into a complete one. - if (r.complete.is_false()) - r.complete = spot::trival::maybe(); + if (aut->prop_complete().is_false()) + aut->prop_complete(spot::trival::maybe()); // Multiple initial states. We might need to add a fake one, // unless one of the actual initial state has no incoming edge. 
- auto& aut = r.h->aut; std::vector has_incoming(aut->num_states(), 0); for (auto& t: aut->edges()) for (unsigned ud: aut->univ_dests(t)) @@ -2548,9 +2671,30 @@ static void fix_initial_state(result_& r) for (auto& pp: start) { unsigned p = pp.front(); - if (p != init) - for (auto& t: aut->out(p)) - aut->new_edge(init, t.dst, t.cond); + if (p == init) + continue; + if (!has_incoming[p]) + { + // If p has no incoming edge, we can simply take + // out its outgoing edges and "re-source" them on init. + // This will avoid creating new edges. + for (auto& t: aut->out(p)) + t.src = init; + auto& gr = aut->get_graph(); + auto& ps = gr.state_storage(p); + auto& is = gr.state_storage(init); + gr.edge_storage(is.succ_tail).next_succ = ps.succ; + is.succ_tail = ps.succ_tail; + ps.succ = ps.succ_tail = 0; + // we just created a state without successors + aut->prop_complete(false); + } + else + { + // duplicate all edges + for (auto& t: aut->out(p)) + aut->new_edge(init, t.dst, t.cond); + } } } else @@ -2571,6 +2715,24 @@ static void fix_initial_state(result_& r) } combiner.new_dests(init, comb_or); } + + // Merging two states may break state-based acceptance + // make sure all outgoing edges have the same color. + if (aut->prop_state_acc().is_true()) + { + bool first = true; + spot::acc_cond::mark_t prev; + for (auto& e: aut->out(init)) + if (first) + { + first = false; + prev = e.acc; + } + else if (e.acc != prev) + { + e.acc = prev; + } + } } } @@ -2618,45 +2780,51 @@ namespace spot { automaton_stream_parser::automaton_stream_parser(const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(name, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(name, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(int fd, const std::string& name, automaton_parser_options opt) - try : filename_(name), opts_(opt) { - if (hoayyopen(fd, &scanner_)) - throw std::runtime_error("Cannot open file "s + name); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + if (hoayyopen(fd, &scanner_)) + throw std::runtime_error("Cannot open file "s + name); + } + catch (...) + { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::automaton_stream_parser(const char* data, const std::string& filename, automaton_parser_options opt) - try : filename_(filename), opts_(opt) { - hoayystring(data, &scanner_); - } - catch (...) - { - hoayyclose(scanner_); - throw; + try + { + hoayystring(data, &scanner_); + } + catch (...) 
+ { + hoayyclose(scanner_); + throw; + } } automaton_stream_parser::~automaton_stream_parser() @@ -2742,8 +2910,8 @@ namespace spot r.aut_or_ks->set_named_prop("aliases", p); } fix_acceptance(r); + fix_properties(r); // before fix_initial_state fix_initial_state(r); - fix_properties(r); if (r.h->aut && !r.h->aut->is_existential()) r.h->aut->merge_univ_dests(); return r.h; diff --git a/spot/parseaut/public.hh b/spot/parseaut/public.hh index d1c1793be..ec16b3ad7 100644 --- a/spot/parseaut/public.hh +++ b/spot/parseaut/public.hh @@ -44,7 +44,14 @@ namespace spot struct parse_aut_error_list {}; #endif - enum class parsed_aut_type { HOA, NeverClaim, LBTT, DRA, DSA, Unknown }; + enum class parsed_aut_type { + HOA, + NeverClaim, + LBTT, + DRA, /* DSTAR format for Rabin */ + DSA, /* DSTAR format for Streett */ + PGAME, /* PG Solver Game */ + Unknown }; /// \brief Result of the automaton parser struct SPOT_API parsed_aut final @@ -91,11 +98,11 @@ namespace spot struct automaton_parser_options final { - bool ignore_abort = false; ///< Skip aborted automata - bool debug = false; ///< Run the parser in debug mode? - bool trust_hoa = true; ///< Trust properties in HOA files - bool raise_errors = false; ///< Raise errors as exceptions. - bool want_kripke = false; ///< Parse as a Kripke structure. + bool ignore_abort = false; ///< Skip aborted automata + bool debug = false; ///< Run the parser in debug mode? + bool trust_hoa = true; ///< Trust properties in HOA files + bool raise_errors = false; ///< Raise errors as exceptions. + bool want_kripke = false; ///< Parse as a Kripke structure. }; /// \brief Parse a stream of automata diff --git a/spot/parseaut/scanaut.ll b/spot/parseaut/scanaut.ll index 8cccaec0e..c04834975 100644 --- a/spot/parseaut/scanaut.ll +++ b/spot/parseaut/scanaut.ll @@ -25,12 +25,15 @@ /* %option debug */ %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif @@ -62,12 +65,18 @@ eol \n+|\r+ eol2 (\n\r)+|(\r\n)+ eols ({eol}|{eol2})* identifier [[:alpha:]_][[:alnum:]_.-]* +pgameinit "parity"[ \t]+[0-9]+[ \t]*; +oldpgameinit [0-9]+[ \t]+[0-9]+[ \t]+[01]+[ \t]+[0-9,]+([ \t]+".*")?[ \t]*; +/* A pattern than match the start of an automaton, in order +to detect the end of the previous one. We do not try to match +LBTT automata here. 
*/ +startaut {eols}("HOA:"|"never"|"DSA"|"DRA"|{pgameinit}) %x in_COMMENT in_STRING in_NEVER_PAR %s in_HOA in_NEVER in_LBTT_HEADER %s in_LBTT_STATE in_LBTT_INIT in_LBTT_TRANS %s in_LBTT_T_ACC in_LBTT_S_ACC in_LBTT_GUARD -%s in_DSTAR +%s in_DSTAR in_PGAME %% %{ @@ -124,7 +133,20 @@ identifier [[:alpha:]_][[:alnum:]_.-]* "never" BEGIN(in_NEVER); return token::NEVER; "DSA" BEGIN(in_DSTAR); return token::DSA; "DRA" BEGIN(in_DSTAR); return token::DRA; - +{pgameinit} { + BEGIN(in_PGAME); + char* end = nullptr; + errno = 0; + unsigned long n = strtoul(yytext + 7, &end, 10); + yylval->num = n; + return token::PGAME; + } +{oldpgameinit} { + BEGIN(in_PGAME); + yylval->num = 0; + yyless(0); + return token::PGAME; + } [0-9]+[ \t][0-9]+[ts]? { BEGIN(in_LBTT_HEADER); char* end = nullptr; @@ -226,10 +248,8 @@ identifier [[:alpha:]_][[:alnum:]_.-]* return token::INT; } [0-9]+ parse_int(); return token::INT; - /* The start of any automaton is the end of this one. - We do not try to detect LBTT automata, as that would - be too hard to distinguish from state numbers. */ - {eols}("HOA:"|"never"|"DSA"|"DRA") { + /* The start of any automaton is the end of this one. */ + {startaut} { yylloc->end = yylloc->begin; yyless(0); BEGIN(INITIAL); @@ -267,6 +287,24 @@ identifier [[:alpha:]_][[:alnum:]_.-]* } } +{ + /* Handle short numbers without going through parse_int() for efficiency. */ + [0-9] yylval->num = *yytext - '0'; return token::INT; + [0-9][0-9] { + yylval->num = (yytext[0] * 10) + yytext[1] - '0' * 11; + return token::INT; + } + [0-9]+ parse_int(); return token::INT; + /* The start of any automaton is the end of this one. */ + {startaut} { + yylloc->end = yylloc->begin; + yyless(0); + BEGIN(INITIAL); + return token::ENDPGAME; + } + <> return token::ENDPGAME; +} + /* Note: the LBTT format is scanf friendly, but not Bison-friendly. If we only tokenize it as a stream of INTs, the parser will have a very hard time recognizing what is a state from what is a @@ -451,6 +489,11 @@ identifier [[:alpha:]_][[:alnum:]_.-]* } } +"[" { + yylval->str = nullptr; + return *yytext; + } + . return *yytext; %{ diff --git a/spot/parsetl/Makefile.am b/spot/parsetl/Makefile.am index d98c9ebab..f218ca067 100644 --- a/spot/parsetl/Makefile.am +++ b/spot/parsetl/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2015, 2018 Laboratoire de Recherche et +## Copyright (C) 2008-2015, 2018, 2022 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris ## 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -30,7 +30,6 @@ noinst_LTLIBRARIES = libparsetl.la PARSETL_YY = parsetl.yy FROM_PARSETL_YY_MAIN = parsetl.cc FROM_PARSETL_YY_OTHERS = \ - stack.hh \ parsetl.hh FROM_PARSETL_YY = $(FROM_PARSETL_YY_MAIN) $(FROM_PARSETL_YY_OTHERS) diff --git a/spot/parsetl/parsetl.yy b/spot/parsetl/parsetl.yy index bbcdedcb5..117695404 100644 --- a/spot/parsetl/parsetl.yy +++ b/spot/parsetl/parsetl.yy @@ -1,7 +1,6 @@ /* -*- coding: utf-8 -*- - -** Copyright (C) 2009-2019, 2021 Laboratoire de Recherche et Développement -** de l'Epita (LRDE). +** Copyright (C) 2009-2019, 2021, 2022 Laboratoire de Recherche et +** Développement de l'Epita (LRDE). ** Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 ** (LIP6), département Systèmes Répartis Coopératifs (SRC), Université ** Pierre et Marie Curie. @@ -21,11 +20,13 @@ ** You should have received a copy of the GNU General Public License ** along with this program. If not, see . 
*/ -%require "3.0" +%require "3.3" %language "C++" %locations %defines %define api.prefix {tlyy} +%define api.value.type variant +%define api.value.automove true %debug %define parse.error verbose %expect 0 @@ -37,25 +38,164 @@ #include "config.h" #include #include +#include #include #include #include struct minmax_t { unsigned min, max; }; + + // pnode (parsing node) is similar to fnode (formula node) except + // that n-ary operators will delay their construction until all + // children are known; this is a hack to speed up the parsing, + // because n-ary operators usually do a lot of work on construction + // (sorting all children if the operator is commutative, removing + // duplicates if applicable, etc.). Building n-ary nodes by + // repeatedly calling the binary constructor as we did in the past + // has a prohibitive cost. See issue #500. + + struct nary + { + std::vector children; + spot::op kind; + }; + + struct pnode + { + // Hold either a constructed formula, or an n-ary operator that we + // will construct only when it is combined with a different + // operator. + std::variant data; + // Record whether this pnode has been transformed into a fnode (or + // moved to another pnode). If that occurred, the ownership of + // any fnode we store has been transferred to the constructed fnode + // (or to the other pnode), and our destructor has nothing to do. + // This is the usual case while parsing a formula without error. + // However, during error recovery, the parser may have to discard + // unused pnodes, in which case we have to remember to free fnodes + // during destruction. + // + // We have to track this used status because pnodes are destructed + // whenever the parser pops a token, and as of Bison 3.7.6, the + // handling of "%destructor" is broken when + // "%define api.value.type variant" is used. See + // https://lists.gnu.org/archive/html/bug-bison/2022-03/msg00000.html + bool used = false; + + pnode() + : data(nullptr) + { + } + + pnode(const spot::fnode* ltl) + : data(ltl) + { + } + + // We only support move construction. + pnode(const pnode& other) = delete; + pnode& operator=(const pnode& other) = delete; + + pnode(pnode&& other) + : data(std::move(other.data)) + { + other.used = true; + } + + pnode& operator=(pnode&& other) + { + data = std::move(other.data); + other.used = true; + return *this; + } + + ~pnode() + { + if (used) + return; + if (auto* n = std::get_if(&data)) + { + for (auto f: n->children) + f->destroy(); + } + else + { + auto* f = std::get(data); + // The only case where we expect f to be nullptr is if + // parse_ap() returns nullptr: then $$ is unset when YYERROR + // is called. + if (f) + f->destroy(); + } + } + + // Create a new n-ary node from left and right. + // This will empty left and right so that their + // destructors do nothing.
+ pnode(spot::op o, pnode&& left, pnode&& right) + : data(nary{}) + { + nary& n = std::get(data); + n.kind = o; + if (auto* nleft = std::get_if(&left.data); + nleft && nleft->kind == o) + std::swap(n.children, nleft->children); + else + n.children.push_back(left); + if (auto* nright = std::get_if(&right.data); + nright && nright->kind == o) + { + auto& rch = nright->children; + n.children.insert(n.children.end(), rch.begin(), rch.end()); + rch.clear(); + } + else + { + n.children.push_back(right); + } + } + + operator const spot::fnode*() + { + used = true; + if (auto* n = std::get_if(&data)) + { + return spot::fnode::multop(n->kind, n->children); + } + else + { + return std::get(data); + } + } + + // Convert to a temporary formula, for printing, do not mark as + // used. + const spot::formula tmp() const + { + const spot::fnode* f; + if (auto* n = std::get_if(&data)) + { + for (auto c: n->children) + c->clone(); + f = spot::fnode::multop(n->kind, n->children); + } + else + { + f = std::get(data); + assert(f != nullptr); + f->clone(); + } + return spot::formula(f); + } + }; + + } %parse-param {spot::parse_error_list &error_list} %parse-param {spot::environment &parse_environment} %parse-param {spot::formula &result} -%union -{ - std::string* str; - const spot::fnode* ltl; - unsigned num; - minmax_t minmax; -} - %code { /* parsetl.hh and parsedecl.hh include each other recursively. We mut ensure that YYSTYPE is declared (by the above %union) @@ -84,28 +224,20 @@ using namespace spot; } \ while (0); -// right is missing, so complain and use false. -#define missing_right_binop_hard(res, left, op, str) \ - do \ - { \ - left->destroy(); \ - missing_right_op(res, op, str); \ - } \ - while (0); - - static bool + static const fnode* sere_ensure_bool(const fnode* f, const spot::location& loc, const char* oper, spot::parse_error_list& error_list) { if (f->is_boolean()) - return true; + return f; + f->destroy(); std::string s; s.reserve(80); s = "not a Boolean expression: in a SERE "; s += oper; s += " can only be applied to a Boolean expression"; error_list.emplace_back(loc, s); - return false; + return nullptr; } static const fnode* @@ -164,7 +296,7 @@ using namespace spot; if (str.empty()) { error_list.emplace_back(location, "unexpected empty block"); - return nullptr; + return fnode::ff(); } spot::parsed_formula pf; @@ -196,9 +328,9 @@ using namespace spot; %token START_SERE "SERE start marker" %token START_BOOL "BOOLEAN start marker" %token PAR_OPEN "opening parenthesis" PAR_CLOSE "closing parenthesis" -%token PAR_BLOCK "(...) block" -%token BRA_BLOCK "{...} block" -%token BRA_BANG_BLOCK "{...}! block" +%token PAR_BLOCK "(...) block" +%token BRA_BLOCK "{...} block" +%token BRA_BANG_BLOCK "{...}! 
block" %token BRACE_OPEN "opening brace" BRACE_CLOSE "closing brace" %token BRACE_BANG_CLOSE "closing brace-bang" %token OP_OR "or operator" OP_XOR "xor operator" @@ -221,7 +353,7 @@ using namespace spot; %token OP_GOTO_OPEN "opening bracket for goto operator" %token OP_SQBKT_CLOSE "closing bracket" %token OP_SQBKT_STRONG_CLOSE "closing !]" -%token OP_SQBKT_NUM "number for square bracket operator" +%token OP_SQBKT_NUM "number for square bracket operator" %token OP_UNBOUNDED "unbounded mark" %token OP_SQBKT_SEP "separator for square bracket operator" %token OP_UCONCAT "universal concat operator" @@ -229,12 +361,12 @@ using namespace spot; %token OP_UCONCAT_NONO "universal non-overlapping concat operator" %token OP_ECONCAT_NONO "existential non-overlapping concat operator" %token OP_FIRST_MATCH "first_match" -%token ATOMIC_PROP "atomic proposition" +%token ATOMIC_PROP "atomic proposition" %token OP_CONCAT "concat operator" OP_FUSION "fusion operator" %token CONST_TRUE "constant true" CONST_FALSE "constant false" %token END_OF_INPUT "end of formula" %token OP_POST_NEG "negative suffix" OP_POST_POS "positive suffix" -%token OP_DELAY_N "SVA delay operator" +%token OP_DELAY_N "SVA delay operator" %token OP_DELAY_OPEN "opening bracket for SVA delay operator" %token OP_DELAY_PLUS "##[+] operator" %token OP_DELAY_STAR "##[*] operator" @@ -276,19 +408,16 @@ using namespace spot; need any precedence). */ %precedence OP_NOT -%type subformula atomprop booleanatom sere lbtformula boolformula -%type bracedsere parenthesedsubformula -%type starargs fstarargs equalargs sqbracketargs gotoargs delayargs -%type sqbkt_num +%type subformula atomprop booleanatom sere lbtformula +%type boolformula bracedsere parenthesedsubformula +%type starargs fstarargs equalargs sqbracketargs gotoargs delayargs +%type sqbkt_num -%destructor { delete $$; } -%destructor { $$->destroy(); } - -%printer { debug_stream() << *$$; } -%printer { print_psl(debug_stream(), formula($$->clone())); } -%printer { print_sere(debug_stream(), formula($$->clone())); } sere bracedsere -%printer { debug_stream() << $$; } -%printer { debug_stream() << $$.min << ".." << $$.max; } +%printer { debug_stream() << $$; } +%printer { print_psl(debug_stream(), $$.tmp()); } +%printer { print_sere(debug_stream(), $$.tmp()); } sere bracedsere +%printer { debug_stream() << $$; } +%printer { debug_stream() << $$.min << ".." 
<< $$.max; } %% result: START_LTL subformula END_OF_INPUT @@ -380,18 +509,19 @@ error_opt: %empty sqbkt_num: OP_SQBKT_NUM { - if ($1 >= fnode::unbounded()) + auto n = $1; + if (n >= fnode::unbounded()) { auto max = fnode::unbounded() - 1; std::ostringstream s; - s << $1 << " exceeds maximum supported repetition (" + s << n << " exceeds maximum supported repetition (" << max << ")"; error_list.emplace_back(@1, s.str()); $$ = max; } else { - $$ = $1; + $$ = n; } } @@ -484,10 +614,10 @@ delayargs: OP_DELAY_OPEN sqbracketargs atomprop: ATOMIC_PROP { - $$ = parse_ap(*$1, @1, parse_environment, error_list); - delete $1; - if (!$$) + auto* f = parse_ap($1, @1, parse_environment, error_list); + if (!f) YYERROR; + $$ = f; } booleanatom: atomprop @@ -504,13 +634,12 @@ booleanatom: atomprop sere: booleanatom | OP_NOT sere { - if (sere_ensure_bool($2, @2, "`!'", error_list)) + if (auto f = sere_ensure_bool($2, @2, "`!'", error_list)) { - $$ = fnode::unop(op::Not, $2); + $$ = fnode::unop(op::Not, f); } else { - $2->destroy(); $$ = error_false_block(@$, error_list); } } @@ -518,9 +647,8 @@ sere: booleanatom | PAR_BLOCK { $$ = - try_recursive_parse(*$1, @1, parse_environment, + try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; } @@ -543,134 +671,142 @@ sere: booleanatom $$ = fnode::ff(); } | sere OP_AND sere - { $$ = fnode::multop(op::AndRat, {$1, $3}); } + { $$ = pnode(op::AndRat, $1, $3); } | sere OP_AND error { missing_right_binop($$, $1, @2, "length-matching and operator"); } | sere OP_SHORT_AND sere - { $$ = fnode::multop(op::AndNLM, {$1, $3}); } + { $$ = pnode(op::AndNLM, $1, $3); } | sere OP_SHORT_AND error { missing_right_binop($$, $1, @2, "non-length-matching and operator"); } | sere OP_OR sere - { $$ = fnode::multop(op::OrRat, {$1, $3}); } + { $$ = pnode(op::OrRat, $1, $3); } | sere OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | sere OP_CONCAT sere - { $$ = fnode::multop(op::Concat, {$1, $3}); } + { $$ = pnode(op::Concat, $1, $3); } | sere OP_CONCAT error { missing_right_binop($$, $1, @2, "concat operator"); } | sere OP_FUSION sere - { $$ = fnode::multop(op::Fusion, {$1, $3}); } + { $$ = pnode(op::Fusion, $1, $3); } | sere OP_FUSION error { missing_right_binop($$, $1, @2, "fusion operator"); } | OP_DELAY_N sere - { $$ = formula::sugar_delay(formula($2), $1, $1).to_node_(); } + { unsigned n = $1; $$ = formula::sugar_delay(formula($2), n, n).to_node_(); } | OP_DELAY_N error { missing_right_binop($$, fnode::tt(), @1, "SVA delay operator"); } | sere OP_DELAY_N sere - { $$ = formula::sugar_delay(formula($1), formula($3), - $2, $2).to_node_(); } + { unsigned n = $2; + $$ = formula::sugar_delay(formula($1), formula($3), + n, n).to_node_(); } | sere OP_DELAY_N error { missing_right_binop($$, $1, @2, "SVA delay operator"); } | delayargs sere %prec OP_DELAY_OPEN { - if ($1.max < $1.min) + auto [min, max] = $1; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($1.max, $1.min); + std::swap(max, min); } $$ = formula::sugar_delay(formula($2), - $1.min, $1.max).to_node_(); + min, max).to_node_(); } | delayargs error { missing_right_binop($$, fnode::tt(), @1, "SVA delay operator"); } | sere delayargs sere %prec OP_DELAY_OPEN { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } $$ = formula::sugar_delay(formula($1), formula($3), - $2.min, $2.max).to_node_(); + min, max).to_node_(); } 
| sere delayargs error { missing_right_binop($$, $1, @2, "SVA delay operator"); } | starargs { - if ($1.max < $1.min) + auto [min, max] = $1; + if (max < min) { error_list.emplace_back(@1, "reversed range"); - std::swap($1.max, $1.min); + std::swap(max, min); } - $$ = fnode::bunop(op::Star, fnode::tt(), $1.min, $1.max); + $$ = fnode::bunop(op::Star, fnode::tt(), min, max); } | sere starargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - $$ = fnode::bunop(op::Star, $1, $2.min, $2.max); + $$ = fnode::bunop(op::Star, $1, min, max); } | sere fstarargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - $$ = fnode::bunop(op::FStar, $1, $2.min, $2.max); + $$ = fnode::bunop(op::FStar, $1, min, max); } | sere equalargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - if (sere_ensure_bool($1, @1, "[=...]", error_list)) + if (auto f = sere_ensure_bool($1, @1, "[=...]", error_list)) { - $$ = formula::sugar_equal(formula($1), - $2.min, $2.max).to_node_(); + $$ = formula::sugar_equal(formula(f), + min, max).to_node_(); } else { - $1->destroy(); $$ = error_false_block(@$, error_list); } } | sere gotoargs { - if ($2.max < $2.min) + auto [min, max] = $2; + if (max < min) { error_list.emplace_back(@2, "reversed range"); - std::swap($2.max, $2.min); + std::swap(max, min); } - if (sere_ensure_bool($1, @1, "[->...]", error_list)) + if (auto f = sere_ensure_bool($1, @1, "[->...]", error_list)) { - $$ = formula::sugar_goto(formula($1), - $2.min, $2.max).to_node_(); + $$ = formula::sugar_goto(formula(f), min, max).to_node_(); } else { - $1->destroy(); $$ = error_false_block(@$, error_list); } } | sere OP_XOR sere { - if (sere_ensure_bool($1, @1, "`^'", error_list) - && sere_ensure_bool($3, @3, "`^'", error_list)) + auto left = sere_ensure_bool($1, @1, "`^'", error_list); + auto right = sere_ensure_bool($3, @3, "`^'", error_list); + if (left && right) { - $$ = fnode::binop(op::Xor, $1, $3); + $$ = fnode::binop(op::Xor, left, right); } else { - $1->destroy(); - $3->destroy(); + if (left) + left->destroy(); + else if (right) + right->destroy(); $$ = error_false_block(@$, error_list); } } @@ -678,14 +814,13 @@ sere: booleanatom { missing_right_binop($$, $1, @2, "xor operator"); } | sere OP_IMPLIES sere { - if (sere_ensure_bool($1, @1, "`->'", error_list)) + auto left = sere_ensure_bool($1, @1, "`->'", error_list); + if (left) { - $$ = fnode::binop(op::Implies, $1, $3); + $$ = fnode::binop(op::Implies, left, $3); } else { - $1->destroy(); - $3->destroy(); $$ = error_false_block(@$, error_list); } } @@ -693,15 +828,18 @@ sere: booleanatom { missing_right_binop($$, $1, @2, "implication operator"); } | sere OP_EQUIV sere { - if (sere_ensure_bool($1, @1, "`<->'", error_list) - && sere_ensure_bool($3, @3, "`<->'", error_list)) + auto left = sere_ensure_bool($1, @1, "`<->'", error_list); + auto right = sere_ensure_bool($3, @3, "`<->'", error_list); + if (left && right) { - $$ = fnode::binop(op::Equiv, $1, $3); + $$ = fnode::binop(op::Equiv, left, right); } else { - $1->destroy(); - $3->destroy(); + if (left) + left->destroy(); + else if (right) + right->destroy(); $$ = error_false_block(@$, error_list); } } @@ -739,19 +877,17 @@ bracedsere: BRACE_OPEN sere 
BRACE_CLOSE } | BRA_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; } parenthesedsubformula: PAR_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_ltl, error_list); - delete $1; if (!$$) YYERROR; } @@ -786,10 +922,9 @@ parenthesedsubformula: PAR_BLOCK boolformula: booleanatom | PAR_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_bool, error_list); - delete $1; if (!$$) YYERROR; } @@ -821,19 +956,19 @@ boolformula: booleanatom $$ = fnode::ff(); } | boolformula OP_AND boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_AND error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_SHORT_AND boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_SHORT_AND error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_STAR boolformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | boolformula OP_STAR error { missing_right_binop($$, $1, @2, "and operator"); } | boolformula OP_OR boolformula - { $$ = fnode::multop(op::Or, {$1, $3}); } + { $$ = pnode(op::Or, $1, $3); } | boolformula OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | boolformula OP_XOR boolformula @@ -856,19 +991,19 @@ boolformula: booleanatom subformula: booleanatom | parenthesedsubformula | subformula OP_AND subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_AND error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_SHORT_AND subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_SHORT_AND error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_STAR subformula - { $$ = fnode::multop(op::And, {$1, $3}); } + { $$ = pnode(op::And, $1, $3); } | subformula OP_STAR error { missing_right_binop($$, $1, @2, "and operator"); } | subformula OP_OR subformula - { $$ = fnode::multop(op::Or, {$1, $3}); } + { $$ = pnode(op::Or, $1, $3); } | subformula OP_OR error { missing_right_binop($$, $1, @2, "or operator"); } | subformula OP_XOR subformula @@ -904,13 +1039,15 @@ subformula: booleanatom | OP_F error { missing_right_op($$, @1, "sometimes operator"); } | OP_FREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_FREP - { $$ = fnode::nested_unop_range(op::X, op::Or, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::Or, n, n, $4); error_list.emplace_back(@1 + @3, "F[n:m] expects two parameters"); } | OP_FREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_FREP - { $$ = fnode::nested_unop_range(op::strong_X, op::Or, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, op::Or, n, n, $4); error_list.emplace_back(@1 + @3, "F[n:m!] 
expects two parameters"); } @@ -966,14 +1103,16 @@ subformula: booleanatom { $$ = fnode::nested_unop_range(op::strong_X, op::And, $2, fnode::unbounded(), $5); } | OP_GREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_GREP - { $$ = fnode::nested_unop_range(op::X, op::And, $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::And, n, n, $4); error_list.emplace_back(@1 + @3, "G[n:m] expects two parameters"); } | OP_GREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_GREP - { $$ = fnode::nested_unop_range(op::strong_X, op::And, - $2, $2, $4); + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, op::And, + n, n, $4); error_list.emplace_back(@1 + @3, "G[n:m!] expects two parameters"); } @@ -1003,7 +1142,8 @@ subformula: booleanatom | OP_STRONG_X error { missing_right_op($$, @1, "strong next operator"); } | OP_XREP sqbkt_num OP_SQBKT_CLOSE subformula %prec OP_XREP - { $$ = fnode::nested_unop_range(op::X, op::Or, $2, $2, $4); } + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::X, op::Or, n, n, $4); } | OP_XREP sqbkt_num OP_SQBKT_CLOSE error { missing_right_op($$, @1 + @3, "X[.] operator"); } | OP_XREP error OP_SQBKT_CLOSE subformula %prec OP_XREP @@ -1013,8 +1153,9 @@ subformula: booleanatom { $$ = fnode::unop(op::strong_X, $3); } | OP_XREP sqbkt_num OP_SQBKT_STRONG_CLOSE subformula %prec OP_XREP - { $$ = fnode::nested_unop_range(op::strong_X, - op::Or, $2, $2, $4); } + { unsigned n = $2; + $$ = fnode::nested_unop_range(op::strong_X, + op::Or, n, n, $4); } | OP_XREP error OP_SQBKT_STRONG_CLOSE subformula %prec OP_XREP { error_list.emplace_back(@$, "treating this X[.!] as a simple X[!]"); $$ = fnode::unop(op::strong_X, $4); } @@ -1032,41 +1173,40 @@ subformula: booleanatom | bracedsere parenthesedsubformula { $$ = fnode::binop(op::UConcat, $1, $2); } | bracedsere OP_UCONCAT error - { missing_right_binop_hard($$, $1, @2, - "universal overlapping concat operator"); } + { missing_right_op($$, @2, + "universal overlapping concat operator"); } | bracedsere OP_ECONCAT subformula { $$ = fnode::binop(op::EConcat, $1, $3); } | bracedsere OP_ECONCAT error - { missing_right_binop_hard($$, $1, @2, - "existential overlapping concat operator"); + { missing_right_op($$, @2, + "existential overlapping concat operator"); } | bracedsere OP_UCONCAT_NONO subformula /* {SERE}[]=>EXP = {SERE;1}[]->EXP */ { $$ = fnode::binop(op::UConcat, - fnode::multop(op::Concat, {$1, fnode::tt()}), + pnode(op::Concat, $1, fnode::tt()), $3); } | bracedsere OP_UCONCAT_NONO error - { missing_right_binop_hard($$, $1, @2, - "universal non-overlapping concat operator"); + { missing_right_op($$, @2, + "universal non-overlapping concat operator"); } | bracedsere OP_ECONCAT_NONO subformula /* {SERE}<>=>EXP = {SERE;1}<>->EXP */ { $$ = fnode::binop(op::EConcat, - fnode::multop(op::Concat, {$1, fnode::tt()}), + pnode(op::Concat, $1, fnode::tt()), $3); } | bracedsere OP_ECONCAT_NONO error - { missing_right_binop_hard($$, $1, @2, - "existential non-overlapping concat operator"); + { missing_right_op($$, @2, + "existential non-overlapping concat operator"); } | BRACE_OPEN sere BRACE_BANG_CLOSE /* {SERE}! = {SERE} <>-> 1 */ { $$ = fnode::binop(op::EConcat, $2, fnode::tt()); } | BRA_BANG_BLOCK { - $$ = try_recursive_parse(*$1, @1, parse_environment, + $$ = try_recursive_parse($1, @1, parse_environment, debug_level(), parser_sere, error_list); - delete $1; if (!$$) YYERROR; $$ = fnode::binop(op::EConcat, $$, fnode::tt()); @@ -1076,9 +1216,9 @@ lbtformula: atomprop | '!' 
lbtformula { $$ = fnode::unop(op::Not, $2); } | '&' lbtformula lbtformula - { $$ = fnode::multop(op::And, {$2, $3}); } + { $$ = pnode(op::And, $2, $3); } | '|' lbtformula lbtformula - { $$ = fnode::multop(op::Or, {$2, $3}); } + { $$ = pnode(op::Or, $2, $3); } | '^' lbtformula lbtformula { $$ = fnode::binop(op::Xor, $2, $3); } | 'i' lbtformula lbtformula diff --git a/spot/parsetl/scantl.ll b/spot/parsetl/scantl.ll index 34fbfef32..33667a849 100644 --- a/spot/parsetl/scantl.ll +++ b/spot/parsetl/scantl.ll @@ -27,12 +27,15 @@ %option never-interactive %top{ -#include "config.h" +#include "libc-config.h" /* Flex 2.6.4's test for relies on __STDC_VERSION__ which is undefined in C++. So without that, it will define - its own integer types, including a broken SIZE_MAX definition. - So let's define __STDC_VERSION__ to make sure gets - included. */ + its own integer types, including a broken SIZE_MAX definition that + breaks compilation on OpenBSD. So let's define __STDC_VERSION__ to + make sure gets included. Redefining __STDC_VERSION__ + this way can break all sort of macros defined in , so + we include "libc-config.h" instead of "config.h" above to define + those macros first. */ #if HAVE_INTTYPES_H && !(defined __STDC_VERSION__) # define __STDC_VERSION__ 199901L #endif @@ -130,26 +133,26 @@ eol2 (\n\r)+|(\r\n)+ recursively. */ BEGIN(in_par); parent_level = 1; - yylval->str = new std::string(); + yylval->emplace(); } { "(" { ++parent_level; - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } ")" { if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::PAR_BLOCK; } } - [^()]+ yylval->str->append(yytext, yyleng); + [^()]+ yylval->as().append(yytext, yyleng); <> { unput(')'); if (!missing_parent) @@ -172,38 +175,38 @@ eol2 (\n\r)+|(\r\n)+ recursively. */ BEGIN(in_bra); parent_level = 1; - yylval->str = new std::string(); + yylval->emplace(); } { "{" { ++parent_level; - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } "}"[ \t]*"!" 
{ if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::BRA_BANG_BLOCK; } } "}" { if (--parent_level) { - yylval->str->append(yytext, yyleng); + yylval->as().append(yytext, yyleng); } else { BEGIN(not_prop); - spot::trim(*yylval->str); + spot::trim(yylval->as()); return token::BRA_BLOCK; } } - [^{}]+ yylval->str->append(yytext, yyleng); + [^{}]+ yylval->as().append(yytext, yyleng); <> { unput('}'); if (!missing_parent) @@ -231,35 +234,36 @@ eol2 (\n\r)+|(\r\n)+ /* SVA operators */ "##"[0-9] { - yylval->num = yytext[2] - '0'; + yylval->emplace(yytext[2] - '0'); return token::OP_DELAY_N; } "##"[0-9][0-9] { - yylval->num = - yytext[2] * 10 + yytext[3] - '0' * 11; + yylval->emplace(yytext[2] * 10 + + yytext[3] + - '0' * 11); return token::OP_DELAY_N; } "##"[0-9]{3,} { errno = 0; unsigned long n = strtoul(yytext + 2, 0, 10); - yylval->num = n; - if (errno || yylval->num != n) + yylval->emplace(n); + if (errno || yylval->as() != n) { error_list.push_back( spot::one_parse_error(*yylloc, "value too large ignored")); - yylval->num = 1; + yylval->emplace(1); } - if (yylval->num >= spot::fnode::unbounded()) + if (yylval->as() >= spot::fnode::unbounded()) { auto max = spot::fnode::unbounded() - 1; std::ostringstream s; - s << yylval->num + s << yylval->as() << (" exceeds maximum supported " "repetition (") << max << ")"; error_list.emplace_back(*yylloc, s.str()); - yylval->num = max; + yylval->emplace(max); } return token::OP_DELAY_N; } @@ -288,8 +292,8 @@ eol2 (\n\r)+|(\r\n)+ [0-9]+ { errno = 0; unsigned long n = strtoul(yytext, 0, 10); - yylval->num = n; - if (errno || yylval->num != n) + yylval->emplace(n); + if (errno || yylval->as() != n) { error_list.push_back( spot::one_parse_error(*yylloc, @@ -380,7 +384,7 @@ eol2 (\n\r)+|(\r\n)+ */ [a-zA-EH-LN-QSTYZ_.][a-zA-EH-WYZ0-9_.]* | [a-zA-EH-LN-QSTYZ_.][a-zA-EH-WYZ0-9_.][a-zA-Z0-9_.]* { - yylval->str = new std::string(yytext, yyleng); + yylval->emplace(yytext, yyleng); BEGIN(not_prop); return token::ATOMIC_PROP; } @@ -401,7 +405,7 @@ eol2 (\n\r)+|(\r\n)+ { \" { BEGIN(orig_cond); - yylval->str = new std::string(s); + yylval->emplace(s); return token::ATOMIC_PROP; } {eol} { @@ -419,7 +423,7 @@ eol2 (\n\r)+|(\r\n)+ spot::one_parse_error(*yylloc, "unclosed string")); BEGIN(orig_cond); - yylval->str = new std::string(s); + yylval->emplace(s); return token::ATOMIC_PROP; } } @@ -430,7 +434,7 @@ eol2 (\n\r)+|(\r\n)+ for compatibility with ltl2dstar we also accept any alphanumeric string that is not an operator. */ [a-zA-Z._][a-zA-Z0-9._]* { - yylval->str = new std::string(yytext, yyleng); + yylval->emplace(yytext, yyleng); return token::ATOMIC_PROP; } diff --git a/spot/priv/Makefile.am b/spot/priv/Makefile.am index d4e9cc77c..9a23caaa3 100644 --- a/spot/priv/Makefile.am +++ b/spot/priv/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2013-2019, 2021 Laboratoire de Recherche et +## Copyright (C) 2013-2019, 2021-2023 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## ## This file is part of Spot, a model checking library. 
@@ -24,11 +24,12 @@ AM_CXXFLAGS = $(WARNING_CXXFLAGS)
 noinst_LTLIBRARIES = libpriv.la
 libpriv_la_SOURCES = \
   accmap.hh \
-  allocator.hh \
   bddalloc.cc \
   bddalloc.hh \
   freelist.cc \
   freelist.hh \
+  partitioned_relabel.cc \
+  partitioned_relabel.hh \
   robin_hood.hh \
   satcommon.hh\
   satcommon.cc\
@@ -42,5 +43,11 @@ RH = $(GH)/robin-hood-hashing/master/src/include/robin_hood.h
 .PHONY: update
 update:
 	wget $(RH) -O robin_hood.tmp || curl $(RH) -o robin_hood.tmp
-	sed 's/std::malloc/malloc/' robin_hood.tmp > $(srcdir)/robin_hood.hh
+## Do not use std::malloc but malloc, because gnulib may replace it by
+## rpl_malloc instead.  Also disable the tests of __GNUC__ about
+## ROBIN_HOOD_IS_TRIVIALLY_COPYABLE because (1) all versions of G++ we
+## support have std::is_trivially_copyable, and (2) clang defines
+## __GNUC__ to some value that fails this test, and then warns that
+## __has_trivial_copy is obsolete.
+	sed 's/std::malloc/malloc/;/https:\/\/stackoverflow.com\/a\/31798726/{n;s/defined.*/false/}' robin_hood.tmp > $(srcdir)/robin_hood.hh
 	rm -f robin_hood.tmp
diff --git a/spot/priv/allocator.hh b/spot/priv/allocator.hh
deleted file mode 100644
index 9c3d50268..000000000
--- a/spot/priv/allocator.hh
+++ /dev/null
@@ -1,104 +0,0 @@
-// -*- coding: utf-8 -*-
-// Copyright (C) 2011, 2015-2018 Laboratoire de Recherche et
-// Développement de l'Epita (LRDE)
-//
-// This file is part of Spot, a model checking library.
-//
-// Spot is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 3 of the License, or
-// (at your option) any later version.
-//
-// Spot is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
-// License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-#pragma once
-
-#include
-
-namespace spot
-{
-  /// An allocator to be used with STL containers.
-  /// It uses a spot::fixed_size_pool to handle memory.
-  /// It is intended to improve performance and locality of node-based
-  /// containers (std::{unordered}{multi}{set,map}).
-  /// It is geared towards efficiently allocating memory for one object at a
-  /// time (the nodes of the node-based containers).  Larger allocations are
-  /// served by calling the global memory allocation mechanism (::operator new).
-  /// Using it for contiguous containers (such as std::vector or std::deque)
-  /// will be less efficient than using the default std::allocator.
-  ///
-  /// Short reminder on STL concept of Allocator:
-  ///   allocate() may throw
-  ///   deallocate() must not throw
-  ///   equality testing (i.e. == and !=) must not throw
-  ///   copying allocator (constructor and assignment) must not throw
-  ///   moving allocator (constructor and assignment) must not throw
-  ///
-  /// WARNING this class is NOT thread-safe: the allocator relies on a static
-  /// fixed_size_pool (which is not thread-safe either).
- template - class pool_allocator - { - static - fixed_size_pool& - pool() - { - static fixed_size_pool p = - fixed_size_pool(sizeof(T)); - return p; - } - - public: - using value_type = T; - using pointer = value_type*; - using const_pointer = const value_type*; - using size_type = size_t; - - constexpr pool_allocator() noexcept - {} - template - constexpr pool_allocator(const pool_allocator&) noexcept - {} - - template - struct rebind - { - using other = pool_allocator; - }; - - pointer - allocate(size_type n) - { - if (SPOT_LIKELY(n == 1)) - return static_cast(pool().allocate()); - else - return static_cast(::operator new(n*sizeof(T))); - } - - void - deallocate(pointer ptr, size_type n) noexcept - { - if (SPOT_LIKELY(n == 1)) - pool().deallocate(static_cast(ptr)); - else - ::operator delete(ptr); - } - - bool - operator==(const pool_allocator&) const noexcept - { - return true; - } - bool - operator!=(const pool_allocator& o) const noexcept - { - return !(this->operator==(o)); - } - }; -} diff --git a/spot/priv/partitioned_relabel.cc b/spot/priv/partitioned_relabel.cc new file mode 100644 index 000000000..f28ea5554 --- /dev/null +++ b/spot/priv/partitioned_relabel.cc @@ -0,0 +1,147 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche +// de l'Epita (LRE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" + +#include + + +relabeling_map +bdd_partition::to_relabeling_map(twa_graph& for_me) const +{ + relabeling_map res; + // Change to unordered_map? + bdd_dict_ptr bdddict = for_me.get_dict(); + + bool use_inner = ig->state_storage(0).new_label != bddfalse; + std::vector doskip + = use_inner ? std::vector(ig->num_states(), false) + : std::vector(); + + auto bdd2form = [&bdddict](const bdd& cond) + { + return bdd_to_formula(cond, bdddict); + }; + + for (const auto& [old_letter, s] : treated) + { + formula new_letter_form = bdd2form(ig->state_storage(s).new_label); + assert(res.count(new_letter_form) == 0); + if (use_inner) + doskip[s] = true; + res[new_letter_form] = bdd2form(old_letter); + } + + if (use_inner) + { + // This implies that the split option was false, + // so we can store further info + auto& all_cond = *all_cond_ptr; + const unsigned Norig = all_cond.size(); + + for (unsigned i = 0; i < Norig; ++i) + { + // Internal node -> new ? 
+ if (doskip[i]) + continue; + // Leave node -> already exists + if (ig->state_storage(i).succ == 0) + continue; + doskip[i] = true; + formula new_letter_form + = bdd2form(ig->state_storage(i).new_label); +#ifdef NDEBUG + res[new_letter_form] = bdd2form(all_cond[i]); +#else + // Check if they are the same + formula old_form = bdd2form(all_cond[i]); + if (res.count(new_letter_form) == 0) + res[new_letter_form] = old_form; + else + assert(res[new_letter_form] == old_form); +#endif + } + } + return res; +} + +/// \brief Tries to partition the given condition vector \a all_cond +/// abandons at \a max_letter. +/// \return The corresponding bdd_partition +/// \note A pointer to all_cond is captured internally, it needs +/// to outlive the returned bdd_partition +bdd_partition +try_partition_me(const std::vector& all_cond, unsigned max_letter) +{ + // We create vector that will be succesively filled. + // Each entry corresponds to a "letter", of the partition + const size_t Norig = all_cond.size(); + + bdd_partition result(all_cond); + + auto& treated = result.treated; + auto& ig = *result.ig; + + for (unsigned io = 0; io < Norig; ++io) + { + bdd cond = all_cond[io]; + const auto Nt = treated.size(); + for (size_t in = 0; in < Nt; ++in) + { + if (cond == bddfalse) + break; + if (treated[in].first == cond) + { + // Found this very condition -> make transition + ig.new_edge(io, treated[in].second); + cond = bddfalse; + break; + } + if (bdd_have_common_assignment(treated[in].first, cond)) + { + bdd inter = treated[in].first & cond; + // Create two new states + unsigned ssplit = ig.new_states(2); + // ssplit becomes the state without the intersection + // ssplit + 1 becomes the intersection + // Both of them are implied by the original node, + // Only inter is implied by the current letter + ig.new_edge(treated[in].second, ssplit); + ig.new_edge(treated[in].second, ssplit+1); + ig.new_edge(io, ssplit+1); + treated.emplace_back(inter, ssplit+1); + // Update + cond -= inter; + treated[in].first -= inter; + treated[in].second = ssplit; + if (treated.size() > max_letter) + return bdd_partition{}; + } + } + if (cond != bddfalse) + { + unsigned sc = ig.new_state(); + treated.emplace_back(cond, sc); + ig.new_edge(io, sc); + } + } + + result.relabel_succ = true; + return result; +} \ No newline at end of file diff --git a/spot/priv/partitioned_relabel.hh b/spot/priv/partitioned_relabel.hh new file mode 100644 index 000000000..cd19ffaea --- /dev/null +++ b/spot/priv/partitioned_relabel.hh @@ -0,0 +1,81 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022 Laboratoire de Recherche +// de l'Epita (LRE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
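The partition refinement done by try_partition_me above can be summarized with a small,
self-contained analogue (an editor's sketch, not part of the patch): conditions are encoded
as bitmask sets instead of BDDs, the implication graph is omitted, and all identifiers are
illustrative. Each incoming condition is intersected with the letters collected so far;
any overlapping letter is split, and whatever remains of the condition becomes a fresh
letter, so the result is a set of pairwise-disjoint letters covering every input condition.

#include <cstdint>
#include <cstdio>
#include <vector>

// Refine possibly-overlapping conditions into pairwise-disjoint letters,
// giving up (as the patch does) once more than max_letters are produced.
static std::vector<std::uint64_t>
partition(const std::vector<std::uint64_t>& conds, unsigned max_letters)
{
  std::vector<std::uint64_t> letters;        // plays the role of `treated`
  for (std::uint64_t cond : conds)
    {
      std::uint64_t rest = cond;
      for (std::size_t i = 0, n = letters.size(); i < n && rest; ++i)
        if (std::uint64_t inter = letters[i] & rest)
          {
            if (inter != letters[i])         // split the existing letter
              {
                letters[i] &= ~inter;
                letters.push_back(inter);
              }
            rest &= ~inter;                  // that overlap is now covered
            if (letters.size() > max_letters)
              return {};                     // abandon the partition
          }
      if (rest)
        letters.push_back(rest);             // genuinely new letter
    }
  return letters;
}

int main()
{
  // {a}, {a,b}, {b,c} refine into the disjoint letters {a}, {b}, {c}.
  for (std::uint64_t l : partition({0b001, 0b011, 0b110}, 8))
    std::printf("%#llx\n", static_cast<unsigned long long>(l));
}
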
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + + +using namespace spot; + +struct bdd_partition +{ + struct S + { + bdd new_label = bddfalse; + }; + struct T + { + }; + using implication_graph = digraph; + + // A pointer to the conditions to be partitioned + const std::vector* all_cond_ptr; + // Graph with the invariant that + // children imply parents + // Leaves from the partition + // original conditions are "root" nodes + std::unique_ptr ig; + // todo: technically there are at most two successors, so a graph + // is "too" generic + // All conditions currently part of the partition + // unsigned corresponds to the associated node + std::vector> treated; + std::vector new_aps; + bool relabel_succ = false; + + bdd_partition() = default; + bdd_partition(const std::vector& all_cond) + : all_cond_ptr(&all_cond) + , ig{std::make_unique(2*all_cond.size(), + 2*all_cond.size())} + { + // Create the roots of all old conditions + // Each condition is associated to the state with + // the same index + const unsigned Norig = all_cond.size(); + ig->new_states(Norig); + } + + // Facilitate conversion + // This can only be called when letters have already + // been computed + relabeling_map + to_relabeling_map(twa_graph& for_me) const; +}; // bdd_partition + + +bdd_partition +try_partition_me(const std::vector& all_cond, unsigned max_letter); \ No newline at end of file diff --git a/spot/priv/robin_hood.hh b/spot/priv/robin_hood.hh index 8c151d517..a4bc8beae 100644 --- a/spot/priv/robin_hood.hh +++ b/spot/priv/robin_hood.hh @@ -36,7 +36,7 @@ // see https://semver.org/ #define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes #define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner -#define ROBIN_HOOD_VERSION_PATCH 3 // for backwards-compatible bug fixes +#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes #include #include @@ -206,7 +206,7 @@ static Counts& counts() { // workaround missing "is_trivially_copyable" in g++ < 5.0 // See https://stackoverflow.com/a/31798726/48181 -#if defined(__GNUC__) && __GNUC__ < 5 +#if false # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) #else # define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value @@ -1820,6 +1820,12 @@ public: InsertionState::key_found != idxAndState.second); } + template + iterator emplace_hint(const_iterator position, Args&&... args) { + (void)position; + return emplace(std::forward(args)...).first; + } + template std::pair try_emplace(const key_type& key, Args&&... args) { return try_emplace_impl(key, std::forward(args)...); @@ -1831,16 +1837,15 @@ public: } template - std::pair try_emplace(const_iterator hint, const key_type& key, - Args&&... args) { + iterator try_emplace(const_iterator hint, const key_type& key, Args&&... args) { (void)hint; - return try_emplace_impl(key, std::forward(args)...); + return try_emplace_impl(key, std::forward(args)...).first; } template - std::pair try_emplace(const_iterator hint, key_type&& key, Args&&... args) { + iterator try_emplace(const_iterator hint, key_type&& key, Args&&... 
args) { (void)hint; - return try_emplace_impl(std::move(key), std::forward(args)...); + return try_emplace_impl(std::move(key), std::forward(args)...).first; } template @@ -1854,16 +1859,15 @@ public: } template - std::pair insert_or_assign(const_iterator hint, const key_type& key, - Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(key, std::forward(obj)); + return insertOrAssignImpl(key, std::forward(obj)).first; } template - std::pair insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { + iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { (void)hint; - return insertOrAssignImpl(std::move(key), std::forward(obj)); + return insertOrAssignImpl(std::move(key), std::forward(obj)).first; } std::pair insert(const value_type& keyval) { @@ -1871,10 +1875,20 @@ public: return emplace(keyval); } + iterator insert(const_iterator hint, const value_type& keyval) { + (void)hint; + return emplace(keyval).first; + } + std::pair insert(value_type&& keyval) { return emplace(std::move(keyval)); } + iterator insert(const_iterator hint, value_type&& keyval) { + (void)hint; + return emplace(std::move(keyval)).first; + } + // Returns 1 if key is found, 0 otherwise. size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) ROBIN_HOOD_TRACE(this) @@ -2308,13 +2322,14 @@ private: auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); - // calloc also zeroes everything + // malloc & zero mInfo. Faster than calloc everything. auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" << numElementsWithBuffer << ")") mKeyVals = reinterpret_cast( - detail::assertNotNull(std::calloc(1, numBytesTotal))); + detail::assertNotNull(malloc(numBytesTotal))); mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node)); // set sentinel mInfo[numElementsWithBuffer] = 1; diff --git a/spot/priv/satcommon.cc b/spot/priv/satcommon.cc index 21d75eee1..aec73d104 100644 --- a/spot/priv/satcommon.cc +++ b/spot/priv/satcommon.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
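The hinted overloads added to the bundled robin_hood map above mirror the standard
associative-container interface, where emplace_hint, the hinted try_emplace, and the hinted
insert_or_assign return an iterator rather than a pair. A minimal usage sketch (an editor's
illustration, not part of the patch; the include path is assumed):

#include <cstdio>
#include <string>
#include "robin_hood.hh"   // the bundled header patched above (path illustrative)

int main()
{
  robin_hood::unordered_map<std::string, int> m;
  auto hint = m.end();
  // Hinted insertion now returns an iterator, as with std::unordered_map.
  auto it = m.emplace_hint(hint, "answer", 42);
  // The key already exists, so the mapped value stays 42.
  it = m.try_emplace(hint, std::string("answer"), 0);
  std::printf("%s=%d\n", it->first.c_str(), it->second);   // answer=42
}
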
@@ -167,7 +167,7 @@ namespace spot return; std::ofstream out(log, std::ios_base::ate | std::ios_base::app); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); if (out.tellp() == 0) out << ("input.states,target.states,reachable.states,edges,transitions," diff --git a/spot/tl/Makefile.am b/spot/tl/Makefile.am index b7362ae99..bd7516c30 100644 --- a/spot/tl/Makefile.am +++ b/spot/tl/Makefile.am @@ -28,9 +28,11 @@ tl_HEADERS = \ contain.hh \ declenv.hh \ defaultenv.hh \ + derive.hh \ dot.hh \ environment.hh \ exclusive.hh \ + expansions.hh \ formula.hh \ hierarchy.hh \ length.hh \ @@ -44,6 +46,7 @@ tl_HEADERS = \ remove_x.hh \ simplify.hh \ snf.hh \ + sonf.hh \ unabbrev.hh noinst_LTLIBRARIES = libtl.la @@ -52,8 +55,10 @@ libtl_la_SOURCES = \ contain.cc \ declenv.cc \ defaultenv.cc \ + derive.cc \ dot.cc \ exclusive.cc \ + expansions.cc \ formula.cc \ hierarchy.cc \ length.cc \ @@ -68,4 +73,5 @@ libtl_la_SOURCES = \ remove_x.cc \ simplify.cc \ snf.cc \ + sonf.cc \ unabbrev.cc diff --git a/spot/tl/derive.cc b/spot/tl/derive.cc new file mode 100644 index 000000000..5e8526eec --- /dev/null +++ b/spot/tl/derive.cc @@ -0,0 +1,589 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + static std::pair + first(formula f, const bdd_dict_ptr& d, void* owner) + { + if (f.is_boolean()) + { + bdd res = formula_to_bdd(f, d, owner); + return { res, bdd_support(res) }; + } + + switch (f.kind()) + { + // handled by is_boolean above + case op::ff: + case op::tt: + case op::ap: + case op::And: + case op::Or: + SPOT_UNREACHABLE(); + + case op::eword: + return { bddfalse, bddtrue }; + + case op::OrRat: + { + bdd res = bddfalse; + bdd support = bddtrue; + for (auto subformula : f) + { + auto [r, sup] = first(subformula, d, owner); + res |= r; + support &= sup; + } + return { res, support }; + } + + case op::AndRat: + { + bdd res = bddtrue; + bdd support = bddtrue; + for (auto subformula : f) + { + auto [r, sup] = first(subformula, d, owner); + res &= r; + support &= sup; + } + return { res, support }; + } + + case op::AndNLM: + return first(rewrite_and_nlm(f), d, owner); + + case op::Concat: + { + auto [res, support] = first(f[0], d, owner); + + if (f[0].accepts_eword()) + { + auto [r, sup] = first(f.all_but(0), d, owner); + res |= r; + support &= sup; + } + + return { res, support }; + } + + case op::Fusion: + { + auto [res, support] = first(f[0], d, owner); + + // this should be computed only if f[0] recognizes words of size 1 + // or accepts eword ? 
+ auto p = first(f.all_but(0), d, owner); + + return { res, support & p.second }; + } + + case op::Star: + case op::first_match: + return first(f[0], d, owner); + + case op::FStar: + { + auto [res, support] = first(f[0], d, owner); + + if (f.min() == 0) + res = bddtrue; + + return { res, support }; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return { bddfalse, bddtrue }; + } + + static std::vector + formula_aps(formula f) + { + auto res = std::unordered_set(); + + f.traverse([&res](formula f) + { + if (f.is(op::ap)) + { + res.insert(f.ap_name()); + return true; + } + + return false; + }); + + return std::vector(res.begin(), res.end()); + } + } + + formula + rewrite_and_nlm(formula f) + { + unsigned s = f.size(); + std::vector final; + std::vector non_final; + + for (auto g: f) + if (g.accepts_eword()) + final.emplace_back(g); + else + non_final.emplace_back(g); + + if (non_final.empty()) + // (a* & b*);c = (a*|b*);c + return formula::OrRat(std::move(final)); + if (!final.empty()) + { + // let F_i be final formulae + // N_i be non final formula + // (F_1 & ... & F_n & N_1 & ... & N_m) + // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) + // | (F_1 | ... | F_n) && (N_1 & ... & N_m);[*] + formula f = formula::OrRat(std::move(final)); + formula n = formula::AndNLM(std::move(non_final)); + formula t = formula::one_star(); + formula ft = formula::Concat({f, t}); + formula nt = formula::Concat({n, t}); + formula ftn = formula::AndRat({ft, n}); + formula fnt = formula::AndRat({f, nt}); + return formula::OrRat({ftn, fnt}); + } + // No final formula. + // Translate N_1 & N_2 & ... & N_n into + // N_1 && (N_2;[*]) && ... && (N_n;[*]) + // | (N_1;[*]) && N_2 && ... && (N_n;[*]) + // | (N_1;[*]) && (N_2;[*]) && ... && N_n + formula star = formula::one_star(); + std::vector disj; + for (unsigned n = 0; n < s; ++n) + { + std::vector conj; + for (unsigned m = 0; m < s; ++m) + { + formula g = f[m]; + if (n != m) + g = formula::Concat({g, star}); + conj.emplace_back(g); + } + disj.emplace_back(formula::AndRat(std::move(conj))); + } + return formula::OrRat(std::move(disj)); + } + + twa_graph_ptr + derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic, derive_opts options) + { + auto aut = make_twa_graph(bdd_dict); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula derivative) -> unsigned + { + unsigned dst; + auto it = formula2state.find(derivative); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({derivative, dst}); + formula2state.insert({derivative, dst}); + std::ostringstream ss; + ss << derivative; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark = curr_f.accepts_eword() + ? 
acc_mark + : acc_cond::mark_t(); + + auto [firsts, firsts_support] = first(curr_f, bdd_dict, aut.get()); + for (const bdd one : minterms_of(firsts, firsts_support)) + { + formula derivative = + partial_derivation(curr_f, one, bdd_dict, aut.get(), options); + + // no transition possible from this letter + if (derivative.is(op::ff)) + continue; + + // either the formula isn't an OrRat, or if it is we consider it as + // as a whole to get a deterministic automaton + if (deterministic || !derivative.is(op::OrRat)) + { + auto dst = find_dst(derivative); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + continue; + } + + // formula is an OrRat and we want a non deterministic automaton, + // so consider each child as a destination + for (const auto& subformula : derivative) + { + auto dst = find_dst(subformula); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + } + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + + aut->merge_edges(); + + return aut; + } + + twa_graph_ptr + derive_finite_automaton(formula f, bool deterministic) + { + auto bdd_dict = make_bdd_dict(); + auto aut = make_twa_graph(bdd_dict); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + + formula2state.insert({ f, init_state }); + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + bdd all_aps = aut->ap_vars(); + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula derivative) -> unsigned + { + unsigned dst; + auto it = formula2state.find(derivative); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + dst = aut->new_state(); + todo.push_back({derivative, dst}); + formula2state.insert({derivative, dst}); + std::ostringstream ss; + ss << derivative; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark = curr_f.accepts_eword() + ? 
acc_mark + : acc_cond::mark_t(); + + for (const bdd one : minterms_of(bddtrue, all_aps)) + { + formula derivative = + partial_derivation(curr_f, one, bdd_dict, aut.get()); + + // no transition possible from this letter + if (derivative.is(op::ff)) + continue; + + // either the formula isn't an OrRat, or if it is we consider it as + // as a whole to get a deterministic automaton + if (deterministic || !derivative.is(op::OrRat)) + { + auto dst = find_dst(derivative); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + continue; + } + + // formula is an OrRat and we want a non deterministic automaton, + // so consider each child as a destination + for (const auto& subformula : derivative) + { + auto dst = find_dst(subformula); + aut->new_edge(curr_state, dst, one, curr_acc_mark); + } + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + + aut->merge_edges(); + + return aut; + } + + twa_graph_ptr + derive_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic) + { + auto finite = derive_finite_automaton_with_first(f, bdd_dict, + deterministic); + + return from_finite(finite); + } + + twa_graph_ptr + derive_automaton(formula f, bool deterministic) + { + auto finite = derive_finite_automaton(f, deterministic); + + return from_finite(finite); + } + + formula + partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, + void* owner, derive_opts options) + { + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (bdd_implies(var, f_bdd)) + return formula::eword(); + + return formula::ff(); + } + + switch (f.kind()) + { + // handled by is_boolean above + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + return formula::ff(); + + // d(E.F) = { d(E).F } U { c(E).d(F) } + case op::Concat: + { + formula E = f[0]; + formula F = f.all_but(0); + + formula d_E = partial_derivation(E, var, d, owner, options); + + formula res; + + if (options.concat_star_distribute && d_E.is(op::OrRat)) + { + std::vector distributed; + for (auto g : d_E) + { + distributed.push_back(formula::Concat({g, F})); + } + + res = formula::OrRat(distributed); + } + else + { + res = + formula::Concat({ partial_derivation(E, var, d, owner, options), F }); + } + + + if (E.accepts_eword()) + res = formula::OrRat( + { res, partial_derivation(F, var, d, owner, options) }); + + return res; + } + + // d(E*) = d(E).E* + // d(E[*i..j]) = d(E).E[*(i-1)..(j-1)] + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + formula d_E = partial_derivation(f[0], var, d, owner, options); + + if (options.concat_star_distribute && !f[0].is_finite() && d_E.is(op::OrRat)) + { + std::vector distributed; + for (auto g : d_E) + { + distributed.push_back(formula::Concat({g, formula::Star(f[0], min, max)})); + } + + return formula::OrRat(distributed); + } + + return formula::Concat({ d_E, formula::Star(f[0], min, max) }); + } + + // d(E[:*i..j]) = E:E[:*(i-1)..(j-1)] + (eword if i == 0 or c(d(E))) + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return formula::tt(); + + auto d_E = partial_derivation(E, var, d, owner, options); + + auto min = f.min() == 0 ? 
0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto results = std::vector(); + + auto E_i_j_minus = formula::FStar(E, min, max); + results.push_back(formula::Fusion({ d_E, E_i_j_minus })); + + if (d_E.accepts_eword()) + results.push_back(d_E); + + if (f.min() == 0) + results.push_back(formula::eword()); + + return formula::OrRat(std::move(results)); + } + + // d(E && F) = d(E) && d(F) + // d(E + F) = {d(E)} U {d(F)} + case op::AndRat: + case op::OrRat: + { + std::vector subderivations; + for (auto subformula : f) + { + auto subderivation = + partial_derivation(subformula, var, d, owner, options); + subderivations.push_back(subderivation); + } + return formula::multop(f.kind(), std::move(subderivations)); + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + return partial_derivation(rewrite, var, d, owner, options); + } + + // d(E:F) = {d(E):F} U {c(d(E)).d(F)} + case op::Fusion: + { + formula E = f[0]; + formula F = f.all_but(0); + + auto d_E = partial_derivation(E, var, d, owner, options); + auto res = formula::Fusion({ d_E, F }); + + if (d_E.accepts_eword()) + res = + formula::OrRat({ res, partial_derivation(F, var, d, owner, options) }); + + return res; + } + + case op::first_match: + { + formula E = f[0]; + auto d_E = partial_derivation(E, var, d, owner, options); + // if d_E.accepts_eword(), first_match(d_E) will return eword + return formula::first_match(d_E); + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + return formula::ff(); + } +} diff --git a/spot/tl/derive.hh b/spot/tl/derive.hh new file mode 100644 index 000000000..993db2ed2 --- /dev/null +++ b/spot/tl/derive.hh @@ -0,0 +1,60 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
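The partial-derivative API declared just below can be exercised end to end. The following is
a hypothetical usage sketch (an editor's addition, not part of the patch): it parses a SERE,
builds the BDD of one letter, and prints the derivative. Everything it calls is used elsewhere
in this patch except spot::parse_infix_sere and the include paths, which are assumed from
Spot's existing API. By the Concat rule implemented above, deriving a;b[*] by the letter a
should yield b[*].

#include <iostream>
#include <spot/tl/derive.hh>
#include <spot/tl/parse.hh>
#include <spot/tl/print.hh>
#include <spot/tl/formula2bdd.hh>

int main()
{
  spot::formula f = spot::parse_infix_sere("a;b[*]").f;
  spot::bdd_dict_ptr dict = spot::make_bdd_dict();
  int owner = 0;                  // any address may own the BDD variables
  bdd a = spot::formula_to_bdd(spot::formula::ap("a"), dict, &owner);
  // d(a;b[*]) with respect to the letter a: expected to print b[*].
  std::cout << spot::partial_derivation(f, a, dict, &owner) << '\n';
  dict->unregister_all_my_variables(&owner);
}
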
+ +#pragma once + +#include + +#include + +#include +#include +#include + +namespace spot +{ + + struct derive_opts + { + bool concat_star_distribute = true; + }; + + /// \ingroup tl_misc + /// \brief Produce a SERE formula's partial derivative + SPOT_API formula + partial_derivation(formula f, const bdd var, const bdd_dict_ptr& d, + void* owner, derive_opts options = {}); + + SPOT_API twa_graph_ptr + derive_automaton(formula f, bool deterministic = true); + + SPOT_API twa_graph_ptr + derive_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic = true); + + SPOT_API twa_graph_ptr + derive_finite_automaton(formula f, bool deterministic = true); + + SPOT_API twa_graph_ptr + derive_finite_automaton_with_first(formula f, bdd_dict_ptr bdd_dict, + bool deterministic = true, derive_opts options = {}); + + SPOT_API formula + rewrite_and_nlm(formula f); +} diff --git a/spot/tl/expansions.cc b/spot/tl/expansions.cc new file mode 100644 index 000000000..3a0a7800e --- /dev/null +++ b/spot/tl/expansions.cc @@ -0,0 +1,878 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + // FIXME: could probably just return a map directly + static std::vector + formula_aps(formula f) + { + auto res = std::unordered_set(); + + f.traverse([&res](formula f) + { + if (f.is(op::ap)) + { + res.insert(f.ap_name()); + return true; + } + + return false; + }); + + return std::vector(res.begin(), res.end()); + } + + formula + rewrite_and_nlm(formula f) + { + unsigned s = f.size(); + std::vector final; + std::vector non_final; + + for (auto g: f) + if (g.accepts_eword()) + final.emplace_back(g); + else + non_final.emplace_back(g); + + if (non_final.empty()) + // (a* & b*);c = (a*|b*);c + return formula::OrRat(std::move(final)); + if (!final.empty()) + { + // let F_i be final formulae + // N_i be non final formula + // (F_1 & ... & F_n & N_1 & ... & N_m) + // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) + // | (F_1 | ... | F_n) && (N_1 & ... & N_m);[*] + formula f = formula::OrRat(std::move(final)); + formula n = formula::AndNLM(std::move(non_final)); + formula t = formula::one_star(); + formula ft = formula::Concat({f, t}); + formula nt = formula::Concat({n, t}); + formula ftn = formula::AndRat({ft, n}); + formula fnt = formula::AndRat({f, nt}); + return formula::OrRat({ftn, fnt}); + } + // No final formula. + // Translate N_1 & N_2 & ... & N_n into + // N_1 && (N_2;[*]) && ... && (N_n;[*]) + // | (N_1;[*]) && N_2 && ... && (N_n;[*]) + // | (N_1;[*]) && (N_2;[*]) && ... 
&& N_n + formula star = formula::one_star(); + std::vector disj; + for (unsigned n = 0; n < s; ++n) + { + std::vector conj; + for (unsigned m = 0; m < s; ++m) + { + formula g = f[m]; + if (n != m) + g = formula::Concat({g, star}); + conj.emplace_back(g); + } + disj.emplace_back(formula::AndRat(std::move(conj))); + } + return formula::OrRat(std::move(disj)); + } + + class bdd_finalizer + { + public: + int encode(formula f) + { + bool is_anon = false; + int var_num; + auto it = formula2bdd_.find(f); + if (it != formula2bdd_.end()) + { + var_num = it->second; + } + else + { + if (opt_sigma_star_ && (f.is(op::Star) + && f[0].is(op::tt) + && f.min() == 0 + && f.max() == formula::unbounded())) + { + var_num = bddtrue.id(); + } + else if (opt_bdd_encode_ && (f.is(op::AndRat) || f.is(op::OrRat))) + { + bdd var = f.is(op::AndRat) ? bdd(bddtrue) : bdd(bddfalse); + for (const auto& sub_f : f) + { + int bddid = encode(sub_f); + bdd subvar = bdd_ithvar(bddid); + var = f.is(op::AndRat) ? var & subvar : var | subvar; + } + var_num = var.id(); + } + else + { + var_num = d_->register_anonymous_variables(1, this); + is_anon = true; + } + + formula2bdd_.insert({f, var_num}); + bdd2formula_.insert({var_num, f}); + } + + bdd var = bdd_ithvar(var_num); + + if (is_anon) + anon_set_ &= var; + + return var_num; + } + + bdd_finalizer(expansion_t& exp, bdd_dict_ptr d, bool opt_sigma_star, bool opt_bdd_encode) + : anon_set_(bddtrue) + , d_(d) + , opt_sigma_star_(opt_sigma_star) + , opt_bdd_encode_(opt_bdd_encode) + { + for (const auto& [prefix, suffix] : exp) + { + int var_num = encode(suffix); + bdd var = bdd_ithvar(var_num); + exp_ |= prefix & var; + } + } + + ~bdd_finalizer() + { + d_->unregister_all_my_variables(this); + } + + expansion_t + simplify(exp_opts::expand_opt opts); + + private: + bdd exp_; + bdd anon_set_; + std::map formula2bdd_; + std::map bdd2formula_; + bdd_dict_ptr d_; + bool opt_sigma_star_; + bool opt_bdd_encode_; + + formula var_to_formula(int var); + formula conj_bdd_to_sere(bdd b); + formula bdd_to_sere(bdd b); + }; + + formula + bdd_finalizer::var_to_formula(int var) + { + formula f = bdd2formula_[var]; + assert(f); + return f; + } + + formula + bdd_finalizer::bdd_to_sere(bdd f) + { + if (f == bddfalse) + return formula::ff(); + + std::vector v; + minato_isop isop(f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_bdd_to_sere(cube)); + return formula::OrRat(std::move(v)); + } + + formula + bdd_finalizer::conj_bdd_to_sere(bdd b) + { + if (b == bddtrue) + { + if (opt_sigma_star_){ + return formula::Star(formula::tt(), 0, formula::unbounded()); + } else { + return formula::tt(); + } + } + if (b == bddfalse) + return formula::ff(); + + // Unroll the first loop of the next do/while loop so that we + // do not have to create v when b is not a conjunction. 
+ formula res = var_to_formula(bdd_var(b)); + bdd high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + if (b == bddtrue) + return res; + std::vector v{std::move(res)}; + do + { + res = var_to_formula(bdd_var(b)); + high = bdd_high(b); + if (high == bddfalse) + { + res = formula::Not(res); + b = bdd_low(b); + } + else + { + assert(bdd_low(b) == bddfalse); + b = high; + } + assert(b != bddfalse); + v.emplace_back(std::move(res)); + } + while (b != bddtrue); + return formula::multop(op::AndRat, std::move(v)); + } + + expansion_t + bdd_finalizer::simplify(exp_opts::expand_opt opts) + { + expansion_t res; + + if (opts & exp_opts::expand_opt::BddMinterm) + { + bdd prop_set = bdd_exist(bdd_support(exp_), anon_set_); + bdd or_labels = bdd_exist(exp_, anon_set_); + // TODO: check are_equivalent avec or_labels/exp_ en premier argument + for (bdd letter: minterms_of(or_labels, prop_set)) + { + bdd dest_bdd = bdd_restrict(exp_, letter); + formula dest = bdd_to_sere(dest_bdd); + + #ifndef NDEBUG + // make sure it didn't exist before + auto it = std::find(res.begin(), res.end(), {letter, dest}); + SPOT_ASSERT(it == res.end()); + #endif + + res.push_back({letter, dest}); + } + } + else // BddIsop + { + minato_isop isop(exp_); + bdd cube; + while ((cube = isop.next()) != bddfalse) + { + bdd letter = bdd_exist(cube, anon_set_); + bdd suffix = bdd_existcomp(cube, anon_set_); + formula dest = conj_bdd_to_sere(suffix); + + res.push_back({letter, dest}); + } + } + + return res; + } + + void + finalize(expansion_t& exp, exp_opts::expand_opt opts, bdd_dict_ptr d, std::unordered_set* seen) + { + if (opts & (exp_opts::expand_opt::BddIsop + | exp_opts::expand_opt::BddMinterm)) + { + bdd_finalizer bddf(exp, d, opts & exp_opts::expand_opt::BddSigmaStar, opts & exp_opts::expand_opt::BddEncode); + exp = bddf.simplify(opts); + } + + if (opts & exp_opts::expand_opt::UniqueSuffixPre) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto [suffix, prefix] : unique_map) + { + exp.push_back({prefix, suffix}); + } + } + + if (opts & exp_opts::expand_opt::UniquePrefix) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({prefix, suffix}); + if (!res.second) + { + auto it = res.first; + it->second = formula::OrRat({it->second, suffix}); + } + } + + exp.clear(); + + for (const auto [prefix, suffix] : unique_map) + { + if ((opts & exp_opts::expand_opt::UniquePrefixSeenOpt) + && suffix.is(op::OrRat)) + { + std::vector merge; + std::vector single; + + for (const auto& sub_f : suffix) + { + if (seen->find(sub_f) != seen->end()) + { + single.push_back(sub_f); + } + else + { + merge.push_back(sub_f); + } + } + + for (const auto& sub_f : single) + exp.push_back({prefix, sub_f}); + + if (!merge.empty()) + exp.push_back({prefix, formula::OrRat(merge)}); + } + else + { + exp.push_back({prefix, suffix}); + } + } + } + + if (opts & exp_opts::expand_opt::UniqueSuffixPost) + { + std::map unique_map; + for (const auto& [prefix, suffix] : exp) + { + auto res = unique_map.insert({suffix, prefix}); + if (!res.second) + { + auto it = res.first; + it->second |= prefix; + } + } + + exp.clear(); + for (const auto [suffix, prefix] : unique_map) + { + exp.push_back({prefix, suffix}); + } + } + + if (opts & 
exp_opts::expand_opt::Determinize) + { + expansion_t exp_new; + + bdd props = bddtrue; + for (const auto& [prefix, _] : exp) + props &= bdd_support(prefix); + + std::vector dests; + for (bdd letter : minterms_of(bddtrue, props)) + { + for (const auto& [prefix, suffix] : exp) + { + if (bdd_implies(letter, prefix)) + dests.push_back(suffix); + } + formula or_dests = formula::OrRat(dests); + exp_new.push_back({letter, or_dests}); + dests.clear(); + } + exp = exp_new; + } + } + } + + formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d) + { + std::vector res; + + for (const auto& [key, val] : e) + { + formula prefix = bdd_to_formula(key, d); + res.push_back(formula::Concat({prefix, val})); + } + + return formula::OrRat(res); + } + + void print_expansion(const expansion_t& exp, const bdd_dict_ptr& dict) + { + for (const auto& [prefix, suffix] : exp) + { + std::cout << bdd_to_formula(prefix, dict) << ": " << suffix << std::endl; + } + } + + std::vector> + expansion_simple(formula f) + { + int owner = 42; + auto d = make_bdd_dict(); + + auto exp = expansion(f, d, &owner, exp_opts::None); + + std::vector> res; + for (const auto& [bdd, f] : exp) + res.push_back({bdd_to_formula(bdd, d), f}); + + d->unregister_all_my_variables(&owner); + return res; + } + + expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen) + { + using exp_t = expansion_t; + + if (f.is_boolean()) + { + auto f_bdd = formula_to_bdd(f, d, owner); + + if (f_bdd == bddfalse) + return {}; + + return {{f_bdd, formula::eword()}}; + } + + auto rec = [&d, owner, opts, seen](formula f){ + return expansion(f, d, owner, exp_opts::None, seen); + }; + + + switch (f.kind()) + { + case op::ff: + case op::tt: + case op::ap: + SPOT_UNREACHABLE(); + + case op::eword: + // return {{bddfalse, formula::ff()}}; + return {}; + + case op::Concat: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, formula::Concat({form, f.all_but(0)})}); + } + + if (f[0].accepts_eword()) + { + auto exps_rest = rec(f.all_but(0)); + for (const auto& [bdd_l, form] : exps_rest) + { + res.push_back({bdd_l, form}); + } + } + + finalize(res, opts, d, seen); + return res; + } + + case op::FStar: + { + formula E = f[0]; + + if (f.min() == 0 && f.max() == 0) + return {{bddtrue, formula::eword()}}; + + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? formula::unbounded() + : (f.max() - 1); + + auto E_i_j_minus = formula::FStar(E, min, max); + + auto exp = rec(E); + exp_t res; + for (const auto& [li, ei] : exp) + { + res.push_back({li, formula::Fusion({ei, E_i_j_minus})}); + + if (ei.accepts_eword() && f.min() != 0) + { + for (const auto& [ki, fi] : rec(E_i_j_minus)) + { + // FIXME: build bdd once + if ((li & ki) != bddfalse) + res.push_back({li & ki, fi}); + } + } + } + if (f.min() == 0) + res.push_back({bddtrue, formula::eword()}); + + finalize(res, opts, d, seen); + return res; + } + + case op::Star: + { + auto min = f.min() == 0 ? 0 : (f.min() - 1); + auto max = f.max() == formula::unbounded() + ? 
formula::unbounded() + : (f.max() - 1); + + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, formula::Concat({form, formula::Star(f[0], min, max)})}); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::AndNLM: + { + formula rewrite = rewrite_and_nlm(f); + auto res = rec(rewrite); + finalize(res, opts, d, seen); + return res; + } + + case op::first_match: + { + auto exps = rec(f[0]); + + exp_t res; + for (const auto& [bdd_l, form] : exps) + { + res.push_back({bdd_l, form}); + } + + // determinize + bdd or_labels = bddfalse; + bdd support = bddtrue; + bool is_det = true; + for (const auto& [l, _] : res) + { + support &= bdd_support(l); + if (is_det) + is_det = !bdd_have_common_assignment(l, or_labels); + or_labels |= l; + } + + if (is_det) + { + for (auto& [_, dest] : res) + dest = formula::first_match(dest); + finalize(res, opts, d, seen); + return res; + } + + exp_t res_det; + std::vector dests; + for (bdd l: minterms_of(or_labels, support)) + { + for (const auto& [ndet_label, ndet_dest] : res) + { + if (bdd_implies(l, ndet_label)) + dests.push_back(ndet_dest); + } + formula or_dests = formula::OrRat(dests); + res_det.push_back({l, or_dests}); + dests.clear(); + } + + for (auto& [_, dest] : res_det) + dest = formula::first_match(dest); + finalize(res_det, opts, d, seen); + return res_det; + } + + case op::Fusion: + { + exp_t res; + formula E = f[0]; + formula F = f.all_but(0); + + exp_t Ei = rec(E); + // TODO: std::option + exp_t Fj = rec(F); + + for (const auto& [li, ei] : Ei) + { + if (ei.accepts_eword()) + { + for (const auto& [kj, fj] : Fj) + if ((li & kj) != bddfalse) + res.push_back({li & kj, fj}); + } + res.push_back({li, formula::Fusion({ei, F})}); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::AndRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + + if (exps.empty()) + { + // op::AndRat: one of the expansions was empty (the only + // edge was `false`), so the AndRat is empty as + // well + res.clear(); + break; + } + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + exp_t new_res; + bool inserted = false; + for (const auto& [l_key, l_val] : exps) + { + for (const auto& [r_key, r_val] : res) + { + if ((l_key & r_key) != bddfalse) + { + new_res.push_back({l_key & r_key, formula::multop(f.kind(), {l_val, r_val})}); + inserted = true; + } + } + } + + if (!inserted) + { + // all prefix conjuctions led to bddfalse, And is empty + res.clear(); + break; + } + + res = std::move(new_res); + } + + finalize(res, opts, d, seen); + return res; + } + + case op::OrRat: + { + exp_t res; + for (const auto& sub_f : f) + { + auto exps = rec(sub_f); + if (exps.empty()) + continue; + + if (res.empty()) + { + res = std::move(exps); + continue; + } + + for (const auto& [label, dest] : exps) + res.push_back({label, dest}); + } + + finalize(res, opts, d, seen); + return res; + } + + default: + std::cerr << "unimplemented kind " + << static_cast(f.kind()) + << std::endl; + SPOT_UNIMPLEMENTED(); + } + + return {}; + } + + twa_graph_ptr + expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) + { + auto finite = expand_finite_automaton(f, d, opts); + return from_finite(finite); + } + + struct signature_hash + { + std::size_t + operator() (const std::pair& sig) const + { + size_t hash = std::hash()(sig.first); + + for (const auto& keyvalue : sig.second) + { + hash ^= (bdd_hash()(keyvalue.first) ^ std::hash()(keyvalue.second)) + + 0x9e3779b9 + (hash 
<< 6) + (hash >> 2); + } + + return hash; + } + }; + + twa_graph_ptr + expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts) + { + bool signature_merge = opts & exp_opts::expand_opt::SignatureMerge; + + auto aut = make_twa_graph(d); + + aut->prop_state_acc(true); + const auto acc_mark = aut->set_buchi(); + + auto formula2state = robin_hood::unordered_map(); + auto signature2state = std::unordered_map, unsigned, signature_hash>(); + auto seen = std::unordered_set(); + seen.insert(f); + + unsigned init_state = aut->new_state(); + aut->set_init_state(init_state); + formula2state.insert({ f, init_state }); + + + auto f_aps = formula_aps(f); + for (auto& ap : f_aps) + aut->register_ap(ap); + + if (signature_merge) + signature2state.insert({ {f.accepts_eword(), expansion(f, d, aut.get(), opts, &seen)}, init_state}); + + + auto todo = std::vector>(); + todo.push_back({f, init_state}); + + auto state_names = new std::vector(); + std::ostringstream ss; + ss << f; + state_names->push_back(ss.str()); + + auto find_dst = [&](formula suffix) -> unsigned + { + unsigned dst; + auto it = formula2state.find(suffix); + if (it != formula2state.end()) + { + dst = it->second; + } + else + { + if (signature_merge) + { + auto exp = expansion(suffix, d, aut.get(), opts, &seen); + bool accepting = suffix.accepts_eword(); + auto it2 = signature2state.find({accepting, exp}); + if (it2 != signature2state.end()) + { + formula2state.insert({suffix, it2->second}); + return it2->second; + } + } + + dst = aut->new_state(); + todo.push_back({suffix, dst}); + seen.insert(suffix); + + formula2state.insert({suffix, dst}); + if (signature_merge) + signature2state.insert({{suffix.accepts_eword(), expansion(suffix, d, aut.get(), opts, &seen)}, dst}); + + std::ostringstream ss; + ss << suffix; + state_names->push_back(ss.str()); + } + + return dst; + }; + + while (!todo.empty()) + { + auto [curr_f, curr_state] = todo[todo.size() - 1]; + todo.pop_back(); + + auto curr_acc_mark= curr_f.accepts_eword() + ? acc_mark + : acc_cond::mark_t(); + + auto exp = expansion(curr_f, d, aut.get(), opts, &seen); + + for (const auto& [letter, suffix] : exp) + { + if (suffix.is(op::ff)) + // TODO ASSERT NOT + continue; + + auto dst = find_dst(suffix); + aut->new_edge(curr_state, dst, letter, curr_acc_mark); + } + + // if state has no transitions and should be accepting, create + // artificial transition + if (aut->get_graph().state_storage(curr_state).succ == 0 + && curr_f.accepts_eword()) + aut->new_edge(curr_state, curr_state, bddfalse, acc_mark); + } + + aut->set_named_prop("state-names", state_names); + + if ((opts & exp_opts::MergeEdges) + && !(opts & exp_opts::UniqueSuffixPre || opts & exp_opts::UniqueSuffixPost)) + aut->merge_edges(); + + return aut; + } +} diff --git a/spot/tl/expansions.hh b/spot/tl/expansions.hh new file mode 100644 index 000000000..036ac945a --- /dev/null +++ b/spot/tl/expansions.hh @@ -0,0 +1,70 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. 
+// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include + +#include + +#include +#include +#include +#include + +namespace spot +{ + using expansion_t = std::vector>; + + struct exp_opts + { + enum expand_opt { + None = 0, + UniqueSuffixPre = 1, + UniquePrefix = 2, + BddIsop = 4, + BddMinterm = 8, + BddSigmaStar = 16, + BddEncode = 32, + MergeEdges = 64, + SignatureMerge = 128, + Determinize = 256, + UniquePrefixSeenOpt = 512, + UniqueSuffixPost = 1024, + }; + }; + + SPOT_API std::vector> + expansion_simple(formula f); + + SPOT_API expansion_t + expansion(formula f, const bdd_dict_ptr& d, void *owner, exp_opts::expand_opt opts, std::unordered_set* seen = nullptr); + + SPOT_API twa_graph_ptr + expand_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); + + SPOT_API twa_graph_ptr + expand_finite_automaton(formula f, bdd_dict_ptr d, exp_opts::expand_opt opts); + + SPOT_API formula + expansion_to_formula(expansion_t e, bdd_dict_ptr& d); + + SPOT_API void + print_expansion(const expansion_t& exp, const bdd_dict_ptr& dict); +} diff --git a/spot/tl/formula.cc b/spot/tl/formula.cc index 8fe91bf70..370a50e8f 100644 --- a/spot/tl/formula.cc +++ b/spot/tl/formula.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2015-2019, 2021, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -136,7 +136,7 @@ namespace spot // - AndRat(Exps1...,Bool1,Exps2...,Bool2,Exps3...) = // AndRat(And(Bool1,Bool2),Exps1...,Exps2...,Exps3...) // - OrRat(Exps1...,Bool1,Exps2...,Bool2,Exps3...) = - // AndRat(Or(Bool1,Bool2),Exps1...,Exps2...,Exps3...) + // OrRat(Or(Bool1,Bool2),Exps1...,Exps2...,Exps3...) if (!b.empty()) v.insert(v.begin(), fnode::multop(o, std::move(b))); } @@ -307,11 +307,14 @@ namespace spot unsigned orig_size = v.size(); - const fnode* neutral; - const fnode* neutral2; - const fnode* abs; - const fnode* abs2; - const fnode* weak_abs; + const fnode* neutral; // neutral element + const fnode* neutral2; // second neutral element (if any) + const fnode* abs; // absorbent element + const fnode* abs2; // second absorbent element (if any) + const fnode* weak_abs; // almost absorbent element (if any) + // The notion of "almost absorbent" captures situation where the + // present of the element can be simplified in itself or another + // element depending on a condition on the rest of the formula. switch (o) { case op::And: @@ -323,7 +326,17 @@ namespace spot break; case op::AndRat: neutral = one_star(); - neutral2 = nullptr; + { + // If this AndRat contains an operand that does not accept + // the empty word, and that is not [+], then any [+] can be + // removed. + bool one_non_eword_non_plus = + std::find_if(v.begin(), v.end(), + [o = one_plus()](const fnode* f) { + return !f->accepts_eword() && f != o; + }) != v.end(); + neutral2 = one_non_eword_non_plus ? 
one_plus() : nullptr; + } abs = ff(); abs2 = nullptr; weak_abs = eword(); @@ -349,7 +362,7 @@ namespace spot neutral2 = nullptr; abs = one_star(); abs2 = nullptr; - weak_abs = nullptr; + weak_abs = one_plus(); gather_bool(v, op::Or); break; case op::Concat: @@ -506,11 +519,10 @@ namespace spot else return abs; } - else + else if (o == op::AndNLM) { // Similarly, a* & 1 & (c;d) = c;d // a* & 1 & c* = 1 - assert(o == op::AndNLM); vec tmp; for (auto i: v) { @@ -527,6 +539,27 @@ namespace spot tmp.emplace_back(weak_abs); v.swap(tmp); } + else if (o == op::OrRat) + { + // We have a[*] | [+] | c = [*] + // and a | [+] | c = [+] + // So if [+] has been seen, check if some term + // recognize the empty word. + bool acc_eword = false; + for (i = v.begin(); i != v.end(); ++i) + { + acc_eword |= (*i)->accepts_eword(); + (*i)->destroy(); + } + if (acc_eword) + return abs; + else + return weak_abs; + } + else + { + SPOT_UNREACHABLE(); + } } else if (o == op::Concat || o == op::Fusion) { @@ -588,9 +621,9 @@ namespace spot } else if (min != unbounded()) { - min += min2; - if (SPOT_UNLIKELY(min >= unbounded())) + if (SPOT_UNLIKELY(min + min2 >= unbounded())) break; + min += min2; } if (max2 == unbounded()) { @@ -598,9 +631,9 @@ namespace spot } else if (max != unbounded()) { - max += max2; - if (SPOT_UNLIKELY(max >= unbounded())) + if (SPOT_UNLIKELY(max + max2 >= unbounded())) break; + max += max2; } (*i)->destroy(); i = v.erase(i); @@ -613,6 +646,81 @@ namespace spot *fpos = newfs; } } + // also + // b[*i..j]:b -> b[*max(1,i),j] + // b:b[*i..j] -> b[*max(1,i),j] + // b[*i..j]:b[*k..l] -> b[*max(i,1)+max(j,1)-1,j+l-1] + if (o == op::Fusion && v.size() > 1) + { + i = v.begin(); + while (i != v.end()) + { + if (!(((*i)->is(op::Star) && (*i)->nth(0)->is_boolean()) + || (*i)->is_boolean())) + { + ++i; + continue; + } + const fnode *b; + unsigned min; + unsigned max; + if ((*i)->is_boolean()) + { + min = max = 1; + b = *i; + } + else + { + b = (*i)->nth(0); + min = (*i)->min(); + max = (*i)->max(); + } + vec::iterator prev = i; + ++i; + bool changed = false; + while (i != v.end()) + { + unsigned min2; + unsigned max2; + if ((*i)->is_boolean()) + { + if (*i != b) + break; + min2 = max2 = 1; + } + else if ((*i)->is(op::Star) && (*i)->nth(0)->is_boolean()) + { + if ((*i)->nth(0) != b) + break; + min2 = (*i)->min(); + max2 = (*i)->max(); + } + else + { + break; + } + // Now we can merge prev and i. + min = min + (min == 0) + min2 + (min2 == 0) - 1; + assert(max != 0 && max2 != 0); + if (max2 == unbounded() || max == unbounded()) + max = unbounded(); + else if (max + max2 < unbounded()) + max = max + max2 - 1; + else + break; + changed = true; + (*i)->destroy(); + i = v.erase(i); + } + if (changed) + { + const fnode* newf = + fnode::bunop(op::Star, b->clone(), min, max); + (*prev)->destroy(); + *prev = newf; + } + } + } } } @@ -660,6 +768,16 @@ namespace spot switch (o) { case op::Star: + if (max == unbounded() && child == tt_) + { + // bypass normal construction: 1[*] and 1[+] are + // frequently used, so they are not reference counted. + if (min == 0) + return one_star(); + if (min == 1) + return one_plus(); + } + neutral = eword(); break; case op::FStar: @@ -810,7 +928,7 @@ namespace spot return tt(); // ![*0] = 1[+] if (f->is_eword()) - return bunop(op::Star, tt(), 1); + return one_plus(); auto fop = f->kind(); // "Not" is an involution. 
@@ -1138,10 +1256,11 @@ namespace spot return id; } - const fnode* fnode::ff_ = new fnode(op::ff, {}); - const fnode* fnode::tt_ = new fnode(op::tt, {}); - const fnode* fnode::ew_ = new fnode(op::eword, {}); + const fnode* fnode::ff_ = new fnode(op::ff, {}, true); + const fnode* fnode::tt_ = new fnode(op::tt, {}, true); + const fnode* fnode::ew_ = new fnode(op::eword, {}, true); const fnode* fnode::one_star_ = nullptr; // Only built when necessary. + const fnode* fnode::one_plus_ = nullptr; // Only built when necessary. void fnode::setup_props(op o) { @@ -1817,7 +1936,7 @@ namespace spot { unsigned cnt = 0; for (auto i: m.uniq) - if (i->id() > 3 && i != one_star_) + if (!i->saturated_) { if (!cnt++) std::cerr << "*** m.uniq is not empty ***\n"; diff --git a/spot/tl/formula.hh b/spot/tl/formula.hh index c52ed3e39..074ec8b02 100644 --- a/spot/tl/formula.hh +++ b/spot/tl/formula.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -149,7 +149,7 @@ namespace spot { if (SPOT_LIKELY(refs_)) --refs_; - else if (SPOT_LIKELY(id_ > 2) && SPOT_LIKELY(!saturated_)) + else if (SPOT_LIKELY(!saturated_)) // last reference to a node that is not a constant destroy_aux(); } @@ -294,7 +294,9 @@ namespace spot { if (SPOT_UNLIKELY(i >= size())) report_non_existing_child(); - return children[i]; + const fnode* c = children[i]; + SPOT_ASSUME(c != nullptr); + return c; } /// \see formula::ff @@ -351,10 +353,18 @@ namespace spot static const fnode* one_star() { if (!one_star_) - one_star_ = bunop(op::Star, tt(), 0); + one_star_ = new fnode(op::Star, tt_, 0, unbounded(), true); return one_star_; } + /// \see formula::one_plus + static const fnode* one_plus() + { + if (!one_plus_) + one_plus_ = new fnode(op::Star, tt_, 1, unbounded(), true); + return one_plus_; + } + /// \see formula::ap_name const std::string& ap_name() const; @@ -536,7 +546,7 @@ namespace spot template - fnode(op o, iter begin, iter end) + fnode(op o, iter begin, iter end, bool saturated = false) // Clang has some optimization where is it able to combine the // 4 movb initializing op_,min_,max_,saturated_ into a single // movl. 
Also it can optimize the three byte-comparisons of @@ -551,7 +561,7 @@ namespace spot #if __llvm__ min_(0), max_(0), #endif - saturated_(0) + saturated_(saturated) { size_t s = std::distance(begin, end); if (SPOT_UNLIKELY(s > (size_t) UINT16_MAX)) @@ -563,13 +573,15 @@ namespace spot setup_props(o); } - fnode(op o, std::initializer_list l) - : fnode(o, l.begin(), l.end()) + fnode(op o, std::initializer_list l, + bool saturated = false) + : fnode(o, l.begin(), l.end(), saturated) { } - fnode(op o, const fnode* f, uint8_t min, uint8_t max) - : op_(o), min_(min), max_(max), saturated_(0), size_(1) + fnode(op o, const fnode* f, uint8_t min, uint8_t max, + bool saturated = false) + : op_(o), min_(min), max_(max), saturated_(saturated), size_(1) { children[0] = f; setup_props(o); @@ -579,6 +591,7 @@ namespace spot static const fnode* tt_; static const fnode* ew_; static const fnode* one_star_; + static const fnode* one_plus_; op op_; // operator uint8_t min_; // range minimum (for star-like operators) @@ -1227,12 +1240,12 @@ namespace spot return bunop(op::Name, std::move(f), min, max); \ } #endif - /// \brief Create SERE for f[*min..max] + /// \brief Create SERE for `f[*min..max]` /// @{ SPOT_DEF_BUNOP(Star); /// @} - /// \brief Create SERE for f[:*min..max] + /// \brief Create SERE for `f[:*min..max]` /// /// This operator is a generalization of the (+) operator /// defined by Dax et al. \cite dax.09.atva @@ -1259,24 +1272,24 @@ namespace spot f.ptr_->clone())); } - /// \brief Create a SERE equivalent to b[->min..max] + /// \brief Create a SERE equivalent to `b[->min..max]` /// /// The operator does not exist: it is handled as syntactic sugar /// by the parser and the printer. This function is used by the /// parser to create the equivalent SERE. static formula sugar_goto(const formula& b, unsigned min, unsigned max); - /// Create the SERE b[=min..max] + /// \brief Create the SERE `b[=min..max]` /// /// The operator does not exist: it is handled as syntactic sugar /// by the parser and the printer. This function is used by the /// parser to create the equivalent SERE. static formula sugar_equal(const formula& b, unsigned min, unsigned max); - /// Create the SERE a ##[n:m] b + /// \brief Create the SERE `a ##[n:m] b` /// - /// This ##[n:m] operator comes from SVA. When n=m, it is simply - /// written ##n. + /// This `##[n:m]` operator comes from SVA. When n=m, it is simply + /// written `##n`. /// /// The operator does not exist in Spot it is handled as syntactic /// sugar by the parser. This function is used by the parser to @@ -1552,7 +1565,15 @@ namespace spot /// \brief Return a copy of the formula 1[*]. static formula one_star() { - return formula(fnode::one_star()->clone()); + // no need to clone, 1[*] is not reference counted + return formula(fnode::one_star()); + } + + /// \brief Return a copy of the formula 1[+]. + static formula one_plus() + { + // no need to clone, 1[+] is not reference counted + return formula(fnode::one_plus()); } /// \brief Whether the formula is an atomic proposition or its diff --git a/spot/tl/relabel.cc b/spot/tl/relabel.cc index 44d6577cb..26c7564c1 100644 --- a/spot/tl/relabel.cc +++ b/spot/tl/relabel.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et +// Copyright (C) 2012-2016, 2018-2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
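The formula.hh hunks above stop reference-counting the saturated constants and expose 1[+] through fnode::one_plus() and formula::one_plus(), mirroring one_star(). A minimal usage sketch, not part of the patch; the last assertion relies on the ![*0] rewriting added in formula.cc above.

#include <cassert>
#include <spot/tl/formula.hh>

int main()
{
  spot::formula p = spot::formula::one_plus();   // 1[+]
  spot::formula s = spot::formula::one_star();   // 1[*]
  assert(p.is(spot::op::Star) && p.min() == 1);
  assert(s.is(spot::op::Star) && s.min() == 0);
  assert(p.max() == spot::formula::unbounded());
  // ![*0] now yields the shared 1[+] constant.
  assert(spot::formula::Not(spot::formula::eword()) == p);
  return 0;
}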
@@ -363,7 +363,7 @@ namespace spot goto done; } } - if (sz > 2 && !f.is_boolean()) + if (sz > 2 && !f.is_boolean() && f.is(op::And, op::Or)) { /// If we have a formula like (a & b & Xc), consider /// it as ((a & b) & Xc) in the graph to isolate the @@ -384,7 +384,7 @@ namespace spot for (i = 1; i < sz; ++i) { formula next = f[i]; - // Note that we only add an edge in both directions, + // Note that we add an edge in both directions, // as the cut point algorithm really need undirected // graphs. (We used to do only one direction, and // that turned out to be a bug.) @@ -581,6 +581,14 @@ namespace spot conv.visit(f); } + //// Uncomment to print the graph. + // for (auto& [f, sv]: g) + // { + // std::cerr << f << ":\n"; + // for (auto& s: sv) + // std::cerr << " " << s << '\n'; + // } + // Compute its cut-points fset c; cut_points(g, c, f); diff --git a/spot/tl/simplify.cc b/spot/tl/simplify.cc index 3a2433197..4eac97282 100644 --- a/spot/tl/simplify.cc +++ b/spot/tl/simplify.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2021 Laboratoire de Recherche et Developpement +// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -64,14 +64,14 @@ namespace spot } tl_simplifier_cache(const bdd_dict_ptr& d) - : dict(d), lcc(d, true, true, false, false) + : dict(d), lcc(d, false, true, false, false) { } tl_simplifier_cache(const bdd_dict_ptr& d, const tl_simplifier_options& opt) : dict(d), options(opt), - lcc(d, true, true, false, false, opt.containment_max_states) + lcc(d, false, true, false, false, opt.containment_max_states) { options.containment_checks |= options.containment_checks_stronger; options.event_univ |= options.favor_event_univ; @@ -2507,8 +2507,11 @@ namespace spot unsigned mos = mo.size(); if ((opt_.synt_impl | opt_.containment_checks) - && mo.is(op::Or, op::And)) + && mo.is(op::Or, op::And) + && (opt_.containment_max_ops == 0 + || opt_.containment_max_ops >= mos)) { + bool is_and = mo.is(op::And); // Do not merge these two loops, as rewritings from the // second loop could prevent rewritings from the first one // to trigger. @@ -2520,7 +2523,6 @@ namespace spot // if fo => !fi, then fi & fo = false // if !fi => fo, then fi | fo = true // if !fo => fi, then fi | fo = true - bool is_and = mo.is(op::And); if (c_->implication_neg(fi, fo, is_and) || c_->implication_neg(fo, fi, is_and)) return recurse(is_and ? formula::ff() : formula::tt()); @@ -2531,8 +2533,8 @@ namespace spot formula fo = mo.all_but(i); // if fi => fo, then fi | fo = fo // if fo => fi, then fi & fo = fo - if ((mo.is(op::Or) && c_->implication(fi, fo)) - || (mo.is(op::And) && c_->implication(fo, fi))) + if (((!is_and) && c_->implication(fi, fo)) + || (is_and && c_->implication(fo, fi))) { // We are about to pick fo, but hold on! // Maybe we actually have fi <=> fo, in diff --git a/spot/tl/simplify.hh b/spot/tl/simplify.hh index e5838544d..ec102a205 100644 --- a/spot/tl/simplify.hh +++ b/spot/tl/simplify.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017, 2019, 2020 Laboratoire de Recherche et Developpement +// Copyright (C) 2011-2022 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -96,6 +96,9 @@ namespace spot // If greater than 0, bound the number of states used by automata // in containment checks. 
unsigned containment_max_states = 0; + // If greater than 0, maximal number of terms in a multop to perform + // containment checks on this multop. + unsigned containment_max_ops = 16; }; // fwd declaration to hide technical details. diff --git a/spot/tl/sonf.cc b/spot/tl/sonf.cc new file mode 100644 index 000000000..29a319039 --- /dev/null +++ b/spot/tl/sonf.cc @@ -0,0 +1,185 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" +#include +#include + +#include +#include +#include +#include + +namespace spot +{ + namespace + { + /// Uses `extractor` to extract some parts of the formula and replace them + /// with atomic propositions. + /// + /// Returns (f & g1 & g2 & .. & gn) with g1..gn the extracted subformulas. + /// + /// `extractor` should be a lambda taking the following parameters as input: + /// + /// - `formula` the formula to process + /// - `std::vector&` the vector that stores extracted subformulas + /// - `auto&&` itself, in case it needs to call itself recursively + /// (formula::map for example) + /// - `bool` a boolean indicating whether the lambda is currently being + /// called at the formula's "root" + /// - `bool` a boolean indicating whether the lambda is currently being + /// called inside a toplevel `and` construct. + /// + /// Note that the last 2 boolean arguments can be used as you see fit in + /// your recursive calls, the first one being set to true in the original + /// call, and the second one to false. + /// + /// `extractor` should return the new rewritten formula. 
+ /// + /// auto sample_extractor = [](formula f, + /// std::vector& extracted, + /// auto&& extractor, + /// bool top_level, + /// bool in_top_level_and) -> formula + template + static formula + extract(formula f, Ext extractor) + { + std::vector extracted; + formula new_f = extractor(f, extracted, extractor, true, false); + extracted.push_back(new_f); + return formula::And(extracted); + } + } + + std::pair> + suffix_operator_normal_form(formula f, const std::string prefix) + { + // SONF can only be applied to formulas in negative normal form + f = negative_normal_form(f); + + std::unordered_set used_aps; + std::vector added_aps; + size_t count = 0; + + // identify all used ap names to avoid them when generating new ones + auto ap_indexer = [&used_aps](formula f) noexcept { + if (f.is(op::ap)) + { + used_aps.insert(f.ap_name()); + return true; + } + + return false; + }; + + f.traverse(ap_indexer); + + auto new_ap_name = + [&used_aps, &added_aps, &prefix, &count]() noexcept -> std::string + { + std::string new_name = prefix + std::to_string(count++); + while (used_aps.find(new_name) != used_aps.end()) + new_name = prefix + std::to_string(count++); + used_aps.insert(new_name); + added_aps.push_back(new_name); + return new_name; + }; + + // extracts the SERE part and replaces it with an atomic proposition, + // storing the extracted formula in `extracted` and returning the rewritten + // original formula + auto sonf_extract = [&](formula f, + std::vector& extracted, + auto&& extractor, + bool top_level, + bool in_top_level_and) noexcept -> formula + { + const auto kind = f.kind(); + + switch (kind) + { + case op::G: + { + // skip if shape is G(!ap | (regex []-> formula)) and at top level + if ((top_level || in_top_level_and) + && f[0].is(op::Or) // G(_ | _) + && f[0][0].is(op::Not) // G(!_ | _) + && f[0][0][0].is(op::ap) // G(!ap | _) + && f[0][1].is(op::EConcat, op::UConcat)) // G(!ap | (_ []-> _)) + return f; + else + return f.map(extractor, extracted, extractor, false, false); + } + case op::EConcat: + case op::UConcat: + { + // recurse into rhs first (_ []-> rhs) + formula rhs = + f[1].map(extractor, extracted, extractor, false, false); + f = formula::binop(kind, f[0], rhs); + + formula ap = formula::ap(new_ap_name()); + extracted.push_back(formula::G(formula::Or({formula::Not(ap), f}))); + return ap; + } + default: + // tracking if we're in a op::And at the formula root + in_top_level_and = top_level && f.is(op::And); + return f.map(extractor, extracted, extractor, + false, in_top_level_and); + } + }; + + f = extract(f, sonf_extract); + + auto ltl_extract = [&](formula f, + std::vector& extracted, + auto&& extractor, + [[maybe_unused]] + bool top_level, + [[maybe_unused]] + bool in_top_level_and) noexcept -> formula + { + switch (f.kind()) + { + case op::EConcat: + case op::UConcat: + { + formula rhs = f[1]; + + if (rhs.is(op::ap)) + return f; + + formula ap = formula::ap(new_ap_name()); + extracted.push_back( + formula::G(formula::Or({formula::Not(ap), rhs}))); + + return formula::binop(f.kind(), f[0], ap); + } + default: + return f.map(extractor, extracted, extractor, false, false); + } + }; + + f = extract(f, ltl_extract); + + return {f, added_aps}; + } +} diff --git a/spot/tl/sonf.hh b/spot/tl/sonf.hh new file mode 100644 index 000000000..37ef5d05d --- /dev/null +++ b/spot/tl/sonf.hh @@ -0,0 +1,44 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2021 Laboratoire de Recherche et Développement de +// l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. 
+// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include +#include + +#include + +namespace spot +{ + /// \ingroup tl_rewriting + /// \brief Helper to rewrite a PSL formula in Suffix Operator Normal Form. + /// + /// SONF is described in section 4 of \cite cimatti.06.fmcad + /// + /// The formula output by this function is guaranteed to be in Negative Normal + /// Form. + /// + /// \param f the PSL formula to rewrite + /// \param prefix the prefix to use to name newly introduced aps + /// \return a pair with the rewritten formula, and a vector containing the + /// names of newly introduced aps + SPOT_API std::pair> + suffix_operator_normal_form(formula f, const std::string prefix); +} diff --git a/spot/twa/acc.cc b/spot/twa/acc.cc index 5b7985d70..d73af33b0 100644 --- a/spot/twa/acc.cc +++ b/spot/twa/acc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2015-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -1029,7 +1029,7 @@ namespace spot int base = ba.allocate_variables(umax+2); assert(base == 0); std::vector r; - for (unsigned i = 0; r.size() < umax; ++i) + while (r.size() < umax) r.emplace_back(bdd_ithvar(base++)); bdd bddcode = to_bdd(&r[0]); bdd tmp; @@ -2707,6 +2707,45 @@ namespace spot return false; } + // Check if pos contains Fin(f) in a substree + template + bool has_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + auto sub = pos - pos->sub.size; + do + { + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + --pos; + break; + case acc_cond::acc_op::Or: + if constexpr (top_conjunct_only) + pos -= pos->sub.size + 1; + else + --pos; + break; + case acc_cond::acc_op::Fin: + if (pos[-1].mark & f) + return true; + SPOT_FALLTHROUGH; + case acc_cond::acc_op::Inf: + case acc_cond::acc_op::InfNeg: + case acc_cond::acc_op::FinNeg: + pos -= 2; + break; + } + } + while (sub < pos); + return false; + } + + // Check whether pos looks like Fin(f) or Fin(f)&rest + bool is_conj_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + return has_fin(pos, f); + } + acc_cond::acc_code extract_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) { @@ -2716,7 +2755,7 @@ namespace spot case acc_cond::acc_op::And: case acc_cond::acc_op::Fin: case acc_cond::acc_op::Inf: - return pos; + return strip_rec(pos, f, true, false); case acc_cond::acc_op::Or: { --pos; @@ -2725,7 +2764,7 @@ namespace spot { if (uses_fin(pos, f)) { - acc_cond::acc_code tmp(pos); + auto tmp = strip_rec(pos, f, true, false); tmp |= std::move(res); std::swap(tmp, res); } @@ -2742,6 +2781,64 @@ namespace spot SPOT_UNREACHABLE(); return {}; } + + template + std::pair + split_top_fin(const acc_cond::acc_word* pos, acc_cond::mark_t f) + { + auto start = pos - pos->sub.size; + switch (pos->sub.op) + { + case acc_cond::acc_op::And: + case acc_cond::acc_op::Fin: + if 
(is_conj_fin(pos, f)) + return {pos, acc_cond::acc_code::f()}; + SPOT_FALLTHROUGH; + case acc_cond::acc_op::Inf: + return {acc_cond::acc_code::f(), pos}; + case acc_cond::acc_op::Or: + { + --pos; + auto left = acc_cond::acc_code::f(); + auto right = acc_cond::acc_code::f(); + do + { + if (is_conj_fin(pos, f)) + { + auto tmp = strip_rec(pos, f, true, false); + tmp |= std::move(left); + std::swap(tmp, left); + } + else if (deeper_check + && has_top_fin(pos) == -1 + && has_fin(pos, f)) + { + auto tmp = strip_rec(pos, f, true, false); + tmp |= std::move(left); + std::swap(tmp, left); + tmp = force_inf_rec(pos, f); + tmp |= std::move(right); + std::swap(tmp, right); + } + else + { + acc_cond::acc_code tmp(pos); + tmp |= std::move(right); + std::swap(tmp, right); + } + pos -= pos->sub.size + 1; + } + while (pos > start); + return {std::move(left), std::move(right)}; + } + case acc_cond::acc_op::FinNeg: + case acc_cond::acc_op::InfNeg: + SPOT_UNREACHABLE(); + return {acc_cond::acc_code::f(), acc_cond::acc_code::f()}; + } + SPOT_UNREACHABLE(); + return {acc_cond::acc_code::f(), acc_cond::acc_code::f()}; + } } std::pair @@ -2756,6 +2853,47 @@ namespace spot return {selected_fin, extract_fin(pos, {(unsigned) selected_fin})}; } + std::tuple + acc_cond::acc_code::fin_unit_one_split() const + { + if (SPOT_UNLIKELY(is_t() || is_f())) + err: + throw std::runtime_error("fin_unit_one_split(): no Fin"); + const acc_cond::acc_word* pos = &back(); + int selected_fin = has_top_fin(pos); + if (selected_fin >= 0) + { + auto [left, right] = split_top_fin(pos, {(unsigned) selected_fin}); + return {selected_fin, std::move(left), std::move(right)}; + } + selected_fin = fin_one(); + if (selected_fin < 0) + goto err; + acc_cond::mark_t fo_m = {(unsigned) selected_fin}; + return {selected_fin, extract_fin(pos, fo_m), force_inf(fo_m)}; + } + + std::tuple + acc_cond::acc_code::fin_unit_one_split_improved() const + { + if (SPOT_UNLIKELY(is_t() || is_f())) + err: + throw std::runtime_error("fin_unit_one_split_improved(): no Fin"); + const acc_cond::acc_word* pos = &back(); + int selected_fin = has_top_fin(pos); + if (selected_fin >= 0) + { + auto [left, right] = + split_top_fin(pos, {(unsigned) selected_fin}); + return {selected_fin, std::move(left), std::move(right)}; + } + selected_fin = fin_one(); + if (selected_fin < 0) + goto err; + acc_cond::mark_t fo_m = {(unsigned) selected_fin}; + return {selected_fin, extract_fin(pos, fo_m), force_inf(fo_m)}; + } + namespace { bool diff --git a/spot/twa/acc.hh b/spot/twa/acc.hh index 455850f35..1b46e4024 100644 --- a/spot/twa/acc.hh +++ b/spot/twa/acc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2023 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1010,6 +1011,7 @@ namespace spot return res; } +#ifndef SWIG /// \brief Conjunct the current condition with \a r. acc_code operator&(acc_code&& r) const { @@ -1017,6 +1019,7 @@ namespace spot res &= r; return res; } +#endif // SWIG /// \brief Disjunct the current condition in place with \a r. acc_code& operator|=(const acc_code& r) @@ -1105,6 +1108,7 @@ namespace spot return *this; } +#ifndef SWIG /// \brief Disjunct the current condition with \a r. 
acc_code operator|(acc_code&& r) const { @@ -1112,6 +1116,7 @@ namespace spot res |= r; return res; } +#endif // SWIG /// \brief Disjunct the current condition with \a r. acc_code operator|(const acc_code& r) const @@ -1271,7 +1276,8 @@ namespace spot int fin_one() const; /// \brief Return one acceptance set i that appears as `Fin(i)` - /// in the condition, and all disjuncts containing it. + /// in the condition, and all disjuncts containing it with + /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct /// has the shape `...&Fin(i)&...`, then `i` will be prefered @@ -1282,13 +1288,39 @@ namespace spot /// `Fin(i)` have been removed. /// /// For example on - /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` - /// the output would be the pair - /// `(1, Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. - /// On that example `Fin(1)` is prefered to `Fin(7)` because - /// it appears at the top-level. + /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` the + /// output would be the pair, we would select `Fin(1)` over + /// `Fin(7)` because it appears at the top-level. Then we would + /// collect the disjuncts containing `Fin(1)`, that is, + /// `Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. Finally we would + /// replace `Fin(1)` by true and `Inf(1)` by false. The return + /// value would then be `(1, Inf(2)|Inf(5))`. std::pair fin_one_extract() const; + /// \brief Split an acceptance condition, trying to select one + /// unit-Fin. + /// + /// If the condition is a disjunction and one of the disjunct has + /// the shape `...&Fin(i)&...`, then this will return (i, left, + /// right), where left is all disjunct of this form (with Fin(i) + /// replaced by true), and right are all the others. + /// + /// If the input formula has the shape `...&Fin(i)&...` then left + /// is set to the entire formula (with Fin(i) replaced by true), + /// and right is empty. + /// + /// If no disjunct has the right shape, then a random Fin(i) is + /// searched in the formula, and the output (i, left, right). + /// is such that left contains all disjuncts containing Fin(i) + /// (at any depth), and right contains the original formlula + /// where Fin(i) has been replaced by false. + /// @{ + std::tuple + fin_unit_one_split() const; + std::tuple + fin_unit_one_split_improved() const; + /// @} + /// \brief Help closing accepting or rejecting cycle. /// /// Assuming you have a partial cycle visiting all acceptance @@ -2203,7 +2235,8 @@ namespace spot } /// \brief Return one acceptance set i that appears as `Fin(i)` - /// in the condition, and all disjuncts containing it. + /// in the condition, and all disjuncts containing it with + /// Fin(i) changed to true and Inf(i) to false. /// /// If the condition is a disjunction and one of the disjunct /// has the shape `...&Fin(i)&...`, then `i` will be prefered @@ -2214,17 +2247,51 @@ namespace spot /// `Fin(i)` have been removed. /// /// For example on - /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` - /// the output would be the pair - /// `(1, Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. - /// On that example `Fin(1)` is prefered to `Fin(7)` because - /// it appears at the top-level. + /// `Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))` the + /// output would be the pair, we would select `Fin(1)` over + /// `Fin(7)` because it appears at the top-level. Then we would + /// collect the disjuncts containing `Fin(1)`, that is, + /// `Fin(1)&Inf(2)|Inf(5)&(Fin(1)|Fin(7)))`. 
Finally we would + /// replace `Fin(1)` by true and `Inf(1)` by false. The return + /// value would then be `(1, Inf(2)|Inf(5))`. std::pair fin_one_extract() const { auto [f, c] = code_.fin_one_extract(); return {f, {num_sets(), std::move(c)}}; } + /// \brief Split an acceptance condition, trying to select one + /// unit-Fin. + /// + /// If the condition is a disjunction and one of the disjunct has + /// the shape `...&Fin(i)&...`, then this will return (i, left, + /// right), where left is all disjunct of this form (with Fin(i) + /// replaced by true), and right are all the others. + /// + /// If the input formula has the shape `...&Fin(i)&...` then left + /// is set to the entire formula (with Fin(i) replaced by true), + /// and right is empty. + /// + /// If no disjunct has the right shape, then a random Fin(i) is + /// searched in the formula, and the output (i, left, right). + /// is such that left contains all disjuncts containing Fin(i) + /// (at any depth), and right contains the original formlula + /// where Fin(i) has been replaced by false. + /// @{ + std::tuple + fin_unit_one_split() const + { + auto [f, l, r] = code_.fin_unit_one_split(); + return {f, {num_sets(), std::move(l)}, {num_sets(), std::move(r)}}; + } + std::tuple + fin_unit_one_split_improved() const + { + auto [f, l, r] = code_.fin_unit_one_split_improved(); + return {f, {num_sets(), std::move(l)}, {num_sets(), std::move(r)}}; + } + /// @} + /// \brief Return the top-level disjuncts. /// /// For instance, if the formula is diff --git a/spot/twa/bdddict.hh b/spot/twa/bdddict.hh index c9b39d8a5..f9c2ed6df 100644 --- a/spot/twa/bdddict.hh +++ b/spot/twa/bdddict.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011-2017 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2011-2017, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), // Université Pierre et Marie Curie. @@ -78,7 +78,7 @@ namespace spot enum var_type { anon = 0, var, acc }; struct bdd_info { - bdd_info() : type(anon) {} + bdd_info() noexcept: type(anon) {} var_type type; formula f; // Used unless t==anon. ref_set refs; diff --git a/spot/twa/formula2bdd.cc b/spot/twa/formula2bdd.cc index 7596c0759..15434395f 100644 --- a/spot/twa/formula2bdd.cc +++ b/spot/twa/formula2bdd.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2009-2019, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -30,11 +30,14 @@ namespace spot namespace { // Convert a BDD which is known to be a conjonction into a formula. + // If dual is true, dualize the result, i.e., negate literals, and + // exchange ∧ and ∨. + template static formula conj_to_formula(bdd b, const bdd_dict_ptr d) { if (b == bddfalse) - return formula::ff(); + return dual ? formula::tt() : formula::ff(); std::vector v; while (b != bddtrue) { @@ -49,11 +52,14 @@ namespace spot bdd high = bdd_high(b); if (high == bddfalse) { - res = formula::Not(res); + if (!dual) + res = formula::Not(res); b = bdd_low(b); } else { + if (dual) + res = formula::Not(res); // If bdd_low is not false, then b was not a conjunction. 
assert(bdd_low(b) == bddfalse); b = high; @@ -61,7 +67,7 @@ namespace spot assert(b != bddfalse); v.emplace_back(res); } - return formula::And(v); + return dual ? formula::Or(v) : formula::And(v); } } // anonymous @@ -143,7 +149,23 @@ namespace spot minato_isop isop(f); bdd cube; while ((cube = isop.next()) != bddfalse) - v.emplace_back(conj_to_formula(cube, d)); + v.emplace_back(conj_to_formula(cube, d)); return formula::Or(std::move(v)); } + + formula + bdd_to_cnf_formula(bdd f, const bdd_dict_ptr d) + { + if (f == bddtrue) + return formula::tt(); + + std::vector v; + + minato_isop isop(!f); + bdd cube; + while ((cube = isop.next()) != bddfalse) + v.emplace_back(conj_to_formula(cube, d)); + return formula::And(std::move(v)); + } + } diff --git a/spot/twa/formula2bdd.hh b/spot/twa/formula2bdd.hh index 4d5c81a60..a84d27996 100644 --- a/spot/twa/formula2bdd.hh +++ b/spot/twa/formula2bdd.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012, 2013, 2014, 2015 Laboratoire de Recherche et +// Copyright (C) 2012-2015, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2003 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -52,12 +52,21 @@ namespace spot /// \brief Convert a BDD into a formula. /// - /// Format the BDD as an irredundant sum of product (see the - /// minato_isop class for details) and map the BDD variables back - /// into their atomic propositions. This works only for Boolean - /// formulas, and all the BDD variables used in \a f should have - /// been registered in \a d. Although the result has type - /// formula, it obviously does not use any temporal operator. + /// Format the BDD as a Boolean spot::formula object. This works only + /// for Boolean formulas, and all the BDD variables used in \a f + /// should have been registered in \a d. Although the result has + /// type formula, it obviously does not use any temporal operator. + /// + /// The bdd_to_formula() version produces an irredundant sum of + /// product (see the minato_isop class for details) and map the BDD + /// variables back into their atomic propositions. + /// + /// The bdd_to_cnf_formula() version produces an irredundant product of + /// sum, using the dual construction. + /// @{ SPOT_API formula bdd_to_formula(bdd f, const bdd_dict_ptr d); + SPOT_API + formula bdd_to_cnf_formula(bdd f, const bdd_dict_ptr d); + /// @} } diff --git a/spot/twa/twa.hh b/spot/twa/twa.hh index cb1e208ec..819a90962 100644 --- a/spot/twa/twa.hh +++ b/spot/twa/twa.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011, 2013-2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011, 2013-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). 
// Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 // (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -761,7 +761,7 @@ namespace spot void register_aps_from_dict() { if (!aps_.empty()) - throw std::runtime_error("register_ap_from_dict() may not be" + throw std::runtime_error("register_aps_from_dict() may not be" " called on an automaton that has already" " registered some AP"); auto& m = get_dict()->bdd_map; diff --git a/spot/twa/twagraph.cc b/spot/twa/twagraph.cc index 051514550..197ea190c 100644 --- a/spot/twa/twagraph.cc +++ b/spot/twa/twagraph.cc @@ -21,13 +21,90 @@ #include #include #include +#include #include +#include #include +#include #include #include using namespace std::string_literals; +namespace +{ + using namespace spot; + // If LAST is false, + // it is guaranteed that there will be another src state + template + void treat(std::vector>& e_idx, + const twa_graph::graph_t::edge_vector_t& e_vec, + std::vector& e_chain, + std::vector& hash_of_state, + unsigned& idx, + unsigned s, + unsigned n_e) + { + assert(s < e_idx.size()); + assert(idx < e_vec.size()); + assert(e_chain.size() == e_vec.size()); + + //std::cout << s << "; " << idx << std::endl; + + // Check if this state has outgoing transitions + if (s != e_vec[idx].src) + // Nothing to do + { + assert(!LAST); + return; + } + + auto& s_idx = e_idx[s]; + s_idx[0] = idx; + + // helper + unsigned sub_idx[] = {-1u, -1u}; + + // All transitions of this state + while (true) + { + assert(idx < e_vec.size() + LAST); + if constexpr (!LAST) + { + if (e_vec[idx].src != s) + break; + } + else + { + if (idx == n_e) + break; + } + + // Argh so many ifs + unsigned which = e_vec[idx].src == e_vec[idx].dst; + if (sub_idx[which] == -1u) + { + // First non-selflooping + sub_idx[which] = idx; + s_idx[1u+which] = idx; + } + else + { + // Continue the chained list + e_chain[sub_idx[which]] = idx; + sub_idx[which] = idx; + } + ++idx; + } + s_idx[3] = idx; + + // Check if self-loops appeared. We cannot hash + // states with self-loops. + if (s_idx[2] != -1u) + hash_of_state[s] = fnv::init; + } +} + namespace spot { @@ -164,11 +241,15 @@ namespace spot // them. }); + bool is_state_acc = this->prop_state_acc().is_true(); + unsigned out = 0; unsigned in = 1; // Skip any leading false edge. - while (in < tend && trans[in].cond == bddfalse) + while (in < tend + && trans[in].cond == bddfalse + && (!is_state_acc || trans[in].src != trans[in].dst)) ++in; if (in < tend) { @@ -177,7 +258,9 @@ namespace spot trans[out] = trans[in]; for (++in; in < tend; ++in) { - if (trans[in].cond == bddfalse) // Unusable edge + if (trans[in].cond == bddfalse + && (!is_state_acc + || trans[in].src != trans[in].dst)) // Unusable edge continue; // Merge edges with the same source, destination, and // colors. 
(We test the source last, because this is the @@ -289,85 +372,367 @@ namespace spot g_.chain_edges_(); } - unsigned twa_graph::merge_states() + unsigned twa_graph::merge_states(parallel_policy ppolicy) { if (!is_existential()) throw std::runtime_error( "twa_graph::merge_states() does not work on alternating automata"); +#ifdef ENABLE_PTHREAD + const unsigned nthreads = ppolicy.nthreads(); +#else + (void) ppolicy; + constexpr unsigned nthreads = 1; +#endif + typedef graph_t::edge_storage_t tr_t; - g_.sort_edges_([](const tr_t& lhs, const tr_t& rhs) + g_.sort_edges_srcfirst_([](const tr_t& lhs, const tr_t& rhs) { - if (lhs.src < rhs.src) - return true; - if (lhs.src > rhs.src) - return false; + assert(lhs.src == rhs.src); if (lhs.acc < rhs.acc) return true; if (lhs.acc > rhs.acc) return false; + // compare with id? if (bdd_less_than_stable lt; lt(lhs.cond, rhs.cond)) return true; if (rhs.cond != lhs.cond) return false; - // The destination must be sorted last - // for our self-loop optimization to work. return lhs.dst < rhs.dst; - }); + }, nthreads); g_.chain_edges_(); + const unsigned n_states = num_states(); + + // Edges are nicely chained and there are no erased edges + // -> We can work with the edge_vector + + // Check if it is a game <-> "state-player" is defined. If + // so, we can only merge states that belong to the same player. + // (We will use two hash maps in this case.) + auto sp = get_named_prop>("state-player"); + + // The hashing is a bit delicat: We may only use the dst if it has + // no self-loop. HASH_OF_STATE stores the hash associated to each + // state (by default its own number) or some common value if the + // state contains self-loop. + std::vector hash_of_state; + hash_of_state.reserve(n_states); + for (unsigned i = 0; i < n_states; ++i) + hash_of_state.push_back(i); + + const auto& e_vec = edge_vector(); + unsigned n_edges = e_vec.size(); + + // For each state we need 4 indices of the edge vector + // [first, first_non_sfirst_selflooplfloop, first_selfloop, end] + // The init value makes sure nothing is done for dead end states + std::vector> e_idx(n_states, {-1u, -1u, + -1u, -1u}); + // Like a linked list holding the non-selfloop and selfloop transitions + std::vector e_chain(n_edges, -1u); + + unsigned idx = 1; + + // Edges are sorted with respect to src first + const unsigned n_high = e_vec.back().src; + for (auto s = 0u; s < n_high; ++s) + treat(e_idx, e_vec, e_chain, + hash_of_state, idx, s, n_edges); + // Last one + treat(e_idx, e_vec, e_chain, + hash_of_state, idx, n_high, n_edges); + + assert(idx == e_vec.size() && "Something went wrong during indexing"); + + unsigned n_player1 = 0u; + if (sp) + n_player1 = std::accumulate(sp->begin(), sp->end(), 0u); + + // Represents which states share a hash + // Head is in the unordered_map, + // hash_linked_list is like a linked list structure + // of fake pointers + + std::vector hash_linked_list(n_states, -1u); + typedef robin_hood::unordered_flat_map> player_map; + // If the automaton is not a game, everything is assumed to be + // owned by player 0. + player_map map0; // for player 0 + player_map map1; // for player 1 + + map0.reserve(n_states - n_player1); + map1.reserve(n_player1); + + // Sadly we need to loop the edges twice since we have + // to check for self-loops before hashing + + auto emplace = [&hash_linked_list](auto& m, auto h, auto s) + { + auto [it, inserted] = m.try_emplace(h, std::make_pair(s, s)); + if (!inserted) + { + // We already have an entry with hash "h". Link it + // to the new state. 
+ unsigned idx = it->second.second; // tail of the list + assert(idx < s && "Must be monotone"); + hash_linked_list[idx] = s; + it->second.second = s; + } + }; + + // Hash all states + constexpr unsigned shift = sizeof(size_t)/2 * CHAR_BIT; + for (auto s = 0u; s != n_states; ++s) + { + size_t h = fnv::init; + const unsigned e = e_idx[s][3]; + for (unsigned i = e_idx[s][0]; i != e; ++i) + { + // If size_t has 8byte and unsigned has 4byte + // then this works fine, otherwise there might be more collisions + size_t hh = hash_of_state[e_vec[i].dst]; + hh <<= shift; + hh += e_vec[i].cond.id(); + h ^= hh; + h *= fnv::prime; + h ^= e_vec[i].acc.hash(); + h *= fnv::prime; + } + if (sp && (*sp)[s]) + emplace(map1, h, s); + else + emplace(map0, h, s); + } + // All states that might possible be merged share the same hash + // Info hash coll + //std::cout << "Hash collission rate pre merge: " + // << ((map0.size()+map1.size())/((float)n_states)) + // << '\n'; + + + // Check whether we can merge two states + // and takes into account the self-loops + auto state_equal = [&e_vec, &e_chain, &e_idx](unsigned s1, unsigned s2, + std::vector& checked1, + std::vector& checked2) + { + auto edge_data_comp = [](const auto& lhs, + const auto& rhs) + { + if (lhs.acc < rhs.acc) + return true; + if (lhs.acc > rhs.acc) + return false; + // todo compare with id + if (bdd_less_than_stable lt; lt(lhs.cond, rhs.cond)) + return true; + return false; + }; + + auto [i1, nsl1, sl1, e1] = e_idx[s1]; + auto [i2, nsl2, sl2, e2] = e_idx[s2]; + + unsigned n_trans = e1 - i1; + if ((e2 - i2) != n_trans) + return false; // Different number of outgoing trans + + // checked1/2 is one element larger than necessary; + // the last element (false) serves as a sentinel. + checked1.clear(); + checked1.resize(n_trans + 1, false); + checked2.clear(); + checked2.resize(n_trans + 1, false); + + // Try to match self-loops + unsigned self_loops_matched = 0; + while ((sl1 < e1) && (sl2 < e2)) + { + auto& data1 = e_vec[sl1].data(); + auto& data2 = e_vec[sl2].data(); + if (data1 == data2) + { + // Matched + ++self_loops_matched; + checked1[sl1 - i1] = true; //never touches last element + checked2[sl2 - i2] = true; + // Advance both + sl1 = e_chain[sl1]; + sl2 = e_chain[sl2]; + } + // Since edges are ordered on each side, aadvance + // the smallest side in case there is no match. + else if (edge_data_comp(data1, data2)) + sl1 = e_chain[sl1]; + else + sl2 = e_chain[sl2]; + } + + // If the matched self-loops cover all transitions, we can + // stop here. + if (self_loops_matched == n_trans) + return true; + + // The remaining edges need to match exactly + unsigned idx1 = i1; + unsigned idx2 = i2; + while (((idx1 < e1) && (idx2 < e2))) + { + // More efficient version? 
+ // Skip checked edges + // Last element serves as break + while (checked1[idx1 - i1]) + ++idx1; + while (checked2[idx2 - i2]) + ++idx2; + // If one is out of bounds, so is the other + if (idx1 == e1) + { + assert(idx2 == e2); + break; + } + + if ((e_vec[idx1].dst != e_vec[idx2].dst) + || !(e_vec[idx1].data() == e_vec[idx2].data())) + return false; + + // Advance + ++idx1; + ++idx2; + } + // All edges have bee paired + return true; + }; + const unsigned nb_states = num_states(); std::vector remap(nb_states, -1U); - for (unsigned i = 0; i != nb_states; ++i) - { - auto out1 = out(i); - for (unsigned j = 0; j != i; ++j) - { - auto out2 = out(j); - if (std::equal(out1.begin(), out1.end(), out2.begin(), out2.end(), - [](const edge_storage_t& a, - const edge_storage_t& b) - { return ((a.dst == b.dst - || (a.dst == a.src && b.dst == b.src)) - && a.data() == b.data()); })) - { - remap[i] = (remap[j] != -1U) ? remap[j] : j; - // Because of the special self-loop tests we use above, - // it's possible that i can be mapped to remap[j] even - // if j was last compatible states found. Consider the - // following cases, taken from an actual test case: - // 18 is equal to 5, 35 is equal to 18, but 35 is not - // equal to 5. - // - // State: 5 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 18 {1} - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - // - // State: 18 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 18 {1} // self-loop - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - // - // State: 35 - // [0&1&2] 8 {3} - // [!0&1&2] 10 {1} - // [!0&!1&!2] 35 {1} // self-loop - // [!0&!1&2] 19 {1} - // [!0&1&!2] 20 {1} - break; - } + // Check all pair of states with compatible hash + auto check_ix = [&](unsigned ix, std::vector& v, + std::vector& checked1, + std::vector& checked2) + { + if (hash_linked_list[ix] == -1U) // no compatible state + return; + + v.clear(); + for (unsigned i = ix; i != -1U; i = hash_linked_list[i]) + v.push_back(i); + const unsigned vs = v.size(); + + for (unsigned idx = 0; idx < vs; ++idx) + { + unsigned i = v[idx]; + for (unsigned jdx = 0; jdx < idx; ++jdx) + { + unsigned j = v[jdx]; + if (state_equal(j, i, checked1, checked2)) + { + remap[i] = (remap[j] != -1U) ? remap[j] : j; + + // Because of the special self-loop tests we use + // above, it's possible that i can be mapped to + // remap[j] even if j was the last compatible + // state found. Consider the following cases, + // taken from an actual test case: 18 is equal to + // 5, 35 is equal to 18, but 35 is not equal to 5. + // + // State: 5 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 18 {1} + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + // + // State: 18 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 18 {1} // self-loop + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + // + // State: 35 + // [0&1&2] 8 {3} + // [!0&1&2] 10 {1} + // [!0&!1&!2] 35 {1} // self-loop + // [!0&!1&2] 19 {1} + // [!0&1&!2] 20 {1} + break; + } + } } - } + }; + + auto upd = [](auto& b, const auto&e, unsigned it) + { + while ((it > 0) & (b != e)) + { + --it; + ++b; + } + }; + + auto worker = [&upd, check_ix, nthreads](unsigned pid, + auto beg1, auto end1, + auto beg0, auto end0) + { + // Temporary storage for list of edges to reduce cache misses + std::vector v; + // Vector reused by all invocations of state_equal to mark edges + // that have been matched already. 
+ std::vector checked1; + std::vector checked2; + upd(beg1, end1, pid); + upd(beg0, end0, pid); + for (; beg1 != end1; upd(beg1, end1, nthreads)) + check_ix(beg1->second.first, v, checked1, checked2); + for (; beg0 != end0; upd(beg0, end0, nthreads)) + check_ix(beg0->second.first, v, checked1, checked2); + }; + + { + auto beg1 = map1.begin(); + auto end1 = map1.end(); + auto beg0 = map0.begin(); + auto end0 = map0.end(); + +#ifndef ENABLE_PTHREAD + (void) nthreads; +#else + if (nthreads <= 1) + { +#endif // ENABLE_PTHREAD + worker(0, beg1, end1, beg0, end0); +#ifdef ENABLE_PTHREAD + } + else + { + static auto tv = std::vector(); + assert(tv.empty()); + tv.resize(nthreads); + for (unsigned pid = 0; pid < nthreads; ++pid) + tv[pid] = std::thread( + [worker, pid, beg1, end1, beg0, end0]() + { + worker(pid, beg1, end1, beg0, end0); + return; + }); + for (auto& t : tv) + t.join(); + tv.clear(); + } +#endif // ENABLE_PTHREAD + } for (auto& e: edges()) if (remap[e.dst] != -1U) - e.dst = remap[e.dst]; + { + assert((!sp || (sp->at(e.dst) == sp->at(remap[e.dst]))) + && "States do not have the same owner"); + e.dst = remap[e.dst]; + } if (remap[get_init_state_number()] != -1U) set_init_state(remap[get_init_state_number()]); @@ -382,6 +747,10 @@ namespace spot unsigned merged = num_states() - st; if (merged) defrag_states(remap, st); + // Info hash coll 2 + //std::cout << "Hash collission rate post merge: " + // << ((map0.size()+map1.size())/((float)num_states())) + // << '\n'; return merged; } @@ -465,7 +834,7 @@ namespace spot comp_classes_.clear(); // get all compatible classes // Candidate classes share a hash - // A state is compatible to a class if it is compatble + // A state is compatible to a class if it is compatible // to any of its states auto& cand_classes = equiv_class_[hi]; unsigned n_c_classes = cand_classes.size(); @@ -942,6 +1311,22 @@ namespace spot s = newst[s]; } } + // Reassign the state-players + if (auto sp = get_named_prop>("state-player")) + { + const auto ns = (unsigned) used_states; + const auto sps = (unsigned) sp->size(); + assert(ns <= sps); + assert(sps == newst.size()); + + for (unsigned i = 0; i < sps; ++i) + { + if (newst[i] == -1u) + continue; + (*sp)[newst[i]] = (*sp)[i]; + } + sp->resize(ns); + } init_number_ = newst[init_number_]; g_.defrag_states(newst, used_states); } @@ -1273,6 +1658,17 @@ namespace spot return p.first->second; }; + // If the input is a kripke_graph and the number of states is + // not restricted, predeclare all states to keep their + // numbering, and also copy unreachable states. + if (max_states == -1U) + if (auto kg = std::dynamic_pointer_cast(aut)) + { + unsigned ns = kg->num_states(); + for (unsigned s = 0; s < ns; ++s) + new_state(kg->state_from_number(s)); + } + out->set_init_state(new_state(aut->get_init_state())); while (!todo.empty()) { @@ -1311,7 +1707,6 @@ namespace spot } } - auto s = seen.begin(); while (s != seen.end()) { diff --git a/spot/twa/twagraph.hh b/spot/twa/twagraph.hh index c6222871e..742a4d69a 100644 --- a/spot/twa/twagraph.hh +++ b/spot/twa/twagraph.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -589,8 +589,12 @@ namespace spot /// (1)-a->(1) and (1)-a->(1) if (1), (2) and (3) are merged into /// (1). 
/// + /// On large automaton, it might be worthwhile to use multiple + /// threads to find states that can be merged. This can be + /// requested with the \a ppolicy argument. + /// /// \return the number of states that have been merged and removed. - unsigned merge_states(); + unsigned merge_states(parallel_policy ppolicy = parallel_policy()); /// \brief Like merge states, but one can chose which states are /// candidates for merging. diff --git a/spot/twaalgos/Makefile.am b/spot/twaalgos/Makefile.am index ff71982b5..6a8fbe6b7 100644 --- a/spot/twaalgos/Makefile.am +++ b/spot/twaalgos/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2008-2018, 2020-2021 Laboratoire de Recherche et +## Copyright (C) 2008-2018, 2020-2022 Laboratoire de Recherche et ## Développement de l'Epita (LRDE). ## Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6 ## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -42,6 +42,7 @@ twaalgos_HEADERS = \ contains.hh \ copy.hh \ cycles.hh \ + dbranch.hh \ degen.hh \ determinize.hh \ dot.hh \ @@ -98,6 +99,7 @@ twaalgos_HEADERS = \ totgba.hh \ toweak.hh \ translate.hh \ + translate_aa.hh \ word.hh \ zlktree.hh @@ -115,6 +117,7 @@ libtwaalgos_la_SOURCES = \ compsusp.cc \ contains.cc \ cycles.cc \ + dbranch.cc \ degen.cc \ determinize.cc \ dot.cc \ @@ -171,6 +174,7 @@ libtwaalgos_la_SOURCES = \ totgba.cc \ toweak.cc \ translate.cc \ + translate_aa.cc \ word.cc \ zlktree.cc diff --git a/spot/twaalgos/aiger.cc b/spot/twaalgos/aiger.cc index d846e678c..af255a167 100644 --- a/spot/twaalgos/aiger.cc +++ b/spot/twaalgos/aiger.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -41,6 +42,13 @@ #define STR_(x) STR(x) #define STR_LINE STR_(__LINE__) +//#define TRACE +#ifdef TRACE +# define trace std::cerr +#else +# define trace while (0) std::cerr +#endif + namespace { using namespace spot; @@ -459,6 +467,7 @@ namespace spot aig::roll_back_(safe_point sf, bool do_stash) { // todo specialise for safe_all? + trace << "Roll back to sf: " << sf.first << "; " << sf.second << '\n'; safe_stash ss; auto& [gates, vardict, negs] = ss; if (do_stash) @@ -480,6 +489,7 @@ namespace spot // Copy the gates std::copy(and_gates_.begin()+sf.second, and_gates_.end(), gates.begin()); + trace << "Safed " << gates.size() << '\n'; } // 1. Delete all literals // max_var_old was used before @@ -489,6 +499,8 @@ namespace spot // 2. 
Set back the gates and_gates_.erase(and_gates_.begin() + sf.second, and_gates_.end()); max_var_ = sf.first; + trace << "After rollback: \n" << and_gates_.size() << " gates and\n" + << max_var_ << " variables\n\n"; return ss; } @@ -497,6 +509,8 @@ namespace spot { // Do some check_ups auto& [gates, vardict, _] = ss; + trace << "Reapplying sf: " << sf.first << "; " << sf.second + << "\nwith " << gates.size() << " additional gates.\n\n"; assert(gates.size() == vardict.size()); assert(sf.first == max_var_); assert(sf.second == and_gates_.size()); @@ -511,6 +525,7 @@ namespace spot and_gates_.insert(and_gates_.end(), gates.begin(), gates.end()); max_var_ = new_max_var_; + trace << "New Ngates: " << num_gates() << '\n'; } void aig::set_output(unsigned i, unsigned v) @@ -622,19 +637,44 @@ namespace spot // De-morgan // !(!v&low | v&high) = !(!v&low) & !(v&high) // !v&low | v&high = !(!(!v&low) & !(v&high)) + // note that if low or high are T + // we can simplify the formula + // given that low / high is T + // then !(!v&low) & !(v&high) can be simplified to + // !v&low | v&high = !v | high / low | v + // = !(v & !high) / !(!low & !v) + // The case when low / high is ⊥ is automatically treated auto b_it = bdd2var_.find(b.id()); if (b_it != bdd2var_.end()) return b_it->second; - // todo -// unsigned v_var = bdd2var_.at(bdd_var(b)); unsigned v_var = bdd2var_.at(bdd_ithvar(bdd_var(b)).id()); unsigned b_branch_var[2] = {bdd2INFvar(bdd_low(b)), bdd2INFvar(bdd_high(b))}; - unsigned r = aig_not(aig_and(v_var, b_branch_var[1])); - unsigned l = aig_not(aig_and(aig_not(v_var), b_branch_var[0])); + unsigned l; + unsigned r; + + if (b_branch_var[0] == aig_true()) + { + // low == T + l = v_var; + r = aig_not(b_branch_var[1]); + } + else if (b_branch_var[1] == aig_true()) + { + // high == T + l = aig_not(b_branch_var[0]); + r = aig_not(v_var); + } + else + { + // General case + r = aig_not(aig_and(v_var, b_branch_var[1])); + l = aig_not(aig_and(aig_not(v_var), b_branch_var[0])); + } + return aig_not(aig_and(l, r)); } @@ -698,7 +738,6 @@ namespace spot while ((prod = cond.next()) != bddfalse) plus_vars_.push_back(cube2var_(prod, use_split_off == 2 ? 0 : use_split_off)); - // Done building -> sum them return aig_or(plus_vars_); } @@ -709,11 +748,20 @@ namespace spot { // Before doing anything else, let us check if one the variables // already exists in which case we are done +#ifdef TRACE + trace << "encoding one of \n"; + for (const auto& c: c_alt) + trace << c << '\n'; +#endif + for (const bdd& cond : c_alt) { auto it = bdd2var_.find(cond.id()); if (it != bdd2var_.end()) - return it->second; + { + trace << "Condition already encoded -> Direct return\n\n"; + return it->second; + } } safe_point sf = get_safe_point_(); @@ -732,9 +780,6 @@ namespace spot && "Cannot convert the given method. " "Only 0,1 and 2 are currently supported"); - const auto negate = use_dual ? std::vector{false} - : std::vector{false, true}; - auto enc_1 = [&](const bdd& b, const char m) { @@ -751,41 +796,60 @@ namespace spot std::vector cond_parts; std::vector cond_vars; - for (bool do_negate : negate) - for (const bdd& b : c_alt) - { - bdd b_used = do_negate ? bdd_not(b) : b; - cond_parts.clear(); - split_cond_(b_used, - use_split_off != 1 ? use_split_off : 0, cond_parts); + //for (bool do_negate : (use_dual ? 
std::initializer_list{false, true} + // : std::initializer_list{false})) + for (unsigned neg_counter = 0; neg_counter <= 0 + use_dual; ++neg_counter) + { + bool do_negate = neg_counter; + for (const bdd& b : c_alt) + { + bdd b_used = do_negate ? bdd_not(b) : b; + cond_parts.clear(); + split_cond_(b_used, + use_split_off != 1 ? use_split_off : 0, cond_parts); - for (auto m : used_m) - { - cond_vars.clear(); - for (const bdd& cpart : cond_parts) - { - cond_vars.push_back(enc_1(cpart, m)); - if (num_gates() >= ngates_min) - break; // Cannot be optimal - } - // Compute the and if there is still hope - unsigned this_res = -1u; - if (num_gates() < ngates_min) - this_res = aig_and(cond_vars); - - if (num_gates() < ngates_min) - { - // This is the new best - res_var = do_negate ? aig_not(this_res) : this_res; - ngates_min = num_gates(); - ss_min = roll_back_(sf, true); - } - else - // Reset the computations - roll_back_(sf, false); - } // Encoding styles - } // alternatives - // end do_negate + for (auto m : used_m) + { + cond_vars.clear(); + for (const bdd& cpart : cond_parts) + { + cond_vars.push_back(enc_1(cpart, m)); + if (num_gates() >= ngates_min) + break; // Cannot be optimal + } + // Compute the and if there is still hope + auto this_res = -1u; + if (num_gates() < ngates_min) + this_res = aig_and(cond_vars); + // Check if after adding these gates + // the circuit is still smaller + if (num_gates() < ngates_min) + { + // This is the new best + assert(this_res != -1u); + res_var = do_negate ? aig_not(this_res) : this_res; + ngates_min = num_gates(); + trace << "Found new best encoding with\nneg: " + << do_negate << "\nmethod: " << (m == 0 ? "INF" + : "ISOP") + << "\nalt: " << b + << "\nNgates: " << num_gates() << "\n\n"; + ss_min = roll_back_(sf, true); + } + else + // Reset the computations + { + trace << "Method \nneg: " + << do_negate << "\nmethod: " << (m == 0 ? "INF" + : "ISOP") + << "\nalt: " << b + << "\nNgates: " << num_gates() + << " discarded.\n\n"; + roll_back_(sf, false); + } + } // Encoding styles + } // alternatives + } // end do_negate // Reapply the best result reapply_(sf, ss_min); @@ -1359,7 +1423,7 @@ namespace spot { unsigned var_g = gate_var(i); state_[var_g] = state_[and_gates_[i].first] - & state_[and_gates_[i].second]; + && state_[and_gates_[i].second]; state_[aig_not(var_g)] = !state_[var_g]; } // Update latches @@ -1753,6 +1817,7 @@ namespace bool use_dual = false; bool use_dontcare = false; int use_split_off = 0; + std::string s; }; auto to_treat = [&mode]() @@ -1766,6 +1831,8 @@ namespace while (std::getline(s, buffer, ',')) { tr_opt this_opt; + // Store raw info + this_opt.s = buffer; std::stringstream s2; s2 << buffer; std::getline(s2, buffer2, '+'); @@ -1865,15 +1932,16 @@ namespace }; // Create the vars - std::vector alt_conds(amodedescr.use_dontcare ? 
1 : 2); for (unsigned i = 0; i < n_outs; ++i) { + trace << "Assign out " << i << '\n'; if (circuit.num_gates() > min_gates) break; circuit.set_output(i, bdd2var(out[i], out_dc[i])); } for (unsigned i = 0; i < n_latches; ++i) { + trace << "Assign latch " << i << '\n'; if (circuit.num_gates() > min_gates) break; circuit.set_next_latch(i, bdd2var(latch[i], bddfalse)); @@ -1883,6 +1951,8 @@ namespace // Overwrite the stash if we generated less gates if (circuit.num_gates() < min_gates) { + trace << "New best mode: " << amodedescr.s + << " with Ngates: " << circuit.num_gates() << '\n'; min_gates = circuit.num_gates(); ss = circuit.roll_back_(sf, true); bdd2var_min = bdd2var; @@ -1892,6 +1962,8 @@ namespace } //Use the best sol circuit.reapply_(sf, ss); + trace << "Finished encoding, reasssigning\n" + << "Final gate count is " << circuit.num_gates() << '\n'; // Reset them for (unsigned i = 0; i < n_outs; ++i) circuit.set_output(i, bdd2var_min(out[i], out_dc[i])); diff --git a/spot/twaalgos/alternation.cc b/spot/twaalgos/alternation.cc index a3762f9b0..694796c4b 100644 --- a/spot/twaalgos/alternation.cc +++ b/spot/twaalgos/alternation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2016-2019, 2021, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -26,8 +26,8 @@ namespace spot { - outedge_combiner::outedge_combiner(const twa_graph_ptr& aut) - : aut_(aut), vars_(bddtrue) + outedge_combiner::outedge_combiner(const twa_graph_ptr& aut, unsigned sink) + : aut_(aut), vars_(bddtrue), acc_sink_(sink) { } @@ -36,7 +36,8 @@ namespace spot aut_->get_dict()->unregister_all_my_variables(this); } - bdd outedge_combiner::operator()(unsigned st) + bdd outedge_combiner::operator()(unsigned st, const std::vector& dst_filter, + bool remove_original_edges) { const auto& dict = aut_->get_dict(); bdd res = bddtrue; @@ -45,9 +46,27 @@ namespace spot bdd res2 = bddfalse; for (auto& e: aut_->out(d1)) { + // handle edge filtering + if (!dst_filter.empty()) + { + // if any edge destination is an accepting state in the SERE + // automaton, handle the edge, otherwise skip it + auto univ_dests = aut_->univ_dests(e.dst); + if (std::all_of(univ_dests.begin(), univ_dests.end(), + [&](unsigned dst) + { + return std::find(dst_filter.begin(), dst_filter.end(), dst) + == dst_filter.end(); + })) + continue; + } + bdd out = bddtrue; for (unsigned d: aut_->univ_dests(e.dst)) { + if (d == acc_sink_) + continue; + auto p = state_to_var.emplace(d, 0); if (p.second) { @@ -59,7 +78,11 @@ namespace spot out &= bdd_ithvar(p.first->second); } res2 |= e.cond & out; + + if (remove_original_edges) + e.cond = bddfalse; } + res &= res2; } return res; @@ -76,7 +99,17 @@ namespace spot { bdd cond = bdd_exist(cube, vars_); bdd dest = bdd_existcomp(cube, vars_); - while (dest != bddtrue) + + if (dest == bddtrue) + { + // if dest is bddtrue then the accepting sink is the only + // destination for this edge, in that case don't filter it out + assert(acc_sink_ != -1u); + aut_->new_edge(st, acc_sink_, cond); + continue; + } + + do { assert(bdd_low(dest) == bddfalse); auto it = var_to_state.find(bdd_var(dest)); @@ -84,6 +117,8 @@ namespace spot univ_dest.push_back(it->second); dest = bdd_high(dest); } + while (dest != bddtrue); + std::sort(univ_dest.begin(), univ_dest.end()); aut_->new_univ_edge(st, univ_dest.begin(), univ_dest.end(), cond); univ_dest.clear(); @@ -457,12 +492,10 @@ namespace spot // 
First loop over all possible valuations atomic properties. for (bdd oneletter: minterms_of(all_letters, ap)) { - minato_isop isop(bs & oneletter); - bdd cube; - while ((cube = isop.next()) != bddfalse) + minato_isop isop(bdd_restrict(bs, oneletter)); + bdd dest; + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_vars_); - bdd dest = bdd_existcomp(cube, all_vars_); v.clear(); acc_cond::mark_t m = bdd_to_state(dest, v); @@ -491,7 +524,7 @@ namespace spot unsigned d = new_state(v, has_mark); if (has_mark) m.set(0); - res->new_edge(s, d, cond, all_marks - m); + res->new_edge(s, d, oneletter, all_marks - m); } } } @@ -576,7 +609,8 @@ namespace spot bdd all_states_; bdd ap_; bdd all_letters_; - bdd transition_; + bdd dest_; + bdd cond_; minato_isop isop_; const std::map& var_to_state_; univ_remover_state* dst_; @@ -587,8 +621,8 @@ namespace spot const std::vector& state_to_var, const std::map& var_to_state, bdd all_states) - : transitions_(bddtrue), all_states_(all_states), transition_(bddfalse), - isop_(bddfalse), var_to_state_(var_to_state) + : transitions_(bddtrue), all_states_(all_states), dest_(bddfalse), + cond_(bddfalse), isop_(bddfalse), var_to_state_(var_to_state) { // Build the bdd transitions_, from which we extract the successors. for (unsigned s : state->states()) @@ -627,20 +661,20 @@ namespace spot void one_transition() { - transition_ = isop_.next(); - if (transition_ != bddfalse || all_letters_ != bddfalse) + dest_ = isop_.next(); + if (dest_ != bddfalse || all_letters_ != bddfalse) { // If it was the last transition, try the next letter. - if (transition_ == bddfalse) + if (dest_ == bddfalse) { bdd oneletter = bdd_satoneset(all_letters_, ap_, bddfalse); + cond_ = oneletter; all_letters_ -= oneletter; // Get a sum of possible transitions matching this letter. 
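This hunk (and similar ones below) replaces the pattern minato_isop(f & oneletter) followed by bdd_exist/bdd_existcomp with minato_isop(bdd_restrict(f, oneletter)): once the letter valuation is substituted into the transition relation, every ISOP cube contains only destination variables, and the letter itself can serve as the edge condition. A minimal BuDDy-only sketch of the equivalence; the variable layout and the header name are assumptions, and minato_isop is left out:

#include <bddx.h>   // Spot's bundled BuDDy; plain BuDDy installs <bdd.h>

int main()
{
  bdd_init(1000, 1000);
  bdd_setvarnum(4);
  bdd a = bdd_ithvar(0), b = bdd_ithvar(1);    // atomic propositions
  bdd q1 = bdd_ithvar(2), q2 = bdd_ithvar(3);  // destination-state variables
  bdd trans = (a & q1) | (!a & b & q2);        // toy transition relation
  bdd letter = a & !b;                         // one minterm over {a, b}
  // Old style: conjoin, then quantify the propositions away afterwards.
  bdd dest_old = bdd_exist(trans & letter, a & b);
  // New style: substitute the valuation; only state variables remain.
  bdd dest_new = bdd_restrict(trans, letter);
  int ok = (dest_old == dest_new);             // both denote {q1} here
  bdd_done();
  return ok ? 0 : 1;
}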
- isop_ = minato_isop(oneletter & transitions_); - transition_ = isop_.next(); + isop_ = minato_isop(bdd_restrict(transitions_, oneletter)); + dest_ = isop_.next(); } - bdd dest_bdd = bdd_exist(transition_, ap_); - std::set dest = bdd_to_state(dest_bdd); + std::set dest = bdd_to_state(dest_); dst_ = new univ_remover_state(dest); } } @@ -648,18 +682,18 @@ namespace spot virtual bool first() override { one_transition(); - return transition_ != bddfalse; + return dest_ != bddfalse; } virtual bool next() override { one_transition(); - return transition_ != bddfalse; + return dest_ != bddfalse; } virtual bool done() const override { - return transition_ == bddfalse && all_letters_ == bddfalse; + return dest_ == bddfalse && all_letters_ == bddfalse; } virtual const state* dst() const override @@ -669,7 +703,7 @@ namespace spot virtual bdd cond() const override { - return bdd_exist(transition_, all_states_); + return cond_; } virtual acc_cond::mark_t acc() const override diff --git a/spot/twaalgos/alternation.hh b/spot/twaalgos/alternation.hh index a4665aacf..0017f27bb 100644 --- a/spot/twaalgos/alternation.hh +++ b/spot/twaalgos/alternation.hh @@ -50,10 +50,12 @@ namespace spot std::map state_to_var; std::map var_to_state; bdd vars_; + unsigned acc_sink_; public: - outedge_combiner(const twa_graph_ptr& aut); + outedge_combiner(const twa_graph_ptr& aut, unsigned sink = -1u); ~outedge_combiner(); - bdd operator()(unsigned st); + bdd operator()(unsigned st, const std::vector& dst_filter = std::vector(), + bool remove_original_edges = false); void new_dests(unsigned st, bdd out) const; }; diff --git a/spot/twaalgos/cobuchi.cc b/spot/twaalgos/cobuchi.cc index 783cd0903..23d4871a0 100644 --- a/spot/twaalgos/cobuchi.cc +++ b/spot/twaalgos/cobuchi.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
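The outedge_combiner changes above add an optional accepting sink, a destination filter, and the possibility to blank out the original edges. A hypothetical usage sketch; aut, src, src2, sink, and good_states are placeholders, and the filter is assumed to be a std::vector of state numbers (the template argument was stripped in the hunk above):

#include <vector>
#include <spot/twaalgos/alternation.hh>

void combine_successors(spot::twa_graph_ptr aut, unsigned src, unsigned src2,
                        unsigned sink, const std::vector<unsigned>& good_states)
{
  spot::outedge_combiner oc(aut, sink);
  // Encode the successors of src as a BDD (destinations become variables).
  bdd succ1 = oc(src);
  // Same for src2, but keep only edges reaching good_states and erase the
  // originals (their conditions are set to bddfalse).
  bdd succ2 = oc(src2, good_states, /*remove_original_edges=*/true);
  // Conjunction = universal branching into both successor sets; edges whose
  // only destination is the accepting sink are re-created by new_dests().
  oc.new_dests(src, succ1 & succ2);
}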
@@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -338,23 +339,26 @@ namespace spot twa_graph_ptr to_nca(const_twa_graph_ptr aut, bool named_states) { - if (aut->acc().is_co_buchi()) + const acc_cond& acc = aut->acc(); + if (acc.is_co_buchi()) return make_twa_graph(aut, twa::prop_set::all()); if (auto weak = weak_to_cobuchi(aut)) return weak; + if (acc.is_generalized_co_buchi()) + return degeneralize_tba(aut); + const acc_cond::acc_code& code = aut->get_acceptance(); std::vector pairs; - if (aut->acc().is_streett_like(pairs) || aut->acc().is_parity()) + if (acc.is_streett_like(pairs) || acc.is_parity()) return nsa_to_nca(aut, named_states); else if (code.is_dnf()) return dnf_to_nca(aut, named_states); auto tmp = make_twa_graph(aut, twa::prop_set::all()); - tmp->set_acceptance(aut->acc().num_sets(), - aut->get_acceptance().to_dnf()); + tmp->set_acceptance(acc.num_sets(), code.to_dnf()); return to_nca(tmp, named_states); } @@ -683,6 +687,8 @@ namespace spot return make_twa_graph(aut, twa::prop_set::all()); if (auto weak = weak_to_cobuchi(aut)) return weak; + if (aut->acc().is_generalized_co_buchi()) + return degeneralize_tba(aut); } const acc_cond::acc_code& code = aut->get_acceptance(); diff --git a/spot/twaalgos/cobuchi.hh b/spot/twaalgos/cobuchi.hh index 5c8d85e59..b02c0535d 100644 --- a/spot/twaalgos/cobuchi.hh +++ b/spot/twaalgos/cobuchi.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -92,8 +92,8 @@ namespace spot /// original language, and is a superset iff the original language /// can not be expressed using a co-Büchi acceptance condition. /// - /// The implementation dispatches between dnf_to_nca, nsa_to_nca, - /// and a trivial implementation for weak automata. + /// The implementation dispatches between dnf_to_nca(), nsa_to_nca(), + /// degeneralize_tba(), and a trivial implementation for weak automata. SPOT_API twa_graph_ptr to_nca(const_twa_graph_ptr aut, bool named_states = false); @@ -126,7 +126,8 @@ namespace spot /// can not be expressed using a co-Büchi acceptance condition. /// /// The implementation dispatches between dnf_to_dca, nsa_to_dca, - /// and a trivial implementation for deterministic weak automata. + /// degeneralize(), and a trivial implementation for deterministic + /// weak automata. SPOT_API twa_graph_ptr to_dca(const_twa_graph_ptr aut, bool named_states = false); } diff --git a/spot/twaalgos/complete.cc b/spot/twaalgos/complete.cc index 20be2ea06..803b3f440 100644 --- a/spot/twaalgos/complete.cc +++ b/spot/twaalgos/complete.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2018, 2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -29,6 +29,8 @@ namespace spot return; unsigned n = aut->num_states(); + bool need_acc_fix = false; + // UM is a pair (bool, mark). If the Boolean is false, the // acceptance is always satisfiable. Otherwise, MARK is an // example of unsatisfiable mark. @@ -36,10 +38,11 @@ namespace spot if (!um.first) { // We cannot safely complete an automaton if its - // acceptance is always satisfiable. 
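With the dispatch above, to_nca() now routes generalized co-Büchi inputs through degeneralize_tba() instead of the DNF-based fallback. A short sketch with a placeholder automaton; as documented, the result may over-approximate the language when the input is not co-Büchi realizable:

#include <spot/twaalgos/cobuchi.hh>

spot::twa_graph_ptr to_cobuchi(const spot::const_twa_graph_ptr& aut)
{
  // Co-Büchi input is copied, generalized co-Büchi input is merely
  // degeneralized, and the Streett-like/parity and DNF constructions
  // are used otherwise.
  return spot::to_nca(aut);
}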
- auto acc = aut->set_buchi(); - for (auto& t: aut->edge_vector()) - t.acc = acc; + // acceptance is always satisfiable, so we will + // have to fix the acceptance automaton. However + // postpone that until we are sure that the + // automaton really need to be completed. + need_acc_fix = true; } else { @@ -129,6 +132,8 @@ namespace spot // acceptance sets as the last outgoing edge of the // state. acc = t.acc; + // If a state already has a edge to a sink, remember it + // so we can add the missing conditions to it. if (t.dst == sink) edge_to_sink = aut->edge_number(t); } @@ -136,6 +141,15 @@ namespace spot // edge to some sink state. if (missingcond != bddfalse) { + if (need_acc_fix) + { + auto a = aut->set_buchi(); + for (auto& t: aut->edge_vector()) + t.acc = a; + if (aut->num_edges()) + acc = a; + need_acc_fix = false; + } // If we haven't found any sink, simply add one. if (sink == -1U) { diff --git a/spot/twaalgos/complete.hh b/spot/twaalgos/complete.hh index 87703dcc2..3525904be 100644 --- a/spot/twaalgos/complete.hh +++ b/spot/twaalgos/complete.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2017 Laboratoire de Recherche et +// Copyright (C) 2013-2015, 2017, 2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -25,12 +25,13 @@ namespace spot { /// \brief Complete a twa_graph in place. /// - /// If the TωA has an acceptance condition that is a tautology, - /// it will be changed into a Büchi automaton. + /// If the TωA is incomplete and has an acceptance condition that is + /// a tautology, it will be changed into a Büchi automaton. SPOT_API void complete_here(twa_graph_ptr aut); /// \brief Clone a twa and complete it. /// - /// If the twa has no acceptance set, one will be added. + /// If the TωA is incomplete and has an acceptance condition that is + /// a tautology, it will be changed into a Büchi automaton. SPOT_API twa_graph_ptr complete(const const_twa_ptr& aut); } diff --git a/spot/twaalgos/contains.cc b/spot/twaalgos/contains.cc index 6c76249d5..cf2680d01 100644 --- a/spot/twaalgos/contains.cc +++ b/spot/twaalgos/contains.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018, 2019 Laboratoire de Recherche et Développement de +// Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de // l'Epita. // // This file is part of Spot, a model checking library. @@ -34,7 +34,7 @@ namespace spot } } - bool contains(const_twa_graph_ptr left, const_twa_graph_ptr right) + bool contains(const_twa_graph_ptr left, const_twa_ptr right) { return !complement(left)->intersects(right); } @@ -44,7 +44,7 @@ namespace spot return contains(left, translate(right, left->get_dict())); } - bool contains(formula left, const_twa_graph_ptr right) + bool contains(formula left, const_twa_ptr right) { return !translate(formula::Not(left), right->get_dict())->intersects(right); } diff --git a/spot/twaalgos/contains.hh b/spot/twaalgos/contains.hh index 61c53076a..a1d64f1b1 100644 --- a/spot/twaalgos/contains.hh +++ b/spot/twaalgos/contains.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018 Laboratoire de Recherche et Développement de +// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de // l'Epita. // // This file is part of Spot, a model checking library. @@ -38,10 +38,15 @@ namespace spot /// associated to the complement of \a left. 
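Since contains() now takes any twa on its right-hand side (only the left-hand side must be a twa_graph, because it is the one being complemented or negated), the right argument can be explored on the fly. A sketch with placeholder formulas, assuming the usual Spot headers:

#include <spot/tl/parse.hh>
#include <spot/twaalgos/contains.hh>
#include <spot/twaalgos/translate.hh>

int main()
{
  spot::formula f = spot::parse_formula("G(req -> F ack)");
  spot::formula g = spot::parse_formula("(GF req) -> (GF ack)");
  // contains(left, right) holds iff L(right) ⊆ L(left): the implementation
  // above intersects right with the complement (or negation) of left.
  bool f_implies_g = spot::contains(g, f);
  // The right-hand side may also be an automaton, explored on the fly.
  spot::twa_graph_ptr aut_f = spot::translator().run(f);
  bool aut_in_g = spot::contains(g, aut_f);
  return (f_implies_g && aut_in_g) ? 0 : 1;
}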
It helps if \a left /// is a deterministic automaton or a formula (because in both cases /// complementation is easier). + /// + /// Complementation is only supported on twa_graph automata, so that + /// is the reason \a left must be a twa_graph. Right will be + /// explored on-the-fly if it is not a twa_graph. + /// /// @{ - SPOT_API bool contains(const_twa_graph_ptr left, const_twa_graph_ptr right); + SPOT_API bool contains(const_twa_graph_ptr left, const_twa_ptr right); SPOT_API bool contains(const_twa_graph_ptr left, formula right); - SPOT_API bool contains(formula left, const_twa_graph_ptr right); + SPOT_API bool contains(formula left, const_twa_ptr right); SPOT_API bool contains(formula left, formula right); /// @} diff --git a/spot/twaalgos/dbranch.cc b/spot/twaalgos/dbranch.cc new file mode 100644 index 000000000..7cf1b262e --- /dev/null +++ b/spot/twaalgos/dbranch.cc @@ -0,0 +1,181 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022-2023 Laboratoire de Recherche et Développement +// de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include "config.h" + +#include +#include +#include +#include +#include +#include + +namespace spot +{ + namespace + { + typedef std::pair bdd_color; + + struct bdd_color_hash + { + size_t + operator()(const bdd_color& bc) const noexcept + { + return bc.first.id() ^ bc.second.hash(); + } + }; + + template + bool delay_branching_aux(const twa_graph_ptr& aut, std::vector* owner) + { + unsigned ns = aut->num_states(); + // number of predecessors of each state + std::vector pred_count(ns, 0); + unsigned init = aut->get_init_state_number(); + pred_count[init] = 2; // pretend the initial state has too many + // predecessors, so it does not get fused. + // for each state, number of successors that have a single predecessors + std::vector succ_cand(ns, 0); + for (auto& e: aut->edges()) + for (unsigned d: aut->univ_dests(e)) + { + // Note that e.dst might be a destination group in + // alternating automata. + unsigned pc = ++pred_count[d]; + succ_cand[e.src] += (pc == 1) - (pc == 2); + } + bool changed = false; + typedef robin_hood::unordered_map hashmap_t; + hashmap_t first_dest[1 + is_game]; + auto& g = aut->get_graph(); + + // Merging outgoing transitions may cause the automaton to need + // transition-based acceptance. 
+ bool need_trans = !aut->prop_state_acc().is_true(); + + // setup a DFS + std::vector seen(ns); + std::stack todo; + auto push_state = [&](unsigned state) + { + todo.push(state); + seen[state] = true; + }; + push_state(init); + + while (!todo.empty()) + { + unsigned src = todo.top(); + todo.pop(); + if (succ_cand[src] < 2) // nothing to merge + { + for (auto& e: aut->out(src)) + for (unsigned d: aut->univ_dests(e)) + if (!seen[d]) + push_state(d); + continue; + } + first_dest[0].clear(); + if constexpr (is_game) + first_dest[1].clear(); + auto it = g.out_iteraser(src); + while (it) + { + unsigned canddst = it->dst; + for (unsigned d: aut->univ_dests(canddst)) + if (!seen[d]) + push_state(d); + if (aut->is_univ_dest(canddst) || pred_count[canddst] != 1) + { + ++it; + continue; + } + if (it->cond == bddfalse) + { + it.erase(); + continue; + } + unsigned mapidx = is_game ? (*owner)[canddst] : 0; + auto [it2, inserted] = + first_dest[mapidx].emplace(bdd_color{it->cond, it->acc}, + canddst); + if (inserted) + { + ++it; + continue; + } + unsigned mergedst = it2->second; + // we have to merge canddst into mergedst. + // This is as simple as: + // 1) connecting their list of transitions + unsigned& candlast = g.state_storage(canddst).succ_tail; + if (candlast) + { + unsigned& mergedfirst = g.state_storage(mergedst).succ; + unsigned& mergedlast = g.state_storage(mergedst).succ_tail; + unsigned& candfirst = g.state_storage(canddst).succ; + if (mergedlast) + { + aut->edge_storage(mergedlast).next_succ = candfirst; + // Do we need to require transition-based acceptance? + if (!need_trans) + need_trans = + (aut->edge_storage(candfirst).acc + != aut->edge_storage(mergedfirst).acc); + } + else // mergedst had no successor + { + mergedfirst = candfirst; + } + mergedlast = candlast; + // 2) updating the source of the merged transitions + for (unsigned e2 = candfirst; e2 != 0;) + { + auto& edge = aut->edge_storage(e2); + edge.src = mergedst; + e2 = edge.next_succ; + } + // 3) deleting the edge to canddst. + candfirst = candlast = 0; + } + it.erase(); + // 4) updating succ_cand + succ_cand[mergedst] += succ_cand[canddst]; + succ_cand[canddst] = 0; + changed = true; + } + } + if (need_trans) + aut->prop_state_acc(false); + return changed; + } + } + + bool delay_branching_here(const twa_graph_ptr& aut) + { + if (aut->prop_universal()) + return false; + auto owner = aut->get_named_prop>("state-player"); + if (SPOT_UNLIKELY(owner)) + return delay_branching_aux(aut, owner); + else + return delay_branching_aux(aut, nullptr); + } +} diff --git a/spot/twaalgos/dbranch.hh b/spot/twaalgos/dbranch.hh new file mode 100644 index 000000000..022c1a75b --- /dev/null +++ b/spot/twaalgos/dbranch.hh @@ -0,0 +1,41 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2022, 2023 Laboratoire de Recherche et Développement +// de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
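A small usage sketch for the new delay_branching_here() routine implemented above and declared in dbranch.hh below; the automaton is a placeholder:

#include <spot/twaalgos/dbranch.hh>

bool merge_sibling_successors(spot::twa_graph_ptr aut)
{
  // Works in place and returns true iff at least one pair of states was
  // merged.  Merging states that carried different colors may turn
  // state-based acceptance into transition-based acceptance, in which
  // case the function clears prop_state_acc() on aut.
  return spot::delay_branching_here(aut);
}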
+ +#pragma once + +#include + +namespace spot +{ + /// \ingroup twa_algorithms + /// \brief Merge states to delay + /// + /// In an automaton with transition-based acceptance, if a state (x) + /// has two outgoing transitions (x,l,m,y) and (x,l,m,z) going to + /// states (x) and (y) that have no other incoming edges, then (y) + /// and (z) can be merged (keeping the union of their outgoing + /// destinations). + /// + /// If the input automaton uses state-based acceptance, running this + /// function might make the acceptance transition-based, but only if + /// two states with different acceptance are merged at some point. + /// + /// \return true iff the automaton was modified. + SPOT_API bool delay_branching_here(const twa_graph_ptr& aut); +} diff --git a/spot/twaalgos/degen.cc b/spot/twaalgos/degen.cc index 11092ddac..d79844b84 100644 --- a/spot/twaalgos/degen.cc +++ b/spot/twaalgos/degen.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2020 Laboratoire de Recherche -// et Développement de l'Epita (LRDE). +// Copyright (C) 2012-2020, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -80,7 +80,8 @@ namespace spot void fill_cache(unsigned s) { unsigned s1 = scc_of(s); - acc_cond::mark_t common = a_->acc().all_sets(); + acc_cond::mark_t all_colors = a_->acc().all_sets(); + acc_cond::mark_t common = all_colors; acc_cond::mark_t union_ = {}; bool has_acc_self_loop = false; bool is_true_state = false; @@ -97,7 +98,7 @@ namespace spot std::get<2>(cache_[d]) &= t.acc; // an accepting self-loop? - if ((t.dst == s) && a_->acc().accepting(t.acc)) + if ((t.dst == s) && t.acc == all_colors) { has_acc_self_loop = true; if (t.cond == bddtrue) @@ -330,9 +331,10 @@ namespace spot bool skip_levels, bool ignaccsl, bool remove_extra_scc) { - if (!a->acc().is_generalized_buchi()) + bool input_is_gba = a->acc().is_generalized_buchi(); + if (!(input_is_gba || a->acc().is_generalized_co_buchi())) throw std::runtime_error - ("degeneralize() only works with generalized Büchi acceptance"); + ("degeneralize() only works with generalized (co)Büchi acceptance"); if (!a->is_existential()) throw std::runtime_error ("degeneralize() does not support alternation"); @@ -347,7 +349,11 @@ namespace spot // The result automaton is an SBA. auto res = make_twa_graph(dict); res->copy_ap_of(a); - res->set_buchi(); + if (input_is_gba) + res->set_buchi(); + else + res->set_co_buchi(); + acc_cond::mark_t all_colors = a->get_acceptance().used_sets(); if (want_sba) res->prop_state_acc(true); // Preserve determinism, weakness, and stutter-invariance @@ -396,9 +402,32 @@ namespace spot std::vector> lvl_cache(a->num_states()); // Compute SCCs in order to use any optimization. - std::unique_ptr m = use_scc - ? std::make_unique(a, scc_info_options::NONE) - : nullptr; + std::unique_ptr m = nullptr; + if (use_scc) + { + if (!input_is_gba) + { + // If the input is gen-co-Büchi, temporary pretend its + // generalized Büchi. + unsigned n = a->num_sets(); + twa_graph_ptr amut = std::const_pointer_cast(a); + amut->set_generalized_buchi(n); + try + { + m = std::make_unique(a, scc_info_options::NONE); + } + catch (...) + { + amut->set_generalized_co_buchi(n); + throw; + } + amut->set_generalized_co_buchi(n); + } + else + { + m = std::make_unique(a, scc_info_options::NONE); + } + } // Initialize scc_orders std::unique_ptr orders = use_cust_acc_orders @@ -674,7 +703,7 @@ namespace spot { d.second = 0; // Make it go to the first level. 
// Skip as many levels as possible. - if (!a->acc().accepting(acc) && skip_levels) + if (acc != all_colors && skip_levels) { if (use_cust_acc_orders) { @@ -723,9 +752,10 @@ namespace spot int use_lvl_cache, bool skip_levels, bool ignaccsl, bool remove_extra_scc) { - // If this already a degeneralized digraph, there is nothing we + // If this already a degeneralized twa, there is nothing we // can improve. - if (a->is_sba()) + if (const acc_cond& acc = a->acc(); + a->prop_state_acc() && (acc.is_buchi() || acc.is_co_buchi())) return std::const_pointer_cast(a); return degeneralize_aux(a, use_z_lvl, use_cust_acc_orders, @@ -739,9 +769,9 @@ namespace spot int use_lvl_cache, bool skip_levels, bool ignaccsl, bool remove_extra_scc) { - // If this already a degeneralized digraph, there is nothing we + // If this already a degeneralized twa, there is nothing we // can improve. - if (a->acc().is_buchi()) + if (a->acc().is_buchi() || a->acc().is_co_buchi()) return std::const_pointer_cast(a); return degeneralize_aux(a, use_z_lvl, use_cust_acc_orders, @@ -1168,5 +1198,9 @@ namespace spot unsigned idx = aut->edge_number(e); e.acc = marks[idx]; } + // If aut was state-based acc before, this might no longer + // this might no longer be the case + if (aut->prop_state_acc().is_true()) + aut->prop_state_acc(false); } } diff --git a/spot/twaalgos/degen.hh b/spot/twaalgos/degen.hh index 281ba2ef5..e9ae13021 100644 --- a/spot/twaalgos/degen.hh +++ b/spot/twaalgos/degen.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2015, 2017-2020 Laboratoire de +// Copyright (C) 2012-2015, 2017-2020, 2022 Laboratoire de // Recherche et Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -26,33 +26,36 @@ namespace spot class scc_info; /// \ingroup twa_acc_transform - /// \brief Degeneralize a spot::tgba into an equivalent sba with - /// only one acceptance condition. + /// \brief Degeneralize a generalized (co)Büchi automaton into an + /// equivalent (co)Büchi automaton. /// - /// This algorithm will build a new explicit automaton that has - /// at most (N+1) times the number of states of the original automaton. + /// There are two variants of the function. If the generalizd + /// (co)Büchi acceptance uses N colors, degeneralize() algorithm + /// will builds a state-based (co)Büchi automaton that has at most + /// (N+1) times the number of states of the original automaton. + /// degeneralize_tba() builds a transition-based (co)Büchi automaton + /// that has at most N times the number of states of the original + /// automaton. /// - /// When \a use_z_lvl is set, the level of the degeneralized - /// automaton is reset everytime an SCC is exited. If \a - /// use_cust_acc_orders is set, the degeneralization will compute a - /// custom acceptance order for each SCC (this option is disabled by - /// default because our benchmarks show that it usually does more - /// harm than good). If \a use_lvl_cache is set, everytime an SCC - /// is entered on a state that as already been associated to some - /// level elsewhere, reuse that level (set it to 2 to keep the - /// smallest number, 3 to keep the largest level, and 1 to keep the - /// first level found). If \a ignaccsl is set, we do not directly - /// jump to the accepting level if the entering state has an - /// accepting self-loop. If \a remove_extra_scc is set (the default) - /// we ensure that the output automaton has as many SCCs as the input - /// by removing superfluous SCCs. 
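Following the degeneralization changes above, both entry points now accept generalized co-Büchi input as well. A sketch with a placeholder automaton:

#include <spot/twaalgos/degen.hh>

spot::twa_graph_ptr demo_degen(const spot::const_twa_graph_ptr& aut)
{
  // State-based (co-)Büchi output; at most (N+1) times the input size
  // for N colors.
  spot::twa_graph_ptr sba = spot::degeneralize(aut);
  // Transition-based (co-)Büchi output; at most N times the input size.
  spot::twa_graph_ptr tba = spot::degeneralize_tba(aut);
  return tba->num_states() <= sba->num_states() ? tba : sba;
}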
+ /// Additional options control optimizations described in + /// \cite babiak.13.spin . When \a use_z_lvl is set, the level of + /// the degeneralized automaton is reset everytime an SCC is exited. + /// If \a use_cust_acc_orders is set, the degeneralization will + /// compute a custom acceptance order for each SCC (this option is + /// disabled by default because our benchmarks show that it usually + /// does more harm than good). If \a use_lvl_cache is set, + /// everytime an SCC is entered on a state that as already been + /// associated to some level elsewhere, reuse that level (set it to + /// 2 to keep the smallest number, 3 to keep the largest level, and + /// 1 to keep the first level found). If \a ignaccsl is set, we do + /// not directly jump to the accepting level if the entering state + /// has an accepting self-loop. If \a remove_extra_scc is set (the + /// default) we ensure that the output automaton has as many SCCs as + /// the input by removing superfluous SCCs. /// /// Any of these three options will cause the SCCs of the automaton /// \a a to be computed prior to its actual degeneralization. /// - /// The degeneralize_tba() variant produce a degeneralized automaton - /// with transition-based acceptance. - /// /// The mapping between each state of the resulting automaton /// and the original state of the input automaton is stored in the /// "original-states" named property of the produced automaton. Call @@ -70,6 +73,14 @@ namespace spot /// Similarly, the property "degen-levels" keeps track of the degeneralization /// levels. To retrieve it, call /// `aut->get_named_prop>("degen-levels")`. + /// + /// As an alternative method to degeneralization, one may also + /// consider ACD transform. acd_transform() will never produce + /// larger automata than degenaralize_tba(), and + /// acd_transform_sbacc() produce smaller automata than + /// degeneralize() on the average. See \cite casares.22.tacas for + /// some comparisons. + /// /// \@{ SPOT_API twa_graph_ptr degeneralize(const const_twa_graph_ptr& a, bool use_z_lvl = true, diff --git a/spot/twaalgos/determinize.cc b/spot/twaalgos/determinize.cc index 82305f564..d2d35a824 100644 --- a/spot/twaalgos/determinize.cc +++ b/spot/twaalgos/determinize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2021 Laboratoire de Recherche et +// Copyright (C) 2015-2022 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -472,15 +472,23 @@ namespace spot std::vector res; for (const auto& n: s.nodes_) { - int brace = n.second; - std::vector tmp; - while (brace >= 0) + // First, count the number of braces. + unsigned nbraces = 0; + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + ++nbraces; + // Then list them in reverse order. Since we know the + // number of braces, we can allocate exactly what we need. + if (nbraces > 0) { - // FIXME is not there a smarter way? - tmp.insert(tmp.begin(), brace); - brace = s.braces_[brace]; + std::vector tmp(nbraces, 0); + for (int brace = n.second; brace >= 0; brace = s.braces_[brace]) + tmp[--nbraces] = brace; + res.emplace_back(n.first, std::move(tmp)); + } + else + { + res.emplace_back(n.first, std::vector{}); } - res.emplace_back(n.first, std::move(tmp)); } std::sort(res.begin(), res.end(), compare()); return res; @@ -781,7 +789,7 @@ namespace spot bool safra_state::operator<(const safra_state& other) const { - // FIXME what is the right, if any, comparison to perform? 
+ // FIXME: what is the right, if any, comparison to perform? return braces_ == other.braces_ ? nodes_ < other.nodes_ : braces_ < other.braces_; } @@ -887,7 +895,7 @@ namespace spot // NB spot::simulation() does not remove unreachable states, as it // would invalidate the contents of 'implications'. // so we need to explicitly test for unreachable states - // FIXME based on the scc_info, we could remove the unreachable + // FIXME: based on the scc_info, we could remove the unreachable // states, both in the input automaton and in 'implications' // to reduce the size of 'implies'. if (!scc.reachable_state(i)) @@ -922,7 +930,7 @@ namespace spot std::vector support(aut->num_states()); if (use_stutter && aut->prop_stutter_invariant()) { - // FIXME this could be improved + // FIXME: this could be improved // supports of states should account for possible stuttering if we plan // to use stuttering invariance for (unsigned c = 0; c != scc.scc_count(); ++c) diff --git a/spot/twaalgos/dot.cc b/spot/twaalgos/dot.cc index 66804f304..19a638b9e 100644 --- a/spot/twaalgos/dot.cc +++ b/spot/twaalgos/dot.cc @@ -578,10 +578,27 @@ namespace spot return tmp_dst.str(); } - template - void print_true_state(U to, V from) const + void print_hidden_true_name(unsigned to, unsigned from) const { - os_ << " T" << to << 'T' << from << " [label=\"\", style=invis, "; + os_ << 'T' << to << 'T' << from; + } + + void print_hidden_true_name(unsigned to, const std::string& from) const + { + bool neg = from[0] == '-'; + if (neg) + os_ << '"'; + os_ << 'T' << to << 'T' << from; + if (neg) + os_ << '"'; + } + + template + void print_true_state(unsigned to, F from) const + { + os_ << " "; + print_hidden_true_name(to, from); + os_ << " [label=\"\", style=invis, "; os_ << (opt_vertical_ ? "height=0]\n" : "width=0]\n"); } @@ -606,7 +623,7 @@ namespace spot print_true_state(d, dest); os_ << " " << dest << " -> "; if (dst_is_hidden_true_state) - os_ << 'T' << d << 'T' << dest; + print_hidden_true_name(d, dest); else os_ << d; if ((style && *style) || opt_id_) @@ -1067,9 +1084,12 @@ namespace spot { if (aut->acc().is_t()) opt_show_acc_ = false; - bdd out = *p; - opt_mealy_output_ = out; - opt_mealy_ = true; + if (opt_showlabel_) + { + bdd out = *p; + opt_mealy_output_ = out; + opt_mealy_ = true; + } } incomplete_ = aut->get_named_prop>("incomplete-states"); diff --git a/spot/twaalgos/dtbasat.cc b/spot/twaalgos/dtbasat.cc index eb39a69b6..c4bf3d1bc 100644 --- a/spot/twaalgos/dtbasat.cc +++ b/spot/twaalgos/dtbasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2021 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2021-2023 Laboratoire de Recherche et // Développement de l'Epita. // // This file is part of Spot, a model checking library. 
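The determinize.cc rewrite above replaces the repeated tmp.insert(tmp.begin(), ...) with a count-then-fill-backwards pass. A generic sketch of that idiom, assuming a parent chain terminated by a negative index in the spirit of safra_state::braces_:

#include <vector>

std::vector<int> chain_to_vector(int leaf, const std::vector<int>& parent)
{
  unsigned n = 0;
  for (int b = leaf; b >= 0; b = parent[b])
    ++n;                      // first pass: count the chain length
  std::vector<int> out(n);
  for (int b = leaf; b >= 0; b = parent[b])
    out[--n] = b;             // second pass: fill from the back (root first)
  return out;                 // avoids the quadratic front insertions
}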
@@ -77,11 +77,7 @@ namespace spot return true; if (this->src_ref > other.src_ref) return false; - if (this->dst_ref < other.dst_ref) - return true; - if (this->dst_ref > other.dst_ref) - return false; - return false; + return this->dst_ref < other.dst_ref; } }; @@ -585,7 +581,7 @@ namespace spot #if TRACE std::fstream out("dtba-sat.dbg", std::ios_base::trunc | std::ios_base::out); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); #endif std::set acc_states; std::set seen_trans; diff --git a/spot/twaalgos/dtwasat.cc b/spot/twaalgos/dtwasat.cc index 670a9ffc8..2ecf38fd1 100644 --- a/spot/twaalgos/dtwasat.cc +++ b/spot/twaalgos/dtwasat.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2021 Laboratoire de Recherche +// Copyright (C) 2013-2023 Laboratoire de Recherche // et Développement de l'Epita. // // This file is part of Spot, a model checking library. @@ -98,11 +98,7 @@ namespace spot return true; if (this->acc_ref > other.acc_ref) return false; - if (this->acc_cand < other.acc_cand) - return true; - if (this->acc_cand > other.acc_cand) - return false; - return false; + return this->acc_cand < other.acc_cand; } }; @@ -864,7 +860,7 @@ namespace spot #if TRACE std::fstream out("dtwa-sat.dbg", std::ios_base::trunc | std::ios_base::out); - out.exceptions(std::ifstream::failbit | std::ifstream::badbit); + out.exceptions(std::ofstream::failbit | std::ofstream::badbit); #endif std::map state_acc; std::set seen_trans; diff --git a/spot/twaalgos/dualize.cc b/spot/twaalgos/dualize.cc index e42822740..1b60a0d17 100644 --- a/spot/twaalgos/dualize.cc +++ b/spot/twaalgos/dualize.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -155,14 +155,11 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(delta & oneletter); - bdd cube; + minato_isop isop(bdd_restrict(delta, oneletter)); + bdd dest; - while ((cube = isop.next()) != bddfalse) + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_vars_); - bdd dest = bdd_existcomp(cube, all_vars_); - st.clear(); acc_cond::mark_t m = bdd_to_state(dest, st); if (st.empty()) @@ -171,7 +168,7 @@ namespace spot if (aut_->prop_state_acc()) m = aut_->state_acc_sets(i); } - res->new_univ_edge(i, st.begin(), st.end(), cond, m); + res->new_univ_edge(i, st.begin(), st.end(), oneletter, m); } } } diff --git a/spot/twaalgos/emptiness.cc b/spot/twaalgos/emptiness.cc index fd3319141..ef8890f95 100644 --- a/spot/twaalgos/emptiness.cc +++ b/spot/twaalgos/emptiness.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009, 2011-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009, 2011-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). 
// Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -570,7 +570,7 @@ namespace spot if (debug) os << "ERROR: First state of run (in " << in << "): " << aut->format_state(i->s) - << "\ndoes not match initial state of automata: " + << "\ndoes not match initial state of automaton: " << aut->format_state(s) << '\n'; s->destroy(); return false; @@ -802,38 +802,38 @@ namespace spot res->set_named_prop("state-names", names); } - const state* s = aut->get_init_state(); unsigned src; unsigned dst; const twa_run::steps* l; - acc_cond::mark_t seen_acc = {}; - - state_map seen; + unsigned cycle_entry = 0; if (prefix.empty()) - l = &cycle; + l = &cycle; else - l = &prefix; + l = &prefix; twa_run::steps::const_iterator i = l->begin(); - assert(s->compare(i->s) == 0); +#if NDEBUG + const state* init = aut->get_init_state(); + assert(init->compare(i->s) == 0); + init->destroy(); +#endif + src = res->new_state(); - seen.emplace(i->s, src); if (names) - names->push_back(aut->format_state(s)); + names->push_back(aut->format_state(i->s)); for (; i != l->end();) { - // expected outgoing transition bdd label = i->label; acc_cond::mark_t acc = i->acc; - // compute the next expected state const state* next; ++i; if (i != l->end()) { + dst = res->new_state(); next = i->s; } else @@ -842,57 +842,24 @@ namespace spot { l = &cycle; i = l->begin(); + cycle_entry = dst = res->new_state(); + } + else + { + dst = cycle_entry; } next = l->begin()->s; } - // browse the actual outgoing transitions and - // look for next; - const state* the_next = nullptr; - for (auto j: aut->succ(s)) + if (names && i != l->end()) { - if (j->cond() != label - || j->acc() != acc) - continue; - - const state* s2 = j->dst(); - if (s2->compare(next) == 0) - { - the_next = s2; - break; - } - s2->destroy(); + assert(dst == names->size()); + names->push_back(aut->format_state(next)); } - s->destroy(); - if (!the_next) - throw std::runtime_error("twa_run::as_twa() unable to replay run"); - s = the_next; - - - auto p = seen.emplace(next, 0); - if (p.second) - { - unsigned ns = res->new_state(); - p.first->second = ns; - if (names) - { - assert(ns == names->size()); - names->push_back(aut->format_state(next)); - } - } - dst = p.first->second; - res->new_edge(src, dst, label, acc); src = dst; - - // Sum acceptance conditions. - if (l == &cycle && i != l->begin()) - seen_acc |= acc; } - s->destroy(); - - assert(aut->acc().accepting(seen_acc)); return res; } diff --git a/spot/twaalgos/emptiness.hh b/spot/twaalgos/emptiness.hh index 47896a1d7..66bf8ca56 100644 --- a/spot/twaalgos/emptiness.hh +++ b/spot/twaalgos/emptiness.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2011, 2013-2018, 2020-2021 Laboratoire de +// Copyright (C) 2011, 2013-2018, 2020-2021, 2023 Laboratoire de // Recherche et Developpement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -451,9 +451,9 @@ namespace spot /// Note that this works only if the automaton is a twa_graph_ptr. void highlight(unsigned color); - /// \brief Return a twa_graph_ptr corresponding to \a run + /// \brief Convert the run into a lasso-shaped automaton /// - /// Identical states are merged. + /// This preserves the original acceptance condition. /// /// If \a preserve_names is set, the created states are named /// using the format_state() result from the original state. 
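Given the revised twa_run::as_twa() documented above (one fresh state per run step, original acceptance preserved), extracting a lasso-shaped witness automaton looks roughly like this; the input automaton is a placeholder:

#include <spot/twaalgos/emptiness.hh>

spot::twa_graph_ptr extract_lasso(const spot::twa_graph_ptr& aut)
{
  if (spot::twa_run_ptr run = aut->accepting_run())
    // Passing true names the new states after format_state() of the
    // original states; the acceptance condition of aut is kept as is.
    return run->as_twa(true);
  return nullptr;   // empty language: no accepting run to replay
}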
diff --git a/spot/twaalgos/game.cc b/spot/twaalgos/game.cc index 9b8fdcee9..add0926fe 100644 --- a/spot/twaalgos/game.cc +++ b/spot/twaalgos/game.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2018, 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2017-2018, 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -23,13 +23,23 @@ #include #include -#include +#include #include namespace spot { namespace { + constexpr unsigned unseen_mark = std::numeric_limits::max(); + using par_t = int; + constexpr par_t limit_par_even = + std::numeric_limits::max() & 1 + ? std::numeric_limits::max()-3 + : std::numeric_limits::max()-2; + using strat_t = long long; + constexpr strat_t no_strat_mark = std::numeric_limits::min(); + + static const std::vector* ensure_game(const const_twa_graph_ptr& arena, const char* fnname) { @@ -48,15 +58,11 @@ namespace spot ensure_parity_game(const const_twa_graph_ptr& arena, const char* fnname) { bool max, odd; - arena->acc().is_parity(max, odd, true); - if (!(max && odd)) + bool is_par = arena->acc().is_parity(max, odd, true); + if (!is_par) throw std::runtime_error (std::string(fnname) + - ": arena must have max-odd acceptance condition"); - for (const auto& e : arena->edges()) - if (!e.acc) - throw std::runtime_error - (std::string(fnname) + ": arena must be colorized"); + ": arena must have one of the four parity acceptance conditions"); return ensure_game(arena, fnname); } @@ -71,10 +77,7 @@ namespace spot { // returns true if player p wins v // false otherwise - if (!has_winner_[v]) - return false; - - return winner_[v] == p; + return has_winner_[v] ? winner_[v] == p : false; } inline void set(unsigned v, bool p) @@ -95,40 +98,27 @@ namespace spot } }; // winner_t - // When using scc decomposition we need to track the - // changes made to the graph - struct edge_stash_t - { - edge_stash_t(unsigned num, unsigned dst, acc_cond::mark_t acc) noexcept - : e_num(num), - e_dst(dst), - e_acc(acc) - { - } - const unsigned e_num, e_dst; - const acc_cond::mark_t e_acc; - }; // edge_stash_t - // Internal structs used by parity_game // Struct to change recursive calls to stack struct work_t { - work_t(unsigned wstep_, unsigned rd_, unsigned min_par_, - unsigned max_par_) noexcept + work_t(unsigned wstep_, unsigned rd_, par_t min_par_, + par_t max_par_) noexcept : wstep(wstep_), rd(rd_), min_par(min_par_), max_par(max_par_) { } - const unsigned wstep, rd, min_par, max_par; + const unsigned wstep, rd; + const par_t min_par, max_par; }; // work_t // Collects information about an scc // Used to detect special cases struct subgame_info_t { - typedef std::set> all_parities_t; + typedef std::set> all_parities_t; subgame_info_t() noexcept { @@ -159,65 +149,152 @@ namespace spot { public: - bool solve(const twa_graph_ptr &arena) + bool solve(const twa_graph_ptr& arena, bool solve_globally) { // todo check if reordering states according to scc is worth it set_up(arena); // Start recursive zielonka in a bottom-up fashion on each scc subgame_info_t subgame_info; - for (c_scc_idx_ = 0; c_scc_idx_ < info_->scc_count(); ++c_scc_idx_) + while (true) { - // Useless SCCs are winning for player 0. - if (!info_->is_useful_scc(c_scc_idx_)) - { - for (unsigned v: c_states()) - { - w_.set(v, false); - // The strategy for player 0 is to take the first - // available edge. 
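The solver rework in the hunks around here still exposes its results through the usual named properties. A sketch of solving an arena and reading those back; the arena itself is a placeholder, and, per the final conversion in solve(), a strategy entry of 0 is assumed to mean that no strategy was stored for that state:

#include <spot/twaalgos/game.hh>

void report_winning_strategy(const spot::twa_graph_ptr& arena)
{
  // solve_game() dispatches to solve_parity_game() for parity arenas;
  // solve_parity_game(arena, true) additionally assigns winners to states
  // the default run would leave untouched.
  bool init_won_by_player = spot::solve_game(arena);
  const auto& owner  = spot::get_state_players(arena);  // false = player 0
  const auto& winner = spot::get_state_winners(arena);
  const auto& strat  = spot::get_strategy(arena);
  for (unsigned s = 0; s < arena->num_states(); ++s)
    if (owner[s] == winner[s] && strat[s] != 0)
      {
        // strat[s] is the number of the edge the winner should take from s.
        const auto& e = arena->edge_storage(strat[s]);
        (void) e;
      }
  (void) init_won_by_player;
}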
- if ((*owner_ptr_)[v] == false) - for (const auto &e : arena_->out(v)) - { - s_[v] = arena_->edge_number(e); - break; - } - } - continue; - } - // Convert transitions leaving edges to self-loops - // and check if trivially solvable - subgame_info = fix_scc(); - // If empty, the scc was trivially solved - if (!subgame_info.is_empty) - { - // Check for special cases - if (subgame_info.is_one_parity) - one_par_subgame_solver(subgame_info, max_abs_par_); - else - { - // "Regular" solver - max_abs_par_ = *subgame_info.all_parities.begin(); - w_stack_.emplace_back(0, 0, 0, max_abs_par_); - zielonka(); - } - } - } - // All done -> restore graph, i.e. undo self-looping - restore(); + // If we solve globally, + auto maybe_useful = [&](unsigned scc_idx){ + if (info_->is_useful_scc(scc_idx)) + return true; + if (!solve_globally) + return false; + // Check if we have an out-edge to a winning state + // in another scc + return std::any_of( + info_->states_of(scc_idx).begin(), + info_->states_of(scc_idx).end(), + [&](unsigned s){ + return std::any_of( + arena->out(s).begin(), + arena->out(s).end(), + [&](const auto& e){ + assert ((subgame_[e.dst] == unseen_mark) + || (info_->scc_of(e.dst) != scc_idx)); + return (info_->scc_of(e.dst) != scc_idx) + && w_.winner(e.dst); + }); + }); + }; - assert(std::all_of(w_.has_winner_.cbegin(), w_.has_winner_.cend(), - [](bool b) - { return b; })); - assert(std::all_of(s_.cbegin(), s_.cend(), - [](unsigned e_idx) - { return e_idx > 0; })); + for (c_scc_idx_ = 0; c_scc_idx_ < info_->scc_count(); ++c_scc_idx_) + { + // Testing + // Make sure that every state that has a winner also + // belongs to a subgame + assert([&]() + { + for (unsigned i = 0; i < arena_->num_states(); ++i) + if (w_.has_winner_[i] + && (subgame_[i] == unseen_mark)) + return false; + return true; + }()); + // Useless SCCs are winning for player 0. + if (!maybe_useful(c_scc_idx_)) + { + // This scc also gets its own subgame + ++rd_; + for (unsigned v: c_states()) + { + subgame_[v] = rd_; + w_.set(v, false); + // The strategy for player 0 is to take the first + // available edge. + if ((*owner_ptr_)[v] == false) + for (const auto &e : arena_->out(v)) + { + s_[v] = arena_->edge_number(e); + break; + } + } + continue; + } + // Convert transitions leaving edges to self-loops + // and check if trivially solvable + subgame_info = fix_scc(); + // If empty, the scc was trivially solved + if (!subgame_info.is_empty) + { + // Check for special cases + if (subgame_info.is_one_parity) + one_par_subgame_solver(subgame_info, max_abs_par_); + else + { + // "Regular" solver + max_abs_par_ = *subgame_info.all_parities.begin(); + w_stack_.emplace_back(0, 0, + min_par_graph_, max_abs_par_); + zielonka(); + } + } + } + if (!solve_globally) + break; + + // Update the scc_info and continue + unsigned new_init + = std::distance(subgame_.begin(), + std::find(subgame_.begin(), subgame_.end(), + unseen_mark)); + if (new_init == arena->num_states()) + break; // All states have been solved + // Compute new sccs + scc_info::edge_filter ef + = [](const twa_graph::edge_storage_t&, + unsigned dst, void* subgame){ + const auto& sg = *static_cast*>(subgame); + return sg[dst] == unseen_mark ? 
+ scc_info::edge_filter_choice::keep : + scc_info::edge_filter_choice::ignore; + }; + info_ = std::make_unique(arena, new_init, ef, &subgame_); + } + // Every state needs a winner (solve_globally) + // Or only those reachable + assert((solve_globally + && std::all_of(w_.has_winner_.cbegin(), w_.has_winner_.cend(), + [](bool b) { return b; })) + || (!solve_globally + && [&](){ + for (unsigned s = 0; s < arena->num_states(); ++s) + { + if ((info_->scc_of(s) != -1u) + && !w_.has_winner_.at(s)) + return false; + } + return true; + }())); + // Only the states owned by the winner need a strategy + assert([&]() + { + std::unordered_set valid_strat; + for (const auto& e : arena_->edges()) + valid_strat.insert(arena_->edge_number(e)); + + for (unsigned v = 0; v < arena_->num_states(); ++v) + { + if (!solve_globally && (info_->scc_of(v) == -1u)) + continue; + if (((*owner_ptr_)[v] == w_.winner(v)) + && (valid_strat.count(s_.at(v)) == 0)) + return false; + } + return true; + }()); // Put the solution as named property region_t &w = *arena->get_or_set_named_prop("state-winner"); strategy_t &s = *arena->get_or_set_named_prop("strategy"); w.swap(w_.winner_); - s.resize(s_.size()); - std::copy(s_.begin(), s_.end(), s.begin()); + s.clear(); + s.reserve(s_.size()); + for (auto as : s_) + s.push_back(as == no_strat_mark ? 0 : (unsigned) as); clean_up(); return w[arena->get_init_state_number()]; @@ -234,7 +311,7 @@ namespace spot return info_->states_of(c_scc_idx_); } - void set_up(const twa_graph_ptr &arena) + void set_up(const twa_graph_ptr& arena) { owner_ptr_ = ensure_parity_game(arena, "solve_parity_game()"); arena_ = arena; @@ -247,22 +324,63 @@ namespace spot w_.winner_.clear(); w_.winner_.resize(n_states, 0); s_.clear(); - s_.resize(n_states, -1); + s_.resize(n_states, no_strat_mark); // Init rd_ = 0; - max_abs_par_ = arena_->get_acceptance().used_sets().max_set() - 1; info_ = std::make_unique(arena_); - // Every edge leaving an scc needs to be "fixed" - // at some point. - // We store: number of edge fixed, original dst, original acc - change_stash_.clear(); - change_stash_.reserve(info_->scc_count() * 2); + // Create all the parities + // we want zielonka to work with any of the four parity types + // and we want it to work on partially colored arenas + // However the actually algorithm still supposes max odd. + // Therefore (and in order to avoid the manipulation of the mark + // at each step) we generate a vector directly storing the + // "equivalent" parity for each edge + bool max, odd; + arena_->acc().is_parity(max, odd, true); + max_abs_par_ = arena_->acc().all_sets().max_set()-1; + // Make it the next larger odd + par_t next_max_par = max_abs_par_ + 1; + all_edge_par_.resize(arena_->edge_vector().size(), + std::numeric_limits::max()); + + // The parities are modified much like for colorize_parity + // however if the acceptance condition is "min", we negate all + // parities to get "max" + // The algorithm works on negative or positive parities alike + //| kind/style | n | empty tr. | other tr. 
| result | min par + //|------------+-----+---------------+------------+--------------|--------- + //| max odd | any | set to {-1} | unchanged | max odd n | -1 + //| max even | any | set to {0} | add 1 | max odd n+1 | 0 + //| min odd | any | set to {-n} | negate | max odd 0 | -n + //| min even | any | set to {-n+1} | negate + 1 | max odd +1 | -n + 1 + min_par_graph_ = -(!max*max_abs_par_) - (max*odd); + max_par_graph_ = max*(max_abs_par_ + !odd) + !max*!odd; + + // Takes an edge and returns the "equivalent" max odd parity + auto equiv_par = [max, odd, next_max_par, inv = 2*max-1](const auto& e) + { + par_t e_par = e.acc.max_set() - 1; // -1 for empty + // If "min" and empty -> set to n + if (!max & (e_par == -1)) + e_par = next_max_par; + // Negate if min + e_par *= inv; + // even -> odd + e_par += !odd; + return e_par; + }; + + for (const auto& e : arena_->edges()) + { + unsigned e_idx = arena_->edge_number(e); + all_edge_par_[e_idx] = equiv_par(e); + } } // Checks if an scc is empty and reports the occurring parities // or special cases inline subgame_info_t - inspect_scc(unsigned max_par) + inspect_scc(par_t max_par) { subgame_info_t info; info.is_empty = true; @@ -278,7 +396,7 @@ namespace spot if (subgame_[e.dst] == unseen_mark) { info.is_empty = false; - unsigned this_par = e.acc.max_set() - 1; + par_t this_par = to_par(e); if (this_par <= max_par) { info.all_parities.insert(this_par); @@ -301,86 +419,81 @@ namespace spot return info; } - // Checks if an scc can be trivially solved, - // that is, all vertices of the scc belong to the - // attractor of a transition leaving the scc + // Computes the trivially solvable part of the scc + // That is the states that can be attracted to an + // outgoing transition inline subgame_info_t fix_scc() { - auto scc_acc = info_->acc_sets_of(c_scc_idx_); - // We will override all parities of edges leaving the scc - bool added[] = {false, false}; - unsigned par_pair[2]; - unsigned scc_new_par = std::max(scc_acc.max_set(), 1u); - if (scc_new_par&1) - { - par_pair[1] = scc_new_par; - par_pair[0] = scc_new_par+1; - } - else - { - par_pair[1] = scc_new_par+1; - par_pair[0] = scc_new_par; - } - acc_cond::mark_t even_mark({par_pair[0]}); - acc_cond::mark_t odd_mark({par_pair[1]}); + // Note that the winner of the transitions + // leaving the scc are already determined + // attr(...) 
will only modify the + // states within the current scc + // but we have to "trick" it into + // not disregarding the transitions leaving the scc + // dummy needed to pass asserts + max_abs_par_ = limit_par_even+2; + // The attractors should define their own subgame + // but as we want to compute the attractors of the + // leaving transitions, we need to make + // sure that + // a) no transition is excluded due to its parity + // b) no transition is considered accepting/winning + // due to its parity + // Final note: Attractors cannot intersect by definition + // therefore the order in which they are computed + // is irrelevant + unsigned dummy_rd = 0; + // Attractor of outgoing transitions winning for env + attr(dummy_rd, false, limit_par_even, true, limit_par_even, false); + // Attractor of outgoing transitions winning for player + attr(dummy_rd, true, limit_par_even+1, true, limit_par_even+1, false); - // Only necessary to pass tests - max_abs_par_ = std::max(par_pair[0], par_pair[1]); + // No strategy fix need + // assert if all winning states of the current scc have a valid strategy - for (unsigned v : c_states()) - { - assert(subgame_[v] == unseen_mark); - for (auto &e : arena_->out(v)) - { - // The outgoing edges are taken finitely often - // -> disregard parity - if (info_->scc_of(e.dst) != c_scc_idx_) - { - // Edge leaving the scc - change_stash_.emplace_back(arena_->edge_number(e), - e.dst, e.acc); - if (w_.winner(e.dst)) - { - // Winning region of player -> odd - e.acc = odd_mark; - added[1] = true; - } - else - { - // Winning region of env -> even - e.acc = even_mark; - added[0] = true; - } - // Replace with self-loop - e.dst = e.src; - } - } // e - } // v + assert([&]() + { + for (unsigned v : c_states()) + { + if (!w_.has_winner_[v]) + continue; + // We only need a strategy if the winner + // of the state is also the owner + if (w_.winner(v) != (*owner_ptr_)[v]) + continue; + if (s_[v] <= 0) + { + std::cerr << "state " << v << " has a winner " + << w_.winner(v) << " and owner " + << (*owner_ptr_)[v] + << " but no strategy " + << s_[v] << '\n'; + return false; + } + const auto& e = arena_->edge_storage(s_[v]); + if (!w_.has_winner_[e.dst] + || (w_.winner(e.src) != w_.winner(e.dst))) + { + std::cerr << "state " << v << " has a winner " + << w_.winner(v) + << " but no valid strategy\n"; + return false; + } + } + return true; + }()); - // Compute the attractors of the self-loops/transitions leaving scc - // These can be directly added to the winning states - // Note: attractors can not intersect therefore the order in which - // they are computed does not matter - unsigned dummy_rd; - - for (bool p : {false, true}) - if (added[p]) - attr(dummy_rd, p, par_pair[p], true, par_pair[p]); - - if (added[0] || added[1]) - // Fix "negative" strategy - for (unsigned v : c_states()) - if (subgame_[v] != unseen_mark) - s_[v] = std::abs(s_[v]); - - return inspect_scc(unseen_mark); + auto ins = inspect_scc(limit_par_even); + return ins; } // fix_scc inline bool - attr(unsigned &rd, bool p, unsigned max_par, - bool acc_par, unsigned min_win_par) + attr(unsigned &rd, bool p, par_t max_par, + bool acc_par, par_t min_win_par, bool respect_sg=true) { + // In fix_scc, the attr computation is + // abused so we can not check ertain things // Computes the attractor of the winning set of player p within a // subgame given as rd. // If acc_par is true, max_par transitions are also accepting and @@ -394,8 +507,8 @@ namespace spot // As proposed in Oink! 
/ PGSolver // Needs the transposed graph however - assert((!acc_par) || (acc_par && (max_par&1) == p)); - assert(!acc_par || (0 < min_win_par)); + assert((!acc_par) || (acc_par && to_player(max_par) == p)); + assert(!acc_par || (min_par_graph_ <= min_win_par)); assert((min_win_par <= max_par) && (max_par <= max_abs_par_)); bool grown = false; @@ -411,19 +524,16 @@ namespace spot do { - if (!to_add.empty()) + grown |= !to_add.empty(); + for (unsigned v : to_add) { - grown = true; - for (unsigned v : to_add) + // v is winning + w_.set(v, p); + // Mark if demanded + if (acc_par) { - // v is winning - w_.set(v, p); - // Mark if demanded - if (acc_par) - { - assert(subgame_[v] == unseen_mark); - subgame_[v] = rd; - } + assert(subgame_[v] == unseen_mark); + subgame_[v] = rd; } } to_add.clear(); @@ -431,7 +541,7 @@ namespace spot for (unsigned v : c_states()) { if ((subgame_[v] < rd) || (w_(v, p))) - // Not in subgame or winning + // Not in subgame or winning for p continue; bool is_owned = (*owner_ptr_)[v] == p; @@ -441,11 +551,12 @@ namespace spot // Optim: If given the choice, // we seek to go to the "oldest" subgame // That is the subgame with the lowest rd value - unsigned min_subgame_idx = -1u; + unsigned min_subgame_idx = unseen_mark; for (const auto &e: arena_->out(v)) { - unsigned this_par = e.acc.max_set() - 1; - if ((subgame_[e.dst] >= rd) && (this_par <= max_par)) + par_t this_par = to_par(e); + if ((!respect_sg || (subgame_[e.dst] >= rd)) + && (this_par <= max_par)) { // Check if winning if (w_(e.dst, p) @@ -453,7 +564,7 @@ namespace spot { assert(!acc_par || (this_par < min_win_par) || (acc_par && (min_win_par <= this_par) && - ((this_par&1) == p))); + (to_player(this_par) == p))); if (is_owned) { wins = true; @@ -504,7 +615,7 @@ namespace spot // We need to check if transitions that are accepted due // to their parity remain in the winning region of p inline bool - fix_strat_acc(unsigned rd, bool p, unsigned min_win_par, unsigned max_par) + fix_strat_acc(unsigned rd, bool p, par_t min_win_par, par_t max_par) { for (unsigned v : c_states()) { @@ -520,28 +631,28 @@ namespace spot const auto &e_s = arena_->edge_storage(s_[v]); // Optimization only for player if (!p && w_(e_s.dst, p)) - // If current strat is admissible -> nothing to do - // for env + // If current strat is admissible -> + // nothing to do for env continue; // This is an accepting edge that is no longer admissible // or we seek a more desirable edge (for player) - assert(min_win_par <= e_s.acc.max_set() - 1); - assert(e_s.acc.max_set() - 1 <= max_par); + assert(min_win_par <= to_par(e_s)); + assert(to_par(e_s) <= max_par); // Strategy heuristic : go to the oldest subgame - unsigned min_subgame_idx = -1u; + unsigned min_subgame_idx = unseen_mark; - s_[v] = -1; + s_[v] = no_strat_mark; for (const auto &e_fix : arena_->out(v)) { if (subgame_[e_fix.dst] >= rd) { - unsigned this_par = e_fix.acc.max_set() - 1; + par_t this_par = to_par(e_fix); // This edge must have less than max_par, // otherwise it would have already been attracted assert((this_par <= max_par) - || ((this_par&1) != (max_par&1))); + || (to_player(this_par) != (max_par&1))); // if it is accepting and leads to the winning region // -> valid fix if ((min_win_par <= this_par) @@ -555,7 +666,7 @@ namespace spot } } } - if (s_[v] == -1) + if (s_[v] == no_strat_mark) // NO fix found // This state is NOT won by p due to any accepting edges return true; // true -> grown @@ -576,7 +687,7 @@ namespace spot case (0): { assert(this_work.rd == 0); - 
assert(this_work.min_par == 0); + assert(this_work.min_par == min_par_graph_); unsigned rd; assert(this_work.max_par <= max_abs_par_); @@ -599,18 +710,20 @@ namespace spot // -> Priority compression // Optional, improves performance // Highest actually occurring - unsigned max_par = *subgame_info.all_parities.begin(); - unsigned min_win_par = max_par; - while ((min_win_par > 2) && - (!subgame_info.all_parities.count(min_win_par-1))) + // Attention in partially colored graphs + // the parity -1 and 0 appear + par_t max_par = *subgame_info.all_parities.begin(); + par_t min_win_par = max_par; + while ((min_win_par >= (min_par_graph_+2)) && + (!subgame_info.all_parities.count(min_win_par - 1))) min_win_par -= 2; - assert(max_par > 0); + assert(min_win_par >= min_par_graph_); + assert(max_par >= min_win_par); + assert((max_par&1) == (min_win_par&1)); assert(!subgame_info.all_parities.empty()); - assert(min_win_par > 0); // Get the player - bool p = min_win_par&1; - assert((max_par&1) == (min_win_par&1)); + bool p = to_player(min_win_par); // Attraction to highest par // This increases rd_ and passes it to rd attr(rd, p, max_par, true, min_win_par); @@ -619,17 +732,17 @@ namespace spot // Continuation w_stack_.emplace_back(1, rd, min_win_par, max_par); // Recursion - w_stack_.emplace_back(0, 0, 0, min_win_par-1); + w_stack_.emplace_back(0, 0, min_par_graph_, min_win_par - 1); // Others attracted will have higher counts in subgame break; } case (1): { unsigned rd = this_work.rd; - unsigned min_win_par = this_work.min_par; - unsigned max_par = this_work.max_par; - assert((min_win_par&1) == (max_par&1)); - bool p = min_win_par&1; + par_t min_win_par = this_work.min_par; + par_t max_par = this_work.max_par; + assert(to_player(min_win_par) == to_player(max_par)); + bool p = to_player(min_win_par); // Check if the attractor of w_[!p] is equal to w_[!p] // if so, player wins if there remain accepting transitions // for max_par (see fix_strat_acc) @@ -659,9 +772,9 @@ namespace spot // Mark as unseen subgame_[v] = unseen_mark; // Unset strat for testing - s_[v] = -1; + s_[v] = no_strat_mark; } - w_stack_.emplace_back(0, 0, 0, max_par); + w_stack_.emplace_back(0, 0, min_par_graph_, max_par); // No need to do anything else // the attractor of !p of this level is not changed } @@ -673,20 +786,6 @@ namespace spot } // while } // zielonka - // Undo change to the graph made along the way - inline void restore() - { - // "Unfix" the edges leaving the sccs - // This is called once the game has been solved - for (auto &e_stash : change_stash_) - { - auto &e = arena_->edge_storage(e_stash.e_num); - e.dst = e_stash.e_dst; - e.acc = e_stash.e_acc; - } - // Done - } - // Empty all internal variables inline void clean_up() { @@ -697,12 +796,11 @@ namespace spot s_.clear(); rd_ = 0; max_abs_par_ = 0; - change_stash_.clear(); } // Dedicated solver for special cases inline void one_par_subgame_solver(const subgame_info_t &info, - unsigned max_par) + par_t max_par) { assert(info.all_parities.size() == 1); // The entire subgame is won by the player of the only parity @@ -711,8 +809,8 @@ namespace spot // This subgame gets its own counter ++rd_; unsigned rd = rd_; - unsigned one_par = *info.all_parities.begin(); - bool winner = one_par & 1; + par_t one_par = *info.all_parities.begin(); + bool winner = to_player(one_par); assert(one_par <= max_par); for (unsigned v : c_states()) @@ -723,10 +821,10 @@ namespace spot subgame_[v] = rd; w_.set(v, winner); // Get the strategy - assert(s_[v] == -1); + assert(s_[v] == 
no_strat_mark); for (const auto &e : arena_->out(v)) { - unsigned this_par = e.acc.max_set() - 1; + par_t this_par = to_par(e); if ((subgame_[e.dst] >= rd) && (this_par <= max_par)) { assert(this_par == one_par); @@ -740,7 +838,18 @@ namespace spot // Done } - const unsigned unseen_mark = std::numeric_limits::max(); + template + inline par_t + to_par(const EDGE& e) + { + return all_edge_par_[arena_->edge_number(e)]; + } + + inline bool + to_player(par_t par) + { + return par & 1; + } twa_graph_ptr arena_; const std::vector *owner_ptr_; @@ -751,27 +860,31 @@ namespace spot // strategies for env and player; For synthesis only player is needed // We need a signed value here in order to "fix" the strategy // during construction - std::vector s_; + std::vector s_; // Informations about sccs andthe current scc std::unique_ptr info_; - unsigned max_abs_par_; // Max parity occurring in the current scc + par_t max_abs_par_; // Max parity occurring in the current scc + // Minimal and maximal parity occurring in the entire graph + par_t min_par_graph_, max_par_graph_; // Info on the current scc unsigned c_scc_idx_; - // Fixes made to the sccs that have to be undone - // before returning - std::vector change_stash_; // Change recursive calls to stack std::vector w_stack_; + // Directly store a vector of parities + // This vector will be created such + // that it takes care of the actual parity condition + // and after that zielonka can be called as if max odd + std::vector all_edge_par_; }; } // anonymous - bool solve_parity_game(const twa_graph_ptr& arena) + bool solve_parity_game(const twa_graph_ptr& arena, bool solve_globally) { parity_game pg; - return pg.solve(arena); + return pg.solve(arena, solve_globally); } bool solve_game(const twa_graph_ptr& arena) @@ -786,12 +899,39 @@ namespace spot return solve_parity_game(arena); } + // backward compatibility void pg_print(std::ostream& os, const const_twa_graph_ptr& arena) { - auto owner = ensure_parity_game(arena, "pg_print"); + print_pg(os, arena); + } - unsigned ns = arena->num_states(); - unsigned init = arena->get_init_state_number(); + std::ostream& print_pg(std::ostream& os, const const_twa_graph_ptr& arena) + { + bool is_par, max, odd; + is_par = arena->acc().is_parity(max, odd, true); + if (!is_par) + throw std::runtime_error("print_pg: arena must have a parity acceptance"); + const region_t& owner = *ensure_game(arena, "print_pg"); + + bool max_odd_colored = + max && odd && std::all_of(arena->edges().begin(), + arena->edges().end(), + [](const auto& e) + { + return (bool) e.acc; + }); + const_twa_graph_ptr towork = arena; + if (!max_odd_colored) + { + twa_graph_ptr tmp = + change_parity(arena, parity_kind_max, parity_style_odd); + colorize_parity_here(tmp, true); + towork = tmp; + } + + auto sn = arena->get_named_prop>("state-names"); + unsigned ns = towork->num_states(); + unsigned init = towork->get_init_state_number(); os << "parity " << ns - 1 << ";\n"; std::vector seen(ns, false); std::vector todo({init}); @@ -803,8 +943,8 @@ namespace spot continue; seen[src] = true; os << src << ' '; - os << arena->out(src).begin()->acc.max_set() - 1 << ' '; - os << (*owner)[src] << ' '; + os << towork->out(src).begin()->acc.max_set() - 1 << ' '; + os << owner[src] << ' '; bool first = true; for (auto& e: arena->out(src)) { @@ -815,10 +955,15 @@ namespace spot if (!seen[e.dst]) todo.push_back(e.dst); } - if (src == init) - os << " \"INIT\""; + if (sn && sn->size() > src && !(*sn)[src].empty()) + { + os << " \""; + escape_str(os, (*sn)[src]); + os << 
'"'; + } os << ";\n"; } + return os; } void alternate_players(spot::twa_graph_ptr& arena, @@ -843,7 +988,7 @@ namespace spot todo.pop_back(); seen[src] = true; bdd missing = bddtrue; - for (const auto& e: arena->out(src)) + for (auto& e: arena->out(src)) { bool osrc = (*owner)[src]; if (complete0 && !osrc) @@ -854,6 +999,21 @@ namespace spot (*owner)[e.dst] = !osrc; todo.push_back(e.dst); } + else if (e.src == e.dst) + { + if (e.cond == bddtrue) + { + // Fix trivial self-loop + // No need to add it to seen + auto inter = arena->new_state(); + owner->push_back(!osrc); + e.dst = inter; + arena->new_edge(inter, src, bddtrue, e.acc); + } + else + throw std::runtime_error("alternate_players(): " + "Nontrivial selfloop"); + } else if ((*owner)[e.dst] == osrc) { delete owner; @@ -872,10 +1032,19 @@ namespace spot arena->new_edge(sink_con, sink_env, bddtrue, um.second); arena->new_edge(sink_env, sink_con, bddtrue, um.second); } - arena->new_edge(src, sink_con, missing, um.second); + arena->new_edge(src, sink_env, missing, um.second); + assert(owner->at(src) != owner->at(sink_env)); } } + assert([&]() + { + for (const auto& e : arena->edges()) + if (owner->at(e.src) == owner->at(e.dst)) + return false; + return true; + }() && "Not alternating"); + arena->set_named_prop("state-player", owner); } @@ -929,7 +1098,7 @@ namespace spot ("set_state_players(): There must be as many owners as states"); arena->set_named_prop("state-player", - new region_t(std::forward(owners))); + new region_t(std::move(owners))); } void set_state_player(twa_graph_ptr arena, unsigned state, bool owner) @@ -951,7 +1120,18 @@ namespace spot (*owners)[state] = owner; } - const region_t& get_state_players(const_twa_graph_ptr arena) + const region_t& get_state_players(const const_twa_graph_ptr& arena) + { + region_t *owners = arena->get_named_prop + ("state-player"); + if (!owners) + throw std::runtime_error + ("get_state_players(): state-player property not defined, not a game?"); + + return *owners; + } + + const region_t& get_state_players(twa_graph_ptr& arena) { region_t *owners = arena->get_named_prop ("state-player"); @@ -976,7 +1156,7 @@ namespace spot } - const strategy_t& get_strategy(const_twa_graph_ptr arena) + const strategy_t& get_strategy(const const_twa_graph_ptr& arena) { auto strat_ptr = arena->get_named_prop("strategy"); if (!strat_ptr) @@ -996,7 +1176,7 @@ namespace spot throw std::runtime_error("set_strategy(): strategies need to have " "the same size as the automaton."); arena->set_named_prop("strategy", - new strategy_t(std::forward(strat))); + new strategy_t(std::move(strat))); } void set_synthesis_outputs(const twa_graph_ptr& arena, const bdd& outs) @@ -1047,7 +1227,7 @@ namespace spot ("set_state_winners(): There must be as many winners as states"); arena->set_named_prop("state-winner", - new region_t(std::forward(winners))); + new region_t(std::move(winners))); } void set_state_winner(twa_graph_ptr arena, unsigned state, bool winner) @@ -1069,7 +1249,7 @@ namespace spot (*winners)[state] = winner; } - const region_t& get_state_winners(const_twa_graph_ptr arena) + const region_t& get_state_winners(const const_twa_graph_ptr& arena) { region_t *winners = arena->get_named_prop("state-winner"); if (!winners) diff --git a/spot/twaalgos/game.hh b/spot/twaalgos/game.hh index 64f8d52c8..d4937e46c 100644 --- a/spot/twaalgos/game.hh +++ b/spot/twaalgos/game.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2017-2023 Laboratoire de 
Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -70,13 +70,19 @@ namespace spot /// This computes the winning strategy and winning region using /// Zielonka's recursive algorithm. \cite zielonka.98.tcs /// + /// By default only a 'local' strategy is computed: + /// Only the part of the arena reachable from the init state is considered. + /// If you want to compute a strategy for ALL states, set + /// \a solve_globally to true + /// /// Also includes some inspiration from Oink. /// \cite vandijk.18.tacas /// /// Returns the player winning in the initial state, and sets /// the state-winner and strategy named properties. SPOT_API - bool solve_parity_game(const twa_graph_ptr& arena); + bool solve_parity_game(const twa_graph_ptr& arena, + bool solve_globally = false); /// \ingroup games /// \brief Solve a safety game. @@ -112,10 +118,26 @@ namespace spot /// \ingroup games - /// \brief Print a max odd parity game using PG-solver syntax + /// \brief Print a parity game using PG-solver syntax + /// + /// The input automaton should have parity acceptance and should + /// define state owner. Since the PG solver format want player 1 to + /// solve a max odd condition, the acceptance condition will be + /// adapted to max odd if necessary. + /// + /// The output will list the initial state as first state (because + /// that is the convention of our parser), and list only reachable + /// states. + /// + /// If states are named, the names will be output as well. + /// @{ + SPOT_API + std::ostream& print_pg(std::ostream& os, const const_twa_graph_ptr& arena); + + SPOT_DEPRECATED("use print_pg() instead") SPOT_API void pg_print(std::ostream& os, const const_twa_graph_ptr& arena); - + /// @} /// \ingroup games /// \brief Highlight the edges of a strategy on an automaton. @@ -147,14 +169,18 @@ namespace spot /// \ingroup games /// \brief Get the owner of all states + ///@{ SPOT_API - const region_t& get_state_players(const_twa_graph_ptr arena); + const region_t& get_state_players(const const_twa_graph_ptr& arena); + SPOT_API + const region_t& get_state_players(twa_graph_ptr& arena); + ///@} /// \ingroup games /// \brief Get or set the strategy /// @{ SPOT_API - const strategy_t& get_strategy(const_twa_graph_ptr arena); + const strategy_t& get_strategy(const const_twa_graph_ptr& arena); SPOT_API void set_strategy(twa_graph_ptr arena, const strategy_t& strat); SPOT_API @@ -198,5 +224,5 @@ namespace spot /// \ingroup games /// \brief Get the winner of all states SPOT_API - const region_t& get_state_winners(const_twa_graph_ptr arena); + const region_t& get_state_winners(const const_twa_graph_ptr& arena); } diff --git a/spot/twaalgos/genem.cc b/spot/twaalgos/genem.cc index e49f5b07c..0b0d1fd5f 100644 --- a/spot/twaalgos/genem.cc +++ b/spot/twaalgos/genem.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017-2021 Laboratoire de Recherche et Developpement +// Copyright (C) 2017-2023 Laboratoire de Recherche et Developpement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
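For reference, here is a minimal usage sketch of the game interface documented in the hunk above (the new `solve_globally` flag of `solve_parity_game()` and `print_pg()`). It is illustrative only and not part of the patch; it assumes `arena` already carries a parity acceptance condition and the "state-player" named property.

#include <iostream>
#include <spot/twaalgos/game.hh>

// Solve the whole arena (not only the part reachable from the initial
// state) and dump it in PG-solver syntax.
void solve_and_dump(const spot::twa_graph_ptr& arena)
{
  // Passing true requests the new global solving mode.
  bool winner = spot::solve_parity_game(arena, true);
  // The solver stores its results as named properties on the arena.
  const spot::region_t& winners = spot::get_state_winners(arena);
  const spot::strategy_t& strat = spot::get_strategy(arena);
  std::cout << "initial state won by player " << winner << '\n'
            << winners.size() << " states, "
            << strat.size() << " strategy entries\n";
  spot::print_pg(std::cout, arena);
}

The same named properties are what the PG-solver printer and the highlighting helpers read back, so nothing else needs to be passed around.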
@@ -25,7 +25,7 @@ namespace spot { namespace { - enum genem_version_t { spot28, atva19, spot29, spot210 }; + enum genem_version_t { spot28, atva19, spot29, spot210, spot211, spot212 }; static genem_version_t genem_version = spot29; } @@ -33,6 +33,10 @@ namespace spot { if (emversion == nullptr || !strcasecmp(emversion, "spot29")) genem_version = spot29; + else if (!strcasecmp(emversion, "spot212")) + genem_version = spot212; + else if (!strcasecmp(emversion, "spot211")) + genem_version = spot211; else if (!strcasecmp(emversion, "spot210")) genem_version = spot210; else if (!strcasecmp(emversion, "spot28")) @@ -41,7 +45,8 @@ namespace spot genem_version = atva19; else throw std::invalid_argument("generic_emptiness_check version should be " - "one of {spot28, atva19, spot29, spot210}"); + "one of {spot28, atva19, spot29, spot210, " + "spot211, spot212}"); } namespace @@ -84,6 +89,10 @@ namespace spot scc_split_check(const scc_info& si, unsigned scc, const acc_cond& acc, Extra extra, acc_cond::mark_t tocut) { + if (genem_version == spot211 + || genem_version == spot212 + || genem_version == spot210) + tocut |= acc.fin_unit(); scc_and_mark_filter filt(si, scc, tocut); filt.override_acceptance(acc); scc_info upper_si(filt, EarlyStop @@ -118,13 +127,41 @@ namespace spot // Try to accept when Fin(fo) == true acc_cond::mark_t fo_m = {(unsigned) fo}; if (!scc_split_check - (si, scc, fpart.remove(fo_m, true), extra, fo_m)) + (si, scc, fpart, extra, fo_m)) if constexpr (EarlyStop) return false; // Try to accept when Fin(fo) == false acc = acc.force_inf(fo_m); } while (!acc.is_f()); + else if (genem_version == spot211) + { + do + { + auto [fo, fpart, rest] = acc.fin_unit_one_split(); + acc_cond::mark_t fo_m = {(unsigned) fo}; + if (!scc_split_check + (si, scc, fpart, extra, fo_m)) + if constexpr (EarlyStop) + return false; + acc = rest; + } + while (!acc.is_f()); + } + else if (genem_version == spot212) + { + do + { + auto [fo, fpart, rest] = acc.fin_unit_one_split_improved(); + acc_cond::mark_t fo_m = {(unsigned) fo}; + if (!scc_split_check + (si, scc, fpart, extra, fo_m)) + if constexpr (EarlyStop) + return false; + acc = rest; + } + while (!acc.is_f()); + } else if (genem_version == spot29) do { @@ -290,5 +327,82 @@ namespace spot return !scc_split_check_filtered(upper_si, forced_acc, callback, {}); } + // return ⊤ if there exists at least one accepting transition. 
+ static bool
+ accepting_transitions_aux(const scc_info &si, unsigned scc,
+ const acc_cond acc,
+ acc_cond::mark_t removed_colors,
+ acc_cond::mark_t tocut,
+ std::vector &accepting_transitions,
+ const bitvect& kept)
+ {
+ bool result = false;
+ scc_and_mark_filter filt(si, scc, tocut, kept);
+ filt.override_acceptance(acc);
+ scc_info upper_si(filt, scc_info_options::ALL);
+ for (unsigned sc = 0; sc < upper_si.scc_count(); ++sc)
+ result |= accepting_transitions_scc(upper_si, sc, acc, removed_colors,
+ accepting_transitions, kept);
+ return result;
+ }
+ bool
+ accepting_transitions_scc(const scc_info &si, unsigned scc,
+ const acc_cond aut_acc,
+ acc_cond::mark_t removed_colors,
+ std::vector& accepting_transitions,
+ const bitvect& kept)
+ {
+ // The idea is the same as in is_scc_empty()
+ bool result = false;
+ acc_cond::mark_t sets = si.acc_sets_of(scc);
+ acc_cond acc = aut_acc.restrict_to(sets);
+ acc = acc.remove(si.common_sets_of(scc), false);
+
+ auto inner_edges = si.inner_edges_of(scc);
+
+ if (si.is_trivial(scc))
+ return false;
+ if (acc.is_t() || acc.accepting(acc.get_acceptance().used_sets()))
+ {
+ for (auto& e : inner_edges)
+ if ((e.acc & removed_colors) == acc_cond::mark_t {})
+ accepting_transitions[si.get_aut()->edge_number(e)] = true;
+ return true;
+ }
+ else if (acc.is_f())
+ return false;
+ acc_cond::acc_code rest = acc_cond::acc_code::f();
+ for (const acc_cond& disjunct: acc.top_disjuncts())
+ if (acc_cond::mark_t fu = disjunct.fin_unit())
+ result |= accepting_transitions_aux(si, scc, acc.remove(fu, true),
+ (removed_colors | fu), fu,
+ accepting_transitions, kept);
+ else
+ rest |= disjunct.get_acceptance();
+ if (!rest.is_f())
+ {
+ acc_cond::mark_t m = { (unsigned) acc.fin_one() };
+ result |= accepting_transitions_aux(si, scc, acc.remove(m, true),
+ (removed_colors | m), m, accepting_transitions,
+ kept);
+ result |= accepting_transitions_scc(si, scc, acc.remove(m, false),
+ removed_colors, accepting_transitions,
+ kept);
+ }
+ return result;
+ }
+
+ std::vector
+ accepting_transitions(const const_twa_graph_ptr aut, acc_cond cond)
+ {
+ auto aut_vector_size = aut->edge_vector().size();
+ std::vector result(aut_vector_size, false);
+ auto kept = make_bitvect(aut_vector_size);
+ scc_info si(aut);
+ for (unsigned scc = 0; scc < si.scc_count(); ++scc)
+ accepting_transitions_scc(si, scc, cond, {}, result, *kept);
+ delete kept;
+ return result;
+ }
}
diff --git a/spot/twaalgos/genem.hh b/spot/twaalgos/genem.hh
index 2d1ded4c7..3fefcdc77 100644
--- a/spot/twaalgos/genem.hh
+++ b/spot/twaalgos/genem.hh
@@ -1,5 +1,5 @@
// -*- coding: utf-8 -*-
-// Copyright (C) 2017-2021 Laboratoire de Recherche et Developpement
+// Copyright (C) 2017-2022 Laboratoire de Recherche et Developpement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
@@ -100,8 +100,44 @@ namespace spot
/// - "spot29" improves upon the worst case of atva19. This is
/// the default.
/// - "spot210" improves upon "spot29" in a few cases where a Fin
- /// is shared by multiple disjuncts.
+ /// is shared by multiple disjuncts. This improves the worst
+ /// case complexity for EL-automata in the general case, but worsens
+ /// the complexity for Hyper-Rabin in particular.
+ /// - "spot211" is another attempt at fixing worst case complexities.
+ /// Compared to atva19, this improves the complexities for Rabin,
+ /// GeneralizedRabin, and EL without worsening the complexity of
+ /// Hyper-Rabin. 
SPOT_API void generic_emptiness_check_select_version(const char* emversion = nullptr);
+ /// \ingroup emptiness_check_algorithms
+ ///
+ /// Compute the set of transitions contained in
+ /// an accepting cycle of the SCC \a scc of \a aut.
+ ///
+ /// \param si scc_info used to describe the automaton
+ /// \param scc SCC to consider
+ /// \param aut_acc Acceptance condition used for this SCC
+ /// \param removed_colors A set of colors that can't appear on a transition
+ /// \param accepting_transitions The result. Must be a vector of size at least
+ /// the max index + 1 of a transition of the SCC scc and the value of each
+ /// index of a transition of this SCC must be set to false
+ /// \param kept A list of booleans that say if a transition is kept even if
+ /// it does not have an element of removed_colors
+ /// \return True if there is an accepting transition
+ SPOT_API bool
+ accepting_transitions_scc(const scc_info &si, unsigned scc,
+ const acc_cond aut_acc,
+ acc_cond::mark_t removed_colors,
+ std::vector& accepting_transitions,
+ const bitvect& kept);
+
+ /// \ingroup emptiness_check_algorithms
+ ///
+ /// Compute the set of transitions contained in an accepting cycle of \a aut.
+ /// \param aut Automaton to process
+ /// \param cond Acceptance condition associated
+ SPOT_API std::vector
+ accepting_transitions(const const_twa_graph_ptr aut, acc_cond cond);
+
}
diff --git a/spot/twaalgos/gfguarantee.hh b/spot/twaalgos/gfguarantee.hh
index 5124667f4..40cb16f97 100644
--- a/spot/twaalgos/gfguarantee.hh
+++ b/spot/twaalgos/gfguarantee.hh
@@ -1,5 +1,5 @@
// -*- coding: utf-8 -*-
-// Copyright (C) 2018 Laboratoire de Recherche et Développement
+// Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
@@ -48,7 +48,7 @@ namespace spot
/// \brief Convert GF(φ) into a (D)BA if φ is a guarantee property.
///
/// If the formula \a gf has the form GΦ where Φ matches either F(φ)
- /// or F(φ₁)|F(φ₂)|...|F(φₙ), we translate Φ into A_Φ and attempt to
+ /// or F(φ₁)&F(φ₂)&...&F(φₙ), we translate Φ into A_Φ and attempt to
/// minimize it to a WDBA W_Φ. If the resulting automaton is
/// terminal, we then call g_f_terminal_inplace(W_Φ). If \a
/// deterministic is not set, we keep the minimized automaton only
diff --git a/spot/twaalgos/hoa.cc b/spot/twaalgos/hoa.cc
index 1865a6d49..e6147afda 100644
--- a/spot/twaalgos/hoa.cc
+++ b/spot/twaalgos/hoa.cc
@@ -31,6 +31,7 @@
#include
#include
#include
+#include
using namespace std::string_literals;
@@ -793,7 +794,7 @@ namespace spot
os << (v1_1 ? "spot." : "spot-") << "state-player:";
if (player->size() != num_states)
throw std::runtime_error("print_hoa(): state-player property has"
- " (" + std::to_string(player->size()) +
+ " " + std::to_string(player->size()) +
" states but automaton has "
+ std::to_string(num_states));
unsigned n = 0;
@@ -973,7 +974,11 @@ namespace spot
strcpy(tmpopt, opt);
tmpopt[n] = 'k';
tmpopt[n + 1] = 0;
- preserve_names = true;
+ // Preserve names if we have some state names, or if we are
+ // not a kripke_graph. 
+ auto sn = aut->get_named_prop>("state-names"); + preserve_names = + !!sn || !std::dynamic_pointer_cast(aut); } auto a = std::dynamic_pointer_cast(aut); diff --git a/spot/twaalgos/hoa.hh b/spot/twaalgos/hoa.hh index 8c2da4e43..441b9ed16 100644 --- a/spot/twaalgos/hoa.hh +++ b/spot/twaalgos/hoa.hh @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -94,7 +95,7 @@ namespace spot /// registered in the automaton is not only ignored, but also /// removed from the alias list stored in the automaton. /// - /// The \a or_str, \a and_str, and \ap_printer arguments are + /// The \a or_str, \a and_str, and \a ap_printer arguments are /// used to print operators OR, AND, and to print atomic propositions /// that are not aliases. \a lpar_str and \a rpar_str are used /// to group conjuncts that appear in a disjunction. @@ -118,7 +119,7 @@ namespace spot /// /// - If an alias A exists for \a label, `"@A"` is returned. /// - /// - If an alias A exists for the negation of \a label, `"!@A` + /// - If an alias A exists for the negation of \a label, `"!@A"` /// is returned. /// /// - If \a label is true or false, `true_str` or `false_str` diff --git a/spot/twaalgos/ltl2taa.cc b/spot/twaalgos/ltl2taa.cc index 9c10777a9..eaba49e92 100644 --- a/spot/twaalgos/ltl2taa.cc +++ b/spot/twaalgos/ltl2taa.cc @@ -61,7 +61,8 @@ namespace spot { std::vector empty; res_->create_transition(init_, empty); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -76,7 +77,8 @@ namespace spot std::vector empty; taa_tgba::transition* t = res_->create_transition(init_, empty); res_->add_condition(t, f); - succ_state ss = { empty, f, empty }; + succ_state ss; + ss.condition = f; succ_.emplace_back(ss); return; } @@ -90,7 +92,7 @@ namespace spot return; dst.emplace_back(v.init_); res_->create_transition(init_, dst); - succ_state ss = { dst, formula::tt(), a }; + succ_state ss = { std::move(dst), formula::tt(), std::move(a) }; succ_.emplace_back(ss); return; } @@ -206,7 +208,7 @@ namespace spot } t = res_->create_transition(init_, u); res_->add_condition(t, f); - succ_state ss = { u, f, a }; + succ_state ss = { std::move(u), f, std::move(a) }; succ_.emplace_back(ss); } diff --git a/spot/twaalgos/ltl2tgba_fm.cc b/spot/twaalgos/ltl2tgba_fm.cc index 3566abc97..ed9ad7d06 100644 --- a/spot/twaalgos/ltl2tgba_fm.cc +++ b/spot/twaalgos/ltl2tgba_fm.cc @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -106,7 +107,7 @@ namespace spot { typedef twa_graph::namer namer; public: - ratexp_to_dfa(translate_dict& dict); + ratexp_to_dfa(translate_dict& dict, bool disable_scc_trimming = false); std::tuple succ(formula f); ~ratexp_to_dfa(); @@ -121,6 +122,7 @@ namespace spot typedef robin_hood::unordered_node_map f2a_t; std::vector automata_; f2a_t f2a_; + bool disable_scc_trimming_; }; // Helper dictionary. We represent formulae using BDDs to @@ -753,55 +755,7 @@ namespace spot SPOT_UNREACHABLE(); case op::AndNLM: { - unsigned s = f.size(); - vec final; - vec non_final; - - for (auto g: f) - if (g.accepts_eword()) - final.emplace_back(g); - else - non_final.emplace_back(g); - - if (non_final.empty()) - // (a* & b*);c = (a*|b*);c - return recurse_and_concat(formula::OrRat(std::move(final))); - if (!final.empty()) - { - // let F_i be final formulae - // N_i be non final formula - // (F_1 & ... & F_n & N_1 & ... & N_m) - // = (F_1 | ... | F_n);[*] && (N_1 & ... & N_m) - // | (F_1 | ... | F_n) && (N_1 & ... 
& N_m);[*] - formula f = formula::OrRat(std::move(final)); - formula n = formula::AndNLM(std::move(non_final)); - formula t = formula::one_star(); - formula ft = formula::Concat({f, t}); - formula nt = formula::Concat({n, t}); - formula ftn = formula::AndRat({ft, n}); - formula fnt = formula::AndRat({f, nt}); - return recurse_and_concat(formula::OrRat({ftn, fnt})); - } - // No final formula. - // Translate N_1 & N_2 & ... & N_n into - // N_1 && (N_2;[*]) && ... && (N_n;[*]) - // | (N_1;[*]) && N_2 && ... && (N_n;[*]) - // | (N_1;[*]) && (N_2;[*]) && ... && N_n - formula star = formula::one_star(); - vec disj; - for (unsigned n = 0; n < s; ++n) - { - vec conj; - for (unsigned m = 0; m < s; ++m) - { - formula g = f[m]; - if (n != m) - g = formula::Concat({g, star}); - conj.emplace_back(g); - } - disj.emplace_back(formula::AndRat(std::move(conj))); - } - return recurse_and_concat(formula::OrRat(std::move(disj))); + return recurse_and_concat(rewrite_and_nlm(f)); } case op::AndRat: { @@ -900,8 +854,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(res_ndet, label, bddop_and, - dict_.var_set)); + dict_.bdd_to_sere(bdd_restrict(res_ndet, label)); dest = formula::first_match(dest); if (to_concat_) dest = formula::Concat({dest, to_concat_}); @@ -955,8 +908,9 @@ namespace spot } - ratexp_to_dfa::ratexp_to_dfa(translate_dict& dict) + ratexp_to_dfa::ratexp_to_dfa(translate_dict& dict, bool disable_scc_trimming) : dict_(dict) + , disable_scc_trimming_(disable_scc_trimming) { } @@ -995,9 +949,7 @@ namespace spot bdd all_props = bdd_existcomp(res, dict_.var_set); for (bdd label: minterms_of(all_props, var_set)) { - formula dest = - dict_.bdd_to_sere(bdd_appex(res, label, bddop_and, - dict_.var_set)); + formula dest = dict_.bdd_to_sere(bdd_restrict(res, label)); f2a_t::const_iterator i = f2a_.find(dest); if (i != f2a_.end() && i->second.first == nullptr) continue; @@ -1011,6 +963,12 @@ namespace spot } } + if (disable_scc_trimming_) + { + automata_.emplace_back(a, namer); + return labelled_aut(a, namer); + } + // The following code trims the automaton in a crude way by // eliminating SCCs that are not coaccessible. It does not // actually remove the states, it simply marks the corresponding @@ -1029,11 +987,16 @@ namespace spot bool coacc = false; auto& st = sm->states_of(n); for (auto l: st) - if (namer->get_name(l).accepts_eword()) - { - coacc = true; - break; - } + { + formula lf = namer->get_name(l); + // Somehow gcc 12.2.0 thinks lf can be nullptr. + SPOT_ASSUME(lf != nullptr); + if (lf.accepts_eword()) + { + coacc = true; + break; + } + } if (!coacc) { // ... or if any of its successors is coaccessible. 
@@ -1471,9 +1434,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(f1, label, bddop_and, - dict_.var_set)); - + dict_.bdd_to_sere(bdd_restrict(f1, label)); formula dest2 = formula::binop(o, dest, node[1]); bool unamb = dict_.unambiguous; if (!dest2.is_ff()) @@ -1552,9 +1513,7 @@ namespace spot for (bdd label: minterms_of(all_props, var_set)) { formula dest = - dict_.bdd_to_sere(bdd_appex(f1, label, bddop_and, - dict_.var_set)); - + dict_.bdd_to_sere(bdd_restrict(f1, label)); formula dest2 = formula::binop(o, dest, node[1]); bdd udest = @@ -1787,16 +1746,15 @@ namespace spot var_set = bdd_existcomp(bdd_support(t.symbolic), d_.var_set); all_props = bdd_existcomp(t.symbolic, d_.var_set); } - for (bdd one_prop_set: minterms_of(all_props, var_set)) + for (bdd label: minterms_of(all_props, var_set)) { - minato_isop isop(t.symbolic & one_prop_set); + minato_isop isop(t.symbolic & label); bdd cube; while ((cube = isop.next()) != bddfalse) { bdd label = bdd_exist(cube, d_.next_set); bdd dest_bdd = bdd_existcomp(cube, d_.next_set); - formula dest = - d_.conj_bdd_to_formula(dest_bdd); + formula dest = d_.conj_bdd_to_formula(dest_bdd); // Handle a Miyano-Hayashi style unrolling for // rational operators. Marked nodes correspond to @@ -1818,8 +1776,7 @@ namespace spot dest = d_.mt.mark_concat_ops(dest); } // Note that simplify_mark may have changed dest. - dest_bdd = bdd_ithvar(d_.register_next_variable(dest)); - res |= label & dest_bdd; + res |= label & bdd_ithvar(d_.register_next_variable(dest)); } } t.symbolic = res; @@ -2040,6 +1997,7 @@ namespace spot { if (aborter && aborter->too_large(a)) { + a->release_formula_namer(namer, false); if (!simplifier) delete s; return nullptr; @@ -2120,16 +2078,15 @@ namespace spot // // FIXME: minato_isop is quite expensive, and I (=adl) // don't think we really care that much about getting the - // smalled sum of products that minato_isop strives to + // smallest sum of products that minato_isop strives to // compute. Given that Next and Acc variables should // always be positive, maybe there is a faster way to // compute the successors? E.g. using bdd_satone() and // ignoring negated Next and Acc variables. - minato_isop isop(res & one_prop_set); + minato_isop isop(bdd_restrict(res, one_prop_set)); bdd cube; while ((cube = isop.next()) != bddfalse) { - bdd label = bdd_exist(cube, d.next_set); bdd dest_bdd = bdd_existcomp(cube, d.next_set); formula dest = d.conj_bdd_to_formula(dest_bdd); @@ -2147,8 +2104,9 @@ namespace spot if (symb_merge) dest = fc.canonicalize(dest); - bdd conds = bdd_existcomp(label, d.var_set); - bdd promises = bdd_existcomp(label, d.a_set); + bdd conds = + exprop ? 
one_prop_set : bdd_existcomp(cube, d.var_set); + bdd promises = bdd_existcomp(cube, d.a_set); dests.emplace_back(transition(dest, conds, promises)); } } @@ -2271,4 +2229,63 @@ namespace spot return a; } + twa_graph_ptr + sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming) + { + f = negative_normal_form(f); + + tl_simplifier* s = new tl_simplifier(dict); + twa_graph_ptr a = make_twa_graph(dict); + + translate_dict d(a, s, false, false, false); + ratexp_to_dfa sere2dfa(d, disable_scc_trimming); + + auto [dfa, namer, state] = sere2dfa.succ(f); + + // language was empty, build an automaton with one non accepting state + if (dfa == nullptr) + { + auto res = make_twa_graph(dict); + res->set_init_state(res->new_state()); + res->prop_universal(true); + res->prop_complete(false); + res->prop_stutter_invariant(true); + res->prop_terminal(true); + res->prop_state_acc(true); + return res; + } + + auto res = make_twa_graph(dfa, {false, false, true, false, false, false}); + + // HACK: translate_dict registers the atomic propositions in the "final" + // automaton that would be produced by a full translation, not in the + // intermediate automaton we're interested in. We can copy them from the + // resulting automaton. + res->copy_ap_of(a); + + res->prop_state_acc(true); + const auto acc_mark = res->set_buchi(); + + size_t sn = namer->state_to_name.size(); + auto names = new std::vector(sn); + for (size_t i = 0; i < sn; ++i) + { + formula g = namer->state_to_name[i]; + (*names)[i] = str_psl(g); + if (g.accepts_eword()) + { + if (res->get_graph().state_storage(i).succ == 0) + res->new_edge(i, i, bddfalse, acc_mark); + else + { + for (auto& e : res->out(i)) + e.acc = acc_mark; + } + } + } + + res->set_named_prop("state-names", names); + + return res; + } } diff --git a/spot/twaalgos/ltl2tgba_fm.hh b/spot/twaalgos/ltl2tgba_fm.hh index 8c1827490..18341fb37 100644 --- a/spot/twaalgos/ltl2tgba_fm.hh +++ b/spot/twaalgos/ltl2tgba_fm.hh @@ -88,4 +88,7 @@ namespace spot tl_simplifier* simplifier = nullptr, bool unambiguous = false, const output_aborter* aborter = nullptr); + + SPOT_API twa_graph_ptr + sere_to_tgba(formula f, const bdd_dict_ptr& dict, bool disable_scc_trimming = false); } diff --git a/spot/twaalgos/mealy_machine.cc b/spot/twaalgos/mealy_machine.cc index 99b762f16..25bab05a9 100644 --- a/spot/twaalgos/mealy_machine.cc +++ b/spot/twaalgos/mealy_machine.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2021, 2022, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
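To illustrate the new `sere_to_tgba()` entry point declared just above in ltl2tgba_fm.hh, here is a hedged sketch that is not part of the patch; it assumes the caller already holds a SERE as a `spot::formula` and relies only on the signature shown in the header diff.

#include <iostream>
#include <spot/twaalgos/hoa.hh>
#include <spot/twaalgos/ltl2tgba_fm.hh>

// Translate a SERE into an automaton whose states are named by the
// residual expressions, then print it in the HOA format.
void dump_sere_automaton(spot::formula sere)
{
  spot::bdd_dict_ptr dict = spot::make_bdd_dict();
  // The third argument defaults to false, so SCC trimming stays enabled.
  spot::twa_graph_ptr aut = spot::sere_to_tgba(sere, dict);
  spot::print_hoa(std::cout, aut) << '\n';
}

Passing `true` as the third argument would keep the non-coaccessible SCCs that the translation normally prunes, which is what the `disable_scc_trimming` flag threaded through `ratexp_to_dfa` is for.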
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#include
#include
@@ -34,7 +35,10 @@
#include
#include
#include
+#include
+#include
#include
+#include
#include
@@ -55,6 +59,7 @@ namespace
{
+ using namespace spot;
bool is_deterministic_(const std::vector& ins)
{
const unsigned n_ins = ins.size();
@@ -64,27 +69,90 @@ namespace
return false;
return true;
}
+
+#ifndef NDEBUG
+ bool is_complete_(const const_twa_graph_ptr& m,
+ const bdd& outs)
+ {
+ auto* sp = m->get_named_prop("state-player");
+ const auto N = m->num_states();
+ for (auto s = 0u; s < N; ++s)
+ {
+ if (sp && sp->at(s))
+ continue; // No need to check player states
+ bdd all_cond = bddfalse;
+ for (const auto& e : m->out(s))
+ all_cond |= bdd_exist(e.cond, outs);
+ if (all_cond != bddtrue)
+ return false;
+ }
+ return true;
+ }
+#endif
+}
+
+namespace
+{
+ static std::unique_ptr sat_csv_file;
+ struct fwrapper{
+ std::string fname;
+ std::FILE* f;
+ fwrapper(const std::string& name)
+ : fname{name}
+ , f{std::fopen(name.c_str(), "a")}
+ {
+ if (!f)
+ throw std::runtime_error("`" + name
+ + "' could not be opened for writing.");
+ }
+ ~fwrapper()
+ {
+ std::fclose(f);
+ f = nullptr;
+ }
+ fwrapper& operator=(const fwrapper&) = delete;
+ fwrapper& operator=(fwrapper&&) = delete;
+ fwrapper(const fwrapper&) = delete;
+ fwrapper(fwrapper&&) = delete;
+ };
+ static std::unique_ptr sat_dimacs_file;
+ static std::string sat_instance_name = "";
}
namespace spot
{
+ static bdd
+ ensure_mealy(const char* function_name,
+ const const_twa_graph_ptr& m)
+ {
+ if (SPOT_UNLIKELY(!m->acc().is_t()))
+ throw std::runtime_error(std::string(function_name)
+ + "(): Mealy machines must have "
+ "true acceptance condition");
+ bdd* out = m->get_named_prop("synthesis-outputs");
+ if (SPOT_UNLIKELY(!out))
+ throw std::runtime_error(std::string(function_name)
+ + "(): \"synthesis-outputs\" not defined");
+ return *out;
+ }
+
bool
is_mealy(const const_twa_graph_ptr& m)
{
if (!m->acc().is_t())
- {
- trace << "is_mealy(): Mealy machines must have "
- "true acceptance condition.\n";
- return false;
- }
+ {
+ trace << "is_mealy(): Mealy machines must have "
+ "true acceptance condition.\n";
+ return false;
+ }
if (!m->get_named_prop("synthesis-outputs"))
- {
- trace << "is_mealy(): \"synthesis-outputs\" not found!\n";
- return false;
- }
+ {
+ trace << "is_mealy(): \"synthesis-outputs\" not found!\n";
+ return false;
+ }
return true;
}
@@ -115,11 +183,13 @@ namespace spot
if (!is_mealy(m))
return false;
- if (m->get_named_prop("state-player") == nullptr)
+ if (!m->get_named_prop("state-player"))
{
trace << "is_split_mealy(): Split mealy machine must define the named "
"property \"state-player\"!\n";
+ return false;
}
+
auto sp = get_state_players(m);
if (sp.size() != m->num_states())
@@ -201,9 +271,7 @@ namespace spot
void
split_separated_mealy_here(const twa_graph_ptr& m)
{
- assert(is_mealy(m));
-
- auto output_bdd = get_synthesis_outputs(m);
+ bdd output_bdd = ensure_mealy("split_separated_mealy_here", m);
struct dst_cond_color_t
{
@@ -272,10 +340,10 @@ namespace spot
twa_graph_ptr
split_separated_mealy(const const_twa_graph_ptr& m)
{
- assert(is_mealy((m)));
+ bdd outputs = ensure_mealy("split_separated_mealy", m);
auto m2 = make_twa_graph(m, twa::prop_set::all());
m2->copy_acceptance_of(m);
- set_synthesis_outputs(m2, get_synthesis_outputs(m));
+ set_synthesis_outputs(m2, outputs);
split_separated_mealy_here(m2);
return m2;
}
@@ -716,7 +784,7 @@ namespace spot
twa_graph_ptr
reduce_mealy(const const_twa_graph_ptr& mm, bool output_assignment)
{
- 
assert(is_mealy(mm)); + bdd outputs = ensure_mealy("reduce_mealy", mm); if (mm->get_named_prop>("state-player")) throw std::runtime_error("reduce_mealy(): " "Only works on unsplit machines.\n"); @@ -724,7 +792,7 @@ namespace spot auto mmc = make_twa_graph(mm, twa::prop_set::all()); mmc->copy_ap_of(mm); mmc->copy_acceptance_of(mm); - set_synthesis_outputs(mmc, get_synthesis_outputs(mm)); + set_synthesis_outputs(mmc, outputs); reduce_mealy_here(mmc, output_assignment); @@ -734,7 +802,7 @@ namespace spot void reduce_mealy_here(twa_graph_ptr& mm, bool output_assignment) { - assert(is_mealy(mm)); + ensure_mealy("reduce_mealy_here", mm); // Only consider infinite runs mm->purge_dead_states(); @@ -794,6 +862,121 @@ namespace #else void trace_clause(const std::vector&){} #endif + struct satprob_info + { + stopwatch sw; + + double premin_time, reorg_time, partsol_time, player_incomp_time, + incomp_time, split_all_let_time, split_min_let_time, + split_cstr_time, prob_init_build_time, sat_time, + build_time, refine_time, total_time; + long long n_classes, n_refinement, n_lit, n_clauses, + n_iteration, n_letters_part, n_bisim_let, n_min_states, done; + std::string task; + const std::string instance; + + satprob_info(const std::string& instance) + : premin_time{-1} + , reorg_time{-1} + , partsol_time{-1} + , player_incomp_time{-1} + , incomp_time{-1} + , split_all_let_time{-1} + , split_min_let_time{-1} + , split_cstr_time{-1} + , prob_init_build_time{-1} + , sat_time{-1} + , build_time{-1} + , refine_time{-1} + , total_time{-1} + , n_classes{-1} + , n_refinement{-1} + , n_lit{-1} + , n_clauses{-1} + , n_iteration{-1} + , n_letters_part{-1} + , n_bisim_let{-1} + , n_min_states{-1} + , done{-1} + , task{} + , instance{instance+","} + { + } + + void start() + { + sw.start(); + } + double stop() + { + return sw.stop(); + } + double restart() + { + double res = sw.stop(); + sw.start(); + return res; + } + // Writing also "flushes" + void write() + { + if (!sat_csv_file) + return; + auto f = [](std::ostream& o, auto& v, bool sep = true) + { + if (v >= 0) + o << v; + if (sep) + o.put(','); + v = -1; + }; + + auto& out = *sat_csv_file; + if (out.tellp() == 0) + { + out << "instance,task,premin_time,reorg_time,partsol_time," + << "player_incomp_time,incomp_time,split_all_let_time," + << "split_min_let_time,split_cstr_time,prob_init_build_time," + << "sat_time,build_time,refine_time,total_time,n_classes," + << "n_refinement,n_lit,n_clauses,n_iteration,n_letters_part," + << "n_bisim_let,n_min_states,done\n"; + } + + assert(!task.empty()); + out << instance; + out << task; + task = ""; + out.put(','); + + std::stringstream ss; + + f(ss, premin_time); + f(ss, reorg_time); + f(ss, partsol_time); + f(ss, player_incomp_time); + f(ss, incomp_time); + f(ss, split_all_let_time); + f(ss, split_min_let_time); + f(ss, split_cstr_time); + f(ss, prob_init_build_time); + f(ss, sat_time); + f(ss, build_time); + f(ss, refine_time); + f(ss, total_time); + f(ss, n_classes); + f(ss, n_refinement); + f(ss, n_lit); + f(ss, n_clauses); + f(ss, n_iteration); + f(ss, n_letters_part); + f(ss, n_bisim_let); + f(ss, n_min_states); + f(ss, done, false); + out << ss.str(); + out.put('\n'); + } + }; + template bool all_of(const CONT& c) @@ -1007,6 +1190,28 @@ namespace std::pair reorganize_mm(const_twa_graph_ptr mm, const std::vector& sp) { + // Check if the twa_graph already has the correct form + { + auto sp = get_state_players(mm); + // All player states mus be at the end + bool is_ok = true; + bool seen_player = false; + for (const auto& 
p : sp) + { + if (seen_player & !p) + { + is_ok = false; + break; + } + seen_player |= p; + } + if (is_ok) + return {mm, + mm->num_states() + - std::accumulate(sp.begin(), sp.end(), 0)}; + } + // We actually need to generate a new graph with the correct + // form // Purge unreachable and reorganize the graph std::vector renamed(mm->num_states(), -1u); const unsigned n_old = mm->num_states(); @@ -1079,8 +1284,8 @@ namespace } square_matrix - compute_incomp(const_twa_graph_ptr mm, const unsigned n_env, - stopwatch& sw) + compute_incomp_impl_(const_twa_graph_ptr mm, const unsigned n_env, + satprob_info& si, bool is_partitioned) { const unsigned n_tot = mm->num_states(); @@ -1091,20 +1296,6 @@ namespace // Have two states already been checked for common pred square_matrix checked_pred(n_env, false); - // We also need a transposed_graph - auto mm_t = make_twa_graph(mm->get_dict()); - mm_t->copy_ap_of(mm); - mm_t->new_states(n_env); - - for (unsigned s = 0; s < n_env; ++s) - { - for (const auto& e_env : mm->out(s)) - { - unsigned dst_env = mm->out(e_env.dst).begin()->dst; - mm_t->new_edge(dst_env, s, e_env.cond); - } - } - // Utility function auto get_cond = [&mm](unsigned s)->const bdd& {return mm->out(s).begin()->cond; }; @@ -1121,7 +1312,9 @@ namespace // Associated condition and id of each player state std::vector> ps2c; ps2c.reserve(n_tot - n_env); + // bdd id -> internal index std::unordered_map all_out_cond; + for (unsigned s1 = n_env; s1 < n_tot; ++s1) { const bdd &c1 = get_cond(s1); @@ -1136,27 +1329,29 @@ namespace #endif } // Are two player condition ids states incompatible + // Matrix for incompatibility square_matrix inc_player(all_out_cond.size(), false); + // Matrix whether computed or not + square_matrix inc_player_comp(all_out_cond.size(), false); // Compute. First is id of bdd - for (const auto& p1 : all_out_cond) - for (const auto& p2 : all_out_cond) - { - if (p1.second > p2.second) - continue; - inc_player.set(p1.second, p2.second, - !bdd_have_common_assignment( - bdd_from_int((int) p1.first), - bdd_from_int((int) p2.first))); - assert(inc_player.get(p1.second, p2.second) - == ((bdd_from_int((int) p1.first) - & bdd_from_int((int) p2.first)) == bddfalse)); - } + // Lazy eval: Compute incompatibility between out conditions + // only if demanded + auto is_p_incomp = [&](unsigned s1, unsigned s2) { - return inc_player.get(ps2c[s1].second, ps2c[s2].second); + const auto& [s1bdd, s1idx] = ps2c[s1]; + const auto& [s2bdd, s2idx] = ps2c[s2]; + + if (!inc_player_comp.get(s1idx, s2idx)) + { + inc_player_comp.set(s1idx, s2idx, true); + inc_player.set(s1idx, s2idx, + !bdd_have_common_assignment(s1bdd, s2bdd)); + } + return inc_player.get(s1idx, s2idx); }; - dotimeprint << "Done computing player incomp " << sw.stop() << '\n'; + si.player_incomp_time = si.restart(); #ifdef TRACE trace << "player cond id incomp\n"; @@ -1166,15 +1361,28 @@ namespace #endif // direct incomp: Two env states can reach incompatible player states // under the same input + // The original graph mm is not sorted, and most of the + // sorting is not rentable + // However, bdd_have_common_assignment simply becomes equality auto direct_incomp = [&](unsigned s1, unsigned s2) { for (const auto& e1 : mm->out(s1)) for (const auto& e2 : mm->out(s2)) { + if (is_partitioned && (e1.cond != e2.cond)) + continue; if (!is_p_incomp(e1.dst - n_env, e2.dst - n_env)) continue; //Compatible -> no prob // Reachable under same letter? 
- if (bdd_have_common_assignment(e1.cond, e2.cond)) + if (is_partitioned) // -> Yes + { + trace << s1 << " and " << s2 << " directly incomp " + "due to successors " << e1.dst << " and " << e2.dst + << '\n'; + return true; + } + else if (!is_partitioned + && bdd_have_common_assignment(e1.cond, e2.cond)) { trace << s1 << " and " << s2 << " directly incomp " "due to successors " << e1.dst << " and " << e2.dst @@ -1187,7 +1395,27 @@ namespace // If two states can reach an incompatible state // under the same input, then they are incompatible as well - auto tag_predec = [&](unsigned s1, unsigned s2) + + // Version if the input is not partitioned + // We also need a transposed_graph + twa_graph_ptr mm_t = nullptr; + if (!is_partitioned) + { + mm_t = make_twa_graph(mm->get_dict()); + mm_t->copy_ap_of(mm); + mm_t->new_states(n_env); + + for (unsigned s = 0; s < n_env; ++s) + { + for (const auto& e_env : mm->out(s)) + { + unsigned dst_env = mm->out(e_env.dst).begin()->dst; + mm_t->new_edge(dst_env, s, e_env.cond); + } + } + } + + auto tag_predec_unpart = [&](unsigned s1, unsigned s2) { static std::vector> todo_; assert(todo_.empty()); @@ -1221,17 +1449,98 @@ namespace // Done tagging all pred }; + // Version of taging taking advantaged of partitioned conditions + struct S + { + }; + struct T + { + int id; + }; + std::unique_ptr> mm_t_part; + if (is_partitioned) + { + mm_t_part = std::make_unique>(n_env, mm->num_edges()); + mm_t_part->new_states(n_env); + + for (unsigned s = 0; s < n_env; ++s) + { + for (const auto& e_env : mm->out(s)) + { + unsigned dst_env = mm->out(e_env.dst).begin()->dst; + mm_t_part->new_edge(dst_env, s, e_env.cond.id()); + } + } + + // Now we need to sort the edge to ensure that + // the next algo works correctly + mm_t_part->sort_edges_srcfirst_([](const auto& e1, const auto& e2) + {return e1.id < e2.id; }); + mm_t_part->chain_edges_(); + } + + auto tag_predec_part = [&](unsigned s1, unsigned s2) + { + static std::vector> todo_; + assert(todo_.empty()); + + todo_.emplace_back(s1, s2); + + while (!todo_.empty()) + { + auto [i, j] = todo_.back(); + todo_.pop_back(); + if (checked_pred.get(i, j)) + continue; + // If predecs are already marked incomp + auto e_it_i = mm_t_part->out(i); + auto e_it_j = mm_t_part->out(j); + + auto e_it_i_e = e_it_i.end(); + auto e_it_j_e = e_it_j.end(); + + auto e_i = e_it_i.begin(); + auto e_j = e_it_j.begin(); + + // Joint iteration over both edge groups + while ((e_i != e_it_i_e) && (e_j != e_it_j_e)) + { + if (e_i->id < e_j->id) + ++e_i; + else if (e_j->id < e_i->id) + ++e_j; + else + { + assert(e_j->id == e_i->id); + trace << e_i->dst << " and " << e_j->dst << " tagged incomp" + " due to " << e_i->id << '\n'; + inc_env.set(e_i->dst, e_j->dst, true); + todo_.emplace_back(e_i->dst, e_j->dst); + ++e_i; + ++e_j; + } + } + checked_pred.set(i, j, true); + } + // Done tagging all pred + }; + for (unsigned s1 = 0; s1 < n_env; ++s1) for (unsigned s2 = s1 + 1; s2 < n_env; ++s2) { if (inc_env.get(s1, s2)) continue; // Already done + // Check if they are incompatible for some letter // We have to check all pairs of edges if (direct_incomp(s1, s2)) { inc_env.set(s1, s2, true); - tag_predec(s1, s2); + if (is_partitioned) + tag_predec_part(s1, s2); + else + tag_predec_unpart(s1, s2); + } } @@ -1239,11 +1548,40 @@ namespace trace << "Env state incomp\n"; inc_env.print(std::cerr); #endif - + si.incomp_time = si.restart(); return inc_env; + } // incomp no partition + + square_matrix + compute_incomp(const_twa_graph_ptr mm, const unsigned n_env, + satprob_info& 
si, int max_letter_mult) + { + // Try to generate a graph with partitioned env transitions + auto mm2 = make_twa_graph(mm, twa::prop_set::all()); + set_state_players(mm2, get_state_players(mm)); + set_synthesis_outputs(mm2, get_synthesis_outputs(mm)); + + // todo get a good value for cutoff + auto relabel_maps + = partitioned_game_relabel_here(mm2, true, false, true, + false, -1u, max_letter_mult); + bool succ = !relabel_maps.env_map.empty(); + + si.n_letters_part = relabel_maps.env_map.size(); + +#ifdef TRACE + if (succ) + std::cout << "Relabeling succesfull with " << relabel_maps.env_map.size() + << " letters\n"; + else + std::cout << "Relabeling aborted\n"; +#endif + + return compute_incomp_impl_(succ ? const_twa_graph_ptr(mm2) : mm, + n_env, si, succ); } - struct part_sol_t + struct part_sol_t { std::vector psol; std::vector is_psol; @@ -1401,6 +1739,11 @@ namespace return std::make_pair(n_group, which_group); } + // Helper function + // Computes the set of all original letters implied by the leaves + // This avoids transposing the graph + + // Computes the letters of each group // Letters here means bdds such that for all valid // assignments of the bdd we go to the same dst from the same source @@ -1410,7 +1753,9 @@ namespace { //To avoid recalc std::set all_bdd; - std::set treated_bdd; + std::vector all_bdd_v; + std::unordered_map node2idx; + std::unordered_multimap>> sigma_map; @@ -1448,6 +1793,11 @@ namespace continue; else { + // Store bdds as vector for compatibility + all_bdd_v.clear(); // Note: sorted automatically by id + std::transform(all_bdd.begin(), all_bdd.end(), + std::back_inserter(all_bdd_v), + [](int i){return bdd_from_int(i); }); // Insert it already into the sigma_map trace << "Group " << groupidx << " generates a new alphabet\n"; sigma_map.emplace(std::piecewise_construct, @@ -1457,62 +1807,60 @@ namespace } } + // Result red.share_sigma_with.push_back(groupidx); red.all_letters.emplace_back(); auto& group_letters = red.all_letters.back(); - treated_bdd.clear(); + // Compute it + auto this_part = try_partition_me(all_bdd_v, -1u); + assert(this_part.relabel_succ); - for (unsigned s = 0; s < n_env; ++s) + // Transform it + // group_letters is pair + // There are as many new_letters as treated bdds in the partition + group_letters.clear(); + group_letters.reserve(this_part.treated.size()); + node2idx.clear(); + node2idx.reserve(this_part.treated.size()); + + for (const auto& [label, node] : this_part.treated) { - if (red.which_group[s] != groupidx) - continue; - for (const auto& e : mmw->out(s)) - { - bdd rcond = e.cond; - const int econd_id = rcond.id(); - trace << rcond << " - " << econd_id << std::endl; - if (treated_bdd.count(econd_id)) - { - trace << "Already treated" << std::endl; - continue; - } - treated_bdd.insert(econd_id); - - assert(rcond != bddfalse && "Deactivated edges are forbiden"); - // Check against all currently used "letters" - const size_t osize = group_letters.size(); - for (size_t i = 0; i < osize; ++i) - { - if (group_letters[i].first == rcond) - { - rcond = bddfalse; - group_letters[i].second.insert(econd_id); - break; - } - bdd inter = group_letters[i].first & rcond; - if (inter == bddfalse) - continue; // No intersection - if (group_letters[i].first == inter) - group_letters[i].second.insert(econd_id); - else - { - group_letters[i].first -= inter; - group_letters.emplace_back(inter, - group_letters[i].second); - group_letters.back().second.insert(econd_id); - } - - rcond -= inter; - // Early exit? 
- if (rcond == bddfalse) - break; - } - // Leftovers? - if (rcond != bddfalse) - group_letters.emplace_back(rcond, std::set{econd_id}); - } + node2idx[node] = group_letters.size(); + group_letters.emplace_back(std::piecewise_construct, + std::forward_as_tuple(label), + std::forward_as_tuple()); } + + // Go through the graph for each original letter + auto search_leaves + = [&ig = *this_part.ig, &group_letters, &node2idx] + (int orig_letter_id, unsigned s, auto&& search_leaves_) -> void + { + if (ig.state_storage(s).succ == 0) + { + // Leaf + unsigned idx = node2idx[s]; + auto& setidx = group_letters[idx].second; + setidx.emplace_hint(setidx.end(), orig_letter_id); + } + else + { + // Traverse + for (const auto& e : ig.out(s)) + search_leaves_(orig_letter_id, e.dst, search_leaves_); + } + }; + + const unsigned Norig = all_bdd_v.size(); + for (unsigned s = 0; s < Norig; ++s) + search_leaves(all_bdd_v[s].id(), s, search_leaves); + + // Verify that all letters imply at least one original letter + assert(std::all_of(group_letters.begin(), group_letters.end(), + [](const auto& l){return !l.second.empty(); })); + + #ifdef TRACE trace << "this group letters" << std::endl; auto sp = [&](const auto& c) @@ -1867,26 +2215,22 @@ namespace std::pair reduce_and_split(const_twa_graph_ptr mmw, const unsigned n_env, const square_matrix& incompmat, - stopwatch& sw) + satprob_info& si) { reduced_alphabet_t red; + si.start(); + std::tie(red.n_groups, red.which_group) = trans_comp_classes(incompmat); - dotimeprint << "Done trans comp " << red.n_groups - << " - " << sw.stop() << '\n'; compute_all_letters(red, mmw, n_env); - dotimeprint << "Done comp all letters " << " - " << sw.stop() << '\n'; + si.split_all_let_time = si.restart(); compute_minimal_letters(red, mmw, n_env); -#ifdef MINTIMINGS - dotimeprint << "Done comp all min sim letters "; - for (const auto& al : red.bisim_letters) - dotimeprint << al.size() << ' '; - dotimeprint << " - " << sw.stop() << '\n'; -#endif + si.split_min_let_time = si.restart(); + si.n_bisim_let = red.n_red_sigma; twa_graph_ptr split_mmw = split_on_minimal(red, mmw, n_env); - dotimeprint << "Done splitting " << sw.stop() << '\n'; + si.split_cstr_time = si.restart(); trace << std::endl; return std::make_pair(split_mmw, red); @@ -2189,9 +2533,10 @@ namespace struct mm_sat_prob_t { mm_sat_prob_t(unsigned n_classes, unsigned n_env, - unsigned n_sigma_red) + unsigned n_sigma_red, satprob_info& si) : lm(n_classes, n_env, n_sigma_red) , n_classes{lm.n_classes_} + , si{si} { state_cover_clauses.reserve(n_classes); trans_cover_clauses.reserve(n_classes*n_sigma_red); @@ -2243,6 +2588,13 @@ namespace // res[i] == -1 : i not used in lit mapper // res[i] == 0 : i is assigned false // res[i] == 1 : i is assigned true + if (sat_dimacs_file) + { + fprintf(sat_dimacs_file->f, + "c ### Next Instance %lld %lld ###\n", + this->si.n_classes, this->si.n_refinement); + picosat_print(lm.psat_, sat_dimacs_file->f); + } switch (picosat_sat(lm.psat_, -1)) { case PICOSAT_UNSATISFIABLE: @@ -2308,6 +2660,8 @@ namespace std::unordered_map> cube_map; // A map that indicates if two cubes are compatible or not via their id std::unordered_map, bool, pair_hash> cube_incomp_map; + // Piggy-back a struct for performance measure + satprob_info& si; }; template<> @@ -2386,14 +2740,15 @@ namespace const square_matrix& incompmat, const reduced_alphabet_t& red, const part_sol_t& psol, - const unsigned n_env) + const unsigned n_env, + satprob_info& si) { const auto& psolv = psol.psol; const unsigned n_classes = 
psolv.size(); const unsigned n_red = red.n_red_sigma; const unsigned n_groups = red.n_groups; - mm_sat_prob_t mm_pb(n_classes, n_env, n_red); + mm_sat_prob_t mm_pb(n_classes, n_env, n_red, si); auto& lm = mm_pb.lm; @@ -3259,6 +3614,7 @@ namespace for (unsigned letter_idx = 0; letter_idx < n_ml; ++letter_idx) { const auto& ml_list = group_map[letter_idx]; + assert(ml_list.begin() != ml_list.end()); // Incompatibility is commutative // new / new constraints const auto it_end = ml_list.end(); @@ -3327,7 +3683,7 @@ namespace const reduced_alphabet_t& red, const part_sol_t& psol, const unsigned n_env, - stopwatch& sw) + satprob_info& si) { const auto& psolv = psol.psol; const unsigned n_psol = psolv.size(); @@ -3348,15 +3704,16 @@ namespace mm_pb.lm.print(std::cerr); #endif mm_pb.set_variable_clauses(); - dotimeprint << "Done constructing SAT " << sw.stop() << '\n'; - dotimeprint << "n literals " << mm_pb.n_lits() - << " n clauses " << mm_pb.n_clauses() << '\n'; + si.n_lit = mm_pb.n_lits(); + si.n_clauses = mm_pb.n_clauses(); + si.start(); auto sol = mm_pb.get_sol(); - dotimeprint << "Done solving SAT " << sw.stop() << '\n'; + si.sat_time = si.restart(); if (sol.empty()) { mm_pb.unset_variable_clauses(); + si.write(); return nullptr; } #ifdef TRACE @@ -3561,82 +3918,84 @@ namespace for (const auto& el : used_ziaj_map) if (el.second == bddfalse) infeasible_classes.emplace_back(el.first.i, el.first.a); + si.build_time = si.restart(); + if (!infeasible_classes.empty()) { // Remove the variable clauses // This is suboptimal but the contexts form a stack so... - dotimeprint << "Refining constraints for " - << infeasible_classes.size() << " classses.\n"; + auto oldrefine = si.n_refinement; + si.write(); + si.task = "refinement"; + si.n_classes = n_classes; + si.n_refinement = oldrefine + infeasible_classes.size(); mm_pb.unset_variable_clauses(); add_bdd_cond_constr(mm_pb, mmw, red, n_env, infeasible_classes, x_in_class); + si.refine_time = si.restart(); continue; //retry } cstr_split_mealy(minmach, red, x_in_class, used_ziaj_map); - // todo: What is the impact of chosing one of the possibilities minmach->set_init_state(init_class_v.front()); return minmach; } // while loop } // try_build_machine -} // namespace -namespace spot -{ - twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, - int premin) + twa_graph_ptr minimize_mealy_(const const_twa_graph_ptr& mm, + int premin, int max_letter_mult) { - assert(is_split_mealy(mm)); + bdd outputs = ensure_mealy("minimize_mealy", mm); - stopwatch sw; - sw.start(); + satprob_info si(sat_instance_name); + si.task = "presat"; + stopwatch sglob; + sglob.start(); + si.start(); if ((premin < -1) || (premin > 1)) throw std::runtime_error("premin has to be -1, 0 or 1"); - auto orig_spref = get_state_players(mm); - - // Check if finite traces exist - // If so, deactivate fast minimization - // todo : this is overly conservative - // If unreachable states have no outgoing edges we do not care - // but testing this as well starts to be expensive... 
- if (premin != -1 - && [&]() - { - for (unsigned s = 0; s < mm->num_states(); ++s) - { - auto eit = mm->out(s); - if (eit.begin() == eit.end()) - return true; - } - return false; - }()) - premin = -1; - auto do_premin = [&]()->const_twa_graph_ptr { if (premin == -1) - return mm; + { + if (!mm->get_named_prop("state-player")) + return split_2step(mm, false); + else + return mm; + } else { + bool is_split = mm->get_named_prop("state-player"); // We have a split machine -> unsplit then resplit, // as reduce mealy works on separated - auto mms = unsplit_mealy(mm); - reduce_mealy_here(mms, premin == 1); - split_separated_mealy_here(mms); - return mms; + twa_graph_ptr mms; + if (is_split) + { + auto mmi = unsplit_2step(mm); + reduce_mealy_here(mmi, premin == 1); + split_separated_mealy_here(mmi); + return mmi; + } + else + { + auto mms = reduce_mealy(mm, premin == 1); + return split_2step(mms, false); + } } }; const_twa_graph_ptr mmw = do_premin(); assert(is_split_mealy(mmw)); - dotimeprint << "Done premin " << sw.stop() << '\n'; + si.premin_time = si.restart(); + + // 0 -> "Env" next is input props // 1 -> "Player" next is output prop - const auto& spref = get_state_players(mmw); + const region_t& spref = get_state_players(mmw); assert((spref.size() == mmw->num_states()) && "Inconsistent state players"); @@ -3650,85 +4009,129 @@ namespace spot print_hoa(std::cerr, mmw); #endif assert(n_env != -1u); - dotimeprint << "Done reorganise " << n_env << " - " - << sw.stop() << '\n'; + si.reorg_time = si.restart(); // Compute incompatibility based on bdd - auto incompmat = compute_incomp(mmw, n_env, sw); - dotimeprint << "Done incompatibility " << sw.stop() << '\n'; + auto incompmat = compute_incomp(mmw, n_env, si, max_letter_mult); #ifdef TRACE + std::cerr << "Final incomp mat\n"; incompmat.print(std::cerr); #endif // Get a partial solution auto partsol = get_part_sol(incompmat); - dotimeprint << "Done partial solution " << partsol.psol.size() - << " - " << sw.stop() << '\n'; + si.partsol_time = si.restart(); auto early_exit = [&]() { + si.done = 1; + si.total_time = sglob.stop(); + si.write(); // Always keep machines split - assert(is_split_mealy_specialization(mm, mmw)); + if (mm->get_named_prop("state-player")) + assert(is_split_mealy_specialization(mm, mmw)); + else + assert(is_split_mealy_specialization(split_2step(mm, false), + mmw)); return std::const_pointer_cast(mmw); - }; + }; // If the partial solution has the same number of // states as the original automaton -> we are done if (partsol.psol.size() == n_env) { - dotimeprint << "Done trans comp " << 1 << " - " << sw.stop() << '\n'; - dotimeprint << "Done comp all letters " << " - " - << sw.stop() << '\n'; -#ifdef MINTIMINGS - dotimeprint << "Done comp all min sim letters 0 - " - << sw.stop() << '\n'; -#endif - dotimeprint << "Done splitting " << sw.stop() << '\n'; - dotimeprint << "Done split and reduce " << sw.stop() << '\n'; - dotimeprint << "Done build init prob " << sw.stop() << '\n'; - dotimeprint << "Done minimizing - " << mmw->num_states() - << " - " << sw.stop() << '\n'; return early_exit(); } // Get the reduced alphabet auto [split_mmw, reduced_alphabet] = - reduce_and_split(mmw, n_env, incompmat, sw); - dotimeprint << "Done split and reduce " << sw.stop() << '\n'; + reduce_and_split(mmw, n_env, incompmat, si); auto mm_pb = build_init_prob(split_mmw, incompmat, - reduced_alphabet, partsol, n_env); - dotimeprint << "Done build init prob " << sw.stop() << '\n'; + reduced_alphabet, partsol, n_env, si); + si.prob_init_build_time = 
si.restart(); + si.write(); twa_graph_ptr minmachine = nullptr; for (size_t n_classes = partsol.psol.size(); n_classes < n_env; ++n_classes) { + if (si.task.empty()) + si.task = "sat"; + si.n_iteration = (n_classes-partsol.psol.size()); + si.n_refinement = 0; + si.n_classes = n_classes; + minmachine = try_build_min_machine(mm_pb, mmw, reduced_alphabet, partsol, n_env, - sw); - dotimeprint << "Done try_build " << n_classes - << " - " << sw.stop() << '\n'; + si); if (minmachine) break; increment_classes(split_mmw, incompmat, reduced_alphabet, partsol, mm_pb); - dotimeprint << "Done incrementing " << sw.stop() << '\n'; + } // Is already minimal -> Return a copy // Set state players! if (!minmachine) return early_exit(); - set_synthesis_outputs(minmachine, get_synthesis_outputs(mm)); - dotimeprint << "Done minimizing - " << minmachine->num_states() - << " - " << sw.stop() << '\n'; + set_synthesis_outputs(minmachine, outputs); - assert(is_split_mealy_specialization(mm, minmachine)); + si.done=1; + si.n_min_states = minmachine->num_states(); + si.total_time = sglob.stop(); + si.write(); + + assert(is_split_mealy_specialization( + mm->get_named_prop("state-player") ? mm + :split_2step(mm, false), + minmachine)); return minmachine; } +} // namespace + +namespace spot +{ + twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, + int premin) + { + return minimize_mealy_(mm, premin, 10); + } + + twa_graph_ptr + minimize_mealy(const const_twa_graph_ptr& mm, + synthesis_info& si) + { + if ((si.minimize_lvl < 3) || (si.minimize_lvl > 5)) + throw std::runtime_error("minimize_mealy(): " + "minimize_lvl should be between 3 and 5."); + + std::string csvfile = si.opt.get_str("satlogcsv"); + std::string dimacsfile = si.opt.get_str("satlogdimacs"); + + if (!csvfile.empty()) + { + sat_csv_file = std::make_unique + (csvfile, std::ios_base::ate | std::ios_base::app); + if (!*sat_csv_file) + throw std::runtime_error("could not open `" + csvfile + + "' for writing"); + sat_csv_file->exceptions(std::ofstream::failbit + | std::ofstream::badbit); + } + if (!dimacsfile.empty()) + sat_dimacs_file + = std::make_unique(dimacsfile); + sat_instance_name = si.opt.get_str("satinstancename"); + auto res = minimize_mealy_(mm, si.minimize_lvl-4, + si.opt.get("max_letter_mult", 10)); + sat_csv_file.reset(); + sat_dimacs_file.reset(); + return res; + } } namespace spot @@ -3743,9 +4146,9 @@ namespace spot const unsigned initl = left->get_init_state_number(); const unsigned initr = right->get_init_state_number(); - auto& spr = get_state_players(right); + const region_t& spr = get_state_players(right); #ifndef NDEBUG - auto& spl = get_state_players(left); + const region_t& spl = get_state_players(left); // todo auto check_out = [](const const_twa_graph_ptr& aut, const auto& sp) @@ -3843,4 +4246,125 @@ namespace spot return true; } + twa_graph_ptr + mealy_product(const const_twa_graph_ptr& left, + const const_twa_graph_ptr& right) + { + bdd outs[] = {get_synthesis_outputs(left), + get_synthesis_outputs(right)}; + +#ifndef NDEBUG + for (const auto& [m, n, o] : {std::tuple{left, "left", outs[0]}, + {right, "right", outs[1]}}) + { + if (!is_mealy(m)) + throw std::runtime_error(std::string("mealy_prod(): ") + n + + " is not a mealy machine"); + if (!is_complete_(m, o)) + throw std::runtime_error(std::string("mealy_prod(): ") + n + + " is not input complete"); + } +#endif + + auto p = product(left, right); + bdd pouts = outs[0] & outs[1]; + set_synthesis_outputs(p, pouts); + +#ifndef NDEBUG + if (!is_mealy(p)) + throw 
std::runtime_error("mealy_prod(): Product is not mealy"); + if (!is_complete_(p, pouts)) + throw std::runtime_error("mealy_prod(): Product is not input complete. " + "Incompatible machines?"); +#endif + + return p; + } + + + void + simplify_mealy_here(twa_graph_ptr& m, int minimize_lvl, + bool split_out) + { + auto si = synthesis_info(); + si.minimize_lvl = minimize_lvl; + return simplify_mealy_here(m, si, split_out); + } + + void + simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, + bool split_out) + { + const auto minimize_lvl = si.minimize_lvl; + assert(is_mealy(m) + && "simplify_mealy_here(): m is not a mealy machine!"); + if (minimize_lvl < 0 || 5 < minimize_lvl) + throw std::runtime_error("simplify_mealy_here(): minimize_lvl " + "must be between 0 and 5."); + + stopwatch sw; + if (si.bv) + sw.start(); + + bool is_separated = false; + if (0 < minimize_lvl && minimize_lvl < 3) + { + // unsplit if necessary + if (m->get_named_prop("state-player")) + { + m = unsplit_mealy(m); + is_separated = true; + } + reduce_mealy_here(m, minimize_lvl == 2); + } + else if (3 <= minimize_lvl) + m = minimize_mealy(m, si); + + // Convert to demanded output format + bool is_split = m->get_named_prop("state-player"); + if (minimize_lvl == 0) + { + if (is_split && !split_out) + m = unsplit_mealy(m); + else if (!is_split && split_out) + m = split_2step(m, false); + } + else if (0 < minimize_lvl && minimize_lvl < 3 && split_out) + { + if (is_separated) + split_separated_mealy_here(m); + else + m = split_2step(m, false); + } + else if (3 <= minimize_lvl && !split_out) + m = unsplit_mealy(m); + + if (si.bv) + { + if (si.verbose_stream) + *si.verbose_stream << "simplification took " << sw.stop() + << " seconds\n"; + si.bv->simplify_strat_time += sw.stop(); + auto n_s_env = 0u; + auto n_e_env = 0u; + if (auto sp = m->get_named_prop("state-player")) + { + n_s_env = sp->size() - std::accumulate(sp->begin(), + sp->end(), + 0u); + std::for_each(m->edges().begin(), m->edges().end(), + [&n_e_env, &sp](const auto& e) + { + n_e_env += (*sp)[e.src]; + }); + } + else + { + n_s_env = m->num_states(); + n_e_env = m->num_edges(); + } + si.bv->nb_simpl_strat_states += n_s_env; + si.bv->nb_simpl_strat_edges += n_e_env; + } + } } diff --git a/spot/twaalgos/mealy_machine.hh b/spot/twaalgos/mealy_machine.hh index 139f7cce2..3bdb71b73 100644 --- a/spot/twaalgos/mealy_machine.hh +++ b/spot/twaalgos/mealy_machine.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Développement +// Copyright (C) 2021-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -21,56 +21,84 @@ #include +/// \addtogroup mealy Functions related to Mealy machines +/// \ingroup twa_algorithms + namespace spot { - /// todo - /// Comment je faire au mieux pour expliquer mealy dans les doc + // Forward decl + struct synthesis_info; - /// \brief Checks whether or not the automaton is a mealy machine + /// \ingroup mealy + /// \brief Checks whether the automaton is a mealy machine + /// + /// A mealy machine is an automaton with the named property + /// `"synthesis-outputs"` and that has a "true" as acceptance + /// condition.
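As a quick illustration of the predicates declared in this mealy_machine.hh hunk, here is a minimal sketch; the surrounding helper function and the strategy automaton are assumptions for illustration only, not part of the change.

    #include <spot/twaalgos/mealy_machine.hh>

    // `strat` is assumed to be some strategy automaton produced elsewhere.
    void describe(const spot::const_twa_graph_ptr& strat)
    {
      if (!spot::is_mealy(strat))          // "synthesis-outputs" + "true" acceptance
        return;
      bool separated = spot::is_separated_mealy(strat);  // labels of the form (in)&(out)
      bool split = spot::is_split_mealy(strat);          // machine turned into a game
      bool in_det = spot::is_input_deterministic_mealy(strat);
      (void) separated; (void) split; (void) in_det;
    }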
/// /// \param m The automaton to be verified - /// \note A mealy machine must have the named property \"synthesis-outputs\" - /// and have a \"true\" as acceptance condition + /// \see is_separated_mealy + /// \see is_split_mealy + /// \see is_input_deterministic_mealy SPOT_API bool is_mealy(const const_twa_graph_ptr& m); - /// \brief Checks whether or not the automaton is a separated mealy machine + /// \ingroup mealy + /// \brief Checks whether the automaton is a separated mealy machine + /// + /// A separated mealy machine is a mealy machine with + /// all transitions having the form `(in)&(out)` where `in` and + /// `out` are BDDs over the input and output propositions. /// /// \param m The automaton to be verified - /// \note A separated mealy machine is a mealy machine machine with all - /// transitions having the form (in)&(out) where in[out] is a bdd over - /// input[output] propositions of m + /// + /// \see is_mealy + /// \see is_split_mealy SPOT_API bool is_separated_mealy(const const_twa_graph_ptr& m); + /// \ingroup mealy /// \brief Checks whether or not the automaton is a split mealy machine /// + /// A split mealy machine is a mealy machine that has + /// been converted into a game. It should have the named property + /// `"state-player"`; moreover, the game should alternate + /// between the two players. Transitions leaving states owned by + /// player 0 (the environment) should use only input propositions, + /// while transitions leaving states owned by player 1 (the + /// controller) should use only output propositions. + /// /// \param m The automaton to be verified - /// \note A split mealy machine is a mealy machine machine with the named - /// property \"state-player\". Moreover the underlying automaton - /// must be alternating between the player and the env. Transitions - /// leaving env[player] states can only be labeled by - /// input[output] propositions. + /// \see is_mealy + /// \see is_separated_mealy SPOT_API bool is_split_mealy(const const_twa_graph_ptr& m); - /// \brief Checks whether or not a mealy machine is input deterministic + /// \brief Checks whether a mealy machine is input deterministic + /// + /// A machine is input deterministic if none of the states has two + /// outgoing transitions that can agree on a common assignment of + /// the input propositions. In case the mealy machine is split, the + /// previous condition is tested only on states owned by player 0 + /// (the environment). /// /// \param m The automaton to be verified - /// \note works all mealy machines, no matter whether they are split - /// or separated or neither of neither of them. - /// \note A machine is input deterministic if none of the states - /// has two outgoing transitions that can agree on a assignement - /// of the input propositions. + /// \see is_mealy SPOT_API bool is_input_deterministic_mealy(const const_twa_graph_ptr& m); - /// \brief make each transition in a separated mealy machine a - /// 2-step transition. + /// \ingroup mealy + /// \brief split a separated mealy machine + /// + /// In a separated mealy machine, every transition has a label of + /// the form `(in)&(out)`.
This function will turn each transition + /// into a pair of consecutive transitions labeled by `in` and + /// `out`, and turn the mealy machine into a game (what we call a + /// split mealy machine). /// /// \param m separated mealy machine to be split - /// \return returns the equivalent split mealy machine if not done inplace + /// \see is_split_mealy /// @{ SPOT_API twa_graph_ptr split_separated_mealy(const const_twa_graph_ptr& m); @@ -79,53 +107,122 @@ namespace spot split_separated_mealy_here(const twa_graph_ptr& m); /// @} + /// \ingroup mealy /// \brief the inverse of split_separated_mealy + /// + /// Take a split mealy machine \a m, and build a separated mealy machine. + /// + /// \see split_separated_mealy + /// \see is_split_mealy + /// \see is_separated_mealy SPOT_API twa_graph_ptr unsplit_mealy(const const_twa_graph_ptr& m); + /// \ingroup mealy /// \brief reduce an (in)completely specified mealy machine - /// Based on signature inclusion or equality. This is not guaranteed - /// to find the minimal number of states but is usually faster. - /// This also comes at another drawback: - /// All applicable sequences have to be infinite. Finite - /// traces are disregarded - /// \param mm The mealy machine to be minimized, has to be unsplit + /// + /// This is a bisimulation-based reduction that optionally uses + /// inclusion between signatures to force some output when there is + /// a choice, in order to favor more reductions. Only infinite + /// traces are considered. See \cite renkin.22.forte for details. + /// + /// \param mm The mealy machine to be minimized; it has to be unsplit. /// \param output_assignment Whether or not to use output assignment - /// \return A specialization of \c mm. Note that if mm is separated, - /// the returned machine is separated as well. - /// \note See todo TACAS22 Effective reductions of mealy machines + /// \return A specialization of \c mm. + /// + /// \note If mm is separated, the returned machine is separated as + /// well. /// @{ SPOT_API twa_graph_ptr reduce_mealy(const const_twa_graph_ptr& mm, - bool output_assignment = false); + bool output_assignment = true); SPOT_API void reduce_mealy_here(twa_graph_ptr& mm, - bool output_assignment = false); + bool output_assignment = true); /// @} - /// \brief Minimizes a split (in)completely specified mealy machine - /// The approach is described in \todo TACAS - /// \param premin Use reduce_mealy before applying the - /// main algorithm if demanded AND - /// the original machine has no finite trace. - /// -1 : Do not use; - /// 0 : Use without output assignment; - /// 1 : Use with output assignment - /// \return Returns a split mealy machines which is a minimal - /// speciliazation of the original machine + /// \ingroup mealy + /// \brief Minimizes an (in)completely specified mealy machine + /// + /// The approach is described in \cite renkin.22.forte. + /// + /// \param premin Whether to use reduce_mealy as a preprocessing step: + /// - -1: Do not use; + /// - 0: Use without output assignment; + /// - 1: Use with output assignment. + /// \return A split mealy machine which is a minimal + /// specialization of the original machine. + /// + /// \note Enabling \a premin will remove finite traces. + /// \see is_split_mealy_specialization SPOT_API twa_graph_ptr minimize_mealy(const const_twa_graph_ptr& mm, int premin = -1); + /// \ingroup mealy + /// \brief Minimizes an (in)completely specified mealy machine + /// + /// The approach is described in \cite renkin.22.forte.
+ /// + /// \param si synthesis_info structure used to store data for benchmarking + /// and to indicate which premin level to use + /// + /// \return A split mealy machine which is a minimal + /// specialization of the original machine. + /// + /// \note Enabling \a premin will remove finite traces. + /// \note If si.opt contains an option "satlogcsv", detailed results will be + /// stored in this file. If it contains "satlogdimacs", all SAT problems will + /// be stored. + /// \see is_split_mealy_specialization + SPOT_API twa_graph_ptr + minimize_mealy(const const_twa_graph_ptr& mm, + synthesis_info& si); + + + /// \ingroup mealy /// \brief Test if the split mealy machine \a right is a specialization of /// the split mealy machine \a left. /// - /// That is all input sequences valid for left - /// must be applicable for right and the induced sequence of output signals - /// of right must imply the ones of left + /// That is, all input sequences valid for left must be applicable + /// for right and the induced sequence of output signals of right + /// must imply the ones of left. SPOT_API bool is_split_mealy_specialization(const_twa_graph_ptr left, const_twa_graph_ptr right, bool verbose = false); -} \ No newline at end of file + + /// \ingroup mealy + /// \brief Product between two mealy machines \a left and \a right. + /// \pre The machines both have to be either split or unsplit, + /// input complete and compatible. All of this is checked by assertion. + /// \result A mealy machine representing the shared behaviour, + /// with the same type (mealy/separated/split) as the input machines. + SPOT_API twa_graph_ptr + mealy_product(const const_twa_graph_ptr& left, + const const_twa_graph_ptr& right); + + /// \ingroup mealy + /// \brief Convenience function to call minimize_mealy or reduce_mealy. + /// Uses the same convention as ltlsynt for \a minimize_lvl (or the + /// field `minimize_lvl` of \a si): + /// - 0: no reduction + /// - 1: bisimulation-based reduction + /// - 2: bisimulation with output assignment + /// - 3: SAT minimization + /// - 4: 1 then 3 + /// - 5: 2 then 3 + /// + /// Minimizes the given machine \a m in place; the parameter + /// \a split_out specifies whether the result should be split. + /// @{ + SPOT_API void + simplify_mealy_here(twa_graph_ptr& m, int minimize_lvl, + bool split_out); + + SPOT_API void + simplify_mealy_here(twa_graph_ptr& m, synthesis_info& si, + bool split_out); + /// @} +} diff --git a/spot/twaalgos/minimize.cc b/spot/twaalgos/minimize.cc index 4fd6847b3..1ac961d46 100644 --- a/spot/twaalgos/minimize.cc +++ b/spot/twaalgos/minimize.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2010-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2010-2020, 2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -394,8 +394,8 @@ namespace spot else { // Find any accepting sink state, to speed up the - // determinization by merging all states containing a sink - // state. + // determinization by merging all macro-states containing a + // sink state.
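The synthesis_info-based entry points declared in mealy_machine.hh above can be exercised roughly as follows; this is only a sketch, and the chosen level and the input machine are assumptions, not part of the change.

    #include <spot/twaalgos/mealy_machine.hh>
    #include <spot/twaalgos/synthesis.hh>

    // `machine` is assumed to be a Mealy machine (split or not).
    spot::twa_graph_ptr shrink(spot::twa_graph_ptr machine)
    {
      spot::synthesis_info si;
      si.minimize_lvl = 5;  // ltlsynt convention: bisimulation + output assignment, then SAT
      // Convenience wrapper: reduces/minimizes in place and returns a split result.
      spot::simplify_mealy_here(machine, si, /*split_out=*/true);
      return machine;
    }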
std::vector acc_sinks; unsigned ns = a->num_states(); if (!a->prop_terminal().is_true()) diff --git a/spot/twaalgos/parity.cc b/spot/twaalgos/parity.cc index 94c7bd922..c8507ac53 100644 --- a/spot/twaalgos/parity.cc +++ b/spot/twaalgos/parity.cc @@ -387,27 +387,15 @@ namespace spot return aut; } - twa_graph_ptr - reduce_parity(const const_twa_graph_ptr& aut, bool colored) + reduce_parity_data::reduce_parity_data(const const_twa_graph_ptr& aut, + bool layered) { - return reduce_parity_here(make_twa_graph(aut, twa::prop_set::all()), - colored); - } - - twa_graph_ptr - reduce_parity_here(twa_graph_ptr aut, bool colored) - { - unsigned num_sets = aut->num_sets(); - if (!colored && num_sets == 0) - return aut; - - bool current_max; - bool current_odd; - if (!aut->acc().is_parity(current_max, current_odd, true)) - input_is_not_parity("reduce_parity"); + if (!aut->acc().is_parity(parity_max, parity_odd, true)) + input_is_not_parity("reduce_parity_data"); if (!aut->is_existential()) throw std::runtime_error - ("reduce_parity_here() does not support alternation"); + ("reduce_parity_data() does not support alternation"); + unsigned num_sets = aut->num_sets(); // The algorithm assumes "max odd" or "max even" parity. "min" // parity is handled by converting it to "max" while the algorithm @@ -466,8 +454,8 @@ namespace spot // // -2 means the edge was never assigned a color. unsigned evs = aut->edge_vector().size(); - std::vector piprime1(evs, -2); // k=1 - std::vector piprime2(evs, -2); // k=0 + piprime1.resize(evs, -2); // k=1 + piprime2.resize(evs, -2); // k=0 bool sba = aut->prop_state_acc().is_true(); auto rec = @@ -481,7 +469,7 @@ namespace spot { int piri; // π(Rᵢ) int color; // corresponding color, to deal with "min" kind - if (current_max) + if (parity_max) { piri = color = si.acc_sets_of(scc).max_set() - 1; } @@ -507,15 +495,30 @@ namespace spot m.first += (piri - m.first) & 1; m.second += (piri - m.second) & 1; } - for (unsigned state: si.states_of(scc)) - for (auto& e: aut->out(state)) - if ((sba || si.scc_of(e.dst) == scc) && - ((piri >= 0 && e.acc.has(color)) || (piri < 0 && !e.acc))) - { - unsigned en = aut->edge_number(e); - piprime1[en] = m.first; - piprime2[en] = m.second; - } + // Recolor edges. Depending on LAYERED we want to + // either recolor all edges for which piprime1 is -2 + // (uncolored), or only the edges that we were removed + // by the previous filter. + auto coloredge = [&](auto& e) { + unsigned en = aut->edge_number(e); + bool recolor = layered + ? piprime1[en] == -2 + : (piri >= 0 && e.acc.has(color)) || (piri < 0 && !e.acc); + if (recolor) + { + piprime1[en] = m.first; + piprime2[en] = m.second; + } + }; + if (sba) + // si.edges_of(scc) would be wrong as it can ignore + // outgoing edges removed from a previous level. 
+ for (unsigned s: si.states_of(scc)) + for (auto& e: aut->out(s)) + coloredge(e); + else + for (auto& e: si.inner_edges_of(scc)) + coloredge(e); res.first = std::max(res.first, m.first); res.second = std::max(res.second, m.second); } @@ -523,11 +526,28 @@ namespace spot }; scc_and_mark_filter filter1(aut, {}); rec(filter1, rec); + } + + twa_graph_ptr + reduce_parity(const const_twa_graph_ptr& aut, bool colored, bool layered) + { + return reduce_parity_here(make_twa_graph(aut, twa::prop_set::all()), + colored, layered); + } + + twa_graph_ptr + reduce_parity_here(twa_graph_ptr aut, bool colored, bool layered) + { + unsigned num_sets = aut->num_sets(); + if (!colored && num_sets == 0) + return aut; + + reduce_parity_data pd(aut, layered); // compute the used range for each vector. int min1 = num_sets; int max1 = -2; - for (int m : piprime1) + for (int m : pd.piprime1) { if (m <= -2) continue; @@ -544,7 +564,7 @@ namespace spot } int min2 = num_sets; int max2 = -2; - for (int m : piprime2) + for (int m : pd.piprime2) { if (m <= -2) continue; @@ -560,13 +580,13 @@ namespace spot { std::swap(size1, size2); std::swap(min1, min2); - std::swap(piprime1, piprime2); + std::swap(pd.piprime1, pd.piprime2); } unsigned new_num_sets = size1; - if (current_max) + if (pd.parity_max) { - for (int& m : piprime1) + for (int& m : pd.piprime1) if (m > -2) m -= min1; else @@ -574,7 +594,7 @@ namespace spot } else { - for (int& m : piprime1) + for (int& m : pd.piprime1) if (m > -2) m = new_num_sets - (m - min1) - 1; else @@ -582,8 +602,8 @@ namespace spot } // The parity style changes if we shift colors by an odd number. - bool new_odd = current_odd ^ (min1 & 1); - if (!current_max) + bool new_odd = pd.parity_odd ^ (min1 & 1); + if (!pd.parity_max) // Switching from min<->max changes the parity style every time // the number of colors is even. If the input was "min", we // switched once to "max" to apply the reduction and once again @@ -592,7 +612,7 @@ namespace spot new_odd ^= !(num_sets & 1) ^ !(new_num_sets & 1); if (!colored) { - new_odd ^= current_max; + new_odd ^= pd.parity_max; new_num_sets -= 1; // It seems we have nothing to win by changing automata with a @@ -602,18 +622,18 @@ namespace spot } aut->set_acceptance(new_num_sets, - acc_cond::acc_code::parity(current_max, new_odd, + acc_cond::acc_code::parity(pd.parity_max, new_odd, new_num_sets)); if (colored) for (auto& e: aut->edges()) { unsigned n = aut->edge_number(e); - e.acc = acc_cond::mark_t({unsigned(piprime1[n])}); + e.acc = acc_cond::mark_t({unsigned(pd.piprime1[n])}); } - else if (current_max) + else if (pd.parity_max) for (auto& e: aut->edges()) { - unsigned n = piprime1[aut->edge_number(e)]; + unsigned n = pd.piprime1[aut->edge_number(e)]; if (n == 0) e.acc = acc_cond::mark_t({}); else @@ -622,7 +642,7 @@ namespace spot else for (auto& e: aut->edges()) { - unsigned n = piprime1[aut->edge_number(e)]; + unsigned n = pd.piprime1[aut->edge_number(e)]; if (n >= new_num_sets) e.acc = acc_cond::mark_t({}); else diff --git a/spot/twaalgos/parity.hh b/spot/twaalgos/parity.hh index 167c6b2d8..188e92483 100644 --- a/spot/twaalgos/parity.hh +++ b/spot/twaalgos/parity.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016-2019 Laboratoire de Recherche et Développement +// Copyright (C) 2016-2019, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -21,6 +21,7 @@ #include #include +#include namespace spot { @@ -134,7 +135,6 @@ namespace spot colorize_parity_here(twa_graph_ptr aut, bool keep_style = false); /// @} - /// \brief Reduce the parity acceptance condition to use a minimal /// number of colors. /// @@ -149,11 +149,73 @@ namespace spot /// the above paper assumes). Otherwise, the smallest or highest /// colors (depending on the parity kind) is removed to simplify the /// acceptance condition. + /// + /// If the input uses state-based acceptance, the output will use + /// state-based acceptance as well. + /// + /// A parity automaton, sometimes called a chain automaton, can be + /// seen as a stack of layers that are alternately rejecting and + /// accepting. For instance, imagine a parity max automaton that is + /// strongly connected. Removing the transitions with the maximal + /// color might leave a few transitions that were not labeled by + /// this maximal color, but that are not part of any cycle anymore: + /// those transitions could have been colored with the maximal color, + /// since any cycle going through them would have seen the maximal + /// color. (Once you remove this maximal layer, + /// you can define the next layer similarly.) + /// + /// When \a layered is true, all transitions that belong to the same + /// layer receive the same color. When \a layered is `false`, only the + /// transitions that were used initially to define the layers (i.e., + /// the transitions with the maximal color in the previous example) + /// get their color adjusted. The others will receive either no + /// color (if \a colored is false), or a useless color (if \a colored + /// is true). Here "useless color" means the smallest color + /// for parity max, and the largest color for parity min. + /// + /// When \a layered is true, the output of this function is + /// comparable to what acd_transform() would produce. The + /// difference is that this function preserves the kind (min/max) of + /// the parity input, while acd_transform() always outputs a parity min + /// automaton. Additionally, this function needs fewer resources + /// than acd_transform() because it is already known that the input + /// is a parity automaton. In some (historically inaccurate) way, + /// reduce_parity() can be seen as a specialized version of + /// acd_transform(). + /// + /// The reason \a layered is false by default is that not introducing + /// colors in places where there were none occasionally helps with + /// simulation-based reductions. + /// /// @{ SPOT_API twa_graph_ptr - reduce_parity(const const_twa_graph_ptr& aut, bool colored = false); + reduce_parity(const const_twa_graph_ptr& aut, + bool colored = false, bool layered = false); SPOT_API twa_graph_ptr - reduce_parity_here(twa_graph_ptr aut, bool colored = false); + reduce_parity_here(twa_graph_ptr aut, + bool colored = false, bool layered = false); + + /// @} + + /// \brief Internal data computed by the reduce_parity function + /// + /// `piprime1` and `piprime2` have the size of `aut`'s edge vector, + /// and represent two possible colorations of the edges. piprime1 assumes + /// that terminal cases of the recursion are odd, and piprime2 assumes + /// they are even. + /// + /// reduce_parity() actually compares the range of values in these + /// two vectors to limit the number of colors.
+ struct SPOT_API reduce_parity_data + { + bool parity_max; ///< Whether the input automaton is parity max + bool parity_odd; ///< Whether the input automaton is parity odd + std::vector piprime1; + std::vector piprime2; + + reduce_parity_data(const const_twa_graph_ptr& aut, bool layered = false); + }; + /// @} } diff --git a/spot/twaalgos/postproc.cc b/spot/twaalgos/postproc.cc index a44ac3d52..1a2915dda 100644 --- a/spot/twaalgos/postproc.cc +++ b/spot/twaalgos/postproc.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -40,6 +40,7 @@ #include #include #include +#include namespace spot { @@ -89,8 +90,10 @@ namespace spot wdba_minimize_ = opt->get("wdba-minimize", -1); gen_reduce_parity_ = opt->get("gen-reduce-parity", 1); simul_max_ = opt->get("simul-max", 4096); + merge_states_min_ = opt->get("merge-states-min", 128); wdba_det_max_ = opt->get("wdba-det-max", 4096); simul_trans_pruning_ = opt->get("simul-trans-pruning", 512); + acd_ = opt->get("acd", 1); if (sat_acc_ && sat_minimize_ == 0) sat_minimize_ = 1; // Dicho. @@ -118,6 +121,9 @@ namespace spot { if (opt == 0) return a; + if (merge_states_min_ > 0 + && static_cast(merge_states_min_) < a->num_states()) + a->merge_states(); if (simul_max_ > 0 && static_cast(simul_max_) < a->num_states()) return a; @@ -233,10 +239,13 @@ namespace spot { if (PREF_ != Any && level_ != Low) tmp->remove_unused_ap(); - if (COMP_) + bool was_complete = tmp->prop_complete().is_true(); + if (COMP_ && !was_complete) tmp = complete(tmp); bool want_parity = type_ & Parity; - if (want_parity && tmp->acc().is_generalized_buchi()) + if (want_parity && tmp->num_sets() > 1 + && (tmp->acc().is_generalized_buchi() + || tmp->acc().is_generalized_co_buchi())) tmp = choose_degen(tmp); assert(!!SBACC_ == state_based_); if (state_based_) @@ -245,7 +254,8 @@ namespace spot tmp = ensure_ba(tmp); if (want_parity) { - reduce_parity_here(tmp, COLORED_); + if (!acd_was_used_ || (COMP_ && !was_complete)) + reduce_parity_here(tmp, COLORED_); parity_kind kind = parity_kind_any; parity_style style = parity_style_any; if ((type_ & ParityMin) == ParityMin) @@ -290,6 +300,8 @@ namespace spot bool via_gba = (type_ == Buchi) || (type_ == GeneralizedBuchi) || (type_ == Monitor); bool want_parity = type_ & Parity; + acd_was_used_ = false; + if (COLORED_ && !want_parity) throw std::runtime_error("postprocessor: the Colored setting only works " "for parity acceptance"); @@ -335,18 +347,26 @@ namespace spot !(type_ == Generic && PREF_ == Any && level_ == Low)) a = remove_alternation(a); + // If we do want a parity automaton, we can use to_parity(). + // However (1) degeneralization is faster if the input is + // GBA, and (2) if we want a deterministic parity automaton and the + // input is not deterministic, that is useless here. We need + // to determinize it first, and our deterministization + // function only deal with TGBA as input. if ((via_gba || (want_parity && !a->acc().is_parity())) && !a->acc().is_generalized_buchi()) { - // If we do want a parity automaton, we can use to_parity(). - // However (1) degeneralization is better if the input is - // GBA, and (2) if we want a deterministic parity automaton and the - // input is not deterministic, that is useless here. 
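Based on the extended reduce_parity() signature documented in parity.hh above, here is a minimal usage sketch; the input automaton is assumed to already carry parity acceptance, and the wrapper function is hypothetical.

    #include <spot/twaalgos/parity.hh>

    // Recolor a parity automaton so that all edges of a layer share a color,
    // mimicking what acd_transform() would do while keeping the min/max kind.
    spot::twa_graph_ptr recolor_layers(const spot::const_twa_graph_ptr& aut)
    {
      return spot::reduce_parity(aut, /*colored=*/false, /*layered=*/true);
    }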
We need - // to determinize it first, and our deterministization - // function only deal with TGBA as input. if (want_parity && (PREF_ != Deterministic || is_deterministic(a))) { - a = to_parity(a); + if (acd_) + { + a = acd_transform(a, COLORED_); + acd_was_used_ = true; + } + else + { + a = to_parity(a); + } } else { @@ -402,10 +422,19 @@ namespace spot if (PREF_ == Any) { - if (type_ == Buchi) - a = choose_degen(a); + if (type_ == Buchi + || (type_ == CoBuchi && a->acc().is_generalized_co_buchi())) + { + a = choose_degen(a); + } else if (type_ == CoBuchi) - a = to_nca(a); + { + a = to_nca(a); + if (state_based_ && a->prop_state_acc().is_true()) + a = do_sba_simul(a, simul_); + else + a = do_simul(a, simul_); + } return finalize(a); } @@ -699,6 +728,8 @@ namespace spot if (type_ == CoBuchi) { unsigned ns = sim->num_states(); + bool weak = sim->prop_weak().is_true(); + if (PREF_ == Deterministic) sim = to_dca(sim); else @@ -706,8 +737,13 @@ namespace spot // if the input of to_dca/to_nca was weak, the number of // states has not changed, and running simulation is useless. - if (level_ != Low && ns < sim->num_states()) - sim = do_simul(sim, simul_); + if (!weak || (level_ != Low && ns < sim->num_states())) + { + if (state_based_ && sim->prop_state_acc().is_true()) + sim = do_sba_simul(sim, simul_); + else + sim = do_simul(sim, simul_); + } } return finalize(sim); diff --git a/spot/twaalgos/postproc.hh b/spot/twaalgos/postproc.hh index 080cb831f..f470dcf5b 100644 --- a/spot/twaalgos/postproc.hh +++ b/spot/twaalgos/postproc.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -268,7 +268,10 @@ namespace spot bool state_based_ = false; int wdba_minimize_ = -1; int simul_max_ = 4096; + int merge_states_min_ = 128; int wdba_det_max_ = 4096; + bool acd_ = false; + bool acd_was_used_; }; /// @} } diff --git a/spot/twaalgos/powerset.cc b/spot/twaalgos/powerset.cc index c5fc07f94..326de7c76 100644 --- a/spot/twaalgos/powerset.cc +++ b/spot/twaalgos/powerset.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2009-2011, 2013-2019, 2021 Laboratoire de Recherche et +// Copyright (C) 2009-2011, 2013-2019, 2021, 2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -217,17 +217,19 @@ namespace spot pm.map_.emplace_back(std::move(ps)); } - { - unsigned init_num = aut->get_init_state_number(); - auto bvi = make_bitvect(ns); - bvi->set(init_num); - power_state ps{init_num}; - unsigned num = res->new_state(); - res->set_init_state(num); - seen[bvi] = num; - assert(pm.map_.size() == num); - pm.map_.emplace_back(std::move(ps)); - toclean.emplace_back(bvi); + // Add the initial state unless it's a sink. 
+ if (unsigned init_num = aut->get_init_state_number(); + !acc_sinks || !acc_sinks->get(init_num)) + { + auto bvi = make_bitvect(ns); + bvi->set(init_num); + power_state ps{init_num}; + unsigned num = res->new_state(); + res->set_init_state(num); + seen[bvi] = num; + assert(pm.map_.size() == num); + pm.map_.emplace_back(std::move(ps)); + toclean.emplace_back(bvi); } // outgoing map diff --git a/spot/twaalgos/product.cc b/spot/twaalgos/product.cc index 7fb70ddd6..243f3768c 100644 --- a/spot/twaalgos/product.cc +++ b/spot/twaalgos/product.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2014-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -122,8 +122,23 @@ namespace spot res->copy_ap_of(left); res->copy_ap_of(right); + auto& lacc = left->acc(); + auto& racc = right->acc(); + bool leftweak = left->prop_weak().is_true(); bool rightweak = right->prop_weak().is_true(); + + // The conjunction of two co-Büchi automata is a co-Büchi automaton. + // The disjunction of two Büchi automata is a Büchi automaton. + // + // The code to handle this case is similar to the weak_weak case, + // except we do not set the weak property on the result. + if (!leftweak + && !rightweak + && ((aop == and_acc && lacc.is_co_buchi() && racc.is_co_buchi()) + || (aop == or_acc && lacc.is_buchi() && racc.is_buchi()))) + goto and_cobuchi_or_buchi; + // We have optimization to the standard product in case one // of the arguments is weak. if (leftweak || rightweak) @@ -132,14 +147,13 @@ namespace spot // t, f, Büchi or co-Büchi. We use co-Büchi only when // t and f cannot be used, and both acceptance conditions // are in {t,f,co-Büchi}. - if (leftweak && rightweak) + if ((leftweak && rightweak)) { weak_weak: res->prop_weak(true); + and_cobuchi_or_buchi: acc_cond::mark_t accmark = {0}; acc_cond::mark_t rejmark = {}; - auto& lacc = left->acc(); - auto& racc = right->acc(); if ((lacc.is_co_buchi() && (racc.is_co_buchi() || racc.num_sets() == 0)) || (lacc.num_sets() == 0 && racc.is_co_buchi())) diff --git a/spot/twaalgos/product.hh b/spot/twaalgos/product.hh index 49ee9acdf..784a3cb49 100644 --- a/spot/twaalgos/product.hh +++ b/spot/twaalgos/product.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2014-2015, 2018-2020 Laboratoire de Recherche et +// Copyright (C) 2014-2015, 2018-2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -37,10 +37,14 @@ namespace spot /// The resulting automaton will accept the intersection of both /// languages and have an acceptance condition that is the /// conjunction of the acceptance conditions of the two input - /// automata. In case one of the left or right automaton is weak, - /// the acceptance condition of the result is made simpler: it - /// usually is the acceptance condition of the other argument, + /// automata. + /// + /// As an optionmization, in case one of the left or right automaton + /// is weak, the acceptance condition of the result is made simpler: + /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. + /// Similarly, the product of two co-Büchi automata will be a + /// co-Büchi automaton. /// /// The algorithm also defines a named property called /// "product-states" with type spot::product_states. 
This stores @@ -64,10 +68,14 @@ namespace spot /// languages recognized by each input automaton (with its initial /// state changed) and have an acceptance condition that is the /// conjunction of the acceptance conditions of the two input - /// automata. In case one of the left or right automaton is weak, - /// the acceptance condition of the result is made simpler: it - /// usually is the acceptance condition of the other argument, + /// automata. + /// + /// As an optimization, in case one of the left or right automaton + /// is weak, the acceptance condition of the result is made simpler: + /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. + /// Similarly, the product of two co-Büchi automata will be a + /// co-Büchi automaton. /// /// The algorithm also defines a named property called /// "product-states" with type spot::product_states. This stores @@ -89,10 +97,15 @@ namespace spot /// The resulting automaton will accept the union of both /// languages and have an acceptance condition that is the /// disjunction of the acceptance conditions of the two input - /// automata. In case one of the left or right automaton is weak, - /// the acceptance condition of the result is made simpler: it - /// usually is the acceptance condition of the other argument, + /// automata. + /// + /// As an optimization, in case one of the left or right automaton + /// is weak, the acceptance condition of the result is made simpler: + /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. + /// Similarly, the product_or of two Büchi automata will be a + /// Büchi automaton. + /// /// /// The algorithm also defines a named property called /// "product-states" with type spot::product_states. This stores @@ -112,10 +125,14 @@ namespace spot /// recognized by each input automaton (with its initial state /// changed) and have an acceptance condition that is the /// disjunction of the acceptance conditions of the two input - /// automata. In case one of the left or right automaton is weak, - /// the acceptance condition of the result is made simpler: it - /// usually is the acceptance condition of the other argument, + /// automata. + /// + /// As an optimization, in case one of the left or right automaton + /// is weak, the acceptance condition of the result is made simpler: + /// it usually is the acceptance condition of the other argument, /// therefore avoiding the need to introduce new accepting sets. + /// Similarly, the product_or of two Büchi automata will be a + /// Büchi automaton. /// /// The algorithm also defines a named property called /// "product-states" with type spot::product_states. This stores diff --git a/spot/twaalgos/relabel.cc b/spot/twaalgos/relabel.cc index 22eddd893..ba5e4ed14 100644 --- a/spot/twaalgos/relabel.cc +++ b/spot/twaalgos/relabel.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et +// Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library.
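A small sketch of the acceptance specialization documented in product.hh above; both inputs are assumed to already use co-Büchi acceptance, and the helper is purely illustrative.

    #include <spot/twaalgos/product.hh>

    // With the new special case, the conjunction of two co-Büchi automata
    // keeps a single-set co-Büchi condition instead of Fin(0)&Fin(1).
    spot::twa_graph_ptr conjoin_cobuchi(const spot::const_twa_graph_ptr& a,
                                        const spot::const_twa_graph_ptr& b)
    {
      auto p = spot::product(a, b);
      // p->acc().is_co_buchi() is expected to hold here.
      return p;
    }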
@@ -20,62 +20,277 @@ #include "config.h" #include #include +#include + +#include + +#include +#include +#include + namespace spot { - void - relabel_here(twa_graph_ptr& aut, relabeling_map* relmap) + namespace { - bddPair* pairs = bdd_newpair(); - auto d = aut->get_dict(); - std::vector vars; - std::set newvars; - vars.reserve(relmap->size()); - bool bool_subst = false; - auto aplist = aut->ap(); - for (auto& p: *relmap) - { - if (!p.first.is(op::ap)) - throw std::runtime_error - ("relabel_here: old labels should be atomic propositions"); - if (!p.second.is_boolean()) - throw std::runtime_error - ("relabel_here: new labels should be Boolean formulas"); + void + comp_new_letters(bdd_partition& part, + twa_graph& aut, + const std::string& var_prefix, + bool split) + { + auto& ig = *part.ig; + const auto& treated = part.treated; + auto& new_aps = part.new_aps; + // Get the new variables and their negations + const unsigned Nnl = treated.size(); + const unsigned Nnv = std::ceil(std::log2(Nnl)); + std::vector> Nv_vec(Nnv); - // Don't attempt to rename APs that are not used. - if (std::find(aplist.begin(), aplist.end(), p.first) == aplist.end()) - continue; + new_aps.reserve(Nnv); + for (unsigned i = 0; i < Nnv; ++i) + { + // todo check if it does not exist / use anonymous? + new_aps.push_back(formula::ap(var_prefix+std::to_string(i))); + int v = aut.register_ap(new_aps.back()); + Nv_vec[i] = {bdd_nithvar(v), bdd_ithvar(v)}; + } - int oldv = aut->register_ap(p.first); - vars.emplace_back(oldv); - if (p.second.is(op::ap)) - { - int newv = aut->register_ap(p.second); - newvars.insert(newv); - bdd_setpair(pairs, oldv, newv); - } - else - { - p.second.traverse([&](const formula& f) - { - if (f.is(op::ap)) - newvars.insert(aut->register_ap(f)); - return false; - }); - bdd newb = formula_to_bdd(p.second, d, aut); - bdd_setbddpair(pairs, oldv, newb); - bool_subst = true; - } + auto leaveidx2label = [&](unsigned idx) + { + unsigned c = 0; + unsigned rem = idx; + bdd thisbdd = bddtrue; + while (rem) + { + thisbdd &= Nv_vec[c][rem & 1]; + ++c; + rem >>= 1; + } + for (; c < Nnv; ++c) + thisbdd &= Nv_vec[c][0]; + return thisbdd; + }; + + // Compute only labels of leaves + for (unsigned idx = 0; idx < Nnl; ++idx) + ig.state_storage(treated[idx].second).new_label = leaveidx2label(idx); + + // We will label the implication graph with the new letters + auto relabel_impl = [&](unsigned s, auto&& relabel_impl_rec) + { + auto& ss = ig.state_storage(s); + if (ss.new_label != bddfalse) + return ss.new_label; + else + { + assert((ss.succ != 0) && "Should not be a leave"); + bdd thisbdd = bddfalse; + for (const auto& e : ig.out(s)) + thisbdd |= relabel_impl_rec(e.dst, relabel_impl_rec); + ss.new_label = thisbdd; + return thisbdd; + } + }; + + if (!split) + { + // For split only leaves is ok, + // disjunction is done via transitions + // This will compute the new_label for all states in the ig + const unsigned Norig = part.all_cond_ptr->size(); + for (unsigned i = 0; i < Norig; ++i) + relabel_impl(i, relabel_impl); + } + } // comp_new_letters + + // Recursive traversal of implication graph + void replace_label_(unsigned si, + unsigned esrc, unsigned edst, + bdd& econd, + const bdd_partition::implication_graph& ig, + twa_graph& aut) + { + auto& sstore = ig.state_storage(si); + if (sstore.succ == 0) + { + if (econd == bddfalse) + econd = sstore.new_label; + else + aut.new_edge(esrc, edst, sstore.new_label); + } + else + { + for (const auto& e_ig : ig.out(si)) + replace_label_(e_ig.dst, esrc, edst, econd, ig, aut); + } + 
} + + relabeling_map + partitioned_relabel_here_(twa_graph& aut, bool split, + unsigned max_letter, + unsigned max_letter_mult, + const bdd& concerned_ap, + bool treat_all, + const std::string& var_prefix) + { + auto abandon = []() + { + return relabeling_map{}; + }; + + + // When split we need to distiguish effectively new and old edges + if (split) + { + aut.get_graph().remove_dead_edges_(); + aut.get_graph().sort_edges_(); + aut.get_graph().chain_edges_(); + } + + // Get all conditions present in the automaton + std::vector all_cond; + bdd ignoredcon = bddtrue; + std::unordered_map all_cond_id2idx; + + all_cond.reserve(0.1*aut.num_edges()); + all_cond_id2idx.reserve(0.1*aut.num_edges()); + + // Map for all supports + // and whether or not they are to be relabeled + std::unordered_map, bdd_hash> all_supports; + + for (const auto& e : aut.edges()) + { + auto it = all_supports.find(e.cond); + if (it != all_supports.end()) + continue; // Already treated + bdd se = bddtrue; + bool is_concerned = true; + if (!treat_all) + { + se = bdd_support(e.cond); + is_concerned = bdd_implies(concerned_ap, se); + } + + all_supports.emplace(e.cond, + std::make_pair(is_concerned, se)); + + if (!is_concerned) + { + assert(bdd_existcomp(se, concerned_ap) == bddtrue + && "APs are not partitioned"); + continue; + } + + auto [_, ins] = + all_cond_id2idx.try_emplace(e.cond.id(), all_cond.size()); + if (ins) + { + all_cond.push_back(e.cond); + if (all_cond.size() > max_letter) + return abandon(); + } + } + + unsigned stop = max_letter; + if (max_letter_mult != -1u) + { + // Make sure it does not overflow + if (max_letter_mult <= (-1u / ((unsigned) all_cond.size()))) + stop = std::min(stop, + (unsigned) (max_letter_mult*all_cond.size())); + } + + auto this_partition = try_partition_me(all_cond, stop); + + if (!this_partition.relabel_succ) + return abandon(); + + comp_new_letters(this_partition, aut, var_prefix, split); + + // An original condition is represented by all leaves that imply it + auto& ig = *this_partition.ig; + const unsigned Ns = aut.num_states(); + const unsigned Nt = aut.num_edges(); + for (unsigned s = 0; s < Ns; ++s) + { + for (auto& e : aut.out(s)) + { + if (aut.edge_number(e) > Nt) + continue; + if (!all_supports.at(e.cond).first) + continue; // Edge not concerned + unsigned idx = all_cond_id2idx[e.cond.id()]; + + if (split) + { + // initial call + // We can not hold a ref to the edge + // as the edgevector might get reallocated + bdd econd = bddfalse; + unsigned eidx = aut.edge_number(e); + replace_label_(idx, e.src, e.dst, + econd, ig, aut); + aut.edge_storage(eidx).cond = econd; + } + else + e.cond = ig.state_storage(idx).new_label; + } // for edge + } // for state + return this_partition.to_relabeling_map(aut); + } + + void + relabel_here_ap_(twa_graph_ptr& aut_ptr, relabeling_map relmap) + { + assert(aut_ptr); + twa_graph& aut = *aut_ptr; + + std::unique_ptr pairs(bdd_newpair()); + auto d = aut.get_dict(); + std::vector vars; + std::set newvars; + vars.reserve(relmap.size()); + bool bool_subst = false; + auto aplist = aut.ap(); + + for (auto& p: relmap) + { + // Don't attempt to rename APs that are not used. 
+ if (std::find(aplist.begin(), aplist.end(), p.first) == aplist.end()) + continue; + + int oldv = aut.register_ap(p.first); + vars.emplace_back(oldv); + if (p.second.is(op::ap)) + { + int newv = aut.register_ap(p.second); + newvars.insert(newv); + bdd_setpair(pairs.get(), oldv, newv); + } + else + { + p.second.traverse([&](const formula& f) + { + if (f.is(op::ap)) + newvars.insert(aut.register_ap(f)); + return false; + }); + bdd newb = formula_to_bdd(p.second, d, aut_ptr); + bdd_setbddpair(pairs.get(), oldv, newb); + bool_subst = true; + } } bool need_cleanup = false; typedef bdd (*op_t)(const bdd&, bddPair*); op_t op = bool_subst ? static_cast(bdd_veccompose) : static_cast(bdd_replace); - for (auto& t: aut->edges()) + for (auto& t: aut.edges()) { - bdd c = (*op)(t.cond, pairs); + bdd c = (*op)(t.cond, pairs.get()); t.cond = c; if (c == bddfalse) need_cleanup = true; @@ -86,14 +301,172 @@ namespace spot // p0) for (auto v: vars) if (newvars.find(v) == newvars.end()) - aut->unregister_ap(v); + aut.unregister_ap(v); // If some of the edges were relabeled false, we need to clean the // automaton. if (need_cleanup) { - aut->merge_edges(); // remove any edge labeled by 0 - aut->purge_dead_states(); // remove useless states + aut.merge_edges(); // remove any edge labeled by 0 + aut.purge_dead_states(); // remove useless states } + } + + void + relabel_here_gen_(twa_graph_ptr& aut_ptr, relabeling_map relmap) + { + assert(aut_ptr); + twa_graph& aut = *aut_ptr; + + auto form2bdd = [this_dict = aut.get_dict()](const formula& f) + { + return formula_to_bdd(f, this_dict, this_dict); + }; + + auto bdd2form = [bdddict = aut.get_dict()](const bdd& cond) + { + return bdd_to_formula(cond, bdddict); + }; + + + // translate formula -> bdd + std::unordered_map base_letters; + base_letters.reserve(relmap.size()); + + std::unordered_map comp_letters; + std::unordered_set ignored_letters; + + // Necessary to detect unused + bdd new_var_supp = bddtrue; + auto translate = [&](bdd& cond) + { + // Check if known + for (const auto& map : {base_letters, comp_letters}) + { + auto it = map.find(cond); + if (it != map.end()) + { + cond = it->second; + return; + } + } + + // Check if known to be ignored + if (auto it = ignored_letters.find(cond); + it != ignored_letters.end()) + return; + + // Check if ignored + bdd cond_supp = bdd_support(cond); + if (!bdd_implies(new_var_supp, cond_supp)) + { + ignored_letters.insert(cond); + assert(bdd_existcomp(cond_supp, new_var_supp) == bddtrue + && "APs are not partitioned"); + return; + } + + // Compute + // compose the given cond from a disjunction of base_letters + bdd old_cond = bddfalse; + for (const auto& [k, v] : base_letters) + { + if (bdd_implies(k, cond)) + old_cond |= v; + } + comp_letters[cond] = old_cond; + cond = old_cond; + return; + }; + + for (const auto& [new_f, old_f] : relmap) + { + bdd new_cond = form2bdd(new_f); + new_var_supp &= bdd_support(new_cond); + base_letters[new_cond] = form2bdd(old_f); + } + + + // Save the composed letters? With a special seperator like T/F? 
+ // Is swapping between formula <-> bdd expensive + for (auto& e : aut.edges()) + translate(e.cond); + + // Remove the new auxiliary variables from the aut + bdd c_supp = new_var_supp; + while (c_supp != bddtrue) + { + aut.unregister_ap(bdd_var(c_supp)); + c_supp = bdd_high(c_supp); + } + + return; + } + + } // Namespace + + void + relabel_here(twa_graph_ptr& aut, relabeling_map* relmap) + { + if (!relmap || relmap->empty()) + return; + + // There are two different types of relabeling maps: + // 1) The "traditional": + // New atomic propositions (keys) correspond to general formulas over + // the original propositions (values) + // 2) The one resulting from partitioned_relabel_here + // Here general (boolean) formulas over new propositions (keys) + // are associated to general formulas over + // the original propositions (values) + + if (!std::all_of(relmap->begin(), relmap->end(), + [](const auto& it){return it.first.is_boolean() + && it.second.is_boolean(); })) + throw std::runtime_error + ("relabel_here: old labels and new labels " + "should be Boolean formulas"); + + bool only_ap = std::all_of(relmap->cbegin(), relmap->cend(), + [](const auto& p) + { + return p.first.is(op::ap); + }); + + if (only_ap) + relabel_here_ap_(aut, *relmap); + else + relabel_here_gen_(aut, *relmap); + } + + relabeling_map + partitioned_relabel_here(twa_graph_ptr& aut, + bool split, + unsigned max_letter, + unsigned max_letter_mult, + const bdd& concerned_ap, + std::string var_prefix) + { + if (!aut) + throw std::runtime_error("aut is null!"); + + if (std::find_if(aut->ap().cbegin(), aut->ap().cend(), + [var_prefix](const auto& ap) + { + return ap.ap_name().find(var_prefix) == 0; + }) != aut->ap().cend()) + throw std::runtime_error("partitioned_relabel_here(): " + "The given prefix for new variables may not appear as " + "a prefix of existing variables."); + + // If concerned_ap == bddtrue -> all aps are concerned + bool treat_all = concerned_ap == bddtrue; + bdd concerned_ap_ + = treat_all ? aut->ap_vars() : concerned_ap; + return partitioned_relabel_here_(*aut, split, + max_letter, max_letter_mult, + concerned_ap_, + treat_all, + var_prefix); } } diff --git a/spot/twaalgos/relabel.hh b/spot/twaalgos/relabel.hh index e10fe8903..34f7a0a41 100644 --- a/spot/twaalgos/relabel.hh +++ b/spot/twaalgos/relabel.hh @@ -21,6 +21,10 @@ #include #include +#include + +#include +#include namespace spot { @@ -33,4 +37,33 @@ /// or relabel_bse(). SPOT_API void relabel_here(twa_graph_ptr& aut, relabeling_map* relmap); + + + /// \brief Replace conditions in \a aut with non-overlapping conditions + /// over fresh variables. + /// + /// Partitions the conditions in the automaton, then (binary) encodes + /// them using fresh propositions. + /// This can lead to an exponential explosion in the number of + /// conditions. The operation is aborted if either + /// the number of new letters (subsets of the partition) exceeds + /// \a max_letter or \a max_letter_mult times the number of conditions + /// in the original automaton. + /// The argument \a concerned_ap can be used to filter out transitions. + /// If given, only the transitions whose support intersects + /// \a concerned_ap (or whose condition is T) are taken into account. + /// The fresh APs will be enumerated and prefixed by \a var_prefix. + /// These variables need to be fresh, i.e., they may not exist yet (not checked). + /// + /// \note If concerned_ap is given, then there may not be an edge + /// whose condition uses APs both inside and outside of concerned_ap.
+ /// Mostly used in a game setting to distinguish between + /// env and player transitions. + SPOT_API relabeling_map + partitioned_relabel_here(twa_graph_ptr& aut, bool split = false, + unsigned max_letter = -1u, + unsigned max_letter_mult = -1u, + const bdd& concerned_ap = bddtrue, + std::string var_prefix = "__nv"); + } diff --git a/spot/twaalgos/remprop.cc b/spot/twaalgos/remprop.cc index 942a1b4b5..c84ee9188 100644 --- a/spot/twaalgos/remprop.cc +++ b/spot/twaalgos/remprop.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015-2019, 2022 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2015-2019, 2022, 2023 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -205,7 +205,7 @@ namespace spot } else { - e.cond = bdd_exist(e.cond, rem); + e.cond = bdd_restrict(e.cond, rem); } } @@ -244,4 +244,51 @@ namespace spot } + twa_graph_ptr from_finite(const_twa_graph_ptr aut, const char* alive) + { + twa_graph_ptr res = + make_twa_graph(aut, + { true, false, true, false, false, false }); + + if (aut->get_named_prop>("state-names")) + res->copy_state_names_from(aut); + auto* names = res->get_named_prop>("state-names"); + + unsigned alive_sink = res->new_state(); + if (names != nullptr) + names->push_back("sink"); + auto acc = res->acc().all_sets(); + auto alive_bdd = bdd_ithvar(res->register_ap(alive)); + res->new_edge(alive_sink, alive_sink, !alive_bdd, acc); + + unsigned ns = res->num_states(); + for (unsigned s = 0; s < ns; ++s) + { + if (s == alive_sink) + continue; + + bool was_acc = res->state_is_accepting(s); + + // erase accepting marks, require alive on non-accepting transition, + // and remove self-loop edges used to mark acceptance + auto i = res->out_iteraser(s); + while (i) + { + if (i->src == i->dst && i->cond == bddfalse) + { + i.erase(); + continue; + } + + i->cond &= alive_bdd; + i->acc = {}; + ++i; + } + + if (was_acc) + res->new_edge(s, alive_sink, !alive_bdd); + } + + return res; + } } diff --git a/spot/twaalgos/remprop.hh b/spot/twaalgos/remprop.hh index 09d75ffac..4b496e65a 100644 --- a/spot/twaalgos/remprop.hh +++ b/spot/twaalgos/remprop.hh @@ -54,5 +54,8 @@ namespace spot SPOT_API twa_graph_ptr to_finite(const_twa_graph_ptr aut, const char* alive = "alive"); + /// \brief The opposite of the to_finite operation + SPOT_API twa_graph_ptr + from_finite(const_twa_graph_ptr aut, const char* alive = "alive"); } diff --git a/spot/twaalgos/simulation.cc b/spot/twaalgos/simulation.cc index e62762489..ed53929b3 100644 --- a/spot/twaalgos/simulation.cc +++ b/spot/twaalgos/simulation.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement +// Copyright (C) 2012-2023 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -93,13 +93,7 @@ namespace spot return true; if (states > r.states) return false; - - if (edges < r.edges) - return true; - if (edges > r.edges) - return false; - - return false; + return edges < r.edges; } inline bool operator>(const automaton_size& r) @@ -590,7 +584,7 @@ namespace spot // C1 then (!C1)C2, instead of C1 then C2. // With minatop_isop, we ensure that the no negative // class variable will be seen (likewise for promises). 
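Tying the relabel.hh additions above together, here is a hedged round-trip sketch for partitioned_relabel_here() followed by relabel_here(); the letter limits are arbitrary illustrative values and the wrapper function is hypothetical.

    #include <spot/twaalgos/relabel.hh>

    // Encode the letters of `aut` over fresh propositions ("__nv0", ...), then
    // undo the renaming with the returned relabeling_map.
    void relabel_roundtrip(spot::twa_graph_ptr aut)
    {
      spot::relabeling_map m =
        spot::partitioned_relabel_here(aut, /*split=*/false,
                                       /*max_letter=*/256u,
                                       /*max_letter_mult=*/8u);
      if (m.empty())             // partitioning was abandoned; labels unchanged
        return;
      // ... work with the relabeled automaton here ...
      spot::relabel_here(aut, &m);  // map the fresh letters back
    }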
- minato_isop isop(sig & one); + minato_isop isop(bdd_restrict(sig, one)); ++nb_minterms; @@ -603,17 +597,12 @@ namespace spot // Take the edge, and keep only the variable which // are used to represent the class. - bdd dst = bdd_existcomp(cond_acc_dest, - all_class_var_); + bdd dst = bdd_existcomp(cond_acc_dest, all_class_var_); // Keep only ones who are acceptance condition. auto acc = bdd_to_mark(bdd_existcomp(cond_acc_dest, all_proms_)); - // Keep the other! - bdd cond = bdd_existcomp(cond_acc_dest, - sup_all_atomic_prop); - // Because we have complemented all the Inf // acceptance conditions on the input automaton, // we must revert them to create a new edge. @@ -630,11 +619,11 @@ namespace spot accst[srcst] = acc; acc = {}; } - gb->new_edge(dst.id(), src.id(), cond, acc); + gb->new_edge(dst.id(), src.id(), one, acc); } else { - gb->new_edge(src.id(), dst.id(), cond, acc); + gb->new_edge(src.id(), dst.id(), one, acc); } } } diff --git a/spot/twaalgos/stats.cc b/spot/twaalgos/stats.cc index ddccba5db..4a905e542 100644 --- a/spot/twaalgos/stats.cc +++ b/spot/twaalgos/stats.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2018, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2008, 2011-2018, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. @@ -33,6 +33,16 @@ namespace spot { + unsigned long long + count_all_transitions(const const_twa_graph_ptr& g) + { + unsigned long long tr = 0; + bdd v = g->ap_vars(); + for (auto& e: g->edges()) + tr += bdd_satcountset(e.cond, v); + return tr; + } + namespace { class stats_bfs: public twa_reachable_iterator_breadth_first @@ -82,6 +92,7 @@ namespace spot }; + template void dfs(const const_twa_graph_ptr& ge, SU state_update, EU edge_update) { @@ -344,10 +355,73 @@ namespace spot << std::string(beg, end + 2) << ", "; tmp << e.what(); throw std::runtime_error(tmp.str()); - } } + void printable_size::print(std::ostream& os, const char* pos) const + { + char p = 'r'; + if (*pos == '[') + { + p = pos[1]; + if (pos[2] != ']' || !(p == 'r' || p == 'u' || p == 'a')) + { + const char* end = strchr(pos + 1, ']'); + std::ostringstream tmp; + tmp << "while processing %" + << std::string(pos, end + 2) << ", " + << "only [a], [r], or [u] is supported."; + throw std::runtime_error(tmp.str()); + } + } + switch (p) + { + case 'r': + os << reachable_; + return; + case 'a': + os << all_; + return; + case 'u': + os << all_ - reachable_; + return; + } + SPOT_UNREACHABLE(); + return; + } + + void printable_long_size::print(std::ostream& os, const char* pos) const + { + char p = 'r'; + if (*pos == '[') + { + p = pos[1]; + if (pos[2] != ']' || !(p == 'r' || p == 'u' || p == 'a')) + { + const char* end = strchr(pos + 1, ']'); + std::ostringstream tmp; + tmp << "while processing %" + << std::string(pos, end + 2) << ", " + << "only [a], [r], or [u] is supported."; + throw std::runtime_error(tmp.str()); + } + } + switch (p) + { + case 'r': + os << reachable_; + return; + case 'a': + os << all_; + return; + case 'u': + os << all_ - reachable_; + return; + } + SPOT_UNREACHABLE(); + return; + } + stat_printer::stat_printer(std::ostream& os, const char* format) : format_(format) @@ -376,15 +450,15 @@ namespace spot if (has('t')) { twa_sub_statistics s = sub_stats_reachable(aut); - states_ = s.states; - edges_ = s.edges; - trans_ = 
s.transitions; + states_.set(s.states, aut->num_states()); + edges_.set(s.edges, aut->num_edges()); + trans_.set(s.transitions, count_all_transitions(aut)); } else if (has('s') || has('e')) { twa_statistics s = stats_reachable(aut); - states_ = s.states; - edges_ = s.edges; + states_.set(s.states, aut->num_states()); + edges_.set(s.edges, aut->num_edges()); } if (has('a')) diff --git a/spot/twaalgos/stats.hh b/spot/twaalgos/stats.hh index 1caa8324b..24353fc31 100644 --- a/spot/twaalgos/stats.hh +++ b/spot/twaalgos/stats.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008, 2011-2017, 2020 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2008, 2011-2017, 2020, 2022 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // Copyright (C) 2004 Laboratoire d'Informatique de Paris 6 (LIP6), // département Systèmes Répartis Coopératifs (SRC), Université Pierre // et Marie Curie. @@ -55,6 +55,9 @@ namespace spot /// \brief Compute sub statistics for an automaton. SPOT_API twa_sub_statistics sub_stats_reachable(const const_twa_ptr& g); + /// \brief Count all transtitions, even unreachable ones. + SPOT_API unsigned long long + count_all_transitions(const const_twa_graph_ptr& g); class SPOT_API printable_formula: public printable_value { @@ -102,6 +105,36 @@ namespace spot void print(std::ostream& os, const char* pos) const override; }; + class SPOT_API printable_size final: + public spot::printable + { + unsigned reachable_ = 0; + unsigned all_ = 0; + public: + void set(unsigned reachable, unsigned all) + { + reachable_ = reachable; + all_ = all; + } + + void print(std::ostream& os, const char* pos) const override; + }; + + class SPOT_API printable_long_size final: + public spot::printable + { + unsigned long long reachable_ = 0; + unsigned long long all_ = 0; + public: + void set(unsigned long long reachable, unsigned long long all) + { + reachable_ = reachable; + all_ = all; + } + + void print(std::ostream& os, const char* pos) const override; + }; + /// \brief prints various statistics about a TGBA /// /// This object can be configured to display various statistics @@ -123,9 +156,9 @@ namespace spot const char* format_; printable_formula form_; - printable_value states_; - printable_value edges_; - printable_value trans_; + printable_size states_; + printable_size edges_; + printable_long_size trans_; printable_value acc_; printable_scc_info scc_; printable_value nondetstates_; diff --git a/spot/twaalgos/synthesis.cc b/spot/twaalgos/synthesis.cc index 9025bb303..aef11d27b 100644 --- a/spot/twaalgos/synthesis.cc +++ b/spot/twaalgos/synthesis.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020, 2021 Laboratoire de Recherche et +// Copyright (C) 2020-2023 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
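// Note on the bracket selectors handled by printable_size::print and
// printable_long_size::print above (they back %s, %e and %t in stat_printer):
//   [r]  reachable count (the default, same as no bracket)
//   [a]  count over the whole automaton, including unreachable states
//   [u]  the unreachable part only, i.e. [a] minus [r]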
@@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -36,6 +37,7 @@ #include +#include // Helper function/structures for split_2step namespace{ @@ -136,12 +138,12 @@ namespace{ // Note, this only deals with deterministic strategies // Note, assumes that env starts playing twa_graph_ptr - apply_strategy(const twa_graph_ptr& arena, + apply_strategy(const const_twa_graph_ptr& arena, bool unsplit, bool keep_acc) { - const auto& win = get_state_winners(arena); - const auto& strat = get_strategy(arena); - const auto& sp = get_state_players(arena); + const region_t& win = get_state_winners(arena); + const strategy_t& strat = get_strategy(arena); + const region_t& sp = get_state_players(arena); auto outs = get_synthesis_outputs(arena); if (!win[arena->get_init_state_number()]) @@ -446,9 +448,13 @@ namespace spot split_2step(const const_twa_graph_ptr& aut, const bdd& output_bdd, bool complete_env) { + assert(!aut->get_named_prop("state-player") + && "aut is already split!"); auto split = make_twa_graph(aut->get_dict()); auto [has_unsat, unsat_mark] = aut->acc().unsat_mark(); + bool max_par, odd_par, color_env; + color_env = aut->acc().is_parity(max_par, odd_par, true); split->copy_ap_of(aut); split->new_states(aut->num_states()); @@ -456,6 +462,7 @@ namespace spot set_synthesis_outputs(split, output_bdd); const auto use_color = has_unsat; + color_env &= use_color; if (has_unsat) split->copy_acceptance_of(aut); else @@ -489,8 +496,10 @@ namespace spot // So we can first loop over the aut // and then deduce the owner - // a sort of hash-map for all new intermediate states - std::unordered_multimap env_hash; + // a sort of hash-map for all new intermediate stat + // second is the color of the incoming env trans + std::unordered_multimap> env_hash; env_hash.reserve((int) (1.5 * aut->num_states())); // a local map for edges leaving the current src // this avoids creating and then combining edges for each minterm @@ -566,9 +575,7 @@ namespace spot // implies is faster than and if (bdd_implies(one_letter, e_info.einsup.first)) { - e_info.econdout = - bdd_appex(e_info.econd, one_letter, - bddop_and, input_bdd); + e_info.econdout = bdd_restrict(e_info.econd, one_letter); dests.push_back(&e_info); assert(e_info.econdout != bddfalse); } @@ -589,7 +596,7 @@ namespace spot auto range_h = env_hash.equal_range(h); for (auto it_h = range_h.first; it_h != range_h.second; ++it_h) { - unsigned i = it_h->second; + const auto& [i, this_color] = it_h->second; auto out = split->out(i); if (std::equal(out.begin(), out.end(), dests.begin(), dests.end(), @@ -611,9 +618,10 @@ namespace spot if (it != env_edge_hash.end()) it->second.second |= one_letter; else - // Uncolored env_edge_hash.emplace(i, - eeh_t(split->new_edge(src, i, bddtrue), one_letter)); + eeh_t(split->new_edge(src, i, bddtrue, + this_color), + one_letter)); break; } } @@ -621,12 +629,31 @@ namespace spot if (to_add) { unsigned d = split->new_state(); - unsigned n_e = split->new_edge(src, d, bddtrue); - env_hash.emplace(h, d); + auto this_color = acc_cond::mark_t({}); + bool has_uncolored = false; + for (const auto &t: dests) + { + split->new_edge(d, t->dst, t->econdout, + use_color ? 
t->acc + : acc_cond::mark_t({})); + this_color |= t->acc; + has_uncolored |= !t->acc; + } + + if (!color_env | has_uncolored) + this_color = acc_cond::mark_t({}); + else if (max_par) + this_color = + acc_cond::mark_t({this_color.min_set()-1}); + else // min_par + this_color = + acc_cond::mark_t({this_color.max_set()-1}); + + unsigned n_e = split->new_edge(src, d, bddtrue, this_color); + env_hash.emplace(std::piecewise_construct, + std::forward_as_tuple(h), + std::forward_as_tuple(d, this_color)); env_edge_hash.emplace(d, eeh_t(n_e, one_letter)); - for (const auto &t: dests) - split->new_edge(d, t->dst, t->econdout, - use_color ? t->acc : acc_cond::mark_t({})); } } // letters // save locally stored condition @@ -741,6 +768,9 @@ namespace spot case (algo::LAR_OLD): name = "lar.old"; break; + case (algo::ACD): + name = "acd"; + break; } return os << name; } @@ -768,17 +798,15 @@ namespace spot auto sol = gi.s; const bdd_dict_ptr& dict = gi.dict; - for (auto&& p : std::vector> - {{"simul", 0}, - {"ba-simul", 0}, - {"det-simul", 0}, - {"tls-impl", 1}, - {"wdba-minimize", 2}}) - extra_options.set(p.first, extra_options.get(p.first, p.second)); + extra_options.set_if_unset("simul", 0); + extra_options.set_if_unset("tls-impl", 1); + extra_options.set_if_unset("wdba-minimize", 2); translator trans(dict, &extra_options); switch (sol) { + case algo::ACD: + SPOT_FALLTHROUGH; case algo::LAR: SPOT_FALLTHROUGH; case algo::LAR_OLD: @@ -808,13 +836,10 @@ namespace spot if (force_sbacc) dpa = sbacc(dpa); reduce_parity_here(dpa, true); - change_parity_here(dpa, parity_kind_max, - parity_style_odd); assert(( [&dpa]() -> bool { bool max, odd; - dpa->acc().is_parity(max, odd); - return max && odd; + return dpa->acc().is_parity(max, odd); }())); assert(is_deterministic(dpa)); return dpa; @@ -907,7 +932,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(tmp, outs, true); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) @@ -930,7 +954,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(aut, outs, true); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) @@ -956,6 +979,10 @@ namespace spot *vs << "determinization done\nDPA has " << dpa->num_states() << " states, " << dpa->num_sets() << " colors\n"; + // The named property "state-player" is set in split_2step + // but not propagated by ntgba2dpa + alternate_players(dpa); + // Merge states knows about players dpa->merge_states(); if (bv) bv->paritize_time += sw.stop(); @@ -964,11 +991,10 @@ namespace spot << dpa->num_states() << " states\n" << "determinization and simplification took " << bv->paritize_time << " seconds\n"; - // The named property "state-player" is set in split_2step - // but not propagated by ntgba2dpa - alternate_players(dpa); break; } + case algo::ACD: + SPOT_FALLTHROUGH; case algo::LAR: SPOT_FALLTHROUGH; case algo::LAR_OLD: @@ -978,20 +1004,20 @@ namespace spot if (gi.s == algo::LAR) { dpa = to_parity(aut); - // reduce_parity is called by to_parity(), - // but with colorization turned off. - colorize_parity_here(dpa, true); + reduce_parity_here(dpa, false); } - else + else if (gi.s == algo::LAR_OLD) { dpa = to_parity_old(aut); - dpa = reduce_parity_here(dpa, true); + reduce_parity_here(dpa, true); } - change_parity_here(dpa, parity_kind_max, parity_style_odd); + else + dpa = acd_transform(aut); if (bv) bv->paritize_time += sw.stop(); if (vs) - *vs << "LAR construction done in " << bv->paritize_time + *vs << (gi.s == algo::ACD ? 
"ACD" : "LAR") + << " construction done in " << bv->paritize_time << " seconds\nDPA has " << dpa->num_states() << " states, " << dpa->num_sets() << " colors\n"; @@ -999,7 +1025,6 @@ namespace spot if (bv) sw.start(); dpa = split_2step(dpa, outs, true); - colorize_parity_here(dpa, true); if (bv) bv->split_time += sw.stop(); if (vs) @@ -1057,22 +1082,25 @@ namespace spot if (!get_state_winner(arena, arena->get_init_state_number())) return nullptr; - // If we use minimizations 0,1 or 2 -> unsplit - const bool do_unsplit = gi.minimize_lvl < 3; - auto m = apply_strategy(arena, do_unsplit, false); + auto m = apply_strategy(arena, false, false); m->prop_universal(true); - if ((0 < gi.minimize_lvl) && (gi.minimize_lvl < 3)) - reduce_mealy_here(m, gi.minimize_lvl == 2); - else if (gi.minimize_lvl >= 3) - m = minimize_mealy(m, gi.minimize_lvl - 4); - if (gi.bv) { + auto sp = get_state_players(m); + auto n_s_env = sp.size() - std::accumulate(sp.begin(), + sp.end(), + 0u); + auto n_e_env = 0u; + std::for_each(m->edges().begin(), m->edges().end(), + [&n_e_env, &sp](const auto& e) + { + n_e_env += sp[e.src]; + }); gi.bv->strat2aut_time += sw.stop(); - gi.bv->nb_strat_states += m->num_states(); - gi.bv->nb_strat_edges += m->num_edges(); + gi.bv->nb_strat_states += n_s_env; + gi.bv->nb_strat_edges += n_e_env; } assert(is_mealy(m)); @@ -1132,6 +1160,27 @@ namespace spot //Anonymous for try_create_strat namespace { + // Checks that 2 sets have a common element. Use it instead + // of set_intersection when we just want to check if they have a common + // element because it avoids going through the rest of the sets after an + // element is found. + static bool + are_intersecting(const std::set &v1, + const std::set &v2) + { + auto v1_pos = v1.begin(), v2_pos = v2.begin(), v1_end = v1.end(), + v2_end = v2.end(); + while (v1_pos != v1_end && v2_pos != v2_end) + { + if (*v1_pos < *v2_pos) + ++v1_pos; + else if (*v2_pos < *v1_pos) + ++v2_pos; + else + return true; + } + return false; + } class formula_2_inout_props { private: @@ -1173,16 +1222,19 @@ namespace spot mealy_like try_create_direct_strategy(formula f, const std::vector& output_aps, - synthesis_info &gi) + synthesis_info &gi, bool want_strategy) { auto vs = gi.verbose_stream; auto& bv = gi.bv; + bdd_dict_ptr& dict = gi.dict; + int tmp; if (vs) *vs << "trying to create strategy directly for " << f << '\n'; - auto ret_sol_maybe = [&vs]() + auto ret_sol_maybe = [&vs, &tmp, &dict]() { + dict->unregister_all_my_variables(&tmp); if (vs) *vs << "direct strategy might exist but was not found.\n"; return mealy_like{ @@ -1190,8 +1242,9 @@ namespace spot nullptr, bddfalse}; }; - auto ret_sol_none = [&vs]() + auto ret_sol_none = [&vs, &tmp, &dict]() { + dict->unregister_all_my_variables(&tmp); if (vs) *vs << "no strategy exists.\n"; return mealy_like{ @@ -1200,14 +1253,23 @@ namespace spot bddfalse}; }; - auto ret_sol_exists = [&vs](auto strat) + auto ret_sol_exists = + [&vs, &want_strategy, &tmp, &dict](twa_graph_ptr strat) { + dict->unregister_all_my_variables(&tmp); if (vs) { - *vs << "direct strategy was found.\n" - << "direct strat has " << strat->num_states() - << " states and " << strat->num_sets() << " colors\n"; + *vs << "direct strategy was found.\n"; + if (want_strategy) + { + *vs << "direct strat has " << strat->num_states() + << " states, " << strat->num_edges() + << " edges and " << strat->num_sets() << " colors\n"; + + } } + if (strat) + strat->merge_edges(); return mealy_like{ mealy_like::realizability_code::REALIZABLE_REGULAR, strat, @@ -1215,88 
+1277,146 @@ namespace spot }; formula_2_inout_props form2props(output_aps); - auto output_aps_set = std::set(output_aps.begin(), - output_aps.end()); - - formula f_g = formula::tt(), f_left, f_right; - - // If we have a formula like G(b₁) ∧ (φ ↔ GFb₂), we extract b₁ and - // continue the construction for (φ ↔ GFb₂). + formula f_g, f_other; + // If it is G(α) ∧ G(β) ∧ … if (f.is(op::And)) { - if (f.size() != 2) - return ret_sol_maybe(); - if (f[0].is(op::G) && f[0][0].is_boolean()) - { - f_g = f[0]; - f = f[1]; - } - else if (f[1].is(op::G) && f[1][0].is_boolean()) - { - f_g = f[1]; - f = f[0]; - } - else - return ret_sol_maybe(); - } - if (f.is(op::Equiv)) - { - auto [left_ins, left_outs] = form2props.aps_of(f[0]); - auto [right_ins, right_outs] = form2props.aps_of(f[1]); + std::vector gs; + std::vector others; + for (auto child : f) + if (child.is(op::G) && child[0].is_boolean()) + gs.push_back(child[0]); + else + others.push_back(child); - auto properties_vector = [](const formula& f, - const std::set& ins, - const std::set& outs) + f_g = formula::And(gs); + f_other = formula::And(others); + } + else if (f.is(op::G) && f[0].is_boolean()) + { + f_g = f[0]; + f_other = formula::tt(); + } + else + { + f_g = formula::tt(); + f_other = f; + } + + // We have to check if the content of G is realizable (input-complete) + bdd output_bdd_tmp = bddtrue; + for (auto& out : output_aps) + output_bdd_tmp &= bdd_ithvar( + dict->register_proposition(formula::ap(out), &tmp)); + + if (!f_g.is_tt()) + { + auto g_bdd = formula_to_bdd(f_g, dict, &tmp); + if (bdd_exist(g_bdd, output_bdd_tmp) != bddtrue) + return ret_sol_none(); + } + + if (f_other.is(op::Equiv)) + { + // Check if FG or GF + auto is_general = [&tmp, &output_bdd_tmp, &dict](const formula &f, + op first, op second) { - return std::vector - { - f.is({op::G, op::F}) && f[0][0].is_boolean() && ins.empty(), - f.is_syntactic_recurrence() && outs.empty(), - // f is FG(bool) - f.is({op::F, op::G}) && f[0][0].is_boolean() && ins.empty(), - f.is_syntactic_persistence() && outs.empty() - }; + if (!f.is({first, second}) || !f[0][0].is_boolean()) + return false; + auto f_bdd = formula_to_bdd(f[0][0], dict, &tmp); + if (bdd_exist(f_bdd, output_bdd_tmp) != bddtrue) + return false; + f_bdd = formula_to_bdd(formula::Not(f[0][0]), dict, &tmp); + bool res = (bdd_exist(f_bdd, output_bdd_tmp) == bddtrue); + return res; }; - // We need to detect - // GF(outs) ↔ recurrence(ins), - // recurrence(ins) ↔ GF(outs), - // FG(outs) ↔ persistence(ins), - // persistence(ins) ↔ FG(outs) - const auto left_properties = properties_vector(f[0], left_ins, left_outs), - right_properties = properties_vector(f[1], right_ins, right_outs); + + auto is_gf = [is_general](const formula& f) + { + return is_general(f, op::G, op::F); + }; + + auto is_fg = [is_general](const formula& f) + { + return is_general(f, op::F, op::G); + }; + + auto is_co_bu = [](const formula &f, const std::set& outs) + { + return outs.empty() && f.is_syntactic_obligation(); + }; + + auto is_buchi = [](const formula &f, const std::set& outs) + { + return outs.empty() && f.is_syntactic_recurrence(); + }; + + auto properties_vector = [&](const formula &f, + const std::set &outs) + { + auto is_lgf = is_gf(f); + auto is_lfg = is_fg(f); + return std::vector{ + // f is GF(ins + outs) <-> buchi(ins) + is_lgf, + is_buchi(f, outs), + // f is FG(ins + outs) <-> co-buchi(ins) + is_lfg, + is_co_bu(f, outs)}; + }; + + + auto [left_ins, left_outs] = form2props.aps_of(f_other[0]); + auto [right_ins, right_outs] = 
form2props.aps_of(f_other[1]); + + auto left_properties = properties_vector(f_other[0], left_outs); + auto right_properties = properties_vector(f_other[1], right_outs); + unsigned combin = -1U; for (unsigned i = 0; i < 4; ++i) - { - if (left_properties[i] && right_properties[(i%2) ? (i-1) : (i+1)]) + if (left_properties[i] && right_properties[(i % 2) ? (i - 1) : (i + 1)]) { combin = i; break; } - } + + // If we don't match, we don't know if (combin == -1U) return ret_sol_maybe(); - // left is the recurrence (resp. persistence) - // right is GF(outs) (resp. GF(outs)) - // If f[0] is GF or FG - f_left = f[(combin+1)%2]; - f_right = f[combin%2]; - if (!(combin%2)) + formula f_left = f_other[(combin + 1) % 2]; + formula f_right = f_other[combin % 2]; + if (!(combin % 2)) { std::swap(left_ins, right_ins); std::swap(left_outs, right_outs); } + auto [_, g_outs] = form2props.aps_of(f_g); + if (are_intersecting(g_outs, right_outs)) + return ret_sol_maybe(); + + // We know that a strategy exists and we don't want to construct it. + if (!want_strategy) + return ret_sol_exists(nullptr); + auto trans = create_translator(gi); - trans.set_type(combin < 2 ? postprocessor::Buchi - : postprocessor::CoBuchi); + trans.set_pref(postprocessor::Deterministic | postprocessor::Complete); + if (combin < 2) + trans.set_type(postprocessor::Buchi); + else + trans.set_type(postprocessor::CoBuchi); stopwatch sw; if (bv) sw.start(); auto res = trans.run(f_left); + if (!is_deterministic(res)) + return ret_sol_maybe(); + if (bv) { auto delta = sw.stop(); @@ -1304,79 +1424,76 @@ namespace spot if (vs) *vs << "tanslating formula done in " << delta << " seconds\n"; } - - if (!is_deterministic(res)) - return ret_sol_maybe(); - for (auto& out : right_outs) - res->register_ap(out.ap_name()); - - // The BDD that describes the content of the G in a conjunction - bdd g_bdd = bddtrue; - - // Convert the set of outputs to a BDD - bdd output_bdd = bddtrue; - for (auto &out : output_aps_set) - output_bdd &= bdd_ithvar(res->register_ap(out)); - - if (!f_g.is_tt()) - { - g_bdd = formula_to_bdd(f_g[0], res->get_dict(), res); - // If the content of G is not input-complete, a simple strategy for - // env is to play this missing value. - if (bdd_exist(g_bdd, output_bdd) != bddtrue) - { - return ret_sol_none(); - } - } - - // For the GF(outs) (resp. GF(outs)), the content and its negation can be - // converted to a BDD. - bdd right_bdd, neg_right_bdd; - if (combin < 2) - { - right_bdd = formula_to_bdd(f_right[0][0], res->get_dict(), res); - neg_right_bdd = bdd_not(right_bdd); - } - else - { - neg_right_bdd = formula_to_bdd(f_right[0][0], res->get_dict(), res); - right_bdd = bdd_not(neg_right_bdd); - } - // Monitor is a special case. As we color accepting transitions, if the - // acceptance is true, we cannot say that a transition is accepting if - // a color is seen. - const bool is_true = res->acc().is_t(); - scc_info si(res, scc_info_options::NONE); - for (auto& e : res->edges()) - { - // Here the part describing the outputs is based on the fact that - // they must be seen infinitely often. As these edges are seen - // finitely often, we can let the minimization choose the value. - if (si.scc_of(e.src) == si.scc_of(e.dst)) - { - if (e.acc || is_true) - e.cond &= right_bdd; - else - e.cond &= neg_right_bdd; - } - // g_bdd has to be true all the time. So we cannot only do it - // between SCCs. - e.cond &= g_bdd; - if (e.cond == bddfalse) - return ret_sol_maybe(); - // The recurrence is Büchi but the strategy is a monitor. 
We need - // to remove the color. - e.acc = {}; - } - - set_synthesis_outputs(res, output_bdd); - res->set_acceptance(acc_cond::acc_code::t()); - res->prop_complete(trival::maybe()); + + bdd output_bdd = bddtrue; + auto [is, os] = form2props.aps_of(f); + for (auto i : is) + res->register_ap(i); + for (auto o : os) + output_bdd &= bdd_ithvar(res->register_ap(o)); + + bdd right_bdd = formula_to_bdd(f_right[0][0], dict, res); + bdd neg_right_bdd = bdd_not(right_bdd); + bdd g_bdd = formula_to_bdd(f_g, dict, res); + + if (combin > 1) + std::swap(right_bdd, neg_right_bdd); + + right_bdd = bdd_and(right_bdd, g_bdd); + neg_right_bdd = bdd_and(neg_right_bdd, g_bdd); + + scc_info si(res, scc_info_options::NONE); + + bool is_true_acc = ((combin < 2) && res->acc().is_t()) + || ((combin > 1) && res->acc().is_f()); + auto prop_vector = propagate_marks_vector(res); + auto& ev = res->edge_vector(); + for (unsigned i = 1; i < ev.size(); ++i) + { + auto &edge = ev[i]; + if (si.scc_of(edge.src) == si.scc_of(edge.dst)) + { + if (edge.acc || is_true_acc) + edge.cond &= right_bdd; + // If we have a GF and an edge is not colored but prop_vector says + // that this edge could be colored, it means that we can do what we + // want + else if (!prop_vector[i]) + edge.cond &= neg_right_bdd; + else + edge.cond &= g_bdd; + } + else + edge.cond &= g_bdd; + edge.acc = {}; + } + res->set_acceptance(acc_cond::acc_code::t()); + res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); + return ret_sol_exists(res); } - else - return ret_sol_maybe(); + else if (f_other.is_tt()) + { + if (!want_strategy) + return ret_sol_exists(nullptr); + auto res = make_twa_graph(dict); + + bdd output_bdd = bddtrue; + auto [ins_f, _] = form2props.aps_of(f_g); + for (auto &out : output_aps) + output_bdd &= bdd_ithvar(res->register_ap(out)); + + for (auto &in : ins_f) + res->register_ap(in); + + res->set_named_prop("synthesis-outputs", new bdd(output_bdd)); + bdd g_bdd = formula_to_bdd(f_g, dict, res); + res->new_state(); + res->new_edge(0, 0, g_bdd); + return ret_sol_exists(res); + } + return ret_sol_maybe(); } } // spot @@ -1384,28 +1501,6 @@ namespace spot namespace // anonymous for subsformula { using namespace spot; - // Checks that 2 sets have a common element. Use it instead - // of set_intersection when we just want to check if they have a common - // element because it avoids going through the rest of the sets after an - // element is found. 
- static bool - are_intersecting(const std::set &v1, - const std::set &v2) - { - auto v1_pos = v1.begin(), v2_pos = v2.begin(), v1_end = v1.end(), - v2_end = v2.end(); - while (v1_pos != v1_end && v2_pos != v2_end) - { - if (*v1_pos < *v2_pos) - ++v1_pos; - else if (*v2_pos < *v1_pos) - ++v2_pos; - else - return true; - } - return false; - } - static std::pair, std::set> algo4(const std::vector &assumptions, const std::set &outs, @@ -1641,7 +1736,7 @@ namespace // anonymous for subsformula std::vector children; for (auto fi : f) children.push_back( - extract_and(fi, outs, can_extract_impl, form2props)); + extract_and(fi, outs, false, form2props)); return formula::And(children); } if (f.is(op::Not)) @@ -1841,4 +1936,98 @@ namespace spot return res; } + namespace + { + const std::string in_mark_s("__AP_IN__"); + const std::string out_mark_s("__AP_OUT__"); + } + + game_relabeling_map + partitioned_game_relabel_here(twa_graph_ptr& arena, + bool relabel_env, + bool relabel_play, + bool split_env, + bool split_play, + unsigned max_letter, + unsigned max_letter_mult) + { + if (!arena) + throw std::runtime_error("arena is null."); + auto& arena_r = *arena; + + const region_t& sp = get_state_players(arena); + bdd all_ap = arena->ap_vars(); + + if (std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { + return ap.ap_name() == out_mark_s + || ap.ap_name() == in_mark_s; + }) != arena->ap().cend()) + throw std::runtime_error("partitioned_game_relabel_here(): " + "You can not use " + + in_mark_s + " or " + out_mark_s + + " as propositions if relabeling."); + + bdd out_mark = bdd_ithvar(arena_r.register_ap(out_mark_s)); + bdd in_mark = bdd_ithvar(arena_r.register_ap(in_mark_s)); + + bdd outs = get_synthesis_outputs(arena) & out_mark; + bdd ins = bdd_exist(all_ap, outs) & in_mark; + + for (auto& e : arena_r.edges()) + e.cond = e.cond & (sp[e.src] ? out_mark : in_mark); + + game_relabeling_map res; + + if (relabel_env) + res.env_map + = partitioned_relabel_here(arena, split_env, max_letter, + max_letter_mult, ins, "__nv_in"); + if (relabel_play) + res.player_map + = partitioned_relabel_here(arena, split_play, max_letter, + max_letter_mult, outs, "__nv_out"); + return res; + } + + void + relabel_game_here(twa_graph_ptr& arena, + game_relabeling_map& rel_maps) + { + if (!arena) + throw std::runtime_error("arena is null."); + auto& arena_r = *arena; + + // Check that it was partitioned_game_relabel_here + if (!((std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { return ap.ap_name() == out_mark_s; }) + != arena->ap().cend()) + && (std::find_if(arena->ap().cbegin(), arena->ap().cend(), + [](const auto& ap) + { return ap.ap_name() == in_mark_s; })) + != arena->ap().cend())) + throw std::runtime_error("relabel_game_here(): " + + in_mark_s + " or " + out_mark_s + + " not registered with arena. 
" + "Not relabeled?"); + + if (!rel_maps.env_map.empty()) + relabel_here(arena, &rel_maps.env_map); + if (!rel_maps.player_map.empty()) + relabel_here(arena, &rel_maps.player_map); + + bdd dummy_ap = bdd_ithvar(arena_r.register_ap(in_mark_s)) + & bdd_ithvar(arena_r.register_ap(out_mark_s)); + + for (auto& e : arena_r.edges()) + e.cond = bdd_exist(e.cond, dummy_ap); + + arena_r.unregister_ap(arena_r.register_ap(in_mark_s)); + arena_r.unregister_ap(arena_r.register_ap(out_mark_s)); + + return; + } + } // spot diff --git a/spot/twaalgos/synthesis.hh b/spot/twaalgos/synthesis.hh index 95590504c..2d9c0600a 100644 --- a/spot/twaalgos/synthesis.hh +++ b/spot/twaalgos/synthesis.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -21,6 +21,7 @@ #include #include +#include #include namespace spot @@ -36,8 +37,8 @@ namespace spot /// p -- cond --> q cond in 2^2^AP /// into a set of transitions of the form /// p -- {a} --> (p,a) -- o --> q - /// for each a in cond \cap 2^2^I - /// and where o = (cond & a) \cap 2^2^(O) + /// for each a in cond ∪ 2^2^I + /// and where o = (cond & a) ∪ 2^2^O. /// /// By definition, the states p are deterministic, /// only the states of the form @@ -86,6 +87,7 @@ namespace spot DPA_SPLIT, LAR, LAR_OLD, + ACD, }; struct bench_var @@ -96,11 +98,14 @@ namespace spot double paritize_time = 0.0; double solve_time = 0.0; double strat2aut_time = 0.0; + double simplify_strat_time = 0.0; double aig_time = 0.0; unsigned nb_states_arena = 0; unsigned nb_states_arena_env = 0; unsigned nb_strat_states = 0; unsigned nb_strat_edges = 0; + unsigned nb_simpl_strat_states = 0; + unsigned nb_simpl_strat_edges = 0; unsigned nb_latches = 0; unsigned nb_gates = 0; bool realizable = false; @@ -237,10 +242,12 @@ namespace spot /// \param f The formula to synthesize a strategy for /// \param output_aps A vector with the name of all output properties. /// All APs not named in this vector are treated as inputs + /// \param want_strategy Set to false if we don't want to construct the + /// strategy but only test realizability. SPOT_API mealy_like try_create_direct_strategy(formula f, const std::vector& output_aps, - synthesis_info& gi); + synthesis_info& gi, bool want_strategy = false); /// \ingroup synthesis /// \brief Solve a game, and update synthesis_info @@ -250,4 +257,39 @@ namespace spot SPOT_API bool solve_game(twa_graph_ptr arena, synthesis_info& gi); + struct SPOT_API game_relabeling_map + { + relabeling_map env_map; + relabeling_map player_map; + }; + + /// \ingroup synthesis + /// \brief Tries to relabel a SPLIT game \a arena using fresh propositions. + /// Can be applied to env or player depending on \a relabel_env + /// and \a relabel_play. The arguments \a split_env and \a split_play + /// determine whether or not env and player edges are to + /// be split into several transitions labelled by letters not conditions. + /// + /// \return pair of relabeling_map, first is for env, second is for player. + /// The maps are empty if no relabeling was performed + /// \note Can also be applied to split mealy machine. 
+ /// \note partitioned_relabel_here can not be used directly if there are + /// T (true conditions) + SPOT_API game_relabeling_map + partitioned_game_relabel_here(twa_graph_ptr& arena, + bool relabel_env, + bool relabel_play, + bool split_env = false, + bool split_play = false, + unsigned max_letter = -1u, + unsigned max_letter_mult = -1u); + + /// \ingroup synthesis + /// \brief Undoes a relabeling done by partitioned_game_relabel_here. + /// A dedicated function is necessary in order to remove the + /// variables tagging env and player conditions + SPOT_API void + relabel_game_here(twa_graph_ptr& arena, + game_relabeling_map& rel_maps); + } diff --git a/spot/twaalgos/toparity.cc b/spot/twaalgos/toparity.cc index 0b46e6224..a82c7d57a 100644 --- a/spot/twaalgos/toparity.cc +++ b/spot/twaalgos/toparity.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2018-2020 Laboratoire de Recherche et Développement +// Copyright (C) 2018-2020, 2022 Laboratoire de Recherche et Développement // de l'Epita. // // This file is part of Spot, a model checking library. @@ -18,31 +18,2616 @@ // along with this program. If not, see . #include "config.h" -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include +#include #include #include -#include +#include #include -#include +#include + +#include +#include +#include + +namespace std +{ + template + inline void hash_combine(size_t &seed, T const &v) + { + seed ^= std::hash()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + } + + template + struct hash> + { + typedef vector argument_type; + typedef std::size_t result_type; + result_type operator()(argument_type const &in) const + { + size_t size = in.size(); + size_t seed = 0; + for (size_t i = 0; i < size; i++) + // Combine the hash of the current vector with the hashes of the + // previous ones + hash_combine(seed, in[i]); + return seed; + } + }; +} -#include namespace spot { + inline void + assign_color(acc_cond::mark_t &mark, unsigned col) + { + if (col < SPOT_MAX_ACCSETS) + mark.set(col); + else + acc_cond::mark_t{SPOT_MAX_ACCSETS}; + } + + // Describes if we want to test if it is a Büchi, co-Büchi,… type automaton. + enum cond_kind + { + BUCHI, + CO_BUCHI, + // A parity condition with a Inf as outermost term + INF_PARITY, + // A parity condition with a Fin as outermost term + FIN_PARITY + }; + + // This enum describes the status of an edge + enum edge_status + { + NOT_MARKED, + MARKED, + IMPOSSIBLE, + LINK_SCC + }; + + static bool + cond_type_main_aux(const twa_graph_ptr &aut, const cond_kind kind, + const bool need_equivalent, + std::vector &status, + std::vector &res_colors, + acc_cond &new_cond, bool &was_able_to_color, + unsigned max_col) + { + auto& ev = aut->edge_vector(); + const auto ev_size = ev.size(); + const auto aut_init = aut->get_init_state_number(); + was_able_to_color = false; + status = std::vector(ev_size, NOT_MARKED); + res_colors = std::vector(ev_size); + // Used by accepting_transitions_scc. + auto keep = std::unique_ptr(make_bitvect(ev_size)); + keep->set_all(); + + // Number of edges colored by the procedure, used to test equivalence for + // parity + unsigned nb_colored = 0; + + // We need to say that a transition between 2 SCC doesn't have to get a + // color. 
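+    // (Such an edge is taken only finitely often by any run, so its color
+    // can never influence acceptance; it is marked LINK_SCC and counted as
+    // already colored.)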
+ scc_info si(aut, aut_init, nullptr, nullptr, scc_info_options::NONE); + status[0] = LINK_SCC; + if (si.scc_count() > 1) + { + for (unsigned edge_number = 1; edge_number < ev_size; ++edge_number) + { + auto& e = ev[edge_number]; + if (si.scc_of(e.src) != si.scc_of(e.dst)) + { + status[edge_number] = LINK_SCC; + ++nb_colored; + } + } + } + + // If we need to convert to (co-)Büchi, we have to search one accepting + // set. With parity there is no limit. + bool want_parity = kind == cond_kind::FIN_PARITY || + kind == cond_kind::INF_PARITY; + unsigned max_iter = want_parity ? -1U : 1; + + unsigned color = max_col; + // Do we want always accepting transitions? + // Don't consider CO_BUCHI as it is done by Büchi + bool search_inf = kind != cond_kind::FIN_PARITY; + + using filter_data_t = std::pair &>; + + scc_info::edge_filter filter = + [](const twa_graph::edge_storage_t &t, unsigned, void *data) + -> scc_info::edge_filter_choice + { + auto &d = *static_cast(data); + // We only keep transitions that can be marked + if (d.second[d.first->edge_number(t)] == NOT_MARKED) + return scc_info::edge_filter_choice::keep; + else + return scc_info::edge_filter_choice::cut; + }; + std::vector not_decidable_transitions(ev_size, false); + auto aut_acc = aut->get_acceptance(); + auto aut_acc_comp = aut_acc.complement(); + for (unsigned iter = 0; iter < max_iter; ++iter) + { + // Share the code with Büchi-type + if (kind == CO_BUCHI) + std::swap(aut_acc, aut_acc_comp); + std::fill(not_decidable_transitions.begin(), + not_decidable_transitions.end(), false); + auto cond = acc_cond(search_inf ? aut_acc_comp : aut_acc); + auto filter_data = filter_data_t{aut, status}; + scc_info si(aut, aut_init, filter, &filter_data, + scc_info_options::TRACK_STATES); + if (search_inf) + si.determine_unknown_acceptance(); + bool worked = false; + unsigned ssc_size = si.scc_count(); + for (unsigned scc = 0; scc < ssc_size; ++scc) + { + // scc_info can detect that we will not be able to find an + // accepting cycle. + if ((search_inf && si.is_accepting_scc(scc)) || !search_inf) + { + accepting_transitions_scc(si, scc, cond, {}, + not_decidable_transitions, *keep); + for (auto &e : si.inner_edges_of(scc)) + { + auto edge_number = aut->edge_number(e); + if (!not_decidable_transitions[edge_number]) + { + assert(!res_colors[edge_number]); + if (color != -1U) + assign_color(res_colors[edge_number], color); + was_able_to_color = true; + status[edge_number] = MARKED; + ++nb_colored; + keep->clear(edge_number); + worked = true; + } + } + } + } + + if (color-- == -1U) + break; + search_inf = !search_inf; + // If we were not able to add color, we have to add status 2 to + // remaining transitions. + if (!worked && !need_equivalent) + { + std::replace(status.begin(), status.end(), NOT_MARKED, IMPOSSIBLE); + break; + } + } + + acc_cond::acc_code new_code; + switch (kind) + { + case cond_kind::BUCHI: + new_code = acc_cond::acc_code::buchi(); + break; + case cond_kind::CO_BUCHI: + new_code = acc_cond::acc_code::cobuchi(); + break; + case cond_kind::INF_PARITY: + case cond_kind::FIN_PARITY: + new_code = acc_cond::acc_code::parity_max( + kind == cond_kind::INF_PARITY, SPOT_MAX_ACCSETS); + break; + } + + new_cond = acc_cond(new_code); + + // We check parity + if (need_equivalent) + { + // For parity, it's equivalent if every transition has a color + // (status 1) or links 2 SCCs. 
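+        // (Entry 0 of the edge vector is a dummy, never a real edge, hence
+        // the comparison against ev_size - 1.)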
+ if (kind == cond_kind::INF_PARITY || kind == cond_kind::FIN_PARITY) + return nb_colored == ev_size - 1; + else + { + // For Büchi, we remove the transitions that have {0} in the + // result from aut and if there is an accepting cycle, res is not + // equivalent to aut. + // For co-Büchi, it's the same but we don't want to find a + // rejecting cycle. + using filter_data_t = std::pair; + + scc_info::edge_filter filter = + [](const twa_graph::edge_storage_t &t, unsigned, void *data) + -> scc_info::edge_filter_choice + { + auto &d = *static_cast(data); + if (d.second.get(d.first->edge_number(t))) + return scc_info::edge_filter_choice::keep; + else + return scc_info::edge_filter_choice::cut; + }; + + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc)); + + filter_data_t filter_data = {aut, *keep}; + scc_info si(aut, aut_init, filter, &filter_data); + si.determine_unknown_acceptance(); + const auto num_scc = si.scc_count(); + for (unsigned scc = 0; scc < num_scc; ++scc) + if (si.is_accepting_scc(scc)) + { + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc_comp)); + return false; + } + if (kind == CO_BUCHI) + aut->set_acceptance(acc_cond(aut_acc_comp)); + } + } + + return true; + } + + static twa_graph_ptr + cond_type_main(const twa_graph_ptr &aut, const cond_kind kind, + bool &was_able_to_color, unsigned max_color) + { + std::vector res_colors; + std::vector status; + acc_cond new_cond; + if (cond_type_main_aux(aut, kind, true, status, res_colors, new_cond, + was_able_to_color, max_color)) + { + auto res = make_twa_graph(aut, twa::prop_set::all()); + auto &res_vector = res->edge_vector(); + unsigned rv_size = res_vector.size(); + for (unsigned i = 1; i < rv_size; ++i) + res_vector[i].acc = res_colors[i]; + res->set_acceptance(new_cond); + return res; + } + return nullptr; + } + + twa_graph_ptr + parity_type_to_parity(const twa_graph_ptr &aut) + { + bool odd_cond, max_cond; + bool parit = aut->acc().is_parity(max_cond, odd_cond); + // If it is parity, we just copy + if (parit) + { + if (!max_cond) + return change_parity(aut, parity_kind_max, parity_style_any); + auto res = make_twa_graph(aut, twa::prop_set::all()); + res->copy_acceptance_of(aut); + return res; + } + bool was_able_to_color; + // If the automaton is parity-type with a condition that has Inf as + // outermost term + auto res = cond_type_main(aut, cond_kind::INF_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); + + // If it was impossible to find an accepting edge, it is perhaps possible + // to find a rejecting transition + if (res == nullptr && !was_able_to_color) + res = cond_type_main(aut, cond_kind::FIN_PARITY, + was_able_to_color, SPOT_MAX_ACCSETS - 1); + if (res) + { + res->prop_state_acc(false); + reduce_parity_here(res); + } + return res; + } + + twa_graph_ptr + buchi_type_to_buchi(const twa_graph_ptr &aut) + { + bool useless; + return cond_type_main(aut, cond_kind::BUCHI, useless, 0); + } + + twa_graph_ptr + co_buchi_type_to_co_buchi(const twa_graph_ptr &aut) + { + bool useless; + return cond_type_main(aut, cond_kind::CO_BUCHI, useless, 0); + } + +// New version for paritizing + +// data type used in a memory for CAR and IAR. 
+// TAR is a particular case +#if MAX_ACCSETS < UCHAR_MAX + using memory_type = unsigned char; + #define MAX_MEM_ELEM UCHAR_MAX +#elif MAX_ACCSETS < USHRT_MAX + using memory_type = unsigned short; + #define MAX_MEM_ELEM USHRT_MAX +#else + using memory_type = unsigned; + #define MAX_MEM_ELEM UINT_MAX +#endif + + template + using memory = std::vector; + + // Maps a state of the automaton to a parity_state + class state_2_lar + { + public: + // If to_parity wants to find the newest or the oldest or both, we + // adapt the algorithms + enum memory_order + { + ONLY_NEWEST, + ONLY_OLDEST, + BOTH + }; + + class node + { + public: + // Color that lead to this node + memory_type color_; + // For a state in states_, any child can be taken. While a unique state + // could be used when we search an existing state, here we have + // to consider opt_.search_ex = False, opt_.use_last_post_process = True. + // This configuration can lead to 2 states in the same node. For example + // if we add [0 1 | 2 3] and [0 1 | 3 2] where '|' says which part of the + // memory can be reordered (right). + std::vector states_; + std::vector children_; + // A timer used to detect which child is the oldest + unsigned timer_; + + node() : node(MAX_MEM_ELEM, -1U) + { + } + + node(memory_type val, unsigned timer) : color_(val), timer_(timer) + { + } + + ~node() + { + for (auto c : children_) + delete c; + } + }; + + std::vector nodes_; + memory_order order_; + unsigned timer_; + + state_2_lar() : timer_(0) + { + } + + void + init(unsigned nb_states, memory_order order) + { + order_ = order; + nodes_.reserve(nb_states); + for (unsigned i = 0; i < nb_states; ++i) + nodes_.push_back(new node()); + } + + ~state_2_lar() + { + for (auto x : nodes_) + delete x; + } + + void + add_new_path(unsigned state, const memory &vals, + unsigned res_state, unsigned nb_seen) + { + ++timer_; + node *current = nodes_[state]; + // Position in vals + int pos = vals.size() - 1; + while (true) + { + if (pos == (int)(nb_seen - 1)) + current->states_.push_back(res_state); + if (pos == -1) + break; + const unsigned current_val = vals[pos]; + auto child = std::find_if(current->children_.begin(), + current->children_.end(), + [&](const auto &child) constexpr + { return child->color_ == current_val; }); + // If we don't have a child with the corresponding color… + if (child == current->children_.end()) + { + auto nn = new node(current_val, timer_); + current->children_.push_back(nn); + current = nn; + } + else + { + // If get_compatible_state wants the most recent + // (opt_.use_last or opt_.use_last_post_process), we help this + // function by moving this node to the last position. + // Otherwise the oldest leaf will be reachable from the first child. + // If we have use_last = false and use_last_post_process = true, + // we need to access to the oldest and newest child. As the tree is + // smallest when we want to access to the oldest value, we continue + // to move the value to the last position and compute the oldest + // child in get_compatible_state. + if (order_ != memory_order::ONLY_OLDEST) + { + std::iter_swap(child, current->children_.end() - 1); + current = current->children_.back(); + } + else + current = *child; + } + --pos; + } + } + + unsigned + get_compatible_state(unsigned state, const memory &m, + unsigned seen_nb, + bool use_last) const + { + int pos = m.size() - 1; + unsigned res = -1U; + node *current = nodes_[state]; + while (true) + { + const auto ¤t_states = current->states_; + if (!current_states.empty()) + res = use_last ? 
current_states.back() : current_states.front(); + + const auto ¤t_children = current->children_; + if (current_children.empty()) + { + assert(current->color_ == MAX_MEM_ELEM || pos == -1); + return res; + } + // If we are in the part of the memory where the order does not matter, + // we just take the oldest/newest state. + if (pos < (int)seen_nb) + { + if (order_ == BOTH) + { + if (!use_last) + current = *std::min_element( + current_children.begin(), current_children.end(), + [](const auto &x, const auto &y) constexpr + { return x->timer_ < y->timer_; }); + else + current = current_children.back(); + } + else + { + // add_new_path constructed the tree such that the oldest/newest + // leaf is reachable from the first child. + current = use_last ? current_children.back() + : current_children.front(); + } + } + else + { + auto current_val = m[pos]; + auto ch = std::find_if( + current_children.begin(), current_children.end(), + [&](const auto &x) constexpr + { return x->color_ == current_val; }); + if (ch != current_children.end()) + current = *ch; + else + return -1U; + } + --pos; + } + } + }; + + class to_parity_generator + { + private: + class relation + { + public: + // Size of the matrix + unsigned size_; + // A line/column is indexed by a partial memory + const std::vector> labels_; + // Matrix such that vals_[x][y] = ⊤ ⇔ vals_[x] > vals_[y] + std::vector vals_; + + inline bool + at(unsigned i, unsigned j) const + { + return vals_.at(i * size_ + j); + } + + inline void + set(unsigned i, unsigned j, bool val) + { + vals_[i * size_ + j] = val; + } + + // Test if m1 ⊆ m2 + bool is_included(memory m1, memory m2) + { + if (m1.size() > m2.size()) + return false; + assert(std::is_sorted(m1.begin(), m1.end())); + assert(std::is_sorted(m2.begin(), m2.end())); + memory diff; + std::set_union(m1.begin(), m1.end(), m2.begin(), m2.end(), + std::inserter(diff, diff.begin())); + return diff.size() == m2.size(); + } + + // Supposes that there is no duplicates. + relation(std::vector> &labels) + : size_(labels.size()), labels_(labels) + { + unsigned long long size_vals; + if (__builtin_umulll_overflow(size_, size_, &size_vals)) + throw std::bad_alloc(); + vals_ = std::vector(size_vals); + for (unsigned i = 0; i < size_; ++i) + for (unsigned j = 0; j < size_; ++j) + // We cannot have vals_[i] > vals_[j] and vals_[j] > vals_[i] + if (!at(j, i)) + set(i, j, (i != j && is_included(labels_[j], labels_[i]))); + // Remove x > z if ∃y s.t. x > y > z + simplify_relation(); + } + + // Apply a transitive reduction + void + simplify_relation() + { + for (unsigned j = 0; j < size_; ++j) + for (unsigned i = 0; i < size_; ++i) + if (at(i, j)) + for (unsigned k = 0; k < size_; ++k) + if (at(j, k)) + set(i, k, false); + } + + template + void + add_to_res_(const memory ¤t, + const memory &other, + memory &result) + { + assert(std::is_sorted(current.begin(), current.end())); + assert(std::is_sorted(other.begin(), other.end())); + std::set_difference(current.begin(), current.end(), + other.begin(), other.end(), + std::inserter(result, result.end())); + } + + // Gives a compatible ordered partial memory for the partial memory + // partial_mem. + memory + find_order(const memory &partial_mem) + { + // Now if we want to find an order, we start from the line + // that contains partial_mem in the matrix, we find a more restrictive + // order and add the value that are used in partial_mem but not in this + // "child" value. + // The call to simplify_relation implies that we are sure we have + // used the longest possible path. 
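+        // Illustration: with labels {0,1,2}, {0,1}, {0}, find_order({0,1,2})
+        // follows {0,1,2} -> {0,1} -> {0}, collecting the differences 2,
+        // then 1, then 0; after the final reversal the result is [0, 1, 2],
+        // i.e. colors shared with the most restrictive memories end up at
+        // the head.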
+ memory result; + auto elem = std::find(labels_.begin(), labels_.end(), partial_mem); + assert(elem != labels_.end()); + // Line that contains partial_mem + unsigned i = std::distance(labels_.begin(), elem); + while (true) + { + // The interval corresponding to the line i + auto vals_i_begin = vals_.begin() + (i * size_); + auto vals_i_end = vals_i_begin + size_; + // End of line i + auto child = std::find(vals_i_begin, vals_i_end, true); + // If there is a more restrictive memory, we use this "child" + if (child != vals_i_end) + { + unsigned child_pos = std::distance(vals_i_begin, child); + add_to_res_(labels_[i], labels_[child_pos], result); + i = child_pos; + } + // If there is no more restrictive memory, we just add the remaining + // memory. + else + { + add_to_res_(labels_[i], {}, result); + break; + } + } + // The order want that a value that is in the lowest value is a + // the head. + std::reverse(result.begin(), result.end()); + return result; + } + }; + + class scc_info_to_parity + { + private: + scc_info si_; + + public: + scc_info_to_parity(const const_twa_graph_ptr aut, + const acc_cond::mark_t removed = {}) + : si_(scc_and_mark_filter(aut, removed)) + { + } + + scc_info_to_parity(const scc_info lower_si, + const std::shared_ptr keep) + : si_(scc_and_mark_filter(lower_si, 0, acc_cond::mark_t{}, *keep), + scc_info_options::NONE) + { + } + + std::vector + split_aut(acc_cond::mark_t mark = {}) + { + auto aut = si_.get_aut(); + const auto num_scc = si_.scc_count(); + const unsigned aut_num_states = aut->num_states(); + std::vector res(num_scc); + std::vector aut_to_res; + aut_to_res.reserve(aut_num_states); + for (auto &g : res) + { + g = make_twa_graph(aut->get_dict()); + g->copy_ap_of(aut); + g->copy_acceptance_of(aut); + g->prop_copy(aut, {true, true, false, false, false, true}); + auto orig = new std::vector(); + g->set_named_prop("original-states", orig); + } + const auto tp_orig_aut = + aut->get_named_prop>("original-states"); + for (unsigned i = 0; i < aut_num_states; ++i) + { + unsigned scc_i = si_.scc_of(i); + auto &g = res[scc_i]; + unsigned ns = g->new_state(); + unsigned ori = tp_orig_aut ? (*tp_orig_aut)[i] : i; + auto pr = g->get_named_prop>("original-states"); + pr->push_back(ori); + aut_to_res.push_back(ns); + } + + for (auto &e : aut->edges()) + { + unsigned src_scc = si_.scc_of(e.src); + unsigned dst_scc = si_.scc_of(e.dst); + if (src_scc == dst_scc && !(e.acc & mark)) + res[src_scc]->new_edge(aut_to_res[e.src], aut_to_res[e.dst], + e.cond, e.acc); + } + return res; + } + + std::vector + split_aut(const std::shared_ptr &keep) + { + auto aut = si_.get_aut(); + const auto num_scc = si_.scc_count(); + const unsigned aut_num_states = aut->num_states(); + std::vector res(num_scc); + std::vector aut_to_res; + aut_to_res.reserve(aut_num_states); + for (auto &g : res) + { + g = make_twa_graph(aut->get_dict()); + g->copy_ap_of(aut); + g->copy_acceptance_of(aut); + g->prop_copy(aut, {true, true, false, false, false, true}); + auto orig = new std::vector(); + g->set_named_prop("original-states", orig); + } + const auto tp_orig_aut = + aut->get_named_prop>("original-states"); + for (unsigned i = 0; i < aut_num_states; ++i) + { + unsigned scc_i = si_.scc_of(i); + auto &g = res[scc_i]; + unsigned ns = g->new_state(); + unsigned ori = tp_orig_aut ? 
(*tp_orig_aut)[i] : i; + auto pr = g->get_named_prop>("original-states"); + pr->push_back(ori); + aut_to_res.push_back(ns); + } + + const auto &ev = si_.get_aut()->edge_vector(); + auto ev_size = ev.size(); + for (unsigned i = 0; i < ev_size; ++i) + if (keep->get(i)) + { + auto &e = ev[i]; + unsigned scc_src = si_.scc_of(e.src); + if (scc_src == si_.scc_of(e.dst)) + res[scc_src]->new_edge(aut_to_res[e.src], aut_to_res[e.dst], + e.cond, e.acc); + } + return res; + } + + unsigned scc_count() + { + return si_.scc_count(); + } + + unsigned scc_of(unsigned state) + { + return si_.scc_of(state); + } + }; + + // Original automaton + const const_twa_graph_ptr aut_; + // Resulting parity automaton + twa_graph_ptr res_; + // options + to_parity_options opt_; + // nullptr if opt_.pretty_print is false + std::vector *names_ = nullptr; + // original_states. Is propagated if the original automaton has + // this named property + std::vector *orig_ = nullptr; + scc_info_to_parity si_; + bool need_purge_ = false; + // Tells if we are constructing a parity max odd + bool is_odd_ = false; + // min_color used in the automaton + 1 (result of max_set). + std::optional min_color_used_; + std::optional max_color_scc_; + std::optional max_color_used_; + std::vector state_to_res_; + std::vector res_to_aut_; + // Map a state of aut_ to every copy of this state. Used by a recursive call + // to to_parity by parity_prefix for example + std::vector> *state_to_nums_ = nullptr; + unsigned algo_used_ = 0; + + enum algorithm + { + CAR = 1, + IAR_RABIN = 1 << 1, + IAR_STREETT = 1 << 2, + TAR = 1 << 3, + RABIN_TO_BUCHI = 1 << 4, + STREETT_TO_COBUCHI = 1 << 5, + PARITY_TYPE = 1 << 6, + BUCHI_TYPE = 1 << 7, + CO_BUCHI_TYPE = 1 << 8, + PARITY_EQUIV = 1 << 9, + PARITY_PREFIX = 1 << 10, + PARITY_PREFIX_GENERAL = 1 << 11, + GENERIC_EMPTINESS = 1 << 12, + PARTIAL_DEGEN = 1 << 13, + ACC_CLEAN = 1 << 14, + NONE = 1 << 15 + }; + + static std::string + algorithm_to_str(const algorithm &algo) + { + switch (algo) + { + case CAR: + return "CAR"; + case IAR_RABIN: + return "IAR (Rabin)"; + case IAR_STREETT: + return "IAR (Streett)"; + case TAR: + return "TAR"; + case NONE: + return "None"; + case BUCHI_TYPE: + return "Büchi-type"; + case CO_BUCHI_TYPE: + return "co-Büchi-type"; + case PARITY_TYPE: + return "Parity-type"; + case PARITY_EQUIV: + return "Parity equivalent"; + case GENERIC_EMPTINESS: + return "Generic emptiness"; + case STREETT_TO_COBUCHI: + return "Streett to co-Büchi"; + case RABIN_TO_BUCHI: + return "Rabin to Büchi"; + case PARITY_PREFIX: + return "Parity-prefix"; + case PARITY_PREFIX_GENERAL: + return "Parity-prefix general"; + case PARTIAL_DEGEN: + return "Partial degeneralization"; + case ACC_CLEAN: + return "acceptance cleanup"; + } + SPOT_UNREACHABLE(); + } + + template + struct to_parity_state + { + unsigned state; + unsigned state_scc; + memory mem; + + to_parity_state(unsigned st, unsigned st_scc, memory m) : + state(st), state_scc(st_scc), mem(m) + {} + + to_parity_state(const to_parity_state &) = default; + to_parity_state(to_parity_state &&) noexcept = default; + + ~to_parity_state() noexcept = default; + + bool + operator<(const to_parity_state &other) const + { + if (state < other.state) + return true; + if (state > other.state) + return false; + if (state_scc < other.state_scc) + return true; + if (state_scc > other.state_scc) + return false; + if (mem < other.mem) + return true; + return false; + } + + std::string + to_str(const algorithm &algo) const + { + std::stringstream s; + s << state; + // An empty 
memory does not mean that we don't use LAR. For example + // if the condition is true. We don't display a useless memory. + if (!mem.empty()) + { + s << ",["; + const char delim = ','; + s << ((unsigned)mem[0]); + auto mem_size = mem.size(); + for (unsigned i = 1; i < mem_size; ++i) + s << delim << ((unsigned)mem[i]); + s << ']'; + } + s << ',' << algorithm_to_str(algo); + return s.str(); + } + + bool operator==(const to_parity_state &other) const + { + return state == other.state && state_scc == other.state_scc + && mem == other.mem; + } + }; + + template + struct to_parity_hash + { + size_t operator()(to_parity_state const &tp) const + { + size_t result = std::hash>{}(tp.mem); + std::hash_combine(result, tp.state); + std::hash_combine(result, tp.state_scc); + return result; + } + }; + + template + unsigned + add_res_state(const algorithm &algo, const to_parity_state &ps) + { + if (names_) + names_->emplace_back(ps.to_str(algo)); + orig_->push_back(ps.state); + auto res = res_->new_state(); + if (opt_.datas) + { + algo_used_ |= algo; + ++opt_.datas->nb_states_created; + } + assert(ps.state < aut_->num_states()); + // state_to_res_ could be updated even if there is already a value. + // However it would lead to a result close to BSCC. + // So it is easier to show the influence of BSCC when the value is not + // changed when there is already a value. + if (state_to_res_[ps.state] == -1U) + state_to_res_[ps.state] = res; + if (state_to_nums_) + { + assert(ps.state < state_to_nums_->size()); + (*state_to_nums_)[ps.state].push_back(res); + } + res_to_aut_.push_back(ps.state); + return res; + } + + unsigned + add_res_edge(unsigned res_src, unsigned res_dst, + const acc_cond::mark_t &mark, const bdd &cond, + const bool can_merge_edge = true, + robin_hood::unordered_map* + edge_cache = nullptr) + { + // In a parity automaton we just need the maximal value + auto simax = mark.max_set(); + + const bool need_cache = edge_cache != nullptr && can_merge_edge; + long long key = 0; + if (need_cache) + { + constexpr auto unsignedsize = sizeof(unsigned) * 8; + key = (long long)simax << unsignedsize | res_dst; + auto cache_value = edge_cache->find(key); + if (cache_value != edge_cache->end()) + { + auto edge_index = cache_value->second; + auto &existing_edge = res_->edge_vector()[edge_index]; + existing_edge.cond |= cond; + return edge_index; + } + } + + auto simplified = mark ? acc_cond::mark_t{simax - 1} + : acc_cond::mark_t{}; + assert(res_src != -1U); + assert(res_dst != -1U); + + // No edge already done in the current scc. 
+ if (!max_color_scc_.has_value()) + max_color_scc_.emplace(simax); + else + max_color_scc_.emplace(std::max(*max_color_scc_, simax)); + + // If it is the first edge of the result + if (!min_color_used_.has_value()) + { + assert(!max_color_used_.has_value()); + max_color_used_.emplace(simax); + min_color_used_.emplace(simax); + } + else + { + min_color_used_.emplace(std::min(*min_color_used_, simax)); + max_color_used_.emplace(std::max(*max_color_used_, simax)); + } + + auto new_edge_num = res_->new_edge(res_src, res_dst, cond, simplified); + if (need_cache) + edge_cache->emplace(std::make_pair(key, new_edge_num)); + + if (opt_.datas) + ++opt_.datas->nb_edges_created; + return new_edge_num; + } + + // copy + using coloring_function = + std::function; + + void + apply_copy_general(const const_twa_graph_ptr &sub_automaton, + const coloring_function &col_fun, + const algorithm &algo) + { + if (opt_.datas) + algo_used_ |= algo; + auto init_states = + sub_automaton->get_named_prop>("original-states"); + assert(init_states); + std::vector state_2_res_local; + auto sub_aut_ns = sub_automaton->num_states(); + state_2_res_local.reserve(sub_aut_ns); + for (unsigned state = 0; state < sub_aut_ns; ++state) + { + to_parity_state ps = {(*init_states)[state], state, {}}; + state_2_res_local.push_back(add_res_state(algo, ps)); + } + for (auto &e : sub_automaton->edges()) + { + auto new_mark = col_fun(e); + add_res_edge(state_2_res_local[e.src], state_2_res_local[e.dst], + new_mark, e.cond); + } + } + + // Case where one color is replaced by another. + // new_colors is a vector such that new_colors[i + 1] = j means that the + // color i is replaced by j. new_colors[0] is the value for an uncolored + // edge. + void + apply_copy(const const_twa_graph_ptr &sub_aut, + const std::vector &new_colors, + const algorithm &algo) + { + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + acc_cond::mark_t res{}; + for (auto c : edge.acc.sets()) + { + auto new_col = new_colors[c + 1]; + if (new_col != -1U) + assign_color(res, new_col); + } + if (!edge.acc && new_colors[0] != -1U) + assign_color(res, new_colors[0]); + return res; + }; + apply_copy_general(sub_aut, col_fun, algo); + } + + // Case where new_color is a function such that edge_vector[i] should + // be colored by new_color[i]. + void + apply_copy_edge_index(const const_twa_graph_ptr &sub_aut, + const std::vector &new_color, + const algorithm &algo) + { + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + auto res = new_color[sub_aut->edge_number(edge)]; + if (res == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{res}; + }; + apply_copy_general(sub_aut, col_fun, algo); + } + + // Create a memory for the first state created by apply_lar. + // If the algorithm is IAR, it also fills pairs_indices that + // contains the indices of the pairs that can be moved to the head of + // the memory. 
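+    // (For CAR over colors {0, ..., k-1} this is simply [0, 1, ..., k-1];
+    // for IAR it is one entry per Rabin/Streett pair, and for TAR one entry
+    // per edge number, starting at 1.)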
+ template + memory + initial_memory_of(const const_twa_graph_ptr &sub_aut, + const std::vector &pairs, + std::vector, memory>> &relations) + { + unsigned init_state = sub_aut->get_init_state_number(); + if constexpr (algo == algorithm::CAR) + { + unsigned max_set = sub_aut->get_acceptance().used_sets().max_set(); + memory values(max_set); + std::iota(values.begin(), values.end(), 0); + if (opt_.force_order) + apply_move_heuristic(init_state, values, max_set, relations); + return values; + } + else if constexpr (algo == algorithm::TAR) + { + if (UINT_MAX < sub_aut->num_edges()) + { + throw std::runtime_error("Too many edges for TAR"); + } + const auto &ev = sub_aut->edge_vector(); + const auto ev_size = ev.size(); + memory values(ev_size - 1); + // 0 is not an edge number + std::iota(values.begin(), values.end(), 1); + if (opt_.force_order && sub_aut->num_states() > 1) + { + unsigned free_pos = 0; + // If a transition goes to state, it is at the head of the memory. + for (unsigned i = 1; i < ev_size; ++i) + if (ev[i].dst == init_state) + { + std::swap(values[i - 1], values[free_pos]); + ++free_pos; + } + } + return values; + } + else + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + memory values(pairs.size()); + std::iota(values.begin(), values.end(), 0); + if (opt_.force_order) + apply_move_heuristic(init_state, values, values.size(), relations); + return values; + } + } + + // LAR + algorithm + choose_lar(const acc_cond &scc_condition, + std::vector &pairs, + const unsigned num_edges) + { + std::vector pairs1, pairs2; + bool is_rabin_like = scc_condition.is_rabin_like(pairs1); + bool is_streett_like = scc_condition.is_streett_like(pairs2); + // If we cannot apply IAR and TAR and CAR are not used + if ((!(is_rabin_like || is_streett_like) || !opt_.iar) + && !(opt_.car || opt_.tar)) + throw std::runtime_error("to_parity needs CAR or TAR to process " + "a condition that is not a Rabin or Streett " + "condition or if IAR is not enabled"); + remove_duplicates(pairs1); + remove_duplicates(pairs2); + unsigned num_col = scc_condition.num_sets(); + + auto num_pairs1 = (opt_.iar && is_streett_like) ? pairs2.size() : -1UL; + auto num_pairs2 = (opt_.iar && is_rabin_like) ? pairs1.size() : -1UL; + + // In practice, if the number of pairs is bigger than the number of + // colors, it will create a color greater than SPOT_MAX_ACCSETS, so + // we don't consider that it is a Rabin condition. + // In this case, if CAR or TAR is not used, it will throw a Runtime + // Error. + + bool iar_overflow = false; + if ((num_pairs1 > MAX_MEM_ELEM) && (num_pairs2 > MAX_MEM_ELEM)) + { + num_pairs1 = num_pairs2 = -1U; + iar_overflow = true; + } + + const std::vector + number_elements = + { + (opt_.iar && is_streett_like) ? pairs2.size() : -1UL, + (opt_.iar && is_rabin_like) ? pairs1.size() : -1UL, + opt_.car ? num_col : -1UL, + opt_.tar ? num_edges : -1UL}; + constexpr std::array algos = {IAR_STREETT, IAR_RABIN, CAR, + TAR}; + int min_pos = std::distance(number_elements.begin(), + std::min_element(number_elements.begin(), + number_elements.end())); + + if (number_elements[min_pos] == -1U && iar_overflow) + throw std::runtime_error( + "Too many Rabin/Streett pairs, try to increase SPOT_MAX_ACCSETS"); + algorithm algo = algos[min_pos]; + if (algo == IAR_RABIN) + pairs = pairs1; + else if (algo == IAR_STREETT) + pairs = pairs2; + return algo; + } + + // Remove duplicates in pairs without changing the order. 
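+ // For example, [(Fin{0}, Inf{1}), (Fin{2}, Inf{3}), (Fin{0}, Inf{1})] + // becomes [(Fin{0}, Inf{1}), (Fin{2}, Inf{3})]: only the first occurrence + // of each pair is kept.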
+ static void + remove_duplicates(std::vector &pairs) + { + std::vector res; + res.reserve(pairs.size()); + for (auto &elem : pairs) + if (std::find(res.begin(), res.end(), elem) == res.end()) + res.emplace_back(elem); + pairs = res; + } + + template + acc_cond::mark_t + fin(const std::vector &pairs, unsigned k) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + if constexpr (algo == IAR_RABIN) + return pairs[k].fin; + else + return pairs[k].inf; + } + + template + acc_cond::mark_t + inf(const std::vector &pairs, unsigned k) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT); + if constexpr (algo == IAR_RABIN) + return pairs[k].inf; + else + return pairs[k].fin; + } + + template + std::vector, memory>> + find_relations(const const_twa_graph_ptr &sub_aut, + const std::vector &pairs, + const std::set &pairs_indices) + { + static_assert(algo == IAR_RABIN || algo == IAR_STREETT || algo == CAR); + const unsigned sub_aut_num_states = sub_aut->num_states(); + // Set of memory elements that can be at the head of the memory for + // a given state. + std::vector>> incomem(sub_aut_num_states); + // Add a mark with all colors/pairs to deal with the order of the + // original state + if constexpr (algo == algorithm::CAR) + { + auto ms = sub_aut->get_acceptance().used_sets().max_set(); + memory m(ms); + std::iota(m.begin(), m.end(), 0); + incomem[sub_aut->get_init_state_number()].insert(std::move(m)); + } + else if constexpr (algo == IAR_RABIN || algo == IAR_STREETT) + { + memory m(pairs_indices.begin(), pairs_indices.end()); + incomem[sub_aut->get_init_state_number()].insert(std::move(m)); + } + + for (auto &e : sub_aut->edges()) + { + auto e_sets = e.acc.sets(); + if constexpr (algo == algorithm::CAR) + incomem[e.dst].insert({e_sets.begin(), e_sets.end()}); + // IAR + else + { + memory parti; + for (unsigned k : pairs_indices) + if (e.acc & fin(pairs, k)) + parti.push_back(k); + incomem[e.dst].insert(parti); + } + } + std::vector, memory>> res; + res.reserve(sub_aut_num_states); + for (unsigned i = 0; i < sub_aut_num_states; ++i) + { + std::map, memory> ma; + // Memory incoming to state i. + std::vector> elem(incomem[i].begin(), + incomem[i].end()); + relation rel(elem); + for (auto &x : rel.labels_) + ma.insert({x, rel.find_order(x)}); + res.emplace_back(ma); + } + return res; + } + + void + apply_move_heuristic(unsigned state, memory &m, + unsigned nb_seen, + std::vector, + memory>> &relations) + { + // If we move 0 or 1 color we cannot change the order + if (nb_seen < 2) + return; + memory seen{m.begin(), m.begin() + nb_seen}; + const auto &new_prefix = relations[state][seen]; + + unsigned new_prefix_size = new_prefix.size(); + for (unsigned i = 0; i < new_prefix_size; ++i) + m[i] = new_prefix[i]; + } + + template + void + find_new_memory(unsigned state, memory &m, unsigned edge_number, + const acc_cond::mark_t &colors, + const std::vector &pairs, + const std::set &pairs_indices, + unsigned &nb_seen, + unsigned &h, + std::vector, memory>> &relations) + { + if constexpr (algo == TAR) + { + (void)state; + auto pos = std::find(m.begin(), m.end(), edge_number); + assert(pos != m.end()); + h = std::distance(m.begin(), pos); + std::rotate(m.begin(), pos, pos + 1); + } + else if constexpr (algo == CAR) + { + (void)edge_number; + for (auto k : colors.sets()) + { + auto it = std::find(m.begin(), m.end(), k); + // A color can exist in the automaton but not in the condition. 
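+ // Such a color is simply skipped. For example, with memory [3, 1, 0, 2] + // and edge colors {0, 2}: color 0 is moved to the front ([0, 3, 1, 2], + // h = 3), then color 2 ([2, 0, 3, 1], h = 4).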
+ if (it != m.end()) + { + h = std::max(h, (unsigned)(it - m.begin()) + 1); + std::rotate(m.begin(), it, it + 1); + ++nb_seen; + } + } + if (opt_.force_order) + { + // apply_move_heuristic needs an increasing list of values + std::reverse(m.begin(), m.begin() + nb_seen); + apply_move_heuristic(state, m, nb_seen, relations); + } + } + else if constexpr (algo == IAR_RABIN || algo == IAR_STREETT) + { + (void)edge_number; + for (auto k = pairs_indices.rbegin(); k != pairs_indices.rend(); ++k) + if (colors & fin(pairs, *k)) + { + ++nb_seen; + auto it = std::find(m.begin(), m.end(), *k); + assert(it != m.end()); + // move the pair in front of the permutation + std::rotate(m.begin(), it, it + 1); + } + if (opt_.force_order) + { + // As with CAR, in relation the partial memory is sorted. That is + // why the previous loop use a reverse iterator. + assert(std::is_sorted(m.begin(), m.begin() + nb_seen)); + apply_move_heuristic(state, m, nb_seen, relations); + } + } + } + + template + void + compute_new_color_lar(const const_twa_graph_ptr &sub_aut, + const memory ¤t_mem, + const memory &new_perm, + unsigned &h, + const acc_cond::mark_t &edge_colors, + acc_cond::mark_t &acc, + const std::vector &pairs, + robin_hood::unordered_map& + acc_cache) + { + // This function should not be called with algo ∉ [CAR, IAR, TAR]. + static_assert(algo == CAR || algo == IAR_RABIN || algo == IAR_STREETT + || algo == TAR); + assert(!acc); + auto sub_aut_cond = sub_aut->acc(); + if constexpr (algo == CAR) + { + acc_cond::mark_t m(new_perm.begin(), new_perm.begin() + h); + auto cc = acc_cache.find(m); + bool rej; + if (cc != acc_cache.end()) + rej = cc->second; + else + { + rej = !sub_aut_cond.accepting(m); + acc_cache.insert({m, rej}); + } + unsigned value = 2 * h + rej - 1; + if (value != -1U) + assign_color(acc, value); + return; + } + else if constexpr (algo == TAR) + { + auto &edge_vector = sub_aut->edge_vector(); + acc_cond::mark_t acc_seen {}; + for (unsigned i = 0; i <= h; ++i) + acc_seen |= edge_vector[new_perm[i]].acc; + + auto cc = acc_cache.find(acc_seen); + bool rej; + if (cc != acc_cache.end()) + rej = cc->second; + else + { + rej = !sub_aut_cond.accepting(acc_seen); + acc_cache.insert({acc_seen, rej}); + } + + unsigned acc_col = 2 * h + rej - 1; + if (acc_col != -1U) + assign_color(acc, acc_col); + } + else + { + // IAR_RABIN produces a parity max even condition. If res_ + // is parity max odd, we add 1 to a transition to produce a parity max + // odd automaton. + unsigned delta_acc = ((algo == IAR_RABIN) && is_odd_) - 1; + + unsigned maxint = -1U; + for (int k = current_mem.size() - 1; k >= 0; --k) + { + unsigned pk = current_mem[k]; + + if (!inf(pairs, pk) || (edge_colors + & (pairs[pk].fin | pairs[pk].inf))) + { + maxint = k; + break; + } + } + + unsigned value; + if (maxint == -1U) + value = delta_acc; + else if (edge_colors & fin(pairs, current_mem[maxint])) + value = 2 * maxint + 2 + delta_acc; + else + value = 2 * maxint + 1 + delta_acc; + + if (value != -1U) + assign_color(acc, value); + } + } + + void + change_to_odd() + { + if (is_odd_) + return; + is_odd_ = true; + // We can reduce if we don't have an edge without color. + bool can_reduce = (min_color_used_.has_value() && *min_color_used_ != 0); + int shift; + + if (can_reduce) + shift = -1 * (*min_color_used_ - (*min_color_used_ % 2) + 1); + else + shift = 1; + + // If we cannot decrease and we already the the maximum color, we don't + // have to try. Constructs a mark_t to avoid to make report_too_many_sets + // public. 
+ if (!can_reduce && max_color_used_.value_or(-1) + shift == MAX_ACCSETS) + acc_cond::mark_t {SPOT_MAX_ACCSETS}; + if (max_color_used_.has_value()) + *max_color_used_ += shift; + if (min_color_used_.has_value()) + *min_color_used_ += shift; + for (auto &e : res_->edges()) + { + auto new_val = e.acc.max_set() - 1 + shift; + if (new_val != -1U) + e.acc = { new_val }; + else + e.acc = {}; + } + } + + template + void + apply_lar(twa_graph_ptr &sub_aut, + const std::vector &pairs) + { + if constexpr (algo != IAR_RABIN) + change_to_odd(); + // avoids to call LAR if there is one color/pair/transition. + // LAR can work with this kind of condition but some optimizations + // like searching an existing state suppose that there is at least + // one element. + if ((algo == CAR && sub_aut->acc().num_sets() == 0) + || ((algo == IAR_RABIN || algo == IAR_STREETT) && pairs.empty()) + || (algo == TAR && sub_aut->num_edges() == 0)) + { + bool need_col = sub_aut->acc().is_t() != is_odd_; + auto col_fun = [&](const twa_graph::edge_storage_t &) + { + return need_col ? acc_cond::mark_t{0} : acc_cond::mark_t{}; + }; + apply_copy_general(sub_aut, col_fun, algo); + return; + } + // We sometimes need to have a list of the states + // of res_ constructed by this call to apply_lar. + const bool use_bscc = opt_.bscc; + const bool use_last_post_process = opt_.use_last_post_process; + constexpr bool is_tar = algo == TAR; + const bool need_tree = !is_tar + && (opt_.search_ex || use_last_post_process); + const bool need_state_list = use_last_post_process || use_bscc; + const bool is_dfs = opt_.lar_dfs; + // state_2_lar adapts add_new_state such that depending on the + // value of use_last in get_compatible_state, we will be able + // to find a compatible state faster. + state_2_lar::memory_order order; + if (!opt_.use_last) + { + if (opt_.use_last_post_process) + order = state_2_lar::memory_order::BOTH; + else + order = state_2_lar::memory_order::ONLY_OLDEST; + } + else + order = state_2_lar::memory_order::ONLY_NEWEST; + state_2_lar s2l; + if (need_tree) + s2l.init(sub_aut->num_states(), order); + std::vector states_scc_res; + if (need_state_list) + states_scc_res.reserve(sub_aut->num_states()); + auto init = + sub_aut->get_named_prop>("original-states"); + + if (opt_.propagate_col) + propagate_marks_here(sub_aut); + + auto init_state = sub_aut->get_init_state_number(); + robin_hood::unordered_map, + unsigned, to_parity_hash> ps_2_num; + unsigned lb_size; + if constexpr (algo == TAR) + lb_size = aut_->num_edges(); + else if constexpr (algo == CAR) + lb_size = aut_->num_states() * aut_->acc().num_sets(); + else + lb_size = aut_->num_states() * pairs.size(); + // num_2_ps maps a state of the result to a parity_state. As this function + // does not always create the first state, we need to add + // "- nb_states_before" to get a value. + const unsigned nb_states_before = res_->num_states(); + std::vector> num_2_ps; + // At least one copy of each state will be created. + num_2_ps.reserve(lb_size + num_2_ps.size()); + ps_2_num.reserve(lb_size + num_2_ps.size()); + + std::deque todo; + // return a pair new_state, is_new such that + // ps is associated to the state new_state in res_ + // and is_new is true if a new state was created by + // get_state + // We store 2 unsigned in a long long. 
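+ // For example, with 32-bit unsigned, the key built in add_res_edge for + // simax = 3 and res_dst = 17 is (3LL << 32) | 17 == 0x300000011.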
+ static_assert(sizeof(long long) >= 2 * sizeof(unsigned)); + robin_hood::unordered_map* edge_cache = nullptr; + if (!use_last_post_process) + { + edge_cache = new robin_hood::unordered_map(); + edge_cache->reserve(sub_aut->num_edges()); + } + auto get_state = [&](const to_parity_state &&ps) constexpr + { + auto it = ps_2_num.find(ps); + if (it == ps_2_num.end()) + { + unsigned nb = add_res_state(algo, ps); + ps_2_num[ps] = nb; + assert(nb == num_2_ps.size() + nb_states_before); + num_2_ps.emplace_back(ps); + todo.push_back(nb); + if (need_state_list) + states_scc_res.push_back(nb); + return std::pair{nb, true}; + } + return std::pair{it->second, false}; + }; + + std::set pairs_indices; + std::vector, memory>> relations; + if constexpr (algo == IAR_STREETT || algo == IAR_RABIN) + { + const auto num_pairs = pairs.size(); + for (unsigned k = 0; k < num_pairs; ++k) + if (fin(pairs, k)) + pairs_indices.insert(k); + } + + if constexpr (algo != TAR) + if (opt_.force_order) + relations = find_relations(sub_aut, pairs, pairs_indices); + + auto m = initial_memory_of(sub_aut, pairs, relations); + + assert(init); + auto init_res = get_state({(*init)[init_state], init_state, m}).first; + // A path is added when it is a destination. That is why we need to + // add the initial state. + unsigned nb_edges_before = res_->num_edges(); + std::vector edge_to_seen_nb; + if (use_last_post_process && algo != TAR) + edge_to_seen_nb.reserve(sub_aut->num_edges()); + if constexpr(!is_tar) + if (need_tree) + s2l.add_new_path(init_state, m, init_res, 0); + + robin_hood::unordered_map acc_cache; + // Main loop + while (!todo.empty()) + { + if (edge_cache) + edge_cache->clear(); + // If we want to process the most recent state of the result, we + // take the last value + unsigned res_current = is_dfs ? todo.back() : todo.front(); + unsigned res_index = res_current - nb_states_before; + const auto ¤t_ps = num_2_ps[res_index]; + const auto current_mem = current_ps.mem; + if (is_dfs) + todo.pop_back(); + else + todo.pop_front(); + + // For each edge leaving the state corresponding to res_state in sub_aut + for (auto &e : sub_aut->out(current_ps.state_scc)) + { + // We create a new memory and update it + memory mem(current_mem); + unsigned nb_seen = 0, + h = 0; + find_new_memory(e.dst, mem, sub_aut->edge_number(e), e.acc, + pairs, pairs_indices, nb_seen, h, relations); + // Now we try to find a way to move the elements and obtain an + // existing memory. + unsigned res_dst = -1U; + if constexpr (algo != TAR) + if (opt_.search_ex) + res_dst = s2l.get_compatible_state(e.dst, mem, nb_seen, + opt_.use_last); + // If it doesn't exist, we create a new state… + if (res_dst == -1U) + { + auto gs = get_state({(*init)[e.dst], e.dst, mem}); + res_dst = gs.first; + // And add it to the "tree" used to find a compatible state + if constexpr (!is_tar) + { + if (need_tree && gs.second) + s2l.add_new_path(e.dst, mem, res_dst, nb_seen); + } + } + + // We compute the color assigned to the new edge. + acc_cond::mark_t new_edge_color{}; + compute_new_color_lar(sub_aut, current_mem, mem, h, e.acc, + new_edge_color, pairs, acc_cache); + + // As we can assign a new destination later when + // use_last_post_process is true, we cannot try to find a compatible + // edge. 
+ auto edge_res_num = add_res_edge(res_current, res_dst, + new_edge_color, e.cond, + !use_last_post_process, + edge_cache); + (void) edge_res_num; + // We have to remember how many colors were seen if we do a post + // processing + if constexpr (algo != TAR) + if (use_last_post_process) + { + assert(edge_res_num == + edge_to_seen_nb.size() + nb_edges_before + 1); + edge_to_seen_nb.push_back(nb_seen); + } + } + } + + // We used the most recent compatible state but perhaps that another + // state was created after. We do a new search. As TAR always moves one + // element we don't need it. + if constexpr (algo != TAR) + if (use_last_post_process) + { + for (auto &res_state : states_scc_res) + for (auto &e : res_->out(res_state)) + { + auto e_dst = e.dst; + if (e.src == e_dst) + continue; + auto edge_num = res_->edge_number(e); + const auto &ps = num_2_ps[e_dst - nb_states_before]; + unsigned seen_nb = + edge_to_seen_nb[edge_num - nb_edges_before - 1]; + assert(seen_nb < SPOT_MAX_ACCSETS); + auto new_dst = s2l.get_compatible_state(ps.state_scc, ps.mem, + seen_nb, true); + if (new_dst != e_dst) + { + assert(new_dst != -1U); + need_purge_ = true; + e.dst = new_dst; + } + } + } + if (use_bscc) + { + // Contrary to the (old) implementation of IAR, adding an edge between + // 2 SCCs of the result is the last thing done. It means that + // we don't need to use a filter when we compute the BSCC. + // A state s is in the BSCC if scc_of(s) is 0. + scc_info sub_scc(res_, init_res, nullptr, nullptr, + scc_info_options::NONE); + if (sub_scc.scc_count() > 1) + { + need_purge_ = true; + for (auto &state_produced : states_scc_res) + if (sub_scc.scc_of(state_produced) == 0) + state_to_res_[res_to_aut_[state_produced]] = state_produced; + } + } + delete edge_cache; + } + + void + link_sccs() + { + if (si_.scc_count() > 1) + { + const unsigned res_num_states = res_->num_states(); + for (unsigned i = 0; i < res_num_states; ++i) + { + auto aut_i = res_to_aut_[i]; + auto aut_i_scc = si_.scc_of(aut_i); + for (auto &e : aut_->out(aut_i)) + if (aut_i_scc != si_.scc_of(e.dst)) + { + auto e_dst_repr = state_to_res_[e.dst]; + add_res_edge(i, e_dst_repr, {}, e.cond); + } + } + } + } + + bool + try_parity_equivalence(const zielonka_tree &tree, + const twa_graph_ptr &sub_aut) + { + if (tree.has_parity_shape()) + { + bool first_is_accepting = tree.is_even(); + // A vector that stores the difference between 2 levels. + std::vector colors_diff; + auto &tree_nodes = tree.nodes_; + // Supposes that the index of the root is 0. + unsigned current_index = 0; + auto current_node = tree_nodes[current_index]; + // While the current node has a child + while (current_node.first_child != 0) + { + auto child_index = current_node.first_child; + auto child = tree_nodes[child_index]; + acc_cond::mark_t diff = current_node.colors - child.colors; + colors_diff.emplace_back(diff); + current_node = child; + } + // We have to deal with the edge between the last node and ∅. + bool is_empty_accepting = sub_aut->acc().accepting({}); + bool is_current_accepting = (current_node.level % 2) != tree.is_even(); + if (is_empty_accepting != is_current_accepting) + colors_diff.emplace_back(current_node.colors); + // + 1 as we need to know which value should be given to an uncolored + // edge. 
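+ // new_colors[0] is the entry for an uncolored edge and new_colors[c + 1] + // the entry for color c, as expected by apply_copy. For example, with + // colors_diff = [{3}, {1, 2}, {0}], color 3 is mapped to 2, colors 1 and 2 + // to 1, and color 0 to 0.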
+ std::vector new_colors( + sub_aut->get_acceptance().used_sets().max_set() + 1, -1U); + unsigned current_col = colors_diff.size() - 1; + for (auto &diff : colors_diff) + { + for (auto col : diff.sets()) + new_colors[col + 1] = current_col; + --current_col; + } + bool is_max_even = first_is_accepting == (colors_diff.size() % 2); + if (!is_max_even) + change_to_odd(); + + bool is_even_in_odd_world = is_odd_ && is_max_even; + if (is_even_in_odd_world) + for (auto &x : new_colors) + ++x; + apply_copy(sub_aut, new_colors, PARITY_EQUIV); + return true; + } + return false; + } + + bool + try_parity_prefix(const zielonka_tree &tree, const twa_graph_ptr &sub_aut) + { + unsigned index = 0; + auto current = tree.nodes_[index]; + std::vector prefixes; + bool first_is_accepting = tree.is_even(); + + acc_cond::mark_t removed_cols{}; + auto has_one_child = [&](const auto node) constexpr + { + auto fc = node.first_child; + return tree.nodes_[fc].next_sibling == fc; + }; + while (has_one_child(current)) + { + auto child = tree.nodes_[current.first_child]; + acc_cond::mark_t diff{}; + const bool is_leaf = current.first_child == 0; + if (is_leaf) + diff = current.colors; + else + diff = current.colors - child.colors; + prefixes.emplace_back(diff); + removed_cols |= diff; + if (is_leaf) + break; + current = child; + } + if (prefixes.empty()) + return false; + + if (opt_.datas) + algo_used_ |= algorithm::PARITY_PREFIX; + + // As we want to remove the prefix we need to remove it from the + // condition. As an unused color is not always removed (acc_clean false), + // we do it here. + auto used_cols = sub_aut->get_acceptance().used_sets() - removed_cols; + auto new_cond = sub_aut->acc().restrict_to(used_cols); + scc_info_to_parity sub(sub_aut, removed_cols); + // The recursive call will add some informations to help + // to add missing edges + state_to_nums_ = + new std::vector>(aut_->num_states()); + opt_.parity_prefix = false; + bool old_pp_gen = opt_.parity_prefix_general; + opt_.parity_prefix_general = false; + + auto max_scc_color_rec = max_color_scc_; + for (auto x : sub.split_aut({removed_cols})) + { + x->set_acceptance(new_cond); + process_scc(x, algorithm::PARITY_PREFIX); + if (max_color_scc_.has_value()) + { + if (!max_scc_color_rec.has_value()) + max_scc_color_rec.emplace(*max_color_scc_); + else + max_scc_color_rec.emplace( + std::max(*max_scc_color_rec, *max_color_scc_)); + } + } + opt_.parity_prefix = true; + opt_.parity_prefix_general = old_pp_gen; + + assert(max_scc_color_rec.has_value()); + auto max_used_is_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + bool last_prefix_acc = (prefixes.size() % 2) != first_is_accepting; + + unsigned m = prefixes.size() + (max_used_is_accepting != last_prefix_acc) + + *max_scc_color_rec - 1; + auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + for (auto &e : sub_aut->edges()) + if (e.acc & removed_cols) + { + auto el = std::find_if(prefixes.begin(), prefixes.end(), + [&](acc_cond::mark_t &x) + { return x & e.acc; }); + assert(el != prefixes.end()); + unsigned pos = std::distance(prefixes.begin(), el); + const unsigned col = m - pos; + // As it is a parity prefix we should never get a lower value than + // the color recursively produced. 
+ assert(!max_scc_color_rec.has_value() || *max_scc_color_rec == 0 + || col + 1 > *max_scc_color_rec); + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + if (col != -1U) + add_res_edge(src, dst, {col}, e.cond); + else + add_res_edge(src, dst, {}, e.cond); + } + // As when we need to use link_scc, a set of edges that link 2 SCC + // need to be added and don't need to have a color. + else if (sub.scc_of(e.src) != sub.scc_of(e.dst)) + { + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + add_res_edge(src, dst, {}, e.cond); + } + delete state_to_nums_; + state_to_nums_ = nullptr; + + return true; + } + + bool + try_parity_prefix_general(twa_graph_ptr &sub_aut) + { + // This function should not be applied on an "empty" automaton as + // it must create an empty SCC with the algorithm NONE. + assert(sub_aut->num_edges() > 0); + static_assert((MAX_ACCSETS % 2) == 0, + "MAX_ACCSETS is supposed to be even"); + std::vector res_colors; + std::vector status; + acc_cond new_cond; + bool was_able_to_color; + // Is the maximal color accepting? + bool start_inf = true; + cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, false, status, + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); + // Otherwise we can try to find a rejecting transition as first step + if (!was_able_to_color) + { + cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, false, status, + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 1); + if (!was_able_to_color) + return false; + start_inf = false; + } + + // If we have a parity-type automaton, it is just a copy. + if (std::find(status.begin(), status.end(), edge_status::IMPOSSIBLE) + == status.end()) + { + std::vector res_cols; + res_cols.reserve(res_colors.size()); + + auto min_set = + std::min_element(res_colors.begin() + 1, res_colors.end())->max_set(); + // Does the minimal color has the same parity than the maximal parity? + bool same_acceptance_min_max = (min_set % 2); + // Do we need to shift to match the parity of res_? 
+ bool odd_shift = start_inf != is_odd_; + unsigned shift_col = min_set - (same_acceptance_min_max != odd_shift); + std::transform(res_colors.begin(), res_colors.end(), + std::back_inserter(res_cols), [&](auto &x) + { return x.max_set() - 1 - shift_col; }); + apply_copy_edge_index(sub_aut, res_cols, + algorithm::PARITY_PREFIX_GENERAL); + return true; + } + + // At this moment, a prefix exists + auto& ev = sub_aut->edge_vector(); + const auto ev_size = ev.size(); + auto keep = std::shared_ptr(make_bitvect(ev_size)); + const unsigned status_size = status.size(); + for (unsigned i = 1; i < status_size; ++i) + if (status[i] == edge_status::IMPOSSIBLE) + keep->set(i); + else + keep->clear(i); + + // Avoid recursive parity prefix + opt_.parity_prefix_general = false; + bool old_pp = opt_.parity_prefix; + opt_.parity_prefix = false; + + auto max_scc_color_rec = max_color_scc_; + scc_info lower_scc(sub_aut, scc_info_options::TRACK_STATES); + scc_info_to_parity sub(lower_scc, keep); + state_to_nums_ = + new std::vector>(aut_->num_states()); + for (auto x : sub.split_aut(keep)) + { + process_scc(x, algorithm::PARITY_PREFIX_GENERAL); + if (!max_scc_color_rec.has_value()) + max_scc_color_rec = max_color_scc_; + else if (max_color_scc_.has_value()) + max_scc_color_rec.emplace( + std::max(*max_scc_color_rec, *max_color_scc_)); + } + + // restore options + opt_.parity_prefix_general = true; + opt_.parity_prefix = old_pp; + + assert(sub_aut->num_edges() > 0); + + // Compute the minimal color used by parity prefix. + unsigned min_set_prefix = -2U; + for (unsigned i = 1; i < ev_size; ++i) + if (status[i] == edge_status::MARKED) + { + auto e_mark = res_colors[i].max_set(); + if (min_set_prefix == -2U) + min_set_prefix = e_mark - 1; + else + min_set_prefix = std::min(min_set_prefix + 1, e_mark) - 1; + } + + // At least one transition should be marked here. + assert(min_set_prefix != -2U); + + // Reduce the colors used by parity_prefix. + const bool min_prefix_accepting = (min_set_prefix % 2) == start_inf; + // max_scc_color_rec has a value as the automaton is not parity-type, + // so there was a recursive paritisation + assert(max_scc_color_rec.has_value()); + const bool max_rec_accepting = ((*max_scc_color_rec - 1) % 2) == is_odd_; + const bool same_prio = min_prefix_accepting == max_rec_accepting; + const unsigned delta = + min_set_prefix - (*max_scc_color_rec + 1) - !same_prio; + + auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + for (unsigned e_num = 1; e_num < ev_size; ++e_num) + { + auto& e = ev[e_num]; + if (status[e_num] == edge_status::MARKED) + { + unsigned dst = state_to_res_[(*sub_aut_orig)[e.dst]]; + for (auto src : (*state_to_nums_)[(*sub_aut_orig)[e.src]]) + { + auto col = res_colors[e_num].max_set() - delta - 1; + if (col == -1U) + add_res_edge(src, dst, {}, e.cond); + else + add_res_edge(src, dst, {col}, e.cond); + } + } + } + + delete state_to_nums_; + state_to_nums_ = nullptr; + + return true; + } + + bool + try_emptiness(const const_twa_graph_ptr &sub_aut, bool &tried) + { + tried = true; + if (generic_emptiness_check(sub_aut)) + { + auto col_fun = + [col = is_odd_ ? 
acc_cond::mark_t{0} : acc_cond::mark_t{}] + (const twa_graph::edge_storage_t &) noexcept + { + return col; + }; + apply_copy_general(sub_aut, col_fun, GENERIC_EMPTINESS); + return true; + } + return false; + } + + bool + try_rabin_to_buchi(twa_graph_ptr &sub_aut) + { + algorithm algo = RABIN_TO_BUCHI; + auto buch_aut = rabin_to_buchi_if_realizable(sub_aut); + if (buch_aut == nullptr) + { + algo = STREETT_TO_COBUCHI; + auto old_cond = sub_aut->get_acceptance(); + sub_aut->set_acceptance(acc_cond(old_cond.complement())); + buch_aut = rabin_to_buchi_if_realizable(sub_aut); + sub_aut->set_acceptance(acc_cond(old_cond)); + } + if (buch_aut != nullptr) + { + if (algo == STREETT_TO_COBUCHI) + change_to_odd(); + unsigned shift = (algo == RABIN_TO_BUCHI) && is_odd_; + + auto &buch_aut_ev = buch_aut->edge_vector(); + // 0 is not an edge, so we assign -1; + std::vector colors; + colors.reserve(buch_aut_ev.size()); + colors.push_back(-1U); + std::transform( + buch_aut_ev.begin() + 1, buch_aut_ev.end(), + std::back_inserter(colors), + [&](const twa_graph::edge_storage_t &e) { + return e.acc.max_set() - 1 + shift; + }); + apply_copy_edge_index(sub_aut, colors, algo); + return true; + } + return false; + } + + bool + try_buchi_type(const twa_graph_ptr &sub_aut) + { + std::vector status; + std::vector res_colors; + acc_cond new_cond; + bool is_co_bu = false; + bool was_able_to_color; + if (!cond_type_main_aux(sub_aut, cond_kind::BUCHI, true, status, + res_colors, new_cond, was_able_to_color, 0)) + { + is_co_bu = true; + if (!cond_type_main_aux(sub_aut, cond_kind::CO_BUCHI, true, status, + res_colors, new_cond, was_able_to_color, 0)) + return false; + change_to_odd(); + } + // Tests if all edges are colored or all edges are uncolored + auto [min, max] = + std::minmax_element(res_colors.begin() + 1, res_colors.end()); + const bool one_color = min->max_set() == max->max_set(); + const bool is_colored = min->max_set(); + auto col_fun = [&](const twa_graph::edge_storage_t &edge) + { + // If there one color in the automaton, we can simplify. + if (one_color) + { + bool z = (is_colored && !is_odd_) || (!is_colored && is_odd_); + // When we do co-buchi, we reverse + if (is_co_bu) + z = !z; + return z ? acc_cond::mark_t{0} : acc_cond::mark_t{}; + } + // Otherwise, copy the color + auto edge_number = sub_aut->edge_number(edge); + unsigned mc = res_colors[edge_number].max_set() - 1; + mc += (!is_co_bu && is_odd_); + if (mc == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{mc}; + }; + apply_copy_general(sub_aut, col_fun, is_co_bu ? 
algorithm::CO_BUCHI_TYPE + : algorithm::BUCHI_TYPE); + return true; + } + + bool + try_parity_type(const twa_graph_ptr &sub_aut) + { + std::vector status; + std::vector res_colors; + acc_cond new_cond; + bool was_able_to_color; + if (!cond_type_main_aux(sub_aut, cond_kind::INF_PARITY, true, status, + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) + { + if (!cond_type_main_aux(sub_aut, cond_kind::FIN_PARITY, true, status, + res_colors, new_cond, was_able_to_color, + SPOT_MAX_ACCSETS - 3)) + return false; + } + bool is_max, is_odd; + new_cond.is_parity(is_max, is_odd); + auto min = + std::min_element(res_colors.begin() + 1, res_colors.end()); + // cond_type_main_aux returns a parity max condition + assert(is_max); + auto col_fun = + [shift = (is_odd != is_odd_) - (min->max_set() + (min->max_set() % 2)), + &res_colors, &sub_aut] + (const twa_graph::edge_storage_t &edge) + { + auto edge_number = sub_aut->edge_number(edge); + unsigned mc = res_colors[edge_number].max_set() - 1; + mc += shift; + if (mc == -1U) + return acc_cond::mark_t{}; + return acc_cond::mark_t{mc}; + }; + apply_copy_general(sub_aut, col_fun, PARITY_TYPE); + return true; + } + + // Keeps the result of the partial degeneralization if it reduces the number + // of colors or it allows to apply IAR. + bool + keep_deg(const const_twa_graph_ptr &sub_aut, const const_twa_graph_ptr °) + { + if (!opt_.reduce_col_deg) + return true; + unsigned nb_col_orig = sub_aut->get_acceptance().used_sets().count(); + + if (deg->get_acceptance().used_sets().count() < nb_col_orig) + return true; + std::vector pairs; + if (deg->acc().is_rabin_like(pairs)) + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } + if (deg->acc().is_streett_like(pairs)) + { + remove_duplicates(pairs); + if (pairs.size() < nb_col_orig) + return true; + } + return false; + } + + // Process a SCC. If there is no edge in the automaton, a new state is + // created and we say (if pretty_print is true) that none_algo created + // this state. + void + process_scc(twa_graph_ptr &sub_aut, + const algorithm none_algo = algorithm::NONE) + { + // Init the maximal color produced when processing this SCC. + max_color_scc_.reset(); + // If the sub_automaton is "empty", we don't need to apply an algorithm. + if (sub_aut->num_edges() == 0) + { + apply_copy(sub_aut, {}, none_algo); + return; + } + + bool tried_emptiness = false; + bool changed_structure = true; + while (true) + { + auto cond_before_simpl = sub_aut->acc(); + if (opt_.acc_clean) + simplify_acceptance_here(sub_aut); + if (opt_.propagate_col) + { + propagate_marks_here(sub_aut); + if (opt_.acc_clean) + simplify_acceptance_here(sub_aut); + } + if (opt_.datas && sub_aut->acc() != cond_before_simpl) + algo_used_ |= algorithm::ACC_CLEAN; + + if (opt_.parity_equiv || opt_.parity_prefix) + { + // If we don't try to find a parity prefix, we can stop + // to construct the tree when it has not parity shape. 
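+ // (the ABORT_WRONG_SHAPE option below requests exactly that).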
+ zielonka_tree_options zopt = zielonka_tree_options::MERGE_SUBTREES + | zielonka_tree_options::CHECK_PARITY; + if (!opt_.parity_prefix) + zopt = zopt | zielonka_tree_options::ABORT_WRONG_SHAPE; + auto tree = zielonka_tree(sub_aut->acc(), zopt); + // If it does not have parity shape, tree.nodes_ will be empty + if (tree.num_branches() != 0 && opt_.parity_equiv + && try_parity_equivalence(tree, sub_aut)) + return; + if (opt_.parity_prefix && try_parity_prefix(tree, sub_aut)) + return; + } + + if (changed_structure && opt_.parity_prefix_general + && try_parity_prefix_general(sub_aut)) + return; + + if (opt_.generic_emptiness && !tried_emptiness + && try_emptiness(sub_aut, tried_emptiness)) + return; + + // Buchi_type_to_buchi is more general than Rabin_to_buchi, so + // we only call rabin_to_buchi if buchi_type_to_buchi is false. + if (!opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity + && opt_.rabin_to_buchi + && try_rabin_to_buchi(sub_aut)) + return; + + // As parity_type_to_parity is stronger, we don't + // try this if that option is used. + if (opt_.buchi_type_to_buchi && !opt_.parity_type_to_parity + && try_buchi_type(sub_aut)) + return; + + // We don't do it if parity_prefix_general is true: on a parity-type + // automaton, parity_prefix_general removes all the transitions and + // also yields a parity-type automaton. + if (!opt_.parity_prefix_general && opt_.parity_type_to_parity + && try_parity_type(sub_aut)) + return; + + if (opt_.partial_degen + && is_partially_degeneralizable(sub_aut, true, true)) + { + auto deg = sub_aut; + std::vector forbid; + auto m = is_partially_degeneralizable(sub_aut, true, true, forbid); + bool changed = false; + while (m) + { + auto tmp = partial_degeneralize(deg, m); + simplify_acceptance_here(tmp); + if (keep_deg(deg, tmp)) + { + algo_used_ |= algorithm::PARTIAL_DEGEN; + deg = tmp; + changed = true; + changed_structure = true; + } + else + forbid.emplace_back(m); + m = is_partially_degeneralizable(deg, true, true, forbid); + } + + if (changed) + { + sub_aut = deg; + continue; + } + } + break; + } + if (opt_.use_generalized_rabin) + { + auto gen_rab = to_generalized_rabin(sub_aut); + // to_generalized_rabin does not propagate original-states.
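+ // We copy it from sub_aut onto gen_rab by hand below.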
+ auto sub_aut_orig = + sub_aut->get_named_prop>("original-states"); + assert(sub_aut_orig); + auto orig = new std::vector(); + const auto sub_aut_num_states = sub_aut->num_states(); + orig->reserve(sub_aut_num_states); + gen_rab->set_named_prop("original-states", orig); + for (unsigned i = 0; i < sub_aut_num_states; ++i) + orig->push_back((*sub_aut_orig)[i]); + sub_aut = partial_degeneralize(gen_rab); + } + std::vector pairs; + algorithm algo = choose_lar(sub_aut->acc(), pairs, sub_aut->num_edges()); + if (opt_.datas) + algo_used_ |= algo; + if (algo == CAR) + apply_lar(sub_aut, pairs); + else if (algo == IAR_STREETT) + apply_lar(sub_aut, pairs); + else if (algo == IAR_RABIN) + apply_lar(sub_aut, pairs); + else if (algo == TAR) + apply_lar(sub_aut, pairs); + else + SPOT_UNREACHABLE(); + } + + public: + twa_graph_ptr + run() + { + res_ = make_twa_graph(aut_->get_dict()); + res_->copy_ap_of(aut_); + const unsigned num_scc = si_.scc_count(); + auto orig_aut = + aut_->get_named_prop>("original-states"); + std::optional> orig_st; + if (orig_aut) + { + orig_st.emplace(std::vector{*orig_aut}); + std::const_pointer_cast(aut_) + ->set_named_prop("original-states", nullptr); + } + auto sccs = si_.split_aut(); + for (unsigned scc = 0; scc < num_scc; ++scc) + { + auto sub_automaton = sccs[scc]; + process_scc(sub_automaton); + } + + link_sccs(); + // During the execution, to_parity works with its own + // original-states vector; we must combine it with the original-states + // property of aut_ to propagate the information. + if (orig_st) + for (unsigned i = 0; i < orig_->size(); ++i) + (*orig_)[i] = (*orig_aut)[(*orig_)[i]]; + res_->set_named_prop("original-states", orig_); + if (opt_.pretty_print) + res_->set_named_prop("state-names", names_); + if (res_->num_states() == 0) + add_res_state(NONE, {0, 0, {}}); + res_->set_init_state(state_to_res_[aut_->get_init_state_number()]); + // Only a subset of the algorithms can create an unreachable + // state. + if (need_purge_) + res_->purge_unreachable_states(); + // A special case is an automaton without edges. It implies that + // max_color_used_ has no value, so we need to test it.
+ if (!max_color_used_.has_value()) + { + assert(aut_->num_edges() == 0); + res_->set_acceptance(acc_cond(acc_cond::acc_code::f())); + } + else + { + res_->set_acceptance(acc_cond( + acc_cond::acc_code::parity(true, is_odd_, *max_color_used_))); + } + if (opt_.datas) + { + constexpr std::array + algos = {BUCHI_TYPE, CAR, CO_BUCHI_TYPE, GENERIC_EMPTINESS, IAR_RABIN, + IAR_STREETT, NONE, PARITY_EQUIV, PARITY_PREFIX, + PARITY_PREFIX_GENERAL, PARITY_TYPE, RABIN_TO_BUCHI, + STREETT_TO_COBUCHI, TAR}; + for (auto al : algos) + if (algo_used_ & al) + opt_.datas->algorithms_used.emplace_back(algorithm_to_str(al)); + } + return res_; + } + + to_parity_generator(const const_twa_graph_ptr &aut, + const to_parity_options opt) + : aut_(aut), + opt_(opt), + si_(aut), + state_to_res_(aut->num_states(), -1U) + { + auto aut_num = aut->num_states(); + res_to_aut_.reserve(aut_num); + orig_ = new std::vector(); + orig_->reserve(aut_num); + if (opt.pretty_print) + { + names_ = new std::vector(); + names_->reserve(aut_num); + } + } + }; + + twa_graph_ptr + to_parity(const const_twa_graph_ptr &aut, + const to_parity_options options) + { + bool is_max, is_odd; + if (aut->acc().is_parity(is_max, is_odd, false)) + { + if (!is_max) + return change_parity(aut, parity_kind::parity_kind_max, + parity_style::parity_style_any); + else + { + auto res = make_twa_graph(aut, twa::prop_set::all()); + res->copy_acceptance_of(aut); + return res; + } + } + to_parity_generator gen(aut, options); + return gen.run(); + } + + // Old version of CAR - // Old version of IAR. namespace { + struct lar_state + { + unsigned state; + std::vector perm; + bool operator<(const lar_state &s) const + { + return state == s.state ? perm < s.perm : state < s.state; + } + + std::string to_string() const + { + std::ostringstream s; + s << state << " ["; + unsigned ps = perm.size(); + for (unsigned i = 0; i < ps; ++i) + { + if (i > 0) + s << ','; + s << perm[i]; + } + s << ']'; + return s.str(); + } + }; + + class lar_generator + { + const const_twa_graph_ptr &aut_; + twa_graph_ptr res_; + const bool pretty_print; + + std::map lar2num; + + public: + explicit lar_generator(const const_twa_graph_ptr &a, bool pretty_print) + : aut_(a), res_(nullptr), pretty_print(pretty_print) + { + } + + twa_graph_ptr run() + { + res_ = make_twa_graph(aut_->get_dict()); + res_->copy_ap_of(aut_); + + std::deque todo; + auto get_state = [this, &todo](const lar_state &s) + { + auto it = lar2num.emplace(s, -1U); + if (it.second) // insertion took place + { + unsigned nb = res_->new_state(); + it.first->second = nb; + todo.push_back(s); + } + return it.first->second; + }; + + std::vector initial_perm(aut_->num_sets()); + std::iota(initial_perm.begin(), initial_perm.end(), 0); + { + lar_state s0{aut_->get_init_state_number(), initial_perm}; + res_->set_init_state(get_state(s0)); + } + + scc_info si(aut_, scc_info_options::NONE); + // main loop + while (!todo.empty()) + { + lar_state current = todo.front(); + todo.pop_front(); + + // TODO: todo could store this number to avoid one lookup + unsigned src_num = get_state(current); + + unsigned source_scc = si.scc_of(current.state); + for (const auto &e : aut_->out(current.state)) + { + // find the new permutation + std::vector new_perm = current.perm; + unsigned h = 0; + for (unsigned k : e.acc.sets()) + { + auto it = std::find(new_perm.begin(), new_perm.end(), k); + h = std::max(h, unsigned(new_perm.end() - it)); + std::rotate(it, it + 1, new_perm.end()); + } + + if (source_scc != si.scc_of(e.dst)) + { + new_perm = initial_perm; 
+ h = 0; + } + + lar_state dst{e.dst, new_perm}; + unsigned dst_num = get_state(dst); + + // Do the h last elements satisfy the acceptance condition? + // If they do, emit 2h, if they don't emit 2h+1. + acc_cond::mark_t m(new_perm.end() - h, new_perm.end()); + bool rej = !aut_->acc().accepting(m); + res_->new_edge(src_num, dst_num, e.cond, {2 * h + rej}); + } + } + + // parity max even + unsigned sets = 2 * aut_->num_sets() + 2; + res_->set_acceptance(sets, acc_cond::acc_code::parity_max_even(sets)); + + if (pretty_print) + { + auto names = new std::vector(res_->num_states()); + for (const auto &p : lar2num) + (*names)[p.second] = p.first.to_string(); + res_->set_named_prop("state-names", names); + } + + return res_; + } + }; + } + + twa_graph_ptr + to_parity_old(const const_twa_graph_ptr &aut, bool pretty_print) + { + if (!aut->is_existential()) + throw std::runtime_error("LAR does not handle alternation"); + // if aut is already parity return it as is + if (aut->acc().is_parity()) + return std::const_pointer_cast(aut); + + lar_generator gen(aut, pretty_print); + return gen.run(); + } + + // Old version of IAR + + namespace + { using perm_t = std::vector; struct iar_state { @@ -50,18 +2635,18 @@ namespace spot perm_t perm; bool - operator<(const iar_state& other) const + operator<(const iar_state &other) const { return state == other.state ? perm < other.perm : state < other.state; } }; - template + template class iar_generator { // helper functions: access fin and inf parts of the pairs // these functions negate the Streett condition to see it as a Rabin one - const acc_cond::mark_t& + const acc_cond::mark_t & fin(unsigned k) const { if (is_rabin) @@ -77,16 +2662,15 @@ namespace spot else return pairs_[k].fin; } + public: - explicit iar_generator(const const_twa_graph_ptr& a, - const std::vector& p, + explicit iar_generator(const const_twa_graph_ptr &a, + const std::vector &p, const bool pretty_print) - : aut_(a) - , pairs_(p) - , scc_(scc_info(a)) - , pretty_print_(pretty_print) - , state2pos_iar_states(aut_->num_states(), -1U) - {} + : aut_(a), pairs_(p), scc_(scc_info(a)), pretty_print_(pretty_print), + state2pos_iar_states(aut_->num_states(), -1U) + { + } twa_graph_ptr run() @@ -110,9 +2694,6 @@ namespace spot res_->set_init_state(s); } - // there could be quite a number of unreachable states, prune them - res_->purge_unreachable_states(); - if (pretty_print_) { unsigned nstates = res_->num_states(); @@ -120,13 +2701,13 @@ namespace spot for (auto e : res_->edges()) { unsigned s = e.src; - iar_state iar = num2iar.at(s); + iar_state iar = num2iar[s]; std::ostringstream st; st << iar.state << ' '; if (iar.perm.empty()) st << '['; char sep = '['; - for (unsigned h: iar.perm) + for (unsigned h : iar.perm) { st << sep << h; sep = ','; @@ -137,6 +2718,8 @@ namespace spot res_->set_named_prop("state-names", names); } + // there could be quite a number of unreachable states, prune them + res_->purge_unreachable_states(); return res_; } @@ -147,44 +2730,44 @@ namespace spot unsigned init = scc_.one_state_of(scc_num); std::deque todo; - auto get_state = [&](const iar_state& s) + auto get_state = [&](const iar_state &s) + { + auto it = iar2num.find(s); + if (it == iar2num.end()) { - auto it = iar2num.find(s); - if (it == iar2num.end()) - { - unsigned nb = res_->new_state(); - iar2num[s] = nb; - num2iar[nb] = s; - unsigned iar_pos = iar_states.size(); - unsigned old_newest_pos = state2pos_iar_states[s.state]; - state2pos_iar_states[s.state] = iar_pos; - iar_states.push_back({s, old_newest_pos}); 
- todo.push_back(s); - return nb; - } - return it->second; - }; + unsigned nb = res_->new_state(); + iar2num[s] = nb; + num2iar[nb] = s; + unsigned iar_pos = iar_states.size(); + unsigned old_newest_pos = state2pos_iar_states[s.state]; + state2pos_iar_states[s.state] = iar_pos; + iar_states.push_back({s, old_newest_pos}); + todo.push_back(s); + return nb; + } + return it->second; + }; auto get_other_scc = [this](unsigned state) - { - auto it = state2iar.find(state); - // recursively build the destination SCC if we detect that it has - // not been already built. - if (it == state2iar.end()) - build_iar_scc(scc_.scc_of(state)); - return iar2num.at(state2iar.at(state)); - }; + { + auto it = state2iar.find(state); + // recursively build the destination SCC if we detect that it has + // not been already built. + if (it == state2iar.end()) + build_iar_scc(scc_.scc_of(state)); + return iar2num.at(state2iar.at(state)); + }; if (scc_.is_trivial(scc_num)) - { - iar_state iar_s{init, perm_t()}; - state2iar[init] = iar_s; - unsigned src_num = get_state(iar_s); - // Do not forget to connect to subsequent SCCs - for (const auto& e : aut_->out(init)) - res_->new_edge(src_num, get_other_scc(e.dst), e.cond); - return; - } + { + iar_state iar_s{init, perm_t()}; + state2iar[init] = iar_s; + unsigned src_num = get_state(iar_s); + // Do not forget to connect to subsequent SCCs + for (const auto &e : aut_->out(init)) + res_->new_edge(src_num, get_other_scc(e.dst), e.cond); + return; + } // determine the pairs that appear in the SCC auto colors = scc_.acc_sets_of(scc_num); @@ -202,109 +2785,110 @@ namespace spot // the main loop while (!todo.empty()) + { + iar_state current = todo.front(); + todo.pop_front(); + + unsigned src_num = get_state(current); + + for (const auto &e : aut_->out(current.state)) { - iar_state current = todo.front(); - todo.pop_front(); + // connect to the appropriate state + if (scc_.scc_of(e.dst) != scc_num) + res_->new_edge(src_num, get_other_scc(e.dst), e.cond); + else + { + // find the new permutation + perm_t new_perm = current.perm; + // Count pairs whose fin-part is seen on this transition + unsigned seen_nb = 0; + // consider the pairs for this SCC only + for (unsigned k : scc_pairs) + if (e.acc & fin(k)) + { + ++seen_nb; + auto it = std::find(new_perm.begin(), + new_perm.end(), + k); + // move the pair in front of the permutation + std::rotate(new_perm.begin(), it, it + 1); + } - unsigned src_num = get_state(current); + iar_state dst; + unsigned dst_num = -1U; - for (const auto& e : aut_->out(current.state)) + // Optimization: when several indices are seen in the + // transition, they move at the front of new_perm in any + // order. Check whether there already exists an iar_state + // that matches this condition. 
+ + auto iar_pos = state2pos_iar_states[e.dst]; + while (iar_pos != -1U) { - // connect to the appropriate state - if (scc_.scc_of(e.dst) != scc_num) - res_->new_edge(src_num, get_other_scc(e.dst), e.cond); - else - { - // find the new permutation - perm_t new_perm = current.perm; - // Count pairs whose fin-part is seen on this transition - unsigned seen_nb = 0; - // consider the pairs for this SCC only - for (unsigned k : scc_pairs) - if (e.acc & fin(k)) - { - ++seen_nb; - auto it = std::find(new_perm.begin(), - new_perm.end(), - k); - // move the pair in front of the permutation - std::rotate(new_perm.begin(), it, it+1); - } - - iar_state dst; - unsigned dst_num = -1U; - - // Optimization: when several indices are seen in the - // transition, they move at the front of new_perm in any - // order. Check whether there already exists an iar_state - // that matches this condition. - - auto iar_pos = state2pos_iar_states[e.dst]; - while (iar_pos != -1U) - { - iar_state& tmp = iar_states[iar_pos].first; - iar_pos = iar_states[iar_pos].second; - if (std::equal(new_perm.begin() + seen_nb, - new_perm.end(), - tmp.perm.begin() + seen_nb)) - { - dst = tmp; - dst_num = iar2num[dst]; - break; - } - } - // if such a state was not found, build it - if (dst_num == -1U) - { - dst = iar_state{e.dst, new_perm}; - dst_num = get_state(dst); - } - - // find the maximal index encountered by this transition - unsigned maxint = -1U; - for (int k = current.perm.size() - 1; k >= 0; --k) - { - unsigned pk = current.perm[k]; - if (!inf(pk) || - (e.acc & (pairs_[pk].fin | pairs_[pk].inf))) { - maxint = k; - break; - } - } - - acc_cond::mark_t acc = {}; - if (maxint == -1U) - acc = {0}; - else if (e.acc & fin(current.perm[maxint])) - acc = {2*maxint+2}; - else - acc = {2*maxint+1}; - - res_->new_edge(src_num, dst_num, e.cond, acc); - } + iar_state &tmp = iar_states[iar_pos].first; + iar_pos = iar_states[iar_pos].second; + if (std::equal(new_perm.begin() + seen_nb, + new_perm.end(), + tmp.perm.begin() + seen_nb)) + { + dst = tmp; + dst_num = iar2num[dst]; + break; + } } + // if such a state was not found, build it + if (dst_num == -1U) + { + dst = iar_state{e.dst, new_perm}; + dst_num = get_state(dst); + } + + // find the maximal index encountered by this transition + unsigned maxint = -1U; + for (int k = current.perm.size() - 1; k >= 0; --k) + { + unsigned pk = current.perm[k]; + if (!inf(pk) || + (e.acc & (pairs_[pk].fin | pairs_[pk].inf))) + { + maxint = k; + break; + } + } + + acc_cond::mark_t acc{}; + if (maxint == -1U) + acc.set(0); + else if (e.acc & fin(current.perm[maxint])) + assign_color(acc, 2 * maxint + 2); + else + assign_color(acc, 2 * maxint + 1); + + res_->new_edge(src_num, dst_num, e.cond, acc); + } } + } // Optimization: find the bottom SCC of the sub-automaton we have just // built. To that end, we have to ignore edges going out of scc_num. 
- auto leaving_edge = [&](unsigned d) - { - return scc_.scc_of(num2iar.at(d).state) != scc_num; - }; - auto filter_edge = [](const twa_graph::edge_storage_t&, + auto leaving_edge = [&](unsigned d) constexpr + { + return scc_.scc_of(num2iar.at(d).state) != scc_num; + }; + auto filter_edge = [](const twa_graph::edge_storage_t &, unsigned dst, - void* filter_data) - { - decltype(leaving_edge)* data = - static_cast(filter_data); + void *filter_data) constexpr + { + decltype(leaving_edge) *data = + static_cast(filter_data); - if ((*data)(dst)) - return scc_info::edge_filter_choice::ignore; - return scc_info::edge_filter_choice::keep; - }; + if ((*data)(dst)) + return scc_info::edge_filter_choice::ignore; + return scc_info::edge_filter_choice::keep; + }; scc_info sub_scc(res_, get_state(s0), filter_edge, &leaving_edge); - // SCCs are numbered in reverse topological order, so the bottom SCC has - // index 0. + // SCCs are numbered in reverse topological order, so the bottom SCC + // has index 0. const unsigned bscc = 0; assert(sub_scc.succ(0).empty()); assert( @@ -314,23 +2898,23 @@ namespace spot if (sub_scc.succ(s).empty()) return false; return true; - } ()); + }()); assert(sub_scc.states_of(bscc).size() - >= scc_.states_of(scc_num).size()); + >= scc_.states_of(scc_num).size()); // update state2iar for (unsigned scc_state : sub_scc.states_of(bscc)) - { - iar_state& iar = num2iar.at(scc_state); - if (state2iar.find(iar.state) == state2iar.end()) - state2iar[iar.state] = iar; - } + { + iar_state &iar = num2iar.at(scc_state); + if (state2iar.find(iar.state) == state2iar.end()) + state2iar[iar.state] = iar; + } } private: - const const_twa_graph_ptr& aut_; - const std::vector& pairs_; + const const_twa_graph_ptr &aut_; + const std::vector &pairs_; const scc_info scc_; twa_graph_ptr res_; bool pretty_print_; @@ -349,1520 +2933,36 @@ namespace spot // Make this a function different from iar_maybe(), so that // iar() does not have to call a deprecated function. static twa_graph_ptr - iar_maybe_(const const_twa_graph_ptr& aut, bool pretty_print) + iar_maybe_(const const_twa_graph_ptr &aut, bool pretty_print) { std::vector pairs; if (!aut->acc().is_rabin_like(pairs)) if (!aut->acc().is_streett_like(pairs)) return nullptr; else - { - iar_generator gen(aut, pairs, pretty_print); - return gen.run(); - } - else { - iar_generator gen(aut, pairs, pretty_print); + iar_generator gen(aut, pairs, pretty_print); return gen.run(); } + else + { + iar_generator gen(aut, pairs, pretty_print); + return gen.run(); + } } } twa_graph_ptr - iar_maybe(const const_twa_graph_ptr& aut, bool pretty_print) - { - return iar_maybe_(aut, pretty_print); - } - - twa_graph_ptr - iar(const const_twa_graph_ptr& aut, bool pretty_print) + iar(const const_twa_graph_ptr &aut, bool pretty_print) { if (auto res = iar_maybe_(aut, pretty_print)) return res; throw std::runtime_error("iar() expects Rabin-like or Streett-like input"); } -// New version for paritizing -namespace -{ -struct node -{ - // A color of the permutation or a state. - unsigned label; - std::vector children; - // is_leaf is true if the label is a state of res_. - bool is_leaf; - - node() - : node(0, 0){ - } - - node(int label_, bool is_leaf_) - : label(label_) - , children(0) - , is_leaf(is_leaf_){ - } - - ~node() - { - for (auto c : children) - delete c; - } - - // Add a permutation to the tree. 
- void - add_new_perm(const std::vector& permu, int pos, unsigned state) - { - if (pos == -1) - children.push_back(new node(state, true)); - else - { - auto lab = permu[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - if (child == children.end()) - { - node* new_child = new node(lab, false); - children.push_back(new_child); - new_child->add_new_perm(permu, pos - 1, state); - } - else - (*child)->add_new_perm(permu, pos - 1, state); - } - } - - node* - get_sub_tree(const std::vector& elements, int pos) - { - if (pos < 0) - return this; - unsigned lab = elements[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - assert(child != children.end()); - return (*child)->get_sub_tree(elements, pos - 1); - } - - // Gives a state of res_ (if it exists) reachable from this node. - // If use_last is true, we take the most recent, otherwise we take - // the oldest. - unsigned - get_end(bool use_last) - { - if (children.empty()) - { - if (!is_leaf) - return -1U; - return label; - } - if (use_last) - return children[children.size() - 1]->get_end(use_last); - return children[0]->get_end(use_last); - } - - // Try to find a state compatible with the permu when seen_nb colors are - // moved. - unsigned - get_existing(const std::vector& permu, unsigned seen_nb, int pos, - bool use_last) - { - if (pos < (int) seen_nb) - return get_end(use_last); - else - { - auto lab = permu[pos]; - auto child = std::find_if(children.begin(), children.end(), - [lab](node* n){ - return n->label == lab; - }); - if (child == children.end()) - return -1U; - return (*child)->get_existing(permu, seen_nb, pos - 1, use_last); - } - } -}; - -class state_2_car_scc -{ -std::vector nodes; - -public: -state_2_car_scc(unsigned nb_states) - : nodes(nb_states, node()){ -} - -// Try to find a state compatible with the permu when seen_nb colors are -// moved. If use_last is true, it return the last created compatible state. -// If it is false, it returns the oldest. -unsigned -get_res_state(unsigned state, const std::vector& permu, - unsigned seen_nb, bool use_last) -{ - return nodes[state].get_existing(permu, seen_nb, - permu.size() - 1, use_last); -} - -void -add_res_state(unsigned initial, unsigned state, - const std::vector& permu) -{ - nodes[initial].add_new_perm(permu, ((int) permu.size()) - 1, state); -} - -node* -get_sub_tree(const std::vector& elements, unsigned state) -{ - return nodes[state].get_sub_tree(elements, elements.size() - 1); -} -}; - -class car_generator -{ -enum algorithm { - // Try to have a Büchi condition if we have Rabin. - Rabin_to_Buchi, - Streett_to_Buchi, - // IAR - IAR_Streett, - IAR_Rabin, - // CAR - CAR, - // Changing colors transforms acceptance to max even/odd copy. - Copy_even, - Copy_odd, - // If a condition is "t" or "f", we just have to copy the automaton. 
- False_clean, - True_clean, - None -}; - - -static std::string -algorithm_to_str(algorithm algo) -{ - std::string algo_str; - switch (algo) - { - case IAR_Streett: - algo_str = "IAR (Streett)"; - break; - case IAR_Rabin: - algo_str = "IAR (Rabin)"; - break; - case CAR: - algo_str = "CAR"; - break; - case Copy_even: - algo_str = "Copy even"; - break; - case Copy_odd: - algo_str = "Copy odd"; - break; - case False_clean: - algo_str = "False clean"; - break; - case True_clean: - algo_str = "True clean"; - break; - case Streett_to_Buchi: - algo_str = "Streett to Büchi"; - break; - case Rabin_to_Buchi: - algo_str = "Rabin to Büchi"; - break; - default: - algo_str = "None"; - break; - } - return algo_str; -} - -using perm_t = std::vector; - -struct car_state -{ - // State of the original automaton - unsigned state; - // We create a new automaton for each SCC of the original automaton - // so we keep a link between a car_state and the state of the - // subautomaton. - unsigned state_scc; - // Permutation used by IAR and CAR. - perm_t perm; - - bool - operator<(const car_state &other) const - { - if (state < other.state) - return true; - if (state > other.state) - return false; - if (perm < other.perm) - return true; - if (perm > other.perm) - return false; - return state_scc < other.state_scc; - } - - std::string - to_string(algorithm algo) const - { - std::stringstream s; - s << state; - unsigned ps = perm.size(); - if (ps > 0) - { - s << " ["; - for (unsigned i = 0; i != ps; ++i) - { - if (i > 0) - s << ','; - s << perm[i]; - } - s << ']'; - } - s << ", "; - s << algorithm_to_str(algo); - return s.str(); - } -}; - -const acc_cond::mark_t & -fin(const std::vector& pairs, unsigned k, algorithm algo) -const -{ - if (algo == IAR_Rabin) - return pairs[k].fin; - else - return pairs[k].inf; -} - -acc_cond::mark_t -inf(const std::vector& pairs, unsigned k, algorithm algo) -const -{ - if (algo == IAR_Rabin) - return pairs[k].inf; - else - return pairs[k].fin; -} - -// Gives for each state a set of marks incoming to this state. -std::vector> -get_inputs_states(const twa_graph_ptr& aut) -{ - auto used = aut->acc().get_acceptance().used_sets(); - std::vector> inputs(aut->num_states()); - for (auto e : aut->edges()) - { - auto elements = e.acc & used; - if (elements.has_many()) - inputs[e.dst].insert(elements); - } - return inputs; -} - -// Gives for each state a set of pairs incoming to this state. -std::vector>> -get_inputs_iar(const twa_graph_ptr& aut, algorithm algo, - const std::set& perm_elem, - const std::vector& pairs) -{ - std::vector>> inputs(aut->num_states()); - for (auto e : aut->edges()) - { - auto acc = e.acc; - std::vector new_vect; - for (unsigned k : perm_elem) - if (acc & fin(pairs, k, algo)) - new_vect.push_back(k); - std::sort(std::begin(new_vect), std::end(new_vect)); - inputs[e.dst].insert(new_vect); - } - return inputs; -} -// Give an order from the set of marks -std::vector -group_to_vector(const std::set& group) -{ - // In this function, we have for example the marks {1, 2}, {1, 2, 3}, {2} - // A compatible order is [2, 1, 3] - std::vector group_vect(group.begin(), group.end()); - - // We sort the elements by inclusion. This function is called on a - // set of marks such that each mark is included or includes the others. - std::sort(group_vect.begin(), group_vect.end(), - [](const acc_cond::mark_t left, const acc_cond::mark_t right) - { - return (left != right) && ((left & right) == left); - }); - // At this moment, we have the vector [{2}, {1, 2}, {1, 2, 3}]. 
- // In order to create the order, we add the elements of the first element. - // Then we add the elements of the second mark (without duplication), etc. - std::vector result; - for (auto mark : group_vect) - { - for (unsigned col : mark.sets()) - if (std::find(result.begin(), result.end(), col) == result.end()) - result.push_back(col); - } - return result; -} - -// Give an order from the set of indices of pairs -std::vector -group_to_vector_iar(const std::set>& group) -{ - std::vector> group_vect(group.begin(), group.end()); - for (auto& vec : group_vect) - std::sort(std::begin(vec), std::end(vec)); - std::sort(group_vect.begin(), group_vect.end(), - [](const std::vector left, - const std::vector right) - { - return (right != left) - && std::includes(right.begin(), right.end(), - left.begin(), left.end()); - }); - std::vector result; - for (auto vec : group_vect) - for (unsigned col : vec) - if (std::find(result.begin(), result.end(), col) == result.end()) - result.push_back(col); - return result; -} - -// Give a correspondance between a mark and an order for CAR -std::map> -get_groups(const std::set& marks_input) -{ - std::map> result; - - std::vector> groups; - for (acc_cond::mark_t mark : marks_input) - { - bool added = false; - for (unsigned group = 0; group < groups.size(); ++group) - { - if (std::all_of(groups[group].begin(), groups[group].end(), - [mark](acc_cond::mark_t element) - { - return ((element | mark) == mark) - || ((element | mark) == element); - })) - { - groups[group].insert(mark); - added = true; - break; - } - } - if (!added) - groups.push_back({mark}); - } - for (auto& group : groups) - { - auto new_vector = group_to_vector(group); - for (auto mark : group) - result.insert({mark, new_vector}); - } - return result; -} - -// Give a correspondance between a mark and an order for IAR -std::map, std::vector> -get_groups_iar(const std::set>& marks_input) -{ - std::map, std::vector> result; - - std::vector>> groups; - for (auto vect : marks_input) - { - bool added = false; - for (unsigned group = 0; group < groups.size(); ++group) - if (std::all_of(groups[group].begin(), groups[group].end(), - [vect](std::vector element) - { - return std::includes(vect.begin(), vect.end(), - element.begin(), element.end()) - || std::includes(element.begin(), element.end(), - vect.begin(), vect.end()); - })) - { - groups[group].insert(vect); - added = true; - break; - } - if (!added) - groups.push_back({vect}); - } - for (auto& group : groups) - { - auto new_vector = group_to_vector_iar(group); - for (auto vect : group) - result.insert({vect, new_vector}); - } - return result; -} - -// Give for each state the correspondance between a mark and an order (CAR) -std::vector>> -get_mark_to_vector(const twa_graph_ptr& aut) -{ - std::vector>> result; - auto inputs = get_inputs_states(aut); - for (unsigned state = 0; state < inputs.size(); ++state) - result.push_back(get_groups(inputs[state])); - return result; -} - -// Give for each state the correspondance between a mark and an order (IAR) -std::vector, std::vector>> -get_iar_to_vector(const twa_graph_ptr& aut, algorithm algo, - const std::set& perm_elem, - const std::vector& pairs) -{ - std::vector, std::vector>> result; - auto inputs = get_inputs_iar(aut, algo, perm_elem, pairs); - for (unsigned state = 0; state < inputs.size(); ++state) - result.push_back(get_groups_iar(inputs[state])); - return result; -} - -public: -explicit car_generator(const const_twa_graph_ptr &a, to_parity_options options) - : aut_(a) - , scc_(scc_info(a)) - , 
is_odd(false) - , options(options) -{ - if (options.pretty_print) - names = new std::vector(); - else - names = nullptr; -} - -// During the creation of the states, we had to choose between a set of -// compatible states. But it is possible to create another compatible state -// after. This function checks if a compatible state was created after and -// use it. -void -change_transitions_destination(twa_graph_ptr& aut, -const std::vector& states, -std::map>& partial_history, -state_2_car_scc& state_2_car) -{ - for (auto s : states) - for (auto& edge : aut->out(s)) - { - unsigned - src = edge.src, - dst = edge.dst; - // We don't change loops - if (src == dst) - continue; - unsigned dst_scc = num2car[dst].state_scc; - auto cant_change = partial_history[aut->edge_number(edge)]; - edge.dst = state_2_car.get_sub_tree(cant_change, dst_scc) - ->get_end(true); - } -} - -unsigned -apply_false_true_clean(const twa_graph_ptr &sub_automaton, bool is_true, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton-> - get_named_prop>("original-states"); - - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - unsigned s_aut = (*init_states)[state]; - - car_state new_car = { s_aut, state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - if (options.pretty_print) - names->push_back( - new_car.to_string(is_true ? True_clean : False_clean)); - state2car_local[s_aut] = new_car; - } - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - unsigned s_aut = (*init_states)[state]; - car_state src = { s_aut, state, perm_t() }; - unsigned src_state = car2num_local[src]; - for (auto e : aut_->out(s_aut)) - { - auto col = is_true ^ !is_odd; - if (((unsigned)col) > max_free_color) - throw std::runtime_error("CAR needs more sets"); - if (scc_.scc_of(s_aut) == scc_.scc_of(e.dst)) - { - for (auto c : e.acc.sets()) - if (inf_fin_prefix[c] + is_odd > col) - col = inf_fin_prefix[c] + is_odd; - acc_cond::mark_t cond = { (unsigned) col }; - res_->new_edge( - src_state, car2num_local[state2car_local[e.dst]], - e.cond, cond); - } - } - } - return sub_automaton->num_states(); -} - -unsigned -apply_copy(const twa_graph_ptr &sub_automaton, - const std::vector &permut, - bool copy_odd, - const std::vector& inf_fin_prefix, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton - ->get_named_prop>("original-states"); - for (unsigned state = 0; state < sub_automaton->num_states(); ++state) - { - car_state new_car = { (*init_states)[state], state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - state2car_local[(*init_states)[state]] = new_car; - if (options.pretty_print) - names->push_back( - new_car.to_string(copy_odd ? 
Copy_odd : Copy_even)); - } - auto cond_col = sub_automaton->acc().get_acceptance().used_sets(); - for (unsigned s = 0; s < sub_automaton->num_states(); ++s) - { - for (auto e : sub_automaton->out(s)) - { - acc_cond::mark_t mark = { }; - int max_edge = -1; - for (auto col : e.acc.sets()) - { - if (cond_col.has(col)) - max_edge = std::max(max_edge, (int) permut[col]); - if (inf_fin_prefix[col] + (is_odd || copy_odd) > max_edge) - max_edge = inf_fin_prefix[col] + (is_odd || copy_odd); - } - if (max_edge != -1) - mark.set((unsigned) max_edge); - car_state src = { (*init_states)[s], s, perm_t() }, - dst = { (*init_states)[e.dst], e.dst, perm_t() }; - unsigned src_state = car2num_local[src], - dst_state = car2num_local[dst]; - res_->new_edge(src_state, dst_state, e.cond, mark); - } - } - return sub_automaton->num_states(); -} - -unsigned -apply_to_Buchi(const twa_graph_ptr& sub_automaton, - const twa_graph_ptr& buchi, - bool is_streett_to_buchi, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local) -{ - std::vector* init_states = sub_automaton - ->get_named_prop>("original-states"); - - for (unsigned state = 0; state < buchi->num_states(); ++state) - { - car_state new_car = { (*init_states)[state], state, perm_t() }; - auto new_state = res_->new_state(); - car2num_local[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - state2car_local[(*init_states)[state]] = new_car; - if (options.pretty_print) - names->push_back(new_car.to_string( - is_streett_to_buchi ? Streett_to_Buchi : Rabin_to_Buchi)); - } - auto g = buchi->get_graph(); - for (unsigned s = 0; s < buchi->num_states(); ++s) - { - unsigned b = g.state_storage(s).succ; - while (b) - { - auto& e = g.edge_storage(b); - auto acc = e.acc; - acc <<= (is_odd + is_streett_to_buchi); - if ((is_odd || is_streett_to_buchi) && acc == acc_cond::mark_t{ }) - acc = { (unsigned) (is_streett_to_buchi && is_odd) }; - car_state src = { (*init_states)[s], s, perm_t() }, - dst = { (*init_states)[e.dst], e.dst, perm_t() }; - unsigned src_state = car2num_local[src], - dst_state = car2num_local[dst]; - int col = ((int) acc.max_set()) - 1; - if (col > (int) max_free_color) - throw std::runtime_error("CAR needs more sets"); - auto& e2 = sub_automaton->get_graph().edge_storage(b); - for (auto c : e2.acc.sets()) - { - if (inf_fin_prefix[c] + is_odd > col) - col = inf_fin_prefix[c] + is_odd; - } - if (col != -1) - acc = { (unsigned) col }; - else - acc = {}; - res_->new_edge(src_state, dst_state, e.cond, acc); - b = e.next_succ; - } - } - return buchi->num_states(); -} - -// Create a permutation for the first state of a SCC (IAR) -void -initial_perm_iar(std::set &perm_elem, perm_t &p0, - algorithm algo, const acc_cond::mark_t &colors, - const std::vector &pairs) -{ - for (unsigned k = 0; k != pairs.size(); ++k) - if (!inf(pairs, k, algo) || (colors & (pairs[k].fin | pairs[k].inf))) - { - perm_elem.insert(k); - p0.push_back(k); - } -} - -// Create a permutation for the first state of a SCC (CAR) -void -initial_perm_car(perm_t &p0, const acc_cond::mark_t &colors) -{ - auto cont = colors.sets(); - p0.assign(cont.begin(), cont.end()); -} - -void -find_new_perm_iar(perm_t &new_perm, - const std::vector &pairs, - const acc_cond::mark_t &acc, - algorithm algo, const std::set &perm_elem, - unsigned &seen_nb) -{ - for (unsigned k : perm_elem) - if (acc & fin(pairs, k, algo)) - { - ++seen_nb; - auto it = std::find(new_perm.begin(), new_perm.end(), k); - - // move the pair in front of 
the permutation - std::rotate(new_perm.begin(), it, it + 1); - } -} - -// Given the set acc of colors appearing on an edge, create a new -// permutation new_perm, and give the number seen_nb of colors moved to -// the head of the permutation. -void -find_new_perm_car(perm_t &new_perm, const acc_cond::mark_t &acc, - unsigned &seen_nb, unsigned &h) -{ - for (unsigned k : acc.sets()) - { - auto it = std::find(new_perm.begin(), new_perm.end(), k); - if (it != new_perm.end()) - { - h = std::max(h, unsigned(it - new_perm.begin()) + 1); - std::rotate(new_perm.begin(), it, it + 1); - ++seen_nb; - } - } -} - -void -get_acceptance_iar(algorithm algo, const perm_t ¤t_perm, - const std::vector &pairs, - const acc_cond::mark_t &e_acc, acc_cond::mark_t &acc) -{ - unsigned delta_acc = (algo == IAR_Streett) && is_odd; - - // find the maximal index encountered by this transition - unsigned maxint = -1U; - - for (int k = current_perm.size() - 1; k >= 0; --k) - { - unsigned pk = current_perm[k]; - - if (!inf(pairs, pk, - algo) - || (e_acc & (pairs[pk].fin | pairs[pk].inf))) - { - maxint = k; - break; - } - } - unsigned value; - - if (maxint == -1U) - value = delta_acc; - else if (e_acc & fin(pairs, current_perm[maxint], algo)) - value = 2 * maxint + 2 + delta_acc; - else - value = 2 * maxint + 1 + delta_acc; - acc = { value }; -} - -void -get_acceptance_car(const acc_cond &sub_aut_cond, const perm_t &new_perm, - unsigned h, acc_cond::mark_t &acc) -{ - acc_cond::mark_t m(new_perm.begin(), new_perm.begin() + h); - bool rej = !sub_aut_cond.accepting(m); - unsigned value = 2 * h + rej + is_odd; - acc = { value }; -} - -unsigned -apply_lar(const twa_graph_ptr &sub_automaton, - unsigned init, std::vector &pairs, - algorithm algo, unsigned scc_num, - const std::vector& inf_fin_prefix, - unsigned max_free_color, - std::map& state2car_local, - std::map& car2num_local, - unsigned max_states) -{ - auto maps = get_mark_to_vector(sub_automaton); - // For each edge e of res_, we store the elements of the permutation - // that are not moved, and we respect the order. 
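// Taken together, find_new_perm_car() and get_acceptance_car() above make up
// one CAR step.  The following stand-alone restatement may help before
// reading the main loop of apply_lar() below; `car_step` and its `accepting`
// parameter are hypothetical names, and the +is_odd shift of the real code
// is omitted.

#include <algorithm>
#include <vector>

template <typename AcceptingPrefix>
unsigned car_step(std::vector<unsigned>& perm,
                  const std::vector<unsigned>& edge_colors,
                  AcceptingPrefix accepting)   // accepting(first, last)
{
  unsigned h = 0;
  for (unsigned k : edge_colors)
    {
      auto it = std::find(perm.begin(), perm.end(), k);
      if (it == perm.end())
        continue;                              // color not tracked here
      h = std::max(h, unsigned(it - perm.begin()) + 1);
      std::rotate(perm.begin(), it, it + 1);   // move k to the front
    }
  // Emit 2h if the h front colors form an accepting set, 2h+1 otherwise.
  bool rej = !accepting(perm.begin(), perm.begin() + h);
  return 2 * h + rej;
}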
- std::map> edge_to_colors; - unsigned nb_created_states = 0; - auto state_2_car = state_2_car_scc(sub_automaton->num_states()); - std::vector* init_states = sub_automaton-> - get_named_prop>("original-states"); - std::deque todo; - auto get_state = - [&](const car_state &s){ - auto it = car2num_local.find(s); - - if (it == car2num_local.end()) - { - ++nb_created_states; - unsigned nb = res_->new_state(); - if (options.search_ex) - state_2_car.add_res_state(s.state_scc, nb, s.perm); - car2num_local[s] = nb; - num2car.insert(num2car.begin() + nb, s); - - todo.push_back(s); - if (options.pretty_print) - names->push_back(s.to_string(algo)); - return nb; - } - return it->second; - }; - - auto colors = sub_automaton->acc().get_acceptance().used_sets(); - std::set perm_elem; - - perm_t p0 = { }; - switch (algo) - { - case IAR_Streett: - case IAR_Rabin: - initial_perm_iar(perm_elem, p0, algo, colors, pairs); - break; - case CAR: - initial_perm_car(p0, colors); - break; - default: - assert(false); - break; - } - - std::vector, std::vector>> - iar_maps; - if (algo == IAR_Streett || algo == IAR_Rabin) - iar_maps = get_iar_to_vector(sub_automaton, algo, perm_elem, pairs); - - car_state s0{ (*init_states)[init], init, p0 }; - get_state(s0); // put s0 in todo - - // the main loop - while (!todo.empty()) - { - car_state current = todo.front(); - todo.pop_front(); - - unsigned src_num = get_state(current); - for (const auto &e : sub_automaton->out(current.state_scc)) - { - perm_t new_perm = current.perm; - - // Count pairs whose fin-part is seen on this transition - unsigned seen_nb = 0; - - // consider the pairs for this SCC only - unsigned h = 0; - - switch (algo) - { - case IAR_Rabin: - case IAR_Streett: - find_new_perm_iar(new_perm, pairs, e.acc, algo, - perm_elem, seen_nb); - break; - case CAR: - find_new_perm_car(new_perm, e.acc, seen_nb, h); - break; - default: - assert(false); - } - - std::vector not_moved(new_perm.begin() + seen_nb, - new_perm.end()); - - if (options.force_order) - { - if (algo == CAR && seen_nb > 1) - { - auto map = maps[e.dst]; - acc_cond::mark_t first_vals( - new_perm.begin(), new_perm.begin() + seen_nb); - auto new_start = map.find(first_vals); - assert(new_start->second.size() >= seen_nb); - assert(new_start != map.end()); - for (unsigned i = 0; i < seen_nb; ++i) - new_perm[i] = new_start->second[i]; - } - else if ((algo == IAR_Streett || algo == IAR_Rabin) - && seen_nb > 1) - { - auto map = iar_maps[e.dst]; - std::vector first_vals( - new_perm.begin(), new_perm.begin() + seen_nb); - std::sort(std::begin(first_vals), std::end(first_vals)); - auto new_start = map.find(first_vals); - assert(new_start->second.size() >= seen_nb); - assert(new_start != map.end()); - for (unsigned i = 0; i < seen_nb; ++i) - new_perm[i] = new_start->second[i]; - } - } - - // Optimization: when several indices are seen in the - // transition, they move at the front of new_perm in any - // order. Check whether there already exists an car_state - // that matches this condition. 
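// The compatibility notion behind this optimization, spelled out as a
// stand-alone predicate (the name `compatible` is hypothetical): two
// permutations over the same colors can be identified after a move that
// touched their first `seen_nb` positions if they agree on the untouched
// tail, the touched head being the same colors in some order.

#include <algorithm>
#include <vector>

bool compatible(const std::vector<unsigned>& a,
                const std::vector<unsigned>& b,
                unsigned seen_nb)
{
  if (a.size() != b.size() || seen_nb > a.size())
    return false;
  return std::equal(a.begin() + seen_nb, a.end(), b.begin() + seen_nb)
    && std::is_permutation(a.begin(), a.begin() + seen_nb, b.begin());
}

// For instance compatible({2,1,0,3}, {1,2,0,3}, 2) holds.  The lookup below
// (state_2_car.get_res_state) returns, among such compatible states, the
// newest or the oldest one depending on options.use_last.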
- car_state dst; - unsigned dst_num = -1U; - - if (options.search_ex) - dst_num = state_2_car.get_res_state(e.dst, new_perm, seen_nb, - options.use_last); - - if (dst_num == -1U) - { - auto dst = car_state{ (*init_states)[e.dst], e.dst, new_perm }; - dst_num = get_state(dst); - if (nb_created_states > max_states) - return -1U; - } - - acc_cond::mark_t acc = { }; - - switch (algo) - { - case IAR_Rabin: - case IAR_Streett: - get_acceptance_iar(algo, current.perm, pairs, e.acc, acc); - break; - case CAR: - get_acceptance_car(sub_automaton->acc(), new_perm, h, acc); - break; - default: - assert(false); - } - - unsigned acc_col = acc.min_set() - 1; - if (options.parity_prefix) - { - if (acc_col > max_free_color) - throw std::runtime_error("CAR needs more sets"); - // parity prefix - for (auto col : e.acc.sets()) - { - if (inf_fin_prefix[col] + is_odd > (int) acc_col) - acc_col = (unsigned) inf_fin_prefix[col] + is_odd; - } - } - auto new_e = res_->new_edge(src_num, dst_num, e.cond, { acc_col }); - edge_to_colors.insert({new_e, not_moved}); - } - } - if (options.search_ex && options.use_last) - { - std::vector added_states; - std::transform(car2num_local.begin(), car2num_local.end(), - std::back_inserter(added_states), - [](std::pair pair) { - return pair.second; - }); - change_transitions_destination( - res_, added_states, edge_to_colors, state_2_car); - } - auto leaving_edge = - [&](unsigned d){ - return scc_.scc_of(num2car.at(d).state) != scc_num; - }; - auto filter_edge = - [](const twa_graph::edge_storage_t &, - unsigned dst, - void* filter_data){ - decltype(leaving_edge) *data = - static_cast(filter_data); - - if ((*data)(dst)) - return scc_info::edge_filter_choice::ignore; - - return scc_info::edge_filter_choice::keep; - }; - scc_info sub_scc(res_, get_state(s0), filter_edge, &leaving_edge); - - // SCCs are numbered in reverse topological order, so the bottom SCC has - // index 0. 
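// The numbering property used here, checked on a toy automaton: a chain of
// three trivial SCCs a -> b -> c.  An illustrative snippet, assuming the
// usual public headers; the function name is hypothetical.

#include <cassert>
#include <spot/twa/twagraph.hh>
#include <spot/twaalgos/sccinfo.hh>

void bottom_scc_demo()
{
  auto aut = spot::make_twa_graph(spot::make_bdd_dict());
  unsigned a = aut->new_state();
  unsigned b = aut->new_state();
  unsigned c = aut->new_state();
  aut->set_init_state(a);
  aut->new_edge(a, a, bddtrue);
  aut->new_edge(a, b, bddtrue);
  aut->new_edge(b, b, bddtrue);
  aut->new_edge(b, c, bddtrue);
  aut->new_edge(c, c, bddtrue);
  spot::scc_info si(aut);
  assert(si.scc_count() == 3);
  assert(si.succ(0).empty());     // SCC number 0 is a bottom SCC...
  assert(si.scc_of(c) == 0);      // ...here the one containing c.
}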
- const unsigned bscc = 0; - assert(sub_scc.scc_count() != 0); - assert(sub_scc.succ(0).empty()); - assert( - [&](){ - for (unsigned s = 1; s != sub_scc.scc_count(); ++s) - if (sub_scc.succ(s).empty()) - return false; - - return true; - } ()); - - assert(sub_scc.states_of(bscc).size() >= sub_automaton->num_states()); - - // update state2car - for (unsigned scc_state : sub_scc.states_of(bscc)) - { - car_state &car = num2car.at(scc_state); - - if (state2car_local.find(car.state) == state2car_local.end()) - state2car_local[car.state] = car; - } - return sub_scc.states_of(bscc).size(); -} - -algorithm -chooseAlgo(twa_graph_ptr &sub_automaton, - twa_graph_ptr &rabin_aut, - std::vector &pairs, - std::vector &permut) -{ - auto scc_condition = sub_automaton->acc(); - if (options.parity_equiv) - { - if (scc_condition.is_f()) - return False_clean; - if (scc_condition.is_t()) - return True_clean; - std::vector permut_tmp(scc_condition.all_sets().max_set(), -1); - - if (!is_odd && scc_condition.is_parity_max_equiv(permut_tmp, true)) - { - for (auto c : permut_tmp) - permut.push_back((unsigned) c); - - scc_condition.apply_permutation(permut); - sub_automaton->apply_permutation(permut); - return Copy_even; - } - std::fill(permut_tmp.begin(), permut_tmp.end(), -1); - if (scc_condition.is_parity_max_equiv(permut_tmp, false)) - { - for (auto c : permut_tmp) - permut.push_back((unsigned) c); - scc_condition.apply_permutation(permut); - sub_automaton->apply_permutation(permut); - return Copy_odd; - } - } - - if (options.rabin_to_buchi) - { - auto ra = rabin_to_buchi_if_realizable(sub_automaton); - if (ra != nullptr) - { - rabin_aut = ra; - return Rabin_to_Buchi; - } - else - { - bool streett_buchi = false; - auto sub_cond = sub_automaton->get_acceptance(); - sub_automaton->set_acceptance(sub_cond.complement()); - auto ra = rabin_to_buchi_if_realizable(sub_automaton); - streett_buchi = (ra != nullptr); - sub_automaton->set_acceptance(sub_cond); - if (streett_buchi) - { - rabin_aut = ra; - return Streett_to_Buchi; - } - } - } - - auto pairs1 = std::vector(); - auto pairs2 = std::vector(); - std::sort(pairs1.begin(), pairs1.end()); - pairs1.erase(std::unique(pairs1.begin(), pairs1.end()), pairs1.end()); - std::sort(pairs2.begin(), pairs2.end()); - pairs2.erase(std::unique(pairs2.begin(), pairs2.end()), pairs2.end()); - bool is_r_like = scc_condition.is_rabin_like(pairs1); - bool is_s_like = scc_condition.is_streett_like(pairs2); - unsigned num_cols = scc_condition.get_acceptance().used_sets().count(); - if (is_r_like) - { - if ((is_s_like && pairs1.size() < pairs2.size()) || !is_s_like) - { - if (pairs1.size() > num_cols) - return CAR; - pairs = pairs1; - return IAR_Rabin; - } - else if (is_s_like) - { - if (pairs2.size() > num_cols) - return CAR; - pairs = pairs2; - return IAR_Streett; - } - } - else - { - if (is_s_like) - { - if (pairs2.size() > num_cols) - return CAR; - pairs = pairs2; - return IAR_Streett; - } - } - return CAR; -} - -unsigned -build_scc(twa_graph_ptr &sub_automaton, - unsigned scc_num, - std::map& state2car_local, - std::map&car2num_local, - algorithm& algo, - unsigned max_states = -1U) -{ - - std::vector parity_prefix_colors (SPOT_MAX_ACCSETS, - - SPOT_MAX_ACCSETS - 2); - unsigned min_prefix_color = SPOT_MAX_ACCSETS + 1; - if (options.parity_prefix) - { - auto new_acc = sub_automaton->acc(); - auto colors = std::vector(); - bool inf_start = - sub_automaton->acc().has_parity_prefix(new_acc, colors); - sub_automaton->set_acceptance(new_acc); - for (unsigned i = 0; i < colors.size(); ++i) - 
parity_prefix_colors[colors[i]] = - SPOT_MAX_ACCSETS - 4 - i - !inf_start; - if (colors.size() > 0) - min_prefix_color = - SPOT_MAX_ACCSETS - 4 - colors.size() - 1 - !inf_start; - } - --min_prefix_color; - - unsigned init = 0; - - std::vector pairs = { }; - auto permut = std::vector(); - twa_graph_ptr rabin_aut = nullptr; - algo = chooseAlgo(sub_automaton, rabin_aut, pairs, permut); - switch (algo) - { - case False_clean: - case True_clean: - return apply_false_true_clean(sub_automaton, (algo == True_clean), - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local); - break; - case IAR_Streett: - case IAR_Rabin: - case CAR: - return apply_lar(sub_automaton, init, pairs, algo, scc_num, - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local, max_states); - break; - case Copy_odd: - case Copy_even: - return apply_copy(sub_automaton, permut, algo == Copy_odd, - parity_prefix_colors, state2car_local, - car2num_local); - break; - case Rabin_to_Buchi: - case Streett_to_Buchi: - return apply_to_Buchi(sub_automaton, rabin_aut, - (algo == Streett_to_Buchi), - parity_prefix_colors, min_prefix_color, - state2car_local, car2num_local); - break; - default: - break; - } - return -1U; -} - -public: -twa_graph_ptr -run() -{ - res_ = make_twa_graph(aut_->get_dict()); - res_->copy_ap_of(aut_); - for (unsigned scc = 0; scc < scc_.scc_count(); ++scc) - { - if (!scc_.is_useful_scc(scc)) - continue; - auto sub_automata = scc_.split_on_sets(scc, { }, true); - if (sub_automata.empty()) - { - for (auto state : scc_.states_of(scc)) - { - auto new_state = res_->new_state(); - car_state new_car = { state, state, perm_t() }; - car2num[new_car] = new_state; - num2car.insert(num2car.begin() + new_state, new_car); - if (options.pretty_print) - names->push_back(new_car.to_string(None)); - state2car[state] = new_car; - } - continue; - } - - auto sub_automaton = sub_automata[0]; - auto deg = sub_automaton; - if (options.acc_clean) - simplify_acceptance_here(sub_automaton); - bool has_degeneralized = false; - if (options.partial_degen) - { - std::vector forbid; - auto m = - is_partially_degeneralizable(sub_automaton, true, - true, forbid); - while (m != acc_cond::mark_t {}) - { - auto tmp = partial_degeneralize(deg, m); - simplify_acceptance_here(tmp); - if (tmp->get_acceptance().used_sets().count() - < deg->get_acceptance().used_sets().count() || - !(options.reduce_col_deg)) - { - deg = tmp; - has_degeneralized = true; - } - else - forbid.push_back(m); - m = is_partially_degeneralizable(deg, true, true, forbid); - } - } - - if (options.propagate_col) - { - propagate_marks_here(sub_automaton); - if (deg != sub_automaton) - propagate_marks_here(deg); - } - - std::map state2car_sub, state2car_deg; - std::map car2num_sub, car2num_deg; - - unsigned nb_states_deg = -1U, - nb_states_sub = -1U; - - algorithm algo_sub, algo_deg; - unsigned max_states_sub_car = -1U; - // We try with and without degeneralization and we keep the best. - if (has_degeneralized) - { - nb_states_deg = - build_scc(deg, scc, state2car_deg, car2num_deg, algo_deg); - // We suppose that if we see nb_states_deg + 1000 states when - // when construct the version without degeneralization during the - // construction, we will not be able to have nb_states_deg after - // removing useless states. So we will stop the execution. 
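// The strategy described above, reduced to its skeleton (all names are
// hypothetical): build the degeneralized variant first, then give the other
// variant a state budget derived from that size, and keep the smaller
// result when the budget was not exceeded.

#include <optional>

template <typename BuildDeg, typename BuildPlain>
unsigned keep_smaller(BuildDeg build_deg, BuildPlain build_plain,
                      unsigned slack)
{
  unsigned size_deg = build_deg();
  // build_plain() returns std::nullopt as soon as it has created more
  // states than the budget it was given.
  std::optional<unsigned> size_plain = build_plain(size_deg + slack);
  return (size_plain && *size_plain < size_deg) ? *size_plain : size_deg;
}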
- max_states_sub_car = - 10000 + nb_states_deg - 1; - } - if (!options.force_degen || !has_degeneralized) - nb_states_sub = - build_scc(sub_automaton, scc, state2car_sub, car2num_sub, - algo_sub, max_states_sub_car); - if (nb_states_deg < nb_states_sub) - { - state2car.insert(state2car_deg.begin(), state2car_deg.end()); - car2num.insert(car2num_deg.begin(), car2num_deg.end()); - algo_sub = algo_deg; - } - else - { - state2car.insert(state2car_sub.begin(), state2car_sub.end()); - car2num.insert(car2num_sub.begin(), car2num_sub.end()); - } - if ((algo_sub == IAR_Rabin || algo_sub == Copy_odd) && !is_odd) - { - is_odd = true; - for (auto &edge : res_->edges()) - { - if (scc_.scc_of(num2car[edge.src].state) != scc - && scc_.scc_of(num2car[edge.dst].state) != scc) - { - if (edge.acc == acc_cond::mark_t{}) - edge.acc = { 0 }; - else - edge.acc <<= 1; - } - } - } - } - - for (unsigned state = 0; state < res_->num_states(); ++state) - { - unsigned original_state = num2car.at(state).state; - auto state_scc = scc_.scc_of(original_state); - for (auto edge : aut_->out(original_state)) - { - if (scc_.scc_of(edge.dst) != state_scc) - { - auto car = state2car.find(edge.dst); - if (car != state2car.end()) - { - unsigned res_dst = car2num.at(car->second); - res_->new_edge(state, res_dst, edge.cond, { }); - } - } - } - } - unsigned initial_state = aut_->get_init_state_number(); - auto initial_car_ptr = state2car.find(initial_state); - car_state initial_car; - // If we take an automaton with one state and without transition, - // the SCC was useless so state2car doesn't have initial_state - if (initial_car_ptr == state2car.end()) - { - assert(res_->num_states() == 0); - auto new_state = res_->new_state(); - car_state new_car = {initial_state, 0, perm_t()}; - state2car[initial_state] = new_car; - if (options.pretty_print) - names->push_back(new_car.to_string(None)); - num2car.insert(num2car.begin() + new_state, new_car); - car2num[new_car] = new_state; - initial_car = new_car; - } - else - initial_car = initial_car_ptr->second; - auto initial_state_res = car2num.find(initial_car); - if (initial_state_res != car2num.end()) - res_->set_init_state(initial_state_res->second); - else - res_->new_state(); - if (options.pretty_print) - res_->set_named_prop("state-names", names); - - res_->purge_unreachable_states(); - // If parity_prefix is used, we use all available colors by - // default: The IAR/CAR are using lower indices, and the prefix is - // using the upper indices. So we use reduce_parity() to clear - // the mess. If parity_prefix is not used, - unsigned max_color = SPOT_MAX_ACCSETS; - if (!options.parity_prefix) - { - acc_cond::mark_t all = {}; - for (auto& e: res_->edges()) - all |= e.acc; - max_color = all.max_set(); - } - res_->set_acceptance(acc_cond::acc_code::parity_max(is_odd, max_color)); - if (options.parity_prefix) - reduce_parity_here(res_); - return res_; -} - -private: -const const_twa_graph_ptr &aut_; -const scc_info scc_; -twa_graph_ptr res_; -// Says if we constructing an odd or even max -bool is_odd; - -std::vector num2car; -std::map state2car; -std::map car2num; - -to_parity_options options; - -std::vector* names; -}; // car_generator - -}// namespace - - -twa_graph_ptr -to_parity(const const_twa_graph_ptr &aut, const to_parity_options options) -{ - return car_generator(aut, options).run(); -} - - // Old version of CAR. - namespace - { - struct lar_state - { - unsigned state; - std::vector perm; - - bool operator<(const lar_state& s) const - { - return state == s.state ? 
perm < s.perm : state < s.state; - } - - std::string to_string() const - { - std::stringstream s; - s << state << " ["; - unsigned ps = perm.size(); - for (unsigned i = 0; i != ps; ++i) - { - if (i > 0) - s << ','; - s << perm[i]; - } - s << ']'; - return s.str(); - } - }; - - class lar_generator - { - const const_twa_graph_ptr& aut_; - twa_graph_ptr res_; - const bool pretty_print; - - std::map lar2num; - public: - explicit lar_generator(const const_twa_graph_ptr& a, bool pretty_print) - : aut_(a) - , res_(nullptr) - , pretty_print(pretty_print) - {} - - twa_graph_ptr run() - { - res_ = make_twa_graph(aut_->get_dict()); - res_->copy_ap_of(aut_); - - std::deque todo; - auto get_state = [this, &todo](const lar_state& s) - { - auto it = lar2num.emplace(s, -1U); - if (it.second) // insertion took place - { - unsigned nb = res_->new_state(); - it.first->second = nb; - todo.push_back(s); - } - return it.first->second; - }; - - std::vector initial_perm(aut_->num_sets()); - std::iota(initial_perm.begin(), initial_perm.end(), 0); - { - lar_state s0{aut_->get_init_state_number(), initial_perm}; - res_->set_init_state(get_state(s0)); - } - - scc_info si(aut_, scc_info_options::NONE); - // main loop - while (!todo.empty()) - { - lar_state current = todo.front(); - todo.pop_front(); - - // TODO: todo could store this number to avoid one lookup - unsigned src_num = get_state(current); - - unsigned source_scc = si.scc_of(current.state); - for (const auto& e : aut_->out(current.state)) - { - // find the new permutation - std::vector new_perm = current.perm; - unsigned h = 0; - for (unsigned k : e.acc.sets()) - { - auto it = std::find(new_perm.begin(), new_perm.end(), k); - h = std::max(h, unsigned(new_perm.end() - it)); - std::rotate(it, it+1, new_perm.end()); - } - - if (source_scc != si.scc_of(e.dst)) - { - new_perm = initial_perm; - h = 0; - } - - lar_state dst{e.dst, new_perm}; - unsigned dst_num = get_state(dst); - - // Do the h last elements satisfy the acceptance condition? - // If they do, emit 2h, if they don't emit 2h+1. 
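// The rule in the comment above, in isolation.  An illustrative snippet:
// the function name and the choice of a two-set generalized Büchi condition
// are arbitrary, and the header path is assumed to be the usual public one.

#include <cassert>
#include <vector>
#include <spot/twa/acc.hh>

void lar_color_demo()
{
  spot::acc_cond cond(2, spot::acc_cond::acc_code::generalized_buchi(2));
  std::vector<unsigned> perm = {0, 1};  // colors, most recently used last
  unsigned h = 2;                       // both colors were just touched
  spot::acc_cond::mark_t m(perm.end() - h, perm.end());
  bool rej = !cond.accepting(m);
  assert(2 * h + rej == 4);             // even color: the h last sets accept
}

// The code below performs the same computation on the permutation kept in
// each lar_state, and the result is read under a parity max even condition.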
- acc_cond::mark_t m(new_perm.end() - h, new_perm.end()); - bool rej = !aut_->acc().accepting(m); - res_->new_edge(src_num, dst_num, e.cond, {2*h + rej}); - } - } - - // parity max even - unsigned sets = 2*aut_->num_sets() + 2; - res_->set_acceptance(sets, acc_cond::acc_code::parity_max_even(sets)); - - if (pretty_print) - { - auto names = new std::vector(res_->num_states()); - for (const auto& p : lar2num) - (*names)[p.second] = p.first.to_string(); - res_->set_named_prop("state-names", names); - } - - return res_; - } - }; - } - twa_graph_ptr - to_parity_old(const const_twa_graph_ptr& aut, bool pretty_print) + iar_maybe(const const_twa_graph_ptr &aut, bool pretty_print) { - if (!aut->is_existential()) - throw std::runtime_error("LAR does not handle alternation"); - // if aut is already parity return it as is - if (aut->acc().is_parity()) - return std::const_pointer_cast(aut); - - lar_generator gen(aut, pretty_print); - return gen.run(); + return iar_maybe_(aut, pretty_print); } - } diff --git a/spot/twaalgos/toparity.hh b/spot/twaalgos/toparity.hh index 7d2701581..d82403aa5 100644 --- a/spot/twaalgos/toparity.hh +++ b/spot/twaalgos/toparity.hh @@ -19,10 +19,24 @@ #pragma once -#include +#include +#include +#include namespace spot { + /// Structure used by to_parity to store some information about the + /// construction + struct to_parity_data + { + /// Total number of states created + unsigned nb_states_created = 0; + /// Total number of edges created + unsigned nb_edges_created = 0; + /// Name of algorithms used + std::vector algorithms_used; + }; + /// \ingroup twa_acc_transform /// \brief Options to control various optimizations of to_parity(). struct to_parity_options @@ -35,6 +49,9 @@ namespace spot /// most recent state when we find multiple existing state /// compatible with the current move. bool use_last = true; + /// If \c use_last_post_process is true, then when LAR ends, it tries to + /// replace the destination of an edge by the newest compatible state. + bool use_last_post_process = false; /// If \c force_order is true, we force to use an order when CAR or IAR is /// applied. Given a state s and a set E ({0}, {0 1}, {2} for example) we /// construct an order on colors. With the given example, we ask to have @@ -45,16 +62,26 @@ namespace spot /// degeneralization to remove occurrences of acceptance /// subformulas such as `Fin(x) | Fin(y)` or `Inf(x) & Inf(y)`. bool partial_degen = true; - /// If \c force_degen is false, to_parity will checks if we can - /// get a better result if we don't apply partial_degeneralize. - bool force_degen = true; /// If \c scc_acc_clean is true, to_parity() will ignore colors /// not occurring in an SCC while processing this SCC. bool acc_clean = true; /// If \c parity_equiv is true, to_parity() will check if there - /// exists a permutations of colors such that the acceptance - /// condition is a parity condition. + /// exists a way to see the acceptance condition as a parity max one. bool parity_equiv = true; + /// If \c Car is true, to_parity will try to apply CAR. It is a version of + /// LAR that tracks colors. + bool car = true; + /// If \c tar is true, to_parity will try to apply TAR. It is a version of + /// LAR that tracks transitions instead of colors. + bool tar = false; + /// If \c iar is true, to_parity will try to apply IAR. + bool iar = true; + /// if \c lar_dfs is true, then when LAR is used the next state of the + /// result that will be processed is the last created state. 
+ bool lar_dfs = true; + /// If \c bscc is true, to_parity() will only keep the bottommost automaton + /// when it applies LAR. + bool bscc = true; /// If \c parity_prefix is true, to_parity() will use a special /// handling for acceptance conditions of the form `Inf(m0) | /// (Fin(m1) & (Inf(m2) | (… β)))` that start as a parity @@ -62,30 +89,42 @@ namespace spot /// `β` can be an arbitrary formula. In this case, the paritization /// algorithm is really applied only to `β`, and the marks of the /// prefix are appended after a suitable renumbering. - /// - /// For technical reasons, activating this option (and this is the - /// default) causes reduce_parity() to be called at the end to - /// minimize the number of colors used. It is therefore - /// recommended to disable this option when one wants to follow - /// the output CAR/IAR constructions. bool parity_prefix = true; + /// If \c parity_prefix_general is true, to_parity() will rewrite the + /// acceptance condition as `Inf(m0) | (Fin(m1) & (Inf(m2) | (… β)))` before + /// applying the same construction as with the option \c parity_prefix. + bool parity_prefix_general = false; + /// If \c generic_emptiness is true, to_parity() will check if the automaton + /// does not accept any word with an emptiness check algorithm. + bool generic_emptiness = false; /// If \c rabin_to_buchi is true, to_parity() tries to convert a Rabin or /// Streett condition to Büchi or co-Büchi with /// rabin_to_buchi_if_realizable(). bool rabin_to_buchi = true; - /// Only allow degeneralization if it reduces the number of colors in the - /// acceptance condition. + /// If \c buchi_type_to_buchi is true, to_parity converts a + /// (co-)Büchi type automaton to a (co-)Büchi automaton. + bool buchi_type_to_buchi = false; + /// If \c parity_type_to_parity is true, to_parity converts a + /// parity type automaton to a parity automaton. + bool parity_type_to_parity = false; + /// Only allow partial degeneralization if it reduces the number of colors + /// in the acceptance condition or if it implies to apply IAR instead of + /// CAR. bool reduce_col_deg = false; /// Use propagate_marks_here to increase the number of marks on transition /// in order to move more colors (and increase the number of /// compatible states) when we apply LAR. bool propagate_col = true; + /// If \c use_generalized_buchi is true, each SCC will use a generalized + /// Rabin acceptance in order to avoid CAR. + bool use_generalized_rabin = false; /// If \c pretty_print is true, states of the output automaton are /// named to help debugging. bool pretty_print = false; + /// Structure used to store some information about the construction. + to_parity_data* datas = nullptr; }; - /// \ingroup twa_acc_transform /// \brief Take an automaton with any acceptance condition and return an /// equivalent parity automaton. @@ -156,4 +195,30 @@ namespace spot SPOT_API twa_graph_ptr // deprecated since Spot 2.9 iar_maybe(const const_twa_graph_ptr& aut, bool pretty_print = false); -} // namespace spot + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a parity max automaton preserving + /// structure when possible. + /// + /// Return nullptr if no such automaton is found. + /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + parity_type_to_parity(const twa_graph_ptr &aut); + + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a Büchi automaton preserving structure + /// when possible. + /// + /// Return nullptr if no such automaton is found. 
+ /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + buchi_type_to_buchi(const twa_graph_ptr &aut); + + /// \ingroup twa_acc_transform + /// \brief Convert an automaton into a co-Büchi automaton preserving structure + /// when possible. + /// + /// Return nullptr if no such automaton is found. + /// \param aut Automaton that we want to convert + SPOT_API twa_graph_ptr + co_buchi_type_to_co_buchi(const twa_graph_ptr &aut); +} diff --git a/spot/twaalgos/toweak.cc b/spot/twaalgos/toweak.cc index 8f62477a4..ae7a0f58a 100644 --- a/spot/twaalgos/toweak.cc +++ b/spot/twaalgos/toweak.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et Développement -// de l'Epita (LRDE). +// Copyright (C) 2017, 2018, 2021, 2022 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -179,14 +179,11 @@ namespace spot for (bdd oneletter: minterms_of(letters, ap)) { - minato_isop isop(delta & oneletter); - bdd cube; + minato_isop isop(bdd_restrict(delta, oneletter)); + bdd dest; - while ((cube = isop.next()) != bddfalse) + while ((dest = isop.next()) != bddfalse) { - bdd cond = bdd_exist(cube, all_states_); - bdd dest = bdd_existcomp(cube, all_states_); - states.clear(); while (dest != bddtrue) { @@ -199,7 +196,7 @@ namespace spot } res_->new_univ_edge(new_state(st.id, st.rank, st.mark), states.begin(), states.end(), - cond, mark); + oneletter, mark); } } todo_.pop(); diff --git a/spot/twaalgos/translate.cc b/spot/twaalgos/translate.cc index 4db8643f9..339463426 100644 --- a/spot/twaalgos/translate.cc +++ b/spot/twaalgos/translate.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020-2021 Laboratoire de Recherche et +// Copyright (C) 2013-2018, 2020-2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
@@ -29,6 +29,7 @@ #include #include #include +#include namespace spot { @@ -61,15 +62,17 @@ namespace spot gf_guarantee_set_ = true; } ltl_split_ = opt->get("ltl-split", 1); - int tls_max_states = opt->get("tls-max-states", 64); - tls_max_states_ = std::max(0, tls_max_states); + tls_max_states_ = std::max(0, opt->get("tls-max-states", 64)); + tls_max_ops_ = std::max(0, opt->get("tls-max-ops", 16)); exprop_ = opt->get("exprop", -1); + branchpost_ = opt->get("branch-post", -1); } void translator::build_simplifier(const bdd_dict_ptr& dict) { tl_simplifier_options options(false, false, false); options.containment_max_states = tls_max_states_; + options.containment_max_ops = tls_max_ops_; switch (level_) { case High: @@ -135,6 +138,9 @@ namespace spot twa_graph_ptr aut; twa_graph_ptr aut2 = nullptr; + bool split_hard = + type_ == Generic || (type_ & Parity) || type_ == GeneralizedBuchi; + if (ltl_split_ && !r.is_syntactic_obligation()) { formula r2 = r; @@ -144,11 +150,11 @@ namespace spot r2 = r2[0]; ++leading_x; } - if (type_ == Generic || type_ == GeneralizedBuchi) + if (split_hard) { - // F(q|u|f) = q|F(u)|F(f) only for generic acceptance + // F(q|u|f) = q|F(u)|F(f) disabled for GeneralizedBuchi // G(q&e&f) = q&G(e)&G(f) - bool want_u = r2.is({op::F, op::Or}) && (type_ == Generic); + bool want_u = r2.is({op::F, op::Or}) && (type_ != GeneralizedBuchi); if (want_u || r2.is({op::G, op::And})) { std::vector susp; @@ -203,28 +209,30 @@ namespace spot if (!rest.empty() && !oblg.empty()) { auto safety = [](formula f) - { - return f.is_syntactic_safety(); - }; + { + // Prevent gcc 12.2.0 from warning us that f could be a + // nullptr formula. + SPOT_ASSUME(f != nullptr); + return f.is_syntactic_safety(); + }; auto i = std::remove_if(oblg.begin(), oblg.end(), safety); rest.insert(rest.end(), i, oblg.end()); oblg.erase(i, oblg.end()); } + // The only cases where we accept susp and rest to be both + // non-empty is when doing Generic/Parity/TGBA if (!susp.empty()) { - // The only cases where we accept susp and rest to be both - // non-empty is when doing Generic acceptance or TGBA. - if (!rest.empty() - && !(type_ == Generic || type_ == GeneralizedBuchi)) + if (!rest.empty() && !split_hard) { rest.insert(rest.end(), susp.begin(), susp.end()); susp.clear(); } // For Parity, we want to translate all suspendable // formulas at once. - if (rest.empty() && type_ & Parity) - susp = { formula::multop(r2.kind(), susp) }; + //if (rest.empty() && type_ & Parity) + // susp = { formula::multop(r2.kind(), susp) }; } // For TGBA and BA, we only split if there is something to // suspend. @@ -383,6 +391,7 @@ namespace spot || type_ == GeneralizedBuchi) aut2 = gf_guarantee_to_ba_maybe(r, simpl_->get_dict(), det, state_based_); + acd_was_used_ = false; if (aut2 && (pref_ & Deterministic)) return finalize(aut2); if (!aut2 && (type_ == Generic @@ -398,9 +407,22 @@ namespace spot bool exprop = unambiguous || (level_ == postprocessor::High && exprop_ != 0) || exprop_ > 0; + // branch-post: 1 == force branching postponement + // 0 == disable branching post. and delay_branching + // 2 == force delay_branching + // -1 == auto (delay_branching) + // Some quick experiments suggests that branching postponement + // can produce larger automata on non-obligations formulas, and + // that even on obligation formulas, delay_branching is faster. 
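// Caller-side view of the knob documented above.  A sketch assuming the
// option_map-based constructor also used by the command-line tools; the
// demo function and the formula are arbitrary.

#include <iostream>
#include <spot/misc/optionmap.hh>
#include <spot/tl/parse.hh>
#include <spot/twaalgos/hoa.hh>
#include <spot/twaalgos/translate.hh>

void branch_post_demo()
{
  spot::option_map extra;
  extra.parse_options("branch-post=2");   // 0, 1, 2, or -1 as listed above
  spot::translator trans(&extra);
  auto aut = trans.run(spot::parse_formula("GFa -> GFb"));
  spot::print_hoa(std::cout, aut) << '\n';
}

// Inside the translator, the option only selects between branching
// postponement and delay_branching, as handled below.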
+ bool bpost = branchpost_ == 1; aut = ltl_to_tgba_fm(r, simpl_->get_dict(), exprop, - true, false, false, nullptr, nullptr, + true, bpost, false, nullptr, nullptr, unambiguous); + if (!bpost && branchpost_ != 0 && delay_branching_here(aut)) + { + aut->purge_unreachable_states(); + aut->merge_edges(); + } } aut = this->postprocessor::run(aut, r); diff --git a/spot/twaalgos/translate.hh b/spot/twaalgos/translate.hh index 9dc6b12d2..8428a2f22 100644 --- a/spot/twaalgos/translate.hh +++ b/spot/twaalgos/translate.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013-2018, 2020 Laboratoire de Recherche et Développement +// Copyright (C) 2013-2018, 2020, 2022 Laboratoire de Recherche et Développement // de l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -154,7 +154,9 @@ namespace spot bool gf_guarantee_ = true; bool gf_guarantee_set_ = false; bool ltl_split_; - unsigned tls_max_states_ = 0; + int branchpost_ = -1; + unsigned tls_max_states_ = 64; + unsigned tls_max_ops_ = 16; int exprop_; const option_map* opt_; }; diff --git a/spot/twaalgos/translate_aa.cc b/spot/twaalgos/translate_aa.cc new file mode 100644 index 000000000..c18570d41 --- /dev/null +++ b/spot/twaalgos/translate_aa.cc @@ -0,0 +1,485 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2013-2018, 2020-2021 Laboratoire de Recherche et +// Développement de l'Epita (LRDE). +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
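// Illustrative caller for the entry point defined in this file, using the
// signature declared in translate_aa.hh.  The demo function, the formula,
// and the extra includes are not part of the patch.

#include <iostream>
#include <spot/tl/parse.hh>
#include <spot/twa/bdddict.hh>
#include <spot/twaalgos/hoa.hh>
#include <spot/twaalgos/translate_aa.hh>

void ltl_to_aa_demo()
{
  spot::bdd_dict_ptr dict = spot::make_bdd_dict();
  // Builds a co-Büchi alternating automaton with a single accepting sink.
  auto aut = spot::ltl_to_aa(spot::parse_formula("a U Xb"), dict,
                             /* purge_dead_states = */ true);
  spot::print_hoa(std::cout, aut) << '\n';
}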
+ +#include "config.h" +#include +#include +#include +#include + +#include + +namespace spot +{ + namespace + { + struct ltl_to_aa_builder + { + ltl_to_aa_builder(twa_graph_ptr aut, unsigned accepting_sink) + : aut_(aut) + , accepting_sink_(accepting_sink) + , uniq_(aut_->get_graph(), accepting_sink) + , oe_(aut_, accepting_sink) + { + } + + ~ltl_to_aa_builder() + { + aut_->get_dict()->unregister_all_my_variables(this); + } + + twa_graph_ptr aut_; + unsigned accepting_sink_; + internal::univ_dest_mapper uniq_; + outedge_combiner oe_; + + void add_self_loop(twa_graph::edge_storage_t const& e, + unsigned init_state, + acc_cond::mark_t acc) + { + if (e.dst == accepting_sink_) + { + // avoid creating a univ_dests vector if the only dest is an + // accepting sink, which can be simplified, only keeping the self + // loop + aut_->new_edge(init_state, init_state, e.cond, acc); + return; + } + + auto dests = aut_->univ_dests(e); + std::vector new_dests(dests.begin(), dests.end()); + new_dests.push_back(init_state); + + unsigned dst = uniq_.new_univ_dests(new_dests.begin(), + new_dests.end()); + aut_->new_edge(init_state, dst, e.cond, acc); + } + + unsigned copy_sere_aut_to_res(twa_graph_ptr sere_aut, + std::map& old_to_new, + std::vector* acc_states = nullptr, + bool use_accepting_sink = true) + { + unsigned ns = sere_aut->num_states(); + + // TODO: create all new states at once, keeping an initial offset (the + // number of states already present in aut_) + aut_->copy_ap_of(sere_aut); + auto register_state = [&](unsigned st) -> unsigned { + auto p = old_to_new.emplace(st, 0); + if (p.second) + { + unsigned new_st = aut_->new_state(); + p.first->second = new_st; + if (acc_states != nullptr && sere_aut->state_is_accepting(st)) + acc_states->push_back(new_st); + } + return p.first->second; + }; + + for (unsigned st = 0; st < ns; ++st) + { + unsigned new_st = register_state(st); + for (const auto& e : sere_aut->out(st)) + { + if (use_accepting_sink && sere_aut->state_is_accepting(e.dst)) + aut_->new_edge(new_st, accepting_sink_, e.cond); + else + aut_->new_edge(new_st, register_state(e.dst), e.cond); + } + } + + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + return it->second; + } + + + unsigned recurse(formula f) + { + switch (f.kind()) + { + case op::ff: + return aut_->new_state(); + + case op::tt: + { + unsigned init_state = aut_->new_state(); + aut_->new_edge(init_state, accepting_sink_, bddtrue, {}); + return init_state; + } + + case op::ap: + case op::Not: + { + unsigned init_state = aut_->new_state(); + + bdd ap; + if (f.kind() == op::ap) + ap = bdd_ithvar(aut_->register_ap(f.ap_name())); + else + ap = bdd_nithvar(aut_->register_ap(f[0].ap_name())); + + aut_->new_edge(init_state, accepting_sink_, ap, {}); + return init_state; + } + + // FIXME: is this right for LTLf? 
+ case op::strong_X: + case op::X: + { + unsigned sub_init_state = recurse(f[0]); + unsigned new_init_state = aut_->new_state(); + aut_->new_edge(new_init_state, sub_init_state, bddtrue, {}); + return new_init_state; + } + + case op::Or: + { + unsigned init_state = aut_->new_state(); + + for (const auto& sub_formula : f) + { + unsigned sub_init = recurse(sub_formula); + for (auto& e : aut_->out(sub_init)) + { + unsigned dst = e.dst; + if (aut_->is_univ_dest(e.dst)) + { + auto dests = aut_->univ_dests(e); + dst = uniq_.new_univ_dests(dests.begin(), dests.end()); + } + aut_->new_edge(init_state, dst, e.cond, {}); + } + } + + return init_state; + } + + case op::And: + { + unsigned init_state = aut_->new_state(); + + outedge_combiner oe(aut_, accepting_sink_); + bdd comb = bddtrue; + for (const auto& sub_formula : f) + { + unsigned sub_init = recurse(sub_formula); + comb &= oe_(sub_init); + } + oe_.new_dests(init_state, comb); + + return init_state; + } + + case op::U: + case op::W: + { + auto acc = f.kind() == op::U + ? acc_cond::mark_t{0} + : acc_cond::mark_t{}; + + unsigned init_state = aut_->new_state(); + + unsigned lhs_init = recurse(f[0]); + unsigned rhs_init = recurse(f[1]); + + std::vector new_dests; + for (auto& e : aut_->out(lhs_init)) + add_self_loop(e, init_state, acc); + + for (auto& e : aut_->out(rhs_init)) + { + unsigned dst = e.dst; + if (aut_->is_univ_dest(e.dst)) + { + auto dests = aut_->univ_dests(e); + dst = uniq_.new_univ_dests(dests.begin(), dests.end()); + } + aut_->new_edge(init_state, dst, e.cond, {}); + } + + return init_state; + } + + case op::R: + case op::M: + { + auto acc = f.kind() == op::M + ? acc_cond::mark_t{0} + : acc_cond::mark_t{}; + + unsigned init_state = aut_->new_state(); + + unsigned lhs_init = recurse(f[0]); + unsigned rhs_init = recurse(f[1]); + + for (auto& e : aut_->out(rhs_init)) + add_self_loop(e, init_state, acc); + + bdd comb = oe_(lhs_init); + comb &= oe_(rhs_init); + oe_.new_dests(init_state, comb); + + return init_state; + } + + // F(phi) = tt U phi + case op::F: + { + auto acc = acc_cond::mark_t{0}; + + // if phi is boolean then we can reuse its initial state (otherwise + // we can't because of potential self loops) + if (f[0].is_boolean()) + { + unsigned init_state = recurse(f[0]); + aut_->new_edge(init_state, init_state, bddtrue, acc); + return init_state; + } + + unsigned init_state = aut_->new_state(); + unsigned sub_init = recurse(f[0]); + + aut_->new_edge(init_state, init_state, bddtrue, acc); + + for (auto& e : aut_->out(sub_init)) + aut_->new_edge(init_state, e.dst, e.cond, {}); + + return init_state; + } + + // G phi = ff R phi + case op::G: + { + unsigned init_state = aut_->new_state(); + + unsigned sub_init = recurse(f[0]); + + // translate like R, but only the self loop part; `ff` cancels out + // the product of edges + std::vector new_dests; + for (auto& e : aut_->out(sub_init)) + add_self_loop(e, init_state, {}); + + return init_state; + } + + case op::EConcat: + { + unsigned rhs_init = recurse(f[1]); + const auto& dict = aut_->get_dict(); + twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + + // TODO: this should be a std::vector ! 
+ std::vector acc_states; + std::map old_to_new; + copy_sere_aut_to_res(sere_aut, old_to_new, &acc_states, false); + + std::vector acc_edges; + unsigned ns = sere_aut->num_states(); + for (unsigned st = 0; st < ns; ++st) + { + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned new_st = it->second; + + for (auto& e : aut_->out(new_st)) + { + e.acc = acc_cond::mark_t{0}; + if (std::find(acc_states.begin(), acc_states.end(), e.dst) + != acc_states.end()) + acc_edges.push_back(aut_->edge_number(e)); + } + } + + for (unsigned i : acc_edges) + { + auto& e1 = aut_->edge_storage(i); + for (const auto& e2 : aut_->out(rhs_init)) + aut_->new_edge(e1.src, e2.dst, e1.cond & e2.cond); + } + + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + + return it->second; + } + + case op::UConcat: + { + unsigned rhs_init = recurse(f[1]); + const auto& dict = aut_->get_dict(); + twa_graph_ptr sere_aut = derive_finite_automaton_with_first(f[0], dict); + + // DFA recognizes the empty language, so {0} []-> rhs is always true + unsigned ns = sere_aut->num_states(); + bool has_accepting_state = false; + for (unsigned st = 0; st < ns && !has_accepting_state; ++st) + has_accepting_state = sere_aut->state_is_accepting(st); + if (!has_accepting_state) + return accepting_sink_; + + std::map old_to_new; + std::map state_to_var; + std::map var_to_state; + bdd vars = bddtrue; + bdd aps = sere_aut->ap_vars(); + std::vector univ_dest; + // TODO: this should be a std::vector ! + std::vector acc_states; + + // registers a state in various maps and returns the index of the + // anonymous bdd var representing that state + auto register_state = [&](unsigned st) -> int { + auto p = state_to_var.emplace(st, 0); + if (p.second) + { + int v = dict->register_anonymous_variables(1, this); + p.first->second = v; + + unsigned new_st = aut_->new_state(); + old_to_new.emplace(st, new_st); + var_to_state.emplace(v, new_st); + + if (sere_aut->state_is_accepting(st)) + acc_states.push_back(new_st); + + vars &= bdd_ithvar(v); + } + + return p.first->second; + }; + + aut_->copy_ap_of(sere_aut); + for (unsigned st = 0; st < ns; ++st) + { + register_state(st); + + bdd sig = bddfalse; + for (const auto& e : sere_aut->out(st)) + { + int st_bddi = register_state(e.dst); + sig |= e.cond & bdd_ithvar(st_bddi); + } + + for (bdd cond : minterms_of(bddtrue, aps)) + { + bdd dest = bdd_appex(sig, cond, bddop_and, aps); + while (dest != bddfalse) + { + assert(bdd_high(dest) == bddtrue); + auto it = var_to_state.find(bdd_var(dest)); + assert(it != var_to_state.end()); + univ_dest.push_back(it->second); + dest = bdd_low(dest); + } + + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned src = it->second; + + unsigned dst = univ_dest.empty() + ? 
accepting_sink_ + : (uniq_.new_univ_dests(univ_dest.begin(), + univ_dest.end())); + + aut_->new_edge(src, dst, cond, {}); + univ_dest.clear(); + } + } + + for (unsigned st = 0; st < ns; ++st) + { + auto it = old_to_new.find(st); + assert(it != old_to_new.end()); + unsigned new_st = it->second; + + bdd comb = bddtrue; + comb &= oe_(new_st, acc_states, true); + if (comb != bddtrue) + { + comb &= oe_(rhs_init); + oe_.new_dests(new_st, comb); + } + } + + auto it = old_to_new.find(sere_aut->get_init_state_number()); + assert(it != old_to_new.end()); + + aut_->merge_edges(); + return it->second; + } + + case op::Closure: + { + twa_graph_ptr sere_aut = + derive_finite_automaton_with_first(f[0], aut_->get_dict()); + std::map old_to_new; + return copy_sere_aut_to_res(sere_aut, old_to_new); + } + + case op::NegClosure: + case op::NegClosureMarked: + case op::eword: + case op::Xor: + case op::Implies: + case op::Equiv: + case op::EConcatMarked: + case op::OrRat: + case op::AndRat: + case op::AndNLM: + case op::Concat: + case op::Fusion: + case op::Star: + case op::FStar: + case op::first_match: + SPOT_UNREACHABLE(); + return -1; + } + + SPOT_UNREACHABLE(); + } + }; + } + + twa_graph_ptr + ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states) + { + f = negative_normal_form(f); + + auto aut = make_twa_graph(dict); + aut->set_co_buchi(); + + unsigned accepting_sink = aut->new_state(); + aut->new_edge(accepting_sink, accepting_sink, bddtrue, {}); + auto builder = ltl_to_aa_builder(aut, accepting_sink); + + unsigned init_state = builder.recurse(f); + aut->set_init_state(init_state); + + if (purge_dead_states) + { + aut->purge_dead_states(); + aut->merge_edges(); + } + + return aut; + } +} diff --git a/spot/twaalgos/translate_aa.hh b/spot/twaalgos/translate_aa.hh new file mode 100644 index 000000000..9a8760072 --- /dev/null +++ b/spot/twaalgos/translate_aa.hh @@ -0,0 +1,32 @@ +// -*- coding: utf-8 -*- +// Copyright (C) 2010-2015, 2017, 2019-2020 Laboratoire de +// Recherche et Développement de l'Epita (LRDE). +// Copyright (C) 2003, 2004, 2005, 2006 Laboratoire d'Informatique de +// Paris 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), +// Université Pierre et Marie Curie. +// +// This file is part of Spot, a model checking library. +// +// Spot is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation; either version 3 of the License, or +// (at your option) any later version. +// +// Spot is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +// License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#pragma once + +#include +#include + +namespace spot +{ + SPOT_API twa_graph_ptr + ltl_to_aa(formula f, bdd_dict_ptr& dict, bool purge_dead_states = false); +} diff --git a/spot/twaalgos/word.hh b/spot/twaalgos/word.hh index f6f70fc14..979a4070b 100644 --- a/spot/twaalgos/word.hh +++ b/spot/twaalgos/word.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2013, 2014, 2015, 2016, 2018, 2019 Laboratoire de Recherche et -// Développement de l'Epita (LRDE). +// Copyright (C) 2013-2016, 2018-2019, 2023 Laboratoire de Recherche +// et Développement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. 
// @@ -80,6 +80,9 @@ namespace spot /// \brief Convert the twa_word as an automaton. /// + /// Convert the twa_word into a lasso-shapred automaton + /// with "true" acceptance condition. + /// /// This is useful to evaluate a word on an automaton. twa_graph_ptr as_automaton() const; diff --git a/spot/twaalgos/zlktree.cc b/spot/twaalgos/zlktree.cc index 2f87e6352..f31c46896 100644 --- a/spot/twaalgos/zlktree.cc +++ b/spot/twaalgos/zlktree.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -109,7 +109,8 @@ namespace spot } } - zielonka_tree::zielonka_tree(const acc_cond& cond) + zielonka_tree::zielonka_tree(const acc_cond& cond, + zielonka_tree_options opt) { const acc_cond::acc_code& code = cond.get_acceptance(); auto all = cond.all_sets(); @@ -120,11 +121,47 @@ namespace spot nodes_[0].colors = all; nodes_[0].level = 0; + robin_hood::unordered_node_map nmap; + std::vector models; // This loop is a BFS over the increasing set of nodes. for (unsigned node = 0; node < nodes_.size(); ++node) { acc_cond::mark_t colors = nodes_[node].colors; + unsigned nextlvl = nodes_[node].level + 1; + + // Have we already seen this combination of colors previously? + // If yes, simply copy the children. + if (auto p = nmap.emplace(colors, node); !p.second) + { + unsigned fc = nodes_[p.first->second].first_child; + if (!fc) // this is a leaf + { + ++num_branches_; + continue; + } + if (!!(opt & zielonka_tree_options::MERGE_SUBTREES)) + { + nodes_[node].first_child = fc; + continue; + } + unsigned child = fc; + unsigned first = nodes_.size(); + nodes_[node].first_child = first; + do + { + auto& c = nodes_[child]; + child = c.next_sibling; + nodes_.push_back({node, static_cast(nodes_.size() + 1), + 0, nextlvl, c.colors}); + } + while (child != fc); + nodes_.back().next_sibling = first; + // We do not have to test the shape since this is the second time + // we see these colors; + continue; + } + bool is_accepting = code.accepting(colors); if (node == 0) is_even_ = is_accepting; @@ -145,15 +182,32 @@ namespace spot nodes_.reserve(first + num_children); for (auto& m: models) nodes_.push_back({node, static_cast(nodes_.size() + 1), - 0, nodes_[node].level + 1, m.model}); + 0, nextlvl, m.model}); nodes_.back().next_sibling = first; if (num_children > 1) { + bool abort = false; if (is_accepting) - has_rabin_shape_ = false; + { + has_rabin_shape_ = false; + if (!!(opt & zielonka_tree_options::ABORT_WRONG_SHAPE) + && !!(opt & zielonka_tree_options::CHECK_RABIN)) + abort = true; + } else - has_streett_shape_ = false; + { + has_streett_shape_ = false; + if (!!(opt & zielonka_tree_options::ABORT_WRONG_SHAPE) + && !!(opt & zielonka_tree_options::CHECK_STREETT)) + abort = true; + } + if (abort) + { + nodes_.clear(); + num_branches_ = 0; + return; + } } } @@ -523,14 +577,18 @@ namespace spot do { auto& c = nodes_[child]; + // We have to read anything we need from C + // before emplace_back, which may reallocate. 
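The two comment lines just above, together with the reordered assignments that follow, guard against a classic C++ pitfall: c is a reference into nodes_, and growing that same vector with emplace_back may reallocate its storage and invalidate the reference. Here is a minimal standalone sketch of the same pattern, assuming nothing from Spot; the node struct and the function names are made up for illustration.

#include <vector>

struct node { unsigned parent; int colors; };

// Buggy pattern (the one the patch removes): 'c' refers into 'nodes', and
// emplace_back may reallocate the vector, leaving 'c' dangling.
void add_copy_bad(std::vector<node>& nodes, unsigned child)
{
  node& c = nodes[child];
  nodes.emplace_back();            // may reallocate: 'c' can now dangle
  nodes.back().colors = c.colors;  // undefined behavior after a reallocation
}

// Fixed pattern (a simplified version of what the patch adopts): copy
// everything needed from 'c' into locals before growing the vector.
void add_copy_ok(std::vector<node>& nodes, unsigned child)
{
  int colors = nodes[child].colors;
  nodes.emplace_back();
  nodes.back().colors = colors;    // safe: uses the saved copy
}

int main()
{
  std::vector<node> nodes{{0, 1}, {0, 2}};
  add_copy_ok(nodes, 1);
  return 0;
}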
+ acc_cond::mark_t colors = c.colors; + unsigned minstate = c.minstate; + child = c.next_sibling; nodes_.emplace_back(c.edges, c.states); auto& n = nodes_.back(); n.parent = node; n.level = lvl + 1; n.scc = ref.scc; - n.colors = c.colors; - n.minstate = c.minstate; - child = c.next_sibling; + n.colors = colors; + n.minstate = minstate; } while (child != fc); chain_children(node, before, nodes_.size()); diff --git a/spot/twaalgos/zlktree.hh b/spot/twaalgos/zlktree.hh index 675224682..6d8b3270c 100644 --- a/spot/twaalgos/zlktree.hh +++ b/spot/twaalgos/zlktree.hh @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2021 Laboratoire de Recherche et Developpement de +// Copyright (C) 2021, 2022 Laboratoire de Recherche et Developpement de // l'Epita (LRDE). // // This file is part of Spot, a model checking library. @@ -28,6 +28,68 @@ namespace spot { + /// \ingroup twa_acc_transform + /// \brief Options to alter the behavior of acd + enum class zielonka_tree_options + { + /// Build the ZlkTree, without checking its shape. + NONE = 0, + /// Check if the ZlkTree has Rabin shape. + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always checks the shape. + CHECK_RABIN = 1, + /// Check if the ZlkTree has Streett shape. + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always checks the shape. + CHECK_STREETT = 2, + /// Check if the ZlkTree has Parity shape. + /// This actually has no effect unless ABORT_WRONG_SHAPE is set, + /// because zielonka_tree always checks the shape. + CHECK_PARITY = CHECK_RABIN | CHECK_STREETT, + /// Abort the construction of the ZlkTree if it does not have the + /// shape that is tested. When that happens, num_branches() is set + /// to 0. + ABORT_WRONG_SHAPE = 4, + /// Fuse identical subtrees. This cannot be used with + /// zielonka_tree_transform(). However it saves memory if the + /// only use of the zielonka_tree is to check the shape. + MERGE_SUBTREES = 8, + }; + +#ifndef SWIG + inline + bool operator!(zielonka_tree_options me) + { + return me == zielonka_tree_options::NONE; + } + + inline + zielonka_tree_options operator&(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t<zielonka_tree_options> ut; + return static_cast<zielonka_tree_options>(static_cast<ut>(left) + & static_cast<ut>(right)); + } + + inline + zielonka_tree_options operator|(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t<zielonka_tree_options> ut; + return static_cast<zielonka_tree_options>(static_cast<ut>(left) + | static_cast<ut>(right)); + } + + inline + zielonka_tree_options operator-(zielonka_tree_options left, + zielonka_tree_options right) + { + typedef std::underlying_type_t<zielonka_tree_options> ut; + return static_cast<zielonka_tree_options>(static_cast<ut>(left) + & ~static_cast<ut>(right)); + } +#endif /// \ingroup twa_acc_transform /// \brief Zielonka Tree implementation /// @@ -41,7 +103,8 @@ namespace spot { public: /// \brief Build a Zielonka tree from the acceptance condition. - zielonka_tree(const acc_cond& cond); + zielonka_tree(const acc_cond& cond, + zielonka_tree_options opt = zielonka_tree_options::NONE); /// \brief The number of branches in the Zielonka tree. /// @@ -118,7 +181,6 @@ namespace spot /// \brief Render the tree as in GraphViz format.
void dot(std::ostream&) const; - private: struct zielonka_node { unsigned parent; @@ -128,6 +190,7 @@ namespace spot acc_cond::mark_t colors; }; std::vector<zielonka_node> nodes_; + private: unsigned one_branch_ = 0; unsigned num_branches_ = 0; bool is_even_; @@ -418,7 +481,7 @@ namespace spot /// /// If \a colored is set, each output transition will have exactly /// one color, and the output automaton will use at most n+1 colors - /// if the input has n colors. If \colored is unsed (the default), + /// if the input has n colors. If \a colored is unset (the default), /// output transitions will use at most one color, and output /// automaton will use at most n colors. /// diff --git a/spot/twacube_algos/convert.hh b/spot/twacube_algos/convert.hh index f21aaec3f..ba739f470 100644 --- a/spot/twacube_algos/convert.hh +++ b/spot/twacube_algos/convert.hh @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2015, 2016, 2020 Laboratoire de Recherche et Developpement de -// l'Epita (LRDE). +// Copyright (C) 2015, 2016, 2020, 2022 Laboratoire de Recherche et +// Developpement de l'Epita (LRDE). // // This file is part of Spot, a model checking library. // @@ -51,7 +51,7 @@ namespace spot twa_to_twacube(spot::const_twa_graph_ptr aut); /// \brief Convert a twacube into a twa. - /// When \d is specified, the BDD_dict in parameter is used rather than + /// When \a d is specified, the BDD_dict passed as parameter is used rather than /// creating a new one. SPOT_API spot::twa_graph_ptr twacube_to_twa(spot::twacube_ptr twacube, diff --git a/tests/Makefile.am b/tests/Makefile.am index 1b5d63fee..6728ea0ba 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,5 +1,5 @@ ## -*- coding: utf-8 -*- -## Copyright (C) 2009-2022 Laboratoire de Recherche et Développement +## Copyright (C) 2009-2023 Laboratoire de Recherche et Développement ## de l'Epita (LRDE).
## Copyright (C) 2003-2006 Laboratoire d'Informatique de Paris 6 ## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -73,6 +73,7 @@ check_PROGRAMS = \ core/cube \ core/emptchk \ core/equals \ + core/expand \ core/graph \ core/kind \ core/length \ @@ -80,6 +81,7 @@ check_PROGRAMS = \ core/intvcomp \ core/intvcmp2 \ core/kripkecat \ + core/ltl2aa \ core/ltl2dot \ core/ltl2text \ core/ltlrel \ @@ -114,6 +116,7 @@ core_bricks_SOURCES = core/bricks.cc core_checkpsl_SOURCES = core/checkpsl.cc core_checkta_SOURCES = core/checkta.cc core_emptchk_SOURCES = core/emptchk.cc +core_expand_SOURCES = core/expand.cc core_graph_SOURCES = core/graph.cc core_ikwiad_SOURCES = core/ikwiad.cc core_intvcomp_SOURCES = core/intvcomp.cc @@ -129,6 +132,7 @@ core_cube_SOURCES = core/cube.cc core_equals_SOURCES = core/equalsf.cc core_kind_SOURCES = core/kind.cc core_length_SOURCES = core/length.cc +core_ltl2aa_SOURCES = core/ltl2aa.cc core_ltl2dot_SOURCES = core/readltl.cc core_ltl2dot_CPPFLAGS = $(AM_CPPFLAGS) -DDOTTY core_ltl2text_SOURCES = core/readltl.cc @@ -165,6 +169,7 @@ TESTS_tl = \ core/parse.test \ core/parseerr.test \ core/utf8.test \ + core/500.test \ core/length.test \ core/equals.test \ core/tostring.test \ @@ -198,7 +203,8 @@ TESTS_tl = \ core/stutter-ltl.test \ core/hierarchy.test \ core/mempool.test \ - core/format.test + core/format.test \ + core/sonf.test TESTS_graph = \ core/graph.test \ @@ -220,6 +226,8 @@ TESTS_misc = \ TESTS_twa = \ core/385.test \ + core/521.test \ + core/522.test \ core/acc.test \ core/acc2.test \ core/bdddict.test \ @@ -339,7 +347,11 @@ TESTS_twa = \ core/dnfstreett.test \ core/parity.test \ core/parity2.test \ + core/pgsolver.test \ core/ltlsynt.test \ + core/ltlsynt2.test \ + core/ltlsynt-pgame.test \ + core/syfco.test \ core/rabin2parity.test \ core/twacube.test @@ -358,6 +370,7 @@ TESTS_ipython = \ python/atva16-fig2b.ipynb \ python/automata-io.ipynb \ python/automata.ipynb \ + python/cav22-figs.ipynb \ python/contains.ipynb \ python/decompose.ipynb \ python/formulas.ipynb \ @@ -388,6 +401,7 @@ TESTS_python = \ python/_altscc.ipynb \ python/_autparserr.ipynb \ python/_aux.ipynb \ + python/acc.py \ python/accparse2.py \ python/alarm.py \ python/aliases.py \ @@ -398,6 +412,7 @@ TESTS_python = \ python/bddnqueen.py \ python/bugdet.py \ python/complement_semidet.py \ + python/dbranch.py \ python/declenv.py \ python/decompose_scc.py \ python/det.py \ @@ -426,6 +441,7 @@ TESTS_python = \ python/origstate.py \ python/otfcrash.py \ python/parsetgba.py \ + python/_partitioned_relabel.ipynb \ python/parity.py \ python/pdegen.py \ python/prodexpt.py \ @@ -445,6 +461,7 @@ TESTS_python = \ python/setxor.py \ python/simplacc.py \ python/simstate.py \ + python/sonf.py \ python/split.py \ python/streett_totgba.py \ python/streett_totgba2.py \ @@ -466,7 +483,11 @@ endif CLEANFILES = python/test1.dve python/test1.dve2C python/test1.dve.cpp SUFFIXES = .ipynb .html +# Use the classic template when available because it loads +# jquery and we need it in zlktree.html; however the --template +# option does not exist with nbconvert 5.6.1 (in Debian stable). 
.ipynb.html: + $(JUPYTER) nbconvert $< --to html --template classic --stdout >$@ || \ $(JUPYTER) nbconvert $< --to html --stdout >$@ .PHONY: nb-html diff --git a/tests/core/.gitignore b/tests/core/.gitignore index d4ebfae45..fdee02715 100644 --- a/tests/core/.gitignore +++ b/tests/core/.gitignore @@ -33,6 +33,7 @@ kripkecat length .libs ikwiad +ltl2aa ltl2dot ltl2text ltlmagic diff --git a/tests/core/500.test b/tests/core/500.test new file mode 100755 index 000000000..60d5c6365 --- /dev/null +++ b/tests/core/500.test @@ -0,0 +1,43 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# The LTL parser used to exhibit a worse-than-quadratic behavior on +# n-ary operators with many children. See issue #500. Before the +# fix, this test would run for ages. + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x " | s" i; print x;}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x " & s" i; print x;}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x ";s" i; print "{" x "}";}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` + +awk 'BEGIN{x="s0"; for(i = 1; i < 40000; ++i) x=x ":s" i; print "{" x "}";}' | + ltlfilt --stats=%x > out +test 40000 = `cat out` diff --git a/tests/core/521.test b/tests/core/521.test new file mode 100755 index 000000000..002ab1ca2 --- /dev/null +++ b/tests/core/521.test @@ -0,0 +1,64 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #521. + +# The following formula used to take hours or days to translate with +# default settings (nobody was patient enough to wait) because +# automata-based containment checks were run to exprop=1. 
+ +cat >formula.ltl <<'EOF' +!a & !b & !c & !d & e & f & G(g & h & i & j & ((!c & !d) | (!c & d) | +(c & !d) | (c & d)) & ((!a & !b) | (!a & b) | (a & !b) | (a & b)) & +(k -> !l) & (f -> k) & (l -> !k) & (f -> !l) & (l -> !f) & (m -> !n) & +(m -> o) & (p -> !q) & (m -> !r) & (p -> !m) & (s -> !e) & (r -> !s) & +(e -> n) & (m -> !t) & (t -> !s) & (q -> u) & (o -> !t) & (m -> !p) & +(u -> o) & (p -> !v) & (q -> v) & (n -> w) & (x -> !s) & (u -> !t) & +(p -> w) & (u -> !p) & (t -> n) & (m -> !x) & (q -> !e) & (p -> !u) & +(s -> !n) & (s -> o) & (s -> m) & (v -> !e) & (x -> n) & (s -> !r) & +(e -> x) & (e -> !q) & (n -> r) & (w -> !s) & (m -> q) & (s -> !t) & +(u -> !x) & (e -> p) & (e -> !m) & (s -> !p) & (p -> r) & (e -> !o) & +(e -> !v) & (t -> x) & (q -> o) & (q -> !n) & (t -> !q) & (r -> !m) & +(t -> p) & (t -> !m) & (s -> !x) & (v -> o) & (e -> w) & (n -> !s) & +(q -> !t) & (t -> !o) & (x -> !q) & (e -> !u) & (q -> !p) & (t -> !v) & +(p -> !s) & (m -> u) & (x -> !m) & (v -> !t) & (s -> q) & (v -> !p) & +(m -> v) & (r -> w) & (t -> w) & (e -> t) & (e -> r) & (q -> !x) & +(t -> !u) & (p -> n) & (m -> !e) & (u -> v) & (x -> w) & (o -> !e) & +(x -> !u) & (s -> !w) & (u -> !e) & (t -> r) & (s -> u) & (e -> !s) & +(s -> v) & (n -> !q) & (x -> r) & (n -> !m) & (p -> x) & ((!a & !b & +!c & !d) | (!a & b & !c & d) | (a & !b & c & !d) | (a & b & c & d)) & +((!c & !d & k & o) -> X(!c & d)) & ((!c & !d & l & v & !(k & o)) -> +X(!c & d)) & ((!c & !d) -> ((!(k & o) & !(l & v)) -> X(!c & !d))) & +((!c & d & k & t) -> X(!c & !d)) & ((!c & d & l & p & !(k & t)) -> X(!c +& !d)) & ((!c & d & k & u & !(l & p & !(k & t))) -> X(c & !d)) & ((!c & +d & l & q & !(k & u & !(l & p & !(k & t)))) -> X(c & !d)) & ((!c & d) -> +((!(k & t) & !(l & p) & !(k & u) & !(l & q)) -> X(!c & d))) & ((c & !d +& k & x) -> X(!c & d)) & ((c & !d & l & n & !(k & x)) -> X(!c & d)) & +((c & !d & k & m & !(l & n & !(k & x))) -> X(c & d)) & ((c & !d & l & +s & !(k & m & !(l & n & !(k & x)))) -> X(c & d)) & ((c & !d) -> ((!(k & +x) & !(l & n) & !(k & m) & !(l & s)) -> X(c & !d))) & ((c & d & k & r) +-> X(c & !d)) & ((c & d & l & w & !(k & r)) -> X(c & !d)) & ((c & d) -> +((!(k & r) & !(l & w)) -> X(c & d)))) +EOF +test 5 = `tr -d "\r\n" < formula.ltl | ltl2tgba --stats=%s` diff --git a/tests/core/522.test b/tests/core/522.test new file mode 100755 index 000000000..3f1596514 --- /dev/null +++ b/tests/core/522.test @@ -0,0 +1,77 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs + +set -e + +# For issue #522. 
+ +cat >552.hoa < out.hoa +test "8 1 16 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" + +cat >552loop1.hoa < out.hoa +test "8 0 20 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" + +cat >552loop2.hoa < out.hoa +test "9 0 24 0" = "`autfilt --stats='%[a]s %[u]s %[a]e %[u]e' out.hoa`" diff --git a/tests/core/acc_word.test b/tests/core/acc_word.test index 53ce4b98e..5f3b6880b 100644 --- a/tests/core/acc_word.test +++ b/tests/core/acc_word.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2018, 2019 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2016-2019, 2023 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -91,6 +91,15 @@ State: 1 EOF diff expected out +ltl2tgba -G '(GF(a & X!a) -> GF(b & XXb)) & GFc' > aut.hoa +word='!a&!c;cycle{!a&b&!c;!a&c;!a&b&c}' +autfilt -H1.1 aut.hoa --highlight-word="$word" > out.hoa +grep spot.highlight.edges out.hoa >out.edges +cat >expected <stderr && exit 1 -test $? -eq 2 -grep 'highlight-word.*Fin' stderr - +# highlight-word used not to work with Fin acceptance, but it's ok now +ltl2tgba -G -D 'FGa' | autfilt --highlight-word='cycle{a}' ltlfilt -f 'GFa' --accept-word 'cycle{!a}' && exit 1 ltlfilt -f 'GF!a' --accept-word 'cycle{!a}' diff --git a/tests/core/alternating.test b/tests/core/alternating.test index 17f012675..df4e47624 100755 --- a/tests/core/alternating.test +++ b/tests/core/alternating.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2021 Laboratoire de Recherche et +# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -1009,3 +1009,41 @@ test '2_0_1_1_1_1_3_3' = "`autfilt --stats=$stats in`" autfilt --stats='%[x]U' in 2>stderr && exit2 grep '%\[x\]U' stderr + +cat >in <out.dot +# T0T-1 is not a valid name for GraphViz, it has to be quoted. +cat >exp.dot < 1 + 1 [label="1"] + 1 -> -1 [label="1\n{0}", arrowhead=onormal] + -1 [label=<>,shape=point,width=0.05,height=0.05] + "T0T-1" [label="", style=invis, width=0] + -1 -> "T0T-1" + -1 -> 1 + T0T1 [label="", style=invis, width=0] + 1 -> T0T1 [label="a"] +} +EOF +diff out.dot exp.dot diff --git a/tests/core/autcross.test b/tests/core/autcross.test index 2ac14eb34..b3d27ec0a 100755 --- a/tests/core/autcross.test +++ b/tests/core/autcross.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2017, 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -46,3 +46,17 @@ for f in out.csv out2.csv; do sed 's/,[0-9]*\.[0-9]*,/,TIME,/' $f > _$f done diff _out.csv _out2.csv + + +# The {autfilt {complement}} name makes sure we can nest braces. 
+randaut -n10 2 | + autcross 'ltl2dstar --complement-input=yes' 'autfilt --complement' \ + --language-complemented --csv=out3.csv --verbose 2>stderr +test 10 = `grep 'check_empty Comp(input)\*Comp(A0)' stderr | wc -l` + + +randaut -n1 2 | + autcross 'ltl2dstar --complement-input=yes' 'autfilt --complement' \ + --language-complemented --language-preserved 2> stderr && exit 1 +cat stderr +grep 'preserved.*complemented.*incompatible' stderr diff --git a/tests/core/autcross4.test b/tests/core/autcross4.test index 9e0d68638..13f770d1c 100755 --- a/tests/core/autcross4.test +++ b/tests/core/autcross4.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018, 2019 Laboratoire de Recherche et Développement de +# Copyright (C) 2018, 2019, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -85,7 +85,9 @@ autcross -T3 -q --language-preserved --ignore-execution-failures \ --fail-on-timeout \ 'sleep 10; autfilt %H>%O' 'false %H %O' 2>err -Fin && exit 1 cat err -test 4 = `wc -l err && exit 1 diff --git a/tests/core/bdd.test b/tests/core/bdd.test index ba2e11232..85d410f8d 100755 --- a/tests/core/bdd.test +++ b/tests/core/bdd.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -23,22 +23,23 @@ set -e # Make sure that setting the SPOT_BDD_TRACE envvar actually does # something. -genltl --kr-nlogn=2 | - SPOT_BDD_TRACE=1 ltl2tgba -x tls-max-states=0 -D >out 2>err +genltl --kr-n=3 | + SPOT_BDD_TRACE=1 ltl2tgba -x tls-max-states=0,tls-max-ops=0 -D >out 2>err cat err grep spot: out && exit 1 grep 'spot: BDD package initialized' err # This value below, which is the number of time we need to garbage # collect might change if we improve the tool or change the way BuDDy # is initialized. -test 11 = `grep -c 'spot: BDD GC' err` +test 15 = `grep -c 'spot: BDD GC' err` # Minimal size for this automaton. # See also https://www.lrde.epita.fr/dload/spot/mochart10-fixes.pdf -test "147,207" = `autfilt --stats=%s,%e out` +test "2240,4214" = `autfilt --stats=%s,%e out` # With the default value of tls-max-states, no GC is needed -genltl --kr-nlogn=2 | SPOT_BDD_TRACE=1 ltl2tgba -D --stats=%s,%e >out 2>err +genltl --kr-n=3 | + SPOT_BDD_TRACE=1 ltl2tgba -D -x tls-max-ops=0 --stats=%s,%e >out 2>err cat err grep 'spot: BDD package initialized' err test 0 = `grep -c 'spot: BDD GC' err` -test "147,207" = `cat out` +test "2240,4214" = `cat out` diff --git a/tests/core/bricks.test b/tests/core/bricks.test index b98c7e856..37ff57cb0 100644 --- a/tests/core/bricks.test +++ b/tests/core/bricks.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -21,12 +21,13 @@ . ./defs set -e -seq 0 1999 > expected +# The seq command is not always available, but we assume awk is. 
+$AWK 'BEGIN{for(x=0;x<2000;++x) print x;}' >expected ../bricks > stdout -cat stdout | head -n 2000 | awk '{print $2}' | sed 's/{//g' | \ - awk -F',' '{print $1}' | sort -n > map +cat stdout | head -n 2000 | $AWK '{print $2}' | sed 's/{//g' | \ + $AWK -F',' '{print $1}' | sort -n > map diff expected map diff --git a/tests/core/defs.in b/tests/core/defs.in index 7df6fdf77..d06a3b67d 100644 --- a/tests/core/defs.in +++ b/tests/core/defs.in @@ -1,5 +1,5 @@ # -*- mode: shell-script; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2013, 2015 Laboratoire de Recherche +# Copyright (C) 2009, 2010, 2012, 2013, 2015, 2022 Laboratoire de Recherche # et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004, 2006 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -57,6 +57,7 @@ case $srcdir in *) srcdir=../$srcdir esac +AWK='@AWK@' DOT='@DOT@' LBTT="@LBTT@" LBTT_TRANSLATE="@LBTT_TRANSLATE@" diff --git a/tests/core/equals.test b/tests/core/equals.test index f00216347..a67c4b1ef 100755 --- a/tests/core/equals.test +++ b/tests/core/equals.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2015, 2021 Laboratoire de Recherche et +# Copyright (C) 2009-2012, 2014-2015, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -196,6 +196,12 @@ G({1}<>->1), 1 {(a*;b|c)[:*0]}, 1 {(a*;b|c)[:*1]}, {(a*;b|c)} {(a;b):(a;b):(a;b)[:*2]:(a;b):b*:b*:(c;d)[:*1]}, {(a;b)[:*5]:b*[:*2]:(c;d)} +{((a;b)|[+]|(c;d[*]));a}, {[+];a} +{((a;b)|[+]|(d[*]));a}, {[*];a} +{((a;b)&&[+]&&(d[*]));a}, {((a;b)&&(d[*]));a} +{((a;b|[*0])&&[+]&&(d[*]));a}, {((a;b|[*0])&&[+]&&(d[*]));a} +{(a;c):b[*3..5]:b[*10]:(a;c)}, {(a;c):b[*12..14]:(a;c)} +{(a;c):b:b[*3..5]:b:b[*0..4]:(a;c)}, {(a;c):b[*3..8]:(a;c)} EOF diff --git a/tests/core/expand.cc b/tests/core/expand.cc new file mode 100644 index 000000000..a589d6370 --- /dev/null +++ b/tests/core/expand.cc @@ -0,0 +1,25 @@ +#include "config.h" + +#include +#include +#include +#include + +int main(int argc, char** argv) +{ + if (argc != 2) + return 1; + + spot::formula f = spot::parse_infix_sere(argv[1]).f; + auto d = spot::make_bdd_dict(); + + auto m = spot::expansion(f, d, nullptr); + + for (const auto& [bdd_l, form] : m) + std::cout << '[' << bdd_to_formula(bdd_l, d) << ']' << ": " << form << std::endl; + std::cout << "formula: " << expansion_to_formula(m, d) << std::endl; + + d->unregister_all_my_variables(nullptr); + + return 0; +} diff --git a/tests/core/format.test b/tests/core/format.test index 4e6f4a4d5..da78e3e7e 100644 --- a/tests/core/format.test +++ b/tests/core/format.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2016, 2017, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -139,18 +139,36 @@ test 3,5 = `ltl2tgba --low --any --stats=%s,%e "$f"` test 3,4 = `ltl2tgba --stats=%s,%e "$f"` cat >foo < stats + +cat >expected <err && exit 1 +grep 'only \[a\], \[r\], or \[u\] is supported' err diff --git a/tests/core/genaut.test b/tests/core/genaut.test index 5da9509ed..f364569e1 100644 --- a/tests/core/genaut.test +++ b/tests/core/genaut.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2019, 2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2017-2020, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -60,7 +60,10 @@ genaut --l-nba='1..3?' 2>err && exit 1 grep 'invalid range.*trailing garbage' err genaut --l-nba='1..' 2>err && exit 1 grep 'invalid range.*missing end' err - +genaut --l-nba='9999999999999999999999999..' 2>err && exit 1 +grep 'start.*too large' err +genaut --l-nba='1..9999999999999999999999999' 2>err && exit 1 +grep 'end.*too large' err # Tests for autfilt -N/--nth genaut --ks-nca=1..5 | autfilt -N 2..4 > range1.hoa diff --git a/tests/core/genltl.test b/tests/core/genltl.test index d5efb0236..ce5584a21 100755 --- a/tests/core/genltl.test +++ b/tests/core/genltl.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016-2021 Laboratoire de Recherche et Développement +# Copyright (C) 2016-2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -134,8 +134,8 @@ genltl --kr-n2=1..2 --kr-nlogn=1..2 --kr-n=1..2 --gxf-and=0..3 --fxg-or=0..3 \ --pps-arbiter-standard=2..3 --pps-arbiter-strict=2..3 --format=%F=%L,%f | ltl2tgba --low --det -F-/2 --stats='%<,%s' > out cat >exp<exp< range1.ltl genltl --sb-patterns=1..9 > range2.ltl diff range1.ltl range2.ltl + + +# Edmond Irani Liu sent a bug report where formula 23 in this series +# took 9 days to produce, despite the small size of the resulting +# automaton. I (ADL) later found this to be caused by simulation +# applied on a non-deterministic automaton with many non-deterministic +# choices going to state that simulate one another, which in turn lead +# to massive slowdown of the minato_isop algorithm. As a workaround, +# I introduced delay_branching_here(), a cheap function that is called +# before simplification. In this case, this is enough to determinize +# the automaton, simplifying simulation-based reduction greatly. +genltl --eil-gsi=1..25 | ltlfilt --from-ltlf > formulas.ltl +ltl2tgba -F formulas.ltl --stats=%s,%e >output +cat >expected <output +diff expected output diff --git a/tests/core/included.test b/tests/core/included.test index 9f39fef20..3574af9e3 100755 --- a/tests/core/included.test +++ b/tests/core/included.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -60,5 +60,12 @@ ltl2tgba true | autfilt out.hoa --equivalent-to - ltl2tgba '!(a U c)' | autfilt --product-or a1.hoa > out.hoa ltl2tgba true | autfilt out.hoa --equivalent-to - && exit 1 -: +# In Spot 2.10, the following was very slow. 
+for n in 1 2 4 8 16 512 1024 2048 4096 8192; do + genaut --cyclist-trace-nba=$n > trace.hoa + genaut --cyclist-proof-dba=$n > proof.hoa + autfilt -q --included-in=trace.hoa proof.hoa || exit 1 + autfilt -q --included-in=proof.hoa trace.hoa && exit 1 +done +: diff --git a/tests/core/ltl2aa.cc b/tests/core/ltl2aa.cc new file mode 100644 index 000000000..82b4b9c7e --- /dev/null +++ b/tests/core/ltl2aa.cc @@ -0,0 +1,22 @@ +#include "config.h" + +#include + +#include +#include +#include +#include + +int main(int argc, char * argv[]) +{ + if (argc < 3) + return 1; + + spot::formula f = spot::parse_formula(argv[1]); + spot::bdd_dict_ptr d = spot::make_bdd_dict(); + auto aut = ltl_to_aa(f, d, true); + + std::ofstream out(argv[2]); + spot::print_hoa(out, aut); + return 0; +} diff --git a/tests/core/ltl2tgba2.test b/tests/core/ltl2tgba2.test index 79a07a17a..8397bbc85 100755 --- a/tests/core/ltl2tgba2.test +++ b/tests/core/ltl2tgba2.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -375,8 +375,8 @@ diff output expected cat >formulas < out3 cmp out3 out4 && exit 1 # make sure we did remove something autfilt out3 > out4 diff out4 expected3 + +# Issue #526 +ltlfilt -f '(i->XXo)|G(i<->Xo2)' --from-ltlf | ltl2tgba -D |\ + autfilt -C --to-finite > out +cat >exp < file +run 0 ltlfilt -Ffile/1 -Ffile/2 --stats=%f >out +cat >expected <,%F,%L' <err && exit 1 +grep 'too large' err diff --git a/tests/core/ltlsynt-pgame.test b/tests/core/ltlsynt-pgame.test new file mode 100755 index 000000000..b4bada798 --- /dev/null +++ b/tests/core/ltlsynt-pgame.test @@ -0,0 +1,157 @@ +#! /bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
./defs || exit 1 + +set -e + +# From SYNTCOMP +cat >aut7.hoa < Xa)" +Start: 0 +acc-name: Buchi +Acceptance: 1 Inf(0) +AP: 2 "a" "b" +controllable-AP: 1 +properties: explicit-labels trans-labels +--BODY-- +State: 0 + [t] 1 + [1] 2 + [!1] 3 +State: 1 "GFa" + [0] 1 {0} + [!0] 1 +State: 2 "a & G(b <-> Xa)" {0} + [0&1] 2 + [0&!1] 3 +State: 3 "!a & G(b <-> Xa)" {0} + [!0&1] 2 + [!0&!1] 3 +--END-- +EOF + +test UNREALIZABLE = `ltlsynt --realizability --from-pgame aut7.hoa` + +grep -v controllable-AP aut7.hoa > aut7b.hoa +run 2 ltlsynt --realizability --from-pgame aut7b.hoa 2>stderr +grep 'aut7b.*controllable-AP' stderr + + +# From SYNTCOMP +cat >UnderapproxDemo2.ehoa <starve.ehoa <expect <results +diff expect results + +ltlsynt --realizability --from-pgame starve.ehoa \ + --from-pgame UnderapproxDemo2.ehoa \ + --from-pgame aut7.hoa --csv=out.csv >result || : +cat >expect <result || : +test 4 = `wc -l < out.csv` +cut -d, -f 9,10,11,12,13 right +end='"strat_num_states","strat_num_edges"' +cat >expect <exp < out @@ -192,32 +192,30 @@ ltlsynt --ins=a --outs=b,c -f 'GFa <-> (GFb & GFc)' \ diff out exp cat >exp < GFb -tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors EOF ltlsynt --ins='a' --outs='b' -f 'GFa <-> GFb' --verbose --realizability 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < GFb tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds EOF ltlsynt --ins=a --outs=b -f 'GFa <-> GFb' --verbose --algo=ps 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < GFe -tanslating formula done in X seconds +there are 1 subformulas +trying to create strategy directly for GFe <-> (Fa & Fb & Fc & Fd) direct strategy was found. -direct strat has 16 states and 0 colors EOF ltlsynt --ins='a,b,c,d' --outs='e' -f '(Fa & Fb & Fc & Fd) <-> GFe' \ --verbose --realizability --algo=lar 2> out @@ -225,15 +223,16 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < G(i1 <-> o0) direct strategy might exist but was not found. 
translating formula done in X seconds automaton has 2 states and 3 colors LAR construction done in X seconds -DPA has 4 states, 3 colors +DPA has 4 states, 1 colors split inputs and outputs done in X seconds automaton has 12 states -solving game with acceptance: parity max odd 5 +solving game with acceptance: co-Büchi game solved in X seconds EOF ltlsynt -f "G(Fi0 && Fi1 && Fi2) -> G(i1 <-> o0)" --outs="o0" --algo=lar \ @@ -415,13 +414,13 @@ grep 'DPA has 29 states' err ltlsynt --verbose -x wdba-minimize=1 --algo=ps --outs=p1 --ins=p0 -f "$f" 2>err grep 'DPA has 12 states' err -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=no | grep 'States: 5' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim | grep 'States: 5' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa | grep 'States: 4' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" | grep 'States: 4' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=sat | grep 'States: 2' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim-sat | grep 'States: 2' -ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa-sat | grep 'States: 4' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=no | grep 'States: 7' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim | grep 'States: 7' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa | grep 'States: 6' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" | grep 'States: 6' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=sat | grep 'States: 3' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bisim-sat | grep 'States: 3' +ltlsynt --outs=p1 -f "$f" -x"dpa-simul=1" --simpl=bwoa-sat | grep 'States: 6' # The following used to raise an exception because of a bug in # split_2step_fast_here(). @@ -475,10 +474,81 @@ i3 i3 o0 o0 o1 o1 EOF +ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ + --aiger=isop+ud --algo=lar --decompose=no --simpl=no >out +diff out exp + +cat >exp <X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ --aiger=isop --algo=lar --decompose=no --simpl=no >out diff out exp + cat >exp <X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop --algo=lar --decompose=yes --simpl=no >out + --aiger=isop+ud --algo=lar --decompose=yes --simpl=no >out diff out exp ltlsynt -f "G((i0 && i1)<->X(o0)) && G((i2|i3)<->X(o1))" --outs="o0,o1"\ - --aiger=isop --algo=lar --simpl=no >out + --aiger=isop+ud --algo=lar --simpl=no >out diff out exp # Issue #477 @@ -527,15 +597,14 @@ REALIZABLE HOA: v1 States: 1 Start: 0 -AP: 3 "a" "b" "c" +AP: 3 "c" "a" "b" acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc deterministic -controllable-AP: 2 +controllable-AP: 0 --BODY-- State: 0 -[!0&!2 | !1&!2] 0 -[0&1&2] 0 +[!0&!1 | !0&!2 | 0&1&2] 0 --END-- EOF ltlsynt --ins=a,b -f 'G (a & b <=> c)' >stdout @@ -558,28 +627,22 @@ grep "one of --ins or --outs" stderr # Try to find a direct strategy for GFa <-> GFb and a direct strategy for # Gc cat >exp < GFb tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds trying to create strategy directly for Gc -direct strategy might exist but was not found. 
-translating formula done in X seconds -automaton has 1 states and 1 colors -LAR construction done in X seconds -DPA has 1 states, 2 colors -split inputs and outputs done in X seconds -automaton has 2 states -solving game with acceptance: parity max odd 4 -game solved in X seconds +direct strategy was found. +direct strat has 1 states, 1 edges and 0 colors +simplification took X seconds EOF ltlsynt -f '(GFa <-> GFb) && (Gc)' --outs=b,c --verbose 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp -# Try to find a direct strategy for (GFa <-> GFb) & Gc. THe order should not +# Try to find a direct strategy for (GFa <-> GFb) & Gc. The order should not # impact the result for f in "(GFa <-> GFb) & Gc" "(GFb <-> GFa) & Gc" \ "Gc & (GFa <-> GFb)" "Gc & (GFb <-> GFa)" @@ -588,9 +651,8 @@ cat >exp < out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -601,7 +663,6 @@ done # # impossible to find a strategy. cat >exp < GFa) & G(a & c) -tanslating formula done in X seconds no strategy exists. EOF ltlsynt -f '(GFb <-> GFa) && G(a&c)' --outs=b,c --verbose\ @@ -609,15 +670,14 @@ ltlsynt -f '(GFb <-> GFa) && G(a&c)' --outs=b,c --verbose\ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp -# # Ltlsynt should be able to create a strategy when the last G +# # ltlsynt should be able to create a strategy when the last G # is input-complete. cat >exp < GFa) & G((a & c) | (!a & !c)) tanslating formula done in X seconds direct strategy was found. -direct strat has 1 states and 0 colors -final strategy has 1 states and 2 edges -minimization took X seconds +direct strat has 1 states, 2 edges and 0 colors +simplification took X seconds EOF ltlsynt -f '(GFb <-> GFa) && (G((a&c)|(!a&!c)))' --outs=b,c --verbose\ --verify --decompose=0 2> out @@ -630,9 +690,8 @@ cat >exp < FGb tanslating formula done in X seconds direct strategy was found. -direct strat has 2 states and 0 colors -final strategy has 2 states and 3 edges -minimization took X seconds +direct strat has 2 states, 3 edges and 0 colors +simplification took X seconds EOF ltlsynt -f "Fa <-> FGb" --outs=b,c --verbose --decompose=0 --verify 2> out sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -646,11 +705,12 @@ direct strategy might exist but was not found. translating formula done in X seconds automaton has 4 states and 1 colors LAR construction done in X seconds -DPA has 4 states, 4 colors +DPA has 4 states, 1 colors split inputs and outputs done in X seconds -automaton has 9 states -solving game with acceptance: parity max odd 6 +automaton has 10 states +solving game with acceptance: Büchi game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f "Ga <-> Gb" --outs=b --verbose --decompose=0 --verify --aiger 2> out @@ -658,26 +718,29 @@ sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp cat >exp < y direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: all game solved in X seconds +simplification took X seconds trying to create strategy directly for (a | x) -> x direct strategy might exist but was not found. 
translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: all game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f '((a|x) & (b | y) & b) => (x & y)' --outs="x,y" --aiger=ite\ @@ -687,26 +750,15 @@ diff outx exp # Here, G!(!x | !y) should be Gx & Gy cat >exp < out @@ -715,16 +767,9 @@ diff outx exp # Here, !F(a | b) should be G(!a) & G(!b) cat >exp < out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx @@ -732,16 +777,9 @@ diff outx exp # Here, G!(a -> b) should be G(a) & G(!b) cat >exp < b)' --outs=b --decompose=yes --aiger\ --verbose 2> out || true @@ -750,16 +788,18 @@ diff outx exp # Here, (a & b) U (b & c) should be (a U (b & c)) & (b U (b & c)) cat >exp < (b & c & d) should be # (a => b) & (a => c) & (a => d) cat >exp < b direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: all game solved in X seconds +simplification took X seconds trying to create strategy directly for a -> c direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: all game solved in X seconds +simplification took X seconds trying to create strategy directly for a -> d direct strategy might exist but was not found. translating formula done in X seconds automaton has 2 states and 1 colors LAR construction done in X seconds -DPA has 2 states, 2 colors +DPA has 2 states, 0 colors split inputs and outputs done in X seconds automaton has 4 states -solving game with acceptance: parity max odd 4 +solving game with acceptance: all game solved in X seconds +simplification took X seconds AIG circuit was created in X seconds and has 0 latches and 0 gates EOF ltlsynt -f 'a => (b & c & d)' --outs=b,c,d, --decompose=yes\ @@ -809,18 +853,175 @@ diff outx exp # Here, !(F(a | b)) should be G!a & G!b cat >exp < out || true sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx diff outx exp + +ltlsynt --ins="" -f "GFa" +ltlsynt --outs="" -f "GFb" | grep "UNREALIZABLE" + +ltlsynt --outs="" -f "1" + +ltlsynt --outs="" --ins="" -f "GFa" 2>&1 | \ + grep "both --ins and --outs are specified" + +LTL='(((((G (((((((g_0) && (G (! (r_0)))) -> (F (! (g_0)))) && (((g_0) && +(X ((! (r_0)) && (! (g_0))))) -> (X ((r_0) R (! (g_0)))))) && (((g_1) && +(G (! (r_1)))) -> (F (! (g_1))))) && (((g_1) && (X ((! (r_1)) && (! (g_1))))) -> +(X ((r_1) R (! (g_1)))))) && (((! (g_0)) && (true)) || ((true) && (! (g_1)))))) +&& ((r_0) R (! (g_0)))) && (G ((r_0) -> (F (g_0))))) && ((r_1) R (! 
(g_1)))) && +(G ((r_1) -> (F (g_1)))))' +OUT='g_0, g_1' +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both+ud\ + --algo=acd | grep "aag 8 2 2 2 4" +ltlsynt --outs="$OUT" -f "$LTL" --aiger=both+ud\ + --algo=lar | grep "aag 34 2 3 2 29" + +ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ + --verbose --realizability 2> out +cat >exp < GFb +direct strategy was found. +EOF +diff out exp + +ltlsynt -f 'G(c) & (G(a) <-> GFb)' --outs=b,c --decompose=yes\ + --verbose --realizability --bypass=no 2> out +cat >exp < outx +diff outx exp + +# ACD verbose +cat >exp < GFb) && (Gc)' --outs=b,c --verbose --bypass=no\ + --algo=acd 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +# Bypass: check that in G(b1) ∧ (Büchi ↔ GF(b2)), b1 and b2 don't share an AP. +# We do it because G(o1 ∨ o2) ∧ (GFi ↔ GFo1) is realizable while +# G(o1) ∧ (GFi ↔ GFo1) is not realizable. So we cannot conclude if +# they share an AP. +cat >exp < GFo1) +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 1 states and 1 colors +LAR construction done in X seconds +DPA has 1 states, 1 colors +split inputs and outputs done in X seconds +automaton has 3 states +solving game with acceptance: Büchi +game solved in X seconds +EOF +ltlsynt -f "G(o1) & (GFi <-> GFo1)" --outs="o1" --verbose\ + --bypass=yes 2> out || true +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +cat >exp < GFo1) +direct strategy might exist but was not found. +translating formula done in X seconds +automaton has 1 states and 2 colors +LAR construction done in X seconds +DPA has 2 states, 2 colors +split inputs and outputs done in X seconds +automaton has 6 states +solving game with acceptance: Streett 1 +game solved in X seconds +simplification took X seconds +EOF +ltlsynt -f "G(o1|o2) & (GFi <-> GFo1)" --outs="o1,o2" --verbose\ + --bypass=yes 2> out +sed 's/ [0-9.e-]* seconds/ X seconds/g' out > outx +diff outx exp + +# Test --dot and --hide-status +ltlsynt -f 'i <-> Fo' --ins=i --aiger --dot | grep arrowhead=dot +ltlsynt -f 'i <-> Fo' --ins=i --print-game-hoa --dot | grep 'shape="diamond"' +ltlsynt -f 'i <-> Fo' --ins=i --dot --hide-status > res +cat >exp < 0 + 0 [label="0"] + 0 -> 0 [label="i / o"] + 0 -> 1 [label="!i / !o"] + 1 [label="1"] + 1 -> 1 [label="1 / !o"] +} +EOF +diff res exp + + +# The following formula, generated from SPIReadManag.tlsf exhibited a bug +# in the decomposition. +s1="G(!((!o15 & !((!o14 & o16) <-> (o14 & !o16))) <-> (o15 & !(o14 | o16)))" +s2=" & !((!o12 & !((!o11 & o13) <-> (o11 & !o13))) <-> (o12 & !(o11 | o13)))" +s3=" & !((o09 & !o10) <-> (!o09 & o10)) & !((o07 & !o08) <-> (!o07 & o08))" +s4=" & !((!o05 & !((!o04 & o06) <-> (o04 & !o06))) <-> (o05 & !(o04 | o06)))" +s5=" & !((!o02 & !((!o01 & o03) <-> (o01 & !o03))) <-> (o02 & !(o01 | o03))))" +s6=" & ((G!(i2 & i7) & G(o15 -> Fi3)) -> (Go09 & G(o14 <-> (i6 & !i7)) & " +s7="G(o07 <-> (i7 & i8)) & G((i7 & i8) -> (o11 U i3)) & GFo12 & G(o04 <-> " +s8="(i4 & i6)) & G(o05 <-> !(i4 & i6)) & G(o15 <-> (i7 & i8)) & G(i7 -> o02) & " +s9="G((!i7 & !(i1 & i2 & !i5 & i6)) -> o03) & G(o01 <-> (i1 & i2 & !i5 & i6))))" +s=$s1$s2$s3$s4$s5$s6$s7$s8$s9 +ltlsynt --decomp=yes -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >out +ltlsynt --decomp=no -f "$s" --ins=i1,i2,i3,i4,i5,i6,i7,i8 --realizability >>out +cat >expected <. + + +# More checks for ltlfilt + +. 
./defs || exit 1 + +set -e + +cat >formulas.ltl < Xo1) +F(i1 xor i2) <-> F(o1) +i1 <-> F(o1 xor o2) +F(i1) <-> G(o2) +EOF + +ltlsynt --ins=i1,i2 -F formulas.ltl -f 'o1 & F(i1 <-> o2)' -q --csv=out.csv &&\ + exit 2 +test $? -eq 1 || exit 2 + +test -z "$PYTHON" && exit 77 + +cat >test.py <expected < Xo1),lar,1,3 +F(i1 xor i2) <-> Fo1,lar,1,2 +i1 <-> F(o1 xor o2),lar,1,3 +Fi1 <-> Go2,lar,0,0 +o1 & F(i1 <-> o2),lar,1,4 +EOF + +diff filtered.csv expected + +# ltlfilt should be able to read the first columns +mv filtered.csv input.csv +ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q && exit 2 +test $? -eq 1 +$PYTHON test.py +diff filtered.csv expected + +grep -v 0,0 filtered.csv >input.csv +ltlsynt --ins=i1,i2 -F input.csv/-1 --csv=out.csv -q || exit 2 +$PYTHON test.py +diff filtered.csv input.csv diff --git a/tests/core/mempool.cc b/tests/core/mempool.cc index 0dae6ce0e..1431a24b2 100644 --- a/tests/core/mempool.cc +++ b/tests/core/mempool.cc @@ -1,6 +1,6 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement -// de l'Epita. +// Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et +// Développement de l'Epita. // // This file is part of Spot, a model checking library. // @@ -23,9 +23,6 @@ #include #include -#include - -#include namespace { @@ -103,7 +100,7 @@ namespace int main() { -#ifndef HAVE_VALGRIND_MEMCHECK_H +#if !__has_include() return 77; #endif @@ -157,33 +154,6 @@ int main() c->incr(); // no delete: valgrind should find a leak } - { - std::set, spot::pool_allocator> s; - s.insert(1); - s.insert(2); - s.insert(1); - s.erase(1); - s.insert(3); - s.insert(4); - - s.clear(); - - auto t = s; - t.insert(5); - t.insert(6); - - std::swap(s, t); - - s.erase(5); - s.erase(6); - - if (s != t) - return 1; - else - return 0; - } - return 0; } - diff --git a/tests/core/minusx.test b/tests/core/minusx.test index 9ed77cdad..238eeaf89 100755 --- a/tests/core/minusx.test +++ b/tests/core/minusx.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2020, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2016, 2020-2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -41,9 +41,9 @@ test 4,1 = `ltl2tgba --stats=%s,%d "$f"` test 6,0 = `ltl2tgba -x wdba-det-max=4 --stats=%s,%d "$f"` # Make sure simul-max has an effect -f=`genltl --ms-phi-s=2` -test 484 -eq `ltl2tgba -P -D --stats=%s "$f"` -test 484 -lt `ltl2tgba -P -D -x simul-max=512 --stats=%s "$f"` +f=`genltl --ms-phi-h=8` +test 511 -eq `ltl2tgba -P -D --stats=%s "$f"` +test 511 -lt `ltl2tgba -P -D -x simul-max=512 --stats=%s "$f"` # Illustrate issue #455: the simulation-based reduction applied before # tba-det can cause the creation of a DBA that is harder to reduce. diff --git a/tests/core/neverclaimread.test b/tests/core/neverclaimread.test index bf736f55d..09af8af58 100755 --- a/tests/core/neverclaimread.test +++ b/tests/core/neverclaimread.test @@ -350,10 +350,8 @@ digraph "-" { } EOF diff stdout expected -# FIXME: the "ignoring trailing garbage" is unwanted cat >expected.err <input <input <. + +. ./defs + +set -e + +# This is example 6 is the manual of pgsolver 4.1 +cat >example1.pg <out + +rest='(Fin(6) & (Inf(5) | (Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0)))))))' +cat >example1.hoa <out +diff out example1.hoa + + +# Test streaming. 
+cat >example2.pg < assert(!(false)) } + od; +accept_all: + skip +} +EOF +autfilt example2.pg >out +parity15=`randaut -A'parity max odd 15' -Q1 0 | grep Acceptance` +parity31=`randaut -A'parity max odd 31' -Q1 0 | grep Acceptance` +cat > example2.hoa <example3.pg <stdout 2>stderr && exit 1 +cat >expected.err< example3.hoa +diff stdout example3.hoa diff --git a/tests/core/prodchain.test b/tests/core/prodchain.test index b5037782f..c2d6091c7 100755 --- a/tests/core/prodchain.test +++ b/tests/core/prodchain.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -23,27 +23,61 @@ set -e set x shift -for i in `seq 1 42`; do +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do ltl2tgba "{a[*$i]}[]->GFb" > $i.hoa -done -for i in *.hoa; do - set x "$@" --product $i + set x "$@" --product $i.hoa shift done shift -autfilt "$@" 2> error && exit 1 -grep 'Too many acceptance sets used' error -autfilt -B "$@" > result -test "127,253,508,1" = `autfilt --stats=%s,%e,%t,%a result` +if [ $MAX_ACCSETS -eq 32 ]; then + autfilt "$@" 2> error && exit 1 + grep 'Too many acceptance sets used' error +fi +autfilt -B --low "$@" > result +test "4,7,16,1" = `autfilt --stats=%s,%e,%t,%a result` set x shift -for i in *.hoa; do - set x "$@" --product-or $i +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + set x "$@" --product-or $i.hoa shift done shift -autfilt "$@" 2> error && exit 1 -grep 'Too many acceptance sets used' error -autfilt -B "$@" > result +autfilt -B --low "$@" > result test "45,89,180,1" = `autfilt --stats=%s,%e,%t,%a result` + + +set x +shift +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + ltl2tgba -D --cobuchi -S "{a[*$i]}<>->FGb" > $i.hoa + set x "$@" --product $i.hoa + shift +done +shift +autfilt --cobuchi --high -D -S "$@" > result +test "44,47,92,1" = `autfilt --stats=%s,%e,%t,%a result` + +: > stats +set x +shift +for i in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 \ + 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42; do + ltl2tgba -D --cobuchi "{a[*$i]}<>->FGb" > $i.hoa + set x "$@" --product-or $i.hoa + shift + test $i -eq 1 && shift # remove the first --product + test 2,3,6,1 = `autfilt --high --small --cobuchi "$@" --stats=%s,%e,%t,%a` + test 3,5,10,1 = \ + `autfilt --high --small --cobuchi "$@" | autfilt -S --stats=%s,%e,%t,%a` +done + +if [ $MAX_ACCSETS -eq 32 ]; then + autfilt "$@" 2> error && exit 1 + grep 'Too many acceptance sets used' error +fi + +true diff --git a/tests/core/prodor.test b/tests/core/prodor.test index 03127508b..03d8cd458 100755 --- a/tests/core/prodor.test +++ b/tests/core/prodor.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2015, 2017-2018, 2021-2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -154,8 +154,8 @@ diff por.hoa exp ltl2tgba -BDH 'GFa' > gfa.hoa ltl2tgba -x '!wdba-minimize' -DH 'Xb' > xb.hoa -autfilt --product-or gfa.hoa xb.hoa -H > por.hoa -cat por.hoa +autfilt --product-or gfa.hoa xb.hoa -H > por2.hoa +cat por2.hoa cat >exp <err && exit 1 grep "randaut: failed to parse '1a' as an integer.* -n/--automata)" err +randaut -n99999999999999999999999999 3 2>err && exit 1 +grep "randaut:.*too large" err + randaut --spin -Q4 a b | ../ikwiad -H -XN - >out grep 'States: 4' out grep 'AP: 2' out diff --git a/tests/core/randtgba.cc b/tests/core/randtgba.cc index 7462f2c80..460bf9cd9 100644 --- a/tests/core/randtgba.cc +++ b/tests/core/randtgba.cc @@ -1,5 +1,5 @@ // -*- coding: utf-8 -*- -// Copyright (C) 2008-2012, 2014-2019 Laboratoire de Recherche et +// Copyright (C) 2008-2012, 2014-2019, 2022 Laboratoire de Recherche et // Développement de l'Epita (LRDE). // Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris // 6 (LIP6), département Systèmes Répartis Coopératifs (SRC), @@ -69,9 +69,11 @@ const char* default_algos[] = { "Cou99abs", "CVWY90", "CVWY90(bsh=4K)", + "CVWY90(ar:from_stack)", "GV04", "SE05", "SE05(bsh=4K)", + "SE05(ar:from_stack)", "Tau03", "Tau03_opt", "Tau03_opt(condstack)", diff --git a/tests/core/readsave.test b/tests/core/readsave.test index dd4e2efaf..cf6f43b89 100755 --- a/tests/core/readsave.test +++ b/tests/core/readsave.test @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2022 Laboratoire de +# Copyright (C) 2009, 2010, 2012, 2014-2023 Laboratoire de # Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre @@ -346,9 +346,8 @@ digraph "" { 0 -> 0 [label="b", id="E1", tooltip="\\\\E\n#1"] 1 -> 1 [label="a", id="E2", tooltip="\\\\E\n#2"] 2 [label="s2"] - 2 -> 0 [label="b", id="E3", tooltip="\\\\E\n#3"] - 3 -> 1 [label="a", id="E4", tooltip="\\\\E\n#4"] - 3 -> 0 [label="b", id="E5", tooltip="\\\\E\n#5"] + 3 -> 1 [label="a", id="E3", tooltip="\\\\E\n#3"] + 3 -> 0 [label="b", id="E4", tooltip="\\\\E\n#4"] } EOF @@ -963,7 +962,8 @@ EOF test `autfilt -c --is-inherently-weak input7` = 1 test `autfilt -c --is-weak input7` = 0 test `autfilt -c --is-stutter-invariant input7` = 1 -autfilt --check input7 -H >output7 +autfilt --check input7 -H >output7 && exit 0 +test $? -eq 2 cat >expected7 <oneline.hoa -autfilt input8 --stats='%h' >oneline2.hoa -autfilt input8 --stats='%H' >oneline3.hoa -autfilt input8 --randomize --stats='%h' >oneline4.hoa -autfilt input8 --randomize --stats='%H' >oneline5.hoa +autfilt input8 -Hl >oneline.hoa && exit 1 +autfilt input8 --stats='%h' >oneline2.hoa && exit 1 +autfilt input8 --stats='%H' >oneline3.hoa && exit 1 +autfilt input8 --randomize --stats='%h' >oneline4.hoa && exit 1 +autfilt input8 --randomize --stats='%H' >oneline5.hoa && exit 1 diff oneline.hoa oneline2.hoa diff oneline.hoa oneline3.hoa diff oneline.hoa oneline4.hoa && exit 1 diff --git a/tests/core/sonf.test b/tests/core/sonf.test new file mode 100644 index 000000000..0febfc342 --- /dev/null +++ b/tests/core/sonf.test @@ -0,0 +1,85 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2021 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. 
+# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs +set -e + +cat >input < Fa) & G(b -> ({x[*]}[]-> c)) +{x[*]}[]-> F({y[*]}<>-> GFz) +<>(({{p12}[*0..3]}[]-> ((p9) || (!(p17)))) V ((true) U (p17))) +{{true} || {[*0]}}[]-> (false) +{{p14} & {{p0}[*]}}[]-> (p11) +{{{!{p6}} -> {!{p3}}}[*]}[]-> ((p3)V((p3) || ((X((false))) && ((p2)V(p18))))) +X({{true} || {[*0]}}[]-> ((p17) U ((p8) && (p17)))) +({{{p4} || {p5} || {{p16} <-> {{p15} -> {p11}}}}[*]}[]-> (false)) -> (p8) +{[*1..6]}[]-> ((p9) V ((p9) || (!((p4) && (p19))))) +X({{{[*0]} || {{{p10};{p14}}[:*2..3]}}[:*]}<>-> (p8)) +{{true} && {{p8}[*]}}<>-> (!(p10)) +<>(!(({{p7}[*1..2]}<>-> (p11)) V ((!(p9)) && ([]((p11) || (X(p10))))))) +<>({{!{{p5} || {{!{p2}} <-> {p7}}}} & {[*]}}<>-> (p17)) +{{p0} || {{{[*0..2]}[:*2]}[*]}}<>-> ((p1) && (p6)) +EOF + +cat >expected < c)) +s1&G(!s2|GFz)&G(!s0|({y[*]}<>-> s2))&G(!s3|Fs0)&G(!s1|({x[*]}[]-> s3)) +F(s0 R (1 U p17))&G(p9|!p17|!s1)&G(!s0|({p12[*0..3]}[]-> s1)) +s0&G!s1&G(!s0|({1|[*0]}[]-> s1)) +s0&G(!s0|({p14&p0[*]}[]-> p11)) +s0&G(!s1|(p3 R (p3|(X(0)&(p2 R p18)))))&G(!s0|({{!p3|p6}[*]}[]-> s1)) +Xs0&G(!s1|(p17 U (p8&p17)))&G(!s0|({1|[*0]}[]-> s1)) +(p8|s0)&G(!s0|({{p4|p5|{p16 && {p11|!p15}}|{!p11 && p15 && !p16}}[*]}<>-> s1)) +s0&G(!s1|(p9 R (!p4|p9|!p19)))&G(!s0|({[*1..6]}[]-> s1)) +G(!s0|({{[*0]|{p10;p14}[:*2..3]}[:*]}<>-> p8))&Xs0 +s0&G(!p10|!s1)&G(!s0|({1 && p8[*]}<>-> s1)) +F(s0 U (p9|F(!p11&X!p10)))&G(!p11|!s1)&G(!s0|({p7[*1..2]}[]-> s1)) +G(!s0|({{!p5 && {{!p2 && !p7}|{p2 && p7}}}&[*]}<>-> p17))&Fs0 +s0&G(!s1|(p1&p6))&G(!s0|({p0|[*0..2][:*2][*]}<>-> s1)) +EOF + +cat >expected-aps < stdout +diff expected stdout +diff expected-aps stdout-aps + +# check idempotence +ltlfilt -F expected --sonf=s --sonf-aps=stdout-aps \ + | sed 's/ \([|&]\) /\1/g' > stdout +diff expected stdout +# should be 14 empty lines, no new aps introduced this time +test "$(wc -l -m stdout-aps | awk '{print $1 " " $2}')" = "14 14" diff --git a/tests/core/syfco.test b/tests/core/syfco.test new file mode 100755 index 000000000..453aa19bb --- /dev/null +++ b/tests/core/syfco.test @@ -0,0 +1,55 @@ +#! /bin/sh +# -*- coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. ./defs || exit 1 +set -e + +# Test that we can pass a tlsf specification to ltlsynt. 
This +# only work if syfco is installed. + +(syfco --version) || exit 77 + +cat >test.tlsf < X(out)); } +} +EOF + +test REALIZABLE = `ltlsynt --tlsf test.tlsf --realizability` +test UNREALIZABLE = `ltlsynt --tlsf test.tlsf --outs=foo --realizability` +test UNREALIZABLE = `ltlsynt --outs=foo --tlsf test.tlsf --realizability` + +# --tlsf can be used several time +ltlsynt --tlsf test.tlsf > out1 +ltlsynt --tlsf test.tlsf --tlsf test.tlsf > out2 +cat out1 out1 > out11 +diff out11 out2 + +ltlsynt --tlsf test.tlsf --tlsf test.tlsf --print-game > pgame.hoa +ltlsynt --from-pgame pgame.hoa > out3 +diff out2 out3 diff --git a/tests/core/tostring.test b/tests/core/tostring.test index e559ea198..7067a8b2c 100755 --- a/tests/core/tostring.test +++ b/tests/core/tostring.test @@ -1,7 +1,7 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2011, 2013, 2016 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2009-2011, 2013, 2016, 2022 Laboratoire de Recherche +# et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -63,7 +63,7 @@ X"R" {a;b;{c && d[*]};[+]}[]-> G{a[*]:b[*]} GF!(b & (a | c)) GF!({b && {a | c[*]}}<>-> {{!a}[*]}) -GF({{a | c[*]} & b[*]}[]-> d) +GF({b[*] & {a | c[*]}}[]-> d) {a[*2..3]} {a[*0..1]} {a[*]} diff --git a/tests/core/wdba2.test b/tests/core/wdba2.test index ca49bad94..3850a447a 100755 --- a/tests/core/wdba2.test +++ b/tests/core/wdba2.test @@ -1,7 +1,7 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015, 2018, 2019 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). +# Copyright (C) 2012, 2014-2015, 2018-2019, 2023 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -82,3 +82,36 @@ EOF autfilt --small --high -C -Hi input > output diff output expected + +# This test comes from a report from Raven Beutner and used to cause a +# segfault. +cat >input <output +cat >expected < #include -const char argp_program_doc[] = +static const char argp_program_doc[] = "Process model and formula to check wether a " "model meets a specification.\v\ Exit status:\n\ diff --git a/tests/python/298.py b/tests/python/298.py index d4865c440..89ddbdb0c 100644 --- a/tests/python/298.py +++ b/tests/python/298.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -20,21 +20,23 @@ # Test for parts of Issue #298. import spot +from unittest import TestCase +tc = TestCase() a1 = spot.automaton("""genltl --dac=51 | ltl2tgba --med |""") a1 = spot.degeneralize_tba(a1) r1 = spot.tgba_determinize(a1, True, False, False) -assert r1.num_sets() == 3 -assert a1.prop_complete().is_false(); +tc.assertEqual(r1.num_sets(), 3) +tc.assertTrue(a1.prop_complete().is_false()) # This used to fail in 2.9.5 and earlier. -assert r1.prop_complete().is_maybe(); -assert spot.is_complete(r1) +tc.assertTrue(r1.prop_complete().is_maybe()) +tc.assertTrue(spot.is_complete(r1)) a2 = spot.automaton("""genltl --dac=51 | ltl2tgba --high |""") a2 = spot.degeneralize_tba(a2) r2 = spot.tgba_determinize(a2, True, False, False) # This used to fail in 2.9.5 and earlier. 
-assert r2.num_sets() == 3 -assert a2.prop_complete().is_false(); -assert r2.prop_complete().is_maybe(); -assert spot.is_complete(r2) +tc.assertEqual(r2.num_sets(), 3) +tc.assertTrue(a2.prop_complete().is_false()) +tc.assertTrue(r2.prop_complete().is_maybe()) +tc.assertTrue(spot.is_complete(r2)) diff --git a/tests/python/341.py b/tests/python/341.py index 4c5937149..e828ab07c 100644 --- a/tests/python/341.py +++ b/tests/python/341.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,7 +19,8 @@ import spot from subprocess import _active - +from unittest import TestCase +tc = TestCase() def two_intersecting_automata(): """return two random automata with a non-empty intersection""" @@ -34,4 +35,4 @@ for i in range(5): n = len(_active) print(n, "active processes") -assert(n == 0) +tc.assertEqual(n, 0) diff --git a/tests/python/471.py b/tests/python/471.py index 6fee3a2d3..0fe180554 100644 --- a/tests/python/471.py +++ b/tests/python/471.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de l'Epita +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de l'Epita # (LRDE). # # This file is part of Spot, a model checking library. @@ -20,9 +20,12 @@ # Test for Issue #471. import spot +from unittest import TestCase +tc = TestCase() + a = spot.translate('Fa') a = spot.to_generalized_rabin(a, False) r1 = a.intersecting_run(a) r2 = a.accepting_run() -assert str(r1) == str(r2) -assert a.prop_weak().is_true() +tc.assertEqual(str(r1), str(r2)) +tc.assertTrue(a.prop_weak().is_true()) diff --git a/tests/python/_mealy.ipynb b/tests/python/_mealy.ipynb index 0fbad3d08..ebeeaacb7 100644 --- a/tests/python/_mealy.ipynb +++ b/tests/python/_mealy.ipynb @@ -3,15 +3,18 @@ { "cell_type": "code", "execution_count": 1, + "id": "8bca10b8", "metadata": {}, "outputs": [], "source": [ - "import spot\n", + "import spot, buddy\n", + "import pandas as pd\n", "spot.setup()" ] }, { "cell_type": "markdown", + "id": "c73e997a", "metadata": {}, "source": [ "Test the Mealy printer." 
@@ -20,6 +23,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "f8eff7ed", "metadata": {}, "outputs": [], "source": [ @@ -29,6 +33,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "ad3c80bc", "metadata": {}, "outputs": [ { @@ -49,6 +54,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "50130d85", "metadata": {}, "outputs": [ { @@ -60,82 +66,70 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "!a & !c\n", - "\n", + "\n", + "\n", + "!a & !c\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "a | c\n", - "\n", + "\n", + "\n", + "a | c\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", + "\n", + "\n", + "!b & !d\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "b | d\n", - "\n", + "\n", + "\n", + "b | d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f300caabba0> >" + " *' at 0x7f86481a2690> >" ] }, "execution_count": 4, @@ -150,6 +144,7 @@ { "cell_type": "code", "execution_count": 5, + "id": "3d56cda6", "metadata": {}, "outputs": [], "source": [ @@ -159,6 +154,7 @@ { "cell_type": "code", "execution_count": 6, + "id": "c24548a1", "metadata": {}, "outputs": [ { @@ -213,7 +209,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f300c179300> >" + " *' at 0x7f85f45cbb70> >" ] }, "execution_count": 6, @@ -228,6 +224,7 @@ { "cell_type": "code", "execution_count": 7, + "id": "88f2c0e0", "metadata": {}, "outputs": [], "source": [ @@ -237,6 +234,7 @@ { "cell_type": "code", "execution_count": 8, + "id": "e626997e", "metadata": {}, "outputs": [ { @@ -285,7 +283,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f300c179300> >" + " *' at 0x7f85f45cbb70> >" ] }, "execution_count": 8, @@ -296,11 +294,1404 @@ "source": [ "x" ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "923a59d6", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "!i\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f861bfc8ae0> >" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o = buddy.bdd_ithvar(aut.register_ap(\"o\"))\n", + "spot.set_synthesis_outputs(aut, o)\n", + "aut.new_states(3)\n", + "aut.new_edge(0,1,buddy.bdd_not(i)&buddy.bdd_not(o))\n", + "aut.new_edge(0,2,i&o)\n", + "aut.new_edge(1,1,buddy.bdd_not(o))\n", + "aut.new_edge(2,2,buddy.bddtrue)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f06d6df4", + 
"metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "('o',)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f85f45efde0> >" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut_s = spot.split_2step(aut)\n", + "print(spot.get_synthesis_output_aps(aut_s))\n", + "aut_s" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "3cc4d320", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...total_timen_classesn_refinementn_litn_clausesn_iterationn_letters_partn_bisim_letn_min_statesdone
0presat3868.953.282e-061.4388e-050.0001297651.3759e-059.499e-068.73e-069.01e-066.6209e-05...NaNNaNNaNNaNNaNNaN32NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...0.000743251207120NaNNaN41
\n", + "

2 rows × 23 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time incomp_time \\\n", + "0 presat 3868.95 3.282e-06 1.4388e-05 0.000129765 1.3759e-05 \n", + "1 sat NaN NaN NaN NaN NaN \n", + "\n", + " split_all_let_time split_min_let_time split_cstr_time prob_init_build_time \\\n", + "0 9.499e-06 8.73e-06 9.01e-06 6.6209e-05 \n", + "1 NaN NaN NaN NaN \n", + "\n", + " ... total_time n_classes n_refinement n_lit n_clauses n_iteration \\\n", + "0 ... NaN NaN NaN NaN NaN NaN \n", + "1 ... 0.000743251 2 0 7 12 0 \n", + "\n", + " n_letters_part n_bisim_let n_min_states done \n", + "0 3 2 NaN NaN \n", + "1 NaN NaN 4 1 \n", + "\n", + "[2 rows x 23 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f861bfc8630> >" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "min_lvl = 0\n", + "aut_ms, table = spot.minimize_mealy(aut_s, min_lvl, display_log=True, return_log=True)\n", + "aut_ms" + ] + }, + { + "cell_type": "markdown", + "id": "bc844797", + "metadata": {}, + "source": [ + "## A more involved example" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "893bc90e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "(!o0 & o1) | (o0 & !o1)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f861bf9fb40> >" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o0 = buddy.bdd_ithvar(aut.register_ap(\"o0\"))\n", + "no0 = buddy.bdd_not(o0)\n", + "o1 = buddy.bdd_ithvar(aut.register_ap(\"o1\"))\n", + "no1 = buddy.bdd_not(o1)\n", + "spot.set_synthesis_outputs(aut, o0&o1)\n", + "\n", + "vo1 = o0&o1\n", + "vo2 = no0&o1\n", + "vo3 = o0&no1\n", + "\n", + "aut.new_states(3)\n", + "\n", + "aut.new_edge(0,1,vo1|vo2)\n", + "aut.new_edge(1,2,vo1|vo3)\n", + "aut.new_edge(2,2,vo2|vo3)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "23edb107", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + 
"\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "(!o0 & o1) | (o0 & !o1)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f861bf9f210> >" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut_s = spot.split_2step(aut)\n", + "aut_s" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "837aab84", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...total_timen_classesn_refinementn_litn_clausesn_iterationn_letters_partn_bisim_letn_min_statesdone
0presat3869.083.213e-069.079e-069.5752e-055.168e-065.727e-067.543e-061.5784e-054.0507e-05...NaNNaNNaNNaNNaNNaN11NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN10360NaNNaNNaNNaN
2refinementNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN111016NaNNaNNaNNaNNaN
3satNaNNaNNaNNaNNaNNaNNaNNaNNaN...0.0003990732017291NaNNaN41
\n", + "

4 rows × 23 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time \\\n", + "0 presat 3869.08 3.213e-06 9.079e-06 9.5752e-05 \n", + "1 sat NaN NaN NaN NaN \n", + "2 refinement NaN NaN NaN NaN \n", + "3 sat NaN NaN NaN NaN \n", + "\n", + " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", + "0 5.168e-06 5.727e-06 7.543e-06 1.5784e-05 \n", + "1 NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN \n", + "\n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 4.0507e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 0.000399073 2 0 17 \n", + "\n", + " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", + "0 NaN NaN 1 1 NaN NaN \n", + "1 6 0 NaN NaN NaN NaN \n", + "2 16 NaN NaN NaN NaN NaN \n", + "3 29 1 NaN NaN 4 1 \n", + "\n", + "[4 rows x 23 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of variables\n", + "0 NaN\n", + "1 3\n", + "2 10\n", + "3 17\n", + "Name: n_lit, dtype: object\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0 & o1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f861bfcdc00> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "aut_ms, table = spot.minimize_mealy(aut_s, si, display_log=True, return_log=True)\n", + "print(\"Number of variables\")\n", + "print(table[\"n_lit\"])\n", + "aut_ms" + ] + }, + { + "cell_type": "markdown", + "id": "0fea0269", + "metadata": {}, + "source": [ + "## Testing dimacs output" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "d14324e8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
taskpremin_timereorg_timepartsol_timeplayer_incomp_timeincomp_timesplit_all_let_timesplit_min_let_timesplit_cstr_timeprob_init_build_time...total_timen_classesn_refinementn_litn_clausesn_iterationn_letters_partn_bisim_letn_min_statesdone
0presat3869.142.863e-069.08e-066.0622e-054.679e-065.308e-068.59e-067.962e-064.0159e-05...NaNNaNNaNNaNNaNNaN11NaNNaN
1satNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN10360NaNNaNNaNNaN
2refinementNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaN111016NaNNaNNaNNaNNaN
3satNaNNaNNaNNaNNaNNaNNaNNaNNaN...0.0004164642017291NaNNaN41
\n", + "

4 rows × 23 columns

\n", + "
" + ], + "text/plain": [ + " task premin_time reorg_time partsol_time player_incomp_time \\\n", + "0 presat 3869.14 2.863e-06 9.08e-06 6.0622e-05 \n", + "1 sat NaN NaN NaN NaN \n", + "2 refinement NaN NaN NaN NaN \n", + "3 sat NaN NaN NaN NaN \n", + "\n", + " incomp_time split_all_let_time split_min_let_time split_cstr_time \\\n", + "0 4.679e-06 5.308e-06 8.59e-06 7.962e-06 \n", + "1 NaN NaN NaN NaN \n", + "2 NaN NaN NaN NaN \n", + "3 NaN NaN NaN NaN \n", + "\n", + " prob_init_build_time ... total_time n_classes n_refinement n_lit \\\n", + "0 4.0159e-05 ... NaN NaN NaN NaN \n", + "1 NaN ... NaN 1 0 3 \n", + "2 NaN ... NaN 1 1 10 \n", + "3 NaN ... 0.000416464 2 0 17 \n", + "\n", + " n_clauses n_iteration n_letters_part n_bisim_let n_min_states done \n", + "0 NaN NaN 1 1 NaN NaN \n", + "1 6 0 NaN NaN NaN NaN \n", + "2 16 NaN NaN NaN NaN NaN \n", + "3 29 1 NaN NaN 4 1 \n", + "\n", + "[4 rows x 23 columns]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "c ### Next Instance 1 0 ###\n", + "p cnf 5 5\n", + "-1 2 -3 0\n", + "1 -3 0\n", + "1 -5 0\n", + "2 -5 0\n", + "3 -5 0\n", + "c ### Next Instance 1 1 ###\n", + "p cnf 12 15\n", + "-1 2 -3 0\n", + "4 0\n", + "6 0\n", + "-9 0\n", + "-1 -2 10 0\n", + "-10 0\n", + "1 -3 0\n", + "1 -5 0\n", + "1 -12 0\n", + "2 -5 0\n", + "2 -12 0\n", + "-2 9 0\n", + "3 -5 0\n", + "3 -12 0\n", + "7 8 0\n", + "c ### Next Instance 2 0 ###\n", + "p cnf 19 29\n", + "-3 -1 2 0\n", + "4 0\n", + "6 0\n", + "-9 0\n", + "-1 -2 10 0\n", + "-10 0\n", + "11 -16 -17 0\n", + "1 -15 -17 0\n", + "-1 13 -14 0\n", + "-11 13 -16 0\n", + "-11 -15 2 0\n", + "-13 -15 2 0\n", + "1 11 -19 0\n", + "13 -19 2 0\n", + "15 16 -19 0\n", + "3 14 -19 0\n", + "-2 0\n", + "-12 0\n", + "-5 0\n", + "1 -3 0\n", + "1 -5 0\n", + "1 -12 0\n", + "2 -5 0\n", + "2 -12 0\n", + "-2 9 0\n", + "3 -5 0\n", + "3 -12 0\n", + "7 8 0\n", + "11 -14 0\n", + "\n" + ] + } + ], + "source": [ + "import tempfile\n", + "\n", + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "\n", + "with tempfile.NamedTemporaryFile(dir='.', suffix='.dimacslog') as t:\n", + " si.opt.set_str(\"satlogdimacs\", t.name)\n", + " aut_ms, table = spot.minimize_mealy(aut_s, si, display_log=True, return_log=True)\n", + " with open(t.name, \"r\") as f:\n", + " print(\"\".join(f.readlines()))\n", + " \n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "b10213b8", + "metadata": {}, + "source": [ + "# Testing partitioned relabeling" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "fd5ca506", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Conditions in orig machine: 8\n", + "Conditions in relabeled machine: 13\n" + ] + } + ], + "source": [ + "def get_mealy():\n", + " return spot.split_2step(spot.automaton(\"\"\"HOA: v1\n", + "States: 2\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0\n", + 
"[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0\n", + "[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0\n", + "[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0\n", + "[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 | 0&!1&!2&3&!4&!5&6&!7&8&9] 1\n", + "State: 1\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\"\"\"))\n", + "\n", + "def env_conditions(m):\n", + " sp = spot.get_state_players(m)\n", + " conds = []\n", + " for e in m.edges():\n", + " if sp[e.src]:\n", + " continue\n", + " if not e.cond in conds:\n", + " conds.append(e.cond)\n", + " return conds\n", + "print(\"Conditions in orig machine: \", len(env_conditions(get_mealy())))\n", + "ms = get_mealy()\n", + "# Relabel only env\n", + "spot.partitioned_game_relabel_here(ms, True, False, True, False)\n", + "print(\"Conditions in relabeled machine: \", len(env_conditions(ms)))" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "ee29da67", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Partitioned env letters: 13\n" + ] + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "si.minimize_lvl = 3\n", + "# Turn on relabeling\n", + "si.opt.set(\"max_letter_mult\", 100000)\n", + "\n", + "mm, log = spot.minimize_mealy(get_mealy(), si, return_log=True)\n", + "print(\"Partitioned env letters:\", log[\"n_letters_part\"][0])" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "0aec8019", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Partitioned env letters: 0\n" + ] + } + ], + "source": [ + "# Turn off relabeling\n", + "si.opt.set(\"max_letter_mult\", 0)\n", + "\n", + "mm, log = spot.minimize_mealy(get_mealy(), si, return_log=True)\n", + "print(\"Partitioned env letters:\", log[\"n_letters_part\"][0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a92f4f43", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -315,6 +1706,11 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" + }, + "vscode": { + "interpreter": { + "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" + } } }, "nbformat": 4, diff --git a/tests/python/_partitioned_relabel.ipynb b/tests/python/_partitioned_relabel.ipynb new file mode 100644 index 000000000..b7f1c4380 --- /dev/null +++ b/tests/python/_partitioned_relabel.ipynb @@ -0,0 +1,2353 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "4d896402", + "metadata": {}, + "outputs": [], + "source": [ + "import 
spot, buddy" + ] + }, + { + "cell_type": "markdown", + "id": "94e87f9c", + "metadata": {}, + "source": [ + "# Partitioned relabeling\n", + "\n", + "Partitioned relabeling will:\n", + "First compute a partition over all conditions appearing in the automaton.\n", + "That is, the set of new conditions is such that (1) they do not overlap (2) all valuations that verify some condition in the original automaton also verify (exactly one) of the new conditions.\n", + "These new conditions can be thought of as letters in a \"classical\" sense.\n", + "Then we create new aps and encode the \"number\" of these letters using the fresh aps, resulting in new letters which are a single valuation over the fresh aps.\n", + "\n", + "This can be helpful if there are many aps, but few different conditions over them\n", + "\n", + "The algorithm comes in two flavours:\n", + "\n", + "We maintain the original number of edges. Therefore the new label correspond to a disjunction over new letters (split=False).\n", + "We split each edge into its letters, creating more edges (split=True)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62123fa9", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415fbd0> >" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#Relabeling a graph\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b0 = buddy.bdd_ithvar(aut.register_ap(\"b0\"))\n", + "nb0 = buddy.bdd_nithvar(aut.register_ap(\"b0\"))\n", + "b1 = buddy.bdd_ithvar(aut.register_ap(\"b1\"))\n", + "nb1 = buddy.bdd_nithvar(aut.register_ap(\"b1\"))\n", + "b2 = buddy.bdd_ithvar(aut.register_ap(\"b2\"))\n", + "nb2 = buddy.bdd_nithvar(aut.register_ap(\"b2\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b0&b1&b2)\n", + "aut.new_edge(0,4,a&nb0&nb1&nb2)\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d4c8e977", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 | __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + 
"\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415fbd0> >" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "relabel_dict = spot.partitioned_relabel_here(aut)\n", + "\n", + "print(relabel_dict.size())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6f90a095", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415fbd0> >" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Undo the relabeling\n", + "spot.relabel_here(aut, relabel_dict)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "513067ab", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415bf30> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 5\n", + "Start: 0\n", + "AP: 6 \"a\" \"b0\" \"b1\" \"b2\" \"__nv0\" \"__nv1\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[!4&!5] 1\n", + "[4&!5] 2\n", + "[!4&5] 3\n", + "[4&5] 4\n", + "[4&!5] 1\n", + "[4&5] 1\n", + "[!4&5] 1\n", + "[4&5] 2\n", + "[!4&5] 2\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + 
"2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & __nv1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415bf30> >" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Relabeling the same graph using the split option\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b0 = buddy.bdd_ithvar(aut.register_ap(\"b0\"))\n", + "nb0 = buddy.bdd_nithvar(aut.register_ap(\"b0\"))\n", + "b1 = buddy.bdd_ithvar(aut.register_ap(\"b1\"))\n", + "nb1 = buddy.bdd_nithvar(aut.register_ap(\"b1\"))\n", + "b2 = buddy.bdd_ithvar(aut.register_ap(\"b2\"))\n", + "nb2 = buddy.bdd_nithvar(aut.register_ap(\"b2\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b0&b1&b2)\n", + "aut.new_edge(0,4,a&nb0&nb1&nb2)\n", + "\n", + "display(aut)\n", + "xx = spot.partitioned_relabel_here(aut, True)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "50c6a08b", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(a & !b0 & b2) | (a & b0 & !b2) | (a & !b1 & b2) | (a & b1 & !b2)\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b0 & b1 & b2\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b0 & !b1 & !b2\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415bf30> >" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Undo the relabeling -> disjoint conditions over the original ap\n", + "spot.relabel_here(aut, relabel_dict)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d2efd313", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + 
"\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936c3c6090> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 6\n", + "Start: 0\n", + "AP: 5 \"a\" \"__nv0\" \"__nv1\" \"b\" \"c\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[!1 | !2] 1\n", + "[!1&2 | 1&!2] 2\n", + "[!1&2] 3\n", + "[1&!2] 4\n", + "[4] 5\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "State: 5\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!__nv0 | !__nv1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "(__nv0 & !__nv1) | (!__nv0 & __nv1)\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv0 & __nv1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv0 & !__nv1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936c3c6090> >" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Working only on a subset of the aps\n", + "# Note that True is always relabeled\n", + "\n", + "aut = spot.make_twa_graph()\n", + "aut.new_states(6)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "na = buddy.bdd_nithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "nb = buddy.bdd_nithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "nc = buddy.bdd_nithvar(aut.register_ap(\"c\"))\n", + "\n", + "aut.new_edge(0,1,buddy.bddtrue)\n", + "aut.new_edge(0,2,a)\n", + "aut.new_edge(0,3,a&b)\n", + "aut.new_edge(0,4,a&nb)\n", + "aut.new_edge(0,5,c)\n", + "\n", + "display(aut)\n", + "\n", + "concerned_aps = a & b # concerned aps are given as a conjunction of positive aps\n", + "# As partitioning can be exponentially costly,\n", + "# one can limit the number of new letters generated before abadoning\n", + "# This can be done either as a hard limit and/or as the number of current condition\n", + "# times a factor\n", + "relabel_dict = spot.partitioned_relabel_here(aut, False, 1000, 1000, concerned_aps)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1fbc8813", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 6\n", + "Start: 0\n", + "AP: 3 \"a\" \"b\" \"c\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + 
"properties: trans-labels explicit-labels state-acc\n", + "--BODY--\n", + "State: 0\n", + "[t] 1\n", + "[0] 2\n", + "[0&1] 3\n", + "[0&!1] 4\n", + "[2] 5\n", + "State: 1\n", + "State: 2\n", + "State: 3\n", + "State: 4\n", + "State: 5\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936c3c6090> >" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#undo partial relabeling\n", + "spot.relabel_here(aut, relabel_dict)\n", + "print(aut.to_str(\"hoa\"))\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "ef77c2ee", + "metadata": {}, + "source": [ + "# Concerning games and Mealy machines\n", + "\n", + "Games and split mealy machines have both: defined outputs and states that either belong to player or env.\n", + "Relabeling is done separately for env and player transitions (over inputs and outputs respectively).\n", + "\n", + "The problem is that T (bddtrue) is ambiguous, as it may be over the inputs or outputs.\n", + "\n", + "We therefore introduce a dedicated function for this matter." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "296a93d3", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b / !u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b / (label too long)\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "1 / (label too long)\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f510> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" 
\"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!8&!9&!10] 2\n", + "[!8&!9&10] 3\n", + "[!8&9&!10] 4\n", + "[!8&9&10] 5\n", + "[8&!9&!10] 6\n", + "[8&!9&10] 7\n", + "[8&9] 8\n", + "State: 1\n", + "[t] 8\n", + "State: 2\n", + "[!0&!1&2&!3&4&!5&6&!7] 0\n", + "State: 3\n", + "[!0&1&!2&!3&4&!5&6&!7] 0\n", + "State: 4\n", + "[!0&!1&2&!3&!4&5&6&!7] 0\n", + "State: 5\n", + "[!0&1&!2&!3&!4&5&6&!7] 0\n", + "State: 6\n", + "[!0&!1&2&3&!4&!5&6&!7] 0\n", + "State: 7\n", + "[!0&1&!2&3&!4&!5&6&!7] 0\n", + "State: 8\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & 
!u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 21 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\" \"__AP_OUT__\" \"__AP_IN__\" \"__nv_in0\" \"__nv_in1\" \"__nv_in2\" \"__nv_in3\" \"__nv_out0\" \"__nv_out1\" \"__nv_out2\" \"__nv_out3\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!13&!14&!15&!16] 2\n", + "[13&!14&!15&!16] 3\n", + "[!13&14&!15&!16] 4\n", + "[13&14&!15&!16] 5\n", + "[!13&!14&15&!16] 6\n", + "[13&!14&15&!16] 7\n", + "[!13&14&15&!16] 8\n", + "[13&14&15&!16] 2\n", + "[!13&!14&!15&16] 3\n", + "[13&!14&!15&16] 4\n", + "[!13&14&!15&16] 5\n", + "[13&14&!15&16] 6\n", + "[!13&!14&15&16] 7\n", + "State: 1\n", + "[13&14&15&!16] 8\n", + "[!13&!14&!15&16] 8\n", + "[13&!14&!15&16] 8\n", + "[!13&14&!15&16] 8\n", + "[13&14&!15&16] 8\n", + "[!13&!14&15&16] 8\n", + "[!13&14&15&!16] 8\n", + "State: 2\n", + "[!17&!18&!19&!20 | !17&18&19&!20] 0\n", + "State: 3\n", + "[17&!18&!19&!20 | 17&18&19&!20] 0\n", + "State: 4\n", + "[!17&!18&!19&20 | !17&18&!19&!20] 0\n", + "State: 5\n", + "[17&!18&!19&20 | 17&18&!19&!20] 0\n", + "State: 6\n", + "[!17&!18&19&!20 | !17&18&!19&20] 0\n", + "State: 7\n", + 
"[17&!18&19&!20 | 17&18&!19&20] 0\n", + "State: 8\n", + "[!17&!18&20 | 18&19&!20 | !19&20] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & __nv_out2 & !__nv_out3)\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "(!__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (!__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "(__nv_out0 & __nv_out1 & !__nv_out2 & !__nv_out3) | (__nv_out0 & !__nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "(__nv_out0 & !__nv_out1 & __nv_out2 & !__nv_out3) | (__nv_out0 & __nv_out1 & !__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(!__nv_out0 & !__nv_out1 & __nv_out3) | (__nv_out1 & __nv_out2 & !__nv_out3) | (!__nv_out2 & __nv_out3)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & !__nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + 
"1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "__nv_in0 & __nv_in1 & !__nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & !__nv_in1 & __nv_in2 & __nv_in3\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!__nv_in0 & __nv_in1 & __nv_in2 & !__nv_in3\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Strategy torquesimple_acd as mealy machine\n", + "\n", + "aut = spot.automaton(\"\"\"HOA: v1\n", + "States: 2\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0\n", + "[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0\n", + "[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0\n", + "[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0\n", + "[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0\n", + "[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 | 0&!1&!2&3&!4&!5&6&!7&8&9] 1\n", + "State: 1\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\"\"\")\n", + "\n", + "display(aut)\n", + "\n", + "# Convert to split mealy machine\n", + "auts = spot.split_2step(aut)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)\n", + "\n", + "# Relabel both, inputs and outputs\n", + "# You can choose the split option and stopping criteria as before\n", + "rel_dicts = spot.partitioned_game_relabel_here(auts, True, True, True, False, 10000, 10000)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "7ec02ff5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HOA: v1\n", + "States: 9\n", + "Start: 0\n", + "AP: 11 \"u0accel0accel\" \"u0accel0f1dcon23p81b\" \"u0accel0f1dcon231b\" \"u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b\" \"u0gear0gear\" \"u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b\" \"u0steer0f1dsteering0angle0trackpos1b\" \"u0steer0steer\" \"p0p0gt0rpm0f1dcon5523231b\" \"p0p0lt0rpm0f1dcon32323231b\" \"p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\"\n", + 
"acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 1 1 1 1 1 1\n", + "controllable-AP: 0 1 2 3 4 5 6 7\n", + "--BODY--\n", + "State: 0\n", + "[f] 2\n", + "[f] 3\n", + "[f] 4\n", + "[f] 5\n", + "[f] 6\n", + "[f] 7\n", + "[8&9] 8\n", + "[!8&!9&!10] 2\n", + "[!8&!9&10] 3\n", + "[!8&9&!10] 4\n", + "[!8&9&10] 5\n", + "[8&!9&!10] 6\n", + "[8&!9&10] 7\n", + "State: 1\n", + "[!8&!9&!10] 8\n", + "[!8&!9&10] 8\n", + "[!8&9&!10] 8\n", + "[!8&9&10] 8\n", + "[8&!9&!10] 8\n", + "[8&!9&10] 8\n", + "[8&9] 8\n", + "State: 2\n", + "[!0&!1&2&!3&4&!5&6&!7] 0\n", + "State: 3\n", + "[!0&1&!2&!3&4&!5&6&!7] 0\n", + "State: 4\n", + "[!0&!1&2&!3&!4&5&6&!7] 0\n", + "State: 5\n", + "[!0&1&!2&!3&!4&5&6&!7] 0\n", + "State: 6\n", + "[!0&!1&2&3&!4&!5&6&!7] 0\n", + "State: 7\n", + "[!0&1&!2&3&!4&!5&6&!7] 0\n", + "State: 8\n", + "[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 1\n", + "--END--\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->7\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", 
+ "\n", + "\n", + "2->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & !u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "!u0accel0accel & !u0accel0f1dcon23p81b & u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "!u0accel0accel & u0accel0f1dcon23p81b & !u0accel0f1dcon231b & !u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b & u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b & !u0gear0gear & u0steer0f1dsteering0angle0trackpos1b & !u0steer0steer\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "(label too long)\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & !p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & !p0p0lt0rpm0f1dcon32323231b & p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "p0p0gt0rpm0f1dcon5523231b & p0p0lt0rpm0f1dcon32323231b\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f936415f990> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Undo relabel\n", + "spot.relabel_game_here(auts, rel_dicts)\n", + "print(auts.to_str(\"hoa\"))\n", + "display(auts)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "48c2283b", + "metadata": {}, 
+ "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n" + ] + } + ], + "source": [ + "# Check if we do actually obtain the same automaton\n", + "\n", + "print(spot.are_equivalent(aut, spot.unsplit_2step(auts)))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2b8d907e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n", + "True\n" + ] + } + ], + "source": [ + "# Test all options for equivalence\n", + "for relabel_env in [True, False]:\n", + " for relabel_player in [True, False]:\n", + " for split_env in [True, False]:\n", + " for split_player in [True, False]:\n", + " auts = spot.split_2step(aut)\n", + " rel_dicts = spot.partitioned_game_relabel_here(auts, relabel_env, relabel_player, split_env, split_player, 10000, 10000)\n", + " spot.relabel_game_here(auts, rel_dicts)\n", + " print(spot.are_equivalent(aut, spot.unsplit_2step(auts)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17a32a72", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/python/_product_weak.ipynb b/tests/python/_product_weak.ipynb index 5e4a2a4a0..e52b9b793 100644 --- a/tests/python/_product_weak.ipynb +++ b/tests/python/_product_weak.ipynb @@ -87,6 +87,20 @@ "[0] 0 {0}\n", "--END--\n", "HOA: v1\n", + "name: \"FGd\"\n", + "States: 1\n", + "Start: 0\n", + "AP: 1 \"c\"\n", + "acc-name: co-Buchi\n", + "Acceptance: 1 Fin(0)\n", + "properties: trans-labels explicit-labels trans-acc complete\n", + "properties: deterministic stutter-invariant\n", + "--BODY--\n", + "State: 0\n", + "[!0] 0 {0}\n", + "[0] 0\n", + "--END--\n", + "HOA: v1\n", "States: 1\n", "Start: 0\n", "AP: 1 \"d\"\n", @@ -117,7 +131,7 @@ { "data": { "text/plain": [ - "6" + "7" ] }, "execution_count": 3, @@ -189,7 +203,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347ba0> >" + " *' at 0x7f26743d3720> >" ] }, "metadata": {}, @@ -299,11 +313,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -379,12 +393,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -434,11 +448,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -508,11 +522,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -644,12 +658,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -699,11 +713,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -774,11 +788,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1127,6 +1141,226 @@ "metadata": {}, "output_type": "display_data" }, 
+ { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -1586,7 +1820,7 @@ "# In a previous version we used to iterate over all possible left automata with \"for left in auts:\"\n", "# however we had trouble with Jupyter on i386, where running the full loop abort with some low-level \n", "# exeptions from Jupyter client. Halving the loop helped for some times, but then the timeout\n", - "# came back. So we do one left automaton at at time.\n", + "# came back. So we do one left automaton at a time.\n", "left = auts[0]\n", "display(left)\n", "for right in auts:\n", @@ -1609,12 +1843,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1660,7 +1894,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347bd0> >" + " *' at 0x7f26743d36c0> >" ] }, "metadata": {}, @@ -1723,11 +1957,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1797,11 +2031,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1933,12 +2167,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -1988,11 +2222,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2042,11 +2276,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2108,12 +2342,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -2163,11 +2397,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2217,11 +2451,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -2490,6 +2724,224 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -2949,12 +3401,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3000,7 +3452,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347cf0> >" + " *' at 0x7f26743d37e0> >" ] }, "metadata": {}, @@ -3063,11 +3515,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3138,11 +3590,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3274,12 +3726,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3329,11 +3781,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3383,11 +3835,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -3449,12 +3901,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3504,11 +3956,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3558,11 +4010,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3831,6 +4283,224 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -4331,7 +5001,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d50> >" + " *' at 0x7f26743d3870> >" ] }, "metadata": {}, @@ -4575,12 +5245,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -4801,12 +5471,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -5119,44 +5789,192 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Inf(\n", - "\n", - ")\n", - "[Fin-less 2]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c\n", + "\n", + "\n", + "!c\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c\n", - "\n", - "\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[gen. Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", "\n", "\n", "\n", @@ -5559,6 +6377,1431 @@ "metadata": { "scrolled": false }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f26743d3900> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")\n", + "[Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")\n", + "[gen. co-Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[Streett-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "[Rabin-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "left = auts[4]\n", + "display(left)\n", + "for right in auts:\n", + " display_inline(right, spot.product(left, right), spot.product_or(left, right))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -5611,7 +7854,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d80> >" + " *' at 0x7f26743d3990> >" ] }, "metadata": {}, @@ -5856,12 +8099,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -6082,12 +8325,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -6486,6 +8729,193 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | Fin(\n", + "\n", + ")\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -6807,1266 +9237,6 @@ "output_type": "display_data" } ], - "source": [ - "left = auts[4]\n", - "display(left)\n", - "for right in auts:\n", - " display_inline(right, spot.product(left, right), spot.product_or(left, right))" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n" - ], - "text/plain": [ - " *' at 0x7fd90c347e40> >" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "a\n", - "\n", - "t\n", - "[all]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "0,2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & !d\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Fb\n", - "\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Fb\n", - "\n", - "[co-Büchi]\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "I->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "GFc\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ")\n", - "[Streett-like 2]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ") | Inf(\n", - "\n", - ")\n", - "[Rabin-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!c & d\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")\n", - "[Rabin-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")\n", - "[Streett-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ") | (Inf(\n", - "\n", - ") & Fin(\n", - "\n", - "))\n", - "[Rabin-like 3]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")\n", - "[Streett-like 1]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "(Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))\n", - "[Streett-like 2]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ")|Fin(\n", - "\n", - ")) | Inf(\n", - "\n", - ")\n", - "[Rabin-like 4]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], "source": [ "left = auts[5]\n", "display(left)\n", @@ -8134,7 +9304,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347ba0> >" + " *' at 0x7f26743d3720> >" ] }, "metadata": {}, @@ -8197,11 +9367,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8263,11 +9433,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8344,12 +9514,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8399,11 +9569,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8522,11 +9692,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8656,12 +9826,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -8711,11 +9881,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8834,11 +10004,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9241,6 +10411,288 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2,0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -9785,12 +11237,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9836,7 +11288,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347bd0> >" + " *' at 0x7f26743d36c0> >" ] }, "metadata": {}, @@ -9899,11 +11351,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10022,11 +11474,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10156,12 +11608,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10211,11 +11663,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10264,11 +11716,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10331,12 +11783,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -10386,11 +11838,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10439,11 +11891,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10729,6 +12181,238 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -11189,12 +12873,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11240,7 +12924,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347cf0> >" + " *' at 0x7f26743d37e0> >" ] }, "metadata": {}, @@ -11303,11 +12987,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11426,11 +13110,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11560,12 +13244,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11615,11 +13299,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11668,11 +13352,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -11735,12 +13419,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11790,11 +13474,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -11845,11 +13529,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -12137,6 +13821,242 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "1,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -12638,7 +14558,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d50> >" + " *' at 0x7f26743d3870> >" ] }, "metadata": {}, @@ -12938,12 +14858,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -13180,12 +15100,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -13575,6 +15495,164 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Fin(\n", + "\n", + ") & Inf(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -13985,6 +16063,1562 @@ "metadata": { "scrolled": false }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f26743d3900> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "a\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & c\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "0,2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fb\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "GFc\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ")&Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Rabin-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & (Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")) | (Inf(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))) | ((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "left = auts[4]\n", + "display(left)\n", + "for right in auts:\n", + " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -14037,7 +17671,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347d80> >" + " *' at 0x7f26743d3990> >" ] }, "metadata": {}, @@ -14314,12 +17948,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -14541,12 +18175,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -14956,6 +18590,203 @@ "metadata": {}, "output_type": "display_data" }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "((Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + ")) | ((Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & Fin(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ") & Fin(\n", + "\n", + ")) | ((Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")) & Inf(\n", + "\n", + "))\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { "data": { "text/html": [ @@ -15306,7 +19137,7 @@ } ], "source": [ - "left = auts[4]\n", + "left = auts[5]\n", "display(left)\n", "for right in auts:\n", " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" @@ -15314,7 +19145,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "metadata": { "scrolled": false }, @@ -15370,7 +19201,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fd90c347e40> >" + " *' at 0x7f26743d3a20> >" ] }, "metadata": {}, @@ -15433,46 +19264,115 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,1\n", + "\n", + "0,1\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & !d\n", + "\n", + "\n", + "a & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & d\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a & !d\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "a & d\n", + "\n", + "\n", + "a & d\n", "\n", "\n", "\n", @@ -15483,30 +19383,30 @@ "\n", "\n", "0->2\n", - "\n", - "\n", - "!a & !d\n", + "\n", + "\n", + "!a & !d\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!a & d\n", + "\n", + "\n", + "!a & d\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "!d\n", + "\n", + "\n", + "!d\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -15521,111 +19421,7 @@ "2->2\n", "\n", "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "a & d\n", - "\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "0,2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!a & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "2->2\n", - "\n", - "\n", - "d\n", + "d\n", "\n", "\n", "\n", @@ -15647,12 +19443,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[Büchi]\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -15702,87 +19498,6 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", "\n", "\n", @@ -15856,6 +19571,87 @@ "\n", "\n", "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", "
" ], "text/plain": [ @@ -15874,12 +19670,12 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "Fb\n", - "\n", - "[co-Büchi]\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -15929,87 +19725,6 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ")\n", - "[Büchi]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0,1\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->0\n", - "\n", - "\n", - "!b & d\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "0,0\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & !d\n", - "\n", - "\n", - "\n", - "\n", - "0->1\n", - "\n", - "\n", - "b & d\n", - "\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", - "\n", - "1->1\n", - "\n", - "\n", - "d\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", "\n", "\n", @@ -16083,6 +19798,87 @@ "\n", "\n", "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett-like 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,1\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!b & d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "b & d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "d\n", + "\n", + "\n", + "\n", "
" ], "text/plain": [ @@ -16146,67 +19942,62 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Fin(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Inf(\n", + "\n", + ")\n", + "[Streett-like 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", + "\n", + "\n", + "!c & !d\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & d\n", + "\n", + "\n", + "!c & d\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", + "\n", + "\n", + "c & d\n", + "\n", "\n", "\n", "\n", @@ -16216,67 +20007,249 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ") | Inf(\n", + "\n", + ")\n", + "[Rabin-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & !d\n", - "\n", + "\n", + "\n", + "!c & !d\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & !d\n", - "\n", - "\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!c & d\n", + "\n", + "\n", + "!c & d\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "c & d\n", - "\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "\n", + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "FGd\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Fin(\n", + "\n", + ")\n", + "[Streett-like 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + "))\n", + "[Rabin-like 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & !d\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & !d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!c & d\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "c & d\n", "\n", "\n", "\n", @@ -16344,55 +20317,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Fin(\n", - "\n", - ") | Inf(\n", - "\n", - "))) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")) & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & Inf(\n", + "\n", + ") & Fin(\n", + "\n", + ")\n", + "[Streett-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16402,55 +20368,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Inf(\n", - "\n", - ") & Fin(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & (Fin(\n", - "\n", - ") | Inf(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ") | (Inf(\n", + "\n", + ") & Fin(\n", + "\n", + "))\n", + "[Rabin-like 3]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16518,55 +20477,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")) | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", + "\n", + "\n", + "\n", + "(Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")) & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[Streett-like 2]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", "\n", "\n", @@ -16576,55 +20528,48 @@ "\n", "\n", - "\n", - "\n", - "\n", - "((Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")) & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))) | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ")&Inf(\n", - "\n", - ")) & Fin(\n", - "\n", - "))\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")) | Inf(\n", + "\n", + ")\n", + "[Rabin-like 4]\n", "\n", "\n", "\n", "0\n", - "\n", - "0,0\n", + "\n", + "0,0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "!d\n", - "\n", - "\n", + "\n", + "\n", + "!d\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "d\n", + "\n", + "\n", + "d\n", "\n", 
"\n", "\n", @@ -16639,16 +20584,317 @@ } ], "source": [ - "left = auts[5]\n", + "left = auts[6]\n", "display(left)\n", "for right in auts:\n", - " display_inline(right, spot.product_xor(left, right), spot.product_xnor(left, right))" + " display_inline(right, spot.product(left, right), spot.product_or(left, right))" ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")&Inf(\n", + "\n", + ")\n", + "[gen. Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a1 = spot.translate('GFa')\n", + "a2 = spot.translate('GFb')\n", + "display_inline(spot.product(a1,a2), spot.product_or(a1, a2))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")|Fin(\n", + "\n", + ")\n", + "[gen. co-Büchi 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a1 = spot.dualize(a1)\n", + "a2 = spot.dualize(a2)\n", + "display_inline(spot.product(a1,a2), spot.product_or(a1, a2))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -16662,7 +20908,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3rc1" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/tests/python/_synthesis.ipynb b/tests/python/_synthesis.ipynb index 3a91415e9..2d92236b7 100644 --- a/tests/python/_synthesis.ipynb +++ b/tests/python/_synthesis.ipynb @@ -3,15 +3,18 @@ { "cell_type": "code", "execution_count": 1, + "id": "c54c43ba", "metadata": {}, "outputs": [], "source": [ "import spot, buddy\n", - "spot.setup()" + "spot.setup()\n", + "from spot.jupyter import display_inline" ] }, { "cell_type": "markdown", + "id": "0576f64a", "metadata": {}, "source": [ "Additional testing for synthesis" @@ -19,6 +22,7 @@ }, { "cell_type": "markdown", + "id": "e25b7989", "metadata": {}, "source": [ "Testing the different methods to solve" @@ -27,6 +31,7 @@ { "cell_type": "code", "execution_count": 2, + "id": "007107a6", "metadata": {}, "outputs": [ { @@ -50,6 +55,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "a7859f19", "metadata": {}, "outputs": [ { @@ -57,43 +63,72 @@ "output_type": "stream", "text": [ "HOA: v1\n", - "States: 7\n", + "States: 21\n", "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[!0&!1] 1\n", - "[!0&1] 2\n", - "[0&!1] 3\n", - "[0&1] 4\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0&1&!2] 4\n", - "[0&!1&!2] 3\n", - "[!0&1&!2] 2\n", - "[!0&!1&!2] 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[0&!2] 4\n", - "[!0&!2] 2\n", + "[t] 1\n", "State: 3\n", - "[!0&1&2] 5\n", - "[0&1&2] 4\n", - "[!0&!1&2] 6\n", - "[0&!1&2] 3\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[!0&2] 5\n", - "[0&2] 4\n", + "[t] 3\n", "State: 5\n", - "[!0&!2] 5\n", - "[0&!2] 4\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", "State: 6\n", - "[!0&1&!2] 5\n", - "[0&1&!2] 4\n", - "[!0&!1&!2] 6\n", - "[0&!1&!2] 3\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", "States: 7\n", @@ -141,163 +176,137 @@ "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - 
"[!0&!1] 7\n", - "[!0&1] 8\n", - "[0&!1] 9\n", - "[0&1] 10\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0&1] 11\n", - "[0&!1] 12\n", - "[!0&1] 13\n", - "[!0&!1] 14\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[0] 11\n", - "[!0] 13\n", + "[t] 1\n", "State: 3\n", - "[!0&1] 15\n", - "[0&1] 16\n", + "[0] 13\n", + "[!0] 19\n", + "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", "[!0&!1] 17\n", "[0&!1] 18\n", - "State: 4\n", - "[!0] 15\n", - "[0] 16\n", - "State: 5\n", - "[!0] 19\n", - "[0] 11\n", - "State: 6\n", - "[!0&1] 19\n", - "[0&1] 11\n", - "[!0&!1] 20\n", - "[0&!1] 12\n", - "State: 7\n", - "[t] 1\n", - "State: 8\n", - "[t] 2\n", - "State: 9\n", - "[t] 3\n", - "State: 10\n", - "[t] 4\n", - "State: 11\n", - "[!2] 4\n", - "State: 12\n", - "[!2] 3\n", - "State: 13\n", - "[!2] 2\n", - "State: 14\n", - "[!2] 1\n", "State: 15\n", - "[2] 5\n", + "[2] 14\n", "State: 16\n", - "[2] 4\n", + "[2] 5\n", "State: 17\n", - "[2] 6\n", + "[!2] 14\n", "State: 18\n", - "[2] 3\n", - "State: 19\n", "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", "State: 20\n", - "[!2] 6\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", - "States: 2\n", - "Start: 1\n", + "States: 21\n", + "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0&1&!2] 1\n", - "[0&!1&!2] 1\n", - "[!0&1&!2] 0\n", - "[!0&!1&!2] 0\n", - "State: 1\n", - "[!0&1&2] 0\n", - "[0&1&2] 1\n", - "[!0&!1&2] 0\n", - "[0&!1&2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 2\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0&1&!2] 1\n", - "[0&!1&!2] 1\n", - "[!0&1&!2] 0\n", - "[!0&!1&!2] 0\n", - "State: 1\n", - "[!0&1&2] 0\n", - "[0&1&2] 1\n", - "[!0&!1&2] 0\n", - "[0&!1&2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 6\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0&1] 2\n", - "[0&!1] 2\n", - "[!0&1] 3\n", - "[!0&!1] 3\n", - "State: 1\n", + "[!0&!1] 2\n", "[!0&1] 4\n", - "[0&1] 5\n", - "[!0&!1] 4\n", - "[0&!1] 5\n", - "State: 2\n", - "[!2] 1\n", - "State: 3\n", - "[!2] 0\n", - "State: 4\n", - "[2] 0\n", - "State: 5\n", - "[2] 1\n", - "--END--\n", - "HOA: v1\n", - "States: 6\n", - "Start: 1\n", - "AP: 3 \"i1\" \"i0\" \"o0\"\n", - "acc-name: all\n", - "Acceptance: 0 t\n", - "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", - "controllable-AP: 2\n", - "--BODY--\n", - "State: 0\n", - "[0] 2\n", - "[!0] 3\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0] 4\n", - "[!0] 5\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 
20\n", "State: 2\n", - "[!2] 1\n", + "[t] 1\n", "State: 3\n", - "[!2] 0\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[2] 1\n", + "[t] 3\n", "State: 5\n", - "[2] 0\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n", "HOA: v1\n", - "States: 2\n", + "States: 7\n", "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", @@ -306,36 +315,277 @@ "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0&2] 0\n", - "[!0&2] 1\n", + "[!0&!1] 1\n", + "[!0&1] 2\n", + "[0&!1] 3\n", + "[0&1] 4\n", "State: 1\n", - "[0&!2] 0\n", - "[!0&!2] 1\n", + "[0&1&!2] 4\n", + "[0&!1&!2] 3\n", + "[!0&1&!2] 2\n", + "[!0&!1&!2] 1\n", + "State: 2\n", + "[0&!2] 4\n", + "[!0&!2] 2\n", + "State: 3\n", + "[!0&1&2] 5\n", + "[0&1&2] 4\n", + "[!0&!1&2] 6\n", + "[0&!1&2] 3\n", + "State: 4\n", + "[!0&2] 5\n", + "[0&2] 4\n", + "State: 5\n", + "[!0&!2] 5\n", + "[0&!2] 4\n", + "State: 6\n", + "[!0&1&!2] 5\n", + "[0&1&!2] 4\n", + "[!0&!1&!2] 6\n", + "[0&!1&!2] 3\n", "--END--\n", "HOA: v1\n", - "States: 6\n", - "Start: 1\n", + "States: 21\n", + "Start: 0\n", "AP: 3 \"i1\" \"i0\" \"o0\"\n", "acc-name: all\n", "Acceptance: 0 t\n", "properties: trans-labels explicit-labels state-acc deterministic\n", - "spot-state-player: 0 0 1 1 1 1\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", "controllable-AP: 2\n", "--BODY--\n", "State: 0\n", - "[0] 2\n", - "[!0] 3\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", "State: 1\n", - "[0] 4\n", - "[!0] 5\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", "State: 2\n", - "[!2] 1\n", + "[t] 1\n", "State: 3\n", - "[!2] 0\n", + "[0] 13\n", + "[!0] 19\n", "State: 4\n", - "[2] 1\n", + "[t] 3\n", "State: 5\n", - "[2] 0\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", + "--END--\n", + "HOA: v1\n", + "States: 21\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", + "State: 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", + "State: 2\n", + "[t] 1\n", + "State: 3\n", + "[0] 13\n", + "[!0] 19\n", + "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + 
"[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", + "--END--\n", + "HOA: v1\n", + "States: 7\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 1\n", + "[!0&1] 2\n", + "[0&!1] 3\n", + "[0&1] 4\n", + "State: 1\n", + "[0&1&!2] 4\n", + "[0&!1&!2] 3\n", + "[!0&1&!2] 2\n", + "[!0&!1&!2] 1\n", + "State: 2\n", + "[0&!2] 4\n", + "[!0&!2] 2\n", + "State: 3\n", + "[!0&1&2] 5\n", + "[0&1&2] 4\n", + "[!0&!1&2] 6\n", + "[0&!1&2] 3\n", + "State: 4\n", + "[!0&2] 5\n", + "[0&2] 4\n", + "State: 5\n", + "[!0&!2] 5\n", + "[0&!2] 4\n", + "State: 6\n", + "[!0&1&!2] 5\n", + "[0&1&!2] 4\n", + "[!0&!1&!2] 6\n", + "[0&!1&!2] 3\n", + "--END--\n", + "HOA: v1\n", + "States: 21\n", + "Start: 0\n", + "AP: 3 \"i1\" \"i0\" \"o0\"\n", + "acc-name: all\n", + "Acceptance: 0 t\n", + "properties: trans-labels explicit-labels state-acc deterministic\n", + "spot-state-player: 0 0 1 0 1 0 1 0 1 0 1 1 1 1 0 1 1 1 1 1 1\n", + "controllable-AP: 2\n", + "--BODY--\n", + "State: 0\n", + "[!0&!1] 2\n", + "[!0&1] 4\n", + "[0&!1] 6\n", + "[0&1] 8\n", + "State: 1\n", + "[0&1] 13\n", + "[0&!1] 18\n", + "[!0&1] 19\n", + "[!0&!1] 20\n", + "State: 2\n", + "[t] 1\n", + "State: 3\n", + "[0] 13\n", + "[!0] 19\n", + "State: 4\n", + "[t] 3\n", + "State: 5\n", + "[!0&1] 10\n", + "[0&1] 11\n", + "[!0&!1] 15\n", + "[0&!1] 16\n", + "State: 6\n", + "[t] 5\n", + "State: 7\n", + "[!0] 10\n", + "[0] 11\n", + "State: 8\n", + "[t] 7\n", + "State: 9\n", + "[!0] 12\n", + "[0] 13\n", + "State: 10\n", + "[2] 9\n", + "State: 11\n", + "[2] 7\n", + "State: 12\n", + "[!2] 9\n", + "State: 13\n", + "[!2] 7\n", + "State: 14\n", + "[!0&1] 12\n", + "[0&1] 13\n", + "[!0&!1] 17\n", + "[0&!1] 18\n", + "State: 15\n", + "[2] 14\n", + "State: 16\n", + "[2] 5\n", + "State: 17\n", + "[!2] 14\n", + "State: 18\n", + "[!2] 5\n", + "State: 19\n", + "[!2] 3\n", + "State: 20\n", + "[!2] 1\n", "--END--\n" ] } @@ -345,7 +595,7 @@ "mm0 = spot.solved_game_to_mealy(game, si)\n", "msep0 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit0 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_separated_mealy(mm0)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm0))\n", "assert(spot.is_separated_mealy(msep0))\n", "assert(spot.is_split_mealy(msplit0))\n", "print(mm0.to_str(\"hoa\"))\n", @@ -355,7 +605,7 @@ "mm2 = spot.solved_game_to_mealy(game, si)\n", "msep2 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit2 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_separated_mealy(mm2)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm2))\n", "assert(spot.is_separated_mealy(msep2))\n", "assert(spot.is_split_mealy(msplit2))\n", "print(mm2.to_str(\"hoa\"))\n", @@ -365,7 +615,7 @@ "mm3 = spot.solved_game_to_mealy(game, si)\n", 
"msep3 = spot.solved_game_to_separated_mealy(game, si)\n", "msplit3 = spot.solved_game_to_split_mealy(game, si)\n", - "assert(spot.is_split_mealy(mm3)) #Not imposed by the functions pre or post, but results of current impl, change if necessary\n", + "assert(spot.is_mealy(mm3))\n", "assert(spot.is_separated_mealy(msep3))\n", "assert(spot.is_split_mealy(msplit3))\n", "print(mm3.to_str(\"hoa\"))\n", @@ -376,31 +626,48 @@ { "cell_type": "code", "execution_count": 4, + "id": "fb57ac53", "metadata": {}, "outputs": [], "source": [ "mus0 = spot.unsplit_mealy(msplit0)\n", "mus2 = spot.unsplit_mealy(msplit2)\n", - "mus3 = spot.unsplit_mealy(msplit3)\n", - "mmus3 = spot.unsplit_mealy(mm3)" + "mus3 = spot.unsplit_mealy(msplit3)" ] }, { "cell_type": "code", "execution_count": 5, + "id": "40fc65b5", "metadata": {}, "outputs": [], "source": [ - "assert(mm0.equivalent_to(msep0))\n", - "assert(mm0.equivalent_to(mus0))\n", - "assert(mm2.equivalent_to(msep2))\n", - "assert(mm2.equivalent_to(mus2))\n", - "assert(mmus3.equivalent_to(msep3))\n", - "assert(mmus3.equivalent_to(mus3))" + "assert(mus0.equivalent_to(msep0))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f6d8b29c", + "metadata": {}, + "outputs": [], + "source": [ + "assert(mus2.equivalent_to(msep2))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "db8d47f2", + "metadata": {}, + "outputs": [], + "source": [ + "assert(mus3.equivalent_to(msep3))" ] }, { "cell_type": "markdown", + "id": "c19beeb0", "metadata": {}, "source": [ "Testing related to #495" @@ -408,7 +675,8 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, + "id": "3736cd1b", "metadata": {}, "outputs": [ { @@ -470,10 +738,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f7b0> >" + " *' at 0x7fbccc33a0f0> >" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -486,7 +754,8 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, + "id": "da6a7802", "metadata": {}, "outputs": [ { @@ -552,10 +821,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f7b0> >" + " *' at 0x7fbccc33a0f0> >" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -567,7 +836,8 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, + "id": "987219a4", "metadata": {}, "outputs": [ { @@ -675,10 +945,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f630> >" + " *' at 0x7fbccc345660> >" ] }, - "execution_count": 8, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -691,7 +961,8 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, + "id": "958d81f2", "metadata": {}, "outputs": [ { @@ -772,10 +1043,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee36703f60> >" + " *' at 0x7fbccc3486c0> >" ] }, - "execution_count": 9, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -798,7 +1069,8 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, + "id": "078bb43e", "metadata": {}, "outputs": [ { @@ -939,10 +1211,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716f930> >" + " *' at 0x7fbccc345ae0> >" ] }, - "execution_count": 10, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -955,7 +1227,8 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, + "id": "05b4a138", "metadata": {}, "outputs": [ { @@ -1147,10 +1420,10 @@ "\n" ], "text/plain": [ - " *' at 0x7fee3716fa20> >" + 
" *' at 0x7fbccc345e40> >" ] }, - "execution_count": 11, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -1160,11 +1433,4944 @@ "print(a_s.acc())\n", "a_s" ] + }, + { + "cell_type": "markdown", + "id": "0ee90b2a", + "metadata": {}, + "source": [ + "## A problematic case for merge\n", + "\n", + "This is an example graph for which the self-loop optimisation in merge_states does not work" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "06b20a8c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc364a20> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "# OK, edge conditions ensure \"correct\" ordering\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "8a2f2e4d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc364a20> >" + ] + }, + "execution_count": 15, + 
"metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "b40f8ce7", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35a2d0> >" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "# Not OK, all edge equal -> sorted by destination\n", + "# Fails to merge\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, a)\n", + "aut.new_edge(1, 3, a)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, a)\n", + "aut.new_edge(4, 3, a)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "1f596284", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35a2d0> >" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "761b4c96", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + 
"\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "11\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "13\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "a\n", + "\n", + "b\n", + "\n", + "c\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "0\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + 
"\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "5->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35af00> >" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(8)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "aut.new_edge(5, 1, a)\n", + "aut.new_edge(5, 2, b)\n", + "aut.new_edge(5, 3, c)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "d4e09261", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35af00> >" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.merge_states()\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "4a8ace82", + "metadata": {}, + "source": [ + "## Splitting can inhibit merging\n", + "\n", + "In split automata, no self-loops exist.\n", + "Therefore states that can be merged 
pre-split can not be merged in a split automaton" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "c9e38db9", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc36d240> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "22\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "25\n", + "\n", + "\n", + "27\n", + "\n", + "\n", + "29\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "35\n", + "\n", + "\n", + "37\n", + "\n", + "\n", + "39\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "19\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "23\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "26\n", + "\n", + "\n", + "28\n", + "\n", + "\n", + "31\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "36\n", + "\n", + "\n", + "38\n", + "\n", + "\n", + "41\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + 
"\n", + "\n", + "19\n", + "\n", + "\n", + "20\n", + "\n", + "\n", + "21\n", + "\n", + "\n", + "22\n", + "\n", + "\n", + "23\n", + "\n", + "\n", + "24\n", + "\n", + "\n", + "25\n", + "\n", + "\n", + "26\n", + "\n", + "\n", + "27\n", + "\n", + "\n", + "28\n", + "\n", + "\n", + "29\n", + "\n", + "\n", + "30\n", + "\n", + "\n", + "31\n", + "\n", + "\n", + "32\n", + "\n", + "\n", + "33\n", + "\n", + "\n", + "34\n", + "\n", + "\n", + "35\n", + "\n", + "\n", + "36\n", + "\n", + "\n", + "37\n", + "\n", + "\n", + "38\n", + "\n", + "\n", + "39\n", + "\n", + "\n", + "40\n", + "\n", + "\n", + "41\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "!a & !b & c\n", + "\n", + "!a & b & !c\n", + "\n", + "!a & b & c\n", + "\n", + "a & !b & !c\n", + "\n", + "a & !b & c\n", + "\n", + "a & b & !c\n", + "\n", + "a & b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "!a & !b & c\n", + "\n", + "!a & b & !c\n", + "\n", + "!a & b & c\n", + "\n", + "a & !b & !c\n", + "\n", + "a & !b & c\n", + "\n", + "a & b & !c\n", + "\n", + "a & b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "next_succ\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + 
"\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "0\n", + "\n", + "\n", + "19\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "23\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "26\n", + "\n", + "0\n", + "\n", + "\n", + "28\n", + "\n", + "0\n", + "\n", + "\n", + "30\n", + "\n", + "\n", + "31\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "36\n", + "\n", + "0\n", + "\n", + "\n", + "38\n", + "\n", + "0\n", + "\n", + "\n", + "40\n", + "\n", + "\n", + "41\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n", + "namedprops\n", + "named properties:\n", + "state-player\n", + "synthesis-outputs\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & 
!c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc3910f0> >" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 1, a)\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 4, a)\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + 
"aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "display(aut)\n", + "\n", + "aut = spot.split_2step(aut, x, False)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "2009f279", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "1->9\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "1->10\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "1->11\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "1->12\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "2->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!a & !b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a & b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "!a & b & c\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "4->15\n", + "\n", + "\n", + "a & !b & !c\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "4->16\n", + "\n", + "\n", + "a & !b & c\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + 
"4->17\n", + "\n", + "\n", + "a & b & !c\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "4->18\n", + "\n", + "\n", + "a & b & c\n", + "\n", + "\n", + "\n", + "15->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc3910f0> >" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(aut.merge_states())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "17c8d6bc", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "4->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc364570> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "11\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "13\n", + "\n", + "\n", + "14\n", + "\n", + "\n", + 
"15\n", + "\n", + "\n", + "16\n", + "\n", + "\n", + "17\n", + "\n", + "cond\n", + "\n", + "1\n", + "\n", + "!b & c\n", + "\n", + "b & !c\n", + "\n", + "b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "!b & c\n", + "\n", + "b & !c\n", + "\n", + "b & c\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "x\n", + "\n", + "!x\n", + "\n", + "acc\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "0\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "0\n", + "\n", + "\n", + "11\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "15\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "9\n", + "\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Inf(0)\n", + "ap_vars:\n", + "a b c x\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "maybe\n", + "prop_unambiguous:\n", + "maybe\n", + "prop_semi_deterministic:\n", + "maybe\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n", + "namedprops\n", + "named properties:\n", + "state-player\n", + "synthesis-outputs\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "0->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "5->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + 
"\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "3->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35a9c0> >" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Merging possible even in split case\n", + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "aut.new_states(5)\n", + "\n", + "a = buddy.bdd_ithvar(aut.register_ap(\"a\"))\n", + "b = buddy.bdd_ithvar(aut.register_ap(\"b\"))\n", + "c = buddy.bdd_ithvar(aut.register_ap(\"c\"))\n", + "x = buddy.bdd_ithvar(aut.register_ap(\"x\"))\n", + "\n", + "\n", + "aut.new_edge(0, 1, buddy.bddtrue)\n", + "aut.new_edge(0, 4, buddy.bddtrue)\n", + "\n", + "aut.new_edge(1, 2, b)\n", + "aut.new_edge(1, 3, c)\n", + "\n", + "aut.new_edge(4, 2, b)\n", + "aut.new_edge(4, 3, c)\n", + "\n", + "aut.new_edge(2, 2, x)\n", + "aut.new_edge(3, 3, buddy.bdd_not(x))\n", + "\n", + "display(aut)\n", + "\n", + "aut = spot.split_2step(aut, x, False)\n", + "\n", + "display(aut.show_storage())\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "b3e90235", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "!b & c\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "1->6\n", + "\n", + "\n", + "b & !c\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "b & c\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "5->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "1\n", + 
"\n", + "\n", + "\n", + "8->2\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "!x\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc35a9c0> >" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(aut.merge_states())\n", + "aut" + ] + }, + { + "cell_type": "markdown", + "id": "05785bb1", + "metadata": {}, + "source": [ + "Fail case for alternate_players" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "df4aa681", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "i\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc3646c0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "i\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!i\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "o\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc3646c0> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "aut = spot.make_twa_graph()\n", + "aut.set_buchi()\n", + "i = buddy.bdd_ithvar(aut.register_ap(\"i\"))\n", + "o = buddy.bdd_ithvar(aut.register_ap(\"o\"))\n", + "\n", + "spot.set_synthesis_outputs(aut, o)\n", + "\n", + "aut.new_states(2)\n", + "aut.new_edge(0,1,i)\n", + "aut.new_edge(1,0,o,spot.mark_t([0]))\n", + "display(aut)\n", + "spot.alternate_players(aut)\n", + "display(aut)" + ] + }, + { + "cell_type": "markdown", + "id": "7efe7450", + "metadata": {}, + "source": [ + "# Test improved aiger INF encoding" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "31872ccc", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "I->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n" + ], + 
"text/plain": [ + " *' at 0x7fbccc3911e0> >" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "si = spot.synthesis_info()\n", + "\n", + "aut = spot.ltl_to_game(\"(a|b|c|d)->x\", [\"x\"], si)\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "9064bc60", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n" + ] + } + ], + "source": [ + "print(spot.solve_game(aut))" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "303ada1e", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a & !b & !c & !d\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "a | b | c | d\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbcd407ca20> >" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ctrl = spot.solved_game_to_split_mealy(aut)\n", + "ctrl" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "9874a530", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "10->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "12->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "16->18\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "x\n", + "\n", + "\n", + "\n", + "18->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "4->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "c\n", + "\n", + "\n", + "\n", + "6->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "d\n", + "\n", + "\n", + "\n", + "8->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "False\n", + "\n", + "\n", + "\n", + "0->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fbccc33adb0> >" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aig = spot.mealy_machine_to_aig(ctrl, \"ite\")\n", + "aig" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb81b7d3", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/tests/python/acc.py b/tests/python/acc.py new file mode 100644 index 
000000000..8a23dcd46 --- /dev/null +++ b/tests/python/acc.py @@ -0,0 +1,65 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2022 Laboratoire de Recherche et Développement +# de l'Epita +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + +a = spot.acc_cond('parity min odd 5') +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, {}, spot.acc_cond(5, "f"))'.format(repr(a))) + +a.set_acceptance('Rabin 3') +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, spot.acc_cond(5, "Inf(1)"), ' + 'spot.acc_cond(5, "(Fin(2) & Inf(3)) | (Fin(4) & Inf(5))"))') + +a.set_acceptance('(Fin(0)|Inf(3))&(Fin(1)|Inf(4))&(Fin(2)|Inf(5)) |\ +(Fin(0)|Inf(4))&(Fin(1)|Inf(5))&(Fin(2)|Inf(3)) |\ +(Fin(0)|Inf(5))&(Fin(1)|Inf(3))&(Fin(2)|Inf(4))') + +tc.maxDiff = None +tc.assertEqual(str(a.fin_unit_one_split()), + '(0, spot.acc_cond(5, ' + '"((Fin(1) | Inf(4)) & (Fin(2) | Inf(5))) | ' + '((Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '((Fin(1) | Inf(3)) & (Fin(2) | Inf(4)))"), ' + 'spot.acc_cond(5, ' + '"(Inf(3) & (Fin(1) | Inf(4)) & (Fin(2) | Inf(5))) | ' + '(Inf(4) & (Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '(Inf(5) & (Fin(1) | Inf(3)) & (Fin(2) | Inf(4)))"))') + +a = a.remove([4], True) +tc.assertEqual(str(a.fin_unit_one_split()), + '(1, spot.acc_cond(5, ' + '"(Fin(0) | Inf(3)) & (Fin(2) | Inf(5))"), ' + 'spot.acc_cond(5, ' + '"(Fin(0) & (Fin(1) | Inf(5)) & (Fin(2) | Inf(3))) | ' + '((Fin(0) | Inf(5)) & (Fin(1) | Inf(3)) & Fin(2))"))') + +def report_missing_exception(): + raise RuntimeError("missing exception") + +a.set_acceptance("Inf(0)") +try: + a.fin_unit_one_split() +except RuntimeError as e: + tc.assertIn('no Fin', str(e)) +else: + report_missing_exception() diff --git a/tests/python/acc_cond.ipynb b/tests/python/acc_cond.ipynb index 76580fcdd..492c416ca 100644 --- a/tests/python/acc_cond.ipynb +++ b/tests/python/acc_cond.ipynb @@ -1416,7 +1416,7 @@ "source": [ "`fin_one()` return the number of one color `x` that appears as `Fin(x)` in the formula, or `-1` if the formula is Fin-less.\n", "\n", - "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` appear. Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is prefered to 2." 
+ "The variant `fin_one_extract()` consider the acceptance condition as a disjunction (if the top-level operator is not a disjunction, we just assume the formula is a disjunction with only one disjunct), and return a pair `(x,c)` where `c` is the disjunction of all disjuncts of the original formula where `Fin(x)` used to appear but where `Fin(x)` have been replaced by `true`, and `Inf(x)` by `false`. Also this function tries to choose an `x` such that one of the disjunct has the form `...&Fin(x)&...` if possible: this is visible in the third example, where 5 is prefered to 2." ] }, { @@ -1430,7 +1430,7 @@ "text": [ "(4, (Fin(0) | Inf(1)) & (Fin(2) | Inf(3)))\n", "0\n", - "(0, spot.acc_cond(4, \"(Fin(0) | Inf(1)) & (Fin(2) | Inf(3))\"))\n" + "(0, spot.acc_cond(4, \"Fin(2) | Inf(3)\"))\n" ] } ], @@ -1451,7 +1451,7 @@ "text": [ "(6, (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)))\n", "0\n", - "(0, spot.acc_cond(6, \"Fin(0) & Inf(1)\"))\n" + "(0, spot.acc_cond(6, \"Inf(1)\"))\n" ] } ], @@ -1473,7 +1473,7 @@ "text": [ "(6, (Inf(0) & (Fin(2) | Inf(3))) | (Inf(4) & Fin(5)) | ((Inf(0)&Inf(5)) & (Fin(0)|Fin(5))))\n", "2\n", - "(5, spot.acc_cond(6, \"(Inf(4) & Fin(5)) | ((Inf(0)&Inf(5)) & (Fin(0)|Fin(5)))\"))\n" + "(5, spot.acc_cond(6, \"Inf(4)\"))\n" ] } ], @@ -1483,11 +1483,40 @@ "print(acc3.fin_one())\n", "print(acc3.fin_one_extract())" ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(8, (Fin(1) & Inf(2)) | (Inf(3)&Inf(4)) | (Inf(5) & (Fin(1)|Fin(7))))\n", + "1\n", + "(1, spot.acc_cond(8, \"Inf(2) | Inf(5)\"))\n" + ] + } + ], + "source": [ + "acc4 = spot.acc_cond('Fin(1)&Inf(2)|Inf(3)&Inf(4)|Inf(5)&(Fin(1)|Fin(7))')\n", + "print(acc4)\n", + "print(acc4.fin_one())\n", + "print(acc4.fin_one_extract())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1501,7 +1530,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.10.5" } }, "nbformat": 4, diff --git a/tests/python/accparse2.py b/tests/python/accparse2.py index 4e6eb1cb3..d9c7274a0 100644 --- a/tests/python/accparse2.py +++ b/tests/python/accparse2.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018 Laboratoire de Recherche et Développement +# Copyright (C) 2015, 2017-2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,99 +18,101 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() a = spot.acc_cond(5) a.set_acceptance(spot.acc_code('parity min odd 5')) -assert(a.is_parity() == [True, False, True]) +tc.assertEqual(a.is_parity(), [True, False, True]) a.set_acceptance('parity max even 5') -assert(a.is_parity() == [True, True, False]) +tc.assertEqual(a.is_parity(), [True, True, False]) a.set_acceptance('generalized-Buchi 5') -assert(a.is_parity()[0] == False) -assert(a.is_parity(True)[0] == False) +tc.assertEqual(a.is_parity()[0], False) +tc.assertEqual(a.is_parity(True)[0], False) a.set_acceptance('Inf(4) | (Fin(3)&Inf(2)) | (Fin(3)&Fin(1)&Inf(0))') -assert(a.is_parity()[0] == False) -assert(a.is_parity(True) == [True, True, False]) +tc.assertEqual(a.is_parity()[0], False) +tc.assertEqual(a.is_parity(True), [True, True, False]) -assert a.maybe_accepting([1, 2, 3], [0, 4]).is_true() -assert a.maybe_accepting([0], []).is_true() -assert a.maybe_accepting([0], [3]).is_false() -assert a.maybe_accepting([0, 3], []).is_maybe() -assert a.maybe_accepting([2, 3], [3]).is_false() -assert a.maybe_accepting([2, 3], []).is_maybe() -assert a.maybe_accepting([2], []).is_true() -assert a.maybe_accepting([0, 1], []).is_maybe() -assert a.maybe_accepting([0, 1], [1]).is_false() +tc.assertTrue(a.maybe_accepting([1, 2, 3], [0, 4]).is_true()) +tc.assertTrue(a.maybe_accepting([0], []).is_true()) +tc.assertTrue(a.maybe_accepting([0], [3]).is_false()) +tc.assertTrue(a.maybe_accepting([0, 3], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([2, 3], [3]).is_false()) +tc.assertTrue(a.maybe_accepting([2, 3], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([2], []).is_true()) +tc.assertTrue(a.maybe_accepting([0, 1], []).is_maybe()) +tc.assertTrue(a.maybe_accepting([0, 1], [1]).is_false()) a.set_acceptance('Fin(0)|Fin(1)') -assert a.maybe_accepting([0, 1], [1]).is_maybe() -assert a.maybe_accepting([0, 1], [0, 1]).is_false() -assert a.maybe_accepting([0], []).is_true() -assert a.maybe_accepting([], [0]).is_true() +tc.assertTrue(a.maybe_accepting([0, 1], [1]).is_maybe()) +tc.assertTrue(a.maybe_accepting([0, 1], [0, 1]).is_false()) +tc.assertTrue(a.maybe_accepting([0], []).is_true()) +tc.assertTrue(a.maybe_accepting([], [0]).is_true()) a = spot.acc_cond(0) a.set_acceptance('all') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 0) -assert(a.is_parity() == [True, True, True]) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 0) +tc.assertEqual(a.is_parity(), [True, True, True]) a.set_acceptance('none') -assert(a.is_rabin() == 0) -assert(a.is_streett() == -1) -assert(a.is_parity() == [True, True, False]) +tc.assertEqual(a.is_rabin(), 0) +tc.assertEqual(a.is_streett(), -1) +tc.assertEqual(a.is_parity(), [True, True, False]) a = spot.acc_cond('(Fin(0)&Inf(1))') -assert(a.is_rabin() == 1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('Inf(1)&Fin(0)') -assert(a.is_rabin() == 1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 1) a.set_acceptance('Inf(1)|Fin(0)') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 1) a = spot.acc_cond('(Fin(0)&Inf(1))|(Fin(2)&Inf(3))') -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) 
+tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(3)&Fin(2))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(2)&Fin(3))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(3)&Fin(2))|(Fin(2)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance(spot.acc_code('(Inf(1)&Fin(0))|(Fin(0)&Inf(1))')) -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)&Inf(1))|(Inf(1)&Fin(0))|(Inf(3)&Fin(2))') -assert(a.is_rabin() == 2) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), 2) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))&(Fin(2)|Inf(3))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a.set_acceptance('(Inf(3)|Fin(2))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a.set_acceptance('(Inf(2)|Fin(3))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Inf(3)|Fin(2))&(Fin(2)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Inf(1)|Fin(0))&(Fin(0)|Inf(1))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == -1) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), -1) a.set_acceptance('(Fin(0)|Inf(1))&(Inf(1)|Fin(0))&(Inf(3)|Fin(2))') -assert(a.is_rabin() == -1) -assert(a.is_streett() == 2) +tc.assertEqual(a.is_rabin(), -1) +tc.assertEqual(a.is_streett(), 2) a = spot.acc_code('Inf(0)&Inf(1)&Inf(3) | Fin(0)&(Fin(1)|Fin(3))') u = a.symmetries() -assert u[0] == 0 -assert u[1] == 1 -assert u[2] == 2 -assert u[3] == 1 +tc.assertEqual(u[0], 0) +tc.assertEqual(u[1], 1) +tc.assertEqual(u[2], 2) +tc.assertEqual(u[3], 1) diff --git a/tests/python/aiger.py b/tests/python/aiger.py index 5148fef5f..f490465b0 100644 --- a/tests/python/aiger.py +++ b/tests/python/aiger.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et +# Copyright (C) 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
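The pattern applied throughout these test files is the same: each bare `assert` becomes a call on a module-level `unittest.TestCase` instance, so that a failure reports the values being compared instead of a bare `AssertionError`. A minimal, self-contained sketch of the idiom (not tied to any particular test):

from unittest import TestCase

tc = TestCase()           # standalone instance; no test runner required

x = 6 * 7
# Instead of `assert x == 42`, which fails without any context:
tc.assertEqual(x, 42)     # on failure, both values are shown
tc.assertIn('4', str(x))  # substring checks, as used for error messages here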
import spot, buddy +from unittest import TestCase +tc = TestCase() strats = (("""HOA: v1 States: 4 @@ -3346,7 +3348,7 @@ for strat_string, (ins_str, outs_str) in strats: print(f"Mode is {m+ss+ddx+uud}") print(f"""Strat is \n{strat_s.to_str("hoa")}""") print(f"""Aig as aut is \n{strat2_s.to_str("hoa")}""") - assert 0 + raise AssertionError("not a specialization") # Check stepwise simulation @@ -3386,7 +3388,7 @@ for (i, e_latch) in zip(ins, exp_latches): # Variable names -assert(spot.aiger_circuit("""aag 2 2 0 2 0 +tc.assertEqual(spot.aiger_circuit("""aag 2 2 0 2 0 2 4 2 @@ -3394,9 +3396,9 @@ assert(spot.aiger_circuit("""aag 2 2 0 2 0 i0 a i1 b c c -""").to_str() == 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 a\ni1 b c\no0 o0\no1 o1') +""").to_str(), 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 a\ni1 b c\no0 o0\no1 o1') -assert(spot.aiger_circuit("""aag 2 2 0 2 0 +tc.assertEqual(spot.aiger_circuit("""aag 2 2 0 2 0 2 4 2 @@ -3404,7 +3406,7 @@ assert(spot.aiger_circuit("""aag 2 2 0 2 0 o0 x o1 y c -""").to_str() == 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 i0\ni1 i1\no0 x\no1 y') +""").to_str(), 'aag 2 2 0 2 0\n2\n4\n2\n1\ni0 i0\ni1 i1\no0 x\no1 y') def report_missing_exception(): @@ -3415,7 +3417,7 @@ try: 0 """) except SyntaxError as e: - assert str(e) == "\n:1: invalid header line" + tc.assertEqual(str(e), "\n:1: invalid header line") else: report_missing_exception() @@ -3423,14 +3425,15 @@ try: spot.aiger_circuit("""aag 2 2 3 2 0 """) except SyntaxError as e: - assert str(e) == "\n:1: more variables than indicated by max var" + tc.assertEqual(str(e), + "\n:1: more variables than indicated by max var") else: report_missing_exception() try: spot.aiger_circuit("""aag 2 2 0 2 0\n""") except SyntaxError as e: - assert str(e) == "\n:2: expecting input number 2" + tc.assertEqual(str(e), "\n:2: expecting input number 2") else: report_missing_exception() @@ -3439,7 +3442,7 @@ try: 3 """) except SyntaxError as e: - assert str(e) == "\n:2: expecting input number 2" + tc.assertEqual(str(e), "\n:2: expecting input number 2") else: report_missing_exception() @@ -3448,7 +3451,7 @@ try: 3 4 5 """) except SyntaxError as e: - assert str(e) == "\n:2: invalid format for an input" + tc.assertEqual(str(e), "\n:2: invalid format for an input") else: report_missing_exception() @@ -3457,7 +3460,7 @@ try: 2 """) except SyntaxError as e: - assert str(e) == "\n:3: expecting input number 4" + tc.assertEqual(str(e), "\n:3: expecting input number 4") else: report_missing_exception() @@ -3468,7 +3471,7 @@ try: 1 """) except SyntaxError as e: - assert str(e) == "\n:4: invalid format for a latch" + tc.assertEqual(str(e), "\n:4: invalid format for a latch") else: report_missing_exception() @@ -3479,7 +3482,7 @@ try: 1 1 """) except SyntaxError as e: - assert str(e) == "\n:4: expecting latch number 6" + tc.assertEqual(str(e), "\n:4: expecting latch number 6") else: report_missing_exception() @@ -3490,7 +3493,7 @@ try: 6 1 """) except SyntaxError as e: - assert str(e) == "\n:5: expecting latch number 8" + tc.assertEqual(str(e), "\n:5: expecting latch number 8") else: report_missing_exception() @@ -3502,7 +3505,7 @@ try: 8 7 """) except SyntaxError as e: - assert str(e) == "\n:6: expecting an output" + tc.assertEqual(str(e), "\n:6: expecting an output") else: report_missing_exception() @@ -3515,7 +3518,7 @@ try: 9 9 9 """) except SyntaxError as e: - assert str(e) == "\n:6: invalid format for an output" + tc.assertEqual(str(e), "\n:6: invalid format for an output") else: report_missing_exception() @@ -3528,7 +3531,7 @@ try: 9 9 9 """) except SyntaxError as e: - 
assert str(e) == "\n:6: invalid format for an output" + tc.assertEqual(str(e), "\n:6: invalid format for an output") else: report_missing_exception() @@ -3541,7 +3544,7 @@ try: 9 """) except SyntaxError as e: - assert str(e) == "\n:7: expecting AND gate number 10" + tc.assertEqual(str(e), "\n:7: expecting AND gate number 10") else: report_missing_exception() @@ -3555,7 +3558,7 @@ try: 10 3 8 9 """) except SyntaxError as e: - assert str(e) == "\n:7: invalid format for an AND gate" + tc.assertEqual(str(e), "\n:7: invalid format for an AND gate") else: report_missing_exception() @@ -3569,7 +3572,7 @@ try: 10 3 """) except SyntaxError as e: - assert str(e) == "\n:7: invalid format for an AND gate" + tc.assertEqual(str(e), "\n:7: invalid format for an AND gate") else: report_missing_exception() @@ -3583,7 +3586,7 @@ try: 10 3 8 """) except SyntaxError as e: - assert str(e) == "\n:8: expecting AND gate number 12" + tc.assertEqual(str(e), "\n:8: expecting AND gate number 12") else: report_missing_exception() @@ -3599,7 +3602,7 @@ try: i0 """) except SyntaxError as e: - assert str(e) == "\n:9: could not parse as input name" + tc.assertEqual(str(e), "\n:9: could not parse as input name") else: report_missing_exception() @@ -3616,7 +3619,7 @@ i0 foo i3 bar """) except SyntaxError as e: - assert str(e) == "\n:10: value 3 exceeds input count" + tc.assertEqual(str(e), "\n:10: value 3 exceeds input count") else: report_missing_exception() @@ -3633,7 +3636,7 @@ i1 bar i0 foo """) except SyntaxError as e: - assert str(e) == "\n:9: expecting name for input 0" + tc.assertEqual(str(e), "\n:9: expecting name for input 0") else: report_missing_exception() @@ -3650,8 +3653,8 @@ i0 name with spaces i1 name with spaces """) except SyntaxError as e: - assert str(e) == \ - "\n:10: name 'name with spaces' already used" + tc.assertEqual(str(e), \ + "\n:10: name 'name with spaces' already used") else: report_missing_exception() @@ -3669,7 +3672,7 @@ i1 bar o0 """) except SyntaxError as e: - assert str(e) == "\n:11: could not parse as output name" + tc.assertEqual(str(e), "\n:11: could not parse as output name") else: report_missing_exception() @@ -3689,7 +3692,7 @@ o1 hmm o0 foo bar baz """) except SyntaxError as e: - assert str(e) == "\n:12: expecting name for output 0" + tc.assertEqual(str(e), "\n:12: expecting name for output 0") else: report_missing_exception() @@ -3709,7 +3712,7 @@ o0 hmm o2 foo bar baz """) except SyntaxError as e: - assert str(e) == "\n:13: value 2 exceeds output count" + tc.assertEqual(str(e), "\n:13: value 2 exceeds output count") else: report_missing_exception() @@ -3729,7 +3732,7 @@ o0 foo o1 foo """) except SyntaxError as e: - assert str(e) == "\n:13: name 'foo' already used" + tc.assertEqual(str(e), "\n:13: name 'foo' already used") else: report_missing_exception() @@ -3749,7 +3752,7 @@ o0 foo o1 bar """) except SyntaxError as e: - assert str(e) == "\n:13: name 'bar' already used" + tc.assertEqual(str(e), "\n:13: name 'bar' already used") else: report_missing_exception() @@ -3770,7 +3773,7 @@ o1 baz this is a bug """) except SyntaxError as e: - assert str(e) == "\n:14: unsupported line type" + tc.assertEqual(str(e), "\n:14: unsupported line type") else: report_missing_exception() @@ -3791,8 +3794,8 @@ c this is not a bug """) except SyntaxError as e: - assert str(e) == \ - "\n:10: either all or none of the inputs should be named" + tc.assertEqual(str(e), \ + "\n:10: either all or none of the inputs should be named") else: report_missing_exception() @@ -3815,8 +3818,8 @@ c this is not a 
bug """) except SyntaxError as e: - assert str(e) == \ - "\n:11-12: either all or none of the inputs should be named" + tc.assertEqual(str(e), \ + "\n:11-12: either all or none of the inputs should be named") else: report_missing_exception() @@ -3841,8 +3844,8 @@ c this is not a bug """) except SyntaxError as e: - assert str(e) == \ - "\n:14-16: either all or none of the outputs should be named" + tc.assertEqual(str(e), \ + "\n:14-16: either all or none of the outputs should be named") else: report_missing_exception() @@ -3866,4 +3869,4 @@ o2 bar c this is not a bug """).to_str() -assert x == spot.aiger_circuit(x).to_str() +tc.assertEqual(x, spot.aiger_circuit(x).to_str()) diff --git a/tests/python/aliases.py b/tests/python/aliases.py index 6f861a880..40dd4d0ec 100644 --- a/tests/python/aliases.py +++ b/tests/python/aliases.py @@ -20,6 +20,8 @@ # Test for parts of Issue #497. import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -63,11 +65,11 @@ State: 0 --END--""") s = aut.to_str('hoa') aut2 = spot.automaton(s) -assert aut.equivalent_to(aut2) +tc.assertTrue(aut.equivalent_to(aut2)) s2 = aut.to_str('hoa') -assert s == s2 +tc.assertEqual(s, s2) -assert s == """HOA: v1 +tc.assertEqual(s, """HOA: v1 States: 1 Start: 0 AP: 3 "x" "y" "z" @@ -105,7 +107,7 @@ State: 0 [@a&2 | @p1&@p0&2] 0 [@a&2] 0 [@p0&2 | @p1&2] 0 ---END--""" +--END--""") # Check what happens to aliases when an AP has been removed, but # the aliases have been preserved... @@ -115,7 +117,7 @@ aut3 = rem.strip(aut) spot.set_aliases(aut3, spot.get_aliases(aut)) s2 = aut3.to_str('hoa') # Aliases based on "x" should have disappeared. -assert(s2 == """HOA: v1 +tc.assertEqual(s2, """HOA: v1 States: 1 Start: 0 AP: 2 "y" "z" diff --git a/tests/python/alternating.py b/tests/python/alternating.py index 7b3a5d713..5b38ca378 100755 --- a/tests/python/alternating.py +++ b/tests/python/alternating.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2017, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016-2017, 2021-2022 Laboratoire de Recherche +# et Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
# @@ -20,6 +20,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() aut = spot.make_twa_graph(spot._bdd_dict) @@ -38,9 +40,8 @@ aut.new_edge(2, 2, p1 | p2) tr = [(s, [[x for x in aut.univ_dests(i)] for i in aut.out(s)]) for s in range(3)] -print(tr) -assert [(0, [[1, 2], [0, 1]]), (1, [[0, 2, 1]]), (2, [[2]])] == tr -assert not aut.is_existential() +tc.assertEqual([(0, [[1, 2], [0, 1]]), (1, [[0, 2, 1]]), (2, [[2]])], tr) +tc.assertFalse(aut.is_existential()) received = False try: @@ -49,11 +50,10 @@ try: pass except RuntimeError: received = True -assert received +tc.assertTrue(received) h = aut.to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 2 "p1" "p2" @@ -68,22 +68,20 @@ State: 1 [0&1] 0&2&1 State: 2 [0 | 1] 2 ---END--""" +--END--""") aut2 = spot.automaton(h) h2 = aut2.to_str('hoa') -print(h2) -assert h != h2 +tc.assertNotEqual(h, h2) # This will sort destination groups aut.merge_univ_dests() h = aut.to_str('hoa') -assert h == h2 +tc.assertEqual(h, h2) aut2.set_univ_init_state([0, 1]) h3 = aut2.to_str('hoa') -print(h3) -assert h3 == """HOA: v1 +tc.assertEqual(h3, """HOA: v1 States: 3 Start: 0&1 AP: 2 "p1" "p2" @@ -98,23 +96,22 @@ State: 1 [0&1] 0&1&2 State: 2 [0 | 1] 2 ---END--""" +--END--""") st = spot.states_and(aut, [0, 2]) st2 = spot.states_and(aut, [1, st]) st3 = spot.states_and(aut, [0, 1, 2]) -assert (st, st2, st3) == (3, 4, 5) +tc.assertEqual((st, st2, st3), (3, 4, 5)) received = False try: st4 = spot.states_and(aut, []) except RuntimeError: received = True -assert received +tc.assertTrue(received) h = aut.to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 6 Start: 0 AP: 2 "p1" "p2" @@ -136,11 +133,10 @@ State: 4 [0&1] 0&1&2 State: 5 [0&1] 0&1&2 ---END--""" +--END--""") h = spot.split_edges(aut).to_str('hoa') -print(h) -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 6 Start: 0 AP: 2 "p1" "p2" @@ -168,7 +164,7 @@ State: 4 [0&1] 0&1&2 State: 5 [0&1] 0&1&2 ---END--""" +--END--""") # remove_univ_otf @@ -206,11 +202,11 @@ State: 2 --END--""" desalt = spot.remove_univ_otf(aut) -assert(desalt.to_str('hoa') == out) +tc.assertEqual(desalt.to_str('hoa'), out) -assert aut.num_states() == 3 -assert aut.num_edges() == 3 +tc.assertEqual(aut.num_states(), 3) +tc.assertEqual(aut.num_edges(), 3) aut.edge_storage(3).cond = buddy.bddfalse aut.purge_dead_states() -assert aut.num_states() == 1 -assert aut.num_edges() == 0 +tc.assertEqual(aut.num_states(), 1) +tc.assertEqual(aut.num_edges(), 0) diff --git a/tests/python/bdddict.py b/tests/python/bdddict.py index d6222b58f..b7b442b1f 100644 --- a/tests/python/bdddict.py +++ b/tests/python/bdddict.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2019, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -17,8 +17,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# Make sure we can leep track of BDD association in Python using bdd_dict, as -# discussed in issue #372. +# Make sure we can keep track of BDD association in Python using bdd_dict, as +# discussed in (deleted???) issue #372. # CPython use reference counting, so that automata are destructed # when we expect them to be. 
However other implementations like @@ -33,6 +33,8 @@ else: gc.collect() import spot +from unittest import TestCase +tc = TestCase() class bdd_holder: @@ -64,7 +66,7 @@ class bdd_holder3: def check_ok(): - assert type(bdict.varnum(spot.formula.ap("a"))) is int + tc.assertIs(type(bdict.varnum(spot.formula.ap("a"))), int) def check_nok(): @@ -123,7 +125,7 @@ debug("h2") h3 = bdd_holder3(h2) var = bdict.register_anonymous_variables(1, h3) debug("h3") -assert var == 2 +tc.assertEqual(var, 2) del h2 gcollect() debug("-h2") diff --git a/tests/python/bdditer.py b/tests/python/bdditer.py index 3d3bb7894..4a2afeea1 100644 --- a/tests/python/bdditer.py +++ b/tests/python/bdditer.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et +# Copyright (C) 2017, 2018, 2021, 2022, 2023 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -24,11 +24,26 @@ import spot import buddy import sys +from unittest import TestCase +tc = TestCase() + +# CPython use reference counting, so that automata are destructed +# when we expect them to be. However other implementations like +# PyPy may call destructors latter, causing different output. +from platform import python_implementation +if python_implementation() == 'CPython': + def gcollect(): + pass +else: + import gc + def gcollect(): + gc.collect() + run = spot.translate('a & !b').accepting_run() b = run.prefix[0].label c = buddy.bdd_satone(b) -assert c != buddy.bddfalse +tc.assertNotEqual(c, buddy.bddfalse) res = [] while c != buddy.bddtrue: var = buddy.bdd_var(c) @@ -40,23 +55,67 @@ while c != buddy.bddtrue: res.append(var) c = h -assert res == [0, -1] +tc.assertEqual(res, [0, -1]) +del res res2 = [] for i in run.aut.ap(): res2.append((str(i), run.aut.register_ap(i))) -assert str(res2) == "[('a', 0), ('b', 1)]" - +tc.assertEqual(str(res2), "[('a', 0), ('b', 1)]") +del res2 +del c +gcollect() f = spot.bdd_to_formula(b) -assert f._is(spot.op_And) -assert f[0]._is(spot.op_ap) -assert f[1]._is(spot.op_Not) -assert f[1][0]._is(spot.op_ap) -assert str(f) == 'a & !b' +tc.assertTrue(f._is(spot.op_And)) +tc.assertTrue(f[0]._is(spot.op_ap)) +tc.assertTrue(f[1]._is(spot.op_Not)) +tc.assertTrue(f[1][0]._is(spot.op_ap)) +tc.assertEqual(str(f), 'a & !b') +del f +gcollect() try: f = spot.bdd_to_formula(b, spot.make_bdd_dict()) sys.exit(2) except RuntimeError as e: - assert "not in the dictionary" in str(e) + tc.assertIn("not in the dictionary", str(e)) + +f = spot.bdd_to_cnf_formula(b) +tc.assertEqual(str(f), 'a & !b') + +del run +del f + +gcollect() + +f = spot.bdd_to_cnf_formula(buddy.bddtrue) +tc.assertEqual(str(f), '1') +del f +gcollect() + +f = spot.bdd_to_cnf_formula(buddy.bddfalse) +tc.assertEqual(str(f), '0') +del f +gcollect() + +aut = spot.translate('(a & b) <-> c') +# With pypy, running GC here will destroy the translator object used +# by translate(). That object has temporary automata that reference +# the BDDs variables and those affect the order in which the +# bdd_to_formula() result is object is presented. The different order +# is not wrong, but it makes it diffuclt to write tests. 
+gcollect() + +for e in aut.out(aut.get_init_state_number()): + b = e.cond + break + +f1 = spot.bdd_to_formula(b) +tc.assertEqual(str(f1), '(!a & !c) | (a & b & c) | (!b & !c)') +f2 = spot.bdd_to_cnf_formula(b) +tc.assertEqual(str(f2), '(a | !c) & (!a | !b | c) & (b | !c)') + +b1 = spot.formula_to_bdd(f1, spot._bdd_dict, aut) +b2 = spot.formula_to_bdd(f2, spot._bdd_dict, aut) +tc.assertEqual(b1, b2) diff --git a/tests/python/bugdet.py b/tests/python/bugdet.py index 9e06e0db3..19434c967 100644 --- a/tests/python/bugdet.py +++ b/tests/python/bugdet.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -22,6 +22,8 @@ # sent to the Spot mailing list on 2016-10-31. import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1 @@ -80,12 +82,12 @@ State: 7 {0} # was fine. print("use_simulation=True") b1 = spot.tgba_determinize(b, False, True, True, True) -assert b1.num_states() == 5 +tc.assertEqual(b1.num_states(), 5) b1 = spot.remove_fin(spot.dualize(b1)) -assert not a.intersects(b1) +tc.assertFalse(a.intersects(b1)) print("\nuse_simulation=False") b2 = spot.tgba_determinize(b, False, True, False, True) -assert b2.num_states() == 5 +tc.assertEqual(b2.num_states(), 5) b2 = spot.remove_fin(spot.dualize(b2)) -assert not a.intersects(b2) +tc.assertFalse(a.intersects(b2)) diff --git a/tests/python/cav22-figs.ipynb b/tests/python/cav22-figs.ipynb new file mode 100644 index 000000000..ea84319a2 --- /dev/null +++ b/tests/python/cav22-figs.ipynb @@ -0,0 +1,1582 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4d225dd2-8b41-4bb4-9cae-136c314bbcc9", + "metadata": {}, + "source": [ + "This notebook reproduces the examples shown in our CAV'22 paper, as well as a few more. It was part of the CAV'22 artifact, but has been updated to keep up with recent version of Spot." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a1948c94-e737-4b8b-88b8-00d896c5c928", + "metadata": {}, + "outputs": [], + "source": [ + "import spot\n", + "from spot.jupyter import display_inline\n", + "from buddy import bdd_ithvar\n", + "spot.setup()" + ] + }, + { + "cell_type": "markdown", + "id": "c03f8776-c657-4f87-99b5-a56ed1fdcbe3", + "metadata": {}, + "source": [ + "# Figure 1\n", + "\n", + "Fig. 1 of the paper shows (1) how to convert an LTL formula to an automaton with arbitrary acceptance condition, and (2) how to display the internal representation of the automaton." 
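Outside of Jupyter, the two steps described in this cell boil down to the following sketch; the formula and options are those of the code cells reproduced below, and `show_storage()` is only really useful in a notebook, where its output is rendered as the diagram shown further down.

import spot

# (1) Translate an LTL formula into a deterministic automaton with an
#     arbitrary ("generic") acceptance condition.
aut = spot.translate('GF(a <-> Xa) & FGb', 'det', 'gen')
print(aut.get_acceptance())   # here a Rabin-like condition: Fin(0) & Inf(1)

# (2) Inspect the internal representation (state and edge vectors); in a
#     notebook this renders as the table of the next cell.
aut.show_storage()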
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d7b6e2d6-7472-4136-8114-ecb03dde1edd", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[Rabin 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & !b\n", + "\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "!a & b\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb6a430f5a0> >" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut = spot.translate('GF(a <-> Xa) & FGb', 'det', 'gen')\n", + "aut" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "18248bd4-8d80-4ae7-a466-c347e1ea5ad4", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "5\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "cond\n", + "\n", + "a & !b\n", + "\n", + "a & b\n", + "\n", + "!a & b\n", + "\n", + "!a & !b\n", + "\n", + "a & b\n", + "\n", + "a & !b\n", + "\n", + "!a & !b\n", + "\n", + "!a & b\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{1}\n", + "\n", + "dst\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "0\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "8\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "0\n", + "num_sets:\n", + "2\n", + "acceptance:\n", + "Fin(0) & Inf(1)\n", + "ap_vars:\n", + "b a\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "maybe\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "no\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "maybe\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + 
"yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut.show_storage()" + ] + }, + { + "cell_type": "markdown", + "id": "ef2e2a61-046c-42f0-b0a1-8dc7f1f2b37a", + "metadata": {}, + "source": [ + "# Figure 2\n", + "\n", + "Fig.2 shows an example of alternating automaton, represented in two different ways, along with its internal representation." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5d2dd179-470c-47c1-b9f2-c5df9f76b2b8", + "metadata": {}, + "outputs": [], + "source": [ + "# We enter the automaton using the HOA format.\n", + "aut2 = spot.automaton(\"\"\"\n", + "HOA: v1\n", + "States: 5\n", + "Start: 3\n", + "acc-name: co-Buchi\n", + "Acceptance: 1 Fin(0)\n", + "AP: 2 \"a\" \"b\"\n", + "--BODY--\n", + "State: 0 {0} \n", + "[0] 1\n", + "[!0] 2\n", + "State: 1 {0} \n", + "[0&1] 0&1\n", + "State: 2 \n", + "[t] 2 \n", + "State: 3 \n", + "[0] 4&0\n", + "State: 4 \n", + "[t] 3 \n", + "--END--\n", + "\"\"\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e1517479-6947-43fd-8369-c4fcdca72e1d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "-4->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "I->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4\n", + "\n", + "\n", + "\n", + "\n", + "3->-4\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "0->T2T0\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "-1\n", + "\n", + "\n", + "\n", + "\n", + "1->-1\n", + "\n", + "\n", + "a & b\n", + "\n", + "\n", + "\n", + "-1->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-1->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "-4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "-4->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_inline(aut2, aut2.show('.u'), per_row=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ef26e6e8-3206-4e51-9858-33f8d27f915c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "g\n", + "\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "states\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "succ\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "succ_tail\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "edges\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "5\n", + "\n", + "\n", + "6\n", + "\n", + "cond\n", + "\n", + "a\n", + "\n", + "!a\n", + "\n", + "a & b\n", + "\n", + "1\n", + "\n", + "a\n", + "\n", + "1\n", + "\n", + "acc\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{0}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "{}\n", + "\n", + "dst\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "3\n", + "\n", + "next_succ\n", + "\n", + "\n", + "2\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "src\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "2\n", + "\n", + "\n", + "3\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "dests\n", + "\n", + "\n", + "~0\n", + "\n", + "\n", + "\n", + "\n", + "~3\n", + "\n", + "\n", + "\n", + "#cnt/dst\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "1\n", + "\n", + "#2\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "meta\n", + "init_state:\n", + "\n", + "3\n", + "num_sets:\n", + "1\n", + "acceptance:\n", + "Fin(0)\n", + "ap_vars:\n", + "b a\n", + "\n", + "\n", + "\n", + "\n", + "props\n", + "prop_state_acc:\n", + "yes\n", + "prop_inherently_weak:\n", + "maybe\n", + "prop_terminal:\n", + "maybe\n", + "prop_weak:\n", + "maybe\n", + "prop_very_weak:\n", + "maybe\n", + "prop_complete:\n", + "no\n", + "prop_universal:\n", + "yes\n", + "prop_unambiguous:\n", + "yes\n", + "prop_semi_deterministic:\n", + "yes\n", + "prop_stutter_invariant:\n", + "maybe\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "aut2.show_storage()" + ] + }, + { + "cell_type": "markdown", + "id": "f4f651b6-974e-4783-a54a-17a280d30782", + "metadata": {}, + "source": [ + "# Figure 3\n", + "\n", + "Fig. 3 shows an example of game generated by `ltlsynt` from the LTL specification of a reactive controler, and then how this game can be encoded into an And-Inverter-Graph.\n", + "First we retrieve the game generated by `ltlsynt` (any argument passed to `spot.automaton` is interpreted as a command if it ends with a pipe), then we solve it to compute a possible winning strategy. 
\n", + "\n", + "Player 0 plays from round states and tries to violate the acceptance condition; Player 1 plays from diamond states and tries to satisfy the acceptance condition. Once a game has been solved, the `highlight_strategy` function will decorate the automaton with winning region and computed strategies for player 0 and 1 in red and green respectively. Therefore this game is winning for player 1 from the initial state.\n", + "\n", + "Compared to the paper, the production of parity automata in `ltlsynt` has been improved, and it generates a Büchi game instead (but Büchi can be seen one case of parity)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ac90284d-2493-428b-9db7-cc7aa63384cb", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "I->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "4->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "4->12\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "7->4\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "7->2\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "8->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "2->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "2->10\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "10->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->9\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "3->11\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "11->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "5->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->0\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fb6a430f300> >" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "game = spot.automaton(\"ltlsynt --outs=b -f 'F(a & Xa) <-> Fb' --print-game-hoa |\")\n", + "spot.solve_game(game)\n", + "spot.highlight_strategy(game)\n", + "game" + ] + }, + { + "cell_type": "markdown", + "id": "d8b3ad5a-fef2-498b-8fd3-2d3940dacbf5", + "metadata": {}, + "source": [ + "The `solved_game_to_mealy()` shown in 
the paper does not always produce the same type of output, so it is\n", + "better to explicitly call `solved_game_to_split_mealy()` or `solved_game_to_separated_mealy()` depending on the type of output one needs. We also show how to use the `reduce_mealy()` method to simplify one." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "39156f1a-945c-46db-bac2-01565d17b82e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
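For completeness, the whole pipeline of Fig. 3 can be written as a short script; every call below appears verbatim in the notebook cells, and the `ltlsynt` invocation assumes the tool is available on the PATH.

import spot

# A trailing '|' makes spot.automaton() treat its argument as a command,
# so this retrieves the game built by ltlsynt.
game = spot.automaton(
    "ltlsynt --outs=b -f 'F(a & Xa) <-> Fb' --print-game-hoa |")
spot.solve_game(game)          # compute winning regions and strategies
spot.highlight_strategy(game)  # decorate the game for display

# Turn the solved game into a Mealy machine, simplify it, and encode it
# as an And-Inverter Graph.
mealy = spot.solved_game_to_separated_mealy(game)
mealy_min = spot.reduce_mealy(mealy, True)
aig = spot.mealy_machine_to_aig(mealy_min, "isop")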
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "2->2\n", + "\n", + "\n", + "\n", + "1\n", + "/\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "\n", + "!a\n", + "/\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "1->1\n", + "\n", + "\n", + "\n", + "a\n", + "/\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "L0_out\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "6->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "L0\n", + "\n", + "L0_in\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->L0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "mealy = spot.solved_game_to_separated_mealy(game)\n", + "mealy_min = spot.reduce_mealy(mealy, True)\n", + "aig = spot.mealy_machine_to_aig(mealy_min, \"isop\")\n", + "display_inline(mealy, mealy_min, aig)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tests/python/complement_semidet.py b/tests/python/complement_semidet.py index 5ab4557bc..da06749a3 100644 --- a/tests/python/complement_semidet.py +++ b/tests/python/complement_semidet.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() def complement(aut): @@ -35,4 +37,4 @@ for aut in spot.automata( comp = complement(aut) semidet_comp = spot.complement_semidet(aut, True) - assert(comp.equivalent_to(semidet_comp)) + tc.assertTrue(comp.equivalent_to(semidet_comp)) diff --git a/tests/python/dbranch.py b/tests/python/dbranch.py new file mode 100644 index 000000000..268c4a3c6 --- /dev/null +++ b/tests/python/dbranch.py @@ -0,0 +1,174 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2022, 2023 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). +# +# This file is part of Spot, a model checking library. +# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Test that the spot.gen package works, in particular, we want +# to make sure that the objects created from spot.gen methods +# are usable with methods from the spot package. 
+ + +import spot +from unittest import TestCase +tc = TestCase() + +aut5 = spot.automaton("""HOA: v1 States: 28 Start: 0 AP: 4 "alive" "b" +"a" "c" acc-name: Buchi Acceptance: 1 Inf(0) properties: trans-labels +explicit-labels state-acc very-weak --BODY-- State: 0 [0] 1 [0] 2 [0] +3 [0] 4 [0] 5 [0&!1] 6 [0] 7 State: 1 [0] 8 State: 2 [!0] 9 [0] 10 +State: 3 [!0] 9 [0] 11 State: 4 [!0] 9 [0] 12 State: 5 [!0] 9 [0] 13 +State: 6 [!0] 9 [0&!1] 14 State: 7 [!0] 9 [0&!1&!2] 14 State: 8 [0] 15 +State: 9 {0} [!0] 9 State: 10 [!0] 9 [0] 16 State: 11 [!0] 9 [0] 17 +State: 12 [!0] 9 [0] 18 State: 13 [!0] 9 [0&!1&!2] 19 State: 14 [!0] 9 +[0&!1] 19 State: 15 [0] 20 State: 16 [!0] 9 [0] 21 State: 17 [!0] 9 +[0] 22 State: 18 [!0] 9 [0&!1&!2] 23 State: 19 [!0] 9 [0&!1] 23 State: +20 [0] 24 State: 21 [!0] 9 [0] 25 State: 22 [!0] 9 [0&!1&!2] 26 State: +23 [!0] 9 [0&!1] 26 State: 24 [0&3] 27 State: 25 [!0] 9 [0&!1&!2] 27 +State: 26 [!0] 9 [0&!1] 27 State: 27 [!0] 9 [0] 27 --END--""") + +copy = spot.make_twa_graph(aut5, spot.twa_prop_set.all()) + +tc.assertFalse(spot.is_deterministic(aut5)) +if spot.delay_branching_here(aut5): + aut5.purge_unreachable_states() + aut5.merge_edges() +tc.assertEqual(aut5.num_states(), 13) +tc.assertEqual(aut5.num_edges(), 29) +tc.assertTrue(spot.are_equivalent(copy, aut5)) + +a = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "a" "b" "c" +Acceptance: 0 t --BODY-- State: 0 [0] 1 [0] 2 [0] 3 State: 1 [!1] 4&5 +[1] 5&6 State: 2 [0] 4&6 State: 3 [0] 3&6 State: 4 [!0] 7 State: 5 +[!0] 7 State: 6 [!0] 6 State: 7 [0] 7 --END--""") + +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() + a.merge_edges() +tc.assertEqual(a.to_str(), """HOA: v1 +States: 7 +Start: 0 +AP: 3 "b" "a" "c" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc univ-branch +--BODY-- +State: 0 +[1] 1 +[1] 2 +State: 1 +[1] 3&5 +[0] 4&5 +[!0] 3&4 +State: 2 +[1] 2&5 +State: 3 +[!1] 6 +State: 4 +[!1] 6 +State: 5 +[!1] 5 +State: 6 +[1] 6 +--END--""") + +a = spot.automaton("""HOA: v1 +States: 9 +Start: 0 AP: 2 "a" "b" +spot.state-player: 0 1 1 0 0 0 0 1 1 +Acceptance: 0 t +--BODY-- +State: 0 +[0] 1 +[0] 2 +[0] 3 +[0] 4 +State: 1 +[1] 5 +State: 2 +[!1] 6 +State: 3 +[1] 7 +State: 4 +[!1] 8 +State: 5 +[t] 5 +State: 6 +[t] 6 +State: 7 +[t] 7 +State: 8 +[t] 8 +--END--""") +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() +tc.assertTrue(spot.are_equivalent(a, copy)) +tc.assertEqual(a.to_str(), """HOA: v1 +States: 7 +Start: 0 +AP: 2 "b" "a" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc very-weak +spot-state-player: 0 1 0 0 0 1 1 +--BODY-- +State: 0 +[1] 1 +[1] 2 +State: 1 +[0] 3 +[!0] 4 +State: 2 +[0] 5 +[!0] 6 +State: 3 +[t] 3 +State: 4 +[t] 4 +State: 5 +[t] 5 +State: 6 +[t] 6 +--END--""") + +# Running delay_branching_here on state-based acceptance may require +# the output to use transition-based acceptance. (Issue #525.) 
+a = spot.automaton(""" +HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" Acceptance: 1 Inf(0) --BODY-- +State: 0 [0] 1 [0] 2 State: 1 [1] 3 State: 2 {0} [!1] 3 State: 3 [t] 0 +--END--""") +copy = spot.make_twa_graph(a, spot.twa_prop_set.all()) +if spot.delay_branching_here(a): + a.purge_unreachable_states() +tc.assertTrue(spot.are_equivalent(a, copy)) +tc.assertEqual(a.to_str(), """HOA: v1 +States: 3 +Start: 0 +AP: 2 "b" "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[1] 1 +State: 1 +[0] 2 +[!0] 2 {0} +State: 2 +[t] 0 +--END--""") diff --git a/tests/python/declenv.py b/tests/python/declenv.py index 868f6ca1d..3ab47736b 100644 --- a/tests/python/declenv.py +++ b/tests/python/declenv.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -21,6 +21,8 @@ # This file tests various error conditions on the twa API import spot +from unittest import TestCase +tc = TestCase() env = spot.declarative_environment() env.declare("a") @@ -28,26 +30,27 @@ env.declare("b") f1a = spot.parse_infix_psl("a U b") f1b = spot.parse_infix_psl("a U b", env) -assert not f1a.errors -assert not f1b.errors +tc.assertFalse(f1a.errors) +tc.assertFalse(f1b.errors) + # In the past, atomic propositions requires via different environments were # never equal, but this feature was never used and we changed that in Spot 2.0 # for the sake of simplicity. -assert f1a.f == f1b.f +tc.assertEqual(f1a.f, f1b.f) f2 = spot.parse_infix_psl("(a U b) U c", env) -assert f2.errors +tc.assertTrue(f2.errors) ostr = spot.ostringstream() f2.format_errors(ostr) err = ostr.str() -assert "unknown atomic proposition `c'" in err +tc.assertIn("unknown atomic proposition `c'", err) f3 = spot.parse_prefix_ltl("R a d", env) -assert f3.errors +tc.assertTrue(f3.errors) ostr = spot.ostringstream() f3.format_errors(ostr) err = ostr.str() -assert "unknown atomic proposition `d'" in err +tc.assertIn("unknown atomic proposition `d'", err) f4 = spot.parse_prefix_ltl("R a b", env) -assert not f4.errors +tc.assertFalse(f4.errors) diff --git a/tests/python/decompose_scc.py b/tests/python/decompose_scc.py index 5f6ad46cb..47741fb72 100644 --- a/tests/python/decompose_scc.py +++ b/tests/python/decompose_scc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et +# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('(Ga -> Gb) W c') si = spot.scc_info(aut) @@ -26,10 +28,10 @@ si = spot.scc_info(aut) # if the generation of the automaton changes, so just scan # for it. 
rej = [j for j in range(si.scc_count()) if si.is_rejecting_scc(j)] -assert len(rej) == 1 +tc.assertEqual(len(rej), 1) s = spot.decompose_scc(si, rej[0]).to_str('hoa', '1.1') -assert (s == """HOA: v1.1 +tc.assertEqual(s, """HOA: v1.1 States: 3 Start: 0 AP: 3 "b" "a" "c" @@ -56,7 +58,8 @@ except RuntimeError: else: raise AssertionError -assert (spot.decompose_scc(si, 0, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.decompose_scc(si, 0, True).to_str('hoa', '1.1'), +"""HOA: v1.1 States: 4 Start: 0 AP: 3 "b" "a" "c" @@ -81,7 +84,8 @@ State: 3 [1] 3 --END--""") -assert (spot.decompose_scc(si, 2, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.decompose_scc(si, 2, True).to_str('hoa', '1.1'), +"""HOA: v1.1 States: 2 Start: 0 AP: 3 "b" "a" "c" @@ -103,4 +107,4 @@ try: except RuntimeError: pass else: - raise AssertionError + raise AssertionError("missing exception") diff --git a/tests/python/det.py b/tests/python/det.py index 03f07c096..36fa31ff3 100644 --- a/tests/python/det.py +++ b/tests/python/det.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() a = spot.translate('FGa | FGb') @@ -26,10 +28,10 @@ a = spot.translate('FGa | FGb') d = spot.tgba_determinize(a, False, True, True, True, None, -1, True) cld = list(d.get_original_classes()) -assert [0, 1, 2, 3, 3] == cld +tc.assertEqual([0, 1, 2, 3, 3], cld) e = spot.sbacc(d) -assert e.get_original_states() is None +tc.assertIsNone(e.get_original_states()) cle = list(e.get_original_classes()) -assert len(cle) == e.num_states() -assert set(cle) == set(cld) +tc.assertEqual(len(cle), e.num_states()) +tc.assertEqual(set(cle), set(cld)) diff --git a/tests/python/dualize.py b/tests/python/dualize.py index 81d2a2b23..b870e1e5e 100755 --- a/tests/python/dualize.py +++ b/tests/python/dualize.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017-2019, 2021-2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. 
# @@ -20,6 +20,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() match_strings = [('is_buchi', 'is_co_buchi'), ('is_generalized_buchi', 'is_generalized_co_buchi'), @@ -79,19 +81,19 @@ def test_aut(aut, d=None): def test_complement(aut): - assert aut.is_deterministic() + tc.assertTrue(aut.is_deterministic()) d = spot.dualize(aut) s = spot.product_or(aut, d) - assert spot.dualize(s).is_empty() + tc.assertTrue(spot.dualize(s).is_empty()) def test_assert(a, d=None): t = test_aut(a, d) if not t[0]: - print (t[1]) - print (a.to_str('hoa')) - print (spot.dualize(a).to_str('hoa')) - assert False + print(t[1]) + print(a.to_str('hoa')) + print(spot.dualize(a).to_str('hoa')) + tc.assertTrue(t[0]) aut = spot.translate('a') @@ -101,7 +103,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 1 AP: 1 "a" @@ -117,7 +119,7 @@ State: 1 [!0] 2 State: 2 [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -141,7 +143,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -161,7 +163,7 @@ State: 2 {0} [!1] 3 State: 3 [t] 3 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -186,7 +188,7 @@ test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" @@ -198,7 +200,7 @@ State: 0 [t] 0 State: 1 [!0 | !1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -219,10 +221,10 @@ State: 3 {1} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 1 AP: 2 "a" "b" @@ -234,7 +236,7 @@ State: 0 [t] 0 State: 1 [!0 | !1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -255,10 +257,10 @@ State: 3 {0} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 5 Start: 0 AP: 2 "a" "b" @@ -280,7 +282,7 @@ State: 3 {0} [t] 3 State: 4 [t] 4 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -302,10 +304,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -327,7 +329,7 @@ State: 2 [!0&!1] 0&2 State: 3 [t] 3 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -348,10 +350,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -362,7 +364,7 @@ properties: deterministic terminal --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -382,10 +384,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -396,7 +398,7 @@ properties: deterministic terminal --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -419,7 +421,7 @@ State: 2 dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 
States: 3 Start: 0 AP: 2 "a" "b" @@ -435,7 +437,7 @@ State: 1 {0} [t] 1 State: 2 [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -456,10 +458,10 @@ State: 2 dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 1 "a" @@ -471,7 +473,7 @@ State: 0 [!0] 1 State: 1 {0} [t] 1 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -495,10 +497,10 @@ State: 3 {0} --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 1 "a" @@ -515,7 +517,7 @@ State: 1 [0] 2 State: 2 {0} [t] 2 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -536,10 +538,10 @@ State: 2 --END--""") dual = spot.dualize(aut) -assert dualtype(aut, dual) +tc.assertTrue(dualtype(aut, dual)) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 3 Start: 0 AP: 1 "a" @@ -555,14 +557,14 @@ State: 1 {0} [t] 0 State: 2 {1} [t] 0 ---END--""" +--END--""") aut = spot.translate('G!a R XFb') test_assert(aut) dual = spot.dualize(aut) h = dual.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 5 Start: 0 AP: 2 "a" "b" @@ -589,7 +591,7 @@ State: 3 {0} [0] 4 State: 4 [t] 4 ---END--""" +--END--""") opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) diff --git a/tests/python/ecfalse.py b/tests/python/ecfalse.py index 36301914b..ccbaa2693 100644 --- a/tests/python/ecfalse.py +++ b/tests/python/ecfalse.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -19,6 +19,8 @@ import spot from buddy import bddfalse, bddtrue +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1 @@ -43,8 +45,8 @@ for e in a.out(1): if e.dst == 0: e.cond = bddfalse -assert a.accepting_run() is None -assert a.is_empty() +tc.assertIsNone(a.accepting_run()) +tc.assertTrue(a.is_empty()) for name in ['SE05', 'CVWY90', 'GV04', 'Cou99(shy)', 'Cou99', 'Tau03']: print(name) @@ -52,13 +54,13 @@ for name in ['SE05', 'CVWY90', 'GV04', 'Cou99(shy)', 'Cou99', 'Tau03']: res = ec.check() if res is not None: print(res.accepting_run()) - assert res is None + tc.assertIsNone(res) si = spot.scc_info(a) -assert si.scc_count() == 1 # only one accessible SCC +tc.assertEqual(si.scc_count(), 1) # only one accessible SCC a.set_init_state(0) si = spot.scc_info(a) -assert si.scc_count() == 2 +tc.assertEqual(si.scc_count(), 2) a = spot.automaton("""HOA: v1 States: 11 Start: 0 AP: 2 "a" "b" Acceptance: 8 (Fin(0) | Inf(1)) & (Fin(2) | Inf(3)) & ((Fin(4) & Inf(5)) | (Fin(6) & Inf(7))) @@ -71,16 +73,16 @@ State: 5 State: 6 State: 7 [!0&!1] 1 {4 6 7} [!0&!1] 2 {5 6} State: 8 [!0&!1] 2 {4} State: 9 [!0&!1] 2 {0 4} [!0&!1] 4 {3 4} State: 10 --END-- """) r = a.accepting_run() -assert r is not None -assert r.replay(spot.get_cout()) +tc.assertIsNotNone(r) +tc.assertTrue(r.replay(spot.get_cout())) for e in a.out(7): if e.dst == 2: e.cond = bddfalse s = a.accepting_run() -assert s is not None -assert s.replay(spot.get_cout()) +tc.assertIsNotNone(s) +tc.assertTrue(s.replay(spot.get_cout())) for e in a.out(2): if e.dst == 1: e.cond = bddfalse s = a.accepting_run() -assert s is None +tc.assertIsNone(s) diff --git a/tests/python/except.py b/tests/python/except.py index 178e419b4..03076c01b 100644 --- a/tests/python/except.py +++ b/tests/python/except.py @@ -24,6 +24,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() def report_missing_exception(): @@ -35,7 +37,7 @@ aut.set_acceptance(spot.acc_cond("parity min even 4")) try: spot.iar(aut) except RuntimeError as e: - assert 'iar() expects Rabin-like or Streett-like input' in str(e) + tc.assertIn('iar() expects Rabin-like or Streett-like input', str(e)) else: report_missing_exception() @@ -43,7 +45,7 @@ alt = spot.dualize(spot.translate('FGa | FGb')) try: spot.tgba_determinize(alt) except RuntimeError as e: - assert 'tgba_determinize() does not support alternation' in str(e) + tc.assertIn('tgba_determinize() does not support alternation', str(e)) else: report_missing_exception() @@ -52,18 +54,18 @@ aps = aut.ap() rem = spot.remove_ap() rem.add_ap('"a"=0,b') aut = rem.strip(aut) -assert aut.ap() == aps[2:] +tc.assertEqual(aut.ap(), aps[2:]) try: rem.add_ap('"a=0,b') except ValueError as e: - assert """missing closing '"'""" in str(e) + tc.assertIn("""missing closing '"'""", str(e)) else: report_missing_exception() try: rem.add_ap('a=0=b') except ValueError as e: - assert """unexpected '=' at position 3""" in str(e) + tc.assertIn("""unexpected '=' at position 3""", str(e)) else: report_missing_exception() @@ -73,7 +75,7 @@ for meth in ('scc_has_rejecting_cycle', 'is_inherently_weak_scc', try: getattr(spot, meth)(si, 20) except ValueError as e: - assert "invalid SCC number" in str(e) + tc.assertIn("invalid SCC number", str(e)) else: report_missing_exception() @@ -89,14 +91,15 @@ si = spot.scc_info(alt) try: si.determine_unknown_acceptance() except RuntimeError as e: - assert "scc_info::determine_unknown_acceptance() does not supp" in str(e) + tc.assertIn("scc_info::determine_unknown_acceptance() does not supp", 
+ str(e)) else: report_missing_exception() try: alt.set_init_state(999) except ValueError as e: - assert "set_init_state()" in str(e) + tc.assertIn("set_init_state()", str(e)) else: report_missing_exception() @@ -107,7 +110,7 @@ alt.set_init_state(u) try: alt.set_init_state(u - 1) except ValueError as e: - assert "set_init_state()" in str(e) + tc.assertIn("set_init_state()", str(e)) else: report_missing_exception() @@ -116,21 +119,21 @@ r = spot.twa_run(aut) try: a = r.as_twa() except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() try: a = r.replay(spot.get_cout()) except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() try: a = r.reduce() except RuntimeError as e: - assert "empty cycle" in str(e) + tc.assertIn("empty cycle", str(e)) else: report_missing_exception() @@ -138,12 +141,12 @@ a = spot.translate('Fa') a = spot.to_generalized_rabin(a, False) r = a.accepting_run() r = r.reduce() -assert r.cycle[0].acc == spot.mark_t([1]) +tc.assertEqual(r.cycle[0].acc, spot.mark_t([1])) r.cycle[0].acc = spot.mark_t([0]) try: r.reduce(); except RuntimeError as e: - assert "expects an accepting cycle" in str(e) + tc.assertIn("expects an accepting cycle", str(e)) else: report_missing_exception() @@ -151,7 +154,7 @@ f = spot.formula('GF(a | Gb)') try: spot.gf_guarantee_to_ba(f, spot._bdd_dict) except RuntimeError as e: - assert "guarantee" in str(e) + tc.assertIn("guarantee", str(e)) else: report_missing_exception() @@ -159,7 +162,7 @@ f = spot.formula('FG(a | Fb)') try: spot.fg_safety_to_dca(f, spot._bdd_dict) except RuntimeError as e: - assert "safety" in str(e) + tc.assertIn("safety", str(e)) else: report_missing_exception() @@ -168,28 +171,28 @@ m = spot.mark_t([n - 1]) try: m = spot.mark_t([0]) << n except RuntimeError as e: - assert "Too many acceptance sets" in str(e) + tc.assertIn("Too many acceptance sets", str(e)) else: report_missing_exception() try: m.set(n) except RuntimeError as e: - assert "bit index is out of bounds" in str(e) + tc.assertIn("bit index is out of bounds", str(e)) else: report_missing_exception() try: m = spot.mark_t([0, n, 1]) except RuntimeError as e: - assert "Too many acceptance sets used. The limit is" in str(e) + tc.assertIn("Too many acceptance sets used. 
The limit is", str(e)) else: report_missing_exception() try: spot.complement_semidet(spot.translate('Gb R a', 'ba')) except RuntimeError as e: - assert "requires a semi-deterministic input" in str(e) + tc.assertIn("requires a semi-deterministic input", str(e)) else: report_missing_exception() @@ -197,69 +200,62 @@ try: spot.translate('F(G(a | !a) & ((b <-> c) W d))', 'det', 'any') except ValueError as e: s = str(e) - assert 'det' in s - assert 'any' in s + tc.assertIn('det', s) + tc.assertIn('any', s) else: report_missing_exception() a1 = spot.translate('FGa') a2 = spot.translate('Gb') -assert not spot.is_deterministic(a1) -assert spot.is_deterministic(a2) +tc.assertFalse(spot.is_deterministic(a1)) +tc.assertTrue(spot.is_deterministic(a2)) try: spot.product_xor(a1, a2) except RuntimeError as e: - assert "product_xor() only works with deterministic automata" in str(e) + tc.assertIn("product_xor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xor(a2, a1) except RuntimeError as e: - assert "product_xor() only works with deterministic automata" in str(e) + tc.assertIn("product_xor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xnor(a1, a2) except RuntimeError as e: - assert "product_xnor() only works with deterministic automata" in str(e) + tc.assertIn("product_xnor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.product_xnor(a2, a1) except RuntimeError as e: - assert "product_xnor() only works with deterministic automata" in str(e) + tc.assertIn("product_xnor() only works with deterministic automata", str(e)) else: report_missing_exception() try: spot.solve_safety_game(a1) except RuntimeError as e: - assert "solve_safety_game(): arena should have true acceptance" in str(e) + tc.assertIn( + "solve_safety_game(): arena should have true acceptance", + str(e)) else: report_missing_exception() -try: - spot.solve_parity_game(a1) -except RuntimeError as e: - assert "solve_parity_game(): arena must have max-odd acceptance condition" \ - in str(e) -else: - report_missing_exception() - - try: spot.formula_Star(spot.formula("a"), 10, 333) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() try: spot.formula_FStar(spot.formula("a"), 333, 400) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() @@ -267,15 +263,15 @@ try: spot.formula_nested_unop_range(spot.op_F, spot.op_Or, 333, 400, spot.formula("a")) except OverflowError as e: - assert "333" in str(e) - assert "254" in str(e) + tc.assertIn("333", str(e)) + tc.assertIn("254", str(e)) else: report_missing_exception() try: spot.formula_FStar(spot.formula("a"), 50, 40) except OverflowError as e: - assert "reversed" in str(e) + tc.assertIn("reversed", str(e)) else: report_missing_exception() @@ -287,5 +283,112 @@ try: a.to_str() except RuntimeError as e: se = str(e) - assert "synthesis-outputs" in se - assert "unregistered proposition" in se + tc.assertIn("synthesis-outputs", se) + tc.assertIn("unregistered proposition", se) +else: + report_missing_exception() + + +a = spot.make_twa_graph() +s = a.new_state() +b = spot.formula_to_bdd("a & b", a.get_dict(), a) +a.new_edge(s, s, b, []) +try: + print(a.to_str('hoa')) +except RuntimeError as e: + tc.assertIn("unregistered atomic 
propositions", str(e)) +else: + report_missing_exception() + +a.register_aps_from_dict() +tc.assertEqual(a.to_str('hoa'), """HOA: v1 +States: 1 +Start: 0 +AP: 2 "a" "b" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0&1] 0 +--END--""") + +try: + a.register_aps_from_dict() +except RuntimeError as e: + se = str(e) + tc.assertIn("register_aps_from_dict", se) + tc.assertIn("already registered", se) +else: + report_missing_exception() + + +try: + spot.minimize_mealy(a, 100) +except RuntimeError as e: + se = str(e) + tc.assertIn("minimize_mealy", se) + tc.assertIn("minimize_lvl", se) +else: + report_missing_exception() + +opt = spot.synthesis_info() +opt.minimize_lvl = 3 +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + se = str(e) + tc.assertIn("minimize_mealy", se) + tc.assertIn("synthesis-output", se) + +spot.set_synthesis_outputs(a, buddy.bdd_ithvar(a.register_ap("b"))) +filename = "/THIS-FILE/SHOULD/NOT/EXIST" +opt.opt.set_str("satlogdimacs", filename) +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + tc.assertIn(filename, str(e)) +else: + report_missing_exception() + +opt.opt.set_str("satlogdimacs", "") +opt.opt.set_str("satlogcsv", filename) +try: + spot.minimize_mealy(a, opt) +except RuntimeError as e: + tc.assertIn(filename, str(e)) +else: + report_missing_exception() + + +# Relabeling must use new variables +aut = spot.make_twa_graph() +aut.new_states(2) +ap = buddy.bdd_ithvar(aut.register_ap("__nv0")) +aut.new_edge(0,1,ap) + +try: + spot.partitioned_relabel_here(aut) +except RuntimeError as e: + tc.assertIn("The given prefix for new variables", + str(e)) +else: + report_missing_exception() + +# Relabeling games must not use the +# globally reserved aps +aut = spot.make_twa_graph() +aut.new_states(2) +apin = buddy.bdd_ithvar(aut.register_ap("__AP_IN__")) +apout = buddy.bdd_ithvar(aut.register_ap("__AP_OUT__")) +aut.new_edge(0,1,apin & apout) +aut.new_edge(1,0,buddy.bdd_not(apin & apout)) +spot.set_state_players(aut, [False, True]) + +try: + spot.partitioned_game_relabel_here(aut, True, True) +except RuntimeError as e: + tc.assertIn("You can not use __AP_IN__ or __AP_OUT__", + str(e)) +else: + report_missing_exception() diff --git a/tests/python/formulas.ipynb b/tests/python/formulas.ipynb index 95241be9d..7075cf653 100644 --- a/tests/python/formulas.ipynb +++ b/tests/python/formulas.ipynb @@ -976,6 +976,62 @@ "print(ap) # print as a string\n", "display(ap) # LaTeX-style, for notebooks" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Converting to Suffix Operator Normal Form:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/latex": [ + "$\\mathsf{G} (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} \\mathsf{F} a)$" + ], + "text/plain": [ + "spot.formula(\"G({x[*]}[]-> Fa)\")" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/latex": [ + "$\\mathsf{G} \\mathit{sonf\\_}_{0} \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{1} \\lor \\mathsf{F} a) \\land \\mathsf{G} (\\lnot \\mathit{sonf\\_}_{0} \\lor (\\{x^{\\star}\\}\\mathrel{\\Box\\kern-1.7pt\\raise.4pt\\hbox{$\\mathord{\\rightarrow}$}} \\mathit{sonf\\_}_{1}))$" + ], + "text/plain": [ + "spot.formula(\"Gsonf_0 & G(!sonf_1 | Fa) & G(!sonf_0 | ({x[*]}[]-> sonf_1))\")" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + 
"('sonf_0', 'sonf_1')" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "f = spot.formula('G({x*} []-> Fa)')\n", + "display(f)\n", + "\n", + "# In addition to the formula, returns a list of newly introduced APs\n", + "f, aps = spot.suffix_operator_normal_form(f, 'sonf_')\n", + "display(f)\n", + "display(aps)" + ] } ], "metadata": { diff --git a/tests/python/game.py b/tests/python/game.py index 9d77c153d..857390335 100644 --- a/tests/python/game.py +++ b/tests/python/game.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -18,7 +18,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import spot +import spot, buddy +from unittest import TestCase +tc = TestCase() g = spot.automaton("""HOA: v1 States: 9 Start: 0 AP: 2 "a" "b" acc-name: Streett 1 Acceptance: 2 Fin(0) | Inf(1) properties: @@ -27,10 +29,10 @@ trans-labels explicit-labels state-acc spot-state-player: 0 1 0 1 0 1 {1} [0] 8 State: 3 {1} [1] 4 State: 4 {1} [0] 5 State: 5 {1} [0] 6 State: 6 {1} [1] 7 State: 7 State: 8 {1} [0] 2 --END--""") -assert spot.solve_parity_game(g) == False +tc.assertFalse(spot.solve_parity_game(g)) s = spot.highlight_strategy(g).to_str("HOA", "1.1") -assert s == """HOA: v1.1 +tc.assertEqual(s, """HOA: v1.1 States: 9 Start: 0 AP: 2 "a" "b" @@ -60,4 +62,463 @@ State: 6 {1} State: 7 State: 8 {1} [0] 2 +--END--""") + +# Testing case where parity_game optimization +# lead to wrong results +si = spot.synthesis_info() + +game = spot.automaton("""HOA: v1 +States: 27 +Start: 7 +AP: 11 "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" +acc-name: parity max odd 3 +Acceptance: 3 Fin(2) & (Inf(1) | Fin(0)) +properties: trans-labels explicit-labels trans-acc colored +properties: deterministic +spot-state-player: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +controllable-AP: 0 1 2 3 4 5 6 7 +--BODY-- +State: 0 +[t] 8 {0} +State: 1 +[8&9] 8 {0} +[!8&!10 | !9&!10] 9 {0} +[!8&10 | !9&10] 10 {0} +State: 2 +[8&9] 8 {0} +[!8&!10 | !9&!10] 11 {0} +[!8&10 | !9&10] 12 {0} +State: 3 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&10 | !9&10] 14 {0} +[!8&9&!10] 15 {0} +State: 4 +[8&9] 8 {0} +[!8&!10 | !9&!10] 16 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 5 +[8&9] 8 {0} +[!9&!10] 20 {0} +[!8&10 | !9&10] 21 {0} +[!8&9&!10] 22 {0} +State: 6 +[8&9] 8 {0} +[!8&!10 | !9&!10] 23 {0} +[!8&!9&10] 24 {0} +[!8&9&10] 25 {0} +[8&!9&10] 26 {0} +State: 7 +[8&9] 8 {0} +[!9&!10] 13 {0} +[!8&9&!10] 15 {0} +[!8&!9&10] 17 {0} +[!8&9&10] 18 {0} +[8&!9&10] 19 {0} +State: 8 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | +!0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | +!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | + 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +State: 9 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {2} +[!0&!1&2&3&!4&!5&!6&7 | 
!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 10 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 11 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +State: 12 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {2} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {2} +State: 13 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 3 {1} +[!0&!1&2&3&!4&!5&!6&7] 5 {1} +State: 14 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 15 +[!0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&4&!5&!6&7] 4 {1} +[!0&!1&2&3&!4&!5&!6&7] 6 {1} +State: 16 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 1 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +!0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 17 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} 
+[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 18 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 19 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 | +!0&!1&2&!3&4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7] 0 {1} +[!0&!1&2&3&!4&!5&!6&7 | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&3&!4&!5&6&!7] 6 {1} +State: 20 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 3 {1} +State: 21 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 2 {1} +State: 22 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&!4&5&!6&7] 4 {1} +State: 23 +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&!3&4&!5&6&!7 | +!0&1&!2&3&!4&!5&!6&7 | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 | +0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | 0&!1&!2&3&!4&!5&6&!7] 0 {1} +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +!0&1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +State: 24 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 4 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 6 {1} +State: 25 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&6&!7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&6&!7 | 
!0&1&!2&3&!4&!5&6&!7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&!6&7] 3 {1} +[!0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7] 5 {1} +State: 26 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&1&!2&!3&!4&5&!6&7 | +0&!1&!2&!3&!4&5&!6&7 | 0&!1&!2&!3&!4&5&6&!7] 1 {1} +[!0&!1&2&!3&4&!5&!6&7 | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 | +!0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&4&!5&!6&7 | !0&1&!2&3&!4&!5&!6&7 | +0&!1&!2&!3&4&!5&!6&7 | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 | +0&!1&!2&3&!4&!5&6&!7] 2 {1} +[!0&1&!2&!3&!4&5&6&!7] 4 {1} +[!0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&6&!7] 6 {1} +--END--""") + +tc.assertTrue(spot.solve_game(game, si)) + +games = spot.split_edges(game) +spot.set_state_players(games, spot.get_state_players(game)) +tc.assertTrue(spot.solve_game(games, si)) + +g = spot.translate("GF(a&X(a)) -> GFb") +a = buddy.bdd_ithvar(g.register_ap("a")) +b = buddy.bdd_ithvar(g.register_ap("b")) +gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), + False, True, True, False) +spot.change_parity_here(gdpa, spot.parity_kind_max, spot.parity_style_odd) +gsdpa = spot.split_2step(gdpa, b, True) +spot.colorize_parity_here(gsdpa, True) +tc.assertTrue(spot.solve_parity_game(gsdpa)) +tc.assertEqual(spot.highlight_strategy(gsdpa).to_str("HOA", "1.1"), +"""HOA: v1.1 +States: 18 +Start: 0 +AP: 2 "a" "b" +acc-name: parity max odd 5 +Acceptance: 5 Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0)))) +properties: trans-labels explicit-labels trans-acc colored complete +properties: deterministic +spot.highlight.states: 0 4 1 4 2 4 3 4 4 4 5 4 6 4 7 4 8 4 9 4 """ ++"""10 4 11 4 12 4 13 4 14 4 15 4 16 4 17 4 +spot.highlight.edges: 15 4 17 4 20 4 22 4 24 4 26 4 28 4 30 4 31 4 32 4 33 4 +spot.state-player: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 +controllable-AP: 1 +--BODY-- +State: 0 +[!0] 7 {0} +[0] 8 {0} +State: 1 +[!0] 9 {3} +[0] 10 {3} +State: 2 +[!0] 11 {1} +[0] 12 {1} +State: 3 +[!0] 9 {3} +[0] 13 {4} +State: 4 +[!0] 11 {1} +[0] 14 {2} +State: 5 +[!0] 15 {3} +[0] 16 {3} +State: 6 +[!0] 15 {3} +[0] 17 {4} +State: 7 +[!1] 1 {0} +[1] 2 {0} +State: 8 +[!1] 3 {0} +[1] 4 {0} +State: 9 +[!1] 1 {3} +[1] 5 {3} +State: 10 +[!1] 3 {3} +[1] 6 {3} +State: 11 +[!1] 2 {1} +[1] 2 {3} +State: 12 +[!1] 4 {1} +[1] 4 {3} +State: 13 +[!1] 3 {4} +[1] 4 {4} +State: 14 +[!1] 4 {2} +[1] 4 {3} +State: 15 +[t] 5 {3} +State: 16 +[t] 6 {3} +State: 17 +[t] 4 {4} --END--""" +) + +# Test the different parity conditions +gdpa = spot.tgba_determinize(spot.degeneralize_tba(g), + False, True, True, False) + +g_test = spot.change_parity(gdpa, spot.parity_kind_max, spot.parity_style_odd) +g_test_split = spot.split_2step(g_test, b, True) +sp = spot.get_state_players(g_test_split) +g_test_split_c = spot.colorize_parity(g_test_split) +spot.set_state_players(g_test_split_c, sp) +tc.assertTrue(spot.solve_parity_game(g_test_split_c)) +c_strat = spot.get_strategy(g_test_split_c) +# All versions of parity need to result in the same strategy +for kind in [spot.parity_kind_min, spot.parity_kind_max]: + for style in [spot.parity_style_even, spot.parity_style_odd]: + g_test_split1 = spot.change_parity(g_test_split, kind, style) + spot.set_state_players(g_test_split1, sp) + tc.assertTrue(spot.solve_parity_game(g_test_split1)) + c_strat1 = spot.get_strategy(g_test_split1) + tc.assertTrue(c_strat == c_strat1) + +# Test that strategies are not appended +# if solve is called multiple times +aut = spot.make_twa_graph() +aut.set_buchi() +aut.new_states(2) +aut.new_edge(0,1,buddy.bddtrue, 
[0]) +aut.new_edge(1,0,buddy.bddtrue, []) +spot.set_state_players(aut, [False, True]) +spot.solve_game(aut) +S1 = list(spot.get_strategy(aut)) +spot.solve_game(aut) +S2 = list(spot.get_strategy(aut)) +tc.assertEqual(S1, S2) + + +# Finite games +alive = "__alive__" +def finite_existential(auts): + # 1 Accepting state -> selfloop + # 2 Prune + acc_state = set() + sp = list(spot.get_state_players(auts)) + for e in auts.edges(): + if e.acc: + acc_state.add(e.src) + for s in acc_state: + e_kill = auts.out_iteraser(s) + while (e_kill): + e_kill.erase() + for s in acc_state: + sprime = auts.new_state() + sp.append(not sp[s]) + auts.new_edge(s, sprime, buddy.bddtrue, [0]) + auts.new_edge(sprime, s, buddy.bddtrue, [0]) + spot.set_state_players(auts, sp) + auts.purge_dead_states() + spot.alternate_players(auts, False, False) + return auts + +def is_input_complete(auts): + sp = spot.get_state_players(auts) + for s in range(auts.num_states()): + if sp[s]: + continue # Player + cumul = buddy.bddfalse + for e in auts.out(s): + cumul |= e.cond + if cumul != buddy.bddtrue: + return False + + return True + +def synt_from_ltlf(f:str, outs): + ff = spot.from_ltlf(f, alive) + aut = ff.translate("buchi", "sbacc") + outbdd = buddy.bddtrue + for out in outs: + outbdd &= buddy.bdd_ithvar(aut.register_ap(out)) + alive_bdd = buddy.bdd_ithvar(aut.register_ap(alive)) + auts = spot.split_2step(aut, outbdd & alive_bdd, False) + auts = spot.to_finite(auts, alive) + spot.alternate_players(auts, False, False) + spot.set_synthesis_outputs(auts, outbdd) + if not is_input_complete(auts): + print("Not synthesizable") + return None + auts = finite_existential(auts) + + return auts + +def synt_ltlf(f:str, outs, res:str = "aut"): + auts = synt_from_ltlf(f, outs) + + succ = spot.solve_parity_game(auts) + if not succ: + if res == "aut": + return False, auts + else: + return False, None + + mealy_cc = spot.solved_game_to_split_mealy(auts) + + if res == "aut": + return True, mealy_cc + elif res == "aig": + return True, spot.mealy_machine_to_aig(mealy_cc, "isop") + else: + raise RuntimeError("Unknown option") + + +sink_player = None + +def negate_ltlf(f:str, outs, opt = "buchi"): + + global sink_player + sink_player = None + + aut = synt_from_ltlf(f, outs) + # Implies input completeness + # We need output completeness + acc = [] + + sp = list(spot.get_state_players(aut)) + + def get_sink(): + global sink_player + if sink_player is None: + sink_player = aut.new_states(2) + aut.new_edge(sink_player, sink_player + 1, buddy.bddtrue, acc) + aut.new_edge(sink_player + 1, sink_player, buddy.bddtrue, acc) + sp.append(False) + sp.append(True) + spot.set_state_players(aut, sp) + return sink_player + + for s in range(aut.num_states()): + if not sp[s]: + continue + rem = buddy.bddtrue + for e in aut.out(s): + rem -= e.cond + if rem != buddy.bddfalse: + aut.new_edge(s, get_sink(), rem) + + # Better to invert colors or condition? + if opt == "buchi": + for e in aut.edges(): + if e.acc: + e.acc = spot.mark_t() + else: + e.acc = spot.mark_t([0]) + elif opt == "cobuchi": + aut.set_co_buchi() + else: + raise RuntimeError("Unknown opt") + return aut + +# Game where the edge_vector is larger +# than the number of transitions +f1 = "((((G (F (idle))) && (G (((idle) && (X ((! (grant_0)) \ + && (! (grant_1))))) -> (X (idle))))) && (G ((X (! (grant_0))) \ + || (X (((! (request_0)) && (! (idle))) U ((! (request_0)) \ + && (idle))))))) -> (((G (((((X (((! (grant_0)) && (true)) \ + || ((true) && (! 
(grant_1))))) && ((X (grant_0)) -> (request_0))) \ + && ((X (grant_1)) -> (request_1))) && ((request_0) -> (grant_1))) \ + && ((! (idle)) -> (X ((! (grant_0)) && (! (grant_1))))))) \ + && (! (F (G ((request_0) && (X (! (grant_0)))))))) \ + && (! (F (G ((request_1) && (X (! (grant_1)))))))))" +outs = ["grant_0", "grant1"] +tc.assertEqual(synt_ltlf(f1, outs)[0], False) \ No newline at end of file diff --git a/tests/python/games.ipynb b/tests/python/games.ipynb index 324aab546..9ec8bb76e 100644 --- a/tests/python/games.ipynb +++ b/tests/python/games.ipynb @@ -670,7 +670,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Input/Output\n", + "## Input/Output in HOA format\n", "\n", "An extension of the HOA format makes it possible to store the `state-player` property. This allows us to read the parity game constructed by `ltlsynt` using `spot.automaton()` like any other automaton." ] @@ -689,250 +689,215 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))))\n", - "[parity max odd 5]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "I->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "10\n", - "\n", - "10\n", - "\n", - "\n", - "\n", - "4->10\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "11\n", - "\n", - "11\n", - "\n", - "\n", - "\n", - "4->11\n", - "\n", - "\n", - "a\n", - "\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "5->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "a\n", "\n", "\n", - "\n", + "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", "\n", - "6->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "1->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "1->7\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "5->3\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", - "\n", - "\n", - "2->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", - "\n", + "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", - "\n", + "\n", "3->9\n", - "\n", - "\n", - "1\n", - 
"\n", - "\n", - "\n", - "\n", - "9->2\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", - "9->3\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", "\n", - "10->0\n", - "\n", - "\n", - "!b\n", - "\n", + "9->3\n", + "\n", + "\n", + "b\n", + "\n", "\n", - "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", "\n", - "10->3\n", - "\n", - "\n", - "b\n", - "\n", + "9->4\n", + "\n", + "\n", + "!b\n", "\n", - "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", - "11->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "\n", - "11->3\n", - "\n", - "\n", - "b\n", - "\n", + "10->4\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5c143ec630> >" + " *' at 0x7fcbe436f840> >" ] }, "execution_count": 8, @@ -964,46 +929,44 @@ "output_type": "stream", "text": [ "HOA: v1\n", - "States: 12\n", - "Start: 4\n", + "States: 11\n", + "Start: 0\n", "AP: 2 \"b\" \"a\"\n", - "acc-name: parity max odd 5\n", - "Acceptance: 5 Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0))))\n", - "properties: trans-labels explicit-labels trans-acc colored complete\n", + "acc-name: co-Buchi\n", + "Acceptance: 1 Fin(0)\n", + "properties: trans-labels explicit-labels trans-acc complete\n", "properties: deterministic\n", - "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1 1\n", + "spot-state-player: 0 0 0 0 0 1 1 1 1 1 1\n", "controllable-AP: 0\n", "--BODY--\n", "State: 0\n", - "[!1] 5 {1}\n", - "[1] 6 {1}\n", + "[!1] 5\n", + "[1] 6\n", "State: 1\n", - "[1] 6 {1}\n", - "[!1] 7 {1}\n", + "[!1] 7\n", + "[1] 8 {0}\n", "State: 2\n", - "[t] 8 {1}\n", + "[!1] 7\n", + "[1] 8 {0}\n", "State: 3\n", - "[t] 9 {1}\n", + "[t] 9\n", "State: 4\n", - "[!1] 10 {1}\n", - "[1] 11 {1}\n", + "[t] 10\n", "State: 5\n", - "[t] 0 {3}\n", + "[!0] 1\n", + "[0] 3\n", "State: 6\n", - "[t] 1 {4}\n", + "[!0] 2\n", + "[0] 3\n", "State: 7\n", - "[t] 0 {4}\n", + "[t] 1\n", "State: 8\n", - "[t] 2 {3}\n", + "[t] 2 {0}\n", "State: 9\n", - "[!0] 2 {3}\n", - "[0] 3 {4}\n", + "[0] 3 {0}\n", + "[!0] 4\n", "State: 10\n", - "[!0] 0 {3}\n", - "[0] 3 {3}\n", - "State: 11\n", - "[!0] 1 {3}\n", - "[0] 3 {3}\n", + "[t] 4\n", "--END--\n" ] } @@ -1053,250 +1016,215 @@ "\n", "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - "))))\n", - "[parity max odd 5]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "I->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "10\n", - "\n", - "10\n", - "\n", - "\n", - "\n", - "4->10\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "11\n", - "\n", - "11\n", - "\n", - "\n", - "\n", - "4->11\n", - "\n", - "\n", - "a\n", - "\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "0->5\n", - "\n", - "\n", - "!a\n", - "\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "0->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "5->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "a\n", "\n", "\n", - "\n", + "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + 
"3\n", + "\n", + "\n", "\n", - "6->1\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "1->6\n", - "\n", - "\n", - "a\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "1->7\n", - "\n", - "\n", - "!a\n", - "\n", - "\n", - "\n", - "\n", - "7->0\n", - "\n", - "\n", - "1\n", - "\n", + "5->3\n", + "\n", + "\n", + "b\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "6->2\n", + "\n", + "\n", + "!b\n", + "\n", + "\n", + "\n", + "6->3\n", + "\n", + "\n", + "b\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "1->7\n", + "\n", + "\n", + "!a\n", "\n", "\n", - "\n", + "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", - "\n", - "\n", - "2->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1->8\n", + "\n", + "\n", + "a\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "8->2\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2->7\n", + "\n", + "\n", + "!a\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "a\n", + "\n", "\n", "\n", - "\n", + "\n", "9\n", - "\n", - "9\n", + "\n", + "9\n", "\n", "\n", - "\n", + "\n", "3->9\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "9->2\n", - "\n", - "\n", - "!b\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", - "\n", - "9->3\n", - "\n", - "\n", - "b\n", - "\n", - "\n", - "\n", "\n", - "10->0\n", - "\n", - "\n", - "!b\n", - "\n", + "9->3\n", + "\n", + "\n", + "b\n", + "\n", "\n", - "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", "\n", - "10->3\n", - "\n", - "\n", - "b\n", - "\n", + "9->4\n", + "\n", + "\n", + "!b\n", "\n", - "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", "\n", - "11->1\n", - "\n", - "\n", - "!b\n", - "\n", - "\n", - "\n", - "\n", - "11->3\n", - "\n", - "\n", - "b\n", - "\n", + "10->4\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7f5c143fdf30> >" + " *' at 0x7fcbe436e9a0> >" ] }, "execution_count": 11, @@ -1307,11 +1235,1576 @@ "source": [ "spot.highlight_strategy(game)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Input/Output in PGSolver format\n", + "\n", + "The automaton parser is also able to parse the [PGSolver](https://github.com/tcsprojects/pgsolver) format. Here are two examples from the manual of PGSolver. The support for C-style comments is not part of the PGSolver format.\n", + "\n", + "Note that we use diamond node for player 1, while PGSolver use those of player 0. Also in Spot the acceptance condition is what Player 1 should satisfy; player 0 has two way to not satisfy it: leading to a rejecting cycle, or to a state without successor. In PGSolver, the graph is assumed to be total (i.e. each state has a successor), so player 0 can only win by reaching a rejecting cycle, which is equivalent to a `parity max even` acceptance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | (Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))))))))\n", + "[parity max odd 9]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "Africa\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "Antarctica\n", + "\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "Asia\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "America\n", + "\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "Australia\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ") | Fin(\n", + "\n", + ")\n", + "[Streett 1]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "\n", + "2->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "\n", + "5->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->1\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a,b = spot.automata(\"\"\"\n", + "parity 4; /* Example 6 in the manual for PGSolver 4.1 */\n", + "0 6 1 4,2 \"Africa\";\n", + "4 5 1 0 \"Antarctica\";\n", + "1 8 1 2,4,3 \"America\";\n", + "3 6 0 4,2 \"Australia\";\n", + "2 7 0 3,1,0,4 \"Asia\";\n", + "parity 8; /* Example 7 in the manual for PGSolver 4.1 */\n", + "0 0 0 1,2;\n", + "1 1 1 
2,3;\n", + "2 0 0 3,4;\n", + "3 1 1 4,5;\n", + "4 0 0 5,6;\n", + "5 1 1 6,7;\n", + "6 0 0 7,0;\n", + "7 1 1 0,1;\n", + "\"\"\")\n", + "spot.solve_game(a)\n", + "spot.highlight_strategy(a)\n", + "spot.solve_game(b)\n", + "spot.highlight_strategy(b)\n", + "display(a.show('.g'), b.show('.g'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To output a parity-game in PG-solver format, use `to_str('pg')`." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "parity 4;\n", + "0 6 1 4,2 \"Africa\";\n", + "2 7 0 3,1,0,4 \"Asia\";\n", + "4 5 1 0 \"Antarctica\";\n", + "1 8 1 2,4,3 \"America\";\n", + "3 6 0 4,2 \"Australia\";\n", + "parity 7;\n", + "0 0 0 1,2;\n", + "2 0 0 3,4;\n", + "4 0 0 5,6;\n", + "6 0 0 7,0;\n", + "7 1 1 0,1;\n", + "1 1 1 2,3;\n", + "3 1 1 4,5;\n", + "5 1 1 6,7;\n", + "\n" + ] + } + ], + "source": [ + "print(a.to_str('pg') + b.to_str('pg'))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Global vs local solver\n", + "\n", + "The parity game solver now supports \"local\" and global solutions.\n", + "\n", + "- \"Local\" solutions are the ones computed so far. A strategy is only computed for the part of the automaton that is rachable from the initial state\n", + "- Global solutions can now be obtained by setting the argument \"solve_globally\" to true. In this case a strategy will be computed even for states not reachable in the original automaton.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + 
"\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "arena = spot.make_twa_graph()\n", + "\n", + "arena.new_states(3*7)\n", + "arena.set_buchi()\n", + "\n", + "edges = [(0,1), (0,2), (1,3), (2,3), (3,4), (4,0), (5,0), (5,6), (6,5)]\n", + "\n", + "for src, dst in edges:\n", + " arena.new_edge(src, dst, bddtrue, [0] if src == 4 else [])\n", + " arena.new_edge(src + 7, dst + 7, bddtrue, [0] if src == 4 else [])\n", + " arena.new_edge(src + 14, dst + 14, bddtrue, [0] if src == 6 else [])\n", + "\n", + "arena.set_state_players(3*[False, True, True, False, True, True, False])\n", + "arena" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(0, 7, 10, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", 
+ "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 1) Solving the game locally\n", + "# Unreachable parts are ignored, all of them are \"won\" by the env,\n", + "# the associated strategy is the 0 edges indicating no strategy\n", + "spot.solve_parity_game(arena)\n", + "spot.highlight_strategy(arena)\n", + "print(arena.get_strategy())\n", + "arena" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(0, 7, 10, 0, 16, 19, 0, 0, 8, 11, 0, 17, 20, 0, 3, 0, 0, 15, 0, 24, 0)\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "4->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "5->0\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "5->6\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "6->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "7->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "7->9\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "10\n", + 
"\n", + "10\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "10->11\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "12->13\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "13->12\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "15\n", + "\n", + "15\n", + "\n", + "\n", + "\n", + "14->15\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16\n", + "\n", + "16\n", + "\n", + "\n", + "\n", + "14->16\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "17\n", + "\n", + "17\n", + "\n", + "\n", + "\n", + "15->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "16->17\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "17->18\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "18->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "19\n", + "\n", + "19\n", + "\n", + "\n", + "\n", + "19->14\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20\n", + "\n", + "20\n", + "\n", + "\n", + "\n", + "19->20\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "20->19\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7fcbe4382370> >" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 1) Solving the game globally\n", + "# The whole automaton is considered in this case\n", + "spot.solve_parity_game(arena, True)\n", + "spot.highlight_strategy(arena)\n", + "print(arena.get_strategy())\n", + "arena" + ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -1325,7 +2818,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.10.7" } }, "nbformat": 4, diff --git a/tests/python/gen.py b/tests/python/gen.py index dd844741c..a9fed6890 100644 --- a/tests/python/gen.py +++ b/tests/python/gen.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -23,63 +23,66 @@ import spot.gen as gen from sys import exit +from unittest import TestCase +tc = TestCase() k2 = gen.aut_pattern(gen.AUT_KS_NCA, 2) -assert k2.prop_state_acc() -assert k2.num_states() == 5 -assert k2.prop_universal().is_false() -assert k2.prop_inherently_weak().is_false() -assert k2.prop_stutter_invariant().is_false() -assert k2.prop_semi_deterministic().is_false() -assert k2.prop_deterministic().is_false() -assert k2.prop_terminal().is_false() +tc.assertTrue(k2.prop_state_acc()) +tc.assertEqual(k2.num_states(), 5) +tc.assertTrue(k2.prop_universal().is_false()) +tc.assertTrue(k2.prop_inherently_weak().is_false()) +tc.assertTrue(k2.prop_stutter_invariant().is_false()) +tc.assertTrue(k2.prop_semi_deterministic().is_false()) +tc.assertTrue(k2.prop_deterministic().is_false()) +tc.assertTrue(k2.prop_terminal().is_false()) # to_str is defined in the spot package, so this makes sure # the type returned by spot.gen.ks_nca() is the correct one. -assert 'to_str' in dir(k2) +tc.assertIn('to_str', dir(k2)) k3 = gen.aut_pattern(gen.AUT_L_NBA, 3) -assert k3.num_states() == 10 -assert k3.prop_state_acc() -assert k3.prop_universal().is_false() -assert k3.prop_inherently_weak().is_false() -assert k3.prop_stutter_invariant().is_false() -assert k3.prop_semi_deterministic().is_false() -assert k3.prop_deterministic().is_false() -assert k3.prop_terminal().is_false() +tc.assertEqual(k3.num_states(), 10) +tc.assertTrue(k3.prop_state_acc()) +tc.assertTrue(k3.prop_universal().is_false()) +tc.assertTrue(k3.prop_inherently_weak().is_false()) +tc.assertTrue(k3.prop_stutter_invariant().is_false()) +tc.assertTrue(k3.prop_semi_deterministic().is_false()) +tc.assertTrue(k3.prop_deterministic().is_false()) +tc.assertTrue(k3.prop_terminal().is_false()) -assert k2.get_dict() == k3.get_dict() +tc.assertEqual(k2.get_dict(), k3.get_dict()) try: gen.aut_pattern(gen.AUT_KS_NCA, 0) except RuntimeError as e: - assert 'positive argument' in str(e) + tc.assertIn('positive argument', str(e)) else: exit(2) f = gen.ltl_pattern(gen.LTL_AND_F, 3) -assert f.size() == 3 -assert gen.ltl_pattern_name(gen.LTL_AND_F) == "and-f" +tc.assertEqual(f.size(), 3) +tc.assertEqual(gen.ltl_pattern_name(gen.LTL_AND_F), "and-f") try: gen.ltl_pattern(1000, 3) except RuntimeError as e: - assert 'unsupported pattern' in str(e) + tc.assertIn('unsupported pattern', str(e)) else: exit(2) try: gen.ltl_pattern(gen.LTL_OR_G, -10) except RuntimeError as e: - assert 'or-g' in str(e) - assert 'positive' in str(e) + tc.assertIn('or-g', str(e)) + tc.assertIn('positive', str(e)) else: exit(2) -assert 40 == sum(p.size() for p in gen.ltl_patterns((gen.LTL_OR_G, 1, 5), - (gen.LTL_GH_Q, 3), - gen.LTL_EH_PATTERNS)) +tc.assertEqual(40, sum(p.size() + for p in gen.ltl_patterns((gen.LTL_OR_G, 1, 5), + (gen.LTL_GH_Q, 3), + gen.LTL_EH_PATTERNS))) -assert 32 == sum(p.num_states() - for p in gen.aut_patterns((gen.AUT_L_NBA, 1, 3), - (gen.AUT_KS_NCA, 5))) +tc.assertEqual(32, sum(p.num_states() + for p in gen.aut_patterns((gen.AUT_L_NBA, 1, 3), + (gen.AUT_KS_NCA, 5)))) diff --git a/tests/python/genem.py b/tests/python/genem.py index 5da9ce85c..970fe705b 100644 --- a/tests/python/genem.py +++ b/tests/python/genem.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2018-2023 Laboratoire de Recherche et Développement de +# l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -22,6 +22,8 @@ # are usable with methods from the spot package. import spot +from unittest import TestCase +tc = TestCase() a1 = spot.automaton(''' HOA: v1 name: "aut" States: 4 Start: 0 AP: 0 @@ -179,7 +181,7 @@ def generic_emptiness2_rec(aut): # Find some Fin set, we necessarily have one, otherwise the SCC # would have been found to be either rejecting or accepting. fo = acc.fin_one() - assert fo >= 0, acc + tc.assertTrue(fo >= 0, acc) for part in si.split_on_sets(scc, [fo]): if not generic_emptiness2(part): return False @@ -303,16 +305,22 @@ def run_bench(automata): res3c = spot.generic_emptiness_check(aut) spot.generic_emptiness_check_select_version("spot210") res3d = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot211") + res3e = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot212") + res3f = spot.generic_emptiness_check(aut) + spot.generic_emptiness_check_select_version("spot29") res2 = spot.remove_fin(aut).is_empty() res1 = generic_emptiness2(aut) res = (str(res1)[0] + str(res2)[0] + str(res3a)[0] + str(res3b)[0] + str(res3c)[0] + str(res3d)[0] - + str(res4)[0] + str(res5)[0]) + + str(res3e)[0] + str(res3f)[0] + str(res4)[0] + + str(res5)[0]) print(res) - assert res in ('TTTTTTTT', 'FFFFFFFF') - if res == 'FFFFFFFF': + tc.assertIn(res, ('TTTTTTTTTT', 'FFFFFFFFFF')) + if res == 'FFFFFFFFFF': run3 = spot.generic_accepting_run(aut) - assert run3.replay(spot.get_cout()) is True + tc.assertTrue(run3.replay(spot.get_cout())) run_bench([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a360, act]) diff --git a/tests/python/implies.py b/tests/python/implies.py index 2e4e64ddd..24d74b720 100755 --- a/tests/python/implies.py +++ b/tests/python/implies.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012 Laboratoire de Recherche et Développement +# Copyright (C) 2012, 2022 Laboratoire de Recherche et Développement # de l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -19,6 +19,8 @@ import sys from buddy import * +from unittest import TestCase +tc = TestCase() bdd_init(10000, 10000) bdd_setvarnum(5) @@ -33,26 +35,26 @@ e = V[1] & V[2] & -V[3] & V[4] f = V[0] & -V[3] & V[4] g = -V[0] | V[1] -assert(bdd_implies(b, a)) -assert(not bdd_implies(a, b)) -assert(not bdd_implies(c, a)) -assert(bdd_implies(a, d)) -assert(bdd_implies(b, d)) -assert(bdd_implies(c, d)) -assert(bdd_implies(d, d)) -assert(not bdd_implies(e, d)) -assert(not bdd_implies(d, e)) -assert(not bdd_implies(f, e)) -assert(not bdd_implies(e, f)) -assert(bdd_implies(bddfalse, f)) -assert(not bdd_implies(bddtrue, f)) -assert(bdd_implies(f, bddtrue)) -assert(not bdd_implies(f, bddfalse)) -assert(bdd_implies(a, g)) +tc.assertTrue(bdd_implies(b, a)) +tc.assertFalse(bdd_implies(a, b)) +tc.assertFalse(bdd_implies(c, a)) +tc.assertTrue(bdd_implies(a, d)) +tc.assertTrue(bdd_implies(b, d)) +tc.assertTrue(bdd_implies(c, d)) +tc.assertTrue(bdd_implies(d, d)) +tc.assertFalse(bdd_implies(e, d)) +tc.assertFalse(bdd_implies(d, e)) +tc.assertFalse(bdd_implies(f, e)) +tc.assertFalse(bdd_implies(e, f)) +tc.assertTrue(bdd_implies(bddfalse, f)) +tc.assertFalse(bdd_implies(bddtrue, f)) +tc.assertTrue(bdd_implies(f, bddtrue)) +tc.assertFalse(bdd_implies(f, bddfalse)) +tc.assertTrue(bdd_implies(a, g)) a = (-V[2] & (-V[1] | V[0])) | (-V[0] & V[1] & V[2]) b = V[1] | -V[2] -assert(bdd_implies(a, b)) +tc.assertTrue(bdd_implies(a, b)) # Cleanup all BDD variables before calling bdd_done(), otherwise # bdd_delref will be called after bdd_done() and this is unsafe in diff --git a/tests/python/intrun.py b/tests/python/intrun.py index c86c6d643..02a7aedd6 100644 --- a/tests/python/intrun.py +++ b/tests/python/intrun.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2020, 2022, 2023 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # This issue was reported by Florian Renkin. The reduce() call used in # intersecting_run() was bogus, and could incorrectly reduce a word @@ -34,5 +36,43 @@ trans-labels explicit-labels trans-acc complete properties: deterministic State: 3 [t] 1 {1 2} State: 4 [!0&1] 4 {2} [!0&!1] 3 {2} [0] 2 {0 2} --END--""") r = b.intersecting_run(spot.complement(a)); c = spot.twa_word(r).as_automaton() -assert c.intersects(b) -assert not c.intersects(a) +tc.assertTrue(c.intersects(b)) +tc.assertFalse(c.intersects(a)) + +# The next test came from Philipp Schlehuber-Caissier: running +# as_twa() on a run built from a A.intersecting_run(B) failed to build +# the automaton because it tried to rebuild the run on A and did not +# find transitions matching exactly. Additionally the idea of merging +# states in as_twa() seems to be a way to create some disasters, so we +# removed that too. 
+a = spot.translate("a"); +b = spot.translate("{a;1;a}"); +r = a.intersecting_run(b) +tc.assertEqual(str(r), """Prefix: + 1 + | a + 0 + | 1 {0} + 0 + | a {0} +Cycle: + 0 + | 1 {0} +""") +tc.assertEqual(r.as_twa().to_str(), """HOA: v1 +States: 4 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc deterministic +--BODY-- +State: 0 +[0] 1 +State: 1 {0} +[t] 2 +State: 2 {0} +[0] 3 +State: 3 {0} +[t] 3 +--END--""") diff --git a/tests/python/ipnbdoctest.py b/tests/python/ipnbdoctest.py index 18da81cf8..47b73f901 100755 --- a/tests/python/ipnbdoctest.py +++ b/tests/python/ipnbdoctest.py @@ -101,6 +101,11 @@ def canonicalize(s, type, ignores): # %%file writes `Writing`, or `Overwriting` if the file exists. s = re.sub(r'^Overwriting ', 'Writing ', s) + # Swig 4.1.0 fixed an ordering issue with how types are printed. + # aig_ptr is expected to be printed as shared_ptr, but prior + # Swig version did not do that. + s = re.sub(r'spot::aig_ptr ', 'std::shared_ptr< spot::aig > ', s) + # SVG generated by graphviz may put note at different positions # depending on the graphviz build. Let's just strip anything that # look like a position. @@ -143,6 +148,10 @@ def canonicalize(s, type, ignores): # timing result we cannot compare between runs. s = re.sub(r'', '
', s, flags=re.DOTALL) +    # Tables that contain premin_time are logs from the mealy minimization. +    # They contain timing results, so we cannot compare them between runs. +    s = re.sub(r'', '
', + s, flags=re.DOTALL) for n, p in enumerate(ignores): s = re.sub(p, 'IGN{}'.format(n), s) @@ -360,7 +369,8 @@ if __name__ == '__main__': except RuntimeError as e: # If the Kernel dies, try again. It seems we have spurious # failures when multiple instances of jupyter start in parallel. - if 'Kernel died' in str(e): + stre = str(e) + if 'Kernel died' in stre or "Kernel didn't respond" in stre: tries -= 1 if tries: s = random.randint(1, 5) diff --git a/tests/python/kripke.py b/tests/python/kripke.py index f3ce218b2..3670f592d 100644 --- a/tests/python/kripke.py +++ b/tests/python/kripke.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,6 +19,9 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() + bdict = spot.make_bdd_dict() k = spot.make_kripke_graph(bdict) p1 = buddy.bdd_ithvar(k.register_ap("p1")) @@ -26,50 +29,86 @@ p2 = buddy.bdd_ithvar(k.register_ap("p2")) cond1 = p1 & p2 cond2 = p1 & -p2 cond3 = -p1 & -p2 -s2 = k.new_state(cond1) +s0 = k.new_state(cond1) s1 = k.new_state(cond2) -s3 = k.new_state(cond3) +s2 = k.new_state(cond3) +k.new_edge(s1, s0) +k.new_edge(s0, s0) k.new_edge(s1, s2) k.new_edge(s2, s2) -k.new_edge(s1, s3) -k.new_edge(s3, s3) -k.new_edge(s3, s2) +k.new_edge(s2, s0) k.set_init_state(s1) hoa = """HOA: v1 States: 3 -Start: 0 +Start: 1 AP: 2 "p1" "p2" acc-name: all Acceptance: 0 t properties: state-labels explicit-labels state-acc --BODY-- -State: [0&!1] 0 "1" -1 2 -State: [0&1] 1 "0" -1 -State: [!0&!1] 2 "2" -2 1 +State: [0&1] 0 +0 +State: [0&!1] 1 +0 2 +State: [!0&!1] 2 +2 0 --END--""" -assert hoa == k.to_str('HOA') -assert k.num_states() == 3 -assert k.num_edges() == 5 +tc.assertEqual(hoa, k.to_str('HOA')) +tc.assertEqual(k.num_states(), 3) +tc.assertEqual(k.num_edges(), 5) + +k.set_state_names(["s0", "s1", "s2"]) +hoa = """HOA: v1 +States: 3 +Start: 1 +AP: 2 "p1" "p2" +acc-name: all +Acceptance: 0 t +properties: state-labels explicit-labels state-acc +--BODY-- +State: [0&1] 0 "s0" +0 +State: [0&!1] 1 "s1" +0 2 +State: [!0&!1] 2 "s2" +2 0 +--END--""" +tc.assertEqual(hoa, k.to_str('HOA')) + +k.set_state_names(["s0", "s1", "s2"]) +hoa = """HOA: v1 +States: 3 +Start: 1 +AP: 2 "p1" "p2" +acc-name: all +Acceptance: 0 t +properties: state-labels explicit-labels state-acc +--BODY-- +State: [0&1] 0 "s0" +0 +State: [0&!1] 1 "s1" +0 2 +State: [!0&!1] 2 "s2" +2 0 +--END--""" +tc.assertEqual(hoa, k.to_str('HOA')) res = [] for e in k.out(s1): res.append((e.src, e.dst)) -assert res == [(1, 0), (1, 2)] +tc.assertEqual(res, [(1, 0), (1, 2)]) res = [] for e in k.edges(): res.append((e.src, e.dst)) -assert res == [(1, 0), (0, 0), (1, 2), (2, 2), (2, 0)] +tc.assertEqual(res, [(1, 0), (0, 0), (1, 2), (2, 2), (2, 0)]) res = [] for s in k.states(): res.append(s.cond()) -assert res == [cond1, cond2, cond3] +tc.assertEqual(res, [cond1, cond2, cond3]) -assert k.states()[0].cond() == cond1 -assert k.states()[1].cond() == cond2 -assert k.states()[2].cond() == cond3 +tc.assertEqual(k.states()[0].cond(), cond1) +tc.assertEqual(k.states()[1].cond(), cond2) +tc.assertEqual(k.states()[2].cond(), cond3) diff --git a/tests/python/langmap.py b/tests/python/langmap.py index 6fd860986..723a5c0d5 100644 --- a/tests/python/langmap.py +++ b/tests/python/langmap.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (C) 2016, 2017, 
2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE) +# Copyright (C) 2016, 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE) # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import spot import sys +from unittest import TestCase +tc = TestCase() def hstates(txt): @@ -31,13 +33,10 @@ def hstates(txt): def test(f, opt, expected): aut = spot.translate(f, *opt, 'deterministic') v = spot.language_map(aut) - assert len(v) == aut.num_states() + tc.assertEqual(len(v), aut.num_states()) spot.highlight_languages(aut) l = hstates(aut.to_str('hoa', '1.1')) - if l != expected: - print('for {}\nexpected: {}\n but got: {}'.format(f, expected, l), - file=sys.stderr) - exit(1) + tc.assertEqual(l, expected) test('GF(a) & GFb & c', ['Buchi', 'SBAcc'], '1 0 2 0 3 0') @@ -50,6 +49,6 @@ test('Xa', ['Buchi', 'SBAcc'], '') try: test('FGa', ['Buchi'], '') except RuntimeError as e: - assert 'language_map only works with deterministic automata'in str(e) + tc.assertIn('language_map only works with deterministic automata', str(e)) else: exit(1) diff --git a/tests/python/ltl2tgba.py b/tests/python/ltl2tgba.py index 25fff4566..913c557be 100755 --- a/tests/python/ltl2tgba.py +++ b/tests/python/ltl2tgba.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2014-2016, 2021 Laboratoire de Recherche -# et Développement de l'Epita (LRDE). +# Copyright (C) 2009, 2010, 2012, 2014-2016, 2021-2022 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -98,7 +98,7 @@ if f: elif taa_opt: a = concrete = spot.ltl_to_taa(f, dict) else: - assert "unspecified translator" + raise RuntimeError("unspecified translator") if wdba: a = spot.ensure_digraph(a) @@ -117,7 +117,7 @@ if f: elif output == 6: spot.print_lbtt(cout, a) else: - assert "unknown output option" + raise RuntimeError("unknown output option") if degeneralize_opt: del degeneralized @@ -137,4 +137,6 @@ del dict # not necessary in other implementations. from platform import python_implementation if python_implementation() == 'CPython': - assert spot.fnode_instances_check() + from unittest import TestCase + tc = TestCase() + tc.assertTrue(spot.fnode_instances_check()) diff --git a/tests/python/ltlf.py b/tests/python/ltlf.py index 5676a2a1b..b13432d3e 100644 --- a/tests/python/ltlf.py +++ b/tests/python/ltlf.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de # l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() lcc = spot.language_containment_checker() @@ -43,5 +45,5 @@ for f in formulas: f4 = spot.formula_And([spot.from_ltlf(f2), cst]) print("{}\t=>\t{}".format(f1, f3)) print("{}\t=>\t{}".format(f2, f4)) - assert lcc.equal(f3, f4) + tc.assertTrue(lcc.equal(f3, f4)) print() diff --git a/tests/python/ltlparse.py b/tests/python/ltlparse.py index 98562743c..208e0c321 100755 --- a/tests/python/ltlparse.py +++ b/tests/python/ltlparse.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009-2012, 2014-2017, 2019, 2021 Laboratoire de Recherche et -# Développement de l'Epita (LRDE). 
+# Copyright (C) 2009-2012, 2014-2017, 2019, 2021-2022 Laboratoire de +# Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -22,6 +22,8 @@ import sys import spot +from unittest import TestCase +tc = TestCase() e = spot.default_environment.instance() @@ -41,11 +43,11 @@ for str1, isl in l: pf = spot.parse_infix_psl(str2, e) if pf.format_errors(spot.get_cout()): sys.exit(1) - assert isl == pf.f.is_leaf() + tc.assertEqual(isl, pf.f.is_leaf()) del pf -assert spot.formula('a').is_leaf() -assert spot.formula('0').is_leaf() +tc.assertTrue(spot.formula('a').is_leaf()) +tc.assertTrue(spot.formula('0').is_leaf()) for str1 in ['a * b', 'a xor b', 'a <-> b']: pf = spot.parse_infix_boolean(str1, e, False) @@ -66,21 +68,21 @@ for (x, op) in [('a* <-> b*', "`<->'"), ('a*[=2]', "[=...]"), ('a*[->2]', "[->...]")]: f5 = spot.parse_infix_sere(x) - assert f5.errors + tc.assertTrue(f5.errors) ostr = spot.ostringstream() f5.format_errors(ostr) err = ostr.str() - assert "not a Boolean expression" in err - assert op in err - assert "SERE" in err + tc.assertIn("not a Boolean expression", err) + tc.assertIn(op, err) + tc.assertIn("SERE", err) del f5 f6 = spot.parse_infix_sere('(a <-> b -> c ^ "b\n\n\rc")[=2] & c[->2]') -assert not f6.errors +tc.assertFalse(f6.errors) del f6 f6 = spot.parse_infix_sere('-') -assert f6.errors +tc.assertTrue(f6.errors) del f6 for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"), @@ -150,12 +152,12 @@ for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"), ('{"X}', "missing closing brace"), ]: f7 = spot.parse_infix_psl(x) - assert f7.errors + tc.assertTrue(f7.errors) ostr = spot.ostringstream() f7.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f7 for (x, msg) in [('a&', "missing right operand for \"and operator\""), @@ -174,12 +176,12 @@ for (x, msg) in [('a&', "missing right operand for \"and operator\""), ('!', "missing right operand for \"not operator\""), ]: f8 = spot.parse_infix_boolean(x) - assert f8.errors + tc.assertTrue(f8.errors) ostr = spot.ostringstream() f8.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f8 for (x, msg) in [('a->', "missing right operand for \"implication operator\""), @@ -191,12 +193,12 @@ for (x, msg) in [('a->', "missing right operand for \"implication operator\""), ]: f9 = spot.parse_infix_psl(x, spot.default_environment.instance(), False, True) - assert f9.errors + tc.assertTrue(f9.errors) ostr = spot.ostringstream() f9.format_errors(ostr) err = ostr.str() print(err) - assert msg in err + tc.assertIn(msg, err) del f9 # force GC before fnode_instances_check(), unless it's CPython @@ -205,15 +207,15 @@ if python_implementation() != 'CPython': import gc gc.collect() -assert spot.fnode_instances_check() +tc.assertTrue(spot.fnode_instances_check()) f = spot.formula_F(2, 4, spot.formula_ap("a")) -assert f == spot.formula("XX(a | X(a | X(a)))") +tc.assertEqual(f, spot.formula("XX(a | X(a | X(a)))")) f = spot.formula_G(2, 4, spot.formula_ap("a")) -assert f == spot.formula("XX(a & X(a & X(a)))") +tc.assertEqual(f, spot.formula("XX(a & X(a & X(a)))")) f = spot.formula_X(2, spot.formula_ap("a")) -assert f == spot.formula("XX(a)") +tc.assertEqual(f, spot.formula("XX(a)")) f = spot.formula_G(2, spot.formula_unbounded(), spot.formula_ap("a")) -assert f == spot.formula("XXG(a)") 
+tc.assertEqual(f, spot.formula("XXG(a)")) f = spot.formula_F(2, spot.formula_unbounded(), spot.formula_ap("a")) -assert f == spot.formula("XXF(a)") +tc.assertEqual(f, spot.formula("XXF(a)")) diff --git a/tests/python/ltlsimple.py b/tests/python/ltlsimple.py index 7b88f07dc..c21c3b7f1 100755 --- a/tests/python/ltlsimple.py +++ b/tests/python/ltlsimple.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2009, 2010, 2012, 2015, 2018, 2021 Laboratoire de +# Copyright (C) 2009, 2010, 2012, 2015, 2018, 2021-2022 Laboratoire de # Recherche et Développement de l'Epita (LRDE). # Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systemes Répartis Coopératifs (SRC), Université Pierre @@ -22,6 +22,8 @@ import spot import sys +from unittest import TestCase +tc = TestCase() # Some of the tests here assume timely destructor calls, as they occur # in the the reference-counted CPython implementation. Other @@ -35,13 +37,13 @@ b = spot.formula.ap('b') c = spot.formula.ap('c') c2 = spot.formula.ap('c') -assert c == c2 +tc.assertEqual(c, c2) op = spot.formula.And([a, b]) op2 = spot.formula.And([op, c]) op3 = spot.formula.And([a, c, b]) -assert op2 == op3 +tc.assertEqual(op2, op3) # The symbol for a subformula which hasn't been cloned is better # suppressed, so we don't attempt to reuse it elsewhere. @@ -52,12 +54,12 @@ sys.stdout.write('op2 = %s\n' % str(op2)) del a, b, c2 sys.stdout.write('op3 = %s\n' % str(op3)) -assert op2 == op3 +tc.assertEqual(op2, op3) op4 = spot.formula.Or([op2, op3]) sys.stdout.write('op4 = %s\n' % str(op4)) -assert op4 == op2 +tc.assertEqual(op4, op2) del op2, op3, op4 @@ -78,10 +80,11 @@ f5 = spot.formula.Xor(F, c) del a, b, c, T, F, f1, f2, f4, f5 if is_cpython: - assert spot.fnode_instances_check() + tc.assertTrue(spot.fnode_instances_check()) # ---------------------------------------------------------------------- -assert str([str(x) for x in spot.formula('a &b & c')]) == "['a', 'b', 'c']" +tc.assertEqual(str([str(x) for x in spot.formula('a &b & c')]), + "['a', 'b', 'c']") def switch_g_f(x): @@ -93,7 +96,7 @@ def switch_g_f(x): f = spot.formula('GFa & XFGb & Fc & G(a | b | Fd)') -assert str(switch_g_f(f)) == 'FGa & XGFb & Gc & F(a | b | Gd)' +tc.assertEqual(str(switch_g_f(f)), 'FGa & XGFb & Gc & F(a | b | Gd)') x = 0 @@ -105,7 +108,7 @@ def count_g(f): f.traverse(count_g) -assert x == 3 +tc.assertEqual(x, 3) # ---------------------------------------------------------------------- @@ -121,14 +124,14 @@ LBT for shell: echo {f:lq} | ... Default for CSV: ...,{f:c},... Wring, centered: {f:w:~^50}""".format(f=formula) -assert res == """\ +tc.assertEqual(res, """\ Default output: a U (b U "$strange[0]=name") Spin syntax: a U (b U ($strange[0]=name)) (Spin syntax): (a) U ((b) U ($strange[0]=name)) Default for shell: echo 'a U (b U "$strange[0]=name")' | ... LBT for shell: echo 'U "a" U "b" "$strange[0]=name"' | ... Default for CSV: ...,"a U (b U ""$strange[0]=name"")",... 
-Wring, centered: ~~~~~(a=1) U ((b=1) U ("$strange[0]=name"=1))~~~~~""" +Wring, centered: ~~~~~(a=1) U ((b=1) U ("$strange[0]=name"=1))~~~~~""") opt = spot.tl_simplifier_options(False, True, True, @@ -144,9 +147,8 @@ for (input, output) in [('(a&b)<->b', 'b->(a&b)'), ('b xor (!(a&b))', 'b->(a&b)'), ('!b xor (a&b)', 'b->(a&b)')]: f = spot.tl_simplifier(opt).simplify(input) - print(input, f, output) - assert(f == output) - assert(spot.are_equivalent(input, output)) + tc.assertEqual(f, output) + tc.assertTrue(spot.are_equivalent(input, output)) def myparse(input): @@ -157,7 +159,7 @@ def myparse(input): # This used to fail, because myparse would return a pointer # to pf.f inside the destroyed pf. -assert myparse('a U b') == spot.formula('a U b') +tc.assertEqual(myparse('a U b'), spot.formula('a U b')) -assert spot.is_liveness('a <-> GFb') -assert not spot.is_liveness('a & GFb') +tc.assertTrue(spot.is_liveness('a <-> GFb')) +tc.assertFalse(spot.is_liveness('a & GFb')) diff --git a/tests/python/ltsmin-pml.ipynb b/tests/python/ltsmin-pml.ipynb index 120ab11f5..5d25b207f 100644 --- a/tests/python/ltsmin-pml.ipynb +++ b/tests/python/ltsmin-pml.ipynb @@ -40,8 +40,8 @@ "SpinS Promela Compiler - version 1.1 (3-Feb-2015)\n", "(C) University of Twente, Formal Methods and Tools group\n", "\n", - "Parsing tmprn9_nun3.pml...\n", - "Parsing tmprn9_nun3.pml done (0.1 sec)\n", + "Parsing tmpwot5yb9c.pml...\n", + "Parsing tmpwot5yb9c.pml done (0.0 sec)\n", "\n", "Optimizing graphs...\n", " StateMerging changed 0 states/transitions.\n", @@ -84,8 +84,8 @@ " Found 2 / 2 (100.0%) Commuting actions \n", "Generating guard dependency matrices done (0.0 sec)\n", "\n", - "Written C code to /home/adl/git/spot/tests/python/tmprn9_nun3.pml.spins.c\n", - "Compiled C code to PINS library tmprn9_nun3.pml.spins\n", + "Written C code to /home/adl/git/spot/tests/python/tmpwot5yb9c.pml.spins.c\n", + "Compiled C code to PINS library tmpwot5yb9c.pml.spins\n", "\n" ] } @@ -419,7 +419,7 @@ "\n" ], "text/plain": [ - " *' at 0x7fb7d8049450> >" + " *' at 0x7f7f9849ee20> >" ] }, "execution_count": 4, @@ -1120,6 +1120,33 @@ "k.show('.1K')" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since a kripke structure is a `twa`, can be used on the right-hand side of `contains`. Here we show that every path of `k` contains a step where `P_0.a < 2`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spot.contains('F\"P_0.a < 2\"', k)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1132,7 +1159,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -1141,7 +1168,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -1173,7 +1200,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -1239,7 +1266,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -1251,7 +1278,7 @@ " P_0.b: int" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -1262,7 +1289,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -1286,7 +1313,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.10.5" } }, "nbformat": 4, diff --git a/tests/python/mealy.py b/tests/python/mealy.py index da71d1bfb..7f6070146 100644 --- a/tests/python/mealy.py +++ b/tests/python/mealy.py @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot, buddy +from unittest import TestCase +tc = TestCase() # Testing Sat-based approach @@ -42,8 +44,8 @@ spot.set_state_players(a, [False,True,False,True,False,True]) spot.set_synthesis_outputs(a, o1&o2) b = spot.minimize_mealy(a) -assert(list(spot.get_state_players(b)).count(False) == 2) -assert(spot.is_split_mealy_specialization(a, b)) +tc.assertEqual(list(spot.get_state_players(b)).count(False), 2) +tc.assertTrue(spot.is_split_mealy_specialization(a, b)) test_auts = [ ("""HOA: v1 @@ -371,21 +373,21 @@ for (mealy_str, nenv_min) in test_auts: elif aap.ap_name().startswith("i"): ins = ins & buddy.bdd_ithvar(mealy.register_ap(aap.ap_name())) else: - assert("""Aps must start with either "i" or "o".""") + raise AssertionError("""Aps must start with either "i" or "o".""") spot.set_synthesis_outputs(mealy, outs) mealy_min_ks = spot.minimize_mealy(mealy, -1) n_e = sum([s == 0 for s in spot.get_state_players(mealy_min_ks)]) - assert(n_e == nenv_min) - assert(spot.is_split_mealy_specialization(mealy, mealy_min_ks)) + tc.assertEqual(n_e, nenv_min) + tc.assertTrue(spot.is_split_mealy_specialization(mealy, mealy_min_ks)) # Test un- and resplit tmp = spot.unsplit_2step(mealy_min_ks) mealy_min_rs = spot.split_2step(tmp, spot.get_synthesis_outputs(tmp), False) - assert(spot.is_split_mealy_specialization(mealy, mealy_min_rs, True)) - assert(spot.are_equivalent(mealy_min_ks, mealy_min_rs)) + tc.assertTrue(spot.is_split_mealy_specialization(mealy, mealy_min_rs, True)) + tc.assertTrue(spot.are_equivalent(mealy_min_ks, mealy_min_rs)) # Testing bisimulation (with output assignment) @@ -515,15 +517,15 @@ spot.set_synthesis_outputs(aut, & buddy.bdd_ithvar( aut.register_ap("u02alarm29control0f1d2alarm29turn2off1b"))) min_equiv = spot.reduce_mealy(aut, False) -assert min_equiv.num_states() == 6 -assert spot.are_equivalent(min_equiv, aut) +tc.assertEqual(min_equiv.num_states(), 6) +tc.assertTrue(spot.are_equivalent(min_equiv, aut)) # Build an automaton that recognizes a subset of the language of the original # automaton 
min_sub = spot.reduce_mealy(aut, True) -assert min_sub.num_states() == 5 +tc.assertEqual(min_sub.num_states(), 5) prod = spot.product(spot.complement(aut), min_sub) -assert spot.generic_emptiness_check(prod) +tc.assertTrue(spot.generic_emptiness_check(prod)) aut = spot.automaton(""" HOA: v1 @@ -564,7 +566,7 @@ State: 0 # An example that shows that we should not build a tree when we use inclusion. res = spot.reduce_mealy(aut, True) -assert res.to_str() == exp +tc.assertEqual(res.to_str(), exp) aut = spot.automaton(""" HOA: v1 @@ -608,4 +610,71 @@ State: 1 --END--""" res = spot.reduce_mealy(aut, True) -assert res.to_str() == exp +tc.assertEqual(res.to_str(), exp) + +exp = """digraph "" { + rankdir=LR + node [shape="circle"] + I [label="", style=invis, width=0] + I -> 0 + 0 [label="0"] + 0 -> 1 [label=""] + 0 -> 1 [label=""] + 0 -> 1 [label=""] + 1 [label="1"] + 1 -> 1 [label=""] +} +""" +tc.assertEqual(res.to_str("dot", "g"), exp) + +# assertion bug: original machine is not always +# correctly split before testing inside minimize_mealy +aut = spot.automaton("""HOA: v1 +States: 2 +Start: 0 +AP: 11 "u0accel0accel" "u0accel0f1dcon23p81b" "u0accel0f1dcon231b" + "u0gear0f1dmin0f1dcon61b0f1dadd0gear0f1dcon241b1b1b" + "u0gear0gear" "u0gear0f1dmax0f1dcon241b0f1dsub0gear0f1dcon241b1b1b" + "u0steer0f1dsteering0angle0trackpos1b" "u0steer0steer" + "p0p0gt0rpm0f1dcon5523231b" "p0p0lt0rpm0f1dcon32323231b" + "p0p0lt0speed0f1dsub0target2speed0f1dmultp0f1dabs0steer1b0f1dcon248881b1b1b" +acc-name: all +Acceptance: 0 t +properties: trans-labels explicit-labels state-acc deterministic +controllable-AP: 0 1 2 3 4 5 6 7 +--BODY-- +State: 0 +[!0&!1&2&!3&4&!5&6&!7&!8&!9&!10] 0 +[!0&1&!2&!3&4&!5&6&!7&!8&!9&10] 0 +[!0&!1&2&!3&!4&5&6&!7&!8&9&!10] 0 +[!0&1&!2&!3&!4&5&6&!7&!8&9&10] 0 +[!0&!1&2&3&!4&!5&6&!7&8&!9&!10] 0 +[!0&1&!2&3&!4&!5&6&!7&8&!9&10] 0 +[!0&!1&2&!3&!4&5&!6&7&8&9 | !0&!1&2&!3&!4&5&6&!7&8&9 | !0&!1&2&!3&4&!5&!6&7&8&9 + | !0&!1&2&!3&4&!5&6&!7&8&9 | !0&!1&2&3&!4&!5&!6&7&8&9 + | !0&!1&2&3&!4&!5&6&!7&8&9 | !0&1&!2&!3&!4&5&!6&7&8&9 + | !0&1&!2&!3&!4&5&6&!7&8&9 | !0&1&!2&!3&4&!5&!6&7&8&9 + | !0&1&!2&!3&4&!5&6&!7&8&9 | !0&1&!2&3&!4&!5&!6&7&8&9 + | !0&1&!2&3&!4&!5&6&!7&8&9 | 0&!1&!2&!3&!4&5&!6&7&8&9 + | 0&!1&!2&!3&!4&5&6&!7&8&9 | 0&!1&!2&!3&4&!5&!6&7&8&9 + | 0&!1&!2&!3&4&!5&6&!7&8&9 | 0&!1&!2&3&!4&!5&!6&7&8&9 + | 0&!1&!2&3&!4&!5&6&!7&8&9] 1 +State: 1 +[!0&!1&2&!3&!4&5&!6&7 | !0&!1&2&!3&!4&5&6&!7 | !0&!1&2&!3&4&!5&!6&7 + | !0&!1&2&!3&4&!5&6&!7 | !0&!1&2&3&!4&!5&!6&7 + | !0&!1&2&3&!4&!5&6&!7 | !0&1&!2&!3&!4&5&!6&7 + | !0&1&!2&!3&!4&5&6&!7 | !0&1&!2&!3&4&!5&!6&7 + | !0&1&!2&!3&4&!5&6&!7 | !0&1&!2&3&!4&!5&!6&7 + | !0&1&!2&3&!4&!5&6&!7 | 0&!1&!2&!3&!4&5&!6&7 + | 0&!1&!2&!3&!4&5&6&!7 | 0&!1&!2&!3&4&!5&!6&7 + | 0&!1&!2&!3&4&!5&6&!7 | 0&!1&!2&3&!4&!5&!6&7 + | 0&!1&!2&3&!4&!5&6&!7] 1 +--END--""") + +spot.minimize_mealy(aut, -1) +spot.minimize_mealy(aut, 0) +spot.minimize_mealy(aut, 1) +auts = spot.split_2step(aut) +spot.minimize_mealy(auts, -1) +spot.minimize_mealy(auts, 0) +spot.minimize_mealy(auts, 1) \ No newline at end of file diff --git a/tests/python/merge.py b/tests/python/merge.py index c56d8f309..893916953 100644 --- a/tests/python/merge.py +++ b/tests/python/merge.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. 
If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -39,7 +41,7 @@ State: 2 out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -54,8 +56,8 @@ State: 1 [1] 2 {0} State: 2 [1] 0 ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 3 @@ -75,7 +77,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -90,7 +92,7 @@ State: 1 [1] 2 {0} State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -111,7 +113,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -126,7 +128,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -146,7 +148,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -161,7 +163,7 @@ State: 1 [1] 2 {1} State: 2 [1] 0 {0} ---END--""" +--END--""") aut = spot.automaton(""" HOA: v1 @@ -182,7 +184,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -197,7 +199,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -217,7 +219,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -232,7 +234,7 @@ State: 1 {0} [1] 2 State: 2 {0} [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -252,7 +254,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -267,7 +269,7 @@ State: 1 {0} [1] 2 State: 2 {0} [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -287,7 +289,7 @@ State: 2 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -301,7 +303,7 @@ State: 1 {1} [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 4 @@ -335,7 +337,7 @@ State: 3 {1 3} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 4 Start: 0 AP: 2 "a" "b" @@ -364,7 +366,7 @@ State: 3 {1} [0&!1] 0 [!0&1] 3 [0&1] 2 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -388,7 +390,7 @@ State: 2 out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -406,8 +408,8 @@ State: 1 State: 2 [0] 2 {0} [!0] 1 {0} ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 4 @@ -435,7 +437,7 @@ State: 3 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 4 Start: 0 AP: 2 "p0" "p1" @@ -457,7 +459,7 @@ State: 3 [0&1] 0 {1} [0&!1] 3 {1 2} [!0] 1 {3} ---END--""" +--END--""") aut = 
spot.automaton("""HOA: v1 States: 1 @@ -475,7 +477,7 @@ State: 0 {1 2} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -486,7 +488,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -506,7 +508,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -519,7 +521,7 @@ State: 0 [!0] 1 {2} State: 1 [t] 1 {1 2} ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 1 @@ -536,7 +538,7 @@ State: 0 {0 1 3} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -547,7 +549,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -568,7 +570,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -583,7 +585,7 @@ State: 0 State: 1 [0] 1 [!0] 0 {1} ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 2 @@ -602,7 +604,7 @@ State: 1 {1} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -615,7 +617,7 @@ State: 0 [t] 1 State: 1 [t] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -636,7 +638,7 @@ State: 2 {2} spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -650,7 +652,7 @@ State: 1 {0} [t] 1 State: 2 {2} [t] 1 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -672,7 +674,7 @@ State: 2 {1 2 3} out = spot.simplify_acceptance(aut) hoa = out.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "p0" "p1" @@ -687,8 +689,8 @@ State: 1 {1} [t] 2 State: 2 {0 1} [t] 1 ---END--""" -assert spot.are_equivalent(out, aut) +--END--""") +tc.assertTrue(spot.are_equivalent(out, aut)) aut = spot.automaton("""HOA: v1 States: 2 @@ -708,7 +710,7 @@ State: 1 spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 2 Start: 0 AP: 2 "p0" "p1" @@ -722,7 +724,7 @@ State: 0 State: 1 [0] 1 [!0] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -740,7 +742,7 @@ State: 2 --END--""") spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -755,7 +757,7 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -773,7 +775,7 @@ State: 2 --END--""") spot.simplify_acceptance_here(aut) hoa = aut.to_str('hoa') -assert hoa == """HOA: v1 +tc.assertEqual(hoa, """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -788,4 +790,4 @@ State: 1 [1] 2 State: 2 [1] 0 ---END--""" +--END--""") diff --git a/tests/python/mergedge.py b/tests/python/mergedge.py index e55bdabf2..b3e934946 100644 --- a/tests/python/mergedge.py +++ b/tests/python/mergedge.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020, 2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2020-2022 Laboratoire de Recherche et 
Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -20,12 +20,14 @@ import spot +from unittest import TestCase +tc = TestCase() aut = spot.automaton("""HOA: v1 States: 1 Start: 0 AP: 1 "a" Acceptance: 1 Inf(0) --BODY-- State: 0 [0] 0 [0] 0 {0} --END--""") -assert aut.num_edges() == 2 +tc.assertEqual(aut.num_edges(), 2) aut.merge_edges() -assert aut.num_edges() == 1 +tc.assertEqual(aut.num_edges(), 1) aut = spot.automaton(""" HOA: v1 @@ -44,125 +46,126 @@ State: 1 [0 | 1] 1 [0&!1] 1 {0} --END--""") -assert aut.num_edges() == 5 +tc.assertEqual(aut.num_edges(), 5) aut.merge_edges() -assert aut.num_edges() == 5 -assert not spot.is_deterministic(aut) +tc.assertEqual(aut.num_edges(), 5) +tc.assertFalse(spot.is_deterministic(aut)) aut = spot.split_edges(aut) -assert aut.num_edges() == 9 +tc.assertEqual(aut.num_edges(), 9) aut.merge_edges() -assert aut.num_edges() == 5 -assert spot.is_deterministic(aut) +tc.assertEqual(aut.num_edges(), 5) +tc.assertTrue(spot.is_deterministic(aut)) -aut = spot.automaton(""" -HOA: v1 -States: 3 -Start: 0 -AP: 1 "a" -acc-name: Buchi -Acceptance: 1 Inf(0) -properties: trans-labels explicit-labels trans-acc complete ---BODY-- -State: 0 -[!0] 1 {0} -[0] 2 {0} -State: 1 -[!0] 1 {0} -[0] 1 -State: 2 -[!0] 2 {0} -[0] 1 ---END--""") -aut.merge_states() -assert aut.num_edges() == 4 -assert aut.num_states() == 2 -assert spot.is_deterministic(aut) -assert aut.prop_complete() -aut.merge_states() -assert aut.num_edges() == 4 -assert aut.num_states() == 2 -assert spot.is_deterministic(aut) -assert aut.prop_complete() +for nthread in range(1, 16, 2): + aut = spot.automaton(""" + HOA: v1 + States: 3 + Start: 0 + AP: 1 "a" + acc-name: Buchi + Acceptance: 1 Inf(0) + properties: trans-labels explicit-labels trans-acc complete + --BODY-- + State: 0 + [!0] 1 {0} + [0] 2 {0} + State: 1 + [!0] 1 {0} + [0] 1 + State: 2 + [!0] 2 {0} + [0] 1 + --END--""") + aut.merge_states(nthread) + tc.assertEqual(aut.num_edges(), 4) + tc.assertEqual(aut.num_states(), 2) + tc.assertTrue(spot.is_deterministic(aut)) + tc.assertTrue(aut.prop_complete()) + aut.merge_states(nthread) + tc.assertEqual(aut.num_edges(), 4) + tc.assertEqual(aut.num_states(), 2) + tc.assertTrue(spot.is_deterministic(aut)) + tc.assertTrue(aut.prop_complete()) -aa = spot.automaton(""" -HOA: v1 States: 41 Start: 0 AP: 3 "allfinished" "finished_0" -"finished_1" acc-name: parity max odd 4 Acceptance: 4 Inf(3) | (Fin(2) -& (Inf(1) | Fin(0))) properties: trans-labels explicit-labels -trans-acc colored properties: deterministic --BODY-- State: 0 -[!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [!0&1&!2] 3 {1} [!0&1&2] 4 {1} -[0&!1&!2] 5 {1} [0&!1&2] 6 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} State: 1 -[!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} -[!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} -State: 2 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} -State: 3 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} -State: 4 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} -[0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 {1} -State: 5 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 -{1} [!0&1&!2] 20 {1} State: 6 [0&1&2] 8 {1} [!0&1&2] 10 {1} [!0&!1&2] -19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 21 {1} State: 7 [0&1&2] 8 {3} -[!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 22 {1} -State: 8 
[!0&!1&!2] 5 {1} [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 -{1} [!0&1&!2] 20 {1} State: 9 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] 8 -{1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 {1} -[!0&!1&2] 24 {1} State: 10 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 -{3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {1} -[!0&!1&!2] 25 {1} State: 11 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 -{1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 12 [0&1&2] 8 {3} -[!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 27 {1} [!0&!1&!2] 28 {1} -State: 13 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} -State: 14 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} -[0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} -State: 15 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} -[0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} -State: 16 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 -{1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 -{1} State: 17 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] -11 {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 -{1} State: 18 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} -[!0&!1&2] 19 {1} [!0&1&!2] 20 {1} State: 19 [0&1&!2] 7 {3} [0&1&2] 8 -{3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} [!0&1&2] 32 -{1} State: 20 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 {1} [!0&1&2] -32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 21 [0&1&2] 8 {1} -[!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} -State: 22 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 -{1} [!0&!1&!2] 35 {1} State: 23 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] -8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 -{1} [!0&!1&2] 24 {1} State: 24 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] -8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 -{1} [!0&!1&!2] 25 {1} State: 25 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] -8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 -{1} [!0&!1&!2] 25 {1} State: 26 [0&1&2] 8 {3} [!0&1&2] 10 {1} -[!0&!1&2] 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 27 [0&1&2] -8 {3} [0&!1&2] 12 {3} [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 -{1} [!0&!1&2] 37 {1} State: 28 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] -19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 29 [0&!1&!2] 5 {3} -[0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] -14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} State: 30 [0&1&!2] 7 {3} -[0&1&2] 8 {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} -[!0&1&2] 32 {1} State: 31 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} -[0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} -[!0&!1&!2] 38 {1} State: 32 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 -{3} [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} -[!0&!1&!2] 39 {1} State: 33 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 -{1} [!0&1&2] 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 34 -[0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} -[!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 {1} -State: 35 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 -{1} [!0&!1&!2] 35 {1} State: 36 [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 {1} [!0&!1&2] 37 {1} -State: 37 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {2} 
[!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} -State: 38 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} [!0&!1&!2] 38 {1} -State: 39 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} -[!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} -State: 40 [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 -{1} [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 -{1} --END--""") -aa.merge_states() -# This used to cause a segfault reported by Philipp. -print(aa.to_str()) + aa = spot.automaton(""" + HOA: v1 States: 41 Start: 0 AP: 3 "allfinished" "finished_0" + "finished_1" acc-name: parity max odd 4 Acceptance: 4 Inf(3) | (Fin(2) + & (Inf(1) | Fin(0))) properties: trans-labels explicit-labels + trans-acc colored properties: deterministic --BODY-- State: 0 + [!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [!0&1&!2] 3 {1} [!0&1&2] 4 {1} + [0&!1&!2] 5 {1} [0&!1&2] 6 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} State: 1 + [!0&!1&!2] 1 {1} [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} + [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} + State: 2 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} + State: 3 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} + State: 4 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 {1} + [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 {1} + State: 5 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 + {1} [!0&1&!2] 20 {1} State: 6 [0&1&2] 8 {1} [!0&1&2] 10 {1} [!0&!1&2] + 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 21 {1} State: 7 [0&1&2] 8 {3} + [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} [!0&!1&!2] 22 {1} + State: 8 [!0&!1&!2] 5 {1} [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 + {1} [!0&1&!2] 20 {1} State: 9 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] 8 + {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 {1} + [!0&!1&2] 24 {1} State: 10 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 + {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {1} + [!0&!1&!2] 25 {1} State: 11 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 + {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 12 [0&1&2] 8 {3} + [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 27 {1} [!0&!1&!2] 28 {1} + State: 13 [!0&!1&2] 2 {1} [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 13 {1} [!0&1&!2] 14 {1} + State: 14 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} + [0&!1&2] 12 {3} [!0&1&!2] 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} + State: 15 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} + [0&!1&!2] 11 {1} [0&!1&2] 12 {1} [!0&!1&!2] 15 {1} [!0&!1&2] 16 {1} + State: 16 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] 11 + {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 + {1} State: 17 [0&1&!2] 7 {1} [0&1&2] 8 {1} [!0&1&2] 10 {1} [0&!1&!2] + 11 {1} [0&!1&2] 12 {1} [!0&1&!2] 14 {1} [!0&!1&2] 16 {1} [!0&!1&!2] 17 + {1} State: 18 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} + [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} State: 19 [0&1&!2] 7 {3} [0&1&2] 8 + {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} [!0&1&2] 32 + {1} State: 20 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 {1} [!0&1&2] + 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 21 [0&1&2] 8 {1} + [!0&1&2] 10 {1} [!0&!1&!2] 18 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 {1} + State: 22 
[0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 + {1} [!0&!1&!2] 35 {1} State: 23 [0&!1&!2] 5 {1} [0&1&!2] 7 {1} [0&1&2] + 8 {1} [!0&1&!2] 9 {1} [!0&1&2] 10 {1} [0&!1&2] 12 {1} [!0&!1&!2] 23 + {1} [!0&!1&2] 24 {1} State: 24 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] + 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 + {1} [!0&!1&!2] 25 {1} State: 25 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] + 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] 14 {2} [!0&!1&2] 24 + {1} [!0&!1&!2] 25 {1} State: 26 [0&1&2] 8 {3} [!0&1&2] 10 {1} + [!0&!1&2] 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 27 [0&1&2] + 8 {3} [0&!1&2] 12 {3} [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 + {1} [!0&!1&2] 37 {1} State: 28 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] + 19 {1} [!0&!1&!2] 26 {1} [!0&1&!2] 27 {1} State: 29 [0&!1&!2] 5 {3} + [0&1&!2] 7 {3} [0&1&2] 8 {3} [!0&1&2] 10 {2} [0&!1&2] 12 {3} [!0&1&!2] + 14 {1} [!0&!1&2] 24 {2} [!0&!1&!2] 29 {1} State: 30 [0&1&!2] 7 {3} + [0&1&2] 8 {3} [!0&!1&2] 19 {1} [!0&!1&!2] 30 {1} [!0&1&!2] 31 {1} + [!0&1&2] 32 {1} State: 31 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} + [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} + [!0&!1&!2] 38 {1} State: 32 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 + {3} [0&!1&2] 12 {3} [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} + [!0&!1&!2] 39 {1} State: 33 [0&1&2] 8 {3} [0&!1&2] 12 {1} [!0&1&!2] 20 + {1} [!0&1&2] 32 {1} [!0&!1&!2] 33 {1} [!0&!1&2] 34 {1} State: 34 + [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 {1} + [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 {1} + State: 35 [0&1&2] 8 {3} [!0&1&2] 10 {1} [!0&!1&2] 19 {1} [!0&1&!2] 20 + {1} [!0&!1&!2] 35 {1} State: 36 [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 27 {1} [!0&1&2] 32 {1} [!0&!1&!2] 36 {1} [!0&!1&2] 37 {1} + State: 37 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} + State: 38 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {1} [!0&1&2] 32 {2} [!0&!1&2] 37 {2} [!0&!1&!2] 38 {1} + State: 39 [0&!1&!2] 5 {3} [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&2] 12 {3} + [!0&1&!2] 31 {2} [!0&1&2] 32 {2} [!0&!1&2] 37 {1} [!0&!1&!2] 39 {1} + State: 40 [0&1&!2] 7 {3} [0&1&2] 8 {3} [0&!1&!2] 11 {1} [0&!1&2] 12 + {1} [!0&1&!2] 31 {1} [!0&1&2] 32 {1} [!0&!1&2] 34 {1} [!0&!1&!2] 40 + {1} --END--""") + aa.merge_states(nthread) + # This used to cause a segfault reported by Philipp. + print(aa.to_str()) diff --git a/tests/python/misc-ec.py b/tests/python/misc-ec.py index d1234bd69..85d4aaa47 100644 --- a/tests/python/misc-ec.py +++ b/tests/python/misc-ec.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2020 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,9 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() + aut = spot.translate("G(p0 | (p0 R Xp0) | XF(!p0 & p1))", 'Buchi', 'SBAcc') ec = spot.make_emptiness_check_instantiator('SE05')[0].instantiate(aut) n = 0 @@ -27,7 +30,7 @@ while True: break print(res.accepting_run()) n += 1 -assert n == 2 +tc.assertEqual(n, 2) for name in ['SE05', 'CVWY90', 'GV04']: aut = spot.translate("GFa && GFb") @@ -35,13 +38,13 @@ for name in ['SE05', 'CVWY90', 'GV04']: ec = spot.make_emptiness_check_instantiator(name)[0].instantiate(aut) print(ec.check().accepting_run()) except RuntimeError as e: - assert "Büchi or weak" in str(e) + tc.assertIn("Büchi or weak", str(e)) aut = spot.translate("a", 'monitor') try: ec = spot.make_emptiness_check_instantiator('Tau03')[0].instantiate(aut) except RuntimeError as e: - assert "at least one" in str(e) + tc.assertIn("at least one", str(e)) aut = spot.translate("a", 'ba') ec = spot.make_emptiness_check_instantiator('Tau03')[0].instantiate(aut) diff --git a/tests/python/optionmap.py b/tests/python/optionmap.py index 667ef0b19..ad526f510 100755 --- a/tests/python/optionmap.py +++ b/tests/python/optionmap.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2012, 2018 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) 2010, 2012, 2018, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6), # département Systèmes Répartis Coopératifs (SRC), Université Pierre # et Marie Curie. @@ -21,65 +21,67 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() o = spot.option_map() res = o.parse_options("optA, opta=2M, optb =4 ; optB = 7\ , optC= 10") -assert not res +tc.assertFalse(res) -assert o.get('optA') == 1 -assert o.get('opta') == 2*1024*1024 -assert o.get('optb') == 4 -assert o.get('optB') == 7 -assert o.get('optC') == 10 -assert o.get('none') == 0 -assert o.get('none', 16) == 16 +tc.assertEqual(o.get('optA'), 1) +tc.assertEqual(o.get('opta'), 2*1024*1024) +tc.assertEqual(o.get('optb'), 4) +tc.assertEqual(o.get('optB'), 7) +tc.assertEqual(o.get('optC'), 10) +tc.assertEqual(o.get('none'), 0) +tc.assertEqual(o.get('none', 16), 16) o.set('optb', 40) -assert o.get('optb') == 40 +tc.assertEqual(o.get('optb'), 40) res = o.parse_options("!optA !optb optC, !optB") -assert not res -assert o.get('optA') == 0 -assert o.get('opta') == 2*1024*1024 -assert o.get('optb') == 0 -assert o.get('optB') == 0 -assert o.get('optC') == 1 +tc.assertFalse(res) +tc.assertEqual(o.get('optA'), 0) +tc.assertEqual(o.get('opta'), 2*1024*1024) +tc.assertEqual(o.get('optb'), 0) +tc.assertEqual(o.get('optB'), 0) +tc.assertEqual(o.get('optC'), 1) res = o.parse_options("!") -assert res == "!" 
+tc.assertEqual(res, "!") res = o.parse_options("foo, !opt = 1") -assert res == "!opt = 1" +tc.assertEqual(res, "!opt = 1") res = o.parse_options("foo=3, opt == 1") -assert res == "opt == 1" +tc.assertEqual(res, "opt == 1") res = o.parse_options("foo=3opt == 1") -assert res == "3opt == 1" +tc.assertEqual(res, "3opt == 1") aut1 = spot.translate('GF(a <-> XXa)', 'det') -assert aut1.num_states() == 4 +tc.assertEqual(aut1.num_states(), 4) aut2 = spot.translate('GF(a <-> XXa)', 'det', xargs='gf-guarantee=0') -assert aut2.num_states() == 9 +tc.assertEqual(aut2.num_states(), 9) try: spot.translate('GF(a <-> XXa)', 'det', xargs='foobar=1') except RuntimeError as e: - assert "option 'foobar' was not used" in str(e) + tc.assertIn("option 'foobar' was not used", str(e)) else: raise RuntimeError("missing exception") try: spot.translate('GF(a <-> XXa)').postprocess('det', xargs='gf-guarantee=0') except RuntimeError as e: - assert "option 'gf-guarantee' was not used" in str(e) + tc.assertIn("option 'gf-guarantee' was not used", str(e)) else: raise RuntimeError("missing exception") try: spot.translate('GF(a <-> XXa)').postprocess('det', xargs='gf-guarantee=x') except RuntimeError as e: - assert "failed to parse option at: 'gf-guarantee=x'" in str(e) + tc.assertIn("failed to parse option at: 'gf-guarantee=x'", str(e)) else: raise RuntimeError("missing exception") diff --git a/tests/python/origstate.py b/tests/python/origstate.py index 0ca013889..15a7ab0ad 100644 --- a/tests/python/origstate.py +++ b/tests/python/origstate.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2017, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import spot from sys import exit +from unittest import TestCase +tc = TestCase() aut = spot.automaton(""" HOA: v1 @@ -38,7 +40,7 @@ State: 1 """) aut2 = spot.degeneralize(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -56,10 +58,10 @@ State: 1 [1] 2 State: 2 {0} [1] 2 ---END--""" +--END--""") aut2.copy_state_names_from(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -77,7 +79,7 @@ State: 1 "0#0" [1] 2 State: 2 "1#1" {0} [1] 2 ---END--""" +--END--""") aut2.set_init_state(2) aut2.purge_unreachable_states() @@ -93,16 +95,16 @@ properties: deterministic State: 0 "1#1" {0} [1] 0 --END--""" -assert aut2.to_str() == ref +tc.assertEqual(aut2.to_str(), ref) # This makes sure that the original-states vector has also been renamed. aut2.copy_state_names_from(aut) -assert aut2.to_str() == ref +tc.assertEqual(aut2.to_str(), ref) aut2 = spot.degeneralize(aut) aut2.release_named_properties() try: aut2.copy_state_names_from(aut) except RuntimeError as e: - assert "state does not exist in source automaton" in str(e) + tc.assertIn("state does not exist in source automaton", str(e)) else: exit(1) diff --git a/tests/python/otfcrash.py b/tests/python/otfcrash.py index 69acbcb1a..8e30cb501 100644 --- a/tests/python/otfcrash.py +++ b/tests/python/otfcrash.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. 
@@ -23,6 +23,8 @@ import spot.aux import tempfile import shutil import sys +from unittest import TestCase +tc = TestCase() spot.ltsmin.require('divine') @@ -51,4 +53,4 @@ system async; p = spot.otf_product(k, a) return p.is_empty() - assert(modelcheck('X "R.found"', m) == True) + tc.assertTrue(modelcheck('X "R.found"', m)) diff --git a/tests/python/parity.ipynb b/tests/python/parity.ipynb index 7323717da..56d6af350 100644 --- a/tests/python/parity.ipynb +++ b/tests/python/parity.ipynb @@ -72,9 +72,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Of course the case of parity automata with a single color is a bit degenerate, as the same formula correspond to two parity conditions with different kinds. \n", + "Of course the case of parity automata with a single color is a bit degenerate, as the same formula corresponds to two parity conditions of different kinds. \n", "\n", - "In addition the the above, an automaton is said to be **colored** if each of its edges (or states) has exactly one color. Automata that people usually call *parity automata* correspond in Spot to *colored* automata with *parity acceptance*. For this reason try to use the term *automata with parity acceptance* rather than *parity automata* for automata that are not *colored*." + "In addition to the above, an automaton is said to be **colored** if each of its edges (or states) has exactly one color. Automata that people usually call *parity automata* correspond in Spot to *colored* automata with *parity acceptance*. For this reason try to use the term *automata with parity acceptance* rather than *parity automata* for automata that are not *colored*." ] }, { @@ -3009,11 +3009,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[co-Büchi]\n", + "\n", + "\n", + "\n", + "[co-Büchi]\n", "\n", "\n", "\n", @@ -3237,11 +3237,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -4223,14 +4223,15 @@ "\n", "# Reduce parity\n", "\n", - "The `reduce_parity()` function is a more elaborate version of `cleanup_parity()`. It implements an algorithm by Carton and Maceiras (*Computing the Rabin index of a parity automaton*, Informatique théorique et applications, 1999), to obtain the minimal parity acceptance condition for a given automaton. Why the original algorithm assume *max odd* parity, this version with work with the four types of parity acceptance. It will only try to preserve the kind (max/min) and may change the style if it allows saving one color. Furthermore, it can colorize (or uncolorize) automata at the same time,\n", + "The `reduce_parity()` function is a more elaborate version of `cleanup_parity()`. It implements an algorithm by Carton and Maceiras (*Computing the Rabin index of a parity automaton*, Informatique théorique et applications, 1999), to obtain the minimal parity acceptance condition for a given automaton. While the original algorithm assumes *max odd* parity, this version works with the four types of parity acceptance. It will only try to preserve the kind (max/min) and may change the style if it allows saving one color. Furthermore, it can colorize (or uncolorize) automata at the same time,\n", "making it a very nice replacement for both `cleanup_parity()` and `colorize_parity()`.\n", "\n", - "It takes two arguments:\n", + "It takes three arguments:\n", "1. the automaton whose parity acceptance condition should be reduced\n", "2. 
a Boolean indicating whether the output should be colored (`True`), or if transition with no color can be used (`False`).\n", + "3. a Boolean indicating whether the output should be layered, i.e., in a max parity automaton, the color of a transition should be the maximal color visited by all cycles going through it.\n", "\n", - "By default, the second argument is `False`, because acceptance sets is a scarse ressource in Spot." + "By default, the second argument is `False`, because acceptance sets are a scarce resource in Spot. The third argument also defaults to `False`, but for empirical reasons: adding more colors like this tends to hinder simulation-based reductions." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { - "text/html": [ - "
\n", + "image/svg+xml": [ + "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", - "Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & Inf(\n", - "\n", - ")))\n", - "[parity max even 4]\n", - "\n", - "\n", - "\n", - "0\n", - "\n", - "0\n", - "\n", - "\n", - "\n", - "I->0\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "2\n", - "\n", - "2\n", - "\n", - "\n", - "\n", - "0->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", - "\n", - "\n", - "\n", - "0->3\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "2->1\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "3->0\n", - "\n", - "\n", - "p0 & !p1\n", - "\n", - "\n", - "\n", - "\n", - "3->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", - "\n", - "3->3\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "\n", - "1->0\n", - "\n", - "\n", - "!p0 & p1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1->2\n", - "\n", - "\n", - "!p0 & !p1\n", - "\n", - "\n", - "\n", "
\n", "\n", @@ -5188,17 +5074,263 @@ }, "metadata": {}, "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & Inf(\n", + "\n", + ")\n", + "[parity max even 2]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ") & (Inf(\n", + "\n", + ") | Fin(\n", + "\n", + "))\n", + "[parity max odd 3]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "3->0\n", + "\n", + "\n", + "p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "3->3\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->0\n", + "\n", + "\n", + "!p0 & p1\n", + "\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "!p0 & !p1\n", + "\n", + "\n", + "\n", + "\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ - "display2(maxeven4, spot.reduce_parity(maxeven4))\n", - "display2(maxeven4, spot.reduce_parity(maxeven4, True))" + "display(maxeven4)\n", + "display2(spot.reduce_parity(maxeven4), spot.reduce_parity(maxeven4, True))\n", + "display2(spot.reduce_parity(maxeven4, False, True), spot.reduce_parity(maxeven4, True, True))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -5212,7 +5344,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.2" + "version": "3.10.7" } }, "nbformat": 4, diff --git a/tests/python/parity.py b/tests/python/parity.py index b0389c40e..6ced51c40 100644 --- a/tests/python/parity.py +++ b/tests/python/parity.py @@ -19,36 +19,38 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() max_even_5 = spot.acc_code.parity(True, False, 5) -assert max_even_5 == spot.acc_code.parity_max_even(5) -assert max_even_5 == spot.acc_code.parity_max(False, 5) +tc.assertEqual(max_even_5, spot.acc_code.parity_max_even(5)) +tc.assertEqual(max_even_5, spot.acc_code.parity_max(False, 5)) min_even_5 = spot.acc_code.parity(False, False, 5) -assert min_even_5 == spot.acc_code.parity_min_even(5) -assert min_even_5 == spot.acc_code.parity_min(False, 5) +tc.assertEqual(min_even_5, spot.acc_code.parity_min_even(5)) +tc.assertEqual(min_even_5, spot.acc_code.parity_min(False, 5)) max_odd_5 = spot.acc_code.parity(True, True, 5) -assert max_odd_5 == spot.acc_code.parity_max_odd(5) -assert max_odd_5 == spot.acc_code.parity_max(True, 5) +tc.assertEqual(max_odd_5, spot.acc_code.parity_max_odd(5)) +tc.assertEqual(max_odd_5, spot.acc_code.parity_max(True, 5)) min_odd_5 = spot.acc_code.parity(False, True, 5) -assert min_odd_5 == spot.acc_code.parity_min_odd(5) -assert min_odd_5 == spot.acc_code.parity_min(True, 5) +tc.assertEqual(min_odd_5, spot.acc_code.parity_min_odd(5)) +tc.assertEqual(min_odd_5, spot.acc_code.parity_min(True, 5)) for f in ('FGa', 'GFa & GFb & FGc', 'XXX(a U b)'): a1 = spot.translate(f, 'parity') - assert a1.acc().is_parity() + tc.assertTrue(a1.acc().is_parity()) a2 = spot.translate(f).postprocess('parity') - assert a2.acc().is_parity() + tc.assertTrue(a2.acc().is_parity()) a3 = spot.translate(f, 'det').postprocess('parity', 'colored') - assert a3.acc().is_parity() - assert spot.is_colored(a3) + tc.assertTrue(a3.acc().is_parity()) + tc.assertTrue(spot.is_colored(a3)) a = spot.translate('GFa & GFb') try: spot.change_parity_here(a, spot.parity_kind_same, spot.parity_style_even) except RuntimeError as e: - assert 'input should have parity acceptance' in str(e) + tc.assertIn('input should have parity acceptance', str(e)) else: exit(2) @@ -64,7 +66,7 @@ State: 0 --END-- """) spot.cleanup_parity_here(a) -assert a.to_str() == """HOA: v1 +tc.assertEqual(a.to_str(), """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -75,7 +77,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") a = spot.automaton(""" HOA: v1 @@ -89,7 +91,7 @@ State: 0 --END-- """) spot.cleanup_parity_here(a) -assert a.to_str() == """HOA: v1 +tc.assertEqual(a.to_str(), """HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -100,7 +102,7 @@ properties: deterministic --BODY-- State: 0 [t] 0 ---END--""" +--END--""") a = 
spot.automaton("""HOA: v1 States: 3 @@ -120,39 +122,39 @@ State: 2 try: spot.get_state_players(a) except RuntimeError as e: - assert "not a game" in str(e) + tc.assertIn("not a game", str(e)) else: report_missing_exception() try: spot.set_state_player(a, 1, True) except RuntimeError as e: - assert "Can only" in str(e) + tc.assertIn("Can only", str(e)) else: report_missing__exception() spot.set_state_players(a, (False, True, False)) -assert spot.get_state_player(a, 0) == False -assert spot.get_state_player(a, 1) == True -assert spot.get_state_player(a, 2) == False +tc.assertEqual(spot.get_state_player(a, 0), False) +tc.assertEqual(spot.get_state_player(a, 1), True) +tc.assertEqual(spot.get_state_player(a, 2), False) try: spot.set_state_players(a, [True, False, False, False]) except RuntimeError as e: - assert "many owners as states" in str(e) + tc.assertIn("many owners as states", str(e)) else: report_missing_exception() try: spot.get_state_player(a, 4) except RuntimeError as e: - assert "invalid state number" in str(e) + tc.assertIn("invalid state number", str(e)) else: report_missing_exception() try: spot.set_state_player(a, 4, True) except RuntimeError as e: - assert "invalid state number" in str(e) + tc.assertIn("invalid state number", str(e)) else: report_missing_exception() @@ -168,4 +170,4 @@ oi.erase() # postprocess used to call reduce_parity that did not # work correctly on automata with deleted edges. sm = a.postprocess("gen", "small") -assert sm.num_states() == 3 +tc.assertEqual(sm.num_states(), 3) diff --git a/tests/python/parsetgba.py b/tests/python/parsetgba.py index cbcacb183..038b33a19 100755 --- a/tests/python/parsetgba.py +++ b/tests/python/parsetgba.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2012, 2014, 2015 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2012, 2014, 2015, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import os import spot +from unittest import TestCase +tc = TestCase() contents = ''' HOA: v1 name: "a U b" States: 2 Start: 1 AP: 2 "a" "b" acc-name: Buchi @@ -34,7 +36,7 @@ out.close() a = spot.parse_aut(filename, spot.make_bdd_dict()) -assert not a.errors +tc.assertFalse(a.errors) spot.print_dot(spot.get_cout(), a.aut) diff --git a/tests/python/pdegen.py b/tests/python/pdegen.py index 02150d375..00f3df7e0 100644 --- a/tests/python/pdegen.py +++ b/tests/python/pdegen.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019, 2020, 2021 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2019, 2020, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
# @@ -23,6 +23,8 @@ import spot +from unittest import TestCase +tc = TestCase() a, b, d, f = spot.automata(""" HOA: v1 @@ -73,19 +75,19 @@ State: 1 --END-- """) -assert spot.is_partially_degeneralizable(a) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(a), [0, 1]) da = spot.partial_degeneralize(a, [0, 1]) -assert da.equivalent_to(a) -assert da.num_states() == 2 +tc.assertTrue(da.equivalent_to(a)) +tc.assertEqual(da.num_states(), 2) -assert spot.is_partially_degeneralizable(b) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(b), [0, 1]) db = spot.partial_degeneralize(b, [0, 1]) -assert db.equivalent_to(b) -assert db.num_states() == 3 +tc.assertTrue(db.equivalent_to(b)) +tc.assertEqual(db.num_states(), 3) db.copy_state_names_from(b) dbhoa = db.to_str('hoa') -assert dbhoa == """HOA: v1 +tc.assertEqual(dbhoa, """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -99,28 +101,28 @@ State: 1 "0#0" {0 1} [0] 2 State: 2 "1#0" {1} [0] 1 ---END--""" +--END--""") c = spot.automaton("randaut -A'(Fin(0)&Inf(1)&Inf(2))|Fin(2)' 1 |") -assert spot.is_partially_degeneralizable(c) == [1, 2] +tc.assertEqual(spot.is_partially_degeneralizable(c), [1, 2]) dc = spot.partial_degeneralize(c, [1, 2]) -assert dc.equivalent_to(c) -assert str(dc.get_acceptance()) == '(Fin(0) & Inf(2)) | Fin(1)' +tc.assertTrue(dc.equivalent_to(c)) +tc.assertEqual(str(dc.get_acceptance()), '(Fin(0) & Inf(2)) | Fin(1)') -assert spot.is_partially_degeneralizable(d) == [] +tc.assertEqual(spot.is_partially_degeneralizable(d), []) dd = spot.partial_degeneralize(d, []) -assert dd.equivalent_to(d) -assert dd.num_states() == 1 -assert str(dd.get_acceptance()) == 'Inf(1) & Fin(0)' +tc.assertTrue(dd.equivalent_to(d)) +tc.assertEqual(dd.num_states(), 1) +tc.assertEqual(str(dd.get_acceptance()), 'Inf(1) & Fin(0)') e = spot.dualize(b) de = spot.partial_degeneralize(e, [0, 1]) -assert de.equivalent_to(e) -assert de.num_states() == 4 +tc.assertTrue(de.equivalent_to(e)) +tc.assertEqual(de.num_states(), 4) de.copy_state_names_from(e) dehoa = de.to_str('hoa') -assert dehoa == """HOA: v1 +tc.assertEqual(dehoa, """HOA: v1 States: 4 Start: 0 AP: 1 "p0" @@ -140,18 +142,21 @@ State: 2 "3#0" State: 3 "2#0" [0] 1 {0} [!0] 2 ---END--""" +--END--""") -assert spot.is_partially_degeneralizable(de) == [] +tc.assertEqual(spot.is_partially_degeneralizable(de), []) df = spot.partial_degeneralize(f, [0, 1]) df.equivalent_to(f) -assert str(df.acc()) == '(1, Fin(0))' +tc.assertEqual(str(df.acc()), '(1, Fin(0))') +df2 = spot.degeneralize(f) +df.equivalent_to(f) +tc.assertEqual(str(df2.acc()), '(1, Fin(0))') try: df = spot.partial_degeneralize(f, [0, 1, 2]) except RuntimeError as e: - assert 'partial_degeneralize(): {0,1,2} does not' in str(e) + tc.assertIn('partial_degeneralize(): {0,1,2} does not', str(e)) else: raise RuntimeError("missing exception") @@ -165,13 +170,13 @@ State: 2 [0&!1&2] 3 {1 4 9} State: 3 [0&!1&2] 4 {0 1 5 9} State: 4 [!0&!1&2] 1 State: 7 [0&!1&!2] 0 {4 7} --END--""") daut5 = spot.degeneralize_tba(aut5) -assert daut5.equivalent_to(aut5) +tc.assertTrue(daut5.equivalent_to(aut5)) sets = list(range(aut5.num_sets())) -assert spot.is_partially_degeneralizable(aut5) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut5), sets) pdaut5 = spot.partial_degeneralize(aut5, sets) -assert pdaut5.equivalent_to(aut5) -assert daut5.num_states() == 9 -assert pdaut5.num_states() == 8 +tc.assertTrue(pdaut5.equivalent_to(aut5)) +tc.assertEqual(daut5.num_states(), 9) +tc.assertEqual(pdaut5.num_states(), 8) aut6 = spot.automaton("""HOA: v1 States: 6 
Start: 0 AP: 3 "p0" "p1" "p2" acc-name: generalized-Buchi 3 Acceptance: 3 Inf(0)&Inf(1)&Inf(2) properties: @@ -180,13 +185,13 @@ trans-labels explicit-labels trans-acc deterministic --BODY-- State: 0 [0&1&!2] 5 {1} State: 4 [!0&1&!2] 0 {1 2} [0&!1&!2] 3 {0} State: 5 [!0&1&2] 1 --END-- """) daut6 = spot.degeneralize_tba(aut6) -assert daut6.equivalent_to(aut6) +tc.assertTrue(daut6.equivalent_to(aut6)) sets = list(range(aut6.num_sets())) -assert spot.is_partially_degeneralizable(aut6) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut6), sets) pdaut6 = spot.partial_degeneralize(aut6, sets) -assert pdaut6.equivalent_to(aut6) -assert daut6.num_states() == 8 -assert pdaut6.num_states() == 8 +tc.assertTrue(pdaut6.equivalent_to(aut6)) +tc.assertEqual(daut6.num_states(), 8) +tc.assertEqual(pdaut6.num_states(), 8) aut7 = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "p0" "p1" "p2" @@ -197,13 +202,23 @@ State: 0 [0&!1&2] 1 {2 3} State: 1 [0&!1&2] 0 {0 2} [0&!1&!2] 6 State: 2 [!0&!1&!2] 3 State: 5 [0&1&!2] 0 [!0&1&2] 7 State: 6 [0&1&2] 2 {1} State: 7 [!0&!1&2] 0 {0} [!0&1&!2] 4 --END--""") daut7 = spot.degeneralize_tba(aut7) -assert daut7.equivalent_to(aut7) +tc.assertTrue(daut7.equivalent_to(aut7)) sets = list(range(aut7.num_sets())) -assert spot.is_partially_degeneralizable(aut7) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut7), sets) pdaut7 = spot.partial_degeneralize(aut7, sets) -assert pdaut7.equivalent_to(aut7) -assert daut7.num_states() == 10 -assert pdaut7.num_states() == 10 +tc.assertTrue(pdaut7.equivalent_to(aut7)) +tc.assertEqual(daut7.num_states(), 10) +tc.assertEqual(pdaut7.num_states(), 10) +ddaut7 = spot.dualize(aut7) +ddaut7a = spot.scc_filter(spot.dualize(spot.degeneralize_tba(ddaut7))) +tc.assertTrue(ddaut7a.equivalent_to(aut7)) +tc.assertEqual(ddaut7a.num_states(), daut7.num_states()) +ddaut7b = spot.scc_filter(spot.dualize(spot.to_nca(ddaut7))) +tc.assertTrue(ddaut7b.equivalent_to(aut7)) +tc.assertEqual(ddaut7b.num_states(), daut7.num_states()) +ddaut7c = spot.scc_filter(spot.dualize(spot.to_dca(ddaut7))) +tc.assertTrue(ddaut7c.equivalent_to(aut7)) +tc.assertEqual(ddaut7c.num_states(), daut7.num_states()) aut8 = spot.automaton("""HOA: v1 States: 8 Start: 0 AP: 3 "p0" "p1" "p2" acc-name: generalized-Buchi 5 Acceptance: 5 Inf(0)&Inf(1)&Inf(2)&Inf(3)&Inf(4) @@ -213,19 +228,19 @@ State: 0 [!0&1&!2] 7 {0} State: 1 [!0&1&2] 1 {4} [0&!1&2] 6 {1 2} State: 2 5 [!0&1&!2] 0 {1 3} State: 6 [0&1&2] 4 [0&1&!2] 6 State: 7 [!0&!1&!2] 1 --END--""") daut8 = spot.degeneralize_tba(aut8) -assert daut8.equivalent_to(aut8) +tc.assertTrue(daut8.equivalent_to(aut8)) sets = list(range(aut8.num_sets())) -assert spot.is_partially_degeneralizable(aut8) == sets +tc.assertEqual(spot.is_partially_degeneralizable(aut8), sets) pdaut8 = spot.partial_degeneralize(aut8, sets) -assert pdaut8.equivalent_to(aut8) -assert daut8.num_states() == 22 -assert pdaut8.num_states() == 9 +tc.assertTrue(pdaut8.equivalent_to(aut8)) +tc.assertEqual(daut8.num_states(), 22) +tc.assertEqual(pdaut8.num_states(), 9) aut9 = spot.dualize(aut8) pdaut9 = spot.partial_degeneralize(aut9, sets) -assert pdaut9.equivalent_to(aut9) +tc.assertTrue(pdaut9.equivalent_to(aut9)) # one more state than aut9, because dualize completed the automaton. 
-assert pdaut9.num_states() == 10 +tc.assertEqual(pdaut9.num_states(), 10) aut10 = spot.automaton("""HOA: v1 States: 3 @@ -242,10 +257,10 @@ State: 2 [0] 0 {1} [!0] 1 --END--""") -assert spot.is_partially_degeneralizable(aut10) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(aut10), [0, 1]) pdaut10 = spot.partial_degeneralize(aut10, [0, 1]) -assert pdaut10.equivalent_to(aut10) -assert pdaut10.to_str() == """HOA: v1 +tc.assertTrue(pdaut10.equivalent_to(aut10)) +tc.assertEqual(pdaut10.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -260,7 +275,7 @@ State: 1 State: 2 [0] 0 {1} [!0] 1 ---END--""" +--END--""") aut11 = spot.automaton("""HOA: v1 States: 3 @@ -277,10 +292,10 @@ State: 2 [0] 0 {1} [!0] 1 --END--""") -assert spot.is_partially_degeneralizable(aut11) == [0, 1] +tc.assertEqual(spot.is_partially_degeneralizable(aut11), [0, 1]) pdaut11 = spot.partial_degeneralize(aut11, [0, 1]) -assert pdaut11.equivalent_to(aut11) -assert pdaut11.to_str() == """HOA: v1 +tc.assertTrue(pdaut11.equivalent_to(aut11)) +tc.assertEqual(pdaut11.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "p0" @@ -295,7 +310,7 @@ State: 1 State: 2 [0] 0 {2} [!0] 1 ---END--""" +--END--""") aut12 = spot.automaton("""HOA: v1 States: 3 @@ -313,24 +328,24 @@ State: 2 [0] 0 [!0] 1 {3} --END--""") -assert spot.is_partially_degeneralizable(aut12) == [0,1] +tc.assertEqual(spot.is_partially_degeneralizable(aut12), [0,1]) aut12b = spot.partial_degeneralize(aut12, [0,1]) aut12c = spot.partial_degeneralize(aut12b, [1,2]) -assert aut12c.equivalent_to(aut12) -assert aut12c.num_states() == 9 +tc.assertTrue(aut12c.equivalent_to(aut12)) +tc.assertEqual(aut12c.num_states(), 9) aut12d = spot.partial_degeneralize(aut12, [0,1,3]) aut12e = spot.partial_degeneralize(aut12d, [0,1]) -assert aut12e.equivalent_to(aut12) -assert aut12e.num_states() == 9 +tc.assertTrue(aut12e.equivalent_to(aut12)) +tc.assertEqual(aut12e.num_states(), 9) aut12f = spot.partial_degeneralize(aut12) -assert aut12f.equivalent_to(aut12) -assert aut12f.num_states() == 9 +tc.assertTrue(aut12f.equivalent_to(aut12)) +tc.assertEqual(aut12f.num_states(), 9) # Check handling of original-states dot = aut12f.to_str('dot', 'd') -assert dot == """digraph "" { +tc.assertEqual(dot, """digraph "" { rankdir=LR label="Inf(2) | (Inf(1) & Fin(0))\\n[Rabin-like 2]" labelloc="t" @@ -367,10 +382,10 @@ assert dot == """digraph "" { 8 -> 4 [label="p0\\n{1,2}"] 8 -> 7 [label="p0"] } -""" +""") aut12g = spot.partial_degeneralize(aut12f) -assert aut12f == aut12g +tc.assertEqual(aut12f, aut12g) aut13 = spot.automaton("""HOA: v1 States: 2 @@ -390,8 +405,8 @@ State: 1 [!0&!1&2&3] 1 {0 2} --END--""") aut13g = spot.partial_degeneralize(aut13) -assert aut13g.equivalent_to(aut13) -assert aut13g.num_states() == 3 +tc.assertTrue(aut13g.equivalent_to(aut13)) +tc.assertEqual(aut13g.num_states(), 3) aut14 = spot.automaton("""HOA: v1 @@ -412,8 +427,8 @@ State: 1 --END-- """) aut14g = spot.partial_degeneralize(aut14) -assert aut14g.equivalent_to(aut14) -assert aut14g.num_states() == 3 +tc.assertTrue(aut14g.equivalent_to(aut14)) +tc.assertEqual(aut14g.num_states(), 3) # Extracting an SCC from this large automaton will produce an automaton A in # which original-states refers to states larger than those in A. 
Some version @@ -439,4 +454,45 @@ State: 10 [!0&1] 4 [0&1] 8 [!0&!1] 10 {0 1 2 3 5} [0&!1] 13 {1 2 3} State: 11 si = spot.scc_info(aut15) aut15b = si.split_on_sets(2, [])[0]; d aut15c = spot.partial_degeneralize(aut15b) -assert aut15c.equivalent_to(aut15b) +tc.assertTrue(aut15c.equivalent_to(aut15b)) + + +# Testing property propagation/update +# for propagate_marks_here + +s = """HOA: v1 +States: 3 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[0] 1 +[!0] 2 +State: 1 {0} +[0] 0 +State: 2 +[!0] 0 +--END--""" +aut = spot.automaton(s) +spot.propagate_marks_here(aut) +s2 = aut.to_str("hoa") + +tc.assertEqual(s2, """HOA: v1 +States: 3 +Start: 0 +AP: 1 "a" +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc deterministic +--BODY-- +State: 0 +[0] 1 {0} +[!0] 2 +State: 1 +[0] 0 {0} +State: 2 +[!0] 0 +--END--""") diff --git a/tests/python/prodexpt.py b/tests/python/prodexpt.py index 098bafb26..4d00b4dae 100644 --- a/tests/python/prodexpt.py +++ b/tests/python/prodexpt.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2017, 2019-2020 Laboratoire de Recherche et Développement -# de l'Epita (LRDE). +# Copyright (C) 2016-2017, 2019-2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # make sure that we are not allowed to build the product of two automata with # different dictionaries. @@ -94,14 +96,14 @@ State: 60 40 38 60 68 State: 61 40 41 57 61 State: 62 40 59 44 62 State: State: 70 40 59 57 70 State: 71 40 63 57 71 State: 72 40 69 57 72 --END-- ''') res = spot.product(left, right) -assert res.num_states() == 977 -assert res.num_edges() == 8554 +tc.assertEqual(res.num_states(), 977) +tc.assertEqual(res.num_edges(), 8554) res = spot.product(left, right, spot.output_aborter(1000, 6000)) -assert res is None +tc.assertIsNone(res) res = spot.product(left, right, spot.output_aborter(900, 9000)) -assert res is None +tc.assertIsNone(res) res = spot.product(left, right, spot.output_aborter(1000, 9000)) -assert res is not None +tc.assertIsNotNone(res) a, b = spot.automata("""HOA: v1 States: 1 Start: 0 AP: 0 acc-name: all Acceptance: 0 t properties: trans-labels explicit-labels state-acc complete @@ -110,7 +112,7 @@ properties: deterministic stutter-invariant weak --BODY-- State: 0 [t] 0 properties: trans-labels explicit-labels state-acc complete properties: deterministic stutter-invariant weak --BODY-- State: 0 [t] 0 --END--""") out = spot.product(a, b).to_str() -assert out == """HOA: v1 +tc.assertEqual(out, """HOA: v1 States: 1 Start: 0 AP: 0 @@ -120,9 +122,9 @@ properties: trans-labels explicit-labels state-acc deterministic properties: stutter-invariant terminal --BODY-- State: 0 ---END--""" +--END--""") out = spot.product_susp(a, b).to_str() -assert out == """HOA: v1 +tc.assertEqual(out, """HOA: v1 States: 1 Start: 0 AP: 0 @@ -132,4 +134,4 @@ properties: trans-labels explicit-labels state-acc deterministic properties: stutter-invariant terminal --BODY-- State: 0 ---END--""" +--END--""") diff --git a/tests/python/randgen.py b/tests/python/randgen.py index 094ddcb3f..32762d02e 100755 --- a/tests/python/randgen.py +++ b/tests/python/randgen.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015 Laboratoire de Recherche et Développement 
de -# l'Epita (LRDE). +# Copyright (C) 2015, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,9 +18,11 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() o = spot.option_map() g = spot.randltlgenerator(0, o) -assert str(g.next()) == '1' -assert str(g.next()) == '0' -assert str(g.next()) == 'None' +tc.assertEqual(str(g.next()), '1') +tc.assertEqual(str(g.next()), '0') +tc.assertEqual(str(g.next()), 'None') diff --git a/tests/python/relabel.py b/tests/python/relabel.py index 5a4a370eb..b32ebd752 100644 --- a/tests/python/relabel.py +++ b/tests/python/relabel.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2019 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2017-2019, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() f = spot.formula('GF(a & b) -> (FG(a & b) & Gc)') m = spot.relabeling_map() @@ -26,19 +28,18 @@ res = "" for old, new in m.items(): res += "#define {} {}\n".format(old, new) res += str(g) -print(res) -assert(res == """#define p0 a & b +tc.assertEqual(res, """#define p0 a & b #define p1 c GFp0 -> (FGp0 & Gp1)""") h = spot.relabel_apply(g, m) -assert h == f +tc.assertEqual(h, f) autg = g.translate() spot.relabel_here(autg, m) -assert str(autg.ap()) == \ - '(spot.formula("a"), spot.formula("b"), spot.formula("c"))' -assert spot.isomorphism_checker.are_isomorphic(autg, f.translate()) +tc.assertEqual(str(autg.ap()), \ + '(spot.formula("a"), spot.formula("b"), spot.formula("c"))') +tc.assertTrue(spot.isomorphism_checker.are_isomorphic(autg, f.translate())) a = spot.formula('a') u = spot.formula('a U b') @@ -46,11 +47,15 @@ m[a] = u try: spot.relabel_here(autg, m) except RuntimeError as e: - assert "new labels" in str(e) + tc.assertIn("new labels", str(e)) m = spot.relabeling_map() m[u] = a try: spot.relabel_here(autg, m) except RuntimeError as e: - assert "old labels" in str(e) + tc.assertIn("old labels", str(e)) + +f = spot.parse_infix_sere("(p9;p21|p22):(p1&p2;p11&p22;p1&p2)").f +g = spot.relabel_bse(f, spot.Abc) +tc.assertEqual(str(g), "(a;(b | c)):(d;(c & e);d)") diff --git a/tests/python/remfin.py b/tests/python/remfin.py index 20115a14f..ffff3e22a 100644 --- a/tests/python/remfin.py +++ b/tests/python/remfin.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et Développement de -# l'Epita +# Copyright (C) 2015-2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import spot +from unittest import TestCase +tc = TestCase() # This test used to trigger an assertion (or a segfault) # in scc_filter_states(). 
@@ -41,7 +43,7 @@ State: 2 aut.prop_inherently_weak(True) aut = spot.dualize(aut) aut1 = spot.scc_filter_states(aut) -assert(aut1.to_str('hoa') == """HOA: v1 +tc.assertEqual(aut1.to_str('hoa'), """HOA: v1 States: 2 Start: 0 AP: 1 "a" @@ -56,17 +58,17 @@ State: 1 [t] 1 --END--""") -assert(aut.scc_filter_states().to_str() == aut1.to_str()) -assert(aut1.get_name() == None) +tc.assertEqual(aut.scc_filter_states().to_str(), aut1.to_str()) +tc.assertIsNone(aut1.get_name()) aut1.set_name("test me") -assert(aut1.get_name() == "test me") +tc.assertEqual(aut1.get_name(), "test me") # The method is the same as the function a = spot.translate('true', 'low', 'any') -assert(a.prop_universal().is_maybe()) -assert(a.prop_unambiguous().is_maybe()) -assert(a.is_deterministic() == True) -assert(a.is_unambiguous() == True) +tc.assertTrue(a.prop_universal().is_maybe()) +tc.assertTrue(a.prop_unambiguous().is_maybe()) +tc.assertTrue(a.is_deterministic()) +tc.assertTrue(a.is_unambiguous()) a = spot.automaton(""" HOA: v1 @@ -92,4 +94,4 @@ State: 2 """) b = spot.remove_fin(a) size = (b.num_states(), b.num_edges()) -assert size == (5, 13); +tc.assertEqual(size, (5, 13)) diff --git a/tests/python/removeap.py b/tests/python/removeap.py index 7a9268c85..ba656ac89 100644 --- a/tests/python/removeap.py +++ b/tests/python/removeap.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019 Laboratoire de Recherche et Développement +# Copyright (C) 2019, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,16 +18,18 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('a U (c & Gb)') -assert not spot.is_terminal_automaton(aut) -assert aut.prop_terminal().is_false() +tc.assertFalse(spot.is_terminal_automaton(aut)) +tc.assertTrue(aut.prop_terminal().is_false()) rem = spot.remove_ap() rem.add_ap("b") aut = rem.strip(aut) -assert not aut.prop_terminal().is_false() -assert spot.is_terminal_automaton(aut) -assert aut.prop_terminal().is_true() +tc.assertFalse(aut.prop_terminal().is_false()) +tc.assertTrue(spot.is_terminal_automaton(aut)) +tc.assertTrue(aut.prop_terminal().is_true()) aut = rem.strip(aut) -assert aut.prop_terminal().is_true() +tc.assertTrue(aut.prop_terminal().is_true()) diff --git a/tests/python/rs_like.py b/tests/python/rs_like.py index 7b4ee75cf..669af5885 100644 --- a/tests/python/rs_like.py +++ b/tests/python/rs_like.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() a = spot.vector_rs_pair() @@ -30,12 +32,13 @@ mall = spot.mark_t() def test_rs(acc, rs, expected_res, expected_pairs): res, p = getattr(acc, 'is_' + rs + '_like')() - assert res == expected_res + tc.assertEqual(res, expected_res) if expected_res: expected_pairs.sort() p = sorted(p) for a, b in zip(p, expected_pairs): - assert a.fin == b.fin and a.inf == b.inf + tc.assertEqual(a.fin, b.fin) + tc.assertEqual(a.inf, b.inf) def switch_pairs(pairs): diff --git a/tests/python/satmin.py b/tests/python/satmin.py index 2d28dd405..f9fa466f8 100644 --- a/tests/python/satmin.py +++ b/tests/python/satmin.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2020, 2021 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2015, 2020, 2021, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,232 +18,234 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() aut = spot.translate('GFa & GFb', 'Buchi', 'SBAcc') -assert aut.num_sets() == 1 -assert aut.num_states() == 3 -assert aut.is_deterministic() +tc.assertEqual(aut.num_sets(), 1) +tc.assertEqual(aut.num_states(), 3) +tc.assertTrue(aut.is_deterministic()) min1 = spot.sat_minimize(aut, acc='Rabin 1') -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_langmap=True) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=0) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=1, sat_incr_steps=50) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=-1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=0) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=1) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) 
+tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=2) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_incr=2, sat_incr_steps=50) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min1 = spot.sat_minimize(aut, acc='Rabin 1', sat_naive=True) -assert min1.num_sets() == 2 -assert min1.num_states() == 2 +tc.assertEqual(min1.num_sets(), 2) +tc.assertEqual(min1.num_states(), 2) min2 = spot.sat_minimize(aut, acc='Streett 2') -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_langmap=True) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=0) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=1, sat_incr_steps=50) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=-1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=0) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=1) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=2) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_incr=2, sat_incr_steps=50) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min2 = spot.sat_minimize(aut, acc='Streett 2', sat_naive=True) -assert min2.num_sets() == 4 -assert min2.num_states() == 1 +tc.assertEqual(min2.num_sets(), 4) +tc.assertEqual(min2.num_states(), 1) min3 = spot.sat_minimize(aut, 
acc='Rabin 2', state_based=True, max_states=5) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_langmap=True) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=0) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=1, sat_incr_steps=50) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=-1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=0) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=1) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=2) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_incr=2, sat_incr_steps=50) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min3 = spot.sat_minimize(aut, acc='Rabin 2', state_based=True, max_states=5, sat_naive=True) -assert min3.num_sets() == 4 -assert min3.num_states() == 3 +tc.assertEqual(min3.num_sets(), 4) +tc.assertEqual(min3.num_states(), 3) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_langmap=True) -assert 
min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=0) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=1, sat_incr_steps=50) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=-1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=0) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=1) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=2) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_incr=2, sat_incr_steps=50) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) min4 = spot.sat_minimize(aut, acc='parity max odd 3', colored=True, sat_naive=True) -assert min4.num_sets() == 3 -assert min4.num_states() == 2 +tc.assertEqual(min4.num_sets(), 3) +tc.assertEqual(min4.num_states(), 2) aut = spot.translate('GFa') -assert aut.num_sets() == 1 -assert aut.num_states() == 1 -assert aut.is_deterministic() +tc.assertEqual(aut.num_sets(), 1) +tc.assertEqual(aut.num_states(), 1) +tc.assertTrue(aut.is_deterministic()) out = spot.sat_minimize(aut, state_based=True) -assert out.num_states() == 2 +tc.assertEqual(out.num_states(), 2) out = spot.sat_minimize(aut, state_based=True, max_states=1) -assert out is None +tc.assertTrue(out is None) diff --git a/tests/python/sbacc.py b/tests/python/sbacc.py index 445845dbc..22d937014 100644 --- a/tests/python/sbacc.py +++ b/tests/python/sbacc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2018, 2021 
Laboratoire de Recherche et +# Copyright (C) 2017-2018, 2021, 2022 Laboratoire de Recherche et # Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. @@ -18,13 +18,15 @@ # along with this program. If not, see . import spot -aut = spot.translate('GFa') -assert aut.num_states() == 1 -assert not aut.prop_state_acc().is_true() -aut = spot.sbacc(aut) -assert aut.num_states() == 2 -assert aut.prop_state_acc().is_true() +from unittest import TestCase +tc = TestCase() +aut = spot.translate('GFa') +tc.assertEqual(aut.num_states(), 1) +tc.assertFalse(aut.prop_state_acc().is_true()) +aut = spot.sbacc(aut) +tc.assertEqual(aut.num_states(), 2) +tc.assertTrue(aut.prop_state_acc().is_true()) aut = spot.automaton("""HOA: v1 States: 3 @@ -48,7 +50,7 @@ s = spot.sbacc(aut) s.copy_state_names_from(aut) h = s.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -59,7 +61,7 @@ State: 0 "0" [0] 1 State: 1 "2" {1} [t] 1 ---END--""" +--END--""") aut = spot.automaton("""HOA: v1 States: 3 @@ -83,7 +85,7 @@ d = spot.degeneralize(aut) d.copy_state_names_from(aut) h = d.to_str('hoa') -assert h == """HOA: v1 +tc.assertEqual(h, """HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -95,4 +97,4 @@ State: 0 "0#0" [0] 1 State: 1 "2#0" {0} [t] 1 ---END--""" +--END--""") diff --git a/tests/python/sccfilter.py b/tests/python/sccfilter.py index 6edd33e9f..7728b70a6 100644 --- a/tests/python/sccfilter.py +++ b/tests/python/sccfilter.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016 Laboratoire de Recherche et Développement de +# Copyright (C) 2016, 2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. @@ -22,6 +22,8 @@ # Major) import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton(""" HOA: v1.1 @@ -43,7 +45,7 @@ State: 1 "bar" --END-- """) -assert (spot.scc_filter(a, True).to_str('hoa', '1.1') == """HOA: v1.1 +tc.assertEqual(spot.scc_filter(a, True).to_str('hoa', '1.1'), """HOA: v1.1 States: 2 Start: 0 AP: 1 "a" diff --git a/tests/python/sccinfo.py b/tests/python/sccinfo.py index 0ac645726..f8ade7e4b 100644 --- a/tests/python/sccinfo.py +++ b/tests/python/sccinfo.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017, 2021, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() a = spot.translate('(Ga -> Gb) W c') @@ -26,11 +28,11 @@ try: si = spot.scc_info(a, 10) exit(2) except RuntimeError as e: - assert "initial state does not exist" in str(e) + tc.assertIn("initial state does not exist", str(e)) si = spot.scc_info(a) n = si.scc_count() -assert n == 4 +tc.assertEqual(n, 4) acc = 0 rej = 0 @@ -39,24 +41,24 @@ for i in range(n): acc += si.is_accepting_scc(i) rej += si.is_rejecting_scc(i) triv += si.is_trivial(i) -assert acc == 3 -assert rej == 1 -assert triv == 0 +tc.assertEqual(acc, 3) +tc.assertEqual(rej, 1) +tc.assertEqual(triv, 0) for scc in si: acc -= scc.is_accepting() rej -= scc.is_rejecting() triv -= scc.is_trivial() -assert acc == 0 -assert rej == 0 -assert triv == 0 +tc.assertEqual(acc, 0) +tc.assertEqual(rej, 0) +tc.assertEqual(triv, 0) l0 = si.states_of(0) l1 = si.states_of(1) l2 = si.states_of(2) l3 = si.states_of(3) l = sorted(list(l0) + list(l1) + list(l2) + list(l3)) -assert l == [0, 1, 2, 3, 4] +tc.assertEqual(l, [0, 1, 2, 3, 4]) i = si.initial() todo = [i] @@ -73,14 +75,14 @@ while todo: if s not in seen: seen.add(s) todo.append(s) -assert seen == {0, 1, 2, 3} -assert trans == [(0, 0), (0, 1), (0, 2), (0, 3), - (2, 0), (2, 1), (2, 2), (2, 4), - (3, 3), (4, 1), (4, 4), (1, 1)] -assert transi == [(0, 0, 1), (0, 2, 3), (2, 0, 6), - (2, 2, 8), (3, 3, 10), (4, 4, 12), (1, 1, 5)] +tc.assertEqual(seen, {0, 1, 2, 3}) +tc.assertEqual(trans, [(0, 0), (0, 1), (0, 2), (0, 3), + (2, 0), (2, 1), (2, 2), (2, 4), + (3, 3), (4, 1), (4, 4), (1, 1)]) +tc.assertEqual(transi, [(0, 0, 1), (0, 2, 3), (2, 0, 6), + (2, 2, 8), (3, 3, 10), (4, 4, 12), (1, 1, 5)]) -assert not spot.is_weak_automaton(a, si) +tc.assertFalse(spot.is_weak_automaton(a, si)) a = spot.automaton(""" @@ -107,8 +109,8 @@ State: 3 """) si = spot.scc_info(a) si.determine_unknown_acceptance() -assert si.scc_count() == 2 -assert si.is_accepting_scc(0) -assert not si.is_rejecting_scc(0) -assert si.is_rejecting_scc(1) -assert not si.is_accepting_scc(1) +tc.assertEqual(si.scc_count(), 2) +tc.assertTrue(si.is_accepting_scc(0)) +tc.assertFalse(si.is_rejecting_scc(0)) +tc.assertTrue(si.is_rejecting_scc(1)) +tc.assertFalse(si.is_accepting_scc(1)) diff --git a/tests/python/sccsplit.py b/tests/python/sccsplit.py index 9095a1a29..4a1781475 100644 --- a/tests/python/sccsplit.py +++ b/tests/python/sccsplit.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,9 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() + aut = spot.translate('GF(a <-> Xa) & GF(b <-> XXb)') si = spot.scc_info(aut) @@ -27,4 +30,4 @@ for aut2 in si.split_on_sets(0, [0]): # This call to to_str() used to fail because split_on_sets had not # registered the atomic propositions of aut s += aut2.to_str() -assert spot.automaton(s).num_states() == 8 +tc.assertEqual(spot.automaton(s).num_states(), 8) diff --git a/tests/python/semidet.py b/tests/python/semidet.py index 856b3b7d2..9072f5917 100644 --- a/tests/python/semidet.py +++ b/tests/python/semidet.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017 Laboratoire de Recherche et Développement de -# l'Epita (LRDE). +# Copyright (C) 2017, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). 
# # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() formulas = [('(Gp0 | Fp1) M 1', False, True), ('(!p1 U p1) U X(!p0 -> Fp1)', False, True), @@ -31,9 +33,9 @@ for f, isd, issd in formulas: aut = spot.translate(f) # The formula with isd=True, issd=True is the only one # for which both properties are already set. - assert (aut.prop_deterministic().is_maybe() or - aut.prop_semi_deterministic().is_maybe() or - isd == issd) + tc.assertTrue(aut.prop_deterministic().is_maybe() or + aut.prop_semi_deterministic().is_maybe() or + isd == issd) spot.check_determinism(aut) - assert aut.prop_deterministic() == isd - assert aut.prop_semi_deterministic() == issd + tc.assertEqual(aut.prop_deterministic(), isd) + tc.assertEqual(aut.prop_semi_deterministic(), issd) diff --git a/tests/python/setacc.py b/tests/python/setacc.py index 8d20b6a49..7246bf5cc 100644 --- a/tests/python/setacc.py +++ b/tests/python/setacc.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2016, 2018, 2021, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,54 +19,56 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # Test case reduced from a report from Juraj Major . a = spot.make_twa_graph(spot._bdd_dict) a.set_acceptance(0, spot.acc_code("t")) -assert(a.prop_state_acc() == True) +tc.assertTrue(a.prop_state_acc()) a.set_acceptance(1, spot.acc_code("Fin(0)")) -assert(a.prop_state_acc() == spot.trival.maybe()) +tc.assertEqual(a.prop_state_acc(), spot.trival.maybe()) # Some tests for used_inf_fin_sets(), which return a pair of mark_t. 
(inf, fin) = a.get_acceptance().used_inf_fin_sets() -assert inf == [] -assert fin == [0] +tc.assertEqual(inf, []) +tc.assertEqual(fin, [0]) (inf, fin) = spot.acc_code("(Fin(0)|Inf(1))&Fin(2)&Inf(0)").used_inf_fin_sets() -assert inf == [0, 1] -assert fin == [0, 2] +tc.assertEqual(inf, [0, 1]) +tc.assertEqual(fin, [0, 2]) # is_rabin_like() returns (bool, [(inf, fin), ...]) (b, v) = spot.acc_cond("(Fin(0)&Inf(1))|(Fin(2)&Inf(0))").is_rabin_like() -assert b == True -assert len(v) == 2 -assert v[0].fin == [0] -assert v[0].inf == [1] -assert v[1].fin == [2] -assert v[1].inf == [0] +tc.assertTrue(b) +tc.assertEqual(len(v), 2) +tc.assertEqual(v[0].fin, [0]) +tc.assertEqual(v[0].inf, [1]) +tc.assertEqual(v[1].fin, [2]) +tc.assertEqual(v[1].inf, [0]) (b, v) = spot.acc_cond("(Fin(0)|Inf(1))&(Fin(2)|Inf(0))").is_rabin_like() -assert b == False -assert len(v) == 0 +tc.assertFalse(b) +tc.assertEqual(len(v), 0) (b, v) = spot.acc_cond("(Fin(0)|Inf(1))&(Fin(2)|Inf(0))").is_streett_like() -assert b == True -assert repr(v) == \ - '(spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0]))' +tc.assertTrue(b) +tc.assertEqual(repr(v), \ + '(spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0]))') v2 = (spot.rs_pair(fin=[0], inf=[1]), spot.rs_pair(fin=[2], inf=[0])) -assert v == v2 +tc.assertEqual(v, v2) acc = spot.acc_cond("generalized-Rabin 1 2") (b, v) = acc.is_generalized_rabin() -assert b == True -assert v == (2,) +tc.assertTrue(b) +tc.assertEqual(v, (2,)) (b, v) = acc.is_generalized_streett() -assert b == False -assert v == () +tc.assertFalse(b) +tc.assertEqual(v, ()) (b, v) = acc.is_streett_like() -assert b == True +tc.assertTrue(b) ve = (spot.rs_pair([0], []), spot.rs_pair([], [1]), spot.rs_pair([], [2])) -assert v == ve -assert acc.name() == "generalized-Rabin 1 2" +tc.assertEqual(v, ve) +tc.assertEqual(acc.name(), "generalized-Rabin 1 2") # At the time of writting, acc_cond does not yet recognize # "generalized-Streett", as there is no definition for that in the HOA format, @@ -74,23 +76,23 @@ assert acc.name() == "generalized-Rabin 1 2" # being a generalized-Streett. See issue #249. acc = spot.acc_cond("Inf(0)|Fin(1)|Fin(2)") (b, v) = acc.is_generalized_streett() -assert b == True -assert v == (2,) +tc.assertTrue(b) +tc.assertEqual(v, (2,)) (b, v) = acc.is_generalized_rabin() -assert b == False -assert v == () +tc.assertFalse(b) +tc.assertEqual(v, ()) # FIXME: We should have a way to disable the following output, as it is not # part of HOA v1. -assert acc.name() == "generalized-Streett 1 2" +tc.assertEqual(acc.name(), "generalized-Streett 1 2") # issue #469. This test is meaningful only if Spot is compiled with # --enable-max-accsets=64 or more. try: m = spot.mark_t([33]) - assert m.lowest() == m + tc.assertEqual(m.lowest(), m) n = spot.mark_t([33,34]) - assert n.lowest() == m + tc.assertEqual(n.lowest(), m) except RuntimeError as e: if "Too many acceptance sets used." 
in str(e): pass @@ -102,24 +104,24 @@ except RuntimeError as e: from gc import collect acc = spot.translate('a').acc() collect() -assert acc == spot.acc_cond('Inf(0)') +tc.assertEqual(acc, spot.acc_cond('Inf(0)')) acc = spot.translate('b').get_acceptance() collect() -assert acc == spot.acc_code('Inf(0)') +tc.assertEqual(acc, spot.acc_code('Inf(0)')) c = spot.acc_cond('Fin(0)&Fin(1)&(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [0,1] -assert m2 == [] +tc.assertEqual(m1, [0,1]) +tc.assertEqual(m2, []) c = spot.acc_cond('Inf(0)&Inf(1)&(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [] -assert m2 == [0,1] +tc.assertEqual(m1, []) +tc.assertEqual(m2, [0,1]) c = spot.acc_cond('Inf(0)&Inf(1)|(Inf(2)|Fin(3))') m1 = c.fin_unit() m2 = c.inf_unit() -assert m1 == [] -assert m2 == [] +tc.assertEqual(m1, []) +tc.assertEqual(m2, []) diff --git a/tests/python/setxor.py b/tests/python/setxor.py index 7cd1e5da1..2fe69cd99 100755 --- a/tests/python/setxor.py +++ b/tests/python/setxor.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2010, 2011 Laboratoire de Recherche et Développement -# de l'EPITA. +# Copyright (C) 2010, 2011, 2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ import sys from buddy import * +from unittest import TestCase +tc = TestCase() bdd_init(10000, 10000) bdd_setvarnum(5) @@ -29,18 +31,18 @@ a = V[0] & -V[1] & V[2] & -V[3] b = V[0] & V[1] & V[2] & -V[3] c = -V[0] & V[1] & -V[2] & -V[3] -assert(c == bdd_setxor(a, b)) -assert(c == bdd_setxor(b, a)) -assert(a == bdd_setxor(b, c)) -assert(a == bdd_setxor(c, b)) -assert(b == bdd_setxor(a, c)) -assert(b == bdd_setxor(c, a)) +tc.assertEqual(c, bdd_setxor(a, b)) +tc.assertEqual(c, bdd_setxor(b, a)) +tc.assertEqual(a, bdd_setxor(b, c)) +tc.assertEqual(a, bdd_setxor(c, b)) +tc.assertEqual(b, bdd_setxor(a, c)) +tc.assertEqual(b, bdd_setxor(c, a)) d = V[1] & V[2] & -V[3] & V[4] e = V[0] & V[1] & -V[2] & -V[3] & V[4] -assert(e == bdd_setxor(a, d)) -assert(e == bdd_setxor(d, a)) +tc.assertEqual(e, bdd_setxor(a, d)) +tc.assertEqual(e, bdd_setxor(d, a)) # Cleanup all BDD variables before calling bdd_done(), otherwise # bdd_delref will be called after bdd_done() and this is unsafe in diff --git a/tests/python/simplacc.py b/tests/python/simplacc.py index e742d69a4..50dc2d74a 100644 --- a/tests/python/simplacc.py +++ b/tests/python/simplacc.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2020 Laboratoire de Recherche et Développement de l'Epita +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita # (LRDE). # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . 
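
The hunks above and below all apply the same mechanical conversion: each test script creates one module-level `unittest.TestCase` instance and routes its checks through it, so the checks still run under `python -O` (which strips bare `assert` statements) and a failure reports both operands instead of a bare traceback. A minimal self-contained sketch of the pattern — standard library only, with plain lists standing in for the values Spot would return:

    from unittest import TestCase

    tc = TestCase()

    inf, fin = [0, 1], [0, 2]            # stand-ins for Spot results
    tc.assertEqual(inf, [0, 1])          # was: assert inf == [0, 1]
    tc.assertTrue(fin != inf)            # was: assert fin != inf
    tc.assertFalse(inf == fin)           # was: assert not (inf == fin)

    # A failing check raises AssertionError with a readable message,
    # e.g. tc.assertEqual(inf, fin) reports "Lists differ: [0, 1] != [0, 2]",
    # and the check is not silently dropped when Python runs with -O.
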
import spot +from unittest import TestCase +tc = TestCase() auts = list(spot.automata(""" @@ -70,19 +72,19 @@ explicit-labels trans-acc deterministic --BODY-- State: 0 [0&!1] 0 {2 3} res = [] for a in auts: b = spot.simplify_acceptance(a) - assert b.equivalent_to(a) + tc.assertTrue(b.equivalent_to(a)) res.append(str(b.get_acceptance())) c = spot.simplify_acceptance(b) - assert b.get_acceptance() == c.get_acceptance() + tc.assertEqual(b.get_acceptance(), c.get_acceptance()) a.set_acceptance(a.num_sets(), a.get_acceptance().complement()) b = spot.simplify_acceptance(a) - assert b.equivalent_to(a) + tc.assertTrue(b.equivalent_to(a)) res.append(str(b.get_acceptance())) c = spot.simplify_acceptance(b) - assert b.get_acceptance() == c.get_acceptance() + tc.assertEqual(b.get_acceptance(), c.get_acceptance()) -assert res == [ +tc.assertEqual(res, [ 'Inf(0)', 'Fin(0)', 'Inf(1) & Fin(0)', @@ -101,4 +103,4 @@ assert res == [ '(Inf(0) | Fin(2)) & Inf(1)', '(Fin(2) & (Inf(1) | Fin(0))) | (Inf(0)&Inf(2))', '(Inf(2) | (Fin(1) & Inf(0))) & (Fin(0)|Fin(2))', - ] + ]) diff --git a/tests/python/simstate.py b/tests/python/simstate.py index 6c2ca8bc3..b0b62267d 100644 --- a/tests/python/simstate.py +++ b/tests/python/simstate.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2015, 2017-2018, 2020-2021 Laboratoire de Recherche +# Copyright (C) 2015, 2017-2018, 2020-2022 Laboratoire de Recherche # et Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot from sys import exit +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -48,7 +50,7 @@ State: 1 """) aut2 = spot.simulation(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -59,10 +61,10 @@ properties: deterministic --BODY-- State: 0 {0} [t] 0 ---END--""" +--END--""") aut2.copy_state_names_from(aut) -assert aut2.to_str() == """HOA: v1 +tc.assertEqual(aut2.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -73,7 +75,7 @@ properties: deterministic --BODY-- State: 0 "[0,1]" {0} [t] 0 ---END--""" +--END--""") del aut del aut2 @@ -82,7 +84,7 @@ gcollect() aut = spot.translate('GF((p0 -> Gp0) R p1)') daut = spot.tgba_determinize(aut, True) -assert daut.to_str() == """HOA: v1 +tc.assertEqual(daut.to_str(), """HOA: v1 States: 3 Start: 0 AP: 2 "p1" "p0" @@ -106,7 +108,7 @@ State: 2 "{₀[0]₀}{₁[1]₁}" [!0&1] 2 [0&!1] 0 {0} [0&1] 1 {2} ---END--""" +--END--""") del aut del daut @@ -129,7 +131,7 @@ State: 1 """) daut = spot.tgba_determinize(aut, True) -assert daut.to_str() == """HOA: v1 +tc.assertEqual(daut.to_str(), """HOA: v1 States: 12 Start: 0 AP: 2 "a" "b" @@ -185,18 +187,18 @@ State: 11 "{₀[1#1]{₁[0#0,0#1]{₂[1#0]₂}₁}₀}" [!0&1] 2 {0} [0&!1] 6 {0} [0&1] 9 {0} ---END--""" +--END--""") a = spot.translate('!Gp0 xor FG((p0 W Gp1) M p1)') a = spot.degeneralize_tba(a) -assert a.num_states() == 8 +tc.assertEqual(a.num_states(), 8) b = spot.simulation(a) -assert b.num_states() == 3 +tc.assertEqual(b.num_states(), 3) b.set_init_state(1) b.purge_unreachable_states() b.copy_state_names_from(a) -assert b.to_str() == """HOA: v1 +tc.assertEqual(b.to_str(), """HOA: v1 States: 1 Start: 0 AP: 2 "p0" "p1" @@ -208,7 +210,7 @@ properties: deterministic stutter-invariant State: 0 "[1,7]" [1] 0 [!1] 0 {0} ---END--""" +--END--""") aut = spot.automaton('''HOA: v1 States: 12 @@ -267,7 +269,7 @@ State: 11 [0&!1] 6 
{0} [0&1] 9 {0} --END--''') -assert spot.reduce_iterated(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(aut).to_str(), '''HOA: v1 States: 9 Start: 0 AP: 2 "a" "b" @@ -308,7 +310,7 @@ State: 8 [0&!1] 4 {0} [!0&1] 6 [0&1] 7 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 6 @@ -332,7 +334,7 @@ State: 4 State: 5 [0] 5 --END--''') -assert spot.reduce_iterated(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(aut).to_str(), '''HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -347,7 +349,7 @@ State: 1 [0] 2 State: 2 [1] 2 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 5 @@ -374,7 +376,7 @@ State: 4 [0&1&!2&3] 4 {0} --END--''') -assert spot.reduce_direct_cosim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim(aut).to_str(), '''HOA: v1 States: 5 Start: 0 AP: 4 "p0" "p2" "p3" "p1" @@ -395,7 +397,7 @@ State: 3 [0&!1&2&3] 3 {1} State: 4 [0&!1&2&3] 4 {0} ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 2 @@ -410,7 +412,7 @@ State: 0 State: 1 [0] 0 --END--''') -assert spot.reduce_direct_sim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_sim(aut).to_str(), '''HOA: v1 States: 1 Start: 0 AP: 2 "a" "b" @@ -418,7 +420,7 @@ Acceptance: 2 Fin(0) & Fin(1) properties: trans-labels explicit-labels state-acc deterministic --BODY-- State: 0 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 name: "(p1 U p2) U p3" @@ -445,7 +447,7 @@ State: 3 [1] 1 [0&!1] 3 --END--''') -assert spot.reduce_direct_cosim_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim_sba(aut).to_str(), '''HOA: v1 States: 4 Start: 0 AP: 3 "p2" "p3" "p1" @@ -468,7 +470,7 @@ State: 2 State: 3 [0] 1 [!0&2] 3 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 4 @@ -488,7 +490,7 @@ State: 2 State: 3 {0} [1] 3 --END--''') -assert spot.reduce_direct_cosim(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_cosim(aut).to_str(), '''HOA: v1 States: 3 Start: 0 AP: 2 "a" "b" @@ -502,9 +504,9 @@ State: 1 [1] 2 State: 2 {0} [1] 2 ---END--''' +--END--''') -assert spot.reduce_direct_sim_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_direct_sim_sba(aut).to_str(), '''HOA: v1 States: 2 Start: 0 AP: 2 "a" "b" @@ -516,7 +518,7 @@ State: 0 [0] 1 State: 1 {0} [1] 1 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 3 @@ -532,7 +534,7 @@ State: 1 State: 2 {0} [0] 2 --END--''') -assert spot.reduce_iterated_sba(aut).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated_sba(aut).to_str(), '''HOA: v1 States: 1 Start: 0 AP: 1 "a" @@ -542,7 +544,7 @@ properties: deterministic --BODY-- State: 0 {0} [0] 0 ---END--''' +--END--''') aut = spot.automaton('''HOA: v1 States: 30 @@ -630,7 +632,7 @@ State: 28 State: 29 [0&!1&!2&!3] 29 --END--''') -assert spot.reduce_iterated(a).to_str() == '''HOA: v1 +tc.assertEqual(spot.reduce_iterated(a).to_str(), '''HOA: v1 States: 8 Start: 0 AP: 2 "p0" "p1" @@ -669,7 +671,7 @@ State: 7 [!1] 1 {0} [0&1] 5 [1] 7 ---END--''' +--END--''') # issue #452 @@ -707,4 +709,4 @@ State: 8 [@p] 3 {0 1} --END--""") aut = spot.simulation(aut) -assert aut.num_states() == 1 +tc.assertEqual(aut.num_states(), 1) diff --git a/tests/python/sonf.py b/tests/python/sonf.py new file mode 100644 index 000000000..40af758b0 --- /dev/null +++ b/tests/python/sonf.py @@ -0,0 +1,43 @@ +# -*- mode: python; coding: utf-8 -*- +# Copyright (C) 2020, 2022 Laboratoire de Recherche et Développement de l'Epita +# (LRDE). +# +# This file is part of Spot, a model checking library. 
+# +# Spot is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# Spot is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +# License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import spot +from unittest import TestCase +tc = TestCase() + +formulas = """\ +{x[*]}[]-> F({y[*]}<>-> GFz) +<>(({{p12}[*0..3]}[]-> ((p9) || (!(p17)))) V ((true) U (p17))) +{{true} || {[*0]}}[]-> (false) +{{p14} & {{p0}[*]}}[]-> (p11) +{{{!{p6}} -> {!{p3}}}[*]}[]-> ((p3)V((p3) || ((X((false))) && ((p2)V(p18))))) +""" + +for f1 in formulas.splitlines(): + f1 = spot.formula(f1) + a1 = spot.translate(f1) + + f2, aps = spot.suffix_operator_normal_form(f1, 'sonf_') + a2 = spot.translate(f2) + rm = spot.remove_ap() + for ap in aps: + rm.add_ap(ap) + a2 = rm.strip(a2) + + tc.assertTrue(spot.are_equivalent(a1, a2)) diff --git a/tests/python/split.py b/tests/python/split.py index adab5a931..b916f494f 100644 --- a/tests/python/split.py +++ b/tests/python/split.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2021 Laboratoire de Recherche et +# Copyright (C) 2018-2022 Laboratoire de Recherche et # Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -19,6 +19,8 @@ import spot import buddy +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -51,16 +53,17 @@ def do_split(f, out_list): return aut, s aut, s = do_split('(FG !a) <-> (GF b)', ['b']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) del aut del s gcollect() aut, s = do_split('GFa && GFb', ['b']) -assert equiv(aut, spot.unsplit_2step(s)) -# FIXME see below -# assert str_diff("""HOA: v1 +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) +# FIXME s.to_str() is NOT the same on Debian stable and on Debian unstable +# we should investigate this. See Issue #502. +# tc.assertEqual("""HOA: v1 # States: 3 # Start: 0 # AP: 2 "a" "b" @@ -86,10 +89,11 @@ del s gcollect() aut, s = do_split('! ((G (req -> (F ack))) && (G (go -> (F grant))))', ['ack']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) + # FIXME s.to_str() is NOT the same on Debian stable and on Debian unstable -# we should investigate this -# assert s.to_str() == """HOA: v1 +# we should investigate this. See Issue #502. +# tc.assertEqual(s.to_str(), """HOA: v1 # States: 9 # Start: 0 # AP: 4 "ack" "req" "go" "grant" @@ -122,7 +126,7 @@ assert equiv(aut, spot.unsplit_2step(s)) # [!0] 1 # State: 8 {0} # [!3] 2 -# --END--""" +# --END--""") del aut del s @@ -131,4 +135,4 @@ gcollect() aut, s = do_split('((G (((! g_0) || (! 
g_1)) && ((r_0 && (X r_1)) -> (F (g_0 \ && g_1))))) && (G (r_0 -> F g_0))) && (G (r_1 -> F g_1))', ['g_0', 'g_1']) -assert equiv(aut, spot.unsplit_2step(s)) +tc.assertTrue(equiv(aut, spot.unsplit_2step(s))) diff --git a/tests/python/streett_totgba.py b/tests/python/streett_totgba.py index 1c0bfc13e..8a18defbc 100644 --- a/tests/python/streett_totgba.py +++ b/tests/python/streett_totgba.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2021 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2017-2018, 2021-2022 Laboratoire de Recherche et +# Développement de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -22,7 +22,8 @@ import spot import os import shutil import sys - +from unittest import TestCase +tc = TestCase() def tgba(a): if not a.is_existential(): @@ -33,11 +34,11 @@ def tgba(a): def test_aut(aut): stgba = tgba(aut) - assert stgba.equivalent_to(aut) + tc.assertTrue(stgba.equivalent_to(aut)) os.environ["SPOT_STREETT_CONV_MIN"] = '1' sftgba = tgba(aut) del os.environ["SPOT_STREETT_CONV_MIN"] - assert stgba.equivalent_to(sftgba) + tc.assertTrue(stgba.equivalent_to(sftgba)) slike = spot.simplify_acceptance(aut) @@ -45,8 +46,7 @@ def test_aut(aut): os.environ["SPOT_STREETT_CONV_MIN"] = "1" slftgba = tgba(slike) del os.environ["SPOT_STREETT_CONV_MIN"] - assert sltgba.equivalent_to(slftgba) - + tc.assertTrue(sltgba.equivalent_to(slftgba)) if shutil.which('ltl2dstar') is None: sys.exit(77) diff --git a/tests/python/streett_totgba2.py b/tests/python/streett_totgba2.py index 852eff0af..5ff97a369 100644 --- a/tests/python/streett_totgba2.py +++ b/tests/python/streett_totgba2.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018 Laboratoire de Recherche et Développement de -# l'EPITA. +# Copyright (C) 2018, 2022 Laboratoire de Recherche et Développement +# de l'EPITA. # # This file is part of Spot, a model checking library. # @@ -19,6 +19,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # Issue 316 a = spot.automaton(""" @@ -60,11 +62,11 @@ State: 7 {1 3 4} """) tgba = spot.streett_to_generalized_buchi(a) -assert tgba.acc().is_generalized_buchi() +tc.assertTrue(tgba.acc().is_generalized_buchi()) ba = spot.simplify_acceptance(a) -assert ba.acc().is_buchi() +tc.assertTrue(ba.acc().is_buchi()) nba = spot.dualize(ba.postprocess('generic', 'deterministic')) ntgba = spot.dualize(tgba.postprocess('generic', 'deterministic')) -assert not ba.intersects(ntgba) -assert not tgba.intersects(nba) +tc.assertFalse(ba.intersects(ntgba)) +tc.assertFalse(tgba.intersects(nba)) diff --git a/tests/python/stutter.py b/tests/python/stutter.py index dafb03b7e..05c28fda9 100644 --- a/tests/python/stutter.py +++ b/tests/python/stutter.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2019-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2019-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # # This file is part of Spot, a model checking library. 
@@ -23,6 +23,8 @@ import spot +from unittest import TestCase +tc = TestCase() def explain_stut(f): @@ -45,20 +47,20 @@ def explain_stut(f): # Test from issue #388 w1, w2 = explain_stut('{(a:b) | (a;b)}|->Gc') -assert str(w1) == 'a & !b & !c; cycle{!a & b & !c}' -assert str(w2) == 'a & !b & !c; a & !b & !c; cycle{!a & b & !c}' +tc.assertEqual(str(w1), 'a & !b & !c; cycle{!a & b & !c}') +tc.assertEqual(str(w2), 'a & !b & !c; a & !b & !c; cycle{!a & b & !c}') # Test from issue #401 w1, w2 = explain_stut('G({x} |-> ({x[+]} <>-> ({Y1[+]} <>=> Y2)))') -assert str(w1) == 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x}' -assert str(w2) == 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x; Y1 & Y2 & x}' +tc.assertEqual(str(w1), 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x}') +tc.assertEqual(str(w2), 'cycle{!Y1 & !Y2 & x; Y1 & Y2 & x; Y1 & Y2 & x}') # Related to issue #401 as well. sl() and sl2() should upgrade # the t acceptance condition into inf(0). pos = spot.translate('Xa & XXb') w = pos.accepting_word().as_automaton() -assert w.acc().is_t() +tc.assertTrue(w.acc().is_t()) a = spot.sl2(w) -assert a.acc().is_buchi() +tc.assertTrue(a.acc().is_buchi()) a = spot.sl(w) -assert a.acc().is_buchi() +tc.assertTrue(a.acc().is_buchi()) diff --git a/tests/python/sum.py b/tests/python/sum.py index 7e2e74220..1f7c6e0a1 100644 --- a/tests/python/sum.py +++ b/tests/python/sum.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017-2019 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2017-2019, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -20,6 +20,8 @@ import spot import sys import itertools +from unittest import TestCase +tc = TestCase() # make sure that we are not allowed to build the sum of two automata with # different dictionaries. @@ -65,8 +67,8 @@ for p in zip(phi1, phi2): p0orp1 = spot.formula.Or(p) a1ora2 = spot.remove_alternation(spot.sum(a1, a2), True) - assert p0orp1.equivalent_to(a1ora2) + tc.assertTrue(p0orp1.equivalent_to(a1ora2)) p0andp1 = spot.formula.And(p) a1anda2 = spot.remove_alternation(spot.sum_and(a1, a2), True) - assert p0andp1.equivalent_to(a1anda2) + tc.assertTrue(p0andp1.equivalent_to(a1anda2)) diff --git a/tests/python/synthesis.ipynb b/tests/python/synthesis.ipynb index be001a9b3..ba1b562cc 100644 --- a/tests/python/synthesis.ipynb +++ b/tests/python/synthesis.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": 1, - "id": "452c00ae", + "id": "4f84fa79", "metadata": {}, "outputs": [], "source": [ @@ -14,13 +14,13 @@ }, { "cell_type": "markdown", - "id": "7545ab74", + "id": "4ad017a0", "metadata": {}, "source": [ "This notebook presents functions that can be used to solve the Reactive Synthesis problem using games.\n", - "If you are not familiar with how Spot represent games, please read the `games` notebook first.\n", + "If you are not familiar with how Spot represents games, please read the `games` notebook first.\n", "\n", - "In Reactive Synthesis, the goal is to build an electronic circuit that reacts to some input signals by producing some output signals, under some LTL constraints that tie both input and output. Of course the input signals are not controlable, so only job is to decide what output signal to produce.\n", + "In Reactive Synthesis, the goal is to build an electronic circuit that reacts to some input signals by producing some output signals, under some LTL constraints that tie both input and output. 
Of course the input signals are not controllable, so only job is to decide what output signal to produce.\n", "\n", "# Reactive synthesis in four steps\n", "\n", @@ -33,13 +33,13 @@ "\n", "Each of these steps is parametrized by a structure called `synthesis_info`. This structure stores some additional data needed to pass fine-tuning options or to store statistics.\n", "\n", - "The `ltl_to_game` function takes the LTL specification, and the list of controlable atomic propositions (or output signals). It returns a two-player game, where player 0 plays the input variables (and wants to invalidate the acceptance condition), and player 1 plays the output variables (and wants to satisfy the output condition). The conversion from LTL to parity automata can use one of many algorithms, and can be specified in the `synthesis_info` structure (this works like the `--algo=` option of `ltlsynt`)." + "The `ltl_to_game` function takes the LTL specification, and the list of controllable atomic propositions (or output signals). It returns a two-player game, where player 0 plays the input variables (and wants to invalidate the acceptance condition), and player 1 plays the output variables (and wants to satisfy the output condition). The conversion from LTL to parity automata can use one of many algorithms, and can be specified in the `synthesis_info` structure (this works like the `--algo=` option of `ltlsynt`)." ] }, { "cell_type": "code", "execution_count": 2, - "id": "fb49e681", + "id": "e333be09", "metadata": {}, "outputs": [ { @@ -56,649 +56,590 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "9\n", - "\n", - "9\n", - "\n", - "\n", - "\n", - "I->9\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "25\n", - "\n", - "25\n", - "\n", - "\n", - "\n", - "9->25\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "26\n", - "\n", - "26\n", - "\n", - "\n", - "\n", - "9->26\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "27\n", - "\n", - "27\n", - "\n", - "\n", - "\n", - "9->27\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "\n", - "28\n", - "\n", - "28\n", - "\n", - "\n", - "\n", - "9->28\n", - "\n", - "\n", - "i0 & i1\n", - "\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "!i1\n", - "\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "i1\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "10->1\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "\n", - "11->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", - "\n", + "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", - "\n", + "\n", "\n", - "1->12\n", - "\n", - "\n", - "!i1\n", - "\n", + "0->12\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", - "\n", + "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", - "\n", + "\n", "\n", - "1->13\n", - "\n", - "\n", - "i1\n", - "\n", + "0->13\n", + "\n", + "\n", + "i0 & i1\n", "\n", - "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + 
"10->8\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", "\n", - "12->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "12->9\n", + "\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", "\n", - "13->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "13->5\n", + "\n", + "\n", + "1\n", "\n", - "\n", - "\n", - "2\n", - "\n", - "2\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", - "\n", + "\n", "\n", - "2->14\n", - "\n", - "\n", - "i1\n", - "\n", + "1->14\n", + "\n", + "\n", + "i0\n", "\n", "\n", - "\n", + "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", - "\n", + "\n", "\n", - "2->16\n", - "\n", - "\n", - "!i1\n", - "\n", + "1->16\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", - "\n", + "\n", "\n", - "16->2\n", - "\n", - "\n", - "1\n", - "\n", + "16->1\n", + "\n", + "\n", + "1\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", "\n", - "\n", + "\n", "\n", - "3->13\n", - "\n", - "\n", - "i1\n", - "\n", + "2->14\n", + "\n", + "\n", + "i1\n", "\n", "\n", - "\n", + "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", - "\n", + "\n", "\n", - "3->17\n", - "\n", - "\n", - "!i1\n", - "\n", + "2->17\n", + "\n", + "\n", + "!i1\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "1\n", "\n", - "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "3->16\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "3->17\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "3->18\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", "\n", - "17->3\n", - "\n", - "\n", - "!o0\n", - "\n", + "18->3\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "4->14\n", - "\n", - "\n", - "i0\n", - "\n", - "\n", - "\n", - "\n", - "18\n", - "\n", - "18\n", - "\n", - "\n", - "\n", - "4->18\n", - "\n", - "\n", - "!i0\n", - "\n", - "\n", - "\n", - "\n", - "18->4\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "5\n", - "\n", - "5\n", - "\n", - "\n", - "\n", - "5->14\n", - "\n", - "\n", - "i0 & i1\n", - "\n", - "\n", - "\n", - "\n", - "5->16\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "5->18\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "4\n", "\n", "\n", - "\n", + "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", - "\n", - "\n", - "5->19\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "19->5\n", - "\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "\n", - "6\n", - "\n", - "6\n", - "\n", - "\n", - "\n", - "6->10\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "6->11\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "4->19\n", + "\n", + "\n", + "!i1\n", "\n", "\n", - "\n", + "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", - "\n", - "\n", - "6->20\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "4->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "19->4\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + 
"20->5\n", + "\n", + "\n", + "!o0\n", "\n", "\n", - "\n", + "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", - "\n", - "\n", - "6->21\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "\n", - "20->4\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "20->7\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "\n", - "21->4\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "\n", - "21->6\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "\n", - "7->12\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "7->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", + "\n", + "\n", + "5->21\n", + "\n", + "\n", + "!i1\n", "\n", "\n", - "\n", + "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", - "\n", - "\n", - "7->22\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "5->22\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "21->4\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "22->5\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "6->19\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "6->20\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", - "\n", + "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", - "\n", - "\n", - "7->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", - "\n", - "\n", - "\n", - "22->4\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "\n", - "22->7\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "\n", - "23->4\n", - "\n", - "\n", - "o0\n", - "\n", - "\n", - "\n", - "\n", - "23->6\n", - "\n", - "\n", - "!o0\n", - "\n", - "\n", - "\n", - "\n", - "8\n", - "\n", - "8\n", - "\n", - "\n", - "\n", - "8->13\n", - "\n", - "\n", - "i0 & i1\n", - "\n", - "\n", - "\n", - "\n", - "8->17\n", - "\n", - "\n", - "i0 & !i1\n", - "\n", - "\n", - "\n", - "\n", - "8->23\n", - "\n", - "\n", - "!i0 & i1\n", - "\n", + "\n", + "\n", + "6->23\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", + "\n", + "\n", + "\n", + "6->24\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "23->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "23->6\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "24->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "24->9\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "7->20\n", + "\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "25\n", + "\n", + "25\n", + "\n", + "\n", + "\n", + "7->25\n", + "\n", + "\n", + "!i1\n", + "\n", + "\n", + "\n", + "25->2\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "25->7\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "8->20\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", - "\n", + "\n", "8->24\n", - "\n", - "\n", - "!i0 & !i1\n", - "\n", + "\n", + "\n", + "!i0 & i1\n", "\n", - "\n", - "\n", - "24->5\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "8->25\n", + "\n", + "\n", + "i0 & !i1\n", "\n", - "\n", - "\n", - "24->8\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "26\n", + "\n", + "26\n", "\n", - "\n", - "\n", - "25->8\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "8->26\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", - "\n", + "\n", "26->3\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "26->8\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "9->21\n", + "\n", + "\n", + "i0 & !i1\n", + "\n", + "\n", + "\n", + "9->22\n", + "\n", + 
"\n", + "i0 & i1\n", + "\n", + "\n", + "\n", + "27\n", + "\n", + "27\n", + "\n", + "\n", + "\n", + "9->27\n", + "\n", + "\n", + "!i0 & !i1\n", + "\n", + "\n", + "\n", + "28\n", + "\n", + "28\n", + "\n", + "\n", + "\n", + "9->28\n", + "\n", + "\n", + "!i0 & i1\n", + "\n", + "\n", + "\n", + "27->1\n", + "\n", + "\n", + "!o0\n", "\n", "\n", - "\n", + "\n", "27->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "o0\n", "\n", - "\n", + "\n", + "\n", + "28->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", "\n", - "28->0\n", - "\n", - "\n", - "1\n", - "\n", + "28->9\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27d57dc90> >" + " *' at 0x7f0e584de570> >" ] }, "metadata": {}, @@ -717,7 +658,7 @@ }, { "cell_type": "markdown", - "id": "3797307f", + "id": "4d030586", "metadata": {}, "source": [ "Solving the game, is done with `solve_game()` as with any game. There is also a version that takes a `synthesis_info` as second argument in case the time it takes has to be recorded. Here passing `si` or not makes no difference." @@ -726,7 +667,7 @@ { "cell_type": "code", "execution_count": 3, - "id": "62fb169f", + "id": "f13ac820", "metadata": {}, "outputs": [ { @@ -742,588 +683,529 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "Fin(\n", + "\n", + ")\n", + "[co-Büchi]\n", "\n", - "\n", - "\n", - "9\n", - "\n", - "9\n", - "\n", - "\n", - "\n", - "I->9\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "25\n", - "\n", - "25\n", - "\n", - "\n", - "\n", - "9->25\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "26\n", - "\n", - "26\n", - "\n", - "\n", - "\n", - "9->26\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "27\n", - "\n", - "27\n", - "\n", - "\n", - "\n", - "9->27\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "28\n", - "\n", - "28\n", - "\n", - "\n", - "\n", - "9->28\n", - "\n", - "\n", - "\n", - "\n", "\n", - "\n", + "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "0->10\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "11\n", - "\n", - "11\n", + "\n", + "11\n", "\n", "\n", "\n", "0->11\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "1\n", - "\n", - "1\n", - "\n", - "\n", - "\n", - "10->1\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "11->0\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "12\n", - "\n", - "12\n", + "\n", + "12\n", "\n", - "\n", + "\n", "\n", - "1->12\n", - "\n", - "\n", - "\n", + "0->12\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "13\n", - "\n", - "13\n", + "\n", + "13\n", "\n", - "\n", + "\n", "\n", - "1->13\n", - "\n", - "\n", - "\n", + "0->13\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "10->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", "\n", - "12->1\n", - "\n", - "\n", - "\n", + "12->9\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", "\n", - "13->0\n", - "\n", - "\n", - "\n", + "13->5\n", + "\n", + "\n", "\n", - "\n", - "\n", - 
"2\n", - "\n", - "2\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "14\n", - "\n", - "14\n", + "\n", + "14\n", "\n", - "\n", + "\n", "\n", - "2->14\n", - "\n", - "\n", - "\n", + "1->14\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "16\n", - "\n", - "16\n", + "\n", + "16\n", "\n", - "\n", + "\n", "\n", - "2->16\n", - "\n", - "\n", - "\n", + "1->16\n", + "\n", + "\n", "\n", "\n", "\n", "15\n", - "\n", - "15\n", + "\n", + "15\n", "\n", "\n", "\n", "14->15\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", - "\n", + "\n", "\n", - "16->2\n", - "\n", - "\n", - "\n", + "16->1\n", + "\n", + "\n", "\n", - "\n", - "\n", - "3\n", - "\n", - "3\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", "\n", - "\n", + "\n", "\n", - "3->13\n", - "\n", - "\n", - "\n", + "2->14\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "17\n", - "\n", - "17\n", + "\n", + "17\n", "\n", - "\n", + "\n", "\n", - "3->17\n", - "\n", - "\n", - "\n", + "2->17\n", + "\n", + "\n", "\n", "\n", "\n", "17->2\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "3->14\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->16\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3->17\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "18\n", + "\n", + "18\n", + "\n", + "\n", + "\n", + "3->18\n", + "\n", + "\n", + "\n", + "\n", "\n", - "17->3\n", - "\n", - "\n", - "\n", + "18->3\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", - "\n", - "\n", - "\n", - "4->14\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "18\n", - "\n", - "18\n", - "\n", - "\n", - "\n", - "4->18\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "18->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5\n", - "\n", - "5\n", - "\n", - "\n", - "\n", - "5->14\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5->16\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "5->18\n", - "\n", - "\n", - "\n", + "\n", + "4\n", "\n", "\n", - "\n", + "\n", "19\n", - "\n", - "19\n", + "\n", + "19\n", "\n", - "\n", - "\n", - "5->19\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "19->5\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "6\n", - "\n", - "6\n", - "\n", - "\n", - "\n", - "6->10\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "6->11\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "4->19\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "20\n", - "\n", - "20\n", + "\n", + "20\n", "\n", - "\n", - "\n", - "6->20\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "4->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "19->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "20->5\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "21\n", - "\n", - "21\n", + "\n", + "21\n", "\n", - "\n", - "\n", - "6->21\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "20->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7\n", - "\n", - "7\n", - "\n", - "\n", - "\n", - "20->7\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "21->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "21->6\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7->12\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "7->13\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "5->21\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "22\n", - "\n", - "22\n", + "\n", + "22\n", "\n", - "\n", - "\n", - "7->22\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "5->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "21->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "22->5\n", + "\n", + "\n", + 
"\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "6->19\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6->20\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "23\n", - "\n", - "23\n", + "\n", + "23\n", "\n", - "\n", - "\n", - "7->23\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "22->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "22->7\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "23->4\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "23->6\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8\n", - "\n", - "8\n", - "\n", - "\n", - "\n", - "8->13\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8->17\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "8->23\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "6->23\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "24\n", - "\n", - "24\n", + "\n", + "24\n", + "\n", + "\n", + "\n", + "6->24\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "23->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "24->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7->20\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25\n", + "\n", + "25\n", + "\n", + "\n", + "\n", + "7->25\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "25->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8->20\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "8->24\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", - "\n", - "24->5\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "8->25\n", + "\n", + "\n", "\n", - "\n", - "\n", - "24->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "26\n", + "\n", + "26\n", "\n", - "\n", - "\n", - "25->8\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "8->26\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "26->3\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "26->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->21\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->22\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "27\n", + "\n", + "27\n", + "\n", + "\n", + "\n", + "9->27\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "28\n", + "\n", + "28\n", + "\n", + "\n", + "\n", + "9->28\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "27->1\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "27->6\n", - "\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "28->1\n", + "\n", + "\n", + "\n", + "\n", "\n", - "28->0\n", - "\n", - "\n", - "\n", + "28->9\n", + "\n", + "\n", "\n", "\n", "\n", "15->14\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n" @@ -1345,16 +1227,16 @@ }, { "cell_type": "markdown", - "id": "d5a53d3f", + "id": "98aa1402", "metadata": {}, "source": [ - "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). The output should be interpreted as a mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible possibles inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." + "Once a strategy has been found, it can be extracted as an automaton and simplified using 6 different levels (the default is 2). 
The output should be interpreted as a Mealy automaton, where transition have the form `(ins)&(outs)` where `ins` and `outs` are Boolean formulas representing possible inputs and outputs (they could be more than just conjunctions of atomic proposition). Mealy machines with this type of labels are called \"separated\" in Spot." ] }, { "cell_type": "code", "execution_count": 4, - "id": "cdf8f5f1", + "id": "4c93add7", "metadata": {}, "outputs": [ { @@ -1370,309 +1252,309 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "3->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "3->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "4->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + 
"/\n", + "\n", + "o0\n", "\n", "\n", "\n", "5->4\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->5\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->3\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->4\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->5\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "6->6\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e5855c9f0> >" ] }, "metadata": {}, @@ -1691,175 +1573,175 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "1\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "1\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "2->2\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e5855cb10> >" ] }, "metadata": {}, @@ -1869,7 +1751,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "simplification lvl 2 : 
bisimulation-based reduction with output output assignement\n" + "simplification lvl 2 : bisimulation-based reduction with output assignement\n" ] }, { @@ -1878,125 +1760,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "I->1\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e5855ccf0> >" ] }, "metadata": {}, @@ -2015,81 +1897,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e5855cd80> >" ] }, "metadata": {}, @@ -2108,81 +1990,81 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", - "\n", + "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", - "\n", + "\n", "0->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", - "\n", + "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i1\n", + "/\n", + "\n", 
+ "o0\n", "\n", "\n", - "\n", + "\n", "1->1\n", - "\n", - "\n", - "\n", - "i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e584defc0> >" ] }, "metadata": {}, @@ -2201,125 +2083,125 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - "" + " *' at 0x7f0e5855ca20> >" ] }, "metadata": {}, @@ -2330,14 +2212,14 @@ "# We have different levels of simplification:\n", "# 0 : No simplification\n", "# 1 : bisimulation-based reduction\n", - "# 2 : bisimulation-based reduction with output output assignement\n", + "# 2 : bisimulation-based reduction with output assignement\n", "# 3 : SAT-based exact minimization\n", "# 4 : First 1 then 3 (exact)\n", "# 5 : First 2 then 3 (not exact)\n", "\n", "descr = [\"0 : No simplification\", \n", " \"1 : bisimulation-based reduction\", \n", - " \"2 : bisimulation-based reduction with output output assignement\",\n", + " \"2 : bisimulation-based reduction with output assignement\",\n", " \"3 : SAT-based exact minimization\",\n", " \"4 : First 1 then 3 (exact)\",\n", " \"5 : First 2 then 3 (not exact)\"]\n", @@ -2346,13 +2228,14 @@ "for i in range(6):\n", " print(\"simplification lvl \", descr[i])\n", " si.minimize_lvl = i\n", - " mealy = spot.solved_game_to_separated_mealy(game, si)\n", - " display(mealy.show())" + " mealy = spot.solved_game_to_mealy(game, si)\n", + " spot.simplify_mealy_here(mealy, si.minimize_lvl, False)\n", + " display(mealy)" ] }, { "cell_type": "markdown", - "id": "511093c3", + "id": "9d8d52f6", "metadata": {}, "source": [ "If needed, a separated Mealy machine can be turned into game shape using `split_sepearated_mealy()`, which is more efficient than `split_2step()`." @@ -2361,7 +2244,7 @@ { "cell_type": "code", "execution_count": 5, - "id": "cc977286", + "id": "707f4cf6", "metadata": {}, "outputs": [ { @@ -2370,260 +2253,260 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "\n", - "!i0 & i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", - "\n", - "!i0 & !i1\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "!i0 & !i1\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "0->3\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "2->1\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "3->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "i0 & i1\n", + "\n", + "\n", + "i0 & i1\n", "\n", "\n", "\n", "1->4\n", - "\n", - "\n", - "!i0 & i1\n", + "\n", + "\n", + "!i0 & i1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "i0 & !i1\n", + "\n", + "\n", + "i0 & !i1\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "!i0 & !i1\n", + "\n", + "\n", + "!i0 & !i1\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", @@ -2643,12 +2526,12 @@ }, { "cell_type": "markdown", - "id": "aa6fe484", + "id": "b9e4412e", "metadata": {}, "source": [ - "# Converting the separated mealy machine to AIGER\n", + "# Converting the separated Mealy machine to AIG\n", "\n", - "A separated mealy machine can be converted to a circuit in the [AIGER format](http://fmv.jku.at/aiger/FORMAT.aiger) using `mealy_machine_to_aig()`. This takes a second argument specifying what type of encoding to use (exactly like `ltlsynt`'s `--aiger=...` option). \n", + "A separated Mealy machine can be converted to a circuit in the [AIGER format](http://fmv.jku.at/aiger/FORMAT.aiger) using `mealy_machine_to_aig()`. This takes a second argument specifying what type of encoding to use (exactly like `ltlsynt`'s `--aiger=...` option). \n", "\n", "In this case, the circuit is quite simple: `o0` should be the negation of previous value of `i1`. This is done by storing the value of `i1` in a latch. And the value if `i0` can be ignored." 
] @@ -2656,7 +2539,7 @@ { "cell_type": "code", "execution_count": 6, - "id": "78261ec4", + "id": "9f344931", "metadata": {}, "outputs": [ { @@ -2665,60 +2548,60 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584deae0> >" ] }, "metadata": {}, @@ -2732,7 +2615,7 @@ }, { "cell_type": "markdown", - "id": "f95dc6b7", + "id": "92bbe8d0", "metadata": {}, "source": [ "While we are at it, let us mention that you can render those circuits horizontally as follows:" @@ -2741,7 +2624,7 @@ { "cell_type": "code", "execution_count": 7, - "id": "14410565", + "id": "3ae7ce32", "metadata": {}, "outputs": [ { @@ -2750,54 +2633,54 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:w\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "2\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "2->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n" @@ -2817,16 +2700,16 @@ }, { "cell_type": "markdown", - "id": "9ccbc2e2", + "id": "44fbc0ac", "metadata": {}, "source": [ - "To encode the circuit in the aig format (ASCII version) use:" + "To encode the circuit in the AIGER format (ASCII version) use:" ] }, { "cell_type": "code", "execution_count": 8, - "id": "06e485d0", + "id": "566715d5", "metadata": {}, "outputs": [ { @@ -2850,7 +2733,7 @@ }, { "cell_type": "markdown", - "id": "5f006648", + "id": "ef304f36", "metadata": {}, "source": [ "# Adding more inputs and outputs by force" @@ -2858,7 +2741,7 @@ }, { "cell_type": "markdown", - "id": "9905208f", + "id": "5c2b0b78", "metadata": {}, "source": [ "It can happen that propositions declared as output are ommited in the aig circuit (either because they are not part of the specification, or because they do not appear in the winning strategy). 
In that case those \n", @@ -2870,7 +2753,7 @@ { "cell_type": "code", "execution_count": 9, - "id": "560a7e46", + "id": "874c7df1", "metadata": {}, "outputs": [ { @@ -2879,167 +2762,151 @@ "\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))))\n", - "[parity max odd 6]\n", + "\n", + "\n", + "\n", + "Inf(\n", + "\n", + ")\n", + "[Büchi]\n", "\n", "\n", "\n", "3\n", - "\n", - "3\n", + "\n", + "3\n", "\n", "\n", "\n", "I->3\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "3->6\n", - "\n", - "\n", - "i0\n", - "\n", + "\n", + "\n", + "i0\n", "\n", "\n", "\n", "7\n", - "\n", - "7\n", + "\n", + "7\n", "\n", "\n", "\n", "3->7\n", - "\n", - "\n", - "!i0\n", - "\n", + "\n", + "\n", + "!i0\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "4\n", - "\n", - "4\n", + "\n", + "4\n", "\n", "\n", "\n", "0->4\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "4->0\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "5\n", - "\n", - "5\n", + "\n", + "5\n", "\n", "\n", "\n", "1->5\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", + "\n", "\n", "\n", "\n", "5->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", - "1\n", - "\n", + "\n", + "\n", + "1\n", "\n", "\n", "\n", "6->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->2\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "7->1\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27ca90390> >" + " *' at 0x7f0e5855cb70> >" ] }, "metadata": {}, @@ -3051,70 +2918,268 @@ "\n", "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "0->4\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "2->1\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "4->3\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "1->5\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "5->1\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f0e5855cc60> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "!i0\n", + "\n", + "\n", + "\n", + "2->0\n", + "\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "3->1\n", + "\n", + "\n", + "!o0\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "i0\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "i0\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "!i0\n", - "/\n", + "!i0\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", "\n", "1->1\n", - "\n", - "\n", + "\n", + "\n", "\n", - "1\n", - "/\n", + "1\n", + "/\n", "\n", - "!o0\n", + "!o0\n", "\n", "\n", - "\n" + "\n", + "
" ], "text/plain": [ - " *' at 0x7ff27d57dd20> >" + "" ] }, "metadata": {}, @@ -3126,72 +3191,72 @@ "\n", "\n", - "\n", "\n", "\n", + " viewBox=\"0.00 0.00 143.20 352.00\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n", "\n", - "\n", + "\n", "\n", "\n", "4\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "6->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "6->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e5855c9f0> >" ] }, "metadata": {}, @@ -3203,15 +3268,17 @@ "spot.solve_game(game)\n", "spot.highlight_strategy(game)\n", "display(game)\n", - "mealy = spot.solved_game_to_separated_mealy(game)\n", + "mealy = spot.solved_game_to_mealy(game)\n", "display(mealy)\n", + "spot.simplify_mealy_here(mealy, 2, True)\n", + "display_inline(mealy, spot.unsplit_mealy(mealy))\n", "aig = spot.mealy_machine_to_aig(mealy, \"isop\")\n", "display(aig)" ] }, { "cell_type": "markdown", - "id": "06d42ec3", + "id": "c564dba3", "metadata": {}, "source": [ "To force the presence of extra variables in the circuit, they can be passed to `mealy_machine_to_aig()`." @@ -3220,7 +3287,7 @@ { "cell_type": "code", "execution_count": 10, - "id": "6ea759ea", + "id": "c31a3b38", "metadata": {}, "outputs": [ { @@ -3229,96 +3296,96 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "6\n", - "\n", - "L0_out\n", + "\n", + "L0_out\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "6->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "L0\n", - "\n", - "L0_in\n", + "\n", + "L0_in\n", "\n", "\n", "\n", "8->L0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "8->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "0\n", - "\n", - "False\n", + "\n", + "False\n", "\n", "\n", "\n", "0->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e5855c900> >" ] }, "metadata": {}, @@ -3331,22 +3398,27 @@ }, { "cell_type": "markdown", - "id": "4135f43e", + "id": "3323fc84", "metadata": {}, "source": [ - "# Combining mealy machines\n", + "# Combining Mealy machines\n", "\n", - "It can happen that the complet specification of the controller can be separated into sub-specifications with DISJOINT output propositions, see Finkbeiner et al. Specification Decomposition for Reactive Synthesis.\n", - "This results in multiple mealy machines which have to be converted into one single aiger circuit.\n", + "It can happen that the complete specification of the controller can be separated into sub-specifications with DISJOINT output propositions, see Finkbeiner et al. 
Specification Decomposition for Reactive Synthesis.\n", + "This results in multiple Mealy machines which have to be converted into one single AIG circuit.\n", "\n", + "This can be done in two ways:\n", + "\n", + "1. Using the function `mealy_machines_to_aig()`, which takes a vector of separated Mealy machines as an argument.\n", + "2. Combining the Mealy machines into one before passing it to `mealy_machine_to_aig()`. This currently only supports input-complete machines of the same type (mealy/separated mealy/split mealy).\n", + "\n", + "Note that the first method is usually preferable, as it is faster.\n", + "Also note that in order for this to work, all Mealy machines need to share the same `bdd_dict`. This can be ensured by passing a common options structure." ] }, { "cell_type": "code", "execution_count": 11, - "id": "4f9be142", + "id": "5d8e4cdb", "metadata": {}, "outputs": [ { @@ -3362,158 +3434,134 @@ "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", - "\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", - "\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "Inf(\n", - "\n", - ") | (Fin(\n", - "\n", - ") & (Inf(\n", - "\n", - ") | Fin(\n", - "\n", - ")))\n", - "[parity max odd 4]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "o1\n", - "\n", + "\n", + "\n", + "o1\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "!o1\n", - "\n", + "\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3539,94 +3587,94 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "o1\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "o1\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "!o1\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "!o1\n", "\n", "\n", "\n", @@ -3643,7 +3691,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Circuit implementing both machines:\n" + "Circuit implementing both machines from a vector of machines:\n" ] }, { @@ -3652,108 +3700,286 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "o1\n", + "\n", + "o1\n", "\n", "\n", "\n", "10->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584def00> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Combining the two machines into one.\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0,0\n", + "\n", + "\n", + "\n", + "I->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0 & o1\n", + "\n", + "\n", + "\n", + "0->0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0 & !o1\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f0e5855cd20> >" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "8->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o0\n", + "\n", + "o0\n", + "\n", + "\n", + "\n", + "10->o0:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "o1\n", + "\n", + "o1\n", + "\n", + "\n", + "\n", + "10->o1:s\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "i0\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "i1\n", + "\n", + "\n", + "\n", + "4->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " *' at 0x7f0e5855c930> 
>" ] }, "metadata": {}, @@ -3773,14 +3999,21 @@ "strat2 = spot.solved_game_to_separated_mealy(g2)\n", "print(\"Reduced strategies:\")\n", "display_inline(strat1, strat2)\n", - "print(\"Circuit implementing both machines:\")\n", + "#Method 1\n", + "print(\"Circuit implementing both machines from a vector of machines:\")\n", "aig = spot.mealy_machines_to_aig([strat1, strat2], \"isop\")\n", - "display(aig)" + "display(aig)\n", + "#Method 2\n", + "strat_comb = spot.mealy_product(strat1, strat2)\n", + "print(\"Combining the two machines into one.\")\n", + "display(strat_comb)\n", + "aig_comb = spot.mealy_machine_to_aig(strat_comb, \"isop\")\n", + "display(aig_comb)" ] }, { "cell_type": "markdown", - "id": "b3985f04", + "id": "c7a1986f", "metadata": {}, "source": [ "# Reading an AIGER-file\n", @@ -3795,7 +4028,7 @@ { "cell_type": "code", "execution_count": 12, - "id": "3bc0b1f2", + "id": "a10d7e3b", "metadata": {}, "outputs": [], "source": [ @@ -3816,7 +4049,7 @@ { "cell_type": "code", "execution_count": 13, - "id": "1455e6ab", + "id": "2c40e19b", "metadata": {}, "outputs": [ { @@ -3825,108 +4058,108 @@ "\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o1\n", - "\n", - "d\n", + "\n", + "d\n", "\n", "\n", "\n", "6->o1:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "c\n", + "\n", + "c\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "a\n", + "\n", + "a\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "b\n", + "\n", + "b\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - " >" + " *' at 0x7f0e584def90> >" ] }, "metadata": {}, @@ -3941,7 +4174,7 @@ { "cell_type": "code", "execution_count": 14, - "id": "0c418256", + "id": "0ad6c566", "metadata": {}, "outputs": [ { @@ -3970,7 +4203,7 @@ { "cell_type": "code", "execution_count": 15, - "id": "bd4b6aa2", + "id": "2e1996c1", "metadata": {}, "outputs": [ { @@ -3987,16 +4220,16 @@ }, { "cell_type": "markdown", - "id": "94fd22a1", + "id": "41a8e042", "metadata": {}, "source": [ - "An aiger circuit can be transformed into a monitor/mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." + "An AIG circuit can be transformed into a monitor/Mealy machine. This can be used for instance to check that it does not intersect the negation of the specification." 
] }, { "cell_type": "code", "execution_count": 16, - "id": "b157ec16", + "id": "7399ea38", "metadata": {}, "outputs": [ { @@ -4005,52 +4238,52 @@ "\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "!a & !b\n", - "/\n", - "\n", - "!c & !d\n", - "\n", - "a & b\n", - "/\n", - "\n", - "!c & d\n", - "\n", - "(!a & b) | (a & !b)\n", - "/\n", - "\n", - "c & !d\n", + "\n", + "\n", + "\n", + "!a & !b\n", + "/\n", + "\n", + "!c & !d\n", + "\n", + "a & b\n", + "/\n", + "\n", + "!c & d\n", + "\n", + "(!a & b) | (a & !b)\n", + "/\n", + "\n", + "c & !d\n", "\n", "\n", "\n" ], "text/plain": [ - " *' at 0x7ff27ca90cf0> >" + " *' at 0x7f0e584ee4e0> >" ] }, "execution_count": 16, @@ -4064,17 +4297,17 @@ }, { "cell_type": "markdown", - "id": "671b849d", + "id": "7ac06afc", "metadata": {}, "source": [ - "Note that the generation of aiger circuits from mealy machines is flexible and accepts separated mealy machines\n", - "as well as split mealy machines." + "Note that the generation of aiger circuits from Mealy machines is flexible and accepts separated Mealy machines\n", + "as well as split Mealy machines." ] }, { "cell_type": "code", "execution_count": 17, - "id": "fcf3b73e", + "id": "bac68923", "metadata": {}, "outputs": [ { @@ -4083,114 +4316,114 @@ "
\n", "\n", - "\n", "\n", - "\n", + "\n", "\n", - "\n", + "\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", - "/\n", - "\n", - "!o0\n", + "\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", + "/\n", + "\n", + "!o0\n", "\n", "\n", "\n", "0->0\n", - "\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", - "/\n", - "\n", - "o0\n", + "\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", + "/\n", + "\n", + "o0\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", - "\n", - "\n", - "\n", - "t\n", - "[all]\n", + "\n", + "\n", + "\n", + "t\n", + "[all]\n", "\n", "\n", "\n", "0\n", - "\n", - "0\n", + "\n", + "0\n", "\n", "\n", "\n", "I->0\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "1\n", - "\n", - "1\n", + "\n", + "1\n", "\n", "\n", "\n", "0->1\n", - "\n", - "\n", - "(!i0 & !i1) | (i0 & i1)\n", + "\n", + "\n", + "(!i0 & !i1) | (i0 & i1)\n", "\n", "\n", "\n", "2\n", - "\n", - "2\n", + "\n", + "2\n", "\n", "\n", "\n", "0->2\n", - "\n", - "\n", - "(!i0 & i1) | (i0 & !i1)\n", + "\n", + "\n", + "(!i0 & i1) | (i0 & !i1)\n", "\n", "\n", "\n", "1->0\n", - "\n", - "\n", - "!o0\n", + "\n", + "\n", + "!o0\n", "\n", "\n", "\n", "2->0\n", - "\n", - "\n", - "o0\n", + "\n", + "\n", + "o0\n", "\n", "\n", "\n", @@ -4222,7 +4455,7 @@ { "cell_type": "code", "execution_count": 18, - "id": "cd06f9ab", + "id": "03ceb2a8", "metadata": {}, "outputs": [ { @@ -4231,180 +4464,180 @@ "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "
\n", "\n", - "\n", "\n", "\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "6\n", - "\n", - "6\n", + "\n", + "6\n", "\n", "\n", "\n", "10\n", - "\n", - "10\n", + "\n", + "10\n", "\n", "\n", "\n", "6->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "8\n", - "\n", - "8\n", + "\n", + "8\n", "\n", "\n", "\n", "8->10\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "o0\n", - "\n", - "o0\n", + "\n", + "o0\n", "\n", "\n", "\n", "10->o0:s\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2\n", - "\n", - "i0\n", + "\n", + "i0\n", "\n", "\n", "\n", "2->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "2->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4\n", - "\n", - "i1\n", + "\n", + "i1\n", "\n", "\n", "\n", "4->6\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "4->8\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", diff --git a/tests/python/synthesis.py b/tests/python/synthesis.py index 59022624c..98ac889d8 100644 --- a/tests/python/synthesis.py +++ b/tests/python/synthesis.py @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # A shared variable caused the 2nd call to ltl_to_game to give an incorrect # result. @@ -25,26 +27,26 @@ for i in range(0, 2): gi = spot.synthesis_info() gi.s = spot.synthesis_info.algo_LAR game = spot.ltl_to_game("(Ga) <-> (Fb)", ["b"], gi) - assert not spot.solve_game(game) + tc.assertFalse(spot.solve_game(game)) # A game can have only inputs game = spot.ltl_to_game("GFa", []) -assert(game.to_str() == """HOA: v1 +tc.assertEqual(game.to_str(), """HOA: v1 States: 3 Start: 0 AP: 1 "a" -acc-name: parity max odd 6 -Acceptance: 6 Inf(5) | (Fin(4) & (Inf(3) | (Fin(2) & (Inf(1) | Fin(0))))) -properties: trans-labels explicit-labels state-acc colored complete +acc-name: Buchi +Acceptance: 1 Inf(0) +properties: trans-labels explicit-labels trans-acc complete properties: deterministic spot-state-player: 0 1 1 controllable-AP: --BODY-- -State: 0 {1} +State: 0 [!0] 1 -[0] 2 -State: 1 {4} -[t] 0 -State: 2 {5} +[0] 2 {0} +State: 1 [t] 0 +State: 2 +[t] 0 {0} --END--""") diff --git a/tests/python/toparity.py b/tests/python/toparity.py index df226ebe4..80c2c19ef 100644 --- a/tests/python/toparity.py +++ b/tests/python/toparity.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2018-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2018-2022 Laboratoire de Recherche et Développement de # l'EPITA. # # This file is part of Spot, a model checking library. 
@@ -21,65 +21,165 @@ import spot from itertools import zip_longest from buddy import bddfalse +from unittest import TestCase +tc = TestCase() # Tests for the new version of to_parity +# It is no more a no_option as we now have more options (like iar, bscc, …) no_option = spot.to_parity_options() no_option.search_ex = False no_option.use_last = False -no_option.force_order = False +no_option.use_last_post_process = False no_option.partial_degen = False no_option.acc_clean = False no_option.parity_equiv = False +no_option.tar = False +no_option.iar = True +no_option.lar_dfs = True +no_option.bscc = True no_option.parity_prefix = False +no_option.parity_prefix_general = False +no_option.generic_emptiness = False no_option.rabin_to_buchi = False +no_option.buchi_type_to_buchi = False +no_option.parity_type_to_parity = False +no_option.reduce_col_deg = False no_option.propagate_col = False +no_option.use_generalized_rabin = False acc_clean_search_opt = spot.to_parity_options() +acc_clean_search_opt.search_ex = False +acc_clean_search_opt.use_last = False +acc_clean_search_opt.use_last_post_process = False acc_clean_search_opt.force_order = False acc_clean_search_opt.partial_degen = False +acc_clean_search_opt.acc_clean = True acc_clean_search_opt.parity_equiv = False +acc_clean_search_opt.tar = False +acc_clean_search_opt.iar = True +acc_clean_search_opt.lar_dfs = True +acc_clean_search_opt.bscc = True acc_clean_search_opt.parity_prefix = False +acc_clean_search_opt.parity_prefix_general = False +acc_clean_search_opt.generic_emptiness = False acc_clean_search_opt.rabin_to_buchi = False +acc_clean_search_opt.buchi_type_to_buchi = False +acc_clean_search_opt.parity_type_to_parity = False +acc_clean_search_opt.reduce_col_deg = False acc_clean_search_opt.propagate_col = False +acc_clean_search_opt.use_generalized_rabin = False partial_degen_opt = spot.to_parity_options() partial_degen_opt.search_ex = False +partial_degen_opt.use_last = False +partial_degen_opt.use_last_post_process = False partial_degen_opt.force_order = False +partial_degen_opt.partial_degen = True +partial_degen_opt.acc_clean = False partial_degen_opt.parity_equiv = False +partial_degen_opt.tar = False +partial_degen_opt.iar = True +partial_degen_opt.lar_dfs = True +partial_degen_opt.bscc = True partial_degen_opt.parity_prefix = False +partial_degen_opt.parity_prefix_general = False +partial_degen_opt.generic_emptiness = False partial_degen_opt.rabin_to_buchi = False +partial_degen_opt.buchi_type_to_buchi = False +partial_degen_opt.parity_type_to_parity = False +partial_degen_opt.reduce_col_deg = False partial_degen_opt.propagate_col = False +partial_degen_opt.use_generalized_rabin = False parity_equiv_opt = spot.to_parity_options() parity_equiv_opt.search_ex = False parity_equiv_opt.use_last = False -parity_equiv_opt.force_order = False +parity_equiv_opt.use_last_post_process = False parity_equiv_opt.partial_degen = False +parity_equiv_opt.acc_clean = False +parity_equiv_opt.parity_equiv = True +parity_equiv_opt.tar = False +parity_equiv_opt.iar = True +parity_equiv_opt.lar_dfs = True +parity_equiv_opt.bscc = True parity_equiv_opt.parity_prefix = False +parity_equiv_opt.parity_prefix_general = False +parity_equiv_opt.generic_emptiness = False parity_equiv_opt.rabin_to_buchi = False +parity_equiv_opt.buchi_type_to_buchi = False +parity_equiv_opt.parity_type_to_parity = False +parity_equiv_opt.reduce_col_deg = False parity_equiv_opt.propagate_col = False +parity_equiv_opt.use_generalized_rabin = False rab_to_buchi_opt = 
spot.to_parity_options() +rab_to_buchi_opt.search_ex = False rab_to_buchi_opt.use_last = False -rab_to_buchi_opt.force_order = False +rab_to_buchi_opt.use_last_post_process = False rab_to_buchi_opt.partial_degen = False -rab_to_buchi_opt.parity_equiv = False +rab_to_buchi_opt.acc_clean = False +rab_to_buchi_opt.parity_equiv = True +rab_to_buchi_opt.tar = False +rab_to_buchi_opt.iar = True +rab_to_buchi_opt.lar_dfs = False +rab_to_buchi_opt.bscc = False rab_to_buchi_opt.parity_prefix = False +rab_to_buchi_opt.parity_prefix_general = False +rab_to_buchi_opt.generic_emptiness = False +rab_to_buchi_opt.rabin_to_buchi = True +rab_to_buchi_opt.buchi_type_to_buchi = False +rab_to_buchi_opt.parity_type_to_parity = False +rab_to_buchi_opt.reduce_col_deg = False rab_to_buchi_opt.propagate_col = False +rab_to_buchi_opt.use_generalized_rabin = False -# Force to use CAR or IAR for each SCC +# Force to use CAR, IAR or TAR for each SCC use_car_opt = spot.to_parity_options() +use_car_opt.search_ex = True +use_car_opt.use_last = True +use_car_opt.use_last_post_process = True use_car_opt.partial_degen = False +use_car_opt.acc_clean = False use_car_opt.parity_equiv = False +use_car_opt.tar = True +use_car_opt.iar = True +use_car_opt.lar_dfs = True +use_car_opt.bscc = True use_car_opt.parity_prefix = False +use_car_opt.parity_prefix_general = False +use_car_opt.generic_emptiness = False use_car_opt.rabin_to_buchi = False +use_car_opt.buchi_type_to_buchi = False +use_car_opt.parity_type_to_parity = False +use_car_opt.reduce_col_deg = False use_car_opt.propagate_col = False +use_car_opt.use_generalized_rabin = False + +default_opt = spot.to_parity_options() all_opt = spot.to_parity_options() +all_opt.search_ex = True +all_opt.use_last = True +all_opt.use_last_post_process = True +all_opt.partial_degen = True +all_opt.acc_clean = True +all_opt.parity_equiv = True +all_opt.tar = True +all_opt.iar = True +all_opt.lar_dfs = True +all_opt.bscc = True +all_opt.parity_prefix = True +all_opt.parity_prefix_general = True +all_opt.generic_emptiness = True +all_opt.rabin_to_buchi = True +all_opt.buchi_type_to_buchi = True +all_opt.parity_type_to_parity = True +all_opt.reduce_col_deg = True +all_opt.propagate_col = True +all_opt.use_generalized_rabin = True all_opt.pretty_print = True - options = [ no_option, acc_clean_search_opt, @@ -87,6 +187,7 @@ options = [ parity_equiv_opt, rab_to_buchi_opt, use_car_opt, + default_opt, all_opt, None, # acd_transform ] @@ -94,19 +195,34 @@ options = [ def test(aut, expected_num_states=[], full=True): for (opt, expected_num) in zip_longest(options, expected_num_states): + if type(expected_num) is str and expected_num == 'skip': + continue if opt is not None: p1 = spot.to_parity(aut, search_ex = opt.search_ex, use_last = opt.use_last, + use_last_post_process = \ + opt.use_last_post_process, force_order = opt.force_order, partial_degen = opt.partial_degen, acc_clean = opt.acc_clean, parity_equiv = opt.parity_equiv, + tar = opt.tar, + iar = opt.iar, + lar_dfs = opt.lar_dfs, + bscc = opt.bscc, parity_prefix = opt.parity_prefix, + parity_prefix_general = \ + opt.parity_prefix_general, + generic_emptiness = opt.generic_emptiness, rabin_to_buchi = opt.rabin_to_buchi, + buchi_type_to_buchi = opt.buchi_type_to_buchi, + parity_type_to_parity = \ + opt.parity_type_to_parity, reduce_col_deg = opt.reduce_col_deg, propagate_col = opt.propagate_col, - pretty_print = opt.pretty_print, + use_generalized_rabin = \ + opt.use_generalized_rabin ) else: p1 = spot.acd_transform(aut) @@ -114,17 +230,16 @@ def 
test(aut, expected_num_states=[], full=True): if opt is not None and opt.parity_prefix is False: # Reduce the number of colors to help are_equivalent spot.reduce_parity_here(p1) - assert spot.are_equivalent(aut, p1) + tc.assertTrue(spot.are_equivalent(aut, p1)) if expected_num is not None: - # print(p1.num_states(), expected_num) - assert p1.num_states() == expected_num + tc.assertEqual(p1.num_states(), expected_num) if full and opt is not None: # Make sure passing opt is the same as setting # each argument individually p2 = spot.to_parity(aut, opt) - assert p2.num_states() == p1st - assert p2.num_edges() == p1ed - assert p2.num_sets() == p1se + tc.assertEqual(p2.num_states(), p1st) + tc.assertEqual(p2.num_edges(), p1ed) + tc.assertEqual(p2.num_sets(), p1se) test(spot.automaton("""HOA: v1 name: "(FGp0 & ((XFp0 & F!p1) | F(Gp1 & XG!p0))) | G(F!p0 & (XFp0 | F!p1) & @@ -204,7 +319,7 @@ State: 13 [0&1] 5 [!0&!1] 10 {0 1 3 5} [0&!1] 13 {1 3} ---END--"""), [35, 30, 23, 32, 31, 28, 22, 21]) +--END--"""), [30, 32, 23, 30, 33, 45, 22, 22, 21]) test(spot.automaton(""" HOA: v1 @@ -222,7 +337,7 @@ State: 1 [0&!1] 1 {4} [!0&1] 1 {0 1 2 3} [!0&!1] 1 {0 3} ---END--"""), [7, 5, 3, 6, 5, 5, 3, 3]) +--END--"""), [5, 6, 3, 5, 5, 26, 3, 3, 3]) test(spot.automaton("""HOA: v1 States: 2 @@ -238,14 +353,7 @@ State: 0 State: 1 [0&1] 1 {2 3 4} [!0&!1] 0 {1 2} ---END--"""), [9, 3, 2, 3, 3, 3, 2, 2]) - -for i,f in enumerate(spot.randltl(10, 200)): - test(spot.translate(f, "det", "G"), full=(i<50)) - -for f in spot.randltl(5, 500): - test(spot.translate(f), full=False) - +--END--"""), [9, 3, 2, 9, 9, 10, 2, 2, 2]) test(spot.automaton(""" HOA: v1 @@ -278,7 +386,7 @@ State: 3 [!0&1] 2 {1 4} [0&1] 3 {0} --END-- -"""), [80, 47, 104, 104, 102, 29, 6, 5]) +"""), [23, 104, 80, 23, 27, 17, "skip", "skip", 5]) test(spot.automaton(""" HOA: v1 @@ -312,7 +420,7 @@ State: 4 [0&!1] 4 [0&1] 4 {1 2 4} --END-- -"""), [9, 6, 7, 7, 6, 6, 6, 6]) +"""), [9, 6, 7, 9, 9, 10, 6, 6, 6]) test(spot.automaton(""" HOA: v1 @@ -334,7 +442,7 @@ State: 1 [0&!1] 1 {2 3} [0&1] 1 {1 2 4} --END-- -"""), [11, 3, 2, 3, 3, 3, 2, 2]) +"""), [6, 3, 2, 6, 6, 6, 2, 2, 2]) # Tests both the old and new version of to_parity @@ -351,7 +459,7 @@ State: 0 [!0&!1] 0 --END--""") p = spot.to_parity_old(a, True) -assert spot.are_equivalent(a, p) +tc.assertTrue(spot.are_equivalent(a, p)) test(a) a = spot.automaton(""" @@ -363,9 +471,9 @@ explicit-labels trans-acc --BODY-- State: 0 [0&1] 2 {4 5} [0&1] 4 {0 4} 4 [!0&!1] 1 {2 4} State: 5 [!0&1] 4 --END-- """) p = spot.to_parity_old(a, True) -assert p.num_states() == 22 -assert spot.are_equivalent(a, p) -test(a, [8, 6, 6, 6, 6, 6, 6, 6]) +tc.assertEqual(p.num_states(), 22) +tc.assertTrue(spot.are_equivalent(a, p)) +test(a, [8, 6, 7, 8, 6, 7, 6, 6, 6]) # Force a few edges to false, to make sure to_parity() is OK with that. 
for e in a.out(2): @@ -377,22 +485,88 @@ for e in a.out(3): e.cond = bddfalse break p = spot.to_parity_old(a, True) -assert p.num_states() == 22 -assert spot.are_equivalent(a, p) -test(a, [7, 6, 6, 6, 6, 6, 6, 6]) +tc.assertEqual(p.num_states(), 22) +tc.assertTrue(spot.are_equivalent(a, p)) +test(a, [8, 6, 7, 8, 6, 7, 6, 6, 6]) for f in spot.randltl(4, 400): d = spot.translate(f, "det", "G") p = spot.to_parity_old(d, True) - assert spot.are_equivalent(p, d) + tc.assertTrue(spot.are_equivalent(p, d)) for f in spot.randltl(5, 2000): n = spot.translate(f) p = spot.to_parity_old(n, True) - assert spot.are_equivalent(n, p) + tc.assertTrue(spot.are_equivalent(n, p)) + +for i,f in enumerate(spot.randltl(10, 200)): + test(spot.translate(f, "det", "G"), full=(i<50)) + +for f in spot.randltl(5, 500): + test(spot.translate(f), full=False) # Issue #390. a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det') b = spot.to_parity_old(a, True) -assert a.equivalent_to(b) -test(a, [7, 7, 3, 7, 7, 7, 3, 3]) +tc.assertTrue(a.equivalent_to(b)) +test(a, [8, 7, 3, 8, 8, 7, 3, 3, 3]) + +# owl-21.0 ltl2dra -f '(GFa -> GFb) & GF(c <-> Xc)' | autfilt -Hi | fmt +a = spot.automaton(""" +HOA: v1 name: "Automaton for ((((F(G(!a))) | (G(F(b))))) & (G(F(((c) <-> +(X(c)))))))" States: 11 Start: 0 AP: 3 "a" "b" "c" acc-name: Rabin 3 +Acceptance: 6 (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)) +properties: implicit-labels trans-acc complete deterministic --BODY-- +State: 0 0 {3} 0 {2 4} 3 {3} 3 {2 4} 1 0 {2 4} 2 4 {2 4} State: 1 0 0 {2 +4} 3 3 {2 4} 1 {5} 0 {2 4} 2 {5} 4 {2 4} State: 2 3 3 {2 4} 3 3 {2 4} +6 {1 5} 5 {1 2 4} 2 {1 5} 4 {1 2 4} State: 3 7 {1 3} 7 {1 2 4} 3 {1 3} +3 {1 2 4} 2 4 {2 4} 2 4 {2 4} State: 4 3 {3} 3 {2 4} 3 {3} 3 {2 4} 6 {1} +5 {1 2 4} 2 {1} 4 {1 2 4} State: 5 8 {3} 8 {2 4} 3 {3} 3 {2 4} 6 5 {2 +4} 2 4 {2 4} State: 6 8 8 {2 4} 3 3 {2 4} 6 {5} 5 {2 4} 2 {5} 4 {2 4} +State: 7 7 {3} 7 {2 4} 3 {3} 3 {2 4} 9 10 {2 4} 2 4 {2 4} State: 8 0 {1 +3} 0 {1 2 4} 3 {1 3} 3 {1 2 4} 6 5 {2 4} 2 4 {2 4} State: 9 7 7 {2 4} +3 3 {2 4} 1 {1 5} 0 {1 2 4} 2 {1 5} 4 {1 2 4} State: 10 7 {3} 7 {2 4} +3 {3} 3 {2 4} 1 {1} 0 {1 2 4} 2 {1} 4 {1 2 4} --END-- +""") +b = spot.iar_maybe(a) +tc.assertEqual(b.num_states(), 11) +tc.assertTrue(a.equivalent_to(b)) +test(a, [11, 11, 11, 11, 11, 11, 11, 11, 11]) + +a = spot.automaton(""" +HOA: v1 States: 10 Start: 0 AP: 2 "p0" "p1" acc-name: Rabin 4 Acceptance: +8 (Fin(0) & Inf(1)) | (Fin(2) & Inf(3)) | (Fin(4) & Inf(5)) | (Fin(6) +& Inf(7)) properties: implicit-labels trans-acc complete deterministic +--BODY-- State: 0 2 {7} 7 {3} 2 {7} 3 State: 1 5 {0 3} 9 {3 4} 5 {0 3} +9 {3 4} State: 2 9 {1 6} 9 {1 6} 9 {1 6} 9 {1 6} State: 3 3 {4} 9 {0} +1 {4} 4 {5 6} State: 4 7 8 {1 5 7} 9 {3 7} 8 {1 5 7} State: 5 6 {4} 9 +{1 2 6} 6 {4} 9 {1 2 6} State: 6 1 {3 7} 1 {3 7} 1 {3 7} 1 {3 7} State: +7 1 {3 6} 8 {2} 1 {3 6} 8 {2} State: 8 8 {3 4 7} 3 {2} 8 {3 4 7} 3 {2} +State: 9 3 {4} 2 3 {4} 6 --END-- +""") +b = spot.iar_maybe(a) +tc.assertEqual(b.num_states(), 87) +tc.assertTrue(a.equivalent_to(b)) +test(a, [87, 91, 91, 87, 87, 87, 51, 35, 21]) + +a = spot.automaton("""HOA: v1 +States: 4 +Start: 0 +AP: 2 "p0" "p1" +Acceptance: 2 Fin(1) & Fin(0) +properties: trans-labels explicit-labels state-acc +--BODY-- +State: 0 +[!0&!1] 2 +[!0&!1] 1 +State: 1 +[!0&1] 0 +[0&1] 3 +State: 2 +[0&!1] 1 +State: 3 {0} +[!0&1] 3 +[!0&!1] 1 +--END--""") +b = spot.parity_type_to_parity(a) +tc.assertTrue(spot.are_equivalent(a, b)) diff --git a/tests/python/toweak.py b/tests/python/toweak.py index 
b2d908037..23dcf66fa 100644 --- a/tests/python/toweak.py +++ b/tests/python/toweak.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2018, 2020 Laboratoire de Recherche et Développement -# de l'Epita +# Copyright (C) 2017, 2018, 2020, 2022 Laboratoire de Recherche et +# Développement de l'Epita # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() phi1 = """GFb X(!b | GF!a) @@ -33,7 +35,7 @@ b | (a & XF(b R a)) | (!a & XG(!b U !a))""" def test_phi(phi): a = spot.translate(phi, 'GeneralizedBuchi', 'SBAcc') res = spot.to_weak_alternating(spot.dualize(a)) - assert res.equivalent_to(spot.formula.Not(spot.formula(phi))) + tc.assertTrue(res.equivalent_to(spot.formula.Not(spot.formula(phi)))) for p in phi1.split('\n'): @@ -83,4 +85,4 @@ State: 6 --END-- """) a2 = spot.to_weak_alternating(a2) -assert a2.equivalent_to(phi2) +tc.assertTrue(a2.equivalent_to(phi2)) diff --git a/tests/python/tra2tba.py b/tests/python/tra2tba.py index b303c010b..354ced630 100644 --- a/tests/python/tra2tba.py +++ b/tests/python/tra2tba.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016-2018, 2020-2021 Laboratoire de Recherche +# Copyright (C) 2016-2018, 2020-2022 Laboratoire de Recherche # et Développement de l'Epita # # This file is part of Spot, a model checking library. @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() # CPython use reference counting, so that automata are destructed # when we expect them to be. However other implementations like @@ -57,7 +59,7 @@ State: 1 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 2. aut = spot.automaton(""" @@ -97,7 +99,7 @@ State: 2 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 3. aut = spot.automaton(""" @@ -128,7 +130,7 @@ State: 0 --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 4. aut = spot.automaton(""" @@ -168,7 +170,7 @@ State: 2 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 5. aut = spot.automaton(""" @@ -214,7 +216,7 @@ State: 3 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 6. aut = spot.automaton(""" @@ -257,7 +259,7 @@ State: 2 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 7. aut = spot.automaton(""" @@ -292,7 +294,7 @@ State: 1 {0} --END--""" res = spot.remove_fin(aut) -assert(res.to_str('hoa') == exp) +tc.assertEqual(res.to_str('hoa'), exp) # Test 8. aut = spot.automaton(""" @@ -372,9 +374,9 @@ State: 7 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 9. aut = spot.automaton(""" @@ -411,9 +413,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 10. 
aut = spot.automaton(""" @@ -453,9 +455,9 @@ State: 2 {0} res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 11. aut = spot.automaton(""" @@ -493,9 +495,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Different order for rabin_to_buchi_if_realizable() due to merge_edges() not # being called. This is on purpose: the edge order should match exactly the @@ -518,9 +520,9 @@ State: 1 --END--""" res = spot.rabin_to_buchi_if_realizable(aut) if is_cpython: - assert(res.to_str('hoa') == exp2) + tc.assertEqual(res.to_str('hoa'), exp2) else: - assert(res.equivalent_to(spot.automaton(exp2))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp2))) # Test 12. aut = spot.automaton(""" @@ -565,9 +567,9 @@ State: 3 {0} res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # Test 13. aut = spot.automaton(""" @@ -615,9 +617,9 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) # rabin_to_buchi_if_realizable() does not call merge_edges() on purpose: the # edge order should match exactly the original automaton. @@ -644,9 +646,9 @@ State: 1 res = spot.rabin_to_buchi_if_realizable(aut) if is_cpython: - assert(res.to_str('hoa') == exp2) + tc.assertEqual(res.to_str('hoa'), exp2) else: - assert(res.equivalent_to(spot.automaton(exp2))) + tc.assertTrue(res.equivalent_to(spot.automaton(exp2))) # Test 14. aut = spot.automaton(""" @@ -681,7 +683,7 @@ State: 1 res = spot.remove_fin(aut) if is_cpython: - assert(res.to_str('hoa') == exp) + tc.assertEqual(res.to_str('hoa'), exp) else: - assert(res.equivalent_to(spot.automaton(exp))) -assert spot.rabin_to_buchi_if_realizable(aut) is None + tc.assertTrue(res.equivalent_to(spot.automaton(exp))) +tc.assertIsNone(spot.rabin_to_buchi_if_realizable(aut)) diff --git a/tests/python/trival.py b/tests/python/trival.py index 8fcf6a1fa..ea844e29c 100644 --- a/tests/python/trival.py +++ b/tests/python/trival.py @@ -1,5 +1,5 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2016, 2018 Laboratoire de Recherche et Développement +# Copyright (C) 2016, 2018, 2022 Laboratoire de Recherche et Développement # de l'Epita # # This file is part of Spot, a model checking library. @@ -18,30 +18,32 @@ # along with this program. If not, see . 
import spot +from unittest import TestCase +tc = TestCase() v1 = spot.trival() v2 = spot.trival(False) v3 = spot.trival(True) v4 = spot.trival_maybe() -assert v1 != v2 -assert v1 != v3 -assert v2 != v3 -assert v2 == spot.trival(spot.trival.no_value) -assert v2 != spot.trival(spot.trival.yes_value) -assert v4 != v2 -assert v4 != v3 -assert v2 == False -assert True == v3 -assert v2 != True -assert False != v3 -assert v4 == spot.trival_maybe() -assert v4 == spot.trival(spot.trival.maybe_value) -assert v3 -assert -v2 -assert not -v1 -assert not v1 -assert not -v3 +tc.assertNotEqual(v1, v2) +tc.assertNotEqual(v1, v3) +tc.assertNotEqual(v2, v3) +tc.assertEqual(v2, spot.trival(spot.trival.no_value)) +tc.assertNotEqual(v2, spot.trival(spot.trival.yes_value)) +tc.assertNotEqual(v4, v2) +tc.assertNotEqual(v4, v3) +tc.assertEqual(v2, False) +tc.assertEqual(True, v3) +tc.assertNotEqual(v2, True) +tc.assertNotEqual(False, v3) +tc.assertEqual(v4, spot.trival_maybe()) +tc.assertEqual(v4, spot.trival(spot.trival.maybe_value)) +tc.assertTrue(v3) +tc.assertTrue(-v2) +tc.assertFalse(-v1) +tc.assertFalse(v1) +tc.assertFalse(-v3) for u in (v1, v2, v3): for v in (v1, v2, v3): - assert (u & v) == -(-u | -v) + tc.assertEqual((u & v), -(-u | -v)) diff --git a/tests/python/twagraph.py b/tests/python/twagraph.py index b8834b211..1ebcb8ac5 100644 --- a/tests/python/twagraph.py +++ b/tests/python/twagraph.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2017, 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2017, 2021-2022 Laboratoire de Recherche et +# Développement de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -22,6 +22,8 @@ import spot from buddy import bddtrue, bddfalse +from unittest import TestCase +tc = TestCase() aut = spot.make_twa_graph(spot.make_bdd_dict()) @@ -29,98 +31,98 @@ try: print(aut.to_str()) exit(2) except RuntimeError as e: - assert "no state" in str(e) + tc.assertIn("no state", str(e)) try: aut.set_init_state(2) except ValueError as e: - assert "nonexisting" in str(e) + tc.assertIn("nonexisting", str(e)) try: aut.set_univ_init_state([2, 1]) except ValueError as e: - assert "nonexisting" in str(e) + tc.assertIn("nonexisting", str(e)) aut.new_states(3) aut.set_init_state(2) -assert aut.get_init_state_number() == 2 +tc.assertEqual(aut.get_init_state_number(), 2) aut.set_univ_init_state([2, 1]) -assert [2, 1] == list(aut.univ_dests(aut.get_init_state_number())) +tc.assertEqual([2, 1], list(aut.univ_dests(aut.get_init_state_number()))) try: aut.get_init_state() except RuntimeError as e: s = str(e) - assert "abstract interface" in s and "alternating automata" in s + tc.assertIn("abstract interface" in s and "alternating automata", s) cpy = spot.make_twa_graph(aut, spot.twa_prop_set.all()) -assert aut.to_str() == cpy.to_str() +tc.assertEqual(aut.to_str(), cpy.to_str()) all = aut.set_buchi() -assert aut.to_str() != cpy.to_str() +tc.assertNotEqual(aut.to_str(), cpy.to_str()) cpy = spot.make_twa_graph(aut, spot.twa_prop_set.all()) aut.new_acc_edge(0, 1, bddtrue, True) -assert aut.num_edges() == 1 + cpy.num_edges() +tc.assertEqual(aut.num_edges(), 1 + cpy.num_edges()) aut.prop_universal(True) aut.set_name("some name") cpy = spot.make_twa_graph(aut, spot.twa_prop_set(False, False, False, False, False, False)) -assert cpy.prop_universal() != aut.prop_universal() -assert cpy.prop_universal() == spot.trival.maybe() -assert cpy.get_name() == None +tc.assertNotEqual(cpy.prop_universal(), aut.prop_universal()) 
+tc.assertEqual(cpy.prop_universal(), spot.trival.maybe()) +tc.assertEqual(cpy.get_name(), None) cpy = spot.make_twa_graph(aut, spot.twa_prop_set(False, False, False, False, False, False), True) -assert cpy.get_name() == "some name" +tc.assertEqual(cpy.get_name(), "some name") from copy import copy cpy = copy(aut) -assert aut.to_str() == cpy.to_str() +tc.assertEqual(aut.to_str(), cpy.to_str()) cpy.set_init_state(1) -assert [2, 1] == list(aut.univ_dests(aut.get_init_state_number())) -assert cpy.get_init_state_number() == 1 -assert cpy.get_name() == "some name" +tc.assertEqual([2, 1], list(aut.univ_dests(aut.get_init_state_number()))) +tc.assertEqual(cpy.get_init_state_number(), 1) +tc.assertEqual(cpy.get_name(), "some name") try: s = aut.state_acc_sets(0) except RuntimeError as e: - assert "state-based acceptance" in str(e) + tc.assertIn("state-based acceptance", str(e)) try: s = aut.state_is_accepting(0) except RuntimeError as e: - assert "state-based acceptance" in str(e) + tc.assertIn("state-based acceptance", str(e)) aut.prop_state_acc(True) -assert aut.state_acc_sets(0) == all -assert aut.state_is_accepting(0) == True +tc.assertEqual(aut.state_acc_sets(0), all) +tc.assertEqual(aut.state_is_accepting(0), True) aut.set_init_state(0) aut.purge_unreachable_states() i = aut.get_init_state() -assert aut.state_is_accepting(i) == True +tc.assertEqual(aut.state_is_accepting(i), True) it = aut.succ_iter(i) it.first() -assert aut.edge_number(it) == 1 -assert aut.state_number(it.dst()) == 1 -assert aut.edge_storage(it).src == 0 -assert aut.edge_storage(1).src == 0 -assert aut.edge_data(it).cond == bddtrue -assert aut.edge_data(1).cond == bddtrue +tc.assertEqual(aut.edge_number(it), 1) +tc.assertEqual(aut.state_number(it.dst()), 1) +tc.assertEqual(aut.edge_storage(it).src, 0) +tc.assertEqual(aut.edge_storage(1).src, 0) +tc.assertEqual(aut.edge_data(it).cond, bddtrue) +tc.assertEqual(aut.edge_data(1).cond, bddtrue) aut.release_iter(it) aut.purge_dead_states() i = aut.get_init_state() -assert aut.state_is_accepting(i) == False +tc.assertEqual(aut.state_is_accepting(i), False) aut = spot.translate('FGa') # Kill the edge between state 0 and 1 -assert aut.edge_storage(2).src == 0 -assert aut.edge_storage(2).dst == 1 +tc.assertEqual(aut.edge_storage(2).src, 0) +tc.assertEqual(aut.edge_storage(2).dst, 1) aut.edge_data(2).cond = bddfalse -assert aut.num_edges() == 3 -assert aut.num_states() == 2 +tc.assertEqual(aut.num_edges(), 3) +tc.assertEqual(aut.num_states(), 2) aut.purge_dead_states() -assert aut.num_edges() == 1 -assert aut.num_states() == 1 +tc.assertEqual(aut.num_edges(), 1) +tc.assertEqual(aut.num_states(), 1) diff --git a/tests/python/zlktree.ipynb b/tests/python/zlktree.ipynb index d46e2ce2c..c9eb3503d 100644 --- a/tests/python/zlktree.ipynb +++ b/tests/python/zlktree.ipynb @@ -216,7 +216,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "execution_count": 2, @@ -640,7 +640,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328600> >" + " *' at 0x7f82c009d7a0> >" ] }, "execution_count": 10, @@ -1063,7 +1063,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c493284b0> >" + " *' at 0x7f82c009c630> >" ] }, "execution_count": 11, @@ -1256,7 +1256,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328f30> >" + " *' at 0x7f82c009c6c0> >" ] }, "execution_count": 13, @@ -1701,7 +1701,7 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49330420> >" + " *' at 0x7f82c009c480> >" ] }, "execution_count": 14, @@ -2096,7 +2096,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2427,7 +2427,7 @@ "\n" ], 
"text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2513,7 +2513,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2624,7 +2624,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2662,7 +2662,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2700,7 +2700,7 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, "metadata": {}, @@ -2717,6 +2717,237 @@ " display(tcond)" ] }, + { + "cell_type": "markdown", + "id": "77db26c3", + "metadata": {}, + "source": [ + "## `zielonka_tree_options`\n", + "\n", + "The `zielonka_tree` class accepts a few options that can alter its behaviour.\n", + "\n", + "Options `CHECK_RABIN`, `CHECK_STREETT`, `CHECK_PARITY` can be combined with\n", + "`ABORT_WRONG_SHAPE` to abort the construction as soon as it is detected that the Zielonka tree has the wrong shape. When this happens, the number of branchs of the tree is set to 0.\n", + "\n", + "For instance we can check that the original acceptance condition does not behaves like a Parity condition." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "4fa47daf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(4, (Fin(0) & Inf(1) & (Inf(2) | Fin(3))) | ((Inf(0) | Fin(1)) & Fin(2) & Inf(3)))\n", + "0\n" + ] + } + ], + "source": [ + "print(c)\n", + "z = spot.zielonka_tree(c, spot.zielonka_tree_options_ABORT_WRONG_SHAPE \n", + " | spot.zielonka_tree_options_CHECK_PARITY)\n", + "print(z.num_branches())" + ] + }, + { + "cell_type": "markdown", + "id": "4786f64c", + "metadata": {}, + "source": [ + "Option `MERGE_SUBTREE` will fuse identical nodes, turning the tree into a DAG. (Actually, because this tree is stored as a left-child right-sibling tree, only the children of identical nodes are merged.):" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "bc826090", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "zielonka_tree\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "{0,1,2,3}\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "{1,2,3}\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "{0,1,3}\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "{2,3}\n", + "\n", + "\n", + "\n", + "1->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "1->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "{1,3}\n", + "\n", + "\n", + "\n", + "2->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "{0,1}\n", + "\n", + "\n", + "\n", + "2->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "{3}\n", + "<7>\n", + "\n", + "\n", + "\n", + "3->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "{1}\n", + "<8>\n", + "\n", + "\n", + "\n", + "4->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "{3}\n", + "<9>\n", + "\n", + "\n", + "\n", + "4->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "{1}\n", + "<10>\n", + "\n", + "\n", + "\n", + "6->10\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + " >" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "spot.zielonka_tree(c, spot.zielonka_tree_options_MERGE_SUBTREES)" + ] + }, + { + "cell_type": 
"markdown", + "id": "9d7688b3", + "metadata": {}, + "source": [ + "Such a DAG cannot be used by `zielonka_tree_transform()`, but it saves memory if we are only checking the shape of the tree/DAG." + ] + }, { "cell_type": "markdown", "id": "75838579", @@ -2731,7 +2962,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "id": "ea3488b1", "metadata": {}, "outputs": [], @@ -2763,7 +2994,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 20, "id": "fad721c0", "metadata": {}, "outputs": [ @@ -3833,39 +4064,162 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut0 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd0 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut0 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut0 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut0 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut0 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E24\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E34\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E36\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E15\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E22\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut0 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut0 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut0 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut0 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut0 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut0 #E1\",function(){acd0_edge(1);});\n", + "acdonclick(\"#acdaut0 #E2\",function(){acd0_edge(2);});\n", + "acdonclick(\"#acdaut0 #E3\",function(){acd0_edge(3);});\n", + "acdonclick(\"#acdaut0 #E4\",function(){acd0_edge(4);});\n", + "acdonclick(\"#acdaut0 #E5\",function(){acd0_edge(5);});\n", + "acdonclick(\"#acdaut0 #E6\",function(){acd0_edge(6);});\n", + "acdonclick(\"#acdaut0 #E7\",function(){acd0_edge(7);});\n", + "acdonclick(\"#acdaut0 #E8\",function(){acd0_edge(8);});\n", + "acdonclick(\"#acdaut0 #E9\",function(){acd0_edge(9);});\n", 
+ "acdonclick(\"#acdaut0 #E10\",function(){acd0_edge(10);});\n", + "acdonclick(\"#acdaut0 #E11\",function(){acd0_edge(11);});\n", + "acdonclick(\"#acdaut0 #E12\",function(){acd0_edge(12);});\n", + "acdonclick(\"#acdaut0 #E13\",function(){acd0_edge(13);});\n", + "acdonclick(\"#acdaut0 #E14\",function(){acd0_edge(14);});\n", + "acdonclick(\"#acdaut0 #E15\",function(){acd0_edge(15);});\n", + "acdonclick(\"#acdaut0 #E16\",function(){acd0_edge(16);});\n", + "acdonclick(\"#acdaut0 #E17\",function(){acd0_edge(17);});\n", + "acdonclick(\"#acdaut0 #E18\",function(){acd0_edge(18);});\n", + "acdonclick(\"#acdaut0 #E19\",function(){acd0_edge(19);});\n", + "acdonclick(\"#acdaut0 #E20\",function(){acd0_edge(20);});\n", + "acdonclick(\"#acdaut0 #E21\",function(){acd0_edge(21);});\n", + "acdonclick(\"#acdaut0 #E22\",function(){acd0_edge(22);});\n", + "acdonclick(\"#acdaut0 #E23\",function(){acd0_edge(23);});\n", + "acdonclick(\"#acdaut0 #E24\",function(){acd0_edge(24);});\n", + "acdonclick(\"#acdaut0 #E25\",function(){acd0_edge(25);});\n", + "acdonclick(\"#acdaut0 #E26\",function(){acd0_edge(26);});\n", + "acdonclick(\"#acdaut0 #E27\",function(){acd0_edge(27);});\n", + "acdonclick(\"#acdaut0 #E28\",function(){acd0_edge(28);});\n", + "acdonclick(\"#acdaut0 #E29\",function(){acd0_edge(29);});\n", + "acdonclick(\"#acdaut0 #E30\",function(){acd0_edge(30);});\n", + "acdonclick(\"#acdaut0 #E31\",function(){acd0_edge(31);});\n", + "acdonclick(\"#acdaut0 #E32\",function(){acd0_edge(32);});\n", + "acdonclick(\"#acdaut0 #E33\",function(){acd0_edge(33);});\n", + "acdonclick(\"#acdaut0 #E34\",function(){acd0_edge(34);});\n", + "acdonclick(\"#acdaut0 #E35\",function(){acd0_edge(35);});\n", + "acdonclick(\"#acdaut0 #E36\",function(){acd0_edge(36);});\n", + "acdonclick(\"#acdaut0 #E37\",function(){acd0_edge(37);});\n", + "acdonclick(\"#acdaut0 #E38\",function(){acd0_edge(38);});\n", + "acdonclick(\"#acdaut0 #E39\",function(){acd0_edge(39);});\n", + "acdonclick(\"#acdaut0 #E40\",function(){acd0_edge(40);});\n", + "acdonclick(\"#acdaut0 #S0\",function(){acd0_state(0);});\n", + "acdonclick(\"#acdaut0 #S1\",function(){acd0_state(1);});\n", + "acdonclick(\"#acdaut0 #S2\",function(){acd0_state(2);});\n", + "acdonclick(\"#acdaut0 #S3\",function(){acd0_state(3);});\n", + "acdonclick(\"#acdaut0 #S4\",function(){acd0_state(4);});\n", + "acdonclick(\"#acdaut0 #S5\",function(){acd0_state(5);});\n", + "acdonclick(\"#acdaut0 #S6\",function(){acd0_state(6);});\n", + "acdonclick(\"#acdaut0 #S7\",function(){acd0_state(7);});\n", + "acdonclick(\"#acdaut0 #S8\",function(){acd0_state(8);});\n", + "acdonclick(\"#acdaut0 #S9\",function(){acd0_state(9);});\n", + "acdonclick(\"#acd0 #N0\",function(){acd0_node(0, 0);});\n", + "acdonclick(\"#acd0 #N1\",function(){acd0_node(1, 1);});\n", + "acdonclick(\"#acd0 #N2\",function(){acd0_node(2, 1);});\n", + "acdonclick(\"#acd0 #N3\",function(){acd0_node(3, 1);});\n", + "acdonclick(\"#acd0 #N4\",function(){acd0_node(4, 1);});\n", + "acdonclick(\"#acd0 #N5\",function(){acd0_node(5, 1);});\n", + "acdonclick(\"#acd0 #N6\",function(){acd0_node(6, 1);});\n", + "acdonclick(\"#acd0 #N7\",function(){acd0_node(7, 1);});\n", + "acdonclick(\"#acd0 #N8\",function(){acd0_node(8, 1);});\n", + "acdonclick(\"#acd0 #N9\",function(){acd0_node(9, 0);});\n", + "acdonclick(\"#acd0 #N10\",function(){acd0_node(10, 0);});\n", + "acdonclick(\"#acd0 #N11\",function(){acd0_node(11, 0);});\n", + "acdonclick(\"#acd0 #N12\",function(){acd0_node(12, 0);});\n", + "acdonclick(\"#acd0 #N13\",function(){acd0_node(13, 0);});\n", + 
"acdonclick(\"#acd0 #N14\",function(){acd0_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 18, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -3886,7 +4240,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 21, "id": "859a993a", "metadata": {}, "outputs": [ @@ -3896,7 +4250,7 @@ "False" ] }, - "execution_count": 19, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -3932,7 +4286,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 22, "id": "a8bd0844", "metadata": {}, "outputs": [ @@ -3942,7 +4296,7 @@ "(4, 1)" ] }, - "execution_count": 20, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -3953,7 +4307,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 23, "id": "93116a05", "metadata": {}, "outputs": [ @@ -3963,7 +4317,7 @@ "(4, 1)" ] }, - "execution_count": 21, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -3984,7 +4338,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 24, "id": "23940b6a", "metadata": {}, "outputs": [ @@ -3994,7 +4348,7 @@ "(12, 0)" ] }, - "execution_count": 22, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -4005,7 +4359,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 25, "id": "de7cbd02", "metadata": {}, "outputs": [ @@ -4015,7 +4369,7 @@ "(8, 0)" ] }, - "execution_count": 23, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -4026,7 +4380,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 26, "id": "8b0305d4", "metadata": {}, "outputs": [ @@ -4036,7 +4390,7 @@ "(4, 0)" ] }, - "execution_count": 24, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -4047,7 +4401,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 27, "id": "4f0a10f5", "metadata": {}, "outputs": [ @@ -4057,7 +4411,7 @@ "(4, 1)" ] }, - "execution_count": 25, + "execution_count": 27, "metadata": {}, "output_type": "execute_result" } @@ -4094,7 +4448,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 28, "id": "2bd04c1f", "metadata": {}, "outputs": [ @@ -4104,7 +4458,7 @@ "4" ] }, - "execution_count": 26, + "execution_count": 28, "metadata": {}, "output_type": "execute_result" } @@ -4131,7 +4485,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 29, "id": "e28035e8", "metadata": {}, "outputs": [ @@ -4737,10 +5091,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328840> >" + " *' at 0x7f82c00bc870> >" ] }, - "execution_count": 27, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } @@ -4761,7 +5115,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 30, "id": "numerical-education", "metadata": {}, "outputs": [ @@ -4771,7 +5125,7 @@ "True" ] }, - "execution_count": 28, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -4790,7 +5144,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 31, "id": "3e239a0c", "metadata": {}, "outputs": [ @@ -5376,10 +5730,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49328e70> >" + " *' at 0x7f82c00bc060> >" ] }, - "execution_count": 29, + "execution_count": 31, "metadata": {}, "output_type": "execute_result" } @@ -5401,7 +5755,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 32, "id": 
"4f62e612", "metadata": {}, "outputs": [ @@ -5411,7 +5765,7 @@ "15" ] }, - "execution_count": 30, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -5422,7 +5776,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 33, "id": "20f2a45c", "metadata": {}, "outputs": [ @@ -5432,7 +5786,7 @@ "27" ] }, - "execution_count": 31, + "execution_count": 33, "metadata": {}, "output_type": "execute_result" } @@ -5461,7 +5815,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 34, "id": "7727735d", "metadata": {}, "outputs": [], @@ -5471,7 +5825,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 35, "id": "2d0bbc0b", "metadata": {}, "outputs": [ @@ -5505,7 +5859,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 36, "id": "78643aae", "metadata": {}, "outputs": [ @@ -5523,7 +5877,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 37, "id": "13a7796b", "metadata": {}, "outputs": [], @@ -5533,7 +5887,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 38, "id": "3ee900b7", "metadata": {}, "outputs": [ @@ -5564,7 +5918,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 39, "id": "e12bb020", "metadata": {}, "outputs": [], @@ -5574,9 +5928,11 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 40, "id": "813d15ed", - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [ { "data": { @@ -6644,39 +7000,162 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut1 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd1 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut1 #E9\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E11\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E25\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E27\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E28\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E33\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E35\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut1 #E31\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E32\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E39\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E7\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E17\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut1 #E1\", [\"acdN3\"]);\n", + "acdaddclasses(\"#acdaut1 #E10\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E12\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E13\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E21\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN4\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E15\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E22\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN5\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E24\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E34\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E36\", [\"acdN6\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E16\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E26\", [\"acdN7\"]);\n", + "acdaddclasses(\"#acdaut1 #E9\", [\"acdN8\"]);\n", + "acdaddclasses(\"#acdaut1 #E40\", [\"acdN9\"]);\n", + "acdaddclasses(\"#acdaut1 #E5\", [\"acdN10\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN11\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN12\"]);\n", + "acdaddclasses(\"#acdaut1 #E23\", [\"acdN13\"]);\n", + "acdaddclasses(\"#acdaut1 #E14\", [\"acdN14\"]);\n", + "acdonclick(\"#acdaut1 #E1\",function(){acd1_edge(1);});\n", + "acdonclick(\"#acdaut1 #E2\",function(){acd1_edge(2);});\n", + "acdonclick(\"#acdaut1 #E3\",function(){acd1_edge(3);});\n", + "acdonclick(\"#acdaut1 #E4\",function(){acd1_edge(4);});\n", + "acdonclick(\"#acdaut1 #E5\",function(){acd1_edge(5);});\n", + "acdonclick(\"#acdaut1 #E6\",function(){acd1_edge(6);});\n", + "acdonclick(\"#acdaut1 #E7\",function(){acd1_edge(7);});\n", + "acdonclick(\"#acdaut1 #E8\",function(){acd1_edge(8);});\n", + "acdonclick(\"#acdaut1 #E9\",function(){acd1_edge(9);});\n", 
+ "acdonclick(\"#acdaut1 #E10\",function(){acd1_edge(10);});\n", + "acdonclick(\"#acdaut1 #E11\",function(){acd1_edge(11);});\n", + "acdonclick(\"#acdaut1 #E12\",function(){acd1_edge(12);});\n", + "acdonclick(\"#acdaut1 #E13\",function(){acd1_edge(13);});\n", + "acdonclick(\"#acdaut1 #E14\",function(){acd1_edge(14);});\n", + "acdonclick(\"#acdaut1 #E15\",function(){acd1_edge(15);});\n", + "acdonclick(\"#acdaut1 #E16\",function(){acd1_edge(16);});\n", + "acdonclick(\"#acdaut1 #E17\",function(){acd1_edge(17);});\n", + "acdonclick(\"#acdaut1 #E18\",function(){acd1_edge(18);});\n", + "acdonclick(\"#acdaut1 #E19\",function(){acd1_edge(19);});\n", + "acdonclick(\"#acdaut1 #E20\",function(){acd1_edge(20);});\n", + "acdonclick(\"#acdaut1 #E21\",function(){acd1_edge(21);});\n", + "acdonclick(\"#acdaut1 #E22\",function(){acd1_edge(22);});\n", + "acdonclick(\"#acdaut1 #E23\",function(){acd1_edge(23);});\n", + "acdonclick(\"#acdaut1 #E24\",function(){acd1_edge(24);});\n", + "acdonclick(\"#acdaut1 #E25\",function(){acd1_edge(25);});\n", + "acdonclick(\"#acdaut1 #E26\",function(){acd1_edge(26);});\n", + "acdonclick(\"#acdaut1 #E27\",function(){acd1_edge(27);});\n", + "acdonclick(\"#acdaut1 #E28\",function(){acd1_edge(28);});\n", + "acdonclick(\"#acdaut1 #E29\",function(){acd1_edge(29);});\n", + "acdonclick(\"#acdaut1 #E30\",function(){acd1_edge(30);});\n", + "acdonclick(\"#acdaut1 #E31\",function(){acd1_edge(31);});\n", + "acdonclick(\"#acdaut1 #E32\",function(){acd1_edge(32);});\n", + "acdonclick(\"#acdaut1 #E33\",function(){acd1_edge(33);});\n", + "acdonclick(\"#acdaut1 #E34\",function(){acd1_edge(34);});\n", + "acdonclick(\"#acdaut1 #E35\",function(){acd1_edge(35);});\n", + "acdonclick(\"#acdaut1 #E36\",function(){acd1_edge(36);});\n", + "acdonclick(\"#acdaut1 #E37\",function(){acd1_edge(37);});\n", + "acdonclick(\"#acdaut1 #E38\",function(){acd1_edge(38);});\n", + "acdonclick(\"#acdaut1 #E39\",function(){acd1_edge(39);});\n", + "acdonclick(\"#acdaut1 #E40\",function(){acd1_edge(40);});\n", + "acdonclick(\"#acdaut1 #S0\",function(){acd1_state(0);});\n", + "acdonclick(\"#acdaut1 #S1\",function(){acd1_state(1);});\n", + "acdonclick(\"#acdaut1 #S2\",function(){acd1_state(2);});\n", + "acdonclick(\"#acdaut1 #S3\",function(){acd1_state(3);});\n", + "acdonclick(\"#acdaut1 #S4\",function(){acd1_state(4);});\n", + "acdonclick(\"#acdaut1 #S5\",function(){acd1_state(5);});\n", + "acdonclick(\"#acdaut1 #S6\",function(){acd1_state(6);});\n", + "acdonclick(\"#acdaut1 #S7\",function(){acd1_state(7);});\n", + "acdonclick(\"#acdaut1 #S8\",function(){acd1_state(8);});\n", + "acdonclick(\"#acdaut1 #S9\",function(){acd1_state(9);});\n", + "acdonclick(\"#acd1 #N0\",function(){acd1_node(0, 0);});\n", + "acdonclick(\"#acd1 #N1\",function(){acd1_node(1, 1);});\n", + "acdonclick(\"#acd1 #N2\",function(){acd1_node(2, 1);});\n", + "acdonclick(\"#acd1 #N3\",function(){acd1_node(3, 1);});\n", + "acdonclick(\"#acd1 #N4\",function(){acd1_node(4, 1);});\n", + "acdonclick(\"#acd1 #N5\",function(){acd1_node(5, 1);});\n", + "acdonclick(\"#acd1 #N6\",function(){acd1_node(6, 1);});\n", + "acdonclick(\"#acd1 #N7\",function(){acd1_node(7, 1);});\n", + "acdonclick(\"#acd1 #N8\",function(){acd1_node(8, 1);});\n", + "acdonclick(\"#acd1 #N9\",function(){acd1_node(9, 0);});\n", + "acdonclick(\"#acd1 #N10\",function(){acd1_node(10, 0);});\n", + "acdonclick(\"#acd1 #N11\",function(){acd1_node(11, 0);});\n", + "acdonclick(\"#acd1 #N12\",function(){acd1_node(12, 0);});\n", + "acdonclick(\"#acd1 #N13\",function(){acd1_node(13, 0);});\n", + 
"acdonclick(\"#acd1 #N14\",function(){acd1_node(14, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 38, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } @@ -6695,7 +7174,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 41, "id": "00acde97", "metadata": {}, "outputs": [ @@ -6705,7 +7184,7 @@ "4" ] }, - "execution_count": 39, + "execution_count": 41, "metadata": {}, "output_type": "execute_result" } @@ -6716,7 +7195,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 42, "id": "23c5f4df", "metadata": {}, "outputs": [ @@ -6726,7 +7205,7 @@ "8" ] }, - "execution_count": 40, + "execution_count": 42, "metadata": {}, "output_type": "execute_result" } @@ -6737,7 +7216,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 43, "id": "da0bbcbe", "metadata": {}, "outputs": [ @@ -6747,7 +7226,7 @@ "0" ] }, - "execution_count": 41, + "execution_count": 43, "metadata": {}, "output_type": "execute_result" } @@ -6758,7 +7237,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 44, "id": "da0dc5bc", "metadata": {}, "outputs": [ @@ -6768,7 +7247,7 @@ "8" ] }, - "execution_count": 42, + "execution_count": 44, "metadata": {}, "output_type": "execute_result" } @@ -6788,7 +7267,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 45, "id": "94999c2e", "metadata": {}, "outputs": [ @@ -7586,10 +8065,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c49342570> >" + " *' at 0x7f82c00be460> >" ] }, - "execution_count": 43, + "execution_count": 45, "metadata": {}, "output_type": "execute_result" } @@ -7611,7 +8090,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 46, "id": "b57476cf", "metadata": {}, "outputs": [ @@ -7640,7 +8119,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 47, "id": "f082b433", "metadata": {}, "outputs": [ @@ -7883,39 +8362,72 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut2 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd2 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut2 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut2 #E1\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut2 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut2 #E1\",function(){acd2_edge(1);});\n", + "acdonclick(\"#acdaut2 #E2\",function(){acd2_edge(2);});\n", + "acdonclick(\"#acdaut2 #E3\",function(){acd2_edge(3);});\n", + "acdonclick(\"#acdaut2 #E4\",function(){acd2_edge(4);});\n", + "acdonclick(\"#acdaut2 #E5\",function(){acd2_edge(5);});\n", + "acdonclick(\"#acdaut2 #E6\",function(){acd2_edge(6);});\n", + "acdonclick(\"#acdaut2 #S0\",function(){acd2_state(0);});\n", + "acdonclick(\"#acdaut2 #S1\",function(){acd2_state(1);});\n", + "acdonclick(\"#acdaut2 #S2\",function(){acd2_state(2);});\n", + "acdonclick(\"#acd2 #N0\",function(){acd2_node(0, 1);});\n", + "acdonclick(\"#acd2 #N1\",function(){acd2_node(1, 0);});\n", + "acdonclick(\"#acd2 #N2\",function(){acd2_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 45, + "execution_count": 47, "metadata": {}, "output_type": "execute_result" } @@ -7931,7 +8443,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 48, "id": "597185c0", "metadata": {}, "outputs": [ @@ -7944,11 +8456,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8122,10 +8634,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e0690> >" + " *' at 0x7f82c00bdd40> >" ] }, - "execution_count": 46, + "execution_count": 48, "metadata": {}, "output_type": "execute_result" } @@ -8154,7 +8666,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 49, "id": "a4fd4105", "metadata": {}, "outputs": [ @@ -8397,39 +8909,72 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut3 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd3 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut3 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut3 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut3 #E2\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E3\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E4\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E5\", [\"acdN2\"]);\n", + "acdaddclasses(\"#acdaut3 #E6\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut3 #E1\",function(){acd3_edge(1);});\n", + "acdonclick(\"#acdaut3 #E2\",function(){acd3_edge(2);});\n", + "acdonclick(\"#acdaut3 #E3\",function(){acd3_edge(3);});\n", + "acdonclick(\"#acdaut3 #E4\",function(){acd3_edge(4);});\n", + "acdonclick(\"#acdaut3 #E5\",function(){acd3_edge(5);});\n", + "acdonclick(\"#acdaut3 #E6\",function(){acd3_edge(6);});\n", + "acdonclick(\"#acdaut3 #S0\",function(){acd3_state(0);});\n", + "acdonclick(\"#acdaut3 #S1\",function(){acd3_state(1);});\n", + "acdonclick(\"#acdaut3 #S2\",function(){acd3_state(2);});\n", + "acdonclick(\"#acd3 #N0\",function(){acd3_node(0, 1);});\n", + "acdonclick(\"#acd3 #N1\",function(){acd3_node(1, 0);});\n", + "acdonclick(\"#acd3 #N2\",function(){acd3_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 47, + "execution_count": 49, "metadata": {}, "output_type": "execute_result" } @@ -8440,7 +8985,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 50, "id": "1a68f96a", "metadata": {}, "outputs": [ @@ -8453,11 +8998,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -8610,10 +9155,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e0de0> >" + " *' at 0x7f82c00bf300> >" ] }, - "execution_count": 48, + "execution_count": 50, "metadata": {}, "output_type": "execute_result" } @@ -8636,7 +9181,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 51, "id": "criminal-northwest", "metadata": {}, "outputs": [ @@ -8762,10 +9307,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e21e0> >" + " *' at 0x7f82c00be5b0> >" ] }, - "execution_count": 49, + "execution_count": 51, "metadata": {}, "output_type": "execute_result" } @@ -8796,7 +9341,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 52, "id": "63c7c062", "metadata": {}, "outputs": [ @@ -8874,10 +9419,10 @@ "\n" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 50, + "execution_count": 52, "metadata": {}, "output_type": "execute_result" } @@ -8888,7 +9433,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 53, "id": "balanced-investing", "metadata": {}, "outputs": [ @@ -9040,10 +9585,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2360> >" + " *' at 0x7f82c00bf5d0> >" ] }, - "execution_count": 51, + "execution_count": 53, "metadata": {}, "output_type": "execute_result" } @@ -9054,7 +9599,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 54, "id": "nutritional-rugby", "metadata": {}, 
"outputs": [], @@ -9064,7 +9609,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 55, "id": "criminal-marking", "metadata": {}, "outputs": [ @@ -9304,39 +9849,66 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut4 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd4 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut4 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E8\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut4 #E6\", [\"acdN1\"]);\n", + "acdonclick(\"#acdaut4 #E1\",function(){acd4_edge(1);});\n", + "acdonclick(\"#acdaut4 #E2\",function(){acd4_edge(2);});\n", + "acdonclick(\"#acdaut4 #E3\",function(){acd4_edge(3);});\n", + "acdonclick(\"#acdaut4 #E4\",function(){acd4_edge(4);});\n", + "acdonclick(\"#acdaut4 #E5\",function(){acd4_edge(5);});\n", + "acdonclick(\"#acdaut4 #E6\",function(){acd4_edge(6);});\n", + "acdonclick(\"#acdaut4 #E7\",function(){acd4_edge(7);});\n", + "acdonclick(\"#acdaut4 #E8\",function(){acd4_edge(8);});\n", + "acdonclick(\"#acdaut4 #S0\",function(){acd4_state(0);});\n", + "acdonclick(\"#acdaut4 #S1\",function(){acd4_state(1);});\n", + "acdonclick(\"#acd4 #N0\",function(){acd4_node(0, 1);});\n", + "acdonclick(\"#acd4 #N1\",function(){acd4_node(1, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 53, + "execution_count": 55, "metadata": {}, "output_type": "execute_result" } @@ -9347,7 +9919,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 56, "id": "e7760d51", "metadata": {}, "outputs": [ @@ -9357,7 +9929,7 @@ "0" ] }, - "execution_count": 54, + "execution_count": 56, "metadata": {}, "output_type": "execute_result" } @@ -9368,7 +9940,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 57, "id": "unusual-dependence", "metadata": { "scrolled": true @@ -9477,10 +10049,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2d50> >" + " *' at 0x7f82c00f4240> >" ] }, - "execution_count": 55, + "execution_count": 57, "metadata": {}, "output_type": "execute_result" } @@ -9491,7 +10063,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 58, "id": "d5440de1", "metadata": {}, "outputs": [ @@ -9504,11 +10076,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -9624,10 +10196,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480ea150> >" + " *' at 0x7f82c00f4090> >" ] }, - "execution_count": 56, + "execution_count": 58, "metadata": {}, "output_type": "execute_result" } @@ -9638,7 +10210,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 59, "id": "9ed0bc59", "metadata": {}, "outputs": [], @@ -9658,7 +10230,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 60, "id": "deb92971", "metadata": {}, "outputs": [ @@ -9934,39 +10506,71 @@ "\n", "\n", "
" + " acdaddclasses(\"#acdaut5 .acdN\" + node,\n", + " [acc ? \"acdacc\" : \"acdrej\", \"acdbold\"]);\n", + " acdaddclasses(\"#acd5 #N\" + node, [\"acdbold\", \"acdhigh\"]);\n", + "};acdaddclasses(\"#acdaut5 #E1\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E2\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E6\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN0\"]);\n", + "acdaddclasses(\"#acdaut5 #E1\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E3\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E4\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E5\", [\"acdN1\"]);\n", + "acdaddclasses(\"#acdaut5 #E7\", [\"acdN2\"]);\n", + "acdonclick(\"#acdaut5 #E1\",function(){acd5_edge(1);});\n", + "acdonclick(\"#acdaut5 #E2\",function(){acd5_edge(2);});\n", + "acdonclick(\"#acdaut5 #E3\",function(){acd5_edge(3);});\n", + "acdonclick(\"#acdaut5 #E4\",function(){acd5_edge(4);});\n", + "acdonclick(\"#acdaut5 #E5\",function(){acd5_edge(5);});\n", + "acdonclick(\"#acdaut5 #E6\",function(){acd5_edge(6);});\n", + "acdonclick(\"#acdaut5 #E7\",function(){acd5_edge(7);});\n", + "acdonclick(\"#acdaut5 #S0\",function(){acd5_state(0);});\n", + "acdonclick(\"#acdaut5 #S1\",function(){acd5_state(1);});\n", + "acdonclick(\"#acdaut5 #S2\",function(){acd5_state(2);});\n", + "acdonclick(\"#acdaut5 #S3\",function(){acd5_state(3);});\n", + "acdonclick(\"#acd5 #N0\",function(){acd5_node(0, 1);});\n", + "acdonclick(\"#acd5 #N1\",function(){acd5_node(1, 0);});\n", + "acdonclick(\"#acd5 #N2\",function(){acd5_node(2, 0);});\n", + "" ], "text/plain": [ - " >" + " >" ] }, - "execution_count": 58, + "execution_count": 60, "metadata": {}, "output_type": "execute_result" } @@ -9984,7 +10588,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 61, "id": "94a02201", "metadata": {}, "outputs": [ @@ -10091,10 +10695,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480e2300> >" + " *' at 0x7f82c00f50b0> >" ] }, - "execution_count": 59, + "execution_count": 61, "metadata": {}, "output_type": "execute_result" } @@ -10105,7 +10709,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 62, "id": "d484ba8f", "metadata": {}, "outputs": [ @@ -10118,11 +10722,11 @@ "\n", "\n", - "\n", - "\n", - "\n", - "[Büchi]\n", + "\n", + "\n", + "\n", + "[Büchi]\n", "\n", "\n", "\n", @@ -10221,10 +10825,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f0420> >" + " *' at 0x7f82c00f52c0> >" ] }, - "execution_count": 60, + "execution_count": 62, "metadata": {}, "output_type": "execute_result" } @@ -10235,7 +10839,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 63, "id": "3332e850", "metadata": {}, "outputs": [ @@ -10501,10 +11105,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f0840> >" + " *' at 0x7f82c00f4960> >" ] }, - "execution_count": 61, + "execution_count": 63, "metadata": {}, "output_type": "execute_result" } @@ -10525,7 +11129,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 64, "id": "german-vienna", "metadata": {}, "outputs": [ @@ -10595,10 +11199,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f30c0> >" + " *' at 0x7f82c00f5a10> >" ] }, - "execution_count": 62, + "execution_count": 64, "metadata": {}, "output_type": "execute_result" } @@ -10622,7 +11226,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 65, "id": "chemical-primary", 
"metadata": {}, "outputs": [ @@ -10632,7 +11236,7 @@ "(spot.trival_maybe(), spot.trival(True))" ] }, - "execution_count": 63, + "execution_count": 65, "metadata": {}, "output_type": "execute_result" } @@ -10643,7 +11247,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 66, "id": "hispanic-floor", "metadata": {}, "outputs": [ @@ -10706,10 +11310,10 @@ "\n" ], "text/plain": [ - " *' at 0x7f0c480f3210> >" + " *' at 0x7f82c00f5ce0> >" ] }, - "execution_count": 64, + "execution_count": 66, "metadata": {}, "output_type": "execute_result" } @@ -10720,7 +11324,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 67, "id": "central-london", "metadata": {}, "outputs": [ @@ -10730,7 +11334,7 @@ "(spot.trival(True), spot.trival(True))" ] }, - "execution_count": 65, + "execution_count": 67, "metadata": {}, "output_type": "execute_result" } @@ -10750,7 +11354,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -10764,7 +11368,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.2" + "version": "3.10.7" } }, "nbformat": 4, diff --git a/tests/python/zlktree.py b/tests/python/zlktree.py index df8fd86f0..c3cb262f2 100644 --- a/tests/python/zlktree.py +++ b/tests/python/zlktree.py @@ -1,6 +1,6 @@ # -*- mode: python; coding: utf-8 -*- -# Copyright (C) 2021 Laboratoire de Recherche et Développement de l'Epita -# (LRDE). +# Copyright (C) 2021, 2022 Laboratoire de Recherche et Développement +# de l'Epita (LRDE). # # This file is part of Spot, a model checking library. # @@ -18,6 +18,8 @@ # along with this program. If not, see . import spot +from unittest import TestCase +tc = TestCase() a = spot.automaton("""HOA: v1 States: 5 Start: 0 AP: 2 "p0" "p1" Acceptance: 4 Inf(3) | Fin(3) properties: trans-labels explicit-labels @@ -25,8 +27,8 @@ trans-acc --BODY-- State: 0 [!0&!1] 3 [!0&!1] 4 State: 1 [!0&!1] 4 {3} [0&!1] 0 {2} [!0&1] 1 {2} State: 2 [!0&1] 0 {0 2} [!0&!1] 1 State: 3 [!0&1] 2 State: 4 [0&!1] 3 --END--""") b = spot.zielonka_tree_transform(a) -assert spot.are_equivalent(a, b) -assert b.acc().is_buchi() +tc.assertTrue(spot.are_equivalent(a, b)) +tc.assertTrue(b.acc().is_buchi()) def report_missing_exception(): raise RuntimeError("missing exception") @@ -45,95 +47,96 @@ State: 2 [0&1] 8 {3} [0&1] 2 {1} [!0&1] 4 {3 4} [!0&!1] 3 {2 5} State: [!0&!1] 2 {5} [!0&!1] 0 {3} [!0&!1] 5 --END--""") aa = spot.acd(a) try: - assert aa.has_rabin_shape() + tc.assertTrue(aa.has_rabin_shape()) except RuntimeError as e: - assert 'CHECK_RABIN' in str(e) + tc.assertIn('CHECK_RABIN', str(e)) else: report_missing_exception() try: - assert not aa.has_streett_shape() + tc.assertFalse(aa.has_streett_shape()) except RuntimeError as e: - assert 'CHECK_STREETT' in str(e) + tc.assertIn('CHECK_STREETT', str(e)) else: report_missing_exception() try: - assert not aa.has_parity_shape() + tc.assertFalse(aa.has_parity_shape()) except RuntimeError as e: - assert 'CHECK_PARITY' in str(e) + tc.assertIn('CHECK_PARITY', str(e)) else: report_missing_exception() aa = spot.acd(a, spot.acd_options_CHECK_RABIN) -assert aa.has_rabin_shape() -assert aa.node_count() == 13 +tc.assertTrue(aa.has_rabin_shape()) +tc.assertEqual(aa.node_count(), 13) try: - assert not aa.has_streett_shape() + tc.assertFalse(aa.has_streett_shape()) except RuntimeError as e: - assert 'CHECK_STREETT' in str(e) + tc.assertIn('CHECK_STREETT', str(e)) else: 
report_missing_exception() try: - assert aa.has_parity_shape() + tc.assertTrue(aa.has_parity_shape()) except RuntimeError as e: - assert 'CHECK_PARITY' in str(e) + tc.assertIn('CHECK_PARITY', str(e)) else: report_missing_exception() aa = spot.acd(a, (spot.acd_options_CHECK_PARITY | spot.acd_options_ABORT_WRONG_SHAPE)) -assert aa.has_rabin_shape() -assert not aa.has_streett_shape() -assert not aa.has_parity_shape() -assert aa.node_count() == 0 +tc.assertTrue(aa.has_rabin_shape()) +tc.assertFalse(aa.has_streett_shape()) +tc.assertFalse(aa.has_parity_shape()) +tc.assertEqual(aa.node_count(), 0) + try: aa.first_branch(0) except RuntimeError as e: - assert 'ABORT_WRONG_SHAPE' in str(e) + tc.assertIn('ABORT_WRONG_SHAPE', str(e)) else: report_missing_exception() try: aa.step(0, 0) except RuntimeError as e: - assert 'incorrect branch number' in str(e) + tc.assertIn('incorrect branch number', str(e)) else: report_missing_exception() try: aa.node_acceptance(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() try: aa.edges_of_node(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() try: aa.node_level(0) except RuntimeError as e: - assert 'unknown node' in str(e) + tc.assertIn('unknown node', str(e)) else: report_missing_exception() a = spot.translate('true') a.set_acceptance(spot.acc_cond('f')) b = spot.acd_transform(a) -assert a.equivalent_to(b) +tc.assertTrue(a.equivalent_to(b)) a = spot.translate('true') a.set_acceptance(spot.acc_cond('f')) b = spot.zielonka_tree_transform(a) -assert a.equivalent_to(b) +tc.assertTrue(a.equivalent_to(b)) a = spot.automaton("""HOA: v1 name: "^ G F p0 G F p1" States: 5 Start: 2 AP: 2 "a" "b" acc-name: Rabin 2 Acceptance: 4 (Fin(0) & Inf(1)) | @@ -144,8 +147,17 @@ complete properties: deterministic --BODY-- State: 0 {0} [!0&!1] 0 2} [!0&!1] 1 [0&!1] 4 [!0&1] 3 [0&1] 2 State: 4 {0 3} [!0&!1] 0 [0&!1] 4 [!0&1] 3 [0&1] 2 --END--""") b = spot.acd_transform_sbacc(a, True) -assert str(b.acc()) == '(3, Fin(0) & (Inf(1) | Fin(2)))' -assert a.equivalent_to(b) +tc.assertEqual(str(b.acc()), '(3, Fin(0) & (Inf(1) | Fin(2)))') +tc.assertTrue(a.equivalent_to(b)) b = spot.acd_transform_sbacc(a, False) -assert str(b.acc()) == '(2, Fin(0) & Inf(1))' -assert a.equivalent_to(b) +tc.assertEqual(str(b.acc()), '(2, Fin(0) & Inf(1))') +tc.assertTrue(a.equivalent_to(b)) + + +# This used to be very slow. +c = spot.acc_cond("Rabin 9") +n = spot.zielonka_tree(c).num_branches() +tc.assertEqual(n, 362880) +opt = spot.zielonka_tree_options_MERGE_SUBTREES; +n = spot.zielonka_tree(c, opt).num_branches() +tc.assertEqual(n, 9) diff --git a/tests/run.in b/tests/run.in index d14bf52a9..7502b88f8 100755 --- a/tests/run.in +++ b/tests/run.in @@ -1,6 +1,6 @@ #!/bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2010-2011, 2014-2016, 2018-2021 Laboratoire de Recherche +# Copyright (C) 2010-2011, 2014-2016, 2018-2022 Laboratoire de Recherche # et Developpement de l'EPITA (LRDE). 
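The assertions added to `zlktree.py` above pin down two behaviours: `spot.acd` answers a shape query only if the matching `CHECK_*` option was requested (otherwise it raises a `RuntimeError` naming the option), and `MERGE_SUBTREES` keeps the Zielonka tree of `Rabin 9` at 9 branches instead of 9! = 362880. A condensed sketch of the same usage, with `spot.translate('FGa')` standing in for the test's hand-written automata:

```python
import spot

aut = spot.translate('FGa')    # any automaton works here

# Without CHECK_RABIN, querying the Rabin shape is an error that names
# the missing option.
aa = spot.acd(aut)
try:
    aa.has_rabin_shape()
except RuntimeError as e:
    print(e)                   # message mentions CHECK_RABIN

# With the option, the query is answered instead of raising.
aa = spot.acd(aut, spot.acd_options_CHECK_RABIN)
print(aa.has_rabin_shape())

# The regression test above: a full Zielonka tree for "Rabin 9" has
# 9! = 362880 branches, but only 9 once identical subtrees are merged.
c = spot.acc_cond("Rabin 9")
print(spot.zielonka_tree(c).num_branches())
print(spot.zielonka_tree(c, spot.zielonka_tree_options_MERGE_SUBTREES).num_branches())
```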
# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -46,7 +46,9 @@ PATH="@abs_top_builddir@/bin:$PATH" export PATH test -z "$1" && - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ srcdir="@srcdir@" @@ -75,6 +77,9 @@ export SPOT_DOTDEFAULT= SPOT_UNINSTALLED=1 export SPOT_UNINSTALLED +MAX_ACCSETS=@MAX_ACCSETS@ +export MAX_ACCSETS + case $1 in */*) dir=${1%/*} @@ -104,18 +109,24 @@ export srcdir case $1 in *.ipynb) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ PYTHONIOENCODING=utf-8:surrogateescape \ exec $PREFIXCMD @PYTHON@ @abs_srcdir@/python/ipnbdoctest.py "$@";; *.py) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD @PYTHON@ "$@";; *.test) exec sh -x "$@";; *.pl) exec $PERL "$@";; *python*|*jupyter*|*pypy*) - PYTHONPATH=$pypath DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ + PYTHONPATH=$pypath:$PYTHONPATH \ + LD_LIBRARY_PATH=$modpath:$LD_LIBRARY_PATH \ + DYLD_LIBRARY_PATH=$modpath:$DYLD_LIBRARY_PATH \ exec $PREFIXCMD "$@";; *) echo "Unknown extension" >&2 diff --git a/tests/sanity/style.test b/tests/sanity/style.test index 8f157014d..325ebe78d 100755 --- a/tests/sanity/style.test +++ b/tests/sanity/style.test @@ -1,6 +1,6 @@ #! /bin/sh # -*- coding: utf-8 -*- -# Copyright (C) 2009-2021 Laboratoire de Recherche et Développement de +# Copyright (C) 2009-2022 Laboratoire de Recherche et Développement de # l'Epita (LRDE). # Copyright (C) 2004, 2005 Laboratoire d'Informatique de Paris 6 # (LIP6), département Systèmes Répartis Coopératifs (SRC), Université @@ -40,7 +40,7 @@ GREP=grep # Get some help from GNU grep. if (grep --color=auto -n --version)>/dev/null 2>&1; then GREP="$GREP --color=auto -n" - GREP_COLOR='1;31' + GREP_COLOR='mt=1;31' export GREP_COLOR fi @@ -295,7 +295,7 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do fi # we want catch (const reftype&) or catch (...) - $GREP 'catch *([^.]' $tmp | $GREP -v 'const.*\&' && + $GREP 'catch *([^.]' $tmp | $GREP -v 'const.*&' && diag 'Always capture exceptions by const reference.' case $file in @@ -392,6 +392,27 @@ for dir in "${INCDIR-..}" "${INCDIR-..}/../bin" "${INCDIR-..}/../tests"; do done || : # Make sure sh does not abort when read exits with false. done +# Rules for Python tests +for dir in "${INCDIR-..}/../tests"; do + + find "$dir" -name "*.py" -a -type f -a -print | + while read file; do + fail=false + + # Strip comments. + sed 's,[ ]*#.*,,' < $file > $tmp + + $GREP '[ ]$' $tmp && + diag 'Trailing whitespace.' + + $GREP -E '([ ]|^)assert[ (]' $tmp && + diag "replace assert keywords by unittest assertion tests" + + $fail && echo "$file" >>failures.style + done || : # Make sure sh does not abort when read exits with false. 
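The sanity check added above rejects bare `assert` statements in the Python test suite; the pattern expected instead, and used throughout this patch, is a module-level `unittest.TestCase` instance. A minimal sketch:

```python
import spot
from unittest import TestCase

tc = TestCase()

aut = spot.translate('FGa')
# Instead of:  assert aut.num_states() == 2
tc.assertEqual(aut.num_states(), 2)
# Instead of:  assert spot.are_equivalent(aut, aut)
tc.assertTrue(spot.are_equivalent(aut, aut))
```

Unlike a bare `assert`, these checks are ordinary method calls and are not stripped when Python runs with optimizations enabled.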
+done + + if test -f failures.style; then echo "The following files contain style errors:" cat failures.style diff --git a/utf8/LICENSE b/utf8/LICENSE new file mode 100644 index 000000000..36b7cd93c --- /dev/null +++ b/utf8/LICENSE @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/utf8/README.md b/utf8/README.md index d4369e85d..a519cdb96 100644 --- a/utf8/README.md +++ b/utf8/README.md @@ -3,9 +3,9 @@ ## Introduction -Many C++ developers miss an easy and portable way of handling Unicode encoded strings. The original C++ Standard (known as C++98 or C++03) is Unicode agnostic. C++11 provides some support for Unicode on core language and library level: u8, u, and U character and string literals, char16_t and char32_t character types, u16string and u32string library classes, and codecvt support for conversions between Unicode encoding forms. In the meantime, developers use third party libraries like ICU, OS specific capabilities, or simply roll out their own solutions. +C++ developers miss an easy and portable way of handling Unicode encoded strings. The original C++ Standard (known as C++98 or C++03) is Unicode agnostic. C++11 provides some support for Unicode on core language and library level: u8, u, and U character and string literals, char16_t and char32_t character types, u16string and u32string library classes, and codecvt support for conversions between Unicode encoding forms. In the meantime, developers use third party libraries like ICU, OS specific capabilities, or simply roll out their own solutions. -In order to easily handle UTF-8 encoded Unicode strings, I came up with a small, C++98 compatible generic library. For anybody used to work with STL algorithms and iterators, it should be easy and natural to use. The code is freely available for any purpose - check out the license at the beginning of the utf8.h file. The library has been used a lot in the past ten years both in commercial and open-source projects and is considered feature-complete now. If you run into bugs or performance issues, please let me know and I'll do my best to address them. +In order to easily handle UTF-8 encoded Unicode strings, I came up with a small, C++98 compatible generic library. 
For anybody used to work with STL algorithms and iterators, it should be easy and natural to use. The code is freely available for any purpose - check out the [license](./LICENSE). The library has been used a lot in the past ten years both in commercial and open-source projects and is considered feature-complete now. If you run into bugs or performance issues, please let me know and I'll do my best to address them. The purpose of this article is not to offer an introduction to Unicode in general, and UTF-8 in particular. If you are not familiar with Unicode, be sure to check out [Unicode Home Page](http://www.unicode.org/) or some other source of information for Unicode. Also, it is not my aim to advocate the use of UTF-8 encoded strings in C++ programs; if you want to handle UTF-8 encoded strings from C++, I am sure you have good reasons for it. @@ -28,50 +28,78 @@ int main(int argc, char** argv) cout << "\nUsage: docsample filename\n"; return 0; } - const char* test_file_path = argv[1]; - // Open the test file (contains UTF-8 encoded text) + // Open the test file (must be UTF-8 encoded) ifstream fs8(test_file_path); if (!fs8.is_open()) { - cout << "Could not open " << test_file_path << endl; - return 0; + cout << "Could not open " << test_file_path << endl; + return 0; } unsigned line_count = 1; string line; // Play with all the lines in the file while (getline(fs8, line)) { - // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function) + // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function) +#if __cplusplus >= 201103L // C++ 11 or later + auto end_it = utf8::find_invalid(line.begin(), line.end()); +#else string::iterator end_it = utf8::find_invalid(line.begin(), line.end()); +#endif // C++ 11 if (end_it != line.end()) { cout << "Invalid UTF-8 encoding detected at line " << line_count << "\n"; cout << "This part is fine: " << string(line.begin(), end_it) << "\n"; } - // Get the line length (at least for the valid part) int length = utf8::distance(line.begin(), end_it); cout << "Length of line " << line_count << " is " << length << "\n"; // Convert it to utf-16 +#if __cplusplus >= 201103L // C++ 11 or later + u16string utf16line = utf8::utf8to16(line); +#else vector utf16line; utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line)); - - // And back to utf-8 +#endif // C++ 11 + // And back to utf-8; +#if __cplusplus >= 201103L // C++ 11 or later + string utf8line = utf8::utf16to8(utf16line); +#else string utf8line; utf8::utf16to8(utf16line.begin(), utf16line.end(), back_inserter(utf8line)); - +#endif // C++ 11 // Confirm that the conversion went OK: if (utf8line != string(line.begin(), end_it)) cout << "Error in UTF-16 conversion at line: " << line_count << "\n"; line_count++; - } + } + return 0; } ``` In the previous code sample, for each line we performed a detection of invalid UTF-8 sequences with `find_invalid`; the number of characters (more precisely - the number of Unicode code points, including the end of line and even BOM if there is one) in each line was determined with a use of `utf8::distance`; finally, we have converted each line to UTF-16 encoding with `utf8to16` and back to UTF-8 with `utf16to8`. +Note a different pattern of usage for old compilers. 
For instance, this is how we convert +a UTF-8 encoded string to a UTF-16 encoded one with a pre-C++11 compiler: +```cpp + vector utf16line; + utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line)); +``` + +With a more modern compiler, the same operation would look like: +```cpp + u16string utf16line = utf8::utf8to16(line); +``` +If the `__cplusplus` macro indicates C++ 11 or later, the library exposes an API that takes into +account C++ standard Unicode strings and move semantics. With an older compiler, it is still +possible to use the same functionality, just in a little less convenient way. + +In case you do not trust the `__cplusplus` macro or, for instance, do not want to include +the C++ 11 helper functions even with a modern compiler, define the `UTF_CPP_CPLUSPLUS` macro +before including `utf8.h` and assign it a value for the standard you want to use - the values are the same as for the `__cplusplus` macro. This can also be useful with compilers that are conservative in setting the `__cplusplus` macro even if they have good support for a recent standard edition - Microsoft's Visual C++ is one example. + ### Checking if a file contains valid UTF-8 text Here is a function that checks whether the content of a file is valid UTF-8 encoded text without reading the content into the memory: @@ -90,7 +118,7 @@ } ``` -Because the function `utf8::is_valid()` works with input iterators, we were able to pass an `istreambuf_iterator` to it and read the content of the file directly without loading it to the memory first. +Because the function `utf8::is_valid()` works with input iterators, we were able to pass an `istreambuf_iterator` to `it` and read the content of the file directly without loading it to the memory first. Note that other functions that take input iterator arguments can be used in a similar way. For instance, to read the content of a UTF-8 encoded text file and convert the text to UTF-16, just do something like: @@ -113,10 +141,56 @@ void fix_utf8_string(std::string& str) } ``` The function will replace any invalid UTF-8 sequence with a Unicode replacement character. There is an overloaded function that enables the caller to supply their own replacement character. + +## Points of interest + +#### Design goals and decisions + +The library was designed to be: + +1. Generic: for better or worse, there are many C++ string classes out there, and the library should work with as many of them as possible. +2. Portable: the library should be portable both across different platforms and compilers. The only non-portable code is a small section that declares unsigned integers of different sizes: three typedefs. They can be changed by the users of the library if they don't match their platform. The default setting should work for Windows (both 32 and 64 bit), and most 32 bit and 64 bit Unix derivatives. Support for post C++03 language features is included for modern compilers at API level only, so the library should work even with pretty old compilers. +3. Lightweight: follow the "pay only for what you use" guideline. +4. Unintrusive: avoid forcing any particular design or even programming style on the user. This is a library, not a framework. + +#### Alternatives + +In case you want to look into other means of working with UTF-8 strings from C++, here is the list of solutions I am aware of: + +1. [ICU Library](http://icu.sourceforge.net/). It is very powerful, complete, feature-rich, mature, and widely used.
Also big, intrusive, non-generic, and doesn't play well with the Standard Library. I definitelly recommend looking at ICU even if you don't plan to use it. +2. C++11 language and library features. Still far from complete, and not easy to use. +3. [Glib::ustring](http://www.gtkmm.org/gtkmm2/docs/tutorial/html/ch03s04.html). A class specifically made to work with UTF-8 strings, and also feel like `std::string`. If you prefer to have yet another string class in your code, it may be worth a look. Be aware of the licensing issues, though. +4. Platform dependent solutions: Windows and POSIX have functions to convert strings from one encoding to another. That is only a subset of what my library offers, but if that is all you need it may be good enough. + + ## Reference ### Functions From utf8 Namespace +#### utf8::append + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Encodes a 32 bit code point as a UTF-8 sequence of octets and appends the sequence to a UTF-8 string. + +```cpp +void append(char32_t cp, std::string& s); +``` + +`cp`: a code point to append to the string. +`s`: a utf-8 encoded string to append the code point to. + +Example of use: + +```cpp +std::string u; +append(0x0448, u); +assert (u[0] == char(0xd1) && u[1] == char(0x88) && u.length() == 2); +``` + +In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown. + + #### utf8::append Available in version 1.0 and later. @@ -238,39 +312,6 @@ In case `start` is reached before a UTF-8 lead octet is hit, or if an invalid UT In case `start` equals `it`, a `not_enough_room` exception is thrown. -#### utf8::previous - -Deprecated in version 1.02 and later. - -Given a reference to an iterator pointing to an octet in a UTF-8 seqence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bits representation of the code point. - -```cpp -template -uint32_t previous(octet_iterator& it, octet_iterator pass_start); -``` - -`octet_iterator`: a random access iterator. -`it`: a reference pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point. -`pass_start`: an iterator to the point in the sequence where the search for the beginning of a code point is aborted if no result was reached. It is a safety measure to prevent passing the beginning of the string in the search for a UTF-8 lead octet. -Return value: the 32 bit representation of the previous code point. - -Example of use: - -```cpp -char* twochars = "\xe6\x97\xa5\xd1\x88"; -unsigned char* w = twochars + 3; -int cp = previous (w, twochars - 1); -assert (cp == 0x65e5); -assert (w == twochars); -``` - - -`utf8::previous` is deprecated, and `utf8::prior` should be used instead, although the existing code can continue using this function. The problem is the parameter `pass_start` that points to the position just before the beginning of the sequence. Standard containers don't have the concept of "pass start" and the function can not be used with their iterators. - -`it` will typically point to the beginning of a code point, and `pass_start` will point to the octet just before the beginning of the string to ensure we don't go backwards too far. `it` is decreased until it points to a lead UTF-8 octet, and then the UTF-8 sequence beginning with that octet is decoded to a 32 bit representation and returned. 
- -In case `pass_start` is reached before a UTF-8 lead octet is hit, or if an invalid UTF-8 sequence is started by the lead octet, an `invalid_utf8` exception is thrown - #### utf8::advance Available in version 1.0 and later. @@ -284,8 +325,8 @@ void advance (octet_iterator& it, distance_type n, octet_iterator end); `octet_iterator`: an input iterator. `distance_type`: an integral type convertible to `octet_iterator`'s difference type. `it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. -`n`: a positive integer that shows how many code points we want to advance. -`end`: end of the UTF-8 sequence to be processed. If `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown. +`n`: number of code points `it` should be advanced. A negative value means decrement. +`end`: limit of the UTF-8 sequence to be processed. If `n` is positive and `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown. If `n` is negative and `it` reaches `end` while `it` points t a trail byte of a UTF-8 sequence, a `utf8::invalid_code_point` exception is thrown. Example of use: @@ -294,10 +335,10 @@ char* twochars = "\xe6\x97\xa5\xd1\x88"; unsigned char* w = twochars; advance (w, 2, twochars + 6); assert (w == twochars + 5); +advance (w, -2, twochars); +assert (w == twochars); ``` -This function works only "forward". In case of a negative `n`, there is no effect. - In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown. #### utf8::distance @@ -328,6 +369,54 @@ This function is used to find the length (in code points) of a UTF-8 encoded str In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `last` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::utf16to8 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-16 encoded string to UTF-8. + +```cpp +std::string utf16to8(const std::u16string& s); +``` + +`s`: a UTF-16 encoded string. +Return value: A UTF-8 encoded string. + +Example of use: + +```cpp + u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e}; + string u = utf16to8(utf16string); + assert (u.size() == 10); +``` + +In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. + +#### utf8::utf16to8 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts a UTF-16 encoded string to UTF-8. + +```cpp +std::string utf16to8(std::u16string_view s); +``` + +`s`: a UTF-16 encoded string. +Return value: A UTF-8 encoded string. + +Example of use: + +```cpp + u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e}; + u16string_view utf16stringview(u16string); + string u = utf16to8(utf16string); + assert (u.size() == 10); +``` + +In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. + + #### utf8::utf16to8 Available in version 1.0 and later. @@ -357,6 +446,57 @@ assert (utf8result.size() == 10); In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown. +#### utf8::utf8to16 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts an UTF-8 encoded string to UTF-16. + +```cpp +std::u16string utf8to16(const std::string& s); +``` + +`s`: an UTF-8 encoded string to convert. 
+Return value: A UTF-16 encoded string + +Example of use: + +```cpp +string utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e"; +u16string utf16result = utf8to16(utf8_with_surrogates); +assert (utf16result.length() == 4); +assert (utf16result[2] == 0xd834); +assert (utf16result[3] == 0xdd1e); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + +#### utf8::utf8to16 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts an UTF-8 encoded string to UTF-16. + +```cpp +std::u16string utf8to16(std::string_view s); +``` + +`s`: an UTF-8 encoded string to convert. +Return value: A UTF-16 encoded string + +Example of use: + +```cpp +string_view utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e"; +u16string utf16result = utf8to16(utf8_with_surrogates); +assert (utf16result.length() == 4); +assert (utf16result[2] == 0xd834); +assert (utf16result[3] == 0xdd1e); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + + #### utf8::utf8to16 Available in version 1.0 and later. @@ -387,6 +527,54 @@ assert (utf16result[3] == 0xdd1e); In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::utf32to8 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-32 encoded string to UTF-8. + +```cpp +std::string utf32to8(const std::u32string& s); +``` + +`s`: a UTF-32 encoded string. +Return value: a UTF-8 encoded string. + +Example of use: + +```cpp +u32string utf32string = {0x448, 0x65E5, 0x10346}; +string utf8result = utf32to8(utf32string); +assert (utf8result.size() == 9); +``` + +In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. + +#### utf8::utf32to8 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Converts a UTF-32 encoded string to UTF-8. + +```cpp +std::string utf32to8(std::u32string_view s); +``` + +`s`: a UTF-32 encoded string. +Return value: a UTF-8 encoded string. + +Example of use: + +```cpp +u32string utf32string = {0x448, 0x65E5, 0x10346}; +u32string_view utf32stringview(utf32string); +string utf8result = utf32to8(utf32stringview); +assert (utf8result.size() == 9); +``` + +In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. + + #### utf8::utf32to8 Available in version 1.0 and later. @@ -407,7 +595,7 @@ Return value: An iterator pointing to the place after the appended UTF-8 string. Example of use: -``` +```cpp int utf32string[] = {0x448, 0x65E5, 0x10346, 0}; vector utf8result; utf32to8(utf32string, utf32string + 3, back_inserter(utf8result)); @@ -416,6 +604,53 @@ assert (utf8result.size() == 9); In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown. +#### utf8::utf8to32 + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Converts a UTF-8 encoded string to UTF-32. + +```cpp +std::u32string utf8to32(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: a UTF-32 encoded string. + +Example of use: + +```cpp +const char* twochars = "\xe6\x97\xa5\xd1\x88"; +u32string utf32result = utf8to32(twochars); +assert (utf32result.size() == 2); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + +#### utf8::utf8to32 + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. 
+ +Converts a UTF-8 encoded string to UTF-32. + +```cpp +std::u32string utf8to32(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: a UTF-32 encoded string. + +Example of use: + +```cpp +string_view twochars = "\xe6\x97\xa5\xd1\x88"; +u32string utf32result = utf8to32(twochars); +assert (utf32result.size() == 2); +``` + +In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. + + #### utf8::utf8to32 Available in version 1.0 and later. @@ -445,6 +680,53 @@ assert (utf32result.size() == 2); In case of an invalid UTF-8 seqence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-of-end of a UTF-8 seqence, a `utf8::not_enough_room` exception is thrown. +#### utf8::find_invalid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Detects an invalid sequence within a UTF-8 string. + +```cpp +std::size_t find_invalid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string::npos`. + +Example of use: + +```cpp +string utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +auto invalid = find_invalid(utf_invalid); +assert (invalid == 5); +``` + +This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. + +#### utf8::find_invalid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Detects an invalid sequence within a UTF-8 string. + +```cpp +std::size_t find_invalid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string_view::npos`. + +Example of use: + +```cpp +string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +auto invalid = find_invalid(utf_invalid); +assert (invalid == 5); +``` + +This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. + + #### utf8::find_invalid Available in version 1.0 and later. @@ -471,6 +753,53 @@ assert (invalid == utf_invalid + 5); This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it if before doing any of the _unchecked_ operations on it. +#### utf8::is_valid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Checks whether a string object contains valid UTF-8 encoded text. + +```cpp +bool is_valid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not. + +Example of use: + +```cpp +char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa"; +bool bvalid = is_valid(utf_invalid); +assert (bvalid == false); +``` + +You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid. + +#### utf8::is_valid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Checks whether a string object contains valid UTF-8 encoded text. + +```cpp +bool is_valid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not. 
+ +Example of use: + +```cpp +string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa"; +bool bvalid = is_valid(utf_invalid); +assert (bvalid == false); +``` + +You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid. + + #### utf8::is_valid Available in version 1.0 and later. @@ -497,6 +826,59 @@ assert (bvalid == false); `is_valid` is a shorthand for `find_invalid(start, end) == end;`. You may want to use it to make sure that a byte seqence is a valid UTF-8 string without the need to know where it fails if it is not valid. +#### utf8::replace_invalid + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +std::string replace_invalid(const std::string& s, char32_t replacement); +std::string replace_invalid(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`. +Return value: A UTF-8 encoded string with replaced invalid sequences. + +Example of use: + +```cpp +string invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +string replace_invalid_result = replace_invalid(invalid_sequence, '?'); +bool bvalid = is_valid(replace_invalid_result); +assert (bvalid); +const string fixed_invalid_sequence = "a????z"; +assert (fixed_invalid_sequence == replace_invalid_result); +``` + +#### utf8::replace_invalid + +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +std::string replace_invalid(std::string_view s, char32_t replacement); +std::string replace_invalid(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`. +Return value: A UTF-8 encoded string with replaced invalid sequences. + +Example of use: + +```cpp +string_view invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +string replace_invalid_result = replace_invalid(invalid_sequence, '?'); +bool bvalid = is_valid(replace_invalid_result); +assert (bvalid); +const string fixed_invalid_sequence = "a????z"; +assert (fixed_invalid_sequence == replace_invalid_result); +``` + + #### utf8::replace_invalid Available in version 2.0 and later. @@ -532,11 +914,64 @@ assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), `replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range. -If `end` does not point to the past-of-end of a UTF-8 sequence, a `utf8::not_enough_room` exception is thrown. +#### utf8::starts_with_bom + +Available in version 3.0 and later. Requires a C++ 11 compliant compiler. + +Checks whether a string starts with a UTF-8 byte order mark (BOM) + +```cpp +bool starts_with_bom(const std::string& s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not.
+ +Example of use: + +```cpp +string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)}; +bool bbom = starts_with_bom(byte_order_mark); +assert (bbom == true); +string threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88"; +bool no_bbom = starts_with_bom(threechars); +assert (no_bbom == false); + ``` + +The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. + #### utf8::starts_with_bom -Available in version 2.3 and later. Relaces deprecated `is_bom()` function. +Available in version 3.2 and later. Requires a C++ 17 compliant compiler. + +Checks whether a string starts with a UTF-8 byte order mark (BOM) + +```cpp +bool starts_with_bom(std::string_view s); +``` + +`s`: a UTF-8 encoded string. +Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not. + +Example of use: + +```cpp +string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)}; +string_view byte_order_mark_view(byte_order_mark); +bool bbom = starts_with_bom(byte_order_mark_view); +assert (bbom); +string_view threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88"; +bool no_bbom = starts_with_bom(threechars); +assert (!no_bbom); + ``` + +The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. + + +#### utf8::starts_with_bom + +Available in version 2.3 and later. Checks whether an octet sequence starts with a UTF-8 byte order mark (BOM) @@ -560,33 +995,6 @@ assert (bbom == true); The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. -#### utf8::is_bom - -Available in version 1.0 and later. Deprecated in version 2.3\. `starts_with_bom()` should be used instead. - -Checks whether a sequence of three octets is a UTF-8 byte order mark (BOM) - -```cpp -template -bool is_bom (octet_iterator it); // Deprecated -``` - -`octet_iterator`: an input iterator. -`it`: beginning of the 3-octet sequence to check -Return value: `true` if the sequence is UTF-8 byte order mark; `false` if not. - -Example of use: - -```cpp -unsigned char byte_order_mark[] = {0xef, 0xbb, 0xbf}; -bool bbom = is_bom(byte_order_mark); -assert (bbom == true); -``` - -The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text. - -If a sequence is shorter than three bytes, an invalid iterator will be dereferenced. Therefore, this function is deprecated in favor of `starts_with_bom()`that takes the end of sequence as an argument. - ### Types From utf8 Namespace #### utf8::exception @@ -678,15 +1086,24 @@ class iterator; ##### Member functions -`iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. +`iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. + `explicit iterator (const octet_iterator& octet_it, const octet_iterator& range_start, const octet_iterator& range_end);` a constructor that initializes the underlying octet_iterator with octet_it and sets the range in which the iterator is considered valid. + `octet_iterator base () const;` returns the underlying octet_iterator. 
+ `uint32_t operator * () const;` decodes the utf-8 sequence the underlying octet_iterator is pointing to and returns the code point. + `bool operator == (const iterator& rhs) const;` returns `true` if the two underlaying iterators are equal. + `bool operator != (const iterator& rhs) const;` returns `true` if the two underlaying iterators are not equal. + `iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point. + `iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one. + `iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point. + `iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one. Example of use: @@ -824,34 +1241,6 @@ assert (w == twochars); This is a faster but less safe version of `utf8::prior`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. -#### utf8::unchecked::previous (deprecated, see utf8::unchecked::prior) - -Deprecated in version 1.02 and later. - -Given a reference to an iterator pointing to an octet in a UTF-8 seqence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bits representation of the code point. - -```cpp -template -uint32_t previous(octet_iterator& it); -``` - -`it`: a reference pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point. -Return value: the 32 bit representation of the previous code point. - -Example of use: - -```cpp -char* twochars = "\xe6\x97\xa5\xd1\x88"; -char* w = twochars + 3; -int cp = unchecked::previous (w); -assert (cp == 0x65e5); -assert (w == twochars); -``` - -The reason this function is deprecated is just the consistency with the "checked" versions, where `prior` should be used instead of `previous`. In fact, `unchecked::previous` behaves exactly the same as `unchecked::prior` - -This is a faster but less safe version of `utf8::previous`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. - #### utf8::unchecked::advance Available in version 1.0 and later. @@ -863,8 +1252,8 @@ template void advance (octet_iterator& it, distance_type n); ``` -`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. -`n`: a positive integer that shows how many code points we want to advance. +`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point. +`n`: number of code points `it` should be advanced. A negative value means decrement. Example of use: @@ -875,8 +1264,6 @@ unchecked::advance (w, 2); assert (w == twochars + 5); ``` -This function works only "forward". In case of a negative `n`, there is no effect. - This is a faster but less safe version of `utf8::advance`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking. #### utf8::unchecked::distance @@ -1013,6 +1400,43 @@ assert (utf32result.size() == 2); This is a faster but less safe version of `utf8::utf8to32`. It does not check for validity of the supplied UTF-8 sequence. 
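+
+Since the unchecked conversions skip all validation, a common pattern is to validate a buffer once with the checked API and only then hand it to the faster unchecked functions. The following sketch is illustrative only (it assumes C++ 11, the `utf8.h` umbrella header, and a hypothetical helper name `to_utf32`); it is not an additional library API:
+
+```cpp
+#include <iterator>
+#include <string>
+#include "utf8.h"   // pulls in both the checked and unchecked headers
+
+std::u32string to_utf32(const std::string& line)
+{
+    std::u32string out;
+    if (utf8::is_valid(line.begin(), line.end()))
+        // Input already known to be valid UTF-8: take the faster unchecked path.
+        utf8::unchecked::utf8to32(line.begin(), line.end(), std::back_inserter(out));
+    else
+        // Otherwise fall back to the checked conversion, which throws on the first bad sequence.
+        utf8::utf8to32(line.begin(), line.end(), std::back_inserter(out));
+    return out;
+}
+```
+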
+#### utf8::unchecked::replace_invalid + +Available in version 3.1 and later. + +Replaces all invalid UTF-8 sequences within a string with a replacement marker. + +```cpp +template <typename octet_iterator, typename output_iterator> +output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement); +template <typename octet_iterator, typename output_iterator> +output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out); +``` + +`octet_iterator`: an input iterator. +`output_iterator`: an output iterator. +`start`: an iterator pointing to the beginning of the UTF-8 string to look for invalid UTF-8 sequences. +`end`: an iterator pointing to pass-the-end of the UTF-8 string to look for invalid UTF-8 sequences. +`out`: An output iterator to the range where the result of replacement is stored. +`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`. +Return value: An iterator pointing to the place after the UTF-8 string with replaced invalid sequences. + +Example of use: + +```cpp +char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z"; +vector<char> replace_invalid_result; +unchecked::replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), back_inserter(replace_invalid_result), '?'); +bool bvalid = utf8::is_valid(replace_invalid_result.begin(), replace_invalid_result.end()); +assert (bvalid); +const char* fixed_invalid_sequence = "a????z"; +assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), fixed_invalid_sequence)); +``` + +`replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range. + +Unlike `utf8::replace_invalid`, this function does not verify validity of the replacement marker. + ### Types From utf8::unchecked Namespace #### utf8::iterator @@ -1029,14 +1453,23 @@ ##### Member functions `iterator();` the deafult constructor; the underlying octet_iterator is constructed with its default constructor. -`explicit iterator (const octet_iterator& octet_it);` a constructor that initializes the underlying octet_iterator with `octet_it` + +`explicit iterator (const octet_iterator& octet_it);` a constructor that initializes the underlying octet_iterator with `octet_it`. + `octet_iterator base () const;` returns the underlying octet_iterator. + `uint32_t operator * () const;` decodes the utf-8 sequence the underlying octet_iterator is pointing to and returns the code point. + `bool operator == (const iterator& rhs) const;` returns `true` if the two underlaying iterators are equal. + `bool operator != (const iterator& rhs) const;` returns `true` if the two underlaying iterators are not equal. + `iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point. + `iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one. + `iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point. + `iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one. Example of use: @@ -1062,26 +1495,6 @@ assert (*un_it == 0x10346); This is an unchecked version of `utf8::iterator`. It is faster in many cases, but offers no validity or range checks.
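+
+Both iterator adaptors provide the standard bidirectional iterator member typedefs (`value_type`, `difference_type`, `iterator_category`, and so on), so they can be handed directly to STL algorithms. A brief illustrative fragment, not taken from the library's own examples (it assumes `<algorithm>`, `<cassert>` and the `utf8.h` umbrella header are included):
+
+```cpp
+char threechars[] = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+utf8::unchecked::iterator<char*> first(threechars);
+utf8::unchecked::iterator<char*> last(threechars + sizeof(threechars) - 1);
+// std::distance advances code point by code point: 9 octets, 3 code points.
+assert (std::distance(first, last) == 3);
+// std::find compares the decoded 32 bit code points.
+assert (*std::find(first, last, 0x65e5) == 0x65e5);
+```
+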
-## Points of interest - -#### Design goals and decisions - -The library was designed to be: - -1. Generic: for better or worse, there are many C++ string classes out there, and the library should work with as many of them as possible. -2. Portable: the library should be portable both accross different platforms and compilers. The only non-portable code is a small section that declares unsigned integers of different sizes: three typedefs. They can be changed by the users of the library if they don't match their platform. The default setting should work for Windows (both 32 and 64 bit), and most 32 bit and 64 bit Unix derivatives. At this point I don't plan to use any post C++03 features, so the library should work even with pretty old compilers. -3. Lightweight: follow the "pay only for what you use" guideline. -4. Unintrusive: avoid forcing any particular design or even programming style on the user. This is a library, not a framework. - -#### Alternatives - -In case you want to look into other means of working with UTF-8 strings from C++, here is the list of solutions I am aware of: - -1. [ICU Library](http://icu.sourceforge.net/). It is very powerful, complete, feature-rich, mature, and widely used. Also big, intrusive, non-generic, and doesn't play well with the Standard Library. I definitelly recommend looking at ICU even if you don't plan to use it. -2. C++11 language and library features. Still far from complete, and not easy to use. -3. [Glib::ustring](http://www.gtkmm.org/gtkmm2/docs/tutorial/html/ch03s04.html). A class specifically made to work with UTF-8 strings, and also feel like `std::string`. If you prefer to have yet another string class in your code, it may be worth a look. Be aware of the licensing issues, though. -4. Platform dependent solutions: Windows and POSIX have functions to convert strings from one encoding to another. That is only a subset of what my library offers, but if that is all you need it may be good enough. - ## Links 1. [The Unicode Consortium](http://www.unicode.org/). 
diff --git a/utf8/utf8/checked.h b/utf8/utf8/checked.h index 2aef5838d..993b7f7c5 100644 --- a/utf8/utf8/checked.h +++ b/utf8/utf8/checked.h @@ -42,7 +42,7 @@ namespace utf8 uint32_t cp; public: invalid_code_point(uint32_t codepoint) : cp(codepoint) {} - virtual const char* what() const throw() { return "Invalid code point"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid code point"; } uint32_t code_point() const {return cp;} }; @@ -50,7 +50,7 @@ namespace utf8 uint8_t u8; public: invalid_utf8 (uint8_t u) : u8(u) {} - virtual const char* what() const throw() { return "Invalid UTF-8"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-8"; } uint8_t utf8_octet() const {return u8;} }; @@ -58,13 +58,13 @@ namespace utf8 uint16_t u16; public: invalid_utf16 (uint16_t u) : u16(u) {} - virtual const char* what() const throw() { return "Invalid UTF-16"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-16"; } uint16_t utf16_word() const {return u16;} }; class not_enough_room : public exception { public: - virtual const char* what() const throw() { return "Not enough space"; } + virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Not enough space"; } }; /// The library API - functions intended to be called by the users @@ -107,7 +107,9 @@ namespace utf8 *out++ = *it; break; case internal::NOT_ENOUGH_ROOM: - throw not_enough_room(); + out = utf8::append (replacement, out); + start = end; + break; case internal::INVALID_LEAD: out = utf8::append (replacement, out); ++start; @@ -174,23 +176,19 @@ namespace utf8 return utf8::peek_next(it, end); } - /// Deprecated in versions that include "prior" - template - uint32_t previous(octet_iterator& it, octet_iterator pass_start) - { - octet_iterator end = it; - while (utf8::internal::is_trail(*(--it))) - if (it == pass_start) - throw invalid_utf8(*it); // error - no lead byte in the sequence - octet_iterator temp = it; - return utf8::next(temp, end); - } - template void advance (octet_iterator& it, distance_type n, octet_iterator end) { - for (distance_type i = 0; i < n; ++i) - utf8::next(it, end); + const distance_type zero(0); + if (n < zero) { + // backward + for (distance_type i = n; i < zero; ++i) + utf8::prior(it, end); + } else { + // forward + for (distance_type i = zero; i < n; ++i) + utf8::next(it, end); + } } template @@ -265,11 +263,16 @@ namespace utf8 // The iterator class template - class iterator : public std::iterator { + class iterator { octet_iterator it; octet_iterator range_start; octet_iterator range_end; public: + typedef uint32_t value_type; + typedef uint32_t* pointer; + typedef uint32_t& reference; + typedef std::ptrdiff_t difference_type; + typedef std::bidirectional_iterator_tag iterator_category; iterator () {} explicit iterator (const octet_iterator& octet_it, const octet_iterator& rangestart, @@ -322,6 +325,11 @@ namespace utf8 } // namespace utf8 +#if UTF_CPP_CPLUSPLUS >= 201703L // C++ 17 or later +#include "cpp17.h" +#elif UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later +#include "cpp11.h" +#endif // C++ 11 or later + #endif //header guard - diff --git a/utf8/utf8/core.h b/utf8/utf8/core.h index ae0f367db..de6199f2a 100644 --- a/utf8/utf8/core.h +++ b/utf8/utf8/core.h @@ -30,6 +30,23 @@ DEALINGS IN THE SOFTWARE. #include +// Determine the C++ standard version. +// If the user defines UTF_CPP_CPLUSPLUS, use that. 
+// Otherwise, trust the unreliable predefined macro __cplusplus + +#if !defined UTF_CPP_CPLUSPLUS + #define UTF_CPP_CPLUSPLUS __cplusplus +#endif + +#if UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later + #define UTF_CPP_OVERRIDE override + #define UTF_CPP_NOEXCEPT noexcept +#else // C++ 98/03 + #define UTF_CPP_OVERRIDE + #define UTF_CPP_NOEXCEPT throw() +#endif // C++ 11 or later + + namespace utf8 { // The typedefs for 8-bit, 16-bit and 32-bit unsigned integers @@ -49,8 +66,8 @@ namespace internal const uint16_t LEAD_SURROGATE_MAX = 0xdbffu; const uint16_t TRAIL_SURROGATE_MIN = 0xdc00u; const uint16_t TRAIL_SURROGATE_MAX = 0xdfffu; - const uint16_t LEAD_OFFSET = LEAD_SURROGATE_MIN - (0x10000 >> 10); - const uint32_t SURROGATE_OFFSET = 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN; + const uint16_t LEAD_OFFSET = 0xd7c0u; // LEAD_SURROGATE_MIN - (0x10000 >> 10) + const uint32_t SURROGATE_OFFSET = 0xfca02400u; // 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN // Maximum valid value for a Unicode code point const uint32_t CODE_POINT_MAX = 0x0010ffffu; @@ -142,7 +159,7 @@ namespace internal if (!utf8::internal::is_trail(*it)) return INCOMPLETE_SEQUENCE; - + return UTF8_OK; } @@ -165,7 +182,7 @@ namespace internal { if (it == end) return NOT_ENOUGH_ROOM; - + code_point = utf8::internal::mask8(*it); UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end) @@ -222,7 +239,7 @@ namespace internal template utf_error validate_next(octet_iterator& it, octet_iterator end, uint32_t& code_point) { - if (it == end) + if (it == end) return NOT_ENOUGH_ROOM; // Save the original value of it so we can go back in case of failure @@ -237,7 +254,7 @@ namespace internal // Get trail octets and calculate the code point utf_error err = UTF8_OK; switch (length) { - case 0: + case 0: return INVALID_LEAD; case 1: err = utf8::internal::get_sequence_1(it, end, cp); @@ -313,18 +330,7 @@ namespace internal ((it != end) && (utf8::internal::mask8(*it++)) == bom[1]) && ((it != end) && (utf8::internal::mask8(*it)) == bom[2]) ); - } - - //Deprecated in release 2.3 - template - inline bool is_bom (octet_iterator it) - { - return ( - (utf8::internal::mask8(*it++)) == bom[0] && - (utf8::internal::mask8(*it++)) == bom[1] && - (utf8::internal::mask8(*it)) == bom[2] - ); - } + } } // namespace utf8 #endif // header guard diff --git a/utf8/utf8/cpp11.h b/utf8/utf8/cpp11.h new file mode 100644 index 000000000..d93961b04 --- /dev/null +++ b/utf8/utf8/cpp11.h @@ -0,0 +1,103 @@ +// Copyright 2018 Nemanja Trifunovic + +/* +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +*/ + + +#ifndef UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1 +#define UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1 + +#include "checked.h" +#include + +namespace utf8 +{ + + inline void append(char32_t cp, std::string& s) + { + append(uint32_t(cp), std::back_inserter(s)); + } + + inline std::string utf16to8(const std::u16string& s) + { + std::string result; + utf16to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u16string utf8to16(const std::string& s) + { + std::u16string result; + utf8to16(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::string utf32to8(const std::u32string& s) + { + std::string result; + utf32to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u32string utf8to32(const std::string& s) + { + std::u32string result; + utf8to32(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::size_t find_invalid(const std::string& s) + { + std::string::const_iterator invalid = find_invalid(s.begin(), s.end()); + return (invalid == s.end()) ? std::string::npos : (invalid - s.begin()); + } + + inline bool is_valid(const std::string& s) + { + return is_valid(s.begin(), s.end()); + } + + inline std::string replace_invalid(const std::string& s, char32_t replacement) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement); + return result; + } + + inline std::string replace_invalid(const std::string& s) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline bool starts_with_bom(const std::string& s) + { + return starts_with_bom(s.begin(), s.end()); + } + +} // namespace utf8 + +#endif // header guard + diff --git a/utf8/utf8/cpp17.h b/utf8/utf8/cpp17.h new file mode 100644 index 000000000..7bfa86994 --- /dev/null +++ b/utf8/utf8/cpp17.h @@ -0,0 +1,103 @@ +// Copyright 2018 Nemanja Trifunovic + +/* +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +*/ + + +#ifndef UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9 +#define UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9 + +#include "checked.h" +#include + +namespace utf8 +{ + + inline void append(char32_t cp, std::string& s) + { + append(uint32_t(cp), std::back_inserter(s)); + } + + inline std::string utf16to8(std::u16string_view s) + { + std::string result; + utf16to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u16string utf8to16(std::string_view s) + { + std::u16string result; + utf8to16(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::string utf32to8(std::u32string_view s) + { + std::string result; + utf32to8(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::u32string utf8to32(std::string_view s) + { + std::u32string result; + utf8to32(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline std::size_t find_invalid(std::string_view s) + { + std::string_view::const_iterator invalid = find_invalid(s.begin(), s.end()); + return (invalid == s.end()) ? std::string_view::npos : (invalid - s.begin()); + } + + inline bool is_valid(std::string_view s) + { + return is_valid(s.begin(), s.end()); + } + + inline std::string replace_invalid(std::string_view s, char32_t replacement) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement); + return result; + } + + inline std::string replace_invalid(std::string_view s) + { + std::string result; + replace_invalid(s.begin(), s.end(), std::back_inserter(result)); + return result; + } + + inline bool starts_with_bom(std::string_view s) + { + return starts_with_bom(s.begin(), s.end()); + } + +} // namespace utf8 + +#endif // header guard + diff --git a/utf8/utf8/unchecked.h b/utf8/utf8/unchecked.h index cb2427166..0e1b51cc7 100644 --- a/utf8/utf8/unchecked.h +++ b/utf8/utf8/unchecked.h @@ -32,13 +32,13 @@ DEALINGS IN THE SOFTWARE. 
namespace utf8 { - namespace unchecked + namespace unchecked { template octet_iterator append(uint32_t cp, octet_iterator result) { if (cp < 0x80) // one octet - *(result++) = static_cast(cp); + *(result++) = static_cast(cp); else if (cp < 0x800) { // two octets *(result++) = static_cast((cp >> 6) | 0xc0); *(result++) = static_cast((cp & 0x3f) | 0x80); @@ -57,6 +57,46 @@ namespace utf8 return result; } + template + output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement) + { + while (start != end) { + octet_iterator sequence_start = start; + internal::utf_error err_code = utf8::internal::validate_next(start, end); + switch (err_code) { + case internal::UTF8_OK : + for (octet_iterator it = sequence_start; it != start; ++it) + *out++ = *it; + break; + case internal::NOT_ENOUGH_ROOM: + out = utf8::unchecked::append (replacement, out); + start = end; + break; + case internal::INVALID_LEAD: + out = utf8::unchecked::append (replacement, out); + ++start; + break; + case internal::INCOMPLETE_SEQUENCE: + case internal::OVERLONG_SEQUENCE: + case internal::INVALID_CODE_POINT: + out = utf8::unchecked::append (replacement, out); + ++start; + // just one replacement mark for the sequence + while (start != end && utf8::internal::is_trail(*start)) + ++start; + break; + } + } + return out; + } + + template + inline output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out) + { + static const uint32_t replacement_marker = utf8::internal::mask16(0xfffd); + return utf8::unchecked::replace_invalid(start, end, out, replacement_marker); + } + template uint32_t next(octet_iterator& it) { @@ -85,13 +125,13 @@ namespace utf8 break; } ++it; - return cp; + return cp; } template uint32_t peek_next(octet_iterator it) { - return utf8::unchecked::next(it); + return utf8::unchecked::next(it); } template @@ -102,18 +142,19 @@ namespace utf8 return utf8::unchecked::next(temp); } - // Deprecated in versions that include prior, but only for the sake of consistency (see utf8::previous) - template - inline uint32_t previous(octet_iterator& it) - { - return utf8::unchecked::prior(it); - } - template void advance (octet_iterator& it, distance_type n) { - for (distance_type i = 0; i < n; ++i) - utf8::unchecked::next(it); + const distance_type zero(0); + if (n < zero) { + // backward + for (distance_type i = n; i < zero; ++i) + utf8::unchecked::prior(it); + } else { + // forward + for (distance_type i = zero; i < n; ++i) + utf8::unchecked::next(it); + } } template @@ -128,7 +169,7 @@ namespace utf8 template octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result) - { + { while (start != end) { uint32_t cp = utf8::internal::mask16(*start++); // Take care of surrogate pairs first @@ -138,7 +179,7 @@ namespace utf8 } result = utf8::unchecked::append(cp, result); } - return result; + return result; } template @@ -176,9 +217,14 @@ namespace utf8 // The iterator class template - class iterator : public std::iterator { + class iterator { octet_iterator it; public: + typedef uint32_t value_type; + typedef uint32_t* pointer; + typedef uint32_t& reference; + typedef std::ptrdiff_t difference_type; + typedef std::bidirectional_iterator_tag iterator_category; iterator () {} explicit iterator (const octet_iterator& octet_it): it(octet_it) {} // the default "big three" are OK