Compare commits

..

2 commits

Author SHA1 Message Date
bdae5563c6 ltlfilt: add --sonf and --sonf-aps flags
* bin/ltlfilt.cc: Here.
* NEWS: Mention new ltlfilt flags.
* tests/Makefile.am, tests/core/sonf.test: Test these flags.
2022-03-03 12:42:26 +01:00
0505ee9310 tl: implement suffix operator normal form
* spot/tl/Makefile.am: New sonf files
* spot/tl/sonf.cc,spot/tl/sonf.hh: Here.
* python/spot/impl.i: include sonf.hh header
* doc/spot.bib: add entry for the SONF paper
* tests/python/formulas.ipynb: show sample usage
* tests/python/spot.py: test automata equivalence before/after SONF
* NEWS: mention the change
2022-03-03 09:13:04 +01:00
926 changed files with 23923 additions and 107264 deletions

View file

@ -3,9 +3,9 @@
(require-final-newline . t)
(mode . global-whitespace)
(bug-reference-bug-regexp
. "\\(?1:\\(?:[Ff]ix\\(?:es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)\\)")
. "\\(?:[Ff]ix\\(es\\)? \\|[Ii]ssue \\)#\\(?2:[0-9]+\\)")
(bug-reference-url-format
. "https://gitlab.lre.epita.fr/spot/spot/issues/%s")
. "https://gitlab.lrde.epita.fr/spot/spot/issues/%s")
(mode . bug-reference)
(magit-branch-adjust-remote-upstream-alist ("origin/next" . "/"))))
(c++-mode . ((c-default-style . "gnu")

1
.gitignore vendored
View file

@ -4,7 +4,6 @@ configure
config.log
config.status
aclocal.m4
ltargz.m4
autom4te.cache
libtool
auto

View file

@ -19,12 +19,12 @@ debian-stable-gcc:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable
script:
- autoreconf -vfi
- ./configure --enable-max-accsets=256 --enable-pthread
- ./configure --enable-max-accsets=256
- make
- make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-max-accsets=256 --enable-pthread'
- make distcheck
artifacts:
when: always
paths:
@ -32,47 +32,22 @@ debian-stable-gcc:
- ./*.log
- ./*.tar.gz
# We build on Debian unstable because we want an up-to-date Automake.
# (See issue #512.) We do not run distcheck here to speedup this build
# that several other builds depend upon. Other builds will run distcheck.
make-dist:
stage: build
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
script:
- autoreconf -vfi
- ./configure --disable-static --enable-doxygen
- make
- make dist
- autoconf --trace='AC_INIT:$2' > VERSION
artifacts:
when: always
paths:
- spot-*/_build/sub/tests/*/*.log
- ./*.log
- ./*.tar.gz
- VERSION
# We --disable-devel for coverage, because debug mode replaces
# SPOT_UNREACHABLE by an assertion wich is never reachable, lowering
# our coverage.
debian-unstable-gcc-coverage:
stage: build
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian
script:
- autoreconf -vfi
- ./configure CXX='g++ --coverage' --disable-devel --enable-warnings --disable-static --enable-doxygen
- ./configure CXX='g++ --coverage' --enable-devel --disable-static --enable-doxygen
- make
- make check
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root $PWD -e 'bin/spot.cc' -e 'bin/spot-x.cc' -e 'spot/bricks/.*' -e 'spot/parse.*/scan.*.cc' -e 'spot/parse.*/parse.*.cc' -e 'utf8/.*' -e 'python/.*' -e 'buddy/.*' -e 'doc/org/tmp/.*' --html-details coverage.html --html-tab-size 8 --fail-under-line 90.7
coverage: /^\s*lines:\s*\d+.\d+\%/
- lcov --capture --directory . --no-external --output spot.info
- lcov --remove spot.info '*/bin/spot.cc' '*/bin/spot-x.cc' '*/spot/parse*/scan*.cc' '*/spot/parse*/parse*.cc' '*/utf8/*' '*/python/*' '*/buddy/*' '*/doc/org/tmp/*' --output spot2.info
- lcov --summary spot2.info
- genhtml --legend --demangle-cpp --output-directory coverage spot2.info
artifacts:
when: always
paths:
@ -80,66 +55,50 @@ debian-unstable-gcc-coverage:
- ./*.log
- doc/spot.html/
- doc/userdoc/
- coverage*.html
- coverage*.css
- coverage/
- ./*.tar.gz
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
- spot2.info
debian-unstable-gcc-pypy:
stage: build2
needs:
- job: make-dist
artifacts: true
variables:
GIT_STRATEGY: none
stage: build
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian
script:
- VERSION=`cat VERSION`
- tar xvf spot-$VERSION.tar.gz
- cd spot-$VERSION
- autoreconf -vfi
- ./configure PYTHON=/usr/bin/pypy3 --disable-static
- make
- make check TESTS='$(TESTS_python) $(TESTS_ipython)'
artifacts:
when: always
paths:
- spot-*/tests/*/*.log
- spot-*/*.log
- tests/*/*.log
- ./*.log
# With emacs now using gcc for on-the-fly compilation,
# we cannot rebuild the documentation using gcc-snapshot. So we start
# from the tarball instead.
debian-gcc-snapshot:
stage: build2
needs:
- job: make-dist
artifacts: true
stage: build
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian
script:
- export PATH="/usr/lib/gcc-snapshot/bin:$PATH" LD_LIBRARY_PATH="/usr/lib/gcc-snapshot/lib:$LD_LIBRARY_PATH"
- VERSION=`cat VERSION`
- tar xvf spot-$VERSION.tar.gz
- cd spot-$VERSION
- autoreconf -vfi
- ./configure --with-included-ltdl CXX='g++'
- make
- make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl'
- make distcheck DISTCHECK_CONFIGURE_FLAGS='--with-included-ltdl'
allow_failure: true
artifacts:
when: always
paths:
- spot-*/tests/*/*.log
- spot-*/*.log
- ./spot-*/_build/sub/tests/*/*.log
- ./*.log
- doc/spot.html/
- doc/userdoc/
- ./*.tar.gz
alpine-gcc:
stage: build
@ -147,12 +106,12 @@ alpine-gcc:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/alpine
image: gitlab-registry.lrde.epita.fr/spot/buildenv/alpine
script:
- autoreconf -vfi
- ./configure
- make
- make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-pthread' || { chmod -R u+w ./spot-*; false; }
- make distcheck || { chmod -R u+w ./spot-*; false; }
artifacts:
when: always
paths:
@ -166,7 +125,7 @@ arch-clang:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/arch
image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch
script:
- autoreconf -vfi
- ./configure --prefix ~/install_dir CC='clang -Qunused-arguments' CXX='clang++ -Qunused-arguments' --enable-devel --enable-c++20 --enable-doxygen
@ -179,30 +138,22 @@ arch-clang:
- ./*.log
arch-gcc-glibcxxdebug:
stage: build2
needs:
- job: make-dist
artifacts: true
variables:
GIT_STRATEGY: none
stage: build
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/arch
image: gitlab-registry.lrde.epita.fr/spot/buildenv/arch
script:
- VERSION=`cat VERSION`
- tar xvf spot-$VERSION.tar.gz
- mkdir build-$VERSION
- cd build-$VERSION
- ../spot-$VERSION/configure --enable-devel --enable-c++20 --enable-glibcxx-debug
- autoreconf -vfi
- ./configure --enable-devel --enable-c++20 --enable-glibcxx-debug
- make
- make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-devel --enable-c++20 --enable-glibcxx-debug'
artifacts:
when: on_failure
paths:
- build-*/spot-*/_build/sub/tests/*/*.log
- build-*/*.log
- ./spot-*/_build/sub/tests/*/*.log
- ./*.log
mingw-shared:
stage: build2
@ -210,17 +161,15 @@ mingw-shared:
# We start from the tarball generated from a non-cross-compiling
# job, so that all generated files are included, especially those
# built from the executables.
- job: make-dist
- job: debian-stable-gcc
artifacts: true
variables:
GIT_STRATEGY: none
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian
script:
- VERSION=`cat VERSION`
- VERSION=`autoconf --trace='AC_INIT:$2'`
- tar xvf spot-$VERSION.tar.gz
- cd spot-$VERSION
- ./configure CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++-posix --host i686-w64-mingw32 --disable-python
@ -237,17 +186,15 @@ mingw-static:
# We start from the tarball generated from a non-cross-compiling
# job, so that all generated files are included, especially those
# built from the executables.
- job: make-dist
- job: debian-stable-gcc
artifacts: true
variables:
GIT_STRATEGY: none
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/debian
image: gitlab-registry.lrde.epita.fr/spot/buildenv/debian
script:
- VERSION=`cat VERSION`
- VERSION=`autoconf --trace='AC_INIT:$2'`
- tar xvf spot-$VERSION.tar.gz
- cd spot-$VERSION
- mkdir install_dir
@ -267,19 +214,17 @@ mingw-static:
debpkg-stable:
stage: build
variables:
GIT_STRATEGY: none
only:
- /-deb$/
- master
- next
- stable
script:
- docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable
- vol=spot-stable-$CI_COMMIT_SHA-$CI_JOB_ID
- docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable
- vol=spot-stable-$CI_COMMIT_SHA
- docker volume create $vol
- exitcode=0
- docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$?
- docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian:stable ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$?
- docker cp helper-$vol:/build/result _build_stable || exitcode=$?
- docker rm helper-$vol || exitcode=$?
- docker volume rm $vol || exitcode=$?
@ -293,8 +238,6 @@ debpkg-stable:
debpkg-stable-i386:
stage: build2
variables:
GIT_STRATEGY: none
only:
- /-deb$/
- master
@ -303,11 +246,11 @@ debpkg-stable-i386:
tags: ["x86"]
needs: ["debpkg-stable"]
script:
- docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable
- vol=spot-stable-$CI_COMMIT_SHA-$CI_JOB_ID
- docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable
- vol=spot-stable-$CI_COMMIT_SHA
- docker volume create $vol
- exitcode=0
- docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$?
- docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386:stable ./bin-spot.sh -j${NBPROC-1} || exitcode=$?
- docker cp _build_stable/. helper-$vol:/build/result || exitcode=$?
- rm -rf _build_stable
- docker start -a helper-$vol || exitcode=$?
@ -324,17 +267,15 @@ debpkg-stable-i386:
debpkg-unstable:
stage: build
variables:
GIT_STRATEGY: none
only:
- /-deb$/
- next
script:
- docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian
- vol=spot-unstable-$CI_COMMIT_SHA-$CI_JOB_ID
- docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian
- vol=spot-unstable-$CI_COMMIT_SHA
- docker volume create $vol
- exitcode=0
- docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$?
- docker run -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian ./build-spot.sh $CI_COMMIT_REF_NAME -j${NBPROC-1} || exitcode=$?
- docker cp helper-$vol:/build/result _build_unstable || exitcode=$?
- docker rm helper-$vol || exitcode=$?
- docker volume rm $vol || exitcode=$?
@ -348,19 +289,17 @@ debpkg-unstable:
debpkg-unstable-i386:
stage: build2
variables:
GIT_STRATEGY: none
only:
- /-deb$/
- next
tags: ["x86"]
needs: ["debpkg-unstable"]
script:
- docker pull gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386
- vol=spot-unstable-$CI_COMMIT_SHA-$CI_JOB_ID
- docker pull gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386
- vol=spot-unstable-$CI_COMMIT_SHA
- docker volume create $vol
- exitcode=0
- docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lre.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$?
- docker create -v $vol:/build/result --name helper-$vol gitlab-registry.lrde.epita.fr/spot/buildenv/debian-i386 ./bin-spot.sh -j${NBPROC-1} || exitcode=$?
- docker cp _build_unstable/. helper-$vol:/build/result || exitcode=$?
- rm -rf _build_unstable
- docker start -a helper-$vol || exitcode=$?
@ -376,23 +315,20 @@ debpkg-unstable-i386:
- _build_unstable/
rpm-pkg:
stage: build2
needs:
- job: make-dist
artifacts: true
variables:
GIT_STRATEGY: none
stage: build
only:
- /-rpm$/
- master
- next
- stable
image: gitlab-registry.lre.epita.fr/spot/buildenv/fedora
image: gitlab-registry.lrde.epita.fr/spot/buildenv/fedora
script:
- VERSION=`cat VERSION`
- tar xvf spot-$VERSION.tar.gz spot-$VERSION/spot.spec
- cp spot-$VERSION.tar.gz ~/rpmbuild/SOURCES/
- cp spot-$VERSION/spot.spec ~/rpmbuild/SPECS/
- autoreconf -vfi
- ./configure
- make
- make dist
- cp spot-*.tar.gz ~/rpmbuild/SOURCES/
- cp spot.spec ~/rpmbuild/SPECS/
- rpmbuild -bb ~/rpmbuild/SPECS/spot.spec
- mv ~/rpmbuild/RPMS/x86_64/*.rpm .
allow_failure: true
@ -404,8 +340,6 @@ rpm-pkg:
publish-rpm:
stage: publish
variables:
GIT_STRATEGY: none
only:
- /-rpm$/
- next
@ -416,7 +350,6 @@ publish-rpm:
- rpm-pkg
script:
- case $CI_COMMIT_REF_NAME in stable) rput fedora stable *.rpm;; next) rput fedora unstable *.rpm;; esac
- rm -rf ./*
publish-stable:
only:
@ -424,24 +357,15 @@ publish-stable:
tags:
- dput
stage: publish
variables:
GIT_STRATEGY: none
dependencies:
- debpkg-stable-i386
- make-dist
script:
- cd _build_stable
- ls -l
- dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1`
- cd ..
- ls -l
- dput lrde *.changes
- tgz=`ls spot-*.tar.* | head -n 1`
- case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/; scp $tgz doc@dload:/var/www/html/spot/;; esac
- rm -rf ./*
- curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline
- curl -X POST "https://archive.softwareheritage.org/api/1/origin/save/git/url/https://gitlab.lre.epita.fr/spot/spot/"
- curl "https://web.archive.org/save/https://www.lrde.epita.fr/dload/spot/$tgz"
- curl "https://web.archive.org/save/https://www.lre.epita.fr/dload/spot/$tgz"
- case $tgz in *[0-9].tar.*) scp $tgz doc@perso:/var/www/dload/spot/;; esac
- curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=stable" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline
publish-unstable:
only:
@ -449,19 +373,14 @@ publish-unstable:
tags:
- dput
stage: publish
variables:
GIT_STRATEGY: none
dependencies:
- debpkg-unstable-i386
script:
- cd _build_unstable
- ls -l
- dput lrde `ls -t *amd64.changes | head -1` `ls -t *i386.changes | head -1`
- cd ..
- rm -rf _build_unstable
- curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lre.epita.fr/api/v4/projects/131/trigger/pipeline
- curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lre.epita.fr/api/v4/projects/181/trigger/pipeline
- dput lrde *.changes
- curl -X POST -F ref=master -F token=$TRIGGER_SPOT_WEB -F "variables[spot_branch]=next" https://gitlab.lrde.epita.fr/api/v4/projects/131/trigger/pipeline
- curl -X POST -F ref=master -F token=$TRIGGER_SANDBOX https://gitlab.lrde.epita.fr/api/v4/projects/181/trigger/pipeline
raspbian:
stage: build
@ -482,29 +401,3 @@ raspbian:
- ./spot-*/_build/sub/tests/*/*.log
- ./*.log
- ./*.tar.gz
centos7:
stage: build2
needs:
- job: make-dist
artifacts: true
variables:
GIT_STRATEGY: none
only:
- branches
except:
- /wip/
image: gitlab-registry.lre.epita.fr/spot/buildenv/centos7
script:
- VERSION=`cat VERSION`
- tar xvf spot-$VERSION.tar.gz
- mkdir build-$VERSION
- cd build-$VERSION
- scl enable devtoolset-7 "../spot-$VERSION/configure --enable-devel"
- scl enable devtoolset-7 make
- scl enable devtoolset-7 "make distcheck DISTCHECK_CONFIGURE_FLAGS='--enable-devel --enable-optimizations'"
artifacts:
when: on_failure
paths:
- build-*/spot-*/_build/sub/tests/*/*.log
- build-*/*.log

View file

@ -1,20 +0,0 @@
Ala-Eddine Ben-Salem <ala@lrde.epita.fr> <ala@gaborone.lrde.epita.fr>
Ala-Eddine Ben-Salem <ala@lrde.epita.fr> <ala@pret-move.rsr.lip6.fr>
Ala-Eddine Ben-Salem <ala@lrde.epita.fr> <ala@gaborone.lrde.epita.fr>
Antoine Martin <amartin@lrde.epita.fr> <antoine97.martin@gmail.com>
Arthur Remaud <aremaud@lrde.epita.fr> <aremaud@avignon.lrde.epita.fr>
Arthur Remaud <aremaud@lrde.epita.fr> <aremaud@node7.lrde.epita.fr>
Damien Lefortier <dam@lrde.epita.fr> <eg@feather.vurt>
Felix Abecassis <felix.abecassis@lrde.epita.fr> <abecassis@bosc-guerard.lrde.epita.fr>
Felix Abecassis <felix.abecassis@lrde.epita.fr> <abecassis@scalea.lrde.epita.fr>
Felix Abecassis <felix.abecassis@lrde.epita.fr> <abecassis@lrde.epita.fr>
Guillaume Sadegh <sadegh@lrde.epita.fr>
Guillaume Sadegh <sadegh@lrde.epita.fr> <guillaume.sadegh@lrde.epita.fr>
Henrich Lauko <xlauko@mail.muni.cz>
Henrich Lauko <xlauko@mail.muni.cz> <henrich.lau@gmail.com>
Jerome Dubois <jdubois@lrde.epita.fr> Jérôme Dubois <jdubois@lrde.epita.fr>
Philipp Schlehuber-Caissier <philipp@lrde.epita.fr>
Thibaud Michaud <thibaud.michaud@epita.fr> <michau_n@epita.fr>
Thomas Badie <badie@lrde.epita.fr> <badie@champoteux.lrde.epita.fr>
Rachid Rebiha <rebiha>
Thomas Martinez <martinez>

129
AUTHORS
View file

@ -1,101 +1,32 @@
The following people have contributed code to Spot:
- Ala-Eddine Ben-Salem (2010-2012)
During his PhD thesis at EPITA.
- Alexandre Duret-Lutz (2002-)
During his master and then PhD at Université Pierre et Marie Curie.
Then as an employee of EPITA.
- Alexandre Gbaguidi Aïsse (2016-2017)
While a student at EPITA.
- Alexandre Lewkowicz (2014-2016)
While a student at EPITA.
- Amaury Fauchille (2016)
While a student at EPITA.
- Antoine Martin (2018-)
While a student at EPITA, and then during his PhD at EPITA.
- Arthur Remaud (2017)
While a student at EPITA.
- Clément Gillard (2017-2019)
While a student at EPITA.
- Damien Lefortier (2008-2010)
While a student at EPITA.
- Denis Poitrenaud (2004-2005)
As an employee of Université Paris V.
- Elie Abi Saad (2013)
While a student at EPITA.
- Étienne Renault (2013-2020)
During his PhD at EPITA & Université Pierre et Marie Curie.
Then as an employee of EPITA.
- Félix Abecassis (2009-2010)
While a student at EPITA.
- Florian Perlié-Long (2017)
While a student at EPITA.
- Florian Renkin (2020-)
During his PhD at EPITA.
Then as an employee of Université Paris Cité.
- Guillaume Sadegh (2008-2010)
While a student at EPITA.
- Heikki Tauriainen (2004-2005)
During his PhD at Helsinki University of Technology.
- Henrich Lauko (2017)
As an ERASMUS student from Masaryk University (Brno) visiting EPITA.
- Jérôme Dubois (2020-2021)
While a student at EPITA.
- Jonah Romero (2023)
During an internship at IMDEA Software Institute, supervised by Pierre Ganty.
- Laurent Xu (2016-2017)
While a student at EPITA.
- Maximilien Colange (2016-2018)
As an employee of EPITA.
- Philipp Schlehuber (2020-)
As an employee of EPITA.
- Pierre Parutto (2012)
While a student at EPITA.
- Rachid Rebiha (2003)
During his master's internship at Université Pierre et Marie Curie.
- Souheib Baarir (2004, 2008, 2013)
During his PhD at Université Pierre et Marie Curie.
Then during a sabbatical visit at EPITA.
- Thibaud Michaud (2014, 2017)
While a student at EPITA.
- Thomas Badie (2011-2013)
While a student at EPITA.
- Thomas Martinez (2004)
While a student at EPITA.
- Thomas Medioni (2017)
During a master internship done at EPITA.
- Tomáš Babiak (2012)
During his PhD at Masaryk University (Brno).
- Vincent Tourneur (2017)
While a student at EPITA.
Ala-Eddine Ben-Salem
Alexandre Duret-Lutz
Alexandre Gbaguidi Aïsse
Alexandre Lewkowicz
Amaury Fauchille
Antoine Martin
Arthur Remaud
Clément Gillard
Damien Lefortier
Denis Poitrenaud
Elie Abi Saad
Étienne Renault
Félix Abecassis
Florian Renkin
Guillaume Sadegh
Heikki Tauriainen
Henrich Lauko
Jérôme Dubois
Laurent Xu
Maximilien Colange
Philipp Schlehuber
Pierre Parutto
Rachid Rebiha
Souheib Baarir
Thibaud Michaud
Thomas Badie
Thomas Martinez
Thomas Medioni
Tomáš Babiak
Vincent Tourneur

74
HACKING
View file

@ -5,11 +5,11 @@ Bootstraping from the GIT repository
Spot's gitlab page is at
https://gitlab.lre.epita.fr/spot/spot
https://gitlab.lrde.epita.fr/spot/spot
The GIT repository can be cloned with
git clone https://gitlab.lre.epita.fr/spot/spot.git
git clone https://gitlab.lrde.epita.fr/spot/spot.git
Some files in SPOT's source tree are generated. They are distributed
so that users do not need to install tools to rebuild them, but we
@ -25,7 +25,7 @@ since the generated files they produce are distributed.)
GNU Automake >= 1.11
GNU Libtool >= 2.4
GNU Flex >= 2.6
GNU Bison >= 3.3
GNU Bison >= 3.0
GNU Emacs (preferably >= 24 but it may work with older versions)
org-mode >= 9.1 (the version that comes bundled with your emacs
version is likely out-of-date; but distribution often have
@ -42,7 +42,7 @@ since the generated files they produce are distributed.)
A complete LaTeX distribution, including latexmk and extra fonts
like dsfont.sty.
ImageMagick
Python >= 3.6, IPython >= 2.3
Python >= 3.5, IPython >= 2.3
Jupyter >= 4, with nbconvert
GraphViz
Java >= 1.7 (needed to run PlantUML while generating the doc)
@ -55,7 +55,7 @@ only for certain operations (like releases):
pandoc used during Debian packaging for the conversion of
IPython notebooks to html
svgo for reducing SVG images before generating the tarball
(install with: npm install -g svgo@1.3.2)
(install with: npm install -g svgo)
ltl2ba used in the generated documentation and the test suite
ltl2dstar likewise
ltl3dra likewise
@ -290,28 +290,8 @@ would understand with:
make check LOG_DRIVER=$PWD/tools/test-driver-teamcity
Generating compile_commands.json
--------------------------------
The file compile_commands.json is used by many clang tools in order to
know how a single file is compiled (in particular, which include paths
should be used). Autotools-based build systems do not support the
generation of this file, but there is a tool called "bear" (for "Build
EAR") that is packaged with most distribution that can be used here.
Simply run a full build through "bear" using something like this:
% ./configure CC=clang-17 CXX=clang++-17
% make -j8 clean # make sure you will rebuild everything
% bear -- make -j8
This will simply intercept all command executions and record them in
the compile_commands.json database. Depending on the tools you plan to
use, you probably want to compile everything with clang, as shown above.
C++ Coding conventions
======================
Coding conventions
==================
Here some of the conventions we follow in Spot, so that the code looks
homogeneous. Please follow these strictly. Since this is free
@ -702,43 +682,3 @@ Other style recommandations
* Always code as if the person who ends up maintaining your code is
a violent psychopath who knows where you live.
Coding conventions for Python Tests
===================================
Unless you have some specific reason to write test cases in C++ (for
instance do test some specific C++ constructions, or to use valgrind),
prefer writing test cases in Python. Writing test cases in C++
requires some compilation, which slows down the test suite. Doing the
same test in Python is therefore faster, and it has the added benefit
of ensuring that the Python bindings work.
We have two types of Python tests: Python scripts or jupyter
notebooks. Jupyter notebooks are usually used for a sequence of
examples and comments that can also serve as part of the
documentation. Such jupyter notebooks should be added to the list of
code examples in doc/org/tut.org. Testing a notebook is done by the
tests/python/ipnbdoctest.py scripts, which evaluate each cells, and
checks that the obtained result is equivalent to the result saved in
the notebook. The process is a bit slow, so plain Python scripts
should be preferred for most tests.
If you do need a notebook to test Jupyter-specific code but this
notebook should not be shown in the documentation, use a filename
starting with '_'.
Tests written as Python scripts should follow the same convention as
shell scripts: exit 0 for PASS, exit 77 for SKIP, and any other exit
code for FAIL.
Do not use assert() in those scripts, as (1) asserts can be disabled,
and (2) they provide poor insights in case of failures. Instead do
from unittest import TestCase
tc = TestCase()
and then use tc.assertTrue(...), tc.assertEqual(..., ...),
tc.assertIn(..., ...), etc. In case of failures, those will print
useful messages in the trace of the tests. For instance multiline
strings that should have been equal will be presented with a diff.

View file

@ -1,5 +1,9 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2011-2017, 2020 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
## Copyright (C) 2003, 2005 Laboratoire d'Informatique de Paris 6 (LIP6),
## département Systèmes Répartis Coopératifs (SRC), Université Pierre
## et Marie Curie.
##
## This file is part of Spot, a model checking library.
##
@ -32,9 +36,8 @@ DOC_SUBDIR = doc
SUBDIRS = picosat buddy lib ltdl spot bin tests $(PYTHON_SUBDIR) $(DOC_SUBDIR) \
$(NEVER_SUBDIRS)
UTF8 = utf8/README.md utf8/LICENSE utf8/utf8.h \
utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h \
utf8/utf8/cpp11.h utf8/utf8/cpp17.h
UTF8 = utf8/README.md utf8/utf8.h \
utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h
DEBIAN = \
debian/changelog \
@ -65,7 +68,7 @@ EXTRA_DIST = HACKING ChangeLog.1 tools/gitlog-to-changelog \
tools/help2man tools/man2html.pl \
tools/test-driver-teamcity $(UTF8) $(DEBIAN) \
m4/gnulib-cache.m4 .dir-locals.el \
spot.spec spot.spec.in README.ltsmin
spot.spec spot.spec.in
dist-hook: gen-ChangeLog

832
NEWS
View file

@ -1,658 +1,9 @@
New in spot 2.12.2.dev (not yet released)
Command-line tools:
- ltlmix is a new tool that generates formulas by combining existing
ones. See https://spot.lre.epita.fr/ltlmix.html for examples.
- (BACKWARD INCOMPATIBILITY) ltlsynt's CSV output has been largely
overhauled, and the output columns have been both renamed (for
consistency) and augmented (with new statistics). The new CSV
output should be more useful when the input specification is
decomposed, in particular, there is a column giving the number of
sub-specifications obtained, and other statistics columns have
names starting with "max_" or "sum_" indicating how said
statistics are updated across sub-specifications.
See https://spot.lre.epita.fr/ltlsynt.html#csv for an example.
Additionally, --csv=FILENAME will now save the source filename for
the processed formulas as the first column of the CSV file, and
the "formula" column is no longer output by default to save space.
This "formula" column can be added back with the new option
--csv-with-formula=FILENAME if really needed.
- ltlsynt learned a --part-file option, to specify the partition of
input/output proposition from a *.part file, as used in several
other tools.
- ltlfilt learned a --relabel=io mode, that is useful to shorten
atomic propositions in the context of LTL synthesis. For instance
% ltlfilt -f 'G(req->Fack)&G(go->Fgrant)' --relabel=io --ins=req,go
G(i1 -> Fo0) & G(i0 -> Fo1)
The resulting formulas are now usable by ltlsynt without having to
specify which atomic propositions are input or output, as this can
be inferred from their name. (This supports a --part-file option
as well.)
- ltlfilt learned --save-part-file[=FILENAME] option that can be
used to create partition files suitable for many synthesis tools
(including ltlsynt).
- genltl learned --lily-patterns to generate the example LTL
synthesis specifications from Lily 1.0.2. Those come with input
and output atomic proposition rewritten in the form "iNN" or "oNN",
so they can be fed to ltlsynt directly, as in
% genltl --lily-patterns | ltlsynt -q
- genltl's --pps-arbiter-standard and --pps-arbiter-strict have been
changed to rename variables {r1,r2,...,g1,g2...} as
{i1,i2,...,o1,o2,...} so that these formulas can be fed directly to
ltlsynt.
- autfilt learned --restrict-dead-end-edges, to restrict labels of
edges leading to dead-ends. See the description of
restrict_dead_end_edges_here() below.
- autfilt learned --track-formula=F to label states with formulas
derived from F. (This is more precise on deterministic automata.)
- autfilt learned --mcs[=any|scc] to reorder states according to a
maximum cardinality search. The argument specifies how to break
ties.
- ltlfilt learned --pi1, --sigma1, --delta1, --pi2, --sigma2, and
--delta2 to filter according to classes Π₁,Σ₁,Δ₁,Π₂,Σ₂, and Δ₂.
- ltlfilt learned --to-delta2 to transform an LTL formula into Δ₂.
Library:
- restrict_dead_end_edges_here() can reduce non-determinism (but
not remove it) by restricting the label L of some edge (S)-L->(D)
going to a state D that does not have other successor than
itself. The conditions are detailed in the documentation of
this function.
- spot::postprocessor will now call restrict_dead_end_edges_here()
in its highest setting. This can be fine-tuned with the "rde"
extra option, see the spot-x (7) man page for detail.
- The formula class now keeps track of membership to the Δ₁, Σ₂,
Π₂, and Δ₂ syntactic class. This can be tested with
formula::is_delta1(), formula::is_sigma2(), formula::is_pi2(),
formula::is_delta2(). See doc/tl/tl.pdf for more discussion.
- spot::to_delta2() implements Δ₂-normalization for LTL formulas,
following "Efficient Normalization of Linear Temporal Logic" by
Esparza et al. (J. ACM, 2024).
- Trivial rewritings (those performed every time at construction)
were missing the rule "[*0]|f ≡ f" when f already accepts the
empty word. (Issue #545.)
- spot::match_states(A, B) is a new function that returns a vector
V such that V[x] contains all states y such that state (x, y) can
reach an accepting cycle in product(A, B). In particular, if A
and B accept the same language, any word accepted by A from state
x can be accepted in B from some state in V[x].
That function also has a variant spot::match_states(A, f) where f
is an LTL formula. In this case it returns an array of
formulas. If f represents a superset of the language of A, then
any word accepted by A from state x satisfies V[x]. Related to
Issue #591.
- twa_graph::defrag_states(num) no longer requires num[i]≤i; num
can now describe a permutation of the state numbers.
- spot::maximum_cardinality_search() and
spot::maximum_cardinality_search_reorder_here() are new functions
to compute (and apply) an ordering of states based on a maximum
cardinality search.
Bug fixes:
- "ltlsynt --aiger -q ..." was still printing the realizability
status and the AIG circuit; it now does the job silently as
requested.
- ltlsynt had a minor memory leak
New in spot 2.12.2 (2025-01-18)
Bug fixes:
- to_finite() was dealing incorrectly with edges that were
both alive and dead. (Issue #596.)
- LaTeX output of the X[!] operator with broken in both
LaTeX and self-contained LaTeX mode. (Issue #597)
- Fix a bug in the AIGER encoding with certain forms of
global-equivalence.
- Work around a spurious test failure with Python 3.13.
New in spot 2.12.1 (2024-09-23)
Bug fixes:
- Generating random formulas without any unary operators would very
often create formulas much smaller than asked.
- The parity game solver, which internally works on "parity max
odd", but actually accepts any type of parity acceptance, could be
confused by games with "parity min" acceptance using transition
with several colors (a rather uncommon situation).
- "ltlsynt ... --print-game --dot=ARGS" was ignoring ARGS.
- Work around various warnings from g++14.
- Improved handling of spot-extra/ directory with newer Swig
versions. Necessary to recompile Seminator 2 with Swig 4.
New in spot 2.12 (2024-05-16)
Build:
- When Python bindings are enabled, Spot now requires Python 3.6 or
later. Python 3.6 has reached end-of-life in 2021, but is still
used on CentOS 7 (which will reach end-of-support later in 2024).
Documentation:
- https://spot.lre.epita.fr/tut25.html is a new example showing
how to print an automaton in the "BA format" (used by Rabbit
and other tools).
Command-line tools:
- In places that accept format strings with '%' sequences, like
options --stats, --name, or --output, the new '%l' can now be used
to produce the 0-based serial number of the produced object. This
differs from the existing '%L' that is usually related to the line
number of the input (when that makes sense). For instance to
split a file that contains many automata into one file per
automaton, do
autfilt input.hoa -o output-%l.hoa
- For tools that produce automata, using -Hb or --hoa=b will produce
an HOA file in which aliases are used to form a basis for the
whole set of labels. Those aliases are only used when more than
one atomic proposition is used (otherwise, the atomic proposition
and its negation are already a basis). This can help reduce the
size of large HOA files.
- autfilt learned --separate-edges, to split the labels of
the automaton using a basis of disjoint labels. See
https://spot.lre.epita.fr/tut25.html for some motivation.
- autfilt learned --reduce-acceptance-set/--enlarge-acceptance-set
to heuristically remove/add unnecessary acceptance marks in
Büchi automata. (Issue #570)
- ltlfilt has a new option --relabel-overlapping-bool=abc|pnn that
will replace boolean subformulas by fresh atomic propositions even
if those subformulas share atomic propositions.
- ltlsynt's --ins and --outs options will interpret any atomic
proposition surrounded by '/' as a regular expression.
For instance with
ltlsynt --ins='/^in/,/env/' --outs='/^out/,/control/' ...
any atomic proposition that starts with 'in' or contains 'env'
will be considered as input, and one that starts with 'out'
or contains 'control' will be considered output.
By default, if neither --ins nor --outs is given, ltlsynt will
behave as if --ins='/^[iI]/' and --outs='/^[oO]/' were used.
- ltlsynt will now check for atomic propositions that always have
the same polarity in the specification. When this happens, these
output APs are replaced by true or false before running the
synthesis pipeline, and the resulting game, Mealy machine, or
Aiger circuit is eventually patched to include that constant
output. This can be disabled with --polarity=no.
- ltlsynt will now check for atomic propositions that are specified
as equivalent. When this is detected, equivalent atomic
propositions are replaced by one representative of their class, to
limit the number of different APs processed by the synthesis
pipeline. The resulting game, Mealy machine, or Aiger circuit is
eventually patched to include the removed APs. This optimization
can be disabled with --global-equivalence=no. As an exception, an
equivalence between input and output signals (such as G(in<->out))
will be ignored if ltlsynt is configured to output a game (because
patching the game a posteriori is cumbersome if the equivalence
concerns different players).
- genaut learned two new families of automata, --cycle-log-nba and
--cycle-onehot-nba. They both create a cycle of n^2 states that
can be reduced to a cycle of n states using reduction based on
direct simulation.
Library:
- split_2step now has multiple ways to split for improved performance.
The option can be controlled via the synthesis_info::splittype
- EXPL: explicit splitting of each state as before
- SEMISYM: The outgoing transitions of each state are encoded
as a bdd; Works better for larger number of input APs
- FULLYSYM: The automaton is first translated into a
fully symbolic version, then split.
- AUTO: Let the heuristic decide what to do.
- The following new trivial simplifications have been implemented for SEREs:
- f|[+] = [+] if f rejects [*0]
- f|[*] = [*] if f accepts [*0]
- f&&[+] = f if f rejects [*0]
- b:b[*i..j] = b[*max(i,1)..j]
- b[*i..j]:b[*k..l] = b[*max(i,1)+max(k,1)-1, j+l-1]
- The following new trivial simplifications have been implemented
for PSL operators:
- {[*]}[]->0 = 0
- {[*]}<>->1 = 1
- The HOA parser is a bit smarter when merging multiple initial
states into a single initial state (Spot's automaton class
supports only one): it now reuses the edges leaving initial states
without incoming transitions.
- The automaton parser has a new option "drop_false_edges" to
specify whether edges labeled by "false" should be ignored during
parsing. It is enabled by default for backward compatibility.
- spot::bdd_to_cnf_formula() is a new variant of spot::bdd_to_formula()
that converts a BDD into a CNF instead of a DNF.
- spot::relabel_bse() has been improved to better deal with more
cases. For instance '(a & b & c) U (!c & d & e)' is now
correctly reduced as '(p0 & p1) U (!p1 & p2)', and
'((a & !b) | (a->b)) U c' now becomes '1 U c'. (Issue #540.)
- spot::relabel_overlapping_bse() is a new function that will
replace boolean subformulas by fresh atomic propositions even if
those subformulas share atomic propositions.
- spot::translate() has a new -x option "relabel-overlap=M" that
augments the existing "relabel-bool=N". By default, N=4, M=8.
When the formula to translate has more than N atomic propositions,
relabel_bse() is first called to attempt to rename non-overlapping
boolean subexpressions (i.e., no shared atomic proposition) in
order to reduce the number of atomic propositions, a source of
exponential explosion in several places of the translation
pipeline. This relabel-bool optimization exists since Spot 2.4.
The new feature is that if, after relabel-bool, the formula still
has more than M atomic propositions, then spot::translate() now
attempts to relabel boolean subexpressions even if they have
overlapping atomic propositions, in an attempt to reduce the
number of atomic propositions even more. Doing so has the slightly
unfortunate side effect of hindering some simplifications (because
the new atomic propositions hide their interactions), but it
usually incurs a large speedup. (See Issue #500, Issue #536.)
For instance on Alexandre's laptop, running
'ltlsynt --tlsf SPIReadManag.tlsf --aiger'
with Spot 2.11.6 used to produce an AIG circuit with 48 nodes in
36 seconds; it now produces an AIG circuit with 53 nodes in only
0.1 second.
- spot::contains_forq() is an implementation of the paper "FORQ-Based
Language Inclusion Formal Testing" (Doveri, Ganty, Mazzocchi;
CAV'22) contributed by Jonah Romero.
- spot::contains() still defaults to the complementation-based
algorithm, however by calling
spot::containment_select_version("forq") or by setting
SPOT_CONTAINMENT_CHECK=forq in the environment, the
spot::contains_forq() implementation will be used instead when
applicable (inclusion between Büchi automata).
The above also impacts autfilt's --included-in option.
- spot::reduce_buchi_acceptance_set_here() and
spot::enlarge_buchi_acceptance_set_here() will heuristically
remove/add unnecessary acceptance marks in Büchi automata.
(Issue #570.)
- Given a twa_word_ptr W and a twa_ptr A both sharing the same
alphabet, one can now write W->intersects(A) or A->intersects(W)
instead of the longer W->as_automaton()->intersects(A) or
A->intersects(W->as_automaton()).
- spot::scc_info has a new option PROCESS_UNREACHABLE_STATES that
causes it to enumerate even unreachable SCCs.
- spot::realizability_simplifier is a new class that performs the
removal of superfluous APs that is now performed by ltlsynt
(search for --polarity and --global-equivalence above).
- scc_filter used to reduce automata tagged with the inherently-weak
property to weak Büchi automata (unless the acceptance was already
t or co-Büchi). In cases where the input automaton had no
rejecting cycle, the Büchi acceptance was overkill: scc_filter
will now use "t" acceptance. This change may have unexpected
consequences in code paths that assume running scc_filter on a
Büchi automaton will always return a Büchi automaton. For those,
a "keep_one_color" option has been added to scc_filter.
- spot::separate_edges() and spot::edge_separator offers more ways
to split labels. See https://spot.lrde.epita.fr/ipynb/split.html
- ltsmin's interface will now point to README.ltsmin in case an
error is found while running divine or spins.
- spot::is_safety_automaton() was generalized to detect any
automaton for which the acceptance could be changed to "t" without
changing the language. In previous versions this function assumed
weak automata as input, but the documentation did not reflect
this.
- spot::remove_alternation() has a new argument to decide whether it
should raise an exception or return nullptr if it requires more
acceptance sets than supported.
- spot::dualize() learned a trick to be faster on states that have
fewer outgoing edges than atomic propositions declared on the
automaton. spot::remove_alternation(), spot::tgba_powerset(),
simulation-based reductions, and spot::tgba_determinize() learned
a similar trick, except it isn't applied at the state level but if
the entire automaton uses few distinct labels. These changes may
speed up the processing of automata with many atomic propositions
but few distinct labels. (Issue #566 and issue #568.)
Backward incompatibilities:
- spot::dualize() does not call cleanup_acceptance() anymore. This
change ensures that the dual of a Büchi automaton will always be a
co-Büchi automaton. Previously cleanup_acceptance(), which remove
unused colors from the acceptance, was sometimes able to simplify
co-Büchi to "t", causing surprises.
- Function minimize_obligation_garanteed_to_work() was renamed to
minimize_obligation_guaranteed_to_work(). We believe this
function is only used by Spot currently.
- Function split_independant_formulas() was renamed to
split_independent_formulas(). We believe this function is only
used by Spot currently.
Python:
- The spot.automata() and spot.automaton() functions now accept a
drop_false_edges=False argument to disable the historical behavior
of ignoring edges labeled by False.
- Calling aut.highlight_state(s, None) or aut.highlight_edge(e,
None) may now be used to remove the highlighting color of some
given state or edge. Use aut.remove_highlight_states() or
aut.remove_highlight_edges() to remove all colors. (Issue #554.)
- Calling aut.get_hight_state(s) or get.highlight_edge(e) will
return the highlight color of that state/edge or None.
- Recent version Jupyter Notebook and Jupyter Lab started to render
SVG elements using <img...> tag to make it easier to copy/paste
those images. This breaks several usages, including the
possibility to have informative tooltips on states and edges (used
in Spot). See the following issues for more details.
https://github.com/jupyter/notebook/issues/7114
https://github.com/jupyterlab/jupyterlab/issues/10464
This version of Spot now declares its svg outputs as HTML to
prevent Jupyter from wrapping them as images.
Bugs fixed:
- tgba_determinize()'s use_simulation option would cause it to
segfault on automata with more than 2^16 SCCs, due to overflows in
computations of indices in the reachability matrix for SCCs.
(Issue #541.) This has been fixed by disabling the use_simulation
optimization in this case.
- product_or_susp() and product_susp() would behave incorrectly in
presence of unsatisfiable or universal acceptance conditions that
were not f or t. (Part of issue #546.)
- Several algorithms were incorrectly dealing with the "!weak"
property. Because they (rightly) assumed that a weak input would
produce a weak output, they also wrongly assumed that a non-weak
input would produce a non-weak output. Unfortunately removing
states or edges in a non-weak automaton can lead to a weak automaton.
The incorrect property led to issue #546. In addition to fixing
several algorithms, product() and print_hoa() will now raise an
exception if automaton with t or f acceptance is declared !weak.
(This cheap check will not catch all automata incorrectly labeled
by !weak, but helps detect some issues nonetheless.)
- The automaton parser forgot to update the list of highlighted
edges while dropping edges labeled by bddfalse. (issue #548.)
- The comparison operators for acceptance condition (==, !=)
could fail to equate two "t" condition, because we have two ways
to represent "t": the empty condition, or the empty "Inf({})".
- Functions complement() and change_parity() could incorrectly read
or write the unused edge #0. In the case of complement(), writing
that edge was usually harmless. However, in some scenario,
complement could need to stick a ⓪ acceptance mark on edge #0,
then the acceptance condition could be simplified to "t", and
finally change_parity could be confused to find such an accepting
mark in an automaton that declares no colors, and perform some
computation on that color that caused it to crash with a "Too many
acceptance sets used" message. (issue #552)
- The detection of terminal automata did not exactly
match the definition used in the HOA format. The definition
of a terminal automaton is supposed to be:
1. the automaton is weak
2. its accepting SCCs are complete
3. no accepting cycle can reach a rejecting cycle
However the implementation actually replaced the last point
by the following variant:
3'. no accepting edge can reach a rejecting cycle
This caused issues in automata with t acceptance. Fixing the code
by replacing 3' by 3 this also made the third argument of
is_terminal_automaton(), an optional boolean indicating whether
transition between SCCs should be ignored when computing 3',
completely obsolete. This third argument has been marked as
deprecated. (Issue #553)
- twa::defrag_states(), which is called for instance by
purge_dead_state(), did not update the highlight-edges property.
(Issue #555.)
- spot::minimize_obligation will skip attempts to complement very
weak automata when those would require too many acceptance sets.
- acd_transform() was not used by spot::postprocessor unless an
option_map was passed. This was due to some bad default for the
"acd" option: it defaulted to true when an option_map was given,
and to false otherwise. This had no consequences on
ltl2tgba/autfilt where some option_map is always passed, but for
instance the parity automata generated by spot.postprocessor in
Python were not using ACD by default.
- Using spot::postprocessor to produce colored parity automata could
fail to color some transient edges when the "acd" option was
activated.
- The formula printer incorrectly replaced a SERE like "(!a)[*3];a"
by "a[->]". The latter should only replace "(!a)[*];a".
(Issue #559.)
- The configure script failed to detect the include path for Python 3.12.
(Issue #577.)
- Work around many failures caused by incorrect rounding of floating
point values in the counting of transitions. (Issue #582)
- Some incorrectly escaped strings in Python code were causing
warnings with Python 3.12.
New in spot 2.11.6 (2023-08-01)
Bug fixes:
- Running command lines such as "autfilt input.hoa -o output-%L.hoa"
where thousands of different filenames can be created failed with
"Too many open files". (Issue #534)
- Using --format=... on a tool that output formulas would force
the output on standard output, even when --output was given.
- Using "randltl -S" did not correctly go through the SERE printer
functions.
- Our copy of BuDDy's bdd_forall() had a 20 year old typo that
caused cache entries from bdd_exist() and bdd_forall() to be
mixed. Spot was safe from this bug because it was only using
bdd_exist(). (Issue #535)
- Work around recent Pandas and GCC changes.
New in spot 2.11.5 (2023-04-20)
Bug fixes:
- Fix spurious failure of ltlsynt2.test when Python is not installed
(issue #530).
- Building from the git repository would fail to report a missing
emacs (issue #528).
- Fix exception raised by aut1.intersecting_run(aut2).as_twa()
because the run did not match transitions present in aut1
verbatim. We also changed the behavior of as_twa() to not merge
identical states.
- Fix segfaults occurring in determinization of 1-state terminal
automata.
- Fix incorrect assertion in game solver when the edge vector
contains deleted transitions.
New in spot 2.11.4 (2023-02-10)
Python:
- spot.acd() no longer depends on jQuery for interactivity.
Bug fixes:
- When merging initial states from state-based automata with
multiple initial states (because Spot supports only one), the HOA
parser could break state-based acceptance. (Issue #522.)
- autfilt --highlight-word refused to work on automata with Fin
acceptance for historical reasons, however the code has been
perfectly able to handle this for a while. (Issue #523.)
- delay_branching_here(), a new optimization of Spot 2.11 had an
incorrect handling of states without successors, causing some
segfaults. (Issue #524.)
- Running delay_branching_here() on state-based automata (this was not
done in Spot so far) may require the output to use transition-based
acceptance. (Issue #525.)
- to_finite(), introduced in 2.11, had a bug that could break the
completeness of automata and trigger an exception from the HOA
printer. (Issue #526.)
New in spot 2.11.3 (2022-12-09)
Bug fixes:
- Automata-based implication checks, used to simplify formulas were
slower than necessary because the translator was configured to
favor determinism unnecessarily. (Issue #521.)
- Automata-based implication checks for f&g and f|g could be
very slow when those n-ary operators had too many arguments.
They have been limited to 16 operands, but this value can be changed
with option -x tls-max-ops=N. (Issue #521 too.)
- Running ltl_to_tgba_fm() with an output_aborter (which is done
during automata-based implication checks) would leak memory on
abort.
- configure --with-pythondir should also redefine pyexecdir,
otherwise, libraries get installed in the wrong place on Debian.
(Issue #512.)
- The HOA parser used to silently declare unused and undefined states
(e.g., when the State: header declare many more states than the body
of the file). It now warns about those.
- 'autfilt -c ...' should display a match count even in presence of
parse errors.
- Calling solve_parity_game() multiple times on the same automaton
used to append the new strategy to the existing one instead of
overwriting it.
New in spot 2.11.2 (2022-10-26)
Command-line tools:
- The --stats specifications %s, %e, %t for printing the number of
(reachable) states, edges, and transitions, learned to support
options [r], [u], [a] to indicate if only reachable, unreachable,
or all elements should be counted.
Library:
- spot::reduce_parity() now has a "layered" option to force all
transition in the same parity layer to receive the same color;
like acd_transform() would do.
Bugs fixed:
- Fix pkg-config files containing @LIBSPOT_PTHREAD@ (issue #520)
- spot::relabel_bse() was incorrectly relabeling some dependent
Boolean subexpressions in SERE. (Note that this had no
consequence on automata translated from those SERE.)
New in spot 2.11.1 (2022-10-10)
Bugs fixed:
- Fix a build issue preventing the update of website (issue #516).
- Fix a compilation error with clang-14 on FreeBSD (issue #515).
New in spot 2.11 (2022-10-08)
Build:
- configure will now diagnose situations where Python bindings will
be installed in a directory that is not part of Python's search
path. A new configure option --with-pythondir can be used to
modify this installation path.
- A new configure option --enable-pthread enables the compilation of
Spot with -pthread, and render available the parallel version of
some algorithms. If Spot is compiled with -pthread enabled, any
user linking with Spot should also link with the pthread library.
In order to not break existing build setups using Spot, this
option is currently disabled by default in this release. We plan
to turn it on by default in some future release. Third-party
project using Spot may want to start linking with -pthread in
prevision for this change.
New in spot 2.10.4.dev (not yet released)
Command-line tools:
- autfilt has a new options --aliases=drop|keep to specify
if the HOA printer should attempt to preserve aliases
if the output code should attempt to preserve aliases
present in the HOA input. This defaults to "keep".
- autfilt has a new --to-finite option, illustrated on
@ -663,38 +14,6 @@ New in spot 2.11 (2022-10-08)
associated option --sonf-aps allows listing the newly introduced
atomic propositions.
- autcross learned a --language-complemented option to assist in the
case one is testing tools that complement automata. (issue #504).
- ltlsynt has a new option --tlsf that takes the filename of a TLSF
specification and calls syfco (which must be installed) to convert
it into an LTL formula.
- ltlsynt has a new option --from-pgame that takes a parity game in
extended HOA format, as used in the Synthesis Competition.
- ltlsynt has a new option --hide-status to hide the REALIZABLE or
UNREALIZABLE output expected by SYNTCOMP. (This line is
superfluous, because the exit status of ltlsynt already indicates
whether the formula is realizable or not.)
- ltlsynt has a new option --dot to request GraphViz output instead
of most output. This works for displaying Mealy machines, games,
or AIG circuits. See https://spot.lrde.epita.fr/ltlsynt.html for
examples.
- genaut learned the --cyclist-trace-nba and --cyclist-proof-dba
options. Those are used to generate pairs of automata that should
include each other, and are used to show a regression (in speed)
present in Spot 2.10.x and fixed in 2.11.
- genltl learned --eil-gsi to generate a family of formulas whose
translation and simplification used to be very slow. In particular
genltl --eil-gsi=23 | ltlfilt --from-ltlf | ltl2tgba
was reported as taking 9 days. This is now instantaneous.
Library:
- The new function suffix_operator_normal_form() implements
@ -711,20 +30,16 @@ New in spot 2.11 (2022-10-08)
- tgba_determinize() learned to fill the "original-classes" property.
States of the determinized automaton that correspond to the same
subset of states of the original automaton belong to the same
class. Filling this property is only done on demand as it inccurs
a small overhead.
class. Filling this property is only done on demand has it inccurs
on small overhead.
- sbacc() learned to take the "original-classes" property into
account and to preserve it.
account and preserve it.
- The HOA parser and printer learned to map the synthesis-outputs
property of Spot to the controllable-AP header for the Extended
HOA format used in SyntComp. https://arxiv.org/abs/1912.05793
- The automaton parser learned to parse games in the PGSolver format.
See the bottom of https://spot.lrde.epita.fr/ipynb/games.html for
an example.
- "aliases" is a new named property that is filled by the HOA parser
using the list of aliases declared in the HOA file, and then used
by the HOA printer on a best-effort basis. Aliases can be used to
@ -751,147 +66,14 @@ New in spot 2.11 (2022-10-08)
- purge_dead_states() will now also remove edges labeled by false
(except self-loops).
- When parsing formulas with a huge number of operands for an n-ary
operator (for instance 'p1 | p2 | ... | p1000') the LTL parser
would construct that formula two operand at a time, and the
formula constructor for that operator would be responsible for
inlining, sorting, deduplicating, ... all operands at each step.
This resulted in a worse-than-quadratic slowdown. This is now
averted in the parser by delaying the construction of such n-ary
nodes until all children are known.
- complement() used to always turn tautological acceptance conditions
into Büchi. It now only does that if the automaton is modified.
- The zielonka_tree construction was optimized using the same
memoization trick that is used in ACD. Additionally it can now be
run with additional options to abort when the tree has an unwanted
shape, or to turn the tree into a DAG.
- contains() can now take a twa as a second argument, not just a
twa_graph. This allows for instance to do contains(ltl, kripke)
to obtain a simple model checker (that returns true or false,
without counterexample).
- degeneralize() and degeneralize_tba() learned to work on
generalized-co-Büchi as well.
- product() learned that the product of two co-Büchi automata
is a co-Büchi automaton. And product_or() learned that the
"or"-product of two Büchi automata is a Büchi automaton.
- spot::postprocessor has a new extra option "merge-states-min" that
indicates above how many states twa_graph::merge_states() (which
performs a very cheap pass to fuse states with identical
successors) should be called before running simulation-based
reductions.
- A new function delay_branching_here(aut) can be used to simplify
some non-deterministic branching. If two transitions (q₁,ℓ,M,q₂)
and (q₁,ℓ,M,q₃) differ only by their destination state, and are
the only incoming transitions of their destination states, then q₂
and q₃ can be merged (taking the union of their outgoing
transitions). This cheap function is automatically called by
spot::translate() after translation of a formula to GBA, before
further simplification. This was introduced to help with automata
produced from formulas output by "genltl --eil-gsi" (see above).
- spot::postprocessor has new configuration variable branch-post
that can be used to control the use of branching-postponement
(disabled by default) or delayed-branching (see above, enabled by
default). See the spot-x(7) man page for details.
- spot::postprocessor is now using acd_transform() by default when
building parity automata. Setting option "acd=0" will revert
to using "to_parity()" instead.
- to_parity() has been almost entirely rewritten and is a bit
faster.
- When asked to build parity automata, spot::translator is now more
aggressively using LTL decomposition, as done in the Generic
acceptance case before paritizing the result. This results in
much smaller automata in many cases.
- spot::parallel_policy is an object that can be passed to some
algorithm to specify how many threads can be used if Spot has been
compiled with --enable-pthread. Currently, only
twa_graph::merge_states() supports it.
Python bindings:
- The to_str() method of automata can now export a parity game into
the PG-Solver format by passing option 'pg'. See
https://spot.lrde.epita.fr/ipynb/games.html for an example.
Deprecation notice:
- spot::pg_print() has been deprecated in favor of spot::print_pg()
for consistency with the rest of the API.
Bugs fixed:
- calling twa_graph::new_univ_edge(src, begin, end, cond, acc) could
produce unexpected results if begin and end were already pointing
into the universal edge vector, since the latter can be
reallocated during that process.
- Printing an alternating automaton with print_dot() using 'u' to
hide true state could produce some incorrect GraphViz output if
the automaton has a true state as part of a universal group.
- Due to an optimization introduced in 2.10 to parse HOA labels more
efficiently, the automaton parser could crash when parsing random
input (not HOA) containing '[' (issue #509).
New in spot 2.10.6 (2022-05-18)
Bugs fixed:
- Fix compilation error on MacOS X.
- Using -Ffile/N to read column N of a CSV file would not reset the
/N specification for the next file.
- make_twa_graph() will now preserve state number when copying a
kripke_graph object. As a consequence, print_dot() and
print_hoa() will now use state numbers matching those of the
kripke_graph (issue #505).
- Fix several compilation warning introduced by newer versions
of GCC and Clang.
New in spot 2.10.5 (2022-05-03)
Bugs fixed:
- reduce_parity() produced incorrect results when applied to
automata with deleted edges.
- An optimization of Zielonka could result in incorrect results
in some cases.
- ltlsynt --print-pg incorrectly solved the game in addition to
printing it.
- ltlsynt would fail if only one of --ins or --outs was set, and
if it was set empty.
- Work around a portability issue in Flex 2.6.4 preventing
- work around a portability issue in Flex 2.6.4 preventing
compilation on OpenBSD.
- Do not use the seq command in test cases, it is not available
everywhere.
- Do not erase the previous contents of the PYTHONPATH environment
variable when running tests, prepend to it instead.
- Simplify Debian instructions for LTO build to work around newer
libtool version.
- Fix invalid read in digraph::sort_edges_of_(), currently unused in
Spot.
New in spot 2.10.4 (2022-02-01)
Bug fixed:
@ -2251,7 +1433,7 @@ New in spot 2.6.2 (2018-09-28)
- We no longer distribute the Python-based CGI script + javascript
code for the online translator. Its replacement has its own
repository: https://gitlab.lre.epita.fr/spot/spot-web-app/
repository: https://gitlab.lrde.epita.fr/spot/spot-web-app/
Library:

58
README
View file

@ -76,7 +76,7 @@ Requirements
Spot requires a C++17-compliant compiler. G++ 7.x or later, as well
as Clang++ 5.0 or later should work.
Spot expects a complete installation of Python (version 3.6 or later).
Spot expects a complete installation of Python (version 3.5 or later).
Especially, Python's headers files should be installed. If you don't
have Python installed, and do NOT want to install it, you should run
configure with the --disable-python option (see below).
@ -93,10 +93,6 @@ automata.
If the SAT-solver glucose is found on your system, it will be used by
our test suite to test our SAT-based minimization algorithm.
If you want to use Spot with DiVinE2 (for model checking DVE models)
or with SpinS (for model checking Promela models), please read the
file named "README.ltsmin" for installation instructions.
Spot used to distribute a modified version of LBTT (an LTL to Büchi
test bench), mostly fixing errors reported by recent compilers.
However Spot now distributes its own reimplementation of LBTT, called
@ -114,16 +110,16 @@ Spot follows the traditional `./configure && make && make check &&
make install' process. People unfamiliar with the GNU Build System
should read the file INSTALL for generic instructions.
If you plan to use the Python bindings, we recommend you use the
following --prefix options when calling configure:
If you plan to use the Python binding, we recommend you use one
of the following --prefix options when calling configure:
--prefix ~/.local
--prefix /usr
--prefix /usr/local (the default)
--prefix ~/.local (if you do not have root permissions)
The reason is that ~/.local/lib/python3.X/site-packages, where Spot's
Python bindings will be installed, is automatically searched by
Python. If you use a different prefix directory, you may have to tune
the PYTHONPATH environment variable, or use the --with-pythondir
option to specify different installation paths.
The reason is that all these locations are usually automatically
searched by Python. If you use a different prefix directory, you may
have to tune the PYTHONPATH environment variable.
In addition to its usual options, ./configure will accept some
flags specific to Spot:
@ -133,7 +129,7 @@ flags specific to Spot:
offers a convenient interface when used in an IPython notebook,
and are also used to build the CGI script that translates LTL
formulas on-line. You may safely disable these, especially if you
do not have a working Python 3.6+ installation or if you are
do not have a working Python 3.2+ installation or if you are
attempting some cross-compilation.
--enable-max-accsets=N
@ -177,12 +173,6 @@ flags specific to Spot:
client code should be compiled with -D_GLIBCXX_DEBUG as well. This
options should normally only be useful to run Spot's test-suite.
--enable-pthread
Build and link with the -pthread option, and activate a few
parallel variants of the algorithms. This is currently disabled
by default, as it require all third-party tools using Spot to
build with -pthread as well.
--enable-c++20
Build everything in C++20 mode. We use that in our build farm to
ensure that Spot can be used in C++20 projects as well.
@ -254,31 +244,17 @@ To test the Python bindings, try running
>>> import spot
>>> print(spot.version())
If you installed Spot with a prefix that is not searched by Python by
default it is likely that the above import statement will fail to
locate the spot package. You can show the list of directories that
are searched by Python using:
If you installed Spot with a prefix that is not one of those suggested
in the "Building and installing" section, it is likely that the above
import statement will fail to locate the spot package. You can show
the list of directories that are searched by Python using:
% python3
>>> import sys
>>> print(sys.path)
And you can modify that list of searched directories using the
PYTHONPATH environment variable. Alternatively, you can instruct Spot
to install its Python files in one of those directories using the
--with-pythondir configure option. As an example, an issue in
distributions derived from Debian is that if you run
./configure && make && make install
Python files get installed in /usr/local/lib/python3.X/site-packages
while Debian's version of Python only looks for them into
/usr/local/lib/python3.X/dist-packages instead. You can fix that by
instructing configure that you want packages installed into the right
directory instead:
./configure --with-pythondir=/usr/local/lib/python3.X/dist-packages \
&& make && make install
PYTHONPATH environment variable.
To test if man pages can be found, simply try:
@ -343,13 +319,13 @@ bench/ Benchmarks for ...
wdba/ ... WDBA minimization (for obligation properties).
python/ Python bindings for Spot and BuDDy
Third-party software
Third party software
--------------------
buddy/ A customized version of BuDDy 2.3 (a BDD library).
ltdl/ Libtool's portable dlopen() wrapper library.
lib/ Gnulib's portability modules.
utf8/ Trifunovic's utf-8 routines. https://github.com/nemtrif/utfcpp
utf8/ Nemanja Trifunovic's utf-8 routines.
elisp/ Related emacs modes, used for building the documentation.
picosat/ A distribution of PicoSAT 965 (a satsolver library).
spot/bricks/ A collection of useful C++ code provided by DiVinE

20
THANKS
View file

@ -5,31 +5,22 @@ Andreas Tollkötter
Andrew Wells
Anton Pirogov
Ayrat Khalimov
Blake C. Rawlings
Cambridge Yang
Caroline Lemieux
Christian Dax
Christopher Ziegler
Clément Tamines
Daniel Stan
David Dokoupil
David Müller
Dávid Smolka
Edmond Irani Liu
Emmanuel Filiot
Ernesto Posse
Étienne Renault
Fabrice Kordon
Fangyi Zhou
Felix Klaedtke
Florian Perlié-Long
František Blahoudek
Gerard J. Holzmann
Guillermo A. Perez
Hashim Ali
Heikki Tauriainen
Henrich Lauko
Jacopo Binchi
Jan Strejček
Jean-Michel Couvreur
Jean-Michel Ilié
@ -41,7 +32,6 @@ Juan Tzintzun
Juraj Major
Kristin Y. Rozier
Marc Espie
Marek Jankola
Martin Dieguez Lodeiro
Matthias Heizmann
Maxime Bouton
@ -51,20 +41,14 @@ Michael Weber
Mikuláš Klokočka
Ming-Hsien Tsai
Nikos Gorogiannis
Ondřej Lengál
Paul Guénézan
Pierre Ganty
Raven Beutner
Reuben Rowe
Roei Nahum
Rüdiger Ehlers
Samuel Judson
Scott Buckley
Shachar Itzhaky
Shengping Shaw
Shufang Zhu
Silien Hong
Simon Jantsch
Shengping Shaw
Shufang Zhu
Sonali Dutta
Tereza Šťastná
Tobias Meggendorfer.

View file

@ -1,4 +1,8 @@
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Laboratoire de Recherche
## et Développement de l'Epita (LRDE).
## Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6),
## département Systèmes Répartis Coopératifs (SRC), Université Pierre
## et Marie Curie.
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,4 +1,5 @@
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2013 Laboratoire de Recherche et Développement de
## l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,5 +1,6 @@
#!/usr/bin/env python3
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2016-2018 Laboratoire de Recherche et Développement de
# l'Epita (LRDE).
#
# This file is part of Spot, a model checking library.
#
@ -54,12 +55,12 @@ class BenchConfig(object):
if line[0] == '#' or line.isspace():
continue
elif line[0:2] == "sh":
sh = re.search('sh (.+)$', line).group(1)
sh = re.search('sh (.+?)$', line).group(1)
continue
else:
name = re.search('(.+?):', line).group(1)
code = re.search(':(.+?)>', line).group(1)
xoptions = re.search('>(.+)$', line).group(1)
xoptions = re.search('>(.+?)$', line).group(1)
b = Bench(name=name, code=code, xoptions=xoptions)
self.l.append(b)
self.sh.append(sh)

View file

@ -1,4 +1,6 @@
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6),
## département Systèmes Répartis Coopératifs (SRC), Université Pierre
## et Marie Curie.
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,5 +1,7 @@
# -*- shell-script -*-
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6),
# département Systèmes Répartis Coopératifs (SRC), Université Pierre
# et Marie Curie.
#
# This file is part of Spot, a model checking library.
#

View file

@ -1,5 +1,6 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2013, 2016 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,5 +1,9 @@
# -*- mode: shell-script; coding: utf-8 -*-
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2012, 2013, 2016, 2018 Laboratoire de Recherche et
# Développement de l'Epita (LRDE).
# Copyright (C) 2005 Laboratoire d'Informatique de Paris 6 (LIP6),
# département Systèmes Répartis Coopératifs (SRC), Université Pierre
# et Marie Curie.
#
# This file is part of Spot, a model checking library.
#

View file

@ -1,6 +1,7 @@
#!/usr/bin/env python3
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2013 Laboratoire de Recherche et Développement de
## l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,4 +1,5 @@
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2009, 2010 Laboratoire de Recherche et Développement de
# l'EPITA (LRDE)
#
# This file is part of Spot, a model checking library.
#

View file

@ -1,4 +1,5 @@
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2009, 2014 Laboratoire de Recherche et Développement
# de l'EPITA (LRDE)
#
# This file is part of Spot, a model checking library.
#

View file

@ -1,4 +1,5 @@
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2013 Laboratoire de Recherche et Développement de
## l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,5 +1,6 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2014, 2015, 2017 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2014, 2015, 2016, 2017 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -27,7 +28,7 @@
#include <spot/misc/timer.hh>
#include <argp.h>
static const char argp_program_doc[] = "";
const char argp_program_doc[] ="";
const struct argp_child children[] =
{

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2014, 2015, 2017 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,4 +1,5 @@
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2010 Laboratoire de Recherche et Développement de
# l'EPITA (LRDE)
#
# This file is part of Spot, a model checking library.
#

1
bin/.gitignore vendored
View file

@ -9,7 +9,6 @@ ltlcross
ltldo
ltlfilt
ltlgrind
ltlmix
ltlsynt
randaut
randltl

View file

@ -1,5 +1,6 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2012-2017 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##
@ -43,8 +44,6 @@ libcommon_a_SOURCES = \
common_finput.hh \
common_hoaread.cc \
common_hoaread.hh \
common_ioap.cc \
common_ioap.hh \
common_output.cc \
common_output.hh \
common_post.cc \
@ -71,7 +70,6 @@ bin_PROGRAMS = \
ltldo \
ltlfilt \
ltlgrind \
ltlmix \
ltlsynt \
randaut \
randltl
@ -95,7 +93,6 @@ ltl2tgta_SOURCES = ltl2tgta.cc
ltlcross_SOURCES = ltlcross.cc
ltlgrind_SOURCES = ltlgrind.cc
ltldo_SOURCES = ltldo.cc
ltlmix_SOURCES = ltlmix.cc
ltlsynt_SOURCES = ltlsynt.cc
dstar2tgba_SOURCES = dstar2tgba.cc
spot_x_SOURCES = spot-x.cc

View file

@ -12,7 +12,7 @@ whose purpose is just to generate a man-page with the same format as
the other man pages (this includes keeping the version number
up-to-date).
There is also a script called 'options.py' that summarizes how the
There is also a script called 'options.py' that summerizes how the
different short options are used among the tools.
Routines that are shared by multiple command-line tools are stored in

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2017-2020 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -50,7 +51,7 @@
#include <spot/misc/escape.hh>
#include <spot/misc/timer.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Call several tools that process automata and cross-compare their output \
to detect bugs, or to gather statistics. The list of automata to use \
should be supplied on standard input, or using the -F option.\v\
@ -63,7 +64,6 @@ Exit status:\n\
enum {
OPT_BOGUS = 256,
OPT_COMPLEMENTED,
OPT_CSV,
OPT_HIGH,
OPT_FAIL_ON_TIMEOUT,
@ -94,8 +94,6 @@ static const argp_option options[] =
"consider timeouts as errors", 0 },
{ "language-preserved", OPT_LANG, nullptr, 0,
"expect that each tool preserves the input language", 0 },
{ "language-complemented", OPT_COMPLEMENTED, nullptr, 0,
"expect that each tool complements the input language", 0 },
{ "no-checks", OPT_NOCHECKS, nullptr, 0,
"do not perform any sanity checks", 0 },
/**************************************************/
@ -146,7 +144,6 @@ static bool fail_on_timeout = false;
static bool stop_on_error = false;
static bool no_checks = false;
static bool opt_language_preserved = false;
static bool opt_language_complemented = false;
static bool opt_omit = false;
static const char* csv_output = nullptr;
static unsigned round_num = 0;
@ -161,7 +158,7 @@ parse_opt(int key, char* arg, struct argp_state*)
switch (key)
{
case 'F':
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
break;
case 'q':
quiet = true;
@ -173,9 +170,6 @@ parse_opt(int key, char* arg, struct argp_state*)
bogus_output_filename = arg;
break;
}
case OPT_COMPLEMENTED:
opt_language_complemented = true;
break;
case OPT_CSV:
csv_output = arg ? arg : "-";
break;
@ -215,7 +209,7 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
if (arg[0] == '-' && !arg[1])
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
else
tools_push_autproc(arg);
break;
@ -344,6 +338,7 @@ struct in_statistics
struct out_statistics
{
// If OK is false, output statistics are not available.
bool ok;
const char* status_str;
@ -351,7 +346,7 @@ struct out_statistics
double time;
aut_statistics output;
out_statistics() noexcept
out_statistics()
: ok(false),
status_str(nullptr),
status_code(0),
@ -538,32 +533,25 @@ namespace
const spot::const_twa_graph_ptr& aut_j,
size_t i, size_t j)
{
auto is_really_comp = [lc = opt_language_complemented,
ts = tools.size()](unsigned i) {
return lc && i == ts;
};
if (aut_i->num_sets() + aut_j->num_sets() >
spot::acc_cond::mark_t::max_accsets())
{
if (!quiet)
std::cerr << "info: building " << autname(i, is_really_comp(i))
<< '*' << autname(j, !is_really_comp(j))
std::cerr << "info: building " << autname(i)
<< '*' << autname(j, true)
<< " requires more acceptance sets than supported\n";
return false;
}
if (verbose)
std::cerr << "info: check_empty "
<< autname(i, is_really_comp(i))
<< '*' << autname(j, !is_really_comp(j)) << '\n';
<< autname(i) << '*' << autname(j, true) << '\n';
auto w = aut_i->intersecting_word(aut_j);
if (w)
{
std::ostream& err = global_error();
err << "error: " << autname(i, is_really_comp(i))
<< '*' << autname(j, !is_really_comp(j))
err << "error: " << autname(i) << '*' << autname(j, true)
<< (" is nonempty; both automata accept the infinite word:\n"
" ");
example() << *w << '\n';
@ -612,7 +600,7 @@ namespace
return src.str();
}();
input_statistics.emplace_back(in_statistics());
input_statistics.push_back(in_statistics());
input_statistics[round_num].input_source = std::move(source);
if (auto name = input->get_named_prop<std::string>("automaton-name"))
@ -633,15 +621,12 @@ namespace
int problems = 0;
size_t m = tools.size();
size_t mi = m + opt_language_preserved + opt_language_complemented;
size_t mi = m + opt_language_preserved;
std::vector<spot::twa_graph_ptr> pos(mi);
std::vector<spot::twa_graph_ptr> neg(mi);
vector_tool_statistics stats(m);
// For --language-complemented, we store the input automata in
// pos and will compute its complement in neg. Before running
// checks we will swap both automata.
if (opt_language_preserved || opt_language_complemented)
if (opt_language_preserved)
pos[mi - 1] = input;
if (verbose)
@ -657,7 +642,7 @@ namespace
problems += prob;
}
spot::cleanup_tmpfiles();
output_statistics.emplace_back(std::move(stats));
output_statistics.push_back(std::move(stats));
if (verbose)
{
@ -733,9 +718,6 @@ namespace
};
}
if (opt_language_complemented)
std::swap(pos[mi - 1], neg[mi - 1]);
// Just make a circular implication check
// A0 <= A1, A1 <= A2, ..., AN <= A0
unsigned ok = 0;
@ -842,15 +824,10 @@ main(int argc, char** argv)
check_no_automaton();
if (s == 1 && !no_checks
&& !opt_language_preserved
&& !opt_language_complemented)
error(2, 0, "Since --language-preserved and --language-complemented "
"are not used, you need at least two tools to compare.");
if (s == 1 && !opt_language_preserved && !no_checks)
error(2, 0, "Since --language-preserved is not used, you need "
"at least two tools to compare.");
if (opt_language_preserved && opt_language_complemented)
error(2, 0, "Options --language-preserved and --language-complemented "
"are incompatible.");
setup_color();
setup_sig_handler();

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2013-2022 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -44,15 +45,12 @@
#include <spot/misc/timer.hh>
#include <spot/parseaut/public.hh>
#include <spot/tl/exclusive.hh>
#include <spot/tl/parse.hh>
#include <spot/twaalgos/alternation.hh>
#include <spot/twaalgos/are_isomorphic.hh>
#include <spot/twaalgos/canonicalize.hh>
#include <spot/twaalgos/cobuchi.hh>
#include <spot/twaalgos/cleanacc.hh>
#include <spot/twaalgos/complement.hh>
#include <spot/twaalgos/contains.hh>
#include <spot/twaalgos/deadends.hh>
#include <spot/twaalgos/degen.hh>
#include <spot/twaalgos/dtwasat.hh>
#include <spot/twaalgos/dualize.hh>
@ -64,8 +62,6 @@
#include <spot/twaalgos/isweakscc.hh>
#include <spot/twaalgos/langmap.hh>
#include <spot/twaalgos/mask.hh>
#include <spot/twaalgos/matchstates.hh>
#include <spot/twaalgos/mcs.hh>
#include <spot/twaalgos/product.hh>
#include <spot/twaalgos/randomize.hh>
#include <spot/twaalgos/remfin.hh>
@ -79,7 +75,7 @@
#include <spot/twaalgos/sum.hh>
#include <spot/twaalgos/totgba.hh>
static const char argp_program_doc[] = "\
static const char argp_program_doc[] ="\
Convert, transform, and filter omega-automata.\v\
Exit status:\n\
0 if some automata were output\n\
@ -106,7 +102,6 @@ enum {
OPT_DUALIZE,
OPT_DNF_ACC,
OPT_EDGES,
OPT_ENLARGE_ACCEPTANCE_SET,
OPT_EQUIVALENT_TO,
OPT_EXCLUSIVE_AP,
OPT_GENERALIZED_RABIN,
@ -138,14 +133,12 @@ enum {
OPT_KEEP_STATES,
OPT_KILL_STATES,
OPT_MASK_ACC,
OPT_MCS,
OPT_MERGE,
OPT_NONDET_STATES,
OPT_PARTIAL_DEGEN,
OPT_PRODUCT_AND,
OPT_PRODUCT_OR,
OPT_RANDOMIZE,
OPT_REDUCE_ACCEPTANCE_SET,
OPT_REJ_SCCS,
OPT_REJECT_WORD,
OPT_REM_AP,
@ -153,11 +146,9 @@ enum {
OPT_REM_UNREACH,
OPT_REM_UNUSED_AP,
OPT_REM_FIN,
OPT_RESTRICT_DEAD_ENDS,
OPT_SAT_MINIMIZE,
OPT_SCCS,
OPT_SEED,
OPT_SEPARATE_EDGES,
OPT_SEP_SETS,
OPT_SIMPL_ACC,
OPT_SIMPLIFY_EXCLUSIVE_AP,
@ -169,7 +160,6 @@ enum {
OPT_SUM_AND,
OPT_TERMINAL_SCCS,
OPT_TO_FINITE,
OPT_TRACK_FORMULA,
OPT_TRIV_SCCS,
OPT_USED_AP_N,
OPT_UNUSED_AP_N,
@ -240,7 +230,7 @@ static const argp_option options[] =
{ "is-alternating", OPT_IS_ALTERNATING, nullptr, 0,
"keep only automata using universal branching", 0 },
{ "intersect", OPT_INTERSECT, "FILENAME", 0,
"keep automata whose languages have a non-empty intersection with"
"keep automata whose languages have an non-empty intersection with"
" the automaton from FILENAME", 0 },
{ "included-in", OPT_INCLUDED_IN, "FILENAME", 0,
"keep automata whose languages are included in that of the "
@ -287,20 +277,11 @@ static const argp_option options[] =
{ "nth", 'N', "RANGE", 0,
"assuming input automata are numbered from 1, keep only those in RANGE",
0 },
{ "enlarge-acceptance-set", OPT_ENLARGE_ACCEPTANCE_SET, nullptr, 0,
"enlarge the number of accepting transitions (or states if -S) in a "
"Büchi automaton", 0 },
{ "reduce-acceptance-set", OPT_REDUCE_ACCEPTANCE_SET, nullptr, 0,
"reduce the number of accepting transitions (or states if -S) in a "
"Büchi automaton", 0 },
/**************************************************/
RANGE_DOC_FULL,
WORD_DOC,
/**************************************************/
{ nullptr, 0, nullptr, 0, "Transformations:", 7 },
{ "mcs-order", OPT_MCS, "any|scc", OPTION_ARG_OPTIONAL,
"reorder states using a maximum cardinality search; use option to"
" specify how to break ties", 0 },
{ "merge-transitions", OPT_MERGE, nullptr, 0,
"merge transitions with same destination and acceptance", 0 },
{ "product", OPT_PRODUCT_AND, "FILENAME", 0,
@ -383,18 +364,12 @@ static const argp_option options[] =
{ "remove-dead-states", OPT_REM_DEAD, nullptr, 0,
"remove states that are unreachable, or that cannot belong to an "
"infinite path", 0 },
{ "restrict-dead-end-edges", OPT_RESTRICT_DEAD_ENDS, nullptr, 0,
"restrict labels of dead-end edges, based on useful transitions of the "
"state they reach", 0 },
{ "simplify-acceptance", OPT_SIMPL_ACC, nullptr, 0,
"simplify the acceptance condition by merging identical acceptance sets "
"and by simplifying some terms containing complementary sets", 0 },
{ "split-edges", OPT_SPLIT_EDGES, nullptr, 0,
"split edges into transitions labeled by conjunctions of all atomic "
"propositions, so they can be read as letters", 0 },
{ "separate-edges", OPT_SEPARATE_EDGES, nullptr, 0,
"split edges into transitions labeled by a disjoint set of labels that"
" form a basis for the original automaton", 0 },
{ "sum", OPT_SUM_OR, "FILENAME", 0,
"build the sum with the automaton in FILENAME "
"to sum languages", 0 },
@ -421,7 +396,6 @@ static const argp_option options[] =
"Convert an automaton with \"alive\" and \"!alive\" propositions "
"into a Büchi automaton interpretable as a finite automaton. "
"States with a outgoing \"!alive\" edge are marked as accepting.", 0 },
/**************************************************/
{ nullptr, 0, nullptr, 0, "Decorations (for -d and -H1.1 output):", 9 },
{ "highlight-accepting-run", OPT_HIGHLIGHT_ACCEPTING_RUN, "NUM",
OPTION_ARG_OPTIONAL, "highlight one accepting run using color NUM", 0},
@ -435,12 +409,9 @@ static const argp_option options[] =
OPTION_ARG_OPTIONAL,
"highlight nondeterministic states and edges with color NUM", 0},
{ "highlight-word", OPT_HIGHLIGHT_WORD, "[NUM,]WORD", 0,
"highlight one run matching WORD using color NUM", 0 },
"highlight one run matching WORD using color NUM", 0},
{ "highlight-languages", OPT_HIGHLIGHT_LANGUAGES, nullptr, 0 ,
"highlight states that recognize identical languages", 0 },
{ "track-formula", OPT_TRACK_FORMULA, "FORMULA", 0,
"attempt to label the states of the automaton assuming the automaton "
"recognize FORMULA (use deterministic automata for precision)", 0 },
"highlight states that recognize identical languages", 0},
/**************************************************/
{ nullptr, 0, nullptr, 0,
"If any option among --small, --deterministic, or --any is given, "
@ -477,7 +448,7 @@ struct canon_aut
std::vector<tr_t> edges;
std::string acc;
explicit canon_aut(const spot::const_twa_graph_ptr& aut)
canon_aut(const spot::const_twa_graph_ptr& aut)
: num_states(aut->num_states())
, edges(aut->edge_vector().begin() + 1,
aut->edge_vector().end())
@ -527,21 +498,6 @@ static bool const aliases_types[] =
};
ARGMATCH_VERIFY(aliases_args, aliases_types);
spot::mcs_tie_break opt_mcs_tie = spot::MCS_TIE_ANY;
static char const* const mcs_args[] =
{
"any",
"scc",
nullptr,
};
static spot::mcs_tie_break const mcs_types[] =
{
spot::MCS_TIE_ANY,
spot::MCS_TIE_SCC,
};
ARGMATCH_VERIFY(mcs_args, mcs_types);
enum acc_type {
ACC_Any = 0,
ACC_Given,
@ -673,7 +629,6 @@ static struct opt_t
std::vector<std::pair<spot::twa_graph_ptr, unsigned>> hl_words;
}* opt;
static bool opt_mcs = false;
static bool opt_merge = false;
static bool opt_has_univ_branching = false;
static bool opt_has_exist_branching = false;
@ -737,19 +692,14 @@ static bool opt_rem_unreach = false;
static bool opt_rem_unused_ap = false;
static bool opt_sep_sets = false;
static bool opt_split_edges = false;
static bool opt_separate_edges = false;
static const char* opt_sat_minimize = nullptr;
static const char* opt_to_finite = nullptr;
static int opt_highlight_nondet_states = -1;
static int opt_highlight_nondet_edges = -1;
static int opt_highlight_accepting_run = -1;
static bool opt_highlight_languages = false;
static spot::formula opt_track_formula = nullptr;
static bool opt_dca = false;
static bool opt_streett_like = false;
static bool opt_enlarge_acceptance_set = false;
static bool opt_reduce_acceptance_set = false;
static bool opt_restrict_dead_ends = false;
static spot::twa_graph_ptr
ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false)
@ -763,12 +713,10 @@ ensure_deterministic(const spot::twa_graph_ptr& aut, bool nonalt = false)
return p.run(aut);
}
static spot::twa_graph_ptr
ensure_tba(spot::twa_graph_ptr aut,
spot::postprocessor::output_type type = spot::postprocessor::Buchi)
static spot::twa_graph_ptr ensure_tba(spot::twa_graph_ptr aut)
{
spot::postprocessor p;
p.set_type(type);
p.set_type(spot::postprocessor::Buchi);
p.set_pref(spot::postprocessor::Any);
p.set_level(spot::postprocessor::Low);
return p.run(aut);
@ -778,14 +726,12 @@ ensure_tba(spot::twa_graph_ptr aut,
static spot::twa_graph_ptr
product(spot::twa_graph_ptr left, spot::twa_graph_ptr right)
{
// Are we likely to fail because of too many colors?
if ((left->num_sets() + right->num_sets() >
spot::acc_cond::mark_t::max_accsets())
&& (type == spot::postprocessor::Buchi
|| type == spot::postprocessor::CoBuchi))
if ((type == spot::postprocessor::Buchi)
&& (left->num_sets() + right->num_sets() >
spot::acc_cond::mark_t::max_accsets()))
{
left = ensure_tba(left, type);
right = ensure_tba(right, type);
left = ensure_tba(left);
right = ensure_tba(right);
}
return spot::product(left, right);
}
@ -793,34 +739,16 @@ product(spot::twa_graph_ptr left, spot::twa_graph_ptr right)
static spot::twa_graph_ptr
product_or(spot::twa_graph_ptr left, spot::twa_graph_ptr right)
{
// Are we likely to fail because of too many colors?
if ((left->num_sets() + right->num_sets() >
spot::acc_cond::mark_t::max_accsets())
&& (type == spot::postprocessor::Buchi
|| type == spot::postprocessor::CoBuchi))
if ((type == spot::postprocessor::Buchi)
&& (left->num_sets() + right->num_sets() >
spot::acc_cond::mark_t::max_accsets()))
{
left = ensure_tba(left, type);
right = ensure_tba(right, type);
left = ensure_tba(left);
right = ensure_tba(right);
}
return spot::product_or(left, right);
}
static spot::twa_graph_ptr
word_to_aut(const char* word, const char *argname)
{
try
{
return spot::parse_word(word, opt->dict)->as_automaton();
}
catch (const spot::parse_error& e)
{
error(2, 0, "failed to parse the argument of --%s:\n%s",
argname, e.what());
}
SPOT_UNREACHABLE();
return nullptr;
}
static int
parse_opt(int key, char* arg, struct argp_state*)
{
@ -833,7 +761,7 @@ parse_opt(int key, char* arg, struct argp_state*)
automaton_format = Count;
break;
case 'F':
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
break;
case 'n':
opt_max_count = to_pos_int(arg, "-n/--max-count");
@ -842,14 +770,17 @@ parse_opt(int key, char* arg, struct argp_state*)
opt_nth = parse_range(arg, 0, std::numeric_limits<int>::max());
break;
case 'u':
opt->uniq = std::make_unique<unique_aut_t>();
opt->uniq = std::unique_ptr<unique_aut_t>(new std::set<canon_aut>());
break;
case 'v':
opt_invert = true;
break;
case 'x':
if (const char* opt = extra_options.parse_options(arg))
error(2, 0, "failed to parse --options near '%s'", opt);
{
const char* opt = extra_options.parse_options(arg);
if (opt)
error(2, 0, "failed to parse --options near '%s'", opt);
}
break;
case OPT_ALIASES:
opt_aliases = XARGMATCH("--aliases", arg, aliases_args, aliases_types);
@ -865,7 +796,16 @@ parse_opt(int key, char* arg, struct argp_state*)
opt_art_sccs_set = true;
break;
case OPT_ACCEPT_WORD:
opt->acc_words.emplace_back(word_to_aut(arg, "accept-word"));
try
{
opt->acc_words.push_back(spot::parse_word(arg, opt->dict)
->as_automaton());
}
catch (const spot::parse_error& e)
{
error(2, 0, "failed to parse the argument of --accept-word:\n%s",
e.what());
}
break;
case OPT_ACCEPTANCE_IS:
{
@ -944,12 +884,12 @@ parse_opt(int key, char* arg, struct argp_state*)
opt_dnf_acc = true;
opt_cnf_acc = false;
break;
case OPT_STREETT_LIKE:
opt_streett_like = true;
break;
case OPT_EDGES:
opt_edges = parse_range(arg, 0, std::numeric_limits<int>::max());
break;
case OPT_ENLARGE_ACCEPTANCE_SET:
opt_enlarge_acceptance_set = true;
break;
case OPT_EXCLUSIVE_AP:
opt->excl_ap.add_group(arg);
break;
@ -1018,7 +958,16 @@ parse_opt(int key, char* arg, struct argp_state*)
"%d should be followed by a comma and WORD", res);
arg = endptr + 1;
}
opt->hl_words.emplace_back(word_to_aut(arg, "highlight-word"), res);
try
{
opt->hl_words.emplace_back(spot::parse_word(arg, opt->dict)
->as_automaton(), res);
}
catch (const spot::parse_error& e)
{
error(2, 0, "failed to parse the argument of --highlight-word:\n%s",
e.what());
}
}
break;
case OPT_HIGHLIGHT_LANGUAGES:
@ -1034,24 +983,12 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case OPT_INCLUDED_IN:
{
auto aut = read_automaton(arg, opt->dict);
if (spot::containment_select_version() == 0)
{
aut = spot::complement(aut);
if (!aut->is_existential())
aut = spot::remove_alternation(aut);
if (!opt->included_in)
opt->included_in = aut;
else
opt->included_in = ::product_or(opt->included_in, aut);
}
auto aut = ensure_deterministic(read_automaton(arg, opt->dict), true);
aut = spot::dualize(aut);
if (!opt->included_in)
opt->included_in = aut;
else
{
if (opt->included_in)
error(2, 0, "FORQ-based inclusion check only works "
"with one inclusion-test at a time");
opt->included_in = aut;
}
opt->included_in = spot::product_or(opt->included_in, aut);
}
break;
case OPT_INHERENTLY_WEAK_SCCS:
@ -1128,6 +1065,9 @@ parse_opt(int key, char* arg, struct argp_state*)
opt_rem_dead = true;
break;
}
case OPT_MERGE:
opt_merge = true;
break;
case OPT_MASK_ACC:
{
for (auto res : to_longs(arg))
@ -1143,14 +1083,6 @@ parse_opt(int key, char* arg, struct argp_state*)
}
break;
}
case OPT_MCS:
opt_mcs = true;
if (arg)
opt_mcs_tie = XARGMATCH("--mcs", arg, mcs_args, mcs_types);
break;
case OPT_MERGE:
opt_merge = true;
break;
case OPT_NONDET_STATES:
opt_nondet_states = parse_range(arg, 0, std::numeric_limits<int>::max());
opt_nondet_states_set = true;
@ -1214,15 +1146,21 @@ parse_opt(int key, char* arg, struct argp_state*)
randomize_st = true;
}
break;
case OPT_REDUCE_ACCEPTANCE_SET:
opt_reduce_acceptance_set = true;
break;
case OPT_REJ_SCCS:
opt_rej_sccs = parse_range(arg, 0, std::numeric_limits<int>::max());
opt_art_sccs_set = true;
break;
case OPT_REJECT_WORD:
opt->rej_words.emplace_back(word_to_aut(arg, "reject-word"));
try
{
opt->rej_words.push_back(spot::parse_word(arg, opt->dict)
->as_automaton());
}
catch (const spot::parse_error& e)
{
error(2, 0, "failed to parse the argument of --reject-word:\n%s",
e.what());
}
break;
case OPT_REM_AP:
opt->rem_ap.add_ap(arg);
@ -1239,9 +1177,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_REM_UNUSED_AP:
opt_rem_unused_ap = true;
break;
case OPT_RESTRICT_DEAD_ENDS:
opt_restrict_dead_ends = true;
break;
case OPT_SAT_MINIMIZE:
opt_sat_minimize = arg ? arg : "";
break;
@ -1265,15 +1200,9 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_SPLIT_EDGES:
opt_split_edges = true;
break;
case OPT_SEPARATE_EDGES:
opt_separate_edges = true;
break;
case OPT_STATES:
opt_states = parse_range(arg, 0, std::numeric_limits<int>::max());
break;
case OPT_STREETT_LIKE:
opt_streett_like = true;
break;
case OPT_STRIPACC:
opt_stripacc = true;
break;
@ -1305,9 +1234,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_TO_FINITE:
opt_to_finite = arg ? arg : "alive";
break;
case OPT_TRACK_FORMULA:
opt_track_formula = spot::parse_formula(arg);
break;
case OPT_TRIV_SCCS:
opt_triv_sccs = parse_range(arg, 0, std::numeric_limits<int>::max());
opt_art_sccs_set = true;
@ -1326,7 +1252,7 @@ parse_opt(int key, char* arg, struct argp_state*)
opt_art_sccs_set = true;
break;
case ARGP_KEY_ARG:
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
break;
default:
@ -1359,7 +1285,7 @@ namespace
static
bool match_acceptance(spot::twa_graph_ptr aut)
{
const spot::acc_cond& acc = aut->acc();
auto& acc = aut->acc();
switch (opt_acceptance_is)
{
case ACC_Any:
@ -1414,7 +1340,8 @@ namespace
{
bool max;
bool odd;
if (!acc.is_parity(max, odd, true))
bool is_p = acc.is_parity(max, odd, true);
if (!is_p)
return false;
switch (opt_acceptance_is)
{
@ -1488,9 +1415,6 @@ namespace
else if (opt_clean_acc)
cleanup_acceptance_here(aut);
if (opt_restrict_dead_ends)
restrict_dead_end_edges_here(aut);
if (opt_sep_sets)
separate_sets_here(aut);
if (opt_complement_acc)
@ -1530,7 +1454,7 @@ namespace
if (matched && opt_acceptance_is)
matched = match_acceptance(aut);
if (matched && (opt_sccs_set || opt_art_sccs_set))
if (matched && (opt_sccs_set | opt_art_sccs_set))
{
spot::scc_info si(aut);
unsigned n = si.scc_count();
@ -1604,25 +1528,20 @@ namespace
if (opt->intersect)
matched &= aut->intersects(opt->intersect);
if (opt->included_in)
{
if (spot::containment_select_version() == 0)
matched &= !aut->intersects(opt->included_in);
else
matched &= spot::contains(opt->included_in, aut);
}
matched &= !aut->intersects(opt->included_in);
if (opt->equivalent_pos)
matched &= !aut->intersects(opt->equivalent_neg)
&& spot::contains(aut, opt->equivalent_pos);
if (matched && !opt->acc_words.empty())
for (const spot::twa_graph_ptr& word_aut: opt->acc_words)
for (auto& word_aut: opt->acc_words)
if (spot::product(aut, word_aut)->is_empty())
{
matched = false;
break;
}
if (matched && !opt->rej_words.empty())
for (const spot::twa_graph_ptr& word_aut: opt->rej_words)
for (auto& word_aut: opt->rej_words)
if (!spot::product(aut, word_aut)->is_empty())
{
matched = false;
@ -1735,18 +1654,8 @@ namespace
else if (opt_rem_unused_ap) // constrain(aut, true) already does that
aut->remove_unused_ap();
if (opt_enlarge_acceptance_set)
spot::enlarge_buchi_acceptance_set_here(aut, sbacc);
if (opt_reduce_acceptance_set)
spot::reduce_buchi_acceptance_set_here(aut, sbacc);
if (opt_split_edges)
aut = spot::split_edges(aut);
else if (opt_separate_edges)
aut = spot::separate_edges(aut);
if (opt_track_formula)
match_states_decorate(aut, opt_track_formula);
if (opt_to_finite)
aut = spot::to_finite(aut, opt_to_finite);
@ -1766,12 +1675,14 @@ namespace
aut->accepting_run()->highlight(opt_highlight_accepting_run);
if (!opt->hl_words.empty())
for (auto& [word_aut, color]: opt->hl_words)
if (auto run = spot::product(aut, word_aut)->accepting_run())
run->project(aut)->highlight(color);
if (opt_mcs)
spot::maximum_cardinality_search_reorder_here(aut, opt_mcs_tie);
for (auto& word_aut: opt->hl_words)
{
if (aut->acc().uses_fin_acceptance())
error(2, 0,
"--highlight-word does not yet work with Fin acceptance");
if (auto run = spot::product(aut, word_aut.first)->accepting_run())
run->project(aut)->highlight(word_aut.second);
}
timer.stop();
if (opt->uniq)
@ -1783,6 +1694,8 @@ namespace
return 0;
}
++match_count;
if (aliases)
{
if (opt_aliases)
@ -1791,9 +1704,7 @@ namespace
set_aliases(aut, {});
}
printer.print(aut, timer, nullptr, haut->filename.c_str(), -1,
match_count, haut, prefix, suffix);
++match_count;
haut, prefix, suffix);
if (opt_max_count >= 0 && match_count >= opt_max_count)
abort_run = true;
@ -1846,17 +1757,15 @@ main(int argc, char** argv)
post.set_level(level);
autfilt_processor processor(post, o.dict);
int err = processor.run();
if (processor.run())
return 2;
// Diagnose unused -x options
extra_options.report_unused_options();
if (automaton_format == Count)
std::cout << match_count << std::endl;
// Diagnose unused -x options
if (!err)
extra_options.report_unused_options();
else
return 2;
check_cout();
return match_count ? 0 : 1;
});

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -40,7 +41,7 @@
#include <spot/twaalgos/isdet.hh>
automaton_format_t automaton_format = Hoa;
const char* automaton_format_opt = nullptr;
static const char* automaton_format_opt = nullptr;
const char* opt_name = nullptr;
static const char* opt_output = nullptr;
static const char* stats = "";
@ -91,19 +92,6 @@ enum {
OPT_CHECK,
};
const char* hoa_option_doc_short = "1.1|b|i|k|l|m|s|t|v";
const char* hoa_option_doc_long =
"Output the automaton in HOA format (default). Add letters to select "
"(1.1) version 1.1 of the format, "
"(b) create an alias basis if >=2 AP are used, "
"(i) use implicit labels for complete deterministic automata, "
"(s) prefer state-based acceptance when possible [default], "
"(t) force transition-based acceptance, "
"(m) mix state and transition-based acceptance, "
"(k) use state labels when possible, "
"(l) single-line output, "
"(v) verbose properties";
static const argp_option options[] =
{
/**************************************************/
@ -142,8 +130,16 @@ static const argp_option options[] =
"(+INT) add INT to all set numbers, "
"(<INT) display at most INT states, "
"(#) show internal edge numbers", 0 },
{ "hoaf", 'H', hoa_option_doc_short, OPTION_ARG_OPTIONAL,
hoa_option_doc_long, 0 },
{ "hoaf", 'H', "1.1|i|k|l|m|s|t|v", OPTION_ARG_OPTIONAL,
"Output the automaton in HOA format (default). Add letters to select "
"(1.1) version 1.1 of the format, "
"(i) use implicit labels for complete deterministic automata, "
"(s) prefer state-based acceptance when possible [default], "
"(t) force transition-based acceptance, "
"(m) mix state and transition-based acceptance, "
"(k) use state labels when possible, "
"(l) single-line output, "
"(v) verbose properties", 0 },
{ "lbtt", OPT_LBTT, "t", OPTION_ARG_OPTIONAL,
"LBTT's format (add =t to force transition-based acceptance even"
" on Büchi automata)", 0 },
@ -202,25 +198,17 @@ static const argp_option io_options[] =
" minuscules for output):", 4 },
{ "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, F_doc, 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, L_doc, 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"serial number of the output automaton (0-based)", 0 },
{ "%H, %h", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the automaton in HOA format on a single line (use %[opt]H or %[opt]h "
"to specify additional options as in --hoa=opt)", 0 },
{ "%M, %m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"name of the automaton", 0 },
{ "%S, %s, %[LETTER]S, %[LETTER]s",
0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of states (add one LETTER to select (r) reachable [default], "
"(u) unreachable, (a) all).", 0 },
{ "%E, %e, %[LETTER]E, %[LETTER]e",
0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of edges (add one LETTER to select (r) reachable [default], "
"(u) unreachable, (a) all).", 0 },
{ "%T, %t, %[LETTER]T, %[LETTER]t",
0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of transitions (add one LETTER to select (r) reachable "
"[default], (u) unreachable, (a) all).", 0 },
{ "%S, %s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable states", 0 },
{ "%E, %e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable edges", 0 },
{ "%T, %t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable transitions", 0 },
{ "%A, %a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of acceptance sets", 0 },
{ "%G, %g, %[LETTERS]G, %[LETTERS]g", 0, nullptr,
@ -233,7 +221,7 @@ static const argp_option io_options[] =
"(iw) inherently weak. Use uppercase letters to negate them.", 0 },
{ "%R, %[LETTERS]R", 0, nullptr,
OPTION_DOC | OPTION_NO_USAGE,
"CPU time (excluding parsing), in seconds; add LETTERS to restrict to "
"CPU time (excluding parsing), in seconds; Add LETTERS to restrict to"
"(u) user time, (s) system time, (p) parent process, "
"or (c) children processes.", 0 },
{ "%N, %n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
@ -275,22 +263,17 @@ static const argp_option o_options[] =
"the following interpreted sequences:", 4 },
{ "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, F_doc, 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, L_doc, 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"serial number of the output automaton (0-based)", 0 },
{ "%h", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the automaton in HOA format on a single line (use %[opt]h "
"to specify additional options as in --hoa=opt)", 0 },
{ "%m", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"name of the automaton", 0 },
{ "%s, %[LETTER]s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of states (add one LETTER to select (r) reachable [default], "
"(u) unreachable, (a) all).", 0 },
{ "%e, %[LETTER]e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of edges (add one LETTER to select (r) reachable [default], "
"(u) unreachable, (a) all).", 0 },
{ "%t, %[LETTER]t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of transitions (add one LETTER to select (r) reachable "
"[default], (u) unreachable, (a) all).", 0 },
{ "%s", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable states", 0 },
{ "%e", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable edges", 0 },
{ "%t", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of reachable transitions", 0 },
{ "%a", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"number of acceptance sets", 0 },
{ "%g, %[LETTERS]g", 0, nullptr,
@ -302,7 +285,7 @@ static const argp_option o_options[] =
"(iw) inherently weak. Use uppercase letters to negate them.", 0 },
{ "%R, %[LETTERS]R", 0, nullptr,
OPTION_DOC | OPTION_NO_USAGE,
"CPU time (excluding parsing), in seconds; add LETTERS to restrict to"
"CPU time (excluding parsing), in seconds; Add LETTERS to restrict to"
"(u) user time, (s) system time, (p) parent process, "
"or (c) children processes.", 0 },
{ "%n", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
@ -450,7 +433,6 @@ hoa_stat_printer::hoa_stat_printer(std::ostream& os, const char* format,
if (input != ltl_input)
declare('f', &filename_); // Override the formula printer.
declare('h', &output_aut_);
declare('l', &index_);
declare('m', &aut_name_);
declare('u', &aut_univbranch_);
declare('w', &aut_word_);
@ -462,12 +444,11 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut,
const spot::const_twa_graph_ptr& aut,
spot::formula f,
const char* filename, int loc,
unsigned index,
const spot::process_timer& ptimer,
spot::process_timer& ptimer,
const char* csv_prefix, const char* csv_suffix)
{
timer_ = ptimer;
index_ = index;
filename_ = filename ? filename : "";
csv_prefix_ = csv_prefix ? csv_prefix : "";
csv_suffix_ = csv_suffix ? csv_suffix : "";
@ -491,15 +472,15 @@ hoa_stat_printer::print(const spot::const_parsed_aut_ptr& haut,
if (has('T'))
{
spot::twa_sub_statistics s = sub_stats_reachable(haut->aut);
haut_states_.set(s.states, haut->aut->num_states());
haut_edges_.set(s.edges, haut->aut->num_edges());
haut_trans_.set(s.transitions, count_all_transitions(haut->aut));
haut_states_ = s.states;
haut_edges_ = s.edges;
haut_trans_ = s.transitions;
}
else if (has('E') || has('S'))
{
spot::twa_statistics s = stats_reachable(haut->aut);
haut_states_.set(s.states, haut->aut->num_states());
haut_edges_.set(s.edges, haut->aut->num_edges());
haut_states_ = s.states;
haut_edges_ = s.edges;
}
if (has('M'))
{
@ -609,7 +590,6 @@ automaton_printer::print(const spot::twa_graph_ptr& aut,
// Input location for errors and statistics.
const char* filename,
int loc,
unsigned index,
// input automaton for statistics
const spot::const_parsed_aut_ptr& haut,
const char* csv_prefix,
@ -633,8 +613,7 @@ automaton_printer::print(const spot::twa_graph_ptr& aut,
if (opt_name)
{
name.str("");
namer.print(haut, aut, f, filename, loc, index,
ptimer, csv_prefix, csv_suffix);
namer.print(haut, aut, f, filename, loc, ptimer, csv_prefix, csv_suffix);
aut->set_named_prop("automaton-name", new std::string(name.str()));
}
@ -642,33 +621,13 @@ automaton_printer::print(const spot::twa_graph_ptr& aut,
if (opt_output)
{
outputname.str("");
outputnamer.print(haut, aut, f, filename, loc, index,
ptimer, csv_prefix, csv_suffix);
outputnamer.print(haut, aut, f, filename, loc, ptimer,
csv_prefix, csv_suffix);
std::string fname = outputname.str();
auto [it, b] = outputfiles.try_emplace(fname, nullptr);
if (b)
it->second.reset(new output_file(fname.c_str()));
else
// reopen if the file has been closed; see below
it->second->reopen_for_append(fname);
out = &it->second->ostream();
// If we have opened fewer than 10 files, we keep them all open
// to avoid wasting time on open/close calls.
//
// However we cannot keep all files open, especially in
// scenarios were we use thousands of files only once. To keep
// things simple, we only close the previous file if it is not
// the current output. This way we still save the close/open
// cost when consecutive automata are sent to the same file.
static output_file* previous = nullptr;
static const std::string* previous_name = nullptr;
if (previous
&& outputfiles.size() > 10
&& &previous->ostream() != out)
previous->close(*previous_name);
previous = it->second.get();
previous_name = &it->first;
auto p = outputfiles.emplace(fname, nullptr);
if (p.second)
p.first->second.reset(new output_file(fname.c_str()));
out = &p.first->second->ostream();
}
// Output it.
@ -692,8 +651,8 @@ automaton_printer::print(const spot::twa_graph_ptr& aut,
break;
case Stats:
statistics.set_output(*out);
statistics.print(haut, aut, f, filename, loc, index,
ptimer, csv_prefix, csv_suffix) << '\n';
statistics.print(haut, aut, f, filename, loc, ptimer,
csv_prefix, csv_suffix) << '\n';
break;
}
flush_cout();

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2014-2018, 2020 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -46,16 +47,11 @@ enum automaton_format_t {
// The format to use in output_automaton()
extern automaton_format_t automaton_format;
extern const char* automaton_format_opt;
// Set to the argument of --name, else nullptr.
extern const char* opt_name;
// Output options
extern const struct argp aoutput_argp;
// help text for --hoaf
extern const char* hoa_option_doc_short;
extern const char* hoa_option_doc_long;
// help text for %F and %L
extern char F_doc[32];
extern char L_doc[32];
@ -136,10 +132,10 @@ public:
void print(std::ostream& os, const char* pos) const override;
};
/// \brief prints various statistics about a TwA
/// \brief prints various statistics about a TGBA
///
/// This object can be configured to display various statistics
/// about a TwA. Some %-sequence of characters are interpreted in
/// about a TGBA. Some %-sequence of characters are interpreted in
/// the format string, and replaced by the corresponding statistics.
class hoa_stat_printer: protected spot::stat_printer
{
@ -156,12 +152,10 @@ public:
/// to be output.
std::ostream&
print(const spot::const_parsed_aut_ptr& haut,
const spot::const_twa_graph_ptr& aut,
spot::formula f,
const char* filename, int loc,
unsigned index,
const spot::process_timer& ptimer,
const char* csv_prefix, const char* csv_suffix);
const spot::const_twa_graph_ptr& aut,
spot::formula f,
const char* filename, int loc, spot::process_timer& ptimer,
const char* csv_prefix, const char* csv_suffix);
private:
spot::printable_value<const char*> filename_;
@ -170,11 +164,10 @@ private:
spot::printable_value<std::string> aut_name_;
spot::printable_value<std::string> aut_word_;
spot::printable_value<std::string> haut_word_;
spot::printable_value<unsigned> index_;
spot::printable_acc_cond haut_gen_acc_;
spot::printable_size haut_states_;
spot::printable_size haut_edges_;
spot::printable_long_size haut_trans_;
spot::printable_value<unsigned> haut_states_;
spot::printable_value<unsigned> haut_edges_;
spot::printable_value<unsigned long long> haut_trans_;
spot::printable_value<unsigned> haut_acc_;
printable_varset haut_ap_;
printable_varset aut_ap_;
@ -202,7 +195,7 @@ class automaton_printer
std::map<std::string, std::unique_ptr<output_file>> outputfiles;
public:
explicit automaton_printer(stat_style input = no_input);
automaton_printer(stat_style input = no_input);
~automaton_printer();
void
@ -212,8 +205,6 @@ public:
// Input location for errors and statistics.
const char* filename = nullptr,
int loc = -1,
// serial numbner
unsigned index = 0,
// Time and input automaton for statistics
const spot::const_parsed_aut_ptr& haut = nullptr,
const char* csv_prefix = nullptr,

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2017, 2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2017 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -24,15 +25,10 @@ int
to_int(const char* s, const char* where)
{
char* endptr;
errno = 0;
long int lres = strtol(s, &endptr, 10);
int res = strtol(s, &endptr, 10);
if (*endptr)
error(2, 0, "failed to parse '%s' as an integer (in argument of %s).",
s, where);
int res = lres;
if (res != lres || errno == ERANGE)
error(2, 0, "value '%s' is too large for an int (in argument of %s).",
s, where);
return res;
}
@ -49,17 +45,11 @@ unsigned
to_unsigned (const char *s, const char* where)
{
char* endptr;
errno = 0;
unsigned long lres = strtoul(s, &endptr, 10);
unsigned res = strtoul(s, &endptr, 10);
if (*endptr)
error(2, 0,
"failed to parse '%s' as an unsigned integer (in argument of %s).",
s, where);
unsigned res = lres;
if (res != lres || errno == ERANGE)
error(2, 0,
"value '%s' is too large for a unsigned int (in argument of %s).",
s, where);
return res;
}
@ -67,9 +57,8 @@ float
to_float(const char* s, const char* where)
{
char* endptr;
errno = 0;
float res = strtof(s, &endptr);
if (*endptr || errno == ERANGE)
if (*endptr)
error(2, 0, "failed to parse '%s' as a float (in argument of %s)",
s, where);
return res;
@ -91,9 +80,8 @@ to_longs(const char* arg)
while (*arg)
{
char* endptr;
errno = 0;
long value = strtol(arg, &endptr, 10);
if (endptr == arg || errno)
if (endptr == arg)
error(2, 0, "failed to parse '%s' as an integer.", arg);
res.push_back(value);
while (*endptr == ' ' || *endptr == ',')

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2018 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -27,5 +28,5 @@ unsigned to_unsigned (const char *s, const char* where);
float to_float(const char* s, const char* where);
float to_probability(const char* s, const char* where);
// Parse the comma or space separated string of numbers.
// Parse the comma or space seperate string of numbers.
std::vector<long> to_longs(const char* s);

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2016 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -20,50 +21,34 @@
#include <error.h>
#include <iostream>
output_file::output_file(const char* name, bool force_append)
output_file::output_file(const char* name)
{
std::ios_base::openmode mode = std::ios_base::trunc;
if (name[0] == '>' && name[1] == '>')
{
mode = std::ios_base::app;
append_ = true;
name += 2;
}
if (force_append)
append_ = true;
if (append_)
mode = std::ios_base::app;
if (name[0] == '-' && name[1] == 0)
{
os_ = &std::cout;
return;
}
of_ = std::make_unique<std::ofstream>(name, mode);
of_ = new std::ofstream(name, mode);
if (!*of_)
error(2, errno, "cannot open '%s'", name);
os_ = of_.get();
os_ = of_;
}
void
output_file::reopen_for_append(const std::string& name)
{
if (os_ == &std::cout || of_->is_open()) // nothing to do
return;
const char* cname = name.c_str();
if (cname[0] == '>' && cname[1] == '>')
cname += 2;
// the name cannot be "-" at this point, otherwise os_ would be
// equal to std::cout.
of_->open(cname, std::ios_base::app);
if (!*of_)
error(2, errno, "cannot reopen '%s'", cname);
}
void output_file::close(const std::string& name)
{
// We close of_, not os_, so that we never close std::cout.
if (os_)
os_->flush();
if (of_ && of_->is_open())
if (of_)
of_->close();
if (os_ && !*os_)
error(2, 0, "error writing to %s",

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2016 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -20,23 +21,26 @@
#include "common_sys.hh"
#include <iosfwd>
#include <memory>
#include <fstream>
#include <error.h>
class output_file
{
std::ostream* os_;
std::unique_ptr<std::ofstream> of_;
std::ofstream* of_ = nullptr;
bool append_ = false;
public:
// Open a file for output. "-" is interpreted as stdout.
// Names that start with ">>" are opened for append.
// The function calls error() on... error.
output_file(const char* name, bool force_append = false);
output_file(const char* name);
void close(const std::string& name);
void reopen_for_append(const std::string& name);
~output_file()
{
delete of_;
}
bool append() const
{

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2017, 2019, 2021 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -67,10 +68,10 @@ parse_opt_finput(int key, char* arg, struct argp_state*)
switch (key)
{
case 'f':
jobs.emplace_back(arg, job_type::LTL_STRING);
jobs.emplace_back(arg, false);
break;
case 'F':
jobs.emplace_back(arg, job_type::LTL_FILENAME);
jobs.emplace_back(arg, true);
break;
case OPT_LBT:
lbt_input = true;
@ -95,6 +96,12 @@ parse_formula(const std::string& s)
(s, spot::default_environment::instance(), false, lenient);
}
job_processor::job_processor()
: abort_run(false), real_filename(nullptr),
col_to_read(0), prefix(nullptr), suffix(nullptr)
{
}
job_processor::~job_processor()
{
if (real_filename)
@ -296,22 +303,8 @@ job_processor::process_stream(std::istream& is,
}
int
job_processor::process_aut_file(const char*)
job_processor::process_file(const char* filename)
{
throw std::runtime_error("process_aut_file not defined for this tool");
}
int
job_processor::process_tlsf_file(const char*)
{
throw std::runtime_error("process_tlsf_file not defined for this tool");
}
int
job_processor::process_ltl_file(const char* filename)
{
col_to_read = 0;
// Special case for stdin.
if (filename[0] == '-' && filename[1] == 0)
return process_stream(std::cin, filename);
@ -363,40 +356,27 @@ int
job_processor::run()
{
int error = 0;
for (const auto& j: jobs)
for (auto& j: jobs)
{
switch (j.type)
{
case job_type::LTL_STRING:
error |= process_string(j.str);
break;
case job_type::LTL_FILENAME:
error |= process_ltl_file(j.str);
break;
case job_type::AUT_FILENAME:
error |= process_aut_file(j.str);
break;
case job_type::TLSF_FILENAME:
error |= process_tlsf_file(j.str);
break;
default:
throw std::runtime_error("unexpected job type");
}
if (!j.file_p)
error |= process_string(j.str);
else
error |= process_file(j.str);
if (abort_run)
break;
}
return error;
}
void check_no_formula(const char* action)
void check_no_formula()
{
if (!jobs.empty())
return;
if (isatty(STDIN_FILENO))
error(2, 0, "No formula to %s? Run '%s --help' for help.\n"
error(2, 0, "No formula to translate? Run '%s --help' for help.\n"
"Use '%s -' to force reading formulas from the standard "
"input.", action, program_name, program_name);
jobs.emplace_back("-", job_type::LTL_FILENAME);
"input.", program_name, program_name);
jobs.emplace_back("-", true);
}
void check_no_automaton()
@ -407,5 +387,5 @@ void check_no_automaton()
error(2, 0, "No automaton to process? Run '%s --help' for help.\n"
"Use '%s -' to force reading automata from the standard "
"input.", program_name, program_name);
jobs.emplace_back("-", job_type::AUT_FILENAME);
jobs.emplace_back("-", true);
}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2017 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -24,18 +25,13 @@
#include <vector>
#include <spot/tl/parse.hh>
enum class job_type : char { LTL_STRING,
LTL_FILENAME,
AUT_FILENAME,
TLSF_FILENAME };
struct job
{
const char* str;
job_type type;
bool file_p; // true if str is a filename, false if it is a formula
job(const char* str, job_type type) noexcept
: str(str), type(type)
job(const char* str, bool file_p) noexcept
: str(str), file_p(file_p)
{
}
};
@ -55,11 +51,9 @@ spot::parsed_formula parse_formula(const std::string& s);
class job_processor
{
protected:
bool abort_run = false; // Set to true in process_formula() to abort run().
bool abort_run; // Set to true in process_formula() to abort run().
public:
job_processor() = default;
job_processor(const job_processor&) = delete;
job_processor& operator=(const job_processor&) = delete;
job_processor();
virtual ~job_processor();
@ -74,24 +68,18 @@ public:
process_stream(std::istream& is, const char* filename);
virtual int
process_ltl_file(const char* filename);
virtual int
process_aut_file(const char* filename);
virtual int
process_tlsf_file(const char* filename);
process_file(const char* filename);
virtual int
run();
char* real_filename = nullptr;
long int col_to_read = 0;
char* prefix = nullptr;
char* suffix = nullptr;
char* real_filename;
long int col_to_read;
char* prefix;
char* suffix;
};
// Report and error message or add a default job depending on whether
// the input is a tty.
void check_no_formula(const char* action = "translate");
void check_no_formula();
void check_no_automaton();

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015, 2017, 2018 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -79,10 +80,8 @@ public:
}
int
process_aut_file(const char* filename) override
process_file(const char* filename) override
{
col_to_read = 0;
// If we have a filename like "foo/NN" such
// that:
// ① foo/NN is not a file,

View file

@ -1,268 +0,0 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
//
// This file is part of Spot, a model checking library.
//
// Spot is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// Spot is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
// License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "common_ioap.hh"
#include "error.h"
#include <fstream>
#include <unordered_set>
// --ins and --outs, as supplied on the command-line.
std::optional<std::vector<std::string>> all_output_aps;
std::optional<std::vector<std::string>> all_input_aps;
// First, separate the filters that are regular expressions from the
// others, and compile the regular expressions while we are at it
// (both tables are filled by process_io_options()).
std::vector<std::regex> regex_in;
std::vector<std::regex> regex_out;
// map identifier to input/output (false=input, true=output)
std::unordered_map<std::string, bool> identifier_map;
// Set once read_part_file() has run; only used to pick the wording of
// error messages ("inputs and outputs" vs. "--ins and --outs").
static bool a_part_file_was_read = false;
// Return a copy of S with every character converted to lower case.
static std::string
str_tolower(std::string s)
{
  for (char& c: s)
    c = std::tolower(static_cast<unsigned char>(c));
  return s;
}
// Split ARG on commas, strip all whitespace from each token, and
// append the lower-cased tokens to WHERE.
void
split_aps(const std::string& arg, std::vector<std::string>& where)
{
  std::istringstream tokens(arg);
  for (std::string tok; std::getline(tokens, tok, ','); )
    {
      std::string stripped;
      stripped.reserve(tok.size());
      for (char c: tok)
        if (!isspace(static_cast<unsigned char>(c)))
          stripped.push_back(c);
      where.push_back(str_tolower(stripped));
    }
}
// Post-process the raw --ins/--outs values: entries of the form
// /.../ are compiled into regex_in/regex_out, and plain names are
// recorded in identifier_map (false=input, true=output).  Exits with
// status 2 if the same name is declared both input and output.
void process_io_options()
{
  // Filter identifiers from regexes.
  if (all_input_aps.has_value())
    for (const std::string& f: *all_input_aps)
      {
        unsigned sz = f.size();
        if (f[0] == '/' && f[sz - 1] == '/')
          regex_in.push_back(std::regex(f.substr(1, sz - 2)));
        else
          identifier_map.emplace(f, false);
      }
  if (all_output_aps.has_value())
    for (const std::string& f: *all_output_aps)
      {
        unsigned sz = f.size();
        if (f[0] == '/' && f[sz - 1] == '/')
          regex_out.push_back(std::regex(f.substr(1, sz - 2)));
        // try_emplace leaves an existing entry untouched, so if F was
        // already registered as an input (is_new false, it->second
        // false) the two declarations conflict.
        else if (auto [it, is_new] = identifier_map.try_emplace(f, true);
                 !is_new && !it->second)
          error(2, 0,
                a_part_file_was_read ?
                "'%s' appears in both inputs and outputs" :
                "'%s' appears in both --ins and --outs",
                f.c_str());
      }
}
// Collect the name of every atomic proposition that f.traverse()
// visits in F, as a set (so each name appears once).
static std::unordered_set<std::string>
list_aps_in_formula(spot::formula f)
{
  std::unordered_set<std::string> names;
  auto collect = [&names](spot::formula sub)
    {
      if (sub.is(spot::op::ap))
        names.emplace(sub.ap_name());
      return false;
    };
  f.traverse(collect);
  return names;
}
// Decide whether atomic proposition A is an output (true) or an
// input (false), using identifier_map, regex_in, and regex_out as
// built by process_io_options().  FILENAME/LINENUM only locate
// error messages; an unclassifiable or doubly-classified name
// aborts with exit status 2.
bool
is_output(const std::string& a, const char* filename, int linenum)
{
  // An exact identifier match (from --ins/--outs or a .part file)
  // settles the question immediately.
  if (auto it = identifier_map.find(a); it != identifier_map.end())
    return it->second;
  // Otherwise, try the regular-expression filters on both sides.
  bool found_in = false;
  for (const std::regex& r: regex_in)
    if (std::regex_search(a, r))
      {
        found_in = true;
        break;
      }
  bool found_out = false;
  for (const std::regex& r: regex_out)
    if (std::regex_search(a, r))
      {
        found_out = true;
        break;
      }
  // True when either both of --ins/--outs were given, or neither was.
  if (all_input_aps.has_value() == all_output_aps.has_value())
    {
      if (!all_input_aps.has_value())
        {
          // If the atomic proposition hasn't been classified
          // because neither --ins nor --out were specified,
          // attempt to classify automatically using the first
          // letter.
          int fl = a[0];
          if (fl == 'i' || fl == 'I')
            found_in = true;
          else if (fl == 'o' || fl == 'O')
            found_out = true;
        }
      if (found_in && found_out)
        error_at_line(2, 0, filename, linenum,
                      a_part_file_was_read ?
                      "'%s' matches both inputs and outputs" :
                      "'%s' matches both --ins and --outs",
                      a.c_str());
      if (!found_in && !found_out)
        {
          // Reachable with has_value() only when both options were
          // given; otherwise fall back to the first-letter hint above.
          if (all_input_aps.has_value() || all_output_aps.has_value())
            error_at_line(2, 0, filename, linenum,
                          a_part_file_was_read ?
                          "'%s' does not match any input or output" :
                          "one of --ins or --outs should match '%s'",
                          a.c_str());
          else
            error_at_line(2, 0, filename, linenum,
                          "since '%s' does not start with 'i' or 'o', "
                          "it is unclear if it is an input or "
                          "an output;\n use --ins, --outs, or --part-file",
                          a.c_str());
        }
    }
  else
    {
      // if we had only --ins or only --outs, anything not
      // matching that was given is assumed to belong to the
      // other one.
      if (!all_input_aps.has_value() && !found_out)
        found_in = true;
      else if (!all_output_aps.has_value() && !found_in)
        found_out = true;
    }
  return found_out;
}
// Partition the atomic propositions appearing in F into two vectors
// (inputs, outputs), classifying each name with is_output().
// FILENAME/LINENUM are forwarded for error reporting.
std::pair<std::vector<std::string>, std::vector<std::string>>
filter_list_of_aps(spot::formula f, const char* filename, int linenum)
{
  std::vector<std::string> ins;
  std::vector<std::string> outs;
  for (const std::string& name: list_aps_in_formula(f))
    {
      if (is_output(name, filename, linenum))
        outs.push_back(name);
      else
        ins.push_back(name);
    }
  return {std::move(ins), std::move(outs)};
}
// Rename every atomic proposition of F to i0, i1, ... (inputs) or
// o0, o1, ... (outputs), returning the relabeled formula and filling
// FRO with the reverse (new-name -> old-name) mapping.
spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro,
                         const char* filename, int linenum)
{
  auto [ins, outs] = filter_list_of_aps(f, filename, linenum);
  // Different implementations of unordered_set (used in
  // filter_list_of_aps) may enumerate the APs in different orders,
  // so sort both lists for the sake of determinism.
  std::sort(ins.begin(), ins.end());
  std::sort(outs.begin(), outs.end());
  spot::relabeling_map to;
  // Give each proposition of NAMES a fresh LETTER-numbered name,
  // recording both directions of the renaming.
  auto rename_all = [&fro, &to](const std::vector<std::string>& names,
                                char letter)
    {
      unsigned serial = 0;
      for (const std::string& name: names)
        {
          std::ostringstream s;
          s << letter << serial++;
          spot::formula old_ap = spot::formula::ap(name);
          spot::formula new_ap = spot::formula::ap(s.str());
          fro[new_ap] = old_ap;
          to[old_ap] = new_ap;
        }
    };
  rename_all(ins, 'i');
  rename_all(outs, 'o');
  return spot::relabel_apply(f, &to);
}
// Read FILENAME as a ".part" file.  It should contain lines of text
// of the following form:
//
//   .inputs IN1 IN2 IN3...
//   .outputs OUT1 OUT2 OUT3...
//
// Each word is lower-cased and appended to all_input_aps or
// all_output_aps according to the current section.  Exits with
// status 2 on I/O or syntax errors.
void read_part_file(const char* filename)
{
  std::ifstream in(filename);
  if (!in)
    error(2, errno, "cannot open '%s'", filename);
  // This parsing is inspired from Lily's parser for .part files.  We
  // read words one by one, and change the "mode" if the word is
  // ".inputs" or ".outputs".  A '#' introduces a comment until the
  // end of the line.
  std::string word;
  enum { Unknown, Input, Output } mode = Unknown;
  while (in >> word)
    {
      // The benchmarks for Syft use ".inputs:" instead of ".inputs".
      if (word == ".inputs" || word == ".inputs:")
        {
          mode = Input;
          // Materialize the optional so that even an empty section
          // counts as "inputs were specified".
          if (!all_input_aps.has_value())
            all_input_aps.emplace();
        }
      // The benchmarks for Syft use ".outputs:" instead of ".outputs".
      else if (word == ".outputs" || word == ".outputs:")
        {
          mode = Output;
          if (!all_output_aps.has_value())
            all_output_aps.emplace();
        }
      else if (word[0] == '#')
        {
          // Skip the rest of the line.
          in.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
        }
      else if (mode == Unknown)
        {
          error_at_line(2, 0, filename, 0,
                        "expected '.inputs' or '.outputs' instead of '%s'",
                        word.c_str());
        }
      else if (mode == Input)
        {
          all_input_aps->push_back(str_tolower(word));
        }
      else /* mode == Output */
        {
          all_output_aps->push_back(str_tolower(word));
        }
    }
  a_part_file_was_read = true;
}

View file

@ -1,69 +0,0 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
//
// This file is part of Spot, a model checking library.
//
// Spot is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// Spot is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
// License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include "common_sys.hh"
#include <optional>
#include <regex>
#include <string>
#include <vector>
#include <unordered_map>
#include <spot/tl/formula.hh>
#include <spot/tl/relabel.hh>
// --ins and --outs, as supplied on the command-line
extern std::optional<std::vector<std::string>> all_output_aps;
extern std::optional<std::vector<std::string>> all_input_aps;
// Comma-separated list of strings, such as those passed to --ins/--outs
void split_aps(const std::string& arg, std::vector<std::string>& where);
// process the all_output_aps and all_input_aps above to
// fill regex_in, regex_out, and identifier_map.
void process_io_options();
// First, separate the filters that are regular expressions from the
// others; the regular expressions are compiled while we are at it.
extern std::vector<std::regex> regex_in;
extern std::vector<std::regex> regex_out;
// map identifier to input/output (false=input, true=output)
extern std::unordered_map<std::string, bool> identifier_map;
// Given an atomic proposition AP and the above
// regex_in/regex_out/identifier_map, decide if this AP is an output
// (true) or an input (false).
bool
is_output(const std::string& ap,
const char* filename = nullptr, int linenum = 0);
// Separate the set of the atomic propositions appearing in f, into
// two vectors: input APs and output APs, based on regex_in,
// regex_out, and identifier_map.
std::pair<std::vector<std::string>, std::vector<std::string>>
filter_list_of_aps(spot::formula f, const char* filename, int linenum);
// Relabel APs incrementally, based on i/o class.
spot::formula relabel_io(spot::formula f, spot::relabeling_map& fro,
const char* filename, int linenum);
// Read a .part file.
void read_part_file(const char* filename);

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -22,7 +23,6 @@
#include "common_setup.hh"
#include <iostream>
#include <sstream>
#include <memory>
#include <spot/tl/print.hh>
#include <spot/tl/length.hh>
#include <spot/tl/apcollect.hh>
@ -44,7 +44,6 @@ output_format_t output_format = spot_output;
bool full_parenth = false;
bool escape_csv = false;
char output_terminator = '\n';
bool output_ratexp = false;
static const argp_option options[] =
{
@ -105,10 +104,7 @@ stream_formula(std::ostream& out,
report_not_ltl(f, filename, linenum, "LBT");
break;
case spot_output:
if (output_ratexp)
spot::print_sere(out, f, full_parenth);
else
spot::print_psl(out, f, full_parenth);
spot::print_psl(out, f, full_parenth);
break;
case spin_output:
if (f.is_ltl_formula())
@ -123,16 +119,10 @@ stream_formula(std::ostream& out,
report_not_ltl(f, filename, linenum, "Wring");
break;
case utf8_output:
if (output_ratexp)
spot::print_utf8_sere(out, f, full_parenth);
else
spot::print_utf8_psl(out, f, full_parenth);
spot::print_utf8_psl(out, f, full_parenth);
break;
case latex_output:
if (output_ratexp)
spot::print_latex_sere(out, f, full_parenth);
else
spot::print_latex_psl(out, f, full_parenth);
spot::print_latex_psl(out, f, full_parenth);
break;
case count_output:
case quiet_output:
@ -169,7 +159,6 @@ namespace
spot::formula f;
const char* filename;
const char* line;
unsigned index;
const char* prefix;
const char* suffix;
};
@ -234,7 +223,7 @@ namespace
}
};
class formula_printer final: public spot::formater
class formula_printer final: protected spot::formater
{
public:
formula_printer(std::ostream& os, const char* format)
@ -247,7 +236,6 @@ namespace
declare('R', &timer_);
declare('r', &timer_);
declare('L', &line_);
declare('l', &index_);
declare('s', &size_);
declare('h', &class_);
declare('n', &nesting_);
@ -259,8 +247,7 @@ namespace
}
std::ostream&
print(const formula_with_location& fl,
spot::process_timer* ptimer)
print(const formula_with_location& fl, spot::process_timer* ptimer)
{
if (has('R') || has('r'))
timer_ = *ptimer;
@ -268,7 +255,6 @@ namespace
fl_ = &fl;
filename_ = fl.filename ? fl.filename : "";
line_ = fl.line;
index_ = fl.index;
prefix_ = fl.prefix ? fl.prefix : "";
suffix_ = fl.suffix ? fl.suffix : "";
auto f = fl_.val()->f;
@ -301,7 +287,6 @@ namespace
printable_timer timer_;
spot::printable_value<const char*> filename_;
spot::printable_value<const char*> line_;
spot::printable_value<unsigned> index_;
spot::printable_value<const char*> prefix_;
spot::printable_value<const char*> suffix_;
spot::printable_value<int> size_;
@ -312,9 +297,9 @@ namespace
};
}
static std::unique_ptr<formula_printer> format;
static formula_printer* format = nullptr;
static std::ostringstream outputname;
static std::unique_ptr<formula_printer> outputnamer;
static formula_printer* outputnamer = nullptr;
static std::map<std::string, std::unique_ptr<output_file>> outputfiles;
int
@ -335,7 +320,7 @@ parse_opt_output(int key, char* arg, struct argp_state*)
output_format = lbt_output;
break;
case 'o':
outputnamer = std::make_unique<formula_printer>(outputname, arg);
outputnamer = new formula_printer(outputname, arg);
break;
case 'p':
full_parenth = true;
@ -356,7 +341,8 @@ parse_opt_output(int key, char* arg, struct argp_state*)
output_format = wring_output;
break;
case OPT_FORMAT:
format = std::make_unique<formula_printer>(std::cout, arg);
delete format;
format = new formula_printer(std::cout, arg);
break;
default:
return ARGP_ERR_UNKNOWN;
@ -370,7 +356,6 @@ static void
output_formula(std::ostream& out,
spot::formula f, spot::process_timer* ptimer,
const char* filename, const char* linenum,
unsigned index,
const char* prefix, const char* suffix)
{
if (!format)
@ -406,9 +391,7 @@ output_formula(std::ostream& out,
}
else
{
formula_with_location fl = { f, filename, linenum,
index, prefix, suffix };
format->set_output(out);
formula_with_location fl = { f, filename, linenum, prefix, suffix };
format->print(fl, ptimer);
}
}
@ -416,7 +399,6 @@ output_formula(std::ostream& out,
void
output_formula_checked(spot::formula f, spot::process_timer* ptimer,
const char* filename, const char* linenum,
unsigned index,
const char* prefix, const char* suffix)
{
if (output_format == count_output)
@ -432,36 +414,15 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer,
if (outputnamer)
{
outputname.str("");
formula_with_location fl = { f, filename, linenum,
index, prefix, suffix };
formula_with_location fl = { f, filename, linenum, prefix, suffix };
outputnamer->print(fl, ptimer);
std::string fname = outputname.str();
auto [it, b] = outputfiles.try_emplace(fname, nullptr);
if (b)
it->second.reset(new output_file(fname.c_str()));
else
// reopen if the file has been closed; see below
it->second->reopen_for_append(fname);
out = &it->second->ostream();
// If we have opened fewer than 10 files, we keep them all open
// to avoid wasting time on open/close calls.
//
// However we cannot keep all files open, especially in
// scenarios were we use thousands of files only once. To keep
// things simple, we only close the previous file if it is not
// the current output. This way we still save the close/open
// cost when consecutive formulas are sent to the same file.
static output_file* previous = nullptr;
static const std::string* previous_name = nullptr;
if (previous
&& outputfiles.size() > 10
&& &previous->ostream() != out)
previous->close(*previous_name);
previous = it->second.get();
previous_name = &it->first;
auto p = outputfiles.emplace(fname, nullptr);
if (p.second)
p.first->second.reset(new output_file(fname.c_str()));
out = &p.first->second->ostream();
}
output_formula(*out, f, ptimer, filename, linenum, index, prefix, suffix);
output_formula(*out, f, ptimer, filename, linenum, prefix, suffix);
*out << output_terminator;
// Make sure we abort if we can't write to std::cout anymore
// (like disk full or broken pipe with SIGPIPE ignored).
@ -471,12 +432,10 @@ output_formula_checked(spot::formula f, spot::process_timer* ptimer,
void output_formula_checked(spot::formula f,
spot::process_timer* ptimer,
const char* filename, int linenum,
unsigned index,
const char* prefix,
const char* suffix)
{
output_formula_checked(f, ptimer, filename,
std::to_string(linenum).c_str(),
index,
prefix, suffix);
}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -35,7 +36,6 @@ enum output_format_t { spot_output, spin_output, utf8_output,
extern output_format_t output_format;
extern bool full_parenth;
extern bool escape_csv;
extern bool output_ratexp;
#define COMMON_X_OUTPUT_SPECS(where) \
"number of atomic propositions " #where "; " \
@ -81,13 +81,11 @@ void output_formula_checked(spot::formula f,
spot::process_timer* ptimer = nullptr,
const char* filename = nullptr,
const char* linenum = nullptr,
unsigned output_index = 0U,
const char* prefix = nullptr,
const char* suffix = nullptr);
void output_formula_checked(spot::formula f,
spot::process_timer* ptimer,
const char* filename, int linenum,
unsigned output_index,
const char* prefix = nullptr,
const char* suffix = nullptr);

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -231,14 +232,7 @@ parse_opt_post(int key, char* arg, struct argp_state*)
if (arg)
type = XARGMATCH(key == 'P' ? "--parity" : "--colored-parity",
arg, parity_args, parity_types);
else if (!(type & spot::postprocessor::Parity))
// If no argument was given, we just require Parity.
// However, if a Parity condition was already set before,
// don't overwrite it. This way if someone mistakenly write
// `--parity='max even' --colored` without realizing that
// `--colored` is just the abbreviation for
// `--colored-parity=...` with the default argument, we
// won't reset the 'max even' setting.
else
type = spot::postprocessor::Parity;
if (key == 'p')
colored = spot::postprocessor::Colored;

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2018 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2013, 2014 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2015 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2014, 2016 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -35,17 +36,13 @@ parse_range(const char* str, int missing_left, int missing_right)
{
range res;
char* end;
errno = 0;
long lres = strtol(str, &end, 10);
res.min = lres;
if (res.min != lres || errno == ERANGE)
error(2, 0, "start of range '%s' is too large for an int.", str);
res.min = strtol(str, &end, 10);
if (end == str)
{
// No leading number. It's OK as long as the string is not
// empty.
if (!*end)
error(2, 0, "invalid empty range");
error(1, 0, "invalid empty range");
res.min = missing_left;
}
if (!*end)
@ -69,23 +66,19 @@ parse_range(const char* str, int missing_left, int missing_right)
{
// Parse the next integer.
char* end2;
errno = 0;
lres = strtol(end, &end2, 10);
res.max = lres;
if (res.max != lres || errno == ERANGE)
error(2, 0, "end of range '%s' is too large for an int.", str);
res.max = strtol(end, &end2, 10);
if (str == end2)
error(2, 0, "invalid range '%s' "
error(1, 0, "invalid range '%s' "
"(should start with digits, dots, or colon)", str);
if (end == end2)
error(2, 0, "invalid range '%s' (missing end?)", str);
error(1, 0, "invalid range '%s' (missing end?)", str);
if (*end2)
error(2, 0, "invalid range '%s' (trailing garbage?)", str);
error(1, 0, "invalid range '%s' (trailing garbage?)", str);
}
}
if (res.min < 0 || res.max < 0)
error(2, 0, "invalid range '%s': values must be positive", str);
error(1, 0, "invalid range '%s': values must be positive", str);
return res;
}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2014, 2015, 2016 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2022 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -19,14 +20,13 @@
#include "common_setup.hh"
#include "common_aoutput.hh"
#include <argp.h>
#include <closeout.h>
#include "argp.h"
#include "closeout.h"
#include <cstdlib>
#include <unistd.h>
#include <iostream>
#include <signal.h>
#include <sys/wait.h>
#include <error.h>
#include <spot/misc/tmpfile.hh>
static void
@ -35,7 +35,7 @@ display_version(FILE *stream, struct argp_state*)
fputs(program_name, stream);
fputs(" (" PACKAGE_NAME ") " PACKAGE_VERSION "\n\
\n\
Copyright (C) 2025 by the Spot authors, see the AUTHORS File for details.\n\
Copyright (C) 2022 Laboratoire de Recherche et Développement de l'Epita.\n\
License GPLv3+: \
GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n\
This is free software: you are free to change and redistribute it.\n\

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2013, 2018, 2019 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -33,5 +34,5 @@ int protected_main(char** progname, std::function<int()> mainfun);
// Diagnose exceptions.
[[noreturn]] void handle_any_exception();
#define BEGIN_EXCEPTION_PROTECT try { (void)0
#define BEGIN_EXCEPTION_PROTECT try { (void)0;
#define END_EXCEPTION_PROTECT } catch (...) { handle_any_exception(); }

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012 Laboratoire de Recherche et Développement de
// l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015-2022 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -26,7 +27,6 @@
#include <sys/wait.h>
#include <fcntl.h>
#include <iomanip>
#include <fstream>
#if __has_include(<spawn.h>)
#define HAVE_SPAWN_H 1
#include <spawn.h>
@ -52,7 +52,7 @@ struct shorthands_t
};
#define SHORTHAND(PRE, POST) { PRE, std::regex("^" PRE), POST }
static const shorthands_t shorthands_ltl[] = {
static shorthands_t shorthands_ltl[] = {
SHORTHAND("delag", " %f>%O"),
SHORTHAND("lbt", " <%L>%O"),
SHORTHAND("ltl2ba", " -f %s>%O"),
@ -72,7 +72,7 @@ static const shorthands_t shorthands_ltl[] = {
SHORTHAND("owl.* ltl-utilities\\b", " -f %f"),
};
static const shorthands_t shorthands_autproc[] = {
static shorthands_t shorthands_autproc[] = {
SHORTHAND("autfilt", " %H>%O"),
SHORTHAND("dra2dpa", " <%H>%O"),
SHORTHAND("dstar2tgba", " %H>%O"),
@ -84,7 +84,7 @@ static const shorthands_t shorthands_autproc[] = {
" <%H>%O"),
};
static void show_shorthands(const shorthands_t* begin, const shorthands_t* end)
static void show_shorthands(shorthands_t* begin, shorthands_t* end)
{
std::cout
<< ("If a COMMANDFMT does not use any %-sequence, and starts with one of\n"
@ -99,8 +99,7 @@ static void show_shorthands(const shorthands_t* begin, const shorthands_t* end)
}
tool_spec::tool_spec(const char* spec,
const shorthands_t* begin, const shorthands_t* end,
tool_spec::tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end,
bool is_ref) noexcept
: spec(spec), cmd(spec), name(spec), reference(is_ref)
{
@ -113,15 +112,15 @@ tool_spec::tool_spec(const char* spec,
{
if (*pos == '{')
++count;
else if (*pos == '}' && --count == 0)
{
name = strndup(cmd + 1, pos - cmd - 1);
cmd = pos + 1;
// skip leading whitespace
while (*cmd == ' ' || *cmd == '\t')
++cmd;
break;
}
else if (*pos == '}')
if (!--count)
{
name = strndup(cmd + 1, pos - cmd - 1);
cmd = pos + 1;
while (*cmd == ' ' || *cmd == '\t')
++cmd;
break;
}
}
}
// If there is no % in the string, look for a known
@ -147,11 +146,11 @@ tool_spec::tool_spec(const char* spec,
auto& p = *begin++;
if (std::regex_search(basename, p.rprefix))
{
size_t m = strlen(p.suffix);
size_t q = strlen(cmd);
int m = strlen(p.suffix);
int q = strlen(cmd);
char* tmp = static_cast<char*>(malloc(q + m + 1));
memcpy(tmp, cmd, q);
memcpy(tmp + q, p.suffix, m + 1);
strcpy(tmp, cmd);
strcpy(tmp + q, p.suffix);
cmd = tmp;
allocated = true;
break;
@ -462,92 +461,6 @@ autproc_runner::round_automaton(spot::const_twa_graph_ptr aut, unsigned serial)
filename_automaton.new_round(aut, serial);
}
// Run the command described by the NULL-terminated argument vector
// ARGS and return everything it printed on its standard output.
// Any failure (spawn/system error, I/O error, or nonzero exit
// status) aborts the program with exit status 2.
std::string
read_stdout_of_command(char* const* args)
{
#if HAVE_SPAWN_H
  // Pipe the child's stdout back to us.
  int cout_pipe[2];
  if (pipe(cout_pipe) < 0)
    // BUGFIX: pipe() returns -1 on failure and sets errno; the old
    // code passed the -1 return value to error() as the errnum.
    error(2, errno, "pipe() failed");
  posix_spawn_file_actions_t actions;
  if (int err = posix_spawn_file_actions_init(&actions))
    error(2, err, "posix_spawn_file_actions_init() failed");
  // In the child: close stdin, redirect stdout into the pipe, and
  // close both pipe ends that the child does not need.
  posix_spawn_file_actions_addclose(&actions, STDIN_FILENO);
  posix_spawn_file_actions_addclose(&actions, cout_pipe[0]);
  posix_spawn_file_actions_adddup2(&actions, cout_pipe[1], STDOUT_FILENO);
  posix_spawn_file_actions_addclose(&actions, cout_pipe[1]);
  pid_t pid;
  if (int err = posix_spawnp(&pid, args[0], &actions, nullptr, args, environ))
    error(2, err, "failed to run '%s'", args[0]);
  if (int err = posix_spawn_file_actions_destroy(&actions))
    error(2, err, "posix_spawn_file_actions_destroy() failed");
  if (close(cout_pipe[1]) < 0)
    error(2, errno, "closing write-side of pipe failed");

  // Slurp the child's entire output.
  std::string results;
  ssize_t bytes_read;
  for (;;)
    {
      static char buffer[512];
      bytes_read = read(cout_pipe[0], buffer, sizeof(buffer));
      if (bytes_read > 0)
        results.insert(results.end(), buffer, buffer + bytes_read);
      else
        break;
    }
  if (bytes_read < 0)
    // BUGFIX: pass errno, not read()'s -1 return value, as errnum.
    error(2, errno, "failed to read from pipe");
  // BUGFIX: the old code tested `cout_pipe[0] < 0` instead of
  // actually closing the read side, leaking the descriptor.
  if (close(cout_pipe[0]) < 0)
    error(2, errno, "closing read-side of pipe failed");
  int exit_code = 0;
  if (waitpid(pid, &exit_code, 0) == -1)
    error(2, errno, "waitpid() failed");
  if (exit_code)
    error(2, 0, "'%s' exited with status %d", args[0], exit_code);
  return results;
#else
  // We could provide a pipe+fork+exec alternative implementation, but
  // systems without posix_spawn() might also not have fork and exec.
  // For instance MinGW does not.  So let's fallback to system+tmpfile
  // instead for maximum portability.
  char prefix[30];
  snprintf(prefix, sizeof prefix, "spot-tmp");
  spot::temporary_file* tmpfile = spot::create_tmpfile(prefix);
  std::string tmpname = tmpfile->name();
  // Build "ARG0 ARG1 ... > TMPFILE" with proper shell quoting.
  std::ostringstream cmd;
  for (auto t = args; *t != nullptr; ++t)
    spot::quote_shell_string(cmd, *t) << ' ';
  cmd << '>';
  spot::quote_shell_string(cmd, tmpfile->name());
  std::string cmdstr = cmd.str();
  int exit_code = system(cmdstr.c_str());
  if (exit_code < 0)
    error(2, errno, "failed to execute %s", cmdstr.c_str());
  if (exit_code > 0)
    error(2, 0, "'%s' exited with status %d", args[0], exit_code);
  std::ifstream ifs(tmpname, std::ifstream::in);
  if (!ifs)
    error(2, 0, "failed to open %s (output of %s)", tmpname.c_str(), args[0]);
  ifs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
  std::stringstream buffer;
  buffer << ifs.rdbuf();
  delete tmpfile;
  return buffer.str();
#endif
}
std::atomic<bool> timed_out{false};
unsigned timeout_count = 0;
@ -611,7 +524,7 @@ get_arg(const char*& cmd)
{
const char* start = cmd;
std::string arg;
while (char c = *cmd)
while (int c = *cmd)
{
switch (c)
{
@ -641,14 +554,14 @@ get_arg(const char*& cmd)
goto end_loop;
case '\'':
{
char d = '\0';
int d = 0;
while ((d = *++cmd))
{
if (d == '\'')
break;
arg.push_back(d);
}
if (d == '\0')
if (d == 0)
return nullptr;
}
break;
@ -793,7 +706,6 @@ parse_simple_command(const char* cmd)
return res;
}
#ifndef HAVE_SPAWN_H
static void
exec_command(const char* cmd)
@ -839,6 +751,8 @@ exec_command(const char* cmd)
SPOT_UNREACHABLE();
return;
}
#else
extern char **environ;
#endif
int
@ -980,9 +894,9 @@ static const argp_option options[] =
"atomic proposition that compatible with Spin's syntax. You can "
"force this relabeling to always occur with option --relabel.\n"
"The sequences %f,%s,%l,%w,%F,%S,%L,%W can optionally be \"infixed\""
" by a bracketed sequence of operators to unabbreviate before outputting"
" by a bracketed sequence of operators to unabbreviate before outputing"
" the formula. For instance %[MW]f will rewrite operators M and W"
" before outputting it.\n"
" before outputing it.\n"
"Furthermore, if COMMANDFMT has the form \"{NAME}CMD\", then only CMD "
"will be passed to the shell, and NAME will be used to name the tool "
"in the output.", 4 },

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015-2018, 2020 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -50,8 +51,7 @@ struct tool_spec
// Whether the tool is a reference.
bool reference;
tool_spec(const char* spec,
const shorthands_t* begin, const shorthands_t* end,
tool_spec(const char* spec, shorthands_t* begin, shorthands_t* end,
bool is_ref) noexcept;
tool_spec(const tool_spec& other) noexcept;
tool_spec& operator=(const tool_spec& other);
@ -71,7 +71,7 @@ struct quoted_formula final: public spot::printable_value<spot::formula>
struct filed_formula final: public spot::printable
{
explicit filed_formula(const quoted_formula& ltl) : f_(ltl)
filed_formula(const quoted_formula& ltl) : f_(ltl)
{
}
@ -89,7 +89,9 @@ struct filed_formula final: public spot::printable
struct filed_automaton final: public spot::printable
{
filed_automaton() = default;
filed_automaton()
{
}
void print(std::ostream& os, const char* pos) const override;
@ -110,7 +112,7 @@ struct printable_result_filename final:
unsigned translator_num;
printable_result_filename();
~printable_result_filename() override;
~printable_result_filename();
void reset(unsigned n);
void cleanup();
@ -124,7 +126,7 @@ protected:
spot::bdd_dict_ptr dict;
// Round-specific variables
quoted_formula ltl_formula;
filed_formula filename_formula{ltl_formula};
filed_formula filename_formula = ltl_formula;
// Run-specific variables
printable_result_filename output;
public:
@ -149,9 +151,9 @@ protected:
public:
using spot::formater::has;
explicit autproc_runner(// whether we accept the absence of output
// specifier
bool no_output_allowed = false);
autproc_runner(// whether we accept the absence of output
// specifier
bool no_output_allowed = false);
void round_automaton(spot::const_twa_graph_ptr aut, unsigned serial);
};
@ -173,9 +175,3 @@ int exec_with_timeout(const char* cmd);
#define exec_with_timeout(cmd) system(cmd)
#define setup_sig_handler() while (0);
#endif // !ENABLE_TIMEOUT
// Run a command (whose args[0], args[1], etc. are given by args), and
// return its captured stdout. Stderr is not captured. Will abort
// with an error message if the command is not found, or if it exit
// with a non-zero status code.
std::string read_stdout_of_command(char* const* args);

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2013-2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -47,7 +48,7 @@
#include <spot/parseaut/public.hh>
#include <spot/twaalgos/sccinfo.hh>
static const char argp_program_doc[] = "\
static const char argp_program_doc[] ="\
Convert automata with any acceptance condition into variants of \
Büchi automata.\n\nThis reads automata into any supported format \
(HOA, LBTT, ltl2dstar, never claim) and outputs a \
@ -88,7 +89,7 @@ parse_opt(int key, char* arg, struct argp_state*)
switch (key)
{
case 'F':
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
break;
case 'x':
{
@ -98,7 +99,7 @@ parse_opt(int key, char* arg, struct argp_state*)
}
break;
case ARGP_KEY_ARG:
jobs.emplace_back(arg, job_type::AUT_FILENAME);
jobs.emplace_back(arg, true);
break;
default:
return ARGP_ERR_UNKNOWN;
@ -116,7 +117,7 @@ namespace
spot::postprocessor& post;
automaton_printer printer;
explicit dstar_processor(spot::postprocessor& post)
dstar_processor(spot::postprocessor& post)
: hoa_processor(spot::make_bdd_dict()), post(post), printer(aut_input)
{
}
@ -128,9 +129,7 @@ namespace
timer.start();
auto aut = post.run(haut->aut, nullptr);
timer.stop();
static unsigned index = 0;
printer.print(aut, timer, nullptr, haut->filename.c_str(), -1,
index++, haut);
printer.print(aut, timer, nullptr, haut->filename.c_str(), -1, haut);
flush_cout();
return 0;
}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2017-2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -42,8 +43,7 @@
using namespace spot;
static const char argp_program_doc[] =
"Generate ω-automata from predefined patterns.";
const char argp_program_doc[] ="Generate ω-automata from predefined patterns.";
static const argp_option options[] =
{
@ -61,18 +61,7 @@ static const argp_option options[] =
"equivalent deterministic Rabin automaton of less than N! states.", 0},
{ "m-nba", gen::AUT_M_NBA, "RANGE", 0,
"An NBA with N+1 states whose determinization needs at least "
"N! states.", 0},
{ "cyclist-trace-nba", gen::AUT_CYCLIST_TRACE_NBA, "RANGE", 0,
"An NBA with N+2 states that should include cyclist-proof-dba=B.", 0},
{ "cyclist-proof-dba", gen::AUT_CYCLIST_PROOF_DBA, "RANGE", 0,
"A DBA with N+2 states that should be included "
"in cyclist-trace-nba=B.", 0},
{ "cycle-log-nba", gen::AUT_CYCLE_LOG_NBA, "RANGE", 0,
"A cyclic NBA with N*N states and log(N) atomic propositions, that "
"should be simplifiable to a cyclic NBA with N states.", 0 },
{ "cycle-onehot-nba", gen::AUT_CYCLE_ONEHOT_NBA, "RANGE", 0,
"A cyclic NBA with N*N states and N atomic propositions, that "
"should be simplifiable to a cyclic NBA with N states.", 0 },
"N! states", 0},
RANGE_DOC,
/**************************************************/
{ nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 },
@ -127,14 +116,13 @@ output_pattern(gen::aut_pattern_id pattern, int n)
twa_graph_ptr aut = spot::gen::aut_pattern(pattern, n);
timer.stop();
automaton_printer printer;
static unsigned serial = 0;
printer.print(aut, timer, nullptr, aut_pattern_name(pattern), n, serial++);
printer.print(aut, timer, nullptr, aut_pattern_name(pattern), n);
}
static void
run_jobs()
{
for (const auto& j: jobs)
for (auto& j: jobs)
{
int inc = (j.range.max < j.range.min) ? -1 : 1;
int n = j.range.min;

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012, 2013, 2015-2019 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -44,7 +45,7 @@
using namespace spot;
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Generate temporal logic formulas from predefined patterns.";
// We reuse the values from gen::ltl_pattern_id as option keys.
@ -83,8 +84,6 @@ static const argp_option options[] =
{ "eh-patterns", gen::LTL_EH_PATTERNS, "RANGE", OPTION_ARG_OPTIONAL,
"Etessami and Holzmann [Concur'00] patterns "
"(range should be included in 1..12)", 0 },
{ "eil-gsi", gen::LTL_EIL_GSI, "RANGE", 0,
"G[0..n]((a S b) -> c) rewritten using future operators", 0 },
{ "fxg-or", gen::LTL_FXG_OR, "RANGE", 0,
"F(p0 | XG(p1 | XG(p2 | ... XG(pn))))", 0},
{ "gf-equiv", gen::LTL_GF_EQUIV, "RANGE", 0,
@ -115,11 +114,6 @@ static const argp_option options[] =
{ "kv-psi", gen::LTL_KV_PSI, "RANGE", 0,
"quadratic formula with doubly exponential DBA", 0 },
OPT_ALIAS(kr-n2),
{ "lily-patterns", gen::LTL_LILY_PATTERNS, "RANGE", OPTION_ARG_OPTIONAL,
"LTL synthesis specification examples from Lily 1.0.2 "
"[Jobstmann & Bloem, FMCAD'06] "
"(range should be included in 1..23)", 0 },
OPT_ALIAS(jb-patterns),
{ "ms-example", gen::LTL_MS_EXAMPLE, "RANGE[,RANGE]", 0,
"GF(a1&X(a2&X(a3&...Xan)))&F(b1&F(b2&F(b3&...&Xbm)))", 0 },
{ "ms-phi-h", gen::LTL_MS_PHI_H, "RANGE", 0,
@ -140,11 +134,11 @@ static const argp_option options[] =
OPT_ALIAS(beem-patterns),
OPT_ALIAS(p),
{ "pps-arbiter-standard", gen::LTL_PPS_ARBITER_STANDARD, "RANGE", 0,
"Arbiter with n clients that sent requests (iN) and "
"receive grants (oN). Standard semantics.", 0 },
"Arbiter with n clients that sent requests (ri) and "
"receive grants (gi). Standard semantics.", 0 },
{ "pps-arbiter-strict", gen::LTL_PPS_ARBITER_STRICT, "RANGE", 0,
"Arbiter with n clients that sent requests (iN) and "
"receive grants (oN). Strict semantics.", 0 },
"Arbiter with n clients that sent requests (ri) and "
"receive grants (gi). Strict semantics.", 0 },
{ "r-left", gen::LTL_R_LEFT, "RANGE", 0, "(((p1 R p2) R p3) ... R pn)", 0 },
{ "r-right", gen::LTL_R_RIGHT, "RANGE", 0, "(p1 R (p2 R (... R pn)))", 0 },
{ "rv-counter", gen::LTL_RV_COUNTER, "RANGE", 0, "n-bit counter", 0 },
@ -196,8 +190,6 @@ static const argp_option options[] =
"the formula (in the selected syntax)", 0 },
{ "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the name of the pattern", 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"serial number of the output formula (0-based)", 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the argument of the pattern", 0 },
{ "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
@ -293,8 +285,6 @@ parse_opt(int key, char* arg, struct argp_state*)
}
static unsigned output_count = 0U;
static void
output_pattern(gen::ltl_pattern_id pattern, int n, int n2)
{
@ -311,21 +301,21 @@ output_pattern(gen::ltl_pattern_id pattern, int n, int n2)
if (opt_positive || !opt_negative)
{
output_formula_checked(f, nullptr, gen::ltl_pattern_name(pattern),
args.c_str(), output_count++);
args.c_str());
}
if (opt_negative)
{
std::string tmp = "!";
tmp += gen::ltl_pattern_name(pattern);
output_formula_checked(formula::Not(f), nullptr, tmp.c_str(),
args.c_str(), output_count++);
args.c_str());
}
}
static void
run_jobs()
{
for (const auto& j: jobs)
for (auto& j: jobs)
{
int inc = (j.range.max < j.range.min) ? -1 : 1;
int n = j.range.min;

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2019 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -38,7 +39,7 @@
#include <spot/misc/optionmap.hh>
#include <spot/misc/timer.hh>
static const char argp_program_doc[] = "\
static const char argp_program_doc[] ="\
Translate linear-time formulas (LTL/PSL) into various types of automata.\n\n\
By default it will apply all available optimizations to output \
the smallest Transition-based Generalized Büchi Automata, \
@ -104,9 +105,10 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
// FIXME: use stat() to distinguish filename from string?
jobs.emplace_back(arg, ((*arg == '-' && !arg[1])
? job_type::LTL_FILENAME
: job_type::LTL_STRING));
if (*arg == '-' && !arg[1])
jobs.emplace_back(arg, true);
else
jobs.emplace_back(arg, false);
break;
default:
@ -123,10 +125,10 @@ namespace
{
public:
spot::translator& trans;
automaton_printer printer{ltl_input};
automaton_printer printer;
explicit trans_processor(spot::translator& trans)
: trans(trans)
trans_processor(spot::translator& trans)
: trans(trans), printer(ltl_input)
{
}
@ -154,9 +156,8 @@ namespace
auto aut = trans.run(&f);
timer.stop();
static unsigned index = 0;
printer.print(aut, timer, f, filename, linenum, index++,
nullptr, prefix, suffix);
printer.print(aut, timer, f, filename, linenum, nullptr,
prefix, suffix);
// If we keep simplification caches around, atomic propositions
// will still be defined, and one translation may influence the
// variable order of the next one.

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -45,7 +46,7 @@
#include <spot/taalgos/minimize.hh>
#include <spot/misc/optionmap.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Translate linear-time formulas (LTL/PSL) into Testing Automata.\n\n\
By default it outputs a transition-based generalized Testing Automaton \
the smallest Transition-based Generalized Büchi Automata, \
@ -147,9 +148,10 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
// FIXME: use stat() to distinguish filename from string?
jobs.emplace_back(arg, ((*arg == '-' && !arg[1])
? job_type::LTL_FILENAME
: job_type::LTL_STRING));
if (*arg == '-' && !arg[1])
jobs.emplace_back(arg, true);
else
jobs.emplace_back(arg, false);
break;
default:
@ -167,7 +169,7 @@ namespace
public:
spot::translator& trans;
explicit trans_processor(spot::translator& trans)
trans_processor(spot::translator& trans)
: trans(trans)
{
}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2020 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -68,7 +69,7 @@
#include <spot/misc/tmpfile.hh>
#include <spot/misc/timer.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Call several LTL/PSL translators and cross-compare their output to detect \
bugs, or to gather statistics. The list of formulas to use should be \
supplied on standard input, or using the -f or -F options.\v\
@ -263,32 +264,55 @@ end_error()
struct statistics
{
statistics()
: ok(false),
alternating(false),
status_str(nullptr),
status_code(0),
time(0),
states(0),
edges(0),
transitions(0),
acc(0),
scc(0),
nonacc_scc(0),
terminal_scc(0),
weak_scc(0),
strong_scc(0),
nondetstates(0),
nondeterministic(false),
terminal_aut(false),
weak_aut(false),
strong_aut(false)
{
}
// If OK is false, only the status_str, status_code, and time fields
// should be valid.
bool ok = false;
bool alternating = false;
const char* status_str = nullptr;
int status_code = 0;
double time = 0.0;
unsigned states = 0;
unsigned edges = 0;
unsigned long long transitions = 0;
unsigned acc = 0;
unsigned scc = 0;
unsigned nonacc_scc = 0;
unsigned terminal_scc = 0;
unsigned weak_scc = 0;
unsigned strong_scc = 0;
unsigned nondetstates = 0;
bool nondeterministic = false;
bool terminal_aut = false;
bool weak_aut = false;
bool strong_aut = false;
bool ok;
bool alternating;
const char* status_str;
int status_code;
double time;
unsigned states;
unsigned edges;
unsigned long long transitions;
unsigned acc;
unsigned scc;
unsigned nonacc_scc;
unsigned terminal_scc;
unsigned weak_scc;
unsigned strong_scc;
unsigned nondetstates;
bool nondeterministic;
bool terminal_aut;
bool weak_aut;
bool strong_aut;
std::vector<double> product_states;
std::vector<double> product_transitions;
std::vector<double> product_scc;
bool ambiguous = false;
bool complete = false;
bool ambiguous;
bool complete;
std::string hoa_str;
static void
@ -460,7 +484,7 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
if (arg[0] == '-' && !arg[1])
jobs.emplace_back(arg, job_type::LTL_FILENAME);
jobs.emplace_back(arg, true);
else
tools_push_trans(arg);
break;
@ -557,7 +581,7 @@ namespace
class xtranslator_runner final: public translator_runner
{
public:
explicit xtranslator_runner(spot::bdd_dict_ptr dict)
xtranslator_runner(spot::bdd_dict_ptr dict)
: translator_runner(dict)
{
}
@ -1071,14 +1095,17 @@ namespace
}
// Make sure we do not translate the same formula twice.
if (!allow_dups && !unique_set.insert(f).second)
if (!allow_dups)
{
if (!quiet)
std::cerr
<< ("warning: This formula or its negation has already"
" been checked.\n Use --allow-dups if it "
"should not be ignored.\n\n");
return 0;
if (!unique_set.insert(f).second)
{
if (!quiet)
std::cerr
<< ("warning: This formula or its negation has already"
" been checked.\n Use --allow-dups if it "
"should not be ignored.\n\n");
return 0;
}
}
int problems = 0;

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2015-2020 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -46,7 +47,7 @@
#include <spot/twaalgos/totgba.hh>
#include <spot/parseaut/public.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Run LTL/PSL formulas through another program, performing conversion\n\
of input and output as required.";
@ -192,7 +193,7 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
if (arg[0] == '-' && !arg[1])
jobs.emplace_back(arg, job_type::LTL_FILENAME);
jobs.emplace_back(arg, true);
else
tools_push_trans(arg);
break;
@ -208,7 +209,7 @@ namespace
class xtranslator_runner final: public translator_runner
{
public:
explicit xtranslator_runner(spot::bdd_dict_ptr dict)
xtranslator_runner(spot::bdd_dict_ptr dict)
: translator_runner(dict, true)
{
}
@ -223,6 +224,8 @@ namespace
format(command, tools[translator_num].cmd);
std::string cmd = command.str();
//std::cerr << "Running [" << l << translator_num << "]: "
// << cmd << std::endl;
timer.start();
int es = exec_with_timeout(cmd.c_str());
timer.stop();
@ -309,7 +312,7 @@ namespace
spot::printable_value<std::string> inputf;
public:
explicit processor(spot::postprocessor& post)
processor(spot::postprocessor& post)
: runner(dict), best_printer(best_stream, best_format), post(post)
{
printer.add_stat('T', &cmdname);
@ -320,7 +323,9 @@ namespace
best_printer.declare('f', &inputf);
}
~processor() override = default;
~processor()
{
}
int
process_string(const std::string& input,
@ -359,7 +364,8 @@ namespace
const char* csv_prefix, const char* csv_suffix)
{
static long int output_count = 0;
printer.print(aut, ptimer, f, filename, loc, output_count++, nullptr,
++output_count;
printer.print(aut, ptimer, f, filename, loc, nullptr,
csv_prefix, csv_suffix);
if (opt_max_count >= 0 && output_count >= opt_max_count)
abort_run = true;
@ -418,8 +424,8 @@ namespace
aut = post.run(aut, f);
if (best_type)
{
best_printer.print(nullptr, aut, f, filename, linenum, 0,
timer, prefix, suffix);
best_printer.print(nullptr, aut, f, filename, linenum, timer,
prefix, suffix);
std::string aut_stats = best_stream.str();
if (!best_aut ||
(strverscmp(best_stats.c_str(), aut_stats.c_str())

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2021 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -31,7 +32,6 @@
#include "common_output.hh"
#include "common_cout.hh"
#include "common_conv.hh"
#include "common_ioap.hh"
#include "common_r.hh"
#include "common_range.hh"
@ -39,7 +39,6 @@
#include <spot/misc/timer.hh>
#include <spot/tl/simplify.hh>
#include <spot/tl/sonf.hh>
#include <spot/tl/delta2.hh>
#include <spot/tl/length.hh>
#include <spot/tl/relabel.hh>
#include <spot/tl/unabbrev.hh>
@ -60,7 +59,7 @@
#include <spot/twaalgos/totgba.hh>
#include <spot/twaalgos/word.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Read a list of formulas and output them back after some optional processing.\v\
Exit status:\n\
0 if some formulas were output (skipped syntax errors do not count)\n\
@ -76,8 +75,6 @@ enum {
OPT_BSIZE_MAX,
OPT_BSIZE_MIN,
OPT_DEFINE,
OPT_DELTA1,
OPT_DELTA2,
OPT_DROP_ERRORS,
OPT_EQUIVALENT_TO,
OPT_EXCLUSIVE_AP,
@ -87,24 +84,19 @@ enum {
OPT_IGNORE_ERRORS,
OPT_IMPLIED_BY,
OPT_IMPLY,
OPT_INS,
OPT_LIVENESS,
OPT_LTL,
OPT_NEGATE,
OPT_NNF,
OPT_OBLIGATION,
OPT_PERSISTENCE,
OPT_PI2,
OPT_RECURRENCE,
OPT_REJECT_WORD,
OPT_RELABEL,
OPT_RELABEL_BOOL,
OPT_RELABEL_OVERLAP,
OPT_REMOVE_WM,
OPT_REMOVE_X,
OPT_SAFETY,
OPT_SAVE_PART_FILE,
OPT_SIGMA2,
OPT_SIZE,
OPT_SIZE_MAX,
OPT_SIZE_MIN,
@ -119,9 +111,6 @@ enum {
OPT_SYNTACTIC_RECURRENCE,
OPT_SYNTACTIC_SAFETY,
OPT_SYNTACTIC_SI,
OPT_TO_DELTA2,
OPT_OUTS,
OPT_PART_FILE,
OPT_UNABBREVIATE,
OPT_UNIVERSAL,
};
@ -146,16 +135,12 @@ static const argp_option options[] =
{ "sonf-aps", OPT_SONF_APS, "FILENAME", OPTION_ARG_OPTIONAL,
"when used with --sonf, output the newly introduced atomic "
"propositions", 0 },
{ "relabel", OPT_RELABEL, "abc|pnn|io", OPTION_ARG_OPTIONAL,
{ "relabel", OPT_RELABEL, "abc|pnn", OPTION_ARG_OPTIONAL,
"relabel all atomic propositions, alphabetically unless " \
"specified otherwise", 0 },
{ "relabel-bool", OPT_RELABEL_BOOL, "abc|pnn", OPTION_ARG_OPTIONAL,
"relabel Boolean subexpressions that do not share atomic propositions,"
" relabel alphabetically unless specified otherwise", 0 },
{ "relabel-overlapping-bool", OPT_RELABEL_OVERLAP, "abc|pnn",
OPTION_ARG_OPTIONAL,
"relabel Boolean subexpressions even if they share atomic propositions,"
" relabel alphabetically unless specified otherwise", 0 },
"relabel Boolean subexpressions, alphabetically unless " \
"specified otherwise", 0 },
{ "define", OPT_DEFINE, "FILENAME", OPTION_ARG_OPTIONAL,
"when used with --relabel or --relabel-bool, output the relabeling map "
"using #define statements", 0 },
@ -168,8 +153,6 @@ static const argp_option options[] =
{ "remove-x", OPT_REMOVE_X, nullptr, 0,
"remove X operators (valid only for stutter-insensitive properties)",
0 },
{ "to-delta2", OPT_TO_DELTA2, nullptr, 0,
"rewrite LTL formula in Δ₂-form", 0 },
{ "unabbreviate", OPT_UNABBREVIATE, "STR", OPTION_ARG_OPTIONAL,
"remove all occurrences of the operators specified by STR, which "
"must be a substring of \"eFGiMRW^\", where 'e', 'i', and '^' stand "
@ -183,20 +166,6 @@ static const argp_option options[] =
{ "from-ltlf", OPT_FROM_LTLF, "alive", OPTION_ARG_OPTIONAL,
"transform LTLf (finite LTL) to LTL by introducing some 'alive'"
" proposition", 0 },
{ "ins", OPT_INS, "PROPS", 0,
"comma-separated list of input atomic propositions to use with "
"--relabel=io or --save-part-file, interpreted as a regex if enclosed "
"in slashes", 0 },
{ "outs", OPT_OUTS, "PROPS", 0,
"comma-separated list of output atomic propositions to use with "
"--relabel=io or --save-part-file, interpreted as a regex if "
"enclosed in slashes", 0 },
{ "part-file", OPT_PART_FILE, "FILENAME", 0,
"file containing the partition of atomic propositions to use with "
"--relabel=io", 0 },
{ "save-part-file", OPT_SAVE_PART_FILE, "FILENAME", OPTION_ARG_OPTIONAL,
"file containing the partition of atomic propositions, "
"readable by --part-file", 0 },
DECLARE_OPT_R,
LEVEL_DOC(4),
/**************************************************/
@ -211,22 +180,15 @@ static const argp_option options[] =
{ "suspendable", OPT_SUSPENDABLE, nullptr, 0,
"synonym for --universal --eventual", 0 },
{ "syntactic-safety", OPT_SYNTACTIC_SAFETY, nullptr, 0,
"match syntactic-safety (a.k.a. Π₁) formulas", 0 },
{ "pi1", 0, nullptr, OPTION_ALIAS, nullptr, 0 },
"match syntactic-safety formulas", 0 },
{ "syntactic-guarantee", OPT_SYNTACTIC_GUARANTEE, nullptr, 0,
"match syntactic-guarantee (a.k.a. Σ₁) formulas", 0 },
{ "sigma1", 0, nullptr, OPTION_ALIAS, nullptr, 0 },
"match syntactic-guarantee formulas", 0 },
{ "syntactic-obligation", OPT_SYNTACTIC_OBLIGATION, nullptr, 0,
"match syntactic-obligation formulas", 0 },
{ "delta1", OPT_DELTA1, nullptr, 0,
"match Δ₁ formulas", 0 },
{ "syntactic-recurrence", OPT_SYNTACTIC_RECURRENCE, nullptr, 0,
"match syntactic-recurrence formulas", 0 },
{ "pi2", OPT_PI2, nullptr, 0, "match Π₂ formulas", 0 },
{ "syntactic-persistence", OPT_SYNTACTIC_PERSISTENCE, nullptr, 0,
"match syntactic-persistence formulas", 0 },
{ "sigma2", OPT_SIGMA2, nullptr, 0, "match Σ₂ formulas", 0 },
{ "delta2", OPT_DELTA2, nullptr, 0, "match Δ₂ formulas", 0 },
{ "syntactic-stutter-invariant", OPT_SYNTACTIC_SI, nullptr, 0,
"match stutter-invariant formulas syntactically (LTL-X or siPSL)", 0 },
{ "nox", 0, nullptr, OPTION_ALIAS, nullptr, 0 },
@ -292,8 +254,6 @@ static const argp_option options[] =
"the formula (in the selected syntax)", 0 },
{ "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the name of the input file", 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the serial number of the output formula", 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the original line number in the input file", 0 },
{ "%r", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
@ -346,10 +306,6 @@ static bool syntactic_guarantee = false;
static bool syntactic_obligation = false;
static bool syntactic_recurrence = false;
static bool syntactic_persistence = false;
static bool delta1 = false;
static bool delta2 = false;
static bool sigma2 = false;
static bool pi2 = false;
static bool syntactic_si = false;
static bool safety = false;
static bool guarantee = false;
@ -358,11 +314,7 @@ static bool recurrence = false;
static bool persistence = false;
static range size = { -1, -1 };
static range bsize = { -1, -1 };
enum relabeling_mode { NoRelabeling = 0,
ApRelabeling,
IOApRelabeling,
BseRelabeling,
OverlappingRelabeling };
enum relabeling_mode { NoRelabeling = 0, ApRelabeling, BseRelabeling };
static relabeling_mode relabeling = NoRelabeling;
static spot::relabeling_style style = spot::Abc;
static bool remove_x = false;
@ -373,7 +325,7 @@ static int opt_max_count = -1;
static long int match_count = 0;
static const char* from_ltlf = nullptr;
static const char* sonf = nullptr;
static bool to_delta2 = false;
// We want all these variables to be destroyed when we exit main, to
// make sure it happens before all other global variables (like the
@ -383,7 +335,6 @@ static struct opt_t
{
spot::bdd_dict_ptr dict = spot::make_bdd_dict();
spot::exclusive_ap excl_ap;
std::unique_ptr<output_file> output_part = nullptr;
std::unique_ptr<output_file> output_define = nullptr;
std::unique_ptr<output_file> output_sonf = nullptr;
spot::formula implied_by = nullptr;
@ -405,22 +356,6 @@ parse_formula_arg(const std::string& input)
return pf.f;
}
static void
parse_relabeling_style(const char* arg, const char* optname)
{
if (!arg || !strncasecmp(arg, "abc", 6))
style = spot::Abc;
else if (!strncasecmp(arg, "pnn", 4))
style = spot::Pnn;
else if (!*optname && !strncasecmp(arg, "io", 2))
relabeling = IOApRelabeling; // style is actually not supported
else
error(2, 0, "invalid argument for --relabel%s: '%s'\n"
"expecting %s", optname, arg,
*optname ? "'abc' or 'pnn'" : "'abc', 'pnn', or 'io'");
}
static int
parse_opt(int key, char* arg, struct argp_state*)
{
@ -452,7 +387,7 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
// FIXME: use stat() to distinguish filename from string?
jobs.emplace_back(arg, job_type::LTL_FILENAME);
jobs.emplace_back(arg, true);
break;
case OPT_ACCEPT_WORD:
try
@ -484,12 +419,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_DEFINE:
opt->output_define.reset(new output_file(arg ? arg : "-"));
break;
case OPT_DELTA1:
delta1 = true;
break;
case OPT_DELTA2:
delta2 = true;
break;
case OPT_DROP_ERRORS:
error_style = drop_errors;
break;
@ -526,10 +455,6 @@ parse_opt(int key, char* arg, struct argp_state*)
opt->imply = spot::formula::And({opt->imply, i});
break;
}
case OPT_INS:
all_input_aps.emplace();
split_aps(arg, *all_input_aps);
break;
case OPT_LIVENESS:
liveness = true;
break;
@ -545,13 +470,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_NNF:
nnf = true;
break;
case OPT_OUTS:
all_output_aps.emplace();
split_aps(arg, *all_output_aps);
break;
case OPT_PART_FILE:
read_part_file(arg);
break;
case OPT_SONF:
sonf = arg ? arg : "sonf_";
break;
@ -561,9 +479,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_OBLIGATION:
obligation = true;
break;
case OPT_PI2:
pi2 = true;
break;
case OPT_PERSISTENCE:
persistence = true;
break;
@ -583,16 +498,16 @@ parse_opt(int key, char* arg, struct argp_state*)
}
break;
case OPT_RELABEL:
relabeling = ApRelabeling;
parse_relabeling_style(arg, "");
break;
case OPT_RELABEL_BOOL:
relabeling = BseRelabeling;
parse_relabeling_style(arg, "-bool");
break;
case OPT_RELABEL_OVERLAP:
relabeling = OverlappingRelabeling;
parse_relabeling_style(arg, "-overlapping-bool");
relabeling = (key == OPT_RELABEL_BOOL ? BseRelabeling : ApRelabeling);
if (!arg || !strncasecmp(arg, "abc", 6))
style = spot::Abc;
else if (!strncasecmp(arg, "pnn", 4))
style = spot::Pnn;
else
error(2, 0, "invalid argument for --relabel%s: '%s'",
(key == OPT_RELABEL_BOOL ? "-bool" : ""),
arg);
break;
case OPT_REMOVE_WM:
unabbreviate += "MW";
@ -603,9 +518,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_SAFETY:
safety = obligation = true;
break;
case OPT_SAVE_PART_FILE:
opt->output_part.reset(new output_file(arg ? arg : "-"));
break;
case OPT_SIZE:
size = parse_range(arg, 0, std::numeric_limits<int>::max());
break;
@ -621,9 +533,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_STUTTER_INSENSITIVE:
stutter_insensitive = true;
break;
case OPT_TO_DELTA2:
to_delta2 = true;
break;
case OPT_UNABBREVIATE:
if (arg)
unabbreviate += arg;
@ -633,9 +542,6 @@ parse_opt(int key, char* arg, struct argp_state*)
case OPT_AP_N:
ap_n = parse_range(arg, 0, std::numeric_limits<int>::max());
break;
case OPT_SIGMA2:
sigma2 = true;
break;
case OPT_SUSPENDABLE:
universal = true;
eventual = true;
@ -680,7 +586,7 @@ namespace
fset_t unique_set;
spot::relabeling_map relmap;
explicit ltl_processor(spot::tl_simplifier& simpl)
ltl_processor(spot::tl_simplifier& simpl)
: simpl(simpl)
{
}
@ -779,9 +685,6 @@ namespace
}
}
if (to_delta2)
f = spot::to_delta2(f);
switch (relabeling)
{
case ApRelabeling:
@ -790,24 +693,12 @@ namespace
f = spot::relabel(f, style, &relmap);
break;
}
case IOApRelabeling:
{
relmap.clear();
f = relabel_io(f, relmap, filename, linenum);
break;
}
case BseRelabeling:
{
relmap.clear();
f = spot::relabel_bse(f, style, &relmap);
break;
}
case OverlappingRelabeling:
{
relmap.clear();
f = spot::relabel_overlapping_bse(f, style, &relmap);
break;
}
case NoRelabeling:
break;
}
@ -826,16 +717,12 @@ namespace
matched &= !syntactic_safety || f.is_syntactic_safety();
matched &= !syntactic_guarantee || f.is_syntactic_guarantee();
matched &= !syntactic_obligation || f.is_syntactic_obligation();
matched &= !delta1 || f.is_delta1();
matched &= !syntactic_recurrence || f.is_syntactic_recurrence();
matched &= !pi2 || f.is_pi2();
matched &= !syntactic_persistence || f.is_syntactic_persistence();
matched &= !sigma2 || f.is_sigma2();
matched &= !delta2 || f.is_delta2();
matched &= !syntactic_si || f.is_syntactic_stutter_invariant();
if (matched && (ap_n.min > 0 || ap_n.max >= 0))
{
spot::atomic_prop_set* s = atomic_prop_collect(f);
auto s = atomic_prop_collect(f);
int n = s->size();
delete s;
matched &= (ap_n.min <= 0) || (n >= ap_n.min);
@ -874,7 +761,7 @@ namespace
aut = ltl_to_tgba_fm(f, simpl.get_dict(), true);
if (matched && !opt->acc_words.empty())
for (const spot::twa_graph_ptr& word_aut: opt->acc_words)
for (auto& word_aut: opt->acc_words)
if (spot::product(aut, word_aut)->is_empty())
{
matched = false;
@ -882,7 +769,7 @@ namespace
}
if (matched && !opt->rej_words.empty())
for (const spot::twa_graph_ptr& word_aut: opt->rej_words)
for (auto& word_aut: opt->rej_words)
if (!spot::product(aut, word_aut)->is_empty())
{
matched = false;
@ -928,7 +815,7 @@ namespace
{
spot::scc_info si(min);
matched &= !guarantee
|| is_terminal_automaton(min, &si);
|| is_terminal_automaton(min, &si, true);
matched &= !safety || is_safety_automaton(min, &si);
}
}
@ -956,51 +843,16 @@ namespace
{
// Sort the formulas alphabetically.
std::map<std::string, spot::formula> m;
for (const auto& [newformula, oldname]: relmap)
m.emplace(str_psl(newformula), oldname);
for (const auto& [newname, oldname]: m)
for (auto& p: relmap)
m.emplace(str_psl(p.first), p.second);
for (auto& p: m)
stream_formula(opt->output_define->ostream()
<< "#define " << newname << " (",
oldname, filename,
<< "#define " << p.first << " (",
p.second, filename,
std::to_string(linenum).c_str()) << ")\n";
}
if (opt->output_part
&& output_format != count_output
&& output_format != quiet_output)
{
std::vector<spot::formula> ins;
std::vector<spot::formula> outs;
spot::atomic_prop_set* s = atomic_prop_collect(f);
for (spot::formula ap: *s)
{
spot::formula apo = ap;
if (auto it = relmap.find(ap); it != relmap.end())
apo = it->second;
if (is_output(apo.ap_name(), filename, linenum))
outs.push_back(ap);
else
ins.push_back(ap);
}
delete s;
auto& os = opt->output_part->ostream();
if (!ins.empty())
{
os << ".inputs";
for (const auto& ap: ins)
os << ' ' << str_psl(ap);
os << '\n';
}
if (!outs.empty())
{
os << ".outputs";
for (const auto& ap: outs)
os << ' ' << str_psl(ap);
os << '\n';
}
}
one_match = true;
output_formula_checked(f, &timer, filename, linenum,
match_count, prefix, suffix);
output_formula_checked(f, &timer, filename, linenum, prefix, suffix);
++match_count;
}
return 0;
@ -1024,10 +876,7 @@ main(int argc, char** argv)
exit(err);
if (jobs.empty())
jobs.emplace_back("-", job_type::LTL_FILENAME);
if (relabeling == IOApRelabeling || opt->output_part)
process_io_options();
jobs.emplace_back("-", 1);
if (boolean_to_isop && simplification_level == 0)
simplification_level = 1;

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2014, 2015, 2016, 2017, 2018, 2019 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -86,8 +87,6 @@ static const argp_option options[] = {
"the formula (in the selected syntax)", 0 },
{ "%F", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the name of the input file", 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the serial number of the output formula (0-based)", 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the original line number in the input file", 0 },
{ "%<", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
@ -113,8 +112,6 @@ static const argp_child children[] = {
namespace
{
static unsigned output_count = 0;
class mutate_processor final: public job_processor
{
public:
@ -125,8 +122,7 @@ namespace
auto mutations =
spot::mutate(f, mut_opts, max_output, mutation_nb, opt_sort);
for (auto g: mutations)
output_formula_checked(g, nullptr, filename, linenum,
output_count++, prefix, suffix);
output_formula_checked(g, nullptr, filename, linenum, prefix, suffix);
return 0;
}
};
@ -147,7 +143,7 @@ parse_opt(int key, char* arg, struct argp_state*)
break;
case ARGP_KEY_ARG:
// FIXME: use stat() to distinguish filename from string?
jobs.emplace_back(arg, job_type::LTL_FILENAME);
jobs.emplace_back(arg, true);
break;
case OPT_AP2CONST:
opt_all = 0;
@ -199,7 +195,7 @@ main(int argc, char* argv[])
mut_opts |= opt_all;
check_no_formula("mutate");
check_no_formula();
mutate_processor processor;
if (processor.run())

View file

@ -1,389 +0,0 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
//
// This file is part of Spot, a model checking library.
//
// Spot is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// Spot is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
// License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "common_sys.hh"
#include <argp.h>
#include "error.h"
#include "common_setup.hh"
#include "common_finput.hh"
#include "common_output.hh"
#include "common_ioap.hh"
#include "common_conv.hh"
#include "common_cout.hh"
#include "common_range.hh"
#include <spot/tl/defaultenv.hh>
#include <spot/tl/randomltl.hh>
#include <spot/misc/random.hh>
enum {
OPT_BOOLEAN_PRIORITIES = 256,
OPT_DUMP_PRIORITIES,
OPT_DUPS,
OPT_INS,
OPT_LTL_PRIORITIES,
OPT_OUTS,
OPT_PART_FILE,
OPT_SEED,
OPT_TREE_SIZE,
};
static const char * argp_program_doc =
"Combine formulas taken randomly from an input set.\n\n\
The input set is specified using FILENAME, -F FILENAME, or -f formula.\n\
By default this generates a Boolean pattern of size 5, for instance\n\
\"(φ₁ & φ₂) | φ₃\", where each φᵢ is randomly taken from the input set.\n\
The size and nature of the pattern can be changed using generation\n\
parameters. Additionally, it is possible to rename the atomic propositions\n\
in each φ using -A or -P.\v\
Example:\n\
\n\
Generates 10 random Boolean combinations of terms of the form GFa, with\n\
'a' picked from a set of 5 atomic propositions:\n\
% ltlmix -f GFa -n10 -A5\n\
\n\
Build a single LTL formula over subformulas taken randomly from the list of\n\
55 patterns by Dwyer et al., using a choice of 10 atomic propositions to\n\
relabel subformulas:\n\
% genltl --dac | ltlmix -L -A10\n\
\n\
Build 5 random positive Boolean combination of GFa and GFb:\n"
// next line is in its own double-quote to please sanity.test
" % ltlmix -f GFa -f GFb --boolean-prio=not=0,xor=0,implies=0,equiv=0 -n5";
static const argp_option options[] = {
// Keep this alphabetically sorted (expect for aliases).
/**************************************************/
{ nullptr, 0, nullptr, 0, "Generation parameters:", 2 },
{ "allow-dups", OPT_DUPS, nullptr, 0,
"allow duplicate formulas to be output", 0 },
{ "ap-count", 'A', "N[,M]", 0,
"rename the atomic propositions in each selected formula by drawing "
"randomly from N atomic propositions (the rewriting is bijective "
"if N is larger than the original set). If M is specified, two sets "
"of atomic propositions are used to represent inputs and outputs, and "
"options --ins/--outs can be used to classify the original propositions.",
0 },
{ "boolean", 'B', nullptr, 0,
"generate Boolean combinations of formulas (default)", 0 },
{ "formulas", 'n', "INT", 0,
"number of formulas to generate (default: 1);\n"
"use a negative value for unbounded generation", 0 },
{ "ltl", 'L', nullptr, 0, "generate LTL combinations of subformulas", 0 },
{ "polarized-ap", 'P', "N[,M]", 0,
"similar to -A, but randomize the polarity of the new atomic "
"propositions", 0 },
{ "random-conjuncts", 'C', "N", 0,
"generate random-conjunctions of N conjuncts; "
"shorthand for --tree-size {2N-1} -B "
"--boolean-priorities=[disable everything but 'and']", 0 },
{ "seed", OPT_SEED, "INT", 0,
"seed for the random number generator (default: 0)", 0 },
{ "tree-size", OPT_TREE_SIZE, "RANGE", 0,
"tree size of main pattern generated (default: 5);\n"
"input formulas count as size 1.", 0 },
{ "ins", OPT_INS, "PROPS", 0,
"comma-separated list of atomic propositions to consider as input, "
"interpreted as a regex if enclosed in slashes", 0 },
{ "outs", OPT_OUTS, "PROPS", 0,
"comma-separated list of atomic propositions to consider as output, "
"interpreted as a regex if enclosed in slashes", 0 },
{ "part-file", OPT_PART_FILE, "FILENAME", 0,
"read the I/O partition of atomic propositions from FILENAME", 0 },
RANGE_DOC,
/**************************************************/
{ nullptr, 0, nullptr, 0, "Adjusting probabilities:", 4 },
{ "dump-priorities", OPT_DUMP_PRIORITIES, nullptr, 0,
"show current priorities, do not generate any formula", 0 },
{ "ltl-priorities", OPT_LTL_PRIORITIES, "STRING", 0,
"set priorities for LTL formulas", 0 },
{ "boolean-priorities", OPT_BOOLEAN_PRIORITIES, "STRING", 0,
"set priorities for Boolean formulas", 0 },
{ nullptr, 0, nullptr, 0, "STRING should be a comma-separated list of "
"assignments, assigning integer priorities to the tokens "
"listed by --dump-priorities.", 0 },
/**************************************************/
{ nullptr, 0, nullptr, 0, "Output options:", -20 },
{ nullptr, 0, nullptr, 0, "The FORMAT string passed to --format may use "
"the following interpreted sequences:", -19 },
{ "%f", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the formula (in the selected syntax)", 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the (serial) number of the formula (0-based)", 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the (serial) number of the formula (1-based)", 0 },
{ "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"a single %", 0 },
COMMON_LTL_OUTPUT_SPECS,
/**************************************************/
{ nullptr, 0, nullptr, 0, "Miscellaneous options:", -1 },
{ nullptr, 0, nullptr, 0, nullptr, 0 }
};
static const argp_child children[] = {
{ &finput_argp, 0, nullptr, 0 },
{ &output_argp, 0, nullptr, 0 },
{ &misc_argp, 0, nullptr, 0 },
{ nullptr, 0, nullptr, 0 }
};
static int opt_formulas = 1;
static spot::randltlgenerator::output_type output =
spot::randltlgenerator::Bool;
static char* opt_pL = nullptr;
static char* opt_pB = nullptr;
static char random_conj[] = "not=0,implies=0,equiv=0,xor=0,or=0";
static bool opt_dump_priorities = false;
static int opt_seed = 0;
static range opt_tree_size = { 5, 5 };
static bool opt_unique = true;
static int opt_ap_count = 0;
static int opt_out_ap_count = 0;
static bool opt_literal = false;
static bool opt_io = false;
namespace
{
// We want all these variables to be destroyed when we exit main, to
// make sure it happens before all other global variables (like the
// atomic propositions maps) are destroyed. Otherwise we risk
// accessing deleted stuff.
static struct opt_t
{
spot::atomic_prop_set sub;
}* opt = nullptr;
class sub_processor final: public job_processor
{
public:
int
process_formula(spot::formula f, const char* filename = nullptr,
int linenum = 0) override
{
if (opt_io)
// Filter the atomic propositions of each formula in order to
// report those that are not classifiable. Throw the result
// of that filtering away, as we only care about the potential
// diagnostics.
(void) filter_list_of_aps(f, filename, linenum);
opt->sub.insert(f);
return 0;
}
};
}
static sub_processor subreader;
std::pair<int, int> to_int_pair(const char* arg, const char* opt)
{
const char* comma = strchr(arg, ',');
if (!comma)
{
int res = to_int(arg, opt);
return {res, 0};
}
else
{
std::string arg1(arg, comma);
int res1 = to_int(arg1.c_str(), opt);
int res2 = to_int(comma + 1, opt);
return {res1, res2};
}
}
static int
parse_opt(int key, char* arg, struct argp_state*)
{
// Called from C code, so should not raise any exception.
BEGIN_EXCEPTION_PROTECT;
switch (key)
{
case 'A':
std::tie(opt_ap_count, opt_out_ap_count) =
to_int_pair(arg, "-A/--ap-count");
opt_literal = false;
break;
case 'B':
output = spot::randltlgenerator::Bool;
break;
case 'C':
{
int s = 2 * to_int(arg, "-C/--random-conjuncs") - 1;
opt_tree_size = {s, s};
output = spot::randltlgenerator::Bool;
opt_pB = random_conj;
break;
}
case 'L':
output = spot::randltlgenerator::LTL;
break;
case 'n':
opt_formulas = to_int(arg, "-n/--formulas");
break;
case 'P':
std::tie(opt_ap_count, opt_out_ap_count) =
to_int_pair(arg, "-P/--polarized-ap");
opt_literal = true;
break;
case OPT_BOOLEAN_PRIORITIES:
opt_pB = arg;
break;
case OPT_LTL_PRIORITIES:
opt_pL = arg;
break;
case OPT_DUMP_PRIORITIES:
opt_dump_priorities = true;
break;
case OPT_DUPS:
opt_unique = false;
break;
case OPT_INS:
all_input_aps.emplace();
split_aps(arg, *all_input_aps);
opt_io = true;
break;
case OPT_OUTS:
all_output_aps.emplace();
split_aps(arg, *all_output_aps);
opt_io = true;
break;
case OPT_PART_FILE:
read_part_file(arg);
opt_io = true;
break;
case OPT_SEED:
opt_seed = to_int(arg, "--seed");
break;
case OPT_TREE_SIZE:
opt_tree_size = parse_range(arg);
if (opt_tree_size.min > opt_tree_size.max)
std::swap(opt_tree_size.min, opt_tree_size.max);
break;
case ARGP_KEY_ARG:
jobs.emplace_back(arg, job_type::LTL_FILENAME);
break;
default:
return ARGP_ERR_UNKNOWN;
}
END_EXCEPTION_PROTECT;
return 0;
}
int
main(int argc, char* argv[])
{
return protected_main(argv, [&] {
const argp ap = { options, parse_opt, "[FILENAME[/COL]...]",
argp_program_doc, children, nullptr, nullptr };
// This will ensure that all objects stored in this struct are
// destroyed before global variables.
opt_t o;
opt = &o;
if (int err = argp_parse(&ap, argc, argv, ARGP_NO_HELP, nullptr, nullptr))
exit(err);
check_no_formula("combine");
if (opt_io && !opt_out_ap_count)
error(2, 0,
"options --ins, --outs, --part-file only make sense when the "
"two-argument version of '-A N,M' or '-P N,M' is used.");
if (opt_out_ap_count > 0)
// Do not require --ins/--outs to be used, as the input
// pattern may use atomic propositions starting with i/o
// already. Setting opt_io will cause the subreader to
// complain about unclassifible atomic propositions.
opt_io = true;
if (opt_io)
process_io_options();
if (subreader.run())
return 2;
if (opt->sub.empty())
error(2, 0, "the set of subformulas to build from is empty");
spot::srand(opt_seed);
std::function<bool(spot::formula)> output_p = nullptr;
if (opt_out_ap_count)
output_p = [&](spot::formula f) { return is_output(f.ap_name()); };
spot::randltlgenerator rg
(opt_ap_count,
[&] (){
spot::option_map opts;
opts.set("output", output);
opts.set("out_ap_size", opt_out_ap_count);
opts.set("tree_size_min", opt_tree_size.min);
opts.set("tree_size_max", opt_tree_size.max);
opts.set("seed", opt_seed);
opts.set("simplification_level", 0);
opts.set("unique", opt_unique);
opts.set("literals", opt_literal);
return opts;
}(), opt_pL, nullptr, opt_pB, &opt->sub, output_p);
if (opt_dump_priorities)
{
switch (output)
{
case spot::randltlgenerator::LTL:
std::cout <<
"Use --ltl-priorities to set the following LTL priorities:\n";
rg.dump_ltl_priorities(std::cout);
break;
case spot::randltlgenerator::Bool:
std::cout <<
"Use --boolean-priorities to set the following Boolean "
"formula priorities:\n";
rg.dump_bool_priorities(std::cout);
break;
case spot::randltlgenerator::PSL:
case spot::randltlgenerator::SERE:
error(2, 0, "PSL/SERE output is unsupported");
break;
}
exit(0);
}
int count = 0;
while (opt_formulas < 0 || opt_formulas--)
{
spot::formula f = rg.next();
if (!f)
{
error(2, 0, "failed to generate a new unique formula after %d " \
"trials", spot::randltlgenerator::MAX_TRIALS);
}
else
{
output_formula_checked(f, nullptr, nullptr, count + 1, count);
++count;
}
};
flush_cout();
return 0;
});
}

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,6 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2012-2017 Laboratoire de Recherche et Développement
## de l'Epita (LRDE).
##
## This file is part of Spot, a model checking library.
##
@ -35,7 +36,6 @@ dist_man1_MANS = \
ltldo.1 \
ltlfilt.1 \
ltlgrind.1 \
ltlmix.1 \
ltlsynt.1 \
randaut.1 \
randltl.1
@ -73,9 +73,6 @@ ltlfilt.1: $(common_dep) $(srcdir)/ltlfilt.x $(srcdir)/../ltlfilt.cc
ltlgrind.1: $(common_dep) $(srcdir)/ltlgrind.x $(srcdir)/../ltlgrind.cc
$(convman) ../ltlgrind$(EXEEXT) $(srcdir)/ltlgrind.x $@
ltlmix.1: $(common_dep) $(srcdir)/ltlmix.x $(srcdir)/../ltlmix.cc
$(convman) ../ltlmix$(EXEEXT) $(srcdir)/ltlmix.x $@
ltlsynt.1: $(common_dep) $(srcdir)/ltlsynt.x $(srcdir)/../ltlsynt.cc
$(convman) ../ltlsynt$(EXEEXT) $(srcdir)/ltlsynt.x $@

View file

@ -59,29 +59,29 @@ Information about how the execution of the tool went.
values:
.RS
.TP
\fC"ok"\fR
\f(CW"ok"\fR
The tool ran succesfully (this does not imply that the produced
automaton is correct) and autcross could parse the resulting
automaton. In this case \fBexit_code\fR is always 0.
.TP
\fC"timeout"\fR
\f(CW"timeout"\fR
The tool ran for more than the number of seconds
specified with the \fB\-\-timeout\fR option. In this
case \fBexit_code\fR is always -1.
.TP
\fC"exit code"\fR
\f(CW"exit code"\fR
The tool terminated with a non-zero exit code.
\fBexit_code\fR contains that value.
.TP
\fC"signal"\fR
\f(CW"signal"\fR
The tool terminated with a signal.
\fBexit_code\fR contains that signal's number.
.TP
\fC"parse error"\fR
\f(CW"parse error"\fR
The tool terminated normally, but autcross could not
parse its output. In this case \fBexit_code\fR is always -1.
.TP
\fC"no output"\fR
\f(CW"no output"\fR
The tool terminated normally, but without creating the specified
output file. In this case \fBexit_code\fR is always -1.
.RE

View file

@ -70,7 +70,7 @@ Documents the output format of
.TP
2.
Christof Löding: Mehods for the Transformation of ω-Automata:
Chistof Löding: Mehods for the Transformation of ω-Automata:
Complexity and Connection to Second Order Logic. Diploma Thesis.
University of Kiel. 1998.

View file

@ -29,17 +29,13 @@ gh
J. Geldenhuys and H. Hansen: Larger automata and less
work for LTL model checking. Proceedings of Spin'06. LNCS 3925.
.TP
go
P. Gastin and D. Oddoux: Fast LTL to Büchi Automata Translation.
Proceedings of CAV'01. LNCS 2102.
.TP
hkrss
J. Holeček, T. Kratochvila, V. Řehák, D. Šafránek, and P. Šimeček:
Verification Results in Liberouter Project. Tech. Report 03, CESNET, 2004.
.TP
jb, lily
B. Jobstmann, and R. Bloem:
Optimizations for LTL Synthesis. Proceedings of FMCAD'06. IEEE.
go
P. Gastin and D. Oddoux: Fast LTL to Büchi Automata Translation.
Proceedings of CAV'01. LNCS 2102.
.TP
kr
O. Kupferman and A. Rosenberg: The Blow-Up in Translating LTL to Deterministic
@ -84,4 +80,3 @@ Proceedings of RV'10. LNCS 6418.
.BR ltlfilt (1),
.BR randaut (1),
.BR randltl (1)
.BR ltlmix (1)

View file

@ -13,7 +13,7 @@ condition, meanings that a run of the automaton is accepted iff it
visits ininitely often multiple acceptance sets, and it also uses
transition-based acceptance, i.e., those acceptance sets are sets of
transitions. TGBA are often more consise than traditional Büchi
automata. For instance the LTL formula \fCGFa & GFb\fR can be
automata. For instance the LTL formula \f(CWGFa & GFb\fR can be
translated into a single-state TGBA while a traditional Büchi
automaton would need 3 states. Compare
.PP
@ -158,11 +158,11 @@ are not recognized, i.e., infinite words that start with a bad prefix.
.PP
Because of this limited expressiveness, a monitor for some given LTL
or PSL formula may accept a larger language than the one specified by
the formula. For instance a monitor for the LTL formula \fCa U b\fR
will reject (for instance) any word starting with \fC!a&!b\fR as
the formula. For instance a monitor for the LTL formula \f(CWa U b\fR
will reject (for instance) any word starting with \f(CW!a&!b\fR as
there is no way such a word can validate the formula, but it will not
reject a finite prefix repeating only \fCa&!b\fR as such a prefix
could be extented in a way that is comptible with \fCa U b\fR.
reject a finite prefix repeating only \f(CWa&!b\fR as such a prefix
could be extented in a way that is comptible with \f(CWa U b\fR.
.PP
For more information about monitors, we refer the readers to the
following two papers (the first paper describes the construction of

View file

@ -12,7 +12,7 @@ for 100 random formulas, using a timeout of 2 minutes. Because
knows those tools, there is no need to specify their input and
output. A trace of the execution of the two tools, including any
potential issue detected, is reported on standard error, while
statistics are written to \fCresults.json\fR.
statistics are written to \f(CWresults.json\fR.
.PP
.in +4n
.nf
@ -26,14 +26,14 @@ The next command compares
.BR ltl3ba ,
and
.BR ltl2tgba (1)
on a set of formulas saved in file \fCinput.ltl\fR.
on a set of formulas saved in file \f(CWinput.ltl\fR.
Statistics are again writen
as CSV into \fCresults.csv\fR. This examples specify the
as CSV into \f(CWresults.csv\fR. This examples specify the
input and output for each tool, to show how this can be done.
Note the use of \fC%L\fR to indicate that the formula passed t
Note the use of \f(CW%L\fR to indicate that the formula passed t
for the formula in
.BR spin (1)'s
format, and \fC%f\fR for the
format, and \f(CW%f\fR for the
formula in Spot's format. Each of these tool produces an
automaton in a different format (respectively, LBTT's format,
Spin's never claims, and HOA format), but Spot's parser can
@ -51,7 +51,7 @@ distinguish and understand these three formats.
Rabin or Streett automata output by
.B ltl2dstar
in its historical format can be read from a
file specified with \fC%D\fR instead of \fC%O\fR. For instance:
file specified with \f(CW%D\fR instead of \f(CW%O\fR. For instance:
.PP
.in +4n
.nf
@ -99,7 +99,7 @@ The formula translated.
\fBtool\fR
The tool used to translate this formula. This is either the value of the
full \fICOMMANDFMT\fR string specified on the command-line, or,
if \fICOMMANDFMT\fR has the form \fC{\fISHORTNAME\fR\fC}\fR\fICMD\fR,
if \fICOMMANDFMT\fR has the form \f(CW{\fISHORTNAME\fR\f(CW}\fR\fICMD\fR,
the value of \fISHORTNAME\fR.
.TP
\fBexit_status\fR, \fBexit_code\fR
@ -110,29 +110,29 @@ Otherwise, \fBexit_status\fR is a string that can take the following
values:
.RS
.TP
\fC"ok"\fR
\f(CW"ok"\fR
The translator ran succesfully (this does not imply that the produced
automaton is correct) and ltlcross could parse the resulting
automaton. In this case \fBexit_code\fR is always 0.
.TP
\fC"timeout"\fR
\f(CW"timeout"\fR
The translator ran for more than the number of seconds
specified with the \fB\-\-timeout\fR option. In this
case \fBexit_code\fR is always -1.
.TP
\fC"exit code"\fR
\f(CW"exit code"\fR
The translator terminated with a non-zero exit code.
\fBexit_code\fR contains that value.
.TP
\fC"signal"\fR
\f(CW"signal"\fR
The translator terminated with a signal.
\fBexit_code\fR contains that signal's number.
.TP
\fC"parse error"\fR
\f(CW"parse error"\fR
The translator terminated normally, but ltlcross could not
parse its output. In this case \fBexit_code\fR is always -1.
.TP
\fC"no output"\fR
\f(CW"no output"\fR
The translator terminated normally, but without creating the specified
output file. In this case \fBexit_code\fR is always -1.
.RE
@ -150,7 +150,7 @@ translated automaton. Column \fBedges\fR counts the number of edges
(labeled by Boolean formulas) in the automaton seen as a graph, while
\fBtransitions\fR counts the number of assignment-labeled transitions
that might have been merged into a formula-labeled edge. For instance
an edge labeled by \fCtrue\fR will be counted as 2^3=8 transitions if
an edge labeled by \f(CWtrue\fR will be counted as 2^3=8 transitions if
the automaton uses 3 atomic propositions.
.TP
\fBscc\fR, \fBnonacc_scc\fR, \fBterminal_scc\fR, \fBweak_scc\fR, \fBstrong_scc\fR

View file

@ -1,7 +0,0 @@
[NAME]
ltlmix \- combine formulas selected randomly
[DESCRIPTION]
.\" Add any additional description here
[SEE ALSO]
.BR randltl (1),
.BR genltl (1)

View file

@ -4,15 +4,8 @@ ltlsynt \- reactive synthesis from LTL specifications
[BIBLIOGRAPHY]
If you would like to give a reference to this tool in an article,
we suggest you cite the following papers:
we suggest you cite the following paper:
.TP
\(bu
Florian Renkin, Philipp Schlehuber-Caissier, Alexandre Duret-Lutz,
and Adrien Pommellet.
Dissecting ltlsynt. In Formal Methods in System Design, 2023.
.TP
\(bu
Thibaud Michaud and Maximilien Colange.
Reactive Synthesis from LTL Specification with Spot.
In proceedings of SYNT@CAV'18.
Thibaud Michaud, Maximilien Colange: Reactive Synthesis from LTL
Specification with Spot. Proceedings of SYNT@CAV'18.

View file

@ -14,4 +14,3 @@ Proceedings of ATVA'13. LNCS 8172.
.BR genltl (1),
.BR ltlfilt (1),
.BR randaut (1)
.BR ltlmix (1)

View file

@ -10,7 +10,7 @@ spot-x \- Common fine-tuning options and environment variables.
.\" Add any additional description here
[SAT\-MINIMIZE VALUES]
When the \fBsat-minimize=K\fR option is used to enable SAT-based
When the sat-minimize=K option is used to enable SAT-based
minimization of deterministic automata, a SAT solver is
used to minimize an input automaton with N states into an
output automaton with 1≤M≤N states. The parameter K specifies
@ -19,7 +19,7 @@ how the smallest possible M should be searched.
.TP
\fB1\fR
The default, \fB1\fR, performs a binary search between 1 and N. The
lower bound can sometimes be improved when the \fBsat\-langmap\fR
lower bound can sometimes be improved when the \fBsat-langmap\fR
option is used.
.TP
@ -50,15 +50,6 @@ time. Note that it restarts all the encoding each time.
If this variable is set to any value, statistics about BDD garbage
collection and resizing will be output on standard error.
.TP
\fBSPOT_CONTAINMENT_CHECK\fR
Specifies which inclusion algorithm Spot should use. If the variable
is unset, or set to \fB"default"\fR, containment checks are done
using a complementation-based procedure. If the variable is set to
\fB"forq"\fR, then the FORQ-based containment check is used for Büchi automata
(the default procedure is still used for non-Büchi automata). See
[6] in the bibliography below.
.TP
\fBSPOT_DEFAULT_FORMAT\fR
Set to a value of \fBdot\fR or \fBhoa\fR to override the default
@ -72,12 +63,12 @@ passed to the printer by suffixing the output format with
.in +4n
.EX
% SPOT_DEFAULT_FORMAT=dot=bar autfilt ...
.EE
.EN
.in -4n
is the same as running
.in +4n
.EX
% autfilt \-\-dot=bar ...
% autfilt --dot=bar ...
.EE
.in -4n
but the use of the environment variable makes more sense if you set
@ -90,13 +81,13 @@ executed in debug mode, showing how the input is processed.
.TP
\fBSPOT_DOTDEFAULT\fR
Whenever the \fB\-\-dot\fR option is used without argument (even
Whenever the \f(CW--dot\fR option is used without argument (even
implicitely via \fBSPOT_DEFAULT_FORMAT\fR), the contents of this
variable are used as default argument. If you have some default
variable is used as default argument. If you have some default
settings in \fBSPOT_DOTDEFAULT\fR and want to append to options
\fCxyz\fR temporarily for one call, use \fB\-\-dot=.xyz\fR:
\f(CWxyz\fR temporarily for one call, use \f(CW--dot=.xyz\fR:
the dot character will be replaced by the contents of the
\fBSPOT_DOTDEFAULT\fR environment variable.
\f(CWSPOT_DOTDEFAULT\fR environment variable.
.TP
\fBSPOT_DOTEXTRA\fR
@ -104,14 +95,6 @@ The contents of this variable is added to any dot output, immediately
before the first state is output. This makes it easy to override
global attributes of the graph.
.TP
\fBSPOT_EXCLUSIVE_WORD\fR
Specifies which algorithm spot should use for exclusive_word. This can
currently take on 1 of 2 values: 0 for the legacy implementation, and 1
for the forq implementation [6] (See bibliography below). Forq assumes buchi
automata in order to find an exclusive word, and will default to the legacy
version if these constraints are not satisfied with the automata passed.
.TP
\fBSPOT_HOA_TOLERANT\fR
If this variable is set, a few sanity checks performed by the HOA
@ -123,7 +106,7 @@ is actually used)
.TP
\fBSPOT_O_CHECK\fR
Specifies the default algorithm that should be used
by the \fCis_obligation()\fR function. The value should
by the \f(CWis_obligation()\fR function. The value should
be one of the following:
.RS
.RS
@ -141,24 +124,24 @@ by a weak and deterministic Büchi automata.
.TP
\fBSPOT_OOM_ABORT\fR
If this variable is set, Out-Of-Memory errors will \fCabort()\fR the
If this variable is set, Out-Of-Memory errors will \f(CWabort()\fR the
program (potentially generating a coredump) instead of raising an
exception. This is useful to debug a program and to obtain a stack
trace pointing to the function doing the allocation. When this
variable is unset (the default), \fCstd::bad_alloc\fR are thrown on
variable is unset (the default), \f(CWstd::bad_alloc\fR are thrown on
memory allocation failures, and the stack is usually unwinded up to
top-level, losing the original context of the error. Note that at
least \fCltlcross\fR has some custom handling of
\fCstd::bad_alloc\fR to recover from products that are too large (by
least \f(CWltlcross\fR has some custom handling of
\f(CWstd::bad_alloc\fR to recover from products that are too large (by
ignoring them), and setting this variable will interfer with that.
.TP
\fBSPOT_PR_CHECK\fR
Select the default algorithm that must be used to check the persistence
or recurrence property of a formula f. The values it can take are between
1 and 3. All methods work either on f or !f thanks to the duality of
1 and 3. All methods work either on f or !f thanks to the duality of
persistence and recurrence classes. See
.UR https://spot.lre.epita.fr/hierarchy.html
.UR https://spot.lrde.epita.fr/hierarchy.html
this page
.UE
for more details. If it is set to:
@ -196,8 +179,8 @@ format.
If set, this variable should indicate how to call an external
SAT\-solver \- by default, Spot uses PicoSAT, which is distributed
with. This is used by the sat\-minimize option described above.
The format to follow is the following: \fC"<sat_solver> [options] %I >%O"\fR.
The escape sequences \fC%I\fR and \fC%O\fR respectively
The format to follow is the following: \f(CW"<sat_solver> [options] %I >%O"\fR.
The escape sequences \f(CW%I\fR and \f(CW%O\fR respectively
denote the names of the input and output files. These temporary files
are created in the directory specified by \fBSPOT_TMPDIR\fR or
\fBTMPDIR\fR (see below). The SAT\-solver should follow the convention
@ -252,11 +235,10 @@ sl(a) x sl(!a), performed on-the-fly
.IP 8
cl(a) x cl(!a)
.RE
This variable is used by the \fB\-\-check=stutter-invariance\fR and
\fB\-\-stutter-invariant\fR options, but it is ignored by
\fB\-\-check=stutter-sensitive-example\fR.
.RE
This variable is used by the \fB--check=stutter-invariance\fR and
\fB--stutter-invariant\fR options, but it is ignored by
\fB--check=stutter-sensitive-example\fR.
.TP
\fBSPOT_SIMULATION_REDUCTION\fR
@ -281,7 +263,7 @@ This is mostly useful for debugging.
.TP
\fBSPOT_XCNF\fR
Assign a folder path to this variable to generate XCNF files whenever
SAT\-based minimization is used \- the file is output as "incr.xcnf"
SAT\-based minimization is used \- the file is outputed as "incr.xcnf"
in the specified directory. This feature works only with an external
SAT\-solver. See \fBSPOT_SATSOLVER\fR to know how to provide one. Also note
that this needs an incremental approach without restarting the encoding i.e
@ -290,9 +272,6 @@ autfilt (see sat\-minimize options described above or autfilt man page).
The XCNF format is the one used by the SAT incremental competition.
[BIBLIOGRAPHY]
The following papers are related to some of the options and
environment variables.
.TP
1.
Christian Dax, Jochen Eisinger, Felix Klaedtke: Mechanizing the
@ -332,20 +311,12 @@ Describes the stutter-invariance checks that can be selected through
5.
Javier Esparza, Jan Křetínský and Salomon Sickert: One Theorem to Rule
Them All: A Unified Translation of LTL into ω-Automata. Proceedings
of LICS'18.
of LICS'18. To appear.
Describes (among other things) the constructions used for translating
formulas of the form GF(guarantee) or FG(safety), that can be
disabled with \fB-x gf-guarantee=0\fR.
.TP
6.
Kyveli Doveri and Pierre Ganty and Nicolas Mazzocchi:
FORQ-Based Language Inclusion Formal Testing.
Proceedings of CAV'22. LNCS 13372.
The containment check implemented as \fCspot::contains_forq()\fR, and
used for Büchi automata when \fBSPOT_CONTAINMENT_CHECK=forq\fR.
[SEE ALSO]
.BR ltl2tgba (1)

View file

@ -22,12 +22,11 @@ that are listed below.
.BR ltldo (1)
.BR ltlfilt (1)
.BR ltlgrind (1)
.BR ltlmix (1)
.BR ltlsynt (1)
.BR randaut (1)
.BR randltl (1)
.BR spot-x (7)
.UR https://spot.lre.epita.fr/
.UR https://spot.lrde.epita.fr/
The Spot web page.
.UE

View file

@ -1,6 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2014 Laboratoire de Recherche et
# Développement de l'Epita (LRDE).
#
# This file is part of Spot, a model checking library.
#
@ -29,10 +30,10 @@ import subprocess
with open('Makefile.am', 'r') as mf:
lines = mf.read()
lines = re.sub(r'\s*\\\s*', ' ', lines)
bin_programs = re.search(r'bin_PROGRAMS\s*=([\w \t]*)', lines).group(1).split()
lines = re.sub('\s*\\\\\s*', ' ', lines)
bin_programs = re.search('bin_PROGRAMS\s*=([\w \t]*)', lines).group(1).split()
optre = re.compile(r'(-\w), (--[\w=-]+)')
optre = re.compile('(-\w), (--[\w=-]+)')
d = {}

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2016, 2018-2020 Laboratoire de Recherche et
// Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -41,7 +42,7 @@
#include <spot/twaalgos/canonicalize.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] = "\
Generate random connected automata.\n\n\
The automata are built over the atomic propositions named by PROPS...\n\
or, if N is a nonnegative number, using N arbitrary names.\n\
@ -384,8 +385,6 @@ main(int argc, char** argv)
if (opt_acceptance)
aut->set_acceptance(accs, code);
if (aut->acc().is_t() || aut->acc().is_f())
aut->prop_weak(true);
if (opt_uniq)
{
@ -407,8 +406,7 @@ main(int argc, char** argv)
timer.stop();
printer.print(aut, timer, nullptr,
opt_seed_str, automaton_num,
automaton_num, nullptr);
opt_seed_str, automaton_num, nullptr);
++automaton_num;
if (opt_automata > 0 && automaton_num >= opt_automata)

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2012-2016, 2018-2019 Laboratoire de Recherche
// et Développement de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -39,7 +40,7 @@
#include <spot/misc/random.hh>
#include <spot/misc/optionmap.hh>
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Generate random temporal logic formulas.\n\n\
The formulas are built over the atomic propositions named by PROPS...\n\
or, if N is a nonnegative number, using N arbitrary names.\v\
@ -64,6 +65,7 @@ enum {
OPT_DUMP_PRIORITIES,
OPT_DUPS,
OPT_LTL_PRIORITIES,
OPT_PSL_PRIORITIES,
OPT_SEED,
OPT_SERE_PRIORITIES,
OPT_TREE_SIZE,
@ -114,10 +116,8 @@ static const argp_option options[] =
"the following interpreted sequences:", -19 },
{ "%f", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the formula (in the selected syntax)", 0 },
{ "%l", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the (serial) number of the formula (0-based)", 0 },
{ "%L", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"the (serial) number of the formula (1-based)", 0 },
"the (serial) number of the formula", 0 },
{ "%%", 0, nullptr, OPTION_DOC | OPTION_NO_USAGE,
"a single %", 0 },
COMMON_LTL_OUTPUT_SPECS,
@ -181,7 +181,6 @@ parse_opt(int key, char* arg, struct argp_state* as)
break;
case 'S':
output = spot::randltlgenerator::SERE;
output_ratexp = true;
break;
case OPT_BOOLEAN_PRIORITIES:
opt_pB = arg;
@ -195,6 +194,7 @@ parse_opt(int key, char* arg, struct argp_state* as)
case OPT_DUMP_PRIORITIES:
opt_dump_priorities = true;
break;
// case OPT_PSL_PRIORITIES: break;
case OPT_SERE_PRIORITIES:
opt_pS = arg;
break;
@ -307,9 +307,9 @@ main(int argc, char** argv)
exit(0);
}
int count = 0;
while (opt_formulas < 0 || opt_formulas--)
{
static int count = 0;
spot::formula f = rg.next();
if (!f)
{
@ -318,8 +318,7 @@ main(int argc, char** argv)
}
else
{
output_formula_checked(f, nullptr, nullptr, count + 1, count);
++count;
output_formula_checked(f, nullptr, nullptr, ++count);
}
};
flush_cout();

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2013-2021 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -23,7 +24,7 @@
#include <argp.h>
#include "common_setup.hh"
static const char argp_program_doc[] = "\
const char argp_program_doc[] ="\
Common fine-tuning options for programs installed with Spot.\n\
\n\
The argument of -x or --extra-options is a comma-separated list of KEY=INT \
@ -46,17 +47,9 @@ depends on the --low, --medium, or --high settings.") },
{ DOC("tls-max-states",
"Maximum number of states of automata involved in automata-based \
implication checks for formula simplifications. Defaults to 64.") },
{ DOC("tls-max-ops",
"Maximum number of operands in n-ary operators (or, and) on which \
implication-based simplifications are attempted. Defaults to 16.") },
{ nullptr, 0, nullptr, 0, "Translation options:", 0 },
{ DOC("ltl-split", "Set to 0 to disable the translation of automata \
as product or sum of subformulas.") },
{ DOC("branch-post", "Set to 0 to disable branching-postponement \
(done during translation, may create more states) and delayed-branching \
(almost similar, but done after translation to only remove states). \
Set to 1 to force branching-postponement, and to 2 \
to force delayed-branching. By default, delayed-branching is used.") },
{ DOC("comp-susp", "Set to 1 to enable compositional suspension, \
as described in our SPIN'13 paper (see Bibliography below). Set to 2, \
to build only the skeleton TGBA without composing it. Set to 0 (the \
@ -82,16 +75,13 @@ only if it is smaller than the original skeleton. This option is only \
used when comp-susp=1 and default to 1 or 2 depending on whether --small \
or --deterministic is specified.") },
{ nullptr, 0, nullptr, 0, "Postprocessing options:", 0 },
{ DOC("acd", "Set to 1 (the default) to use paritize automata using \
the alternating cycle decomposition. Set to 0 to use paritization based \
on latest appearance record variants.") },
{ DOC("scc-filter", "Set to 1 (the default) to enable \
SCC-pruning and acceptance simplification at the beginning of \
post-processing. Transitions that are outside accepting SCC are \
post-processing. Transitions that are outside of accepting SCC are \
removed from accepting sets, except those that enter into an accepting \
SCC. Set to 2 to remove even these entering transition from the \
accepting sets. Set to 0 to disable this SCC-pruning and acceptance \
simplification pass.") },
simpification pass.") },
{ DOC("degen-reset", "If non-zero (the default), the \
degeneralization algorithm will reset its level any time it exits \
an SCC.") },
@ -106,7 +96,7 @@ uses the first level created, 2 uses the minimum level seen so far, and \
will compute an independent degeneralization order for each SCC it \
processes. This is currently disabled by default.") },
{ DOC("degen-lskip", "If non-zero (the default), the degeneralization \
algorithm will skip as many levels as possible for each transition. This \
algorithm will skip as much levels as possible for each transition. This \
is enabled by default as it very often reduce the number of resulting \
states. A consequence of skipping levels is that the degeneralized \
automaton tends to have smaller cycles around the accepting states. \
@ -121,7 +111,7 @@ level, as it might favor finding accepting cycles earlier. If \
degen-lowinit is non-zero, then level L is always used without looking \
for the presence of an accepting self-loop.") },
{ DOC("degen-remscc", "If non-zero (the default), make sure the output \
of the degeneralization has as many SCCs as the input, by removing superfluous \
of the degenalization has as many SCCs as the input, by removing superfluous \
ones.") },
{ DOC("det-max-states", "When defined to a positive integer N, \
determinizations will be aborted whenever the number of generated \
@ -174,44 +164,25 @@ Set to 1 to use only direct simulation. Set to 2 to use only reverse \
simulation. Set to 3 to iterate both direct and reverse simulations. \
The default is the value of parameter \"simul\" in --high mode, and 0 \
therwise.") },
{ DOC("merge-states-min", "Number of states above which states are \
merged using a cheap approximation of a bisimulation quotient before \
attempting simulation-based reductions. Defaults to 128. Set to 0 to \
never merge states.") },
{ DOC("simul-max", "Number of states above which simulation-based \
reductions are skipped. Defaults to 4096. Set to 0 to disable. This \
applies to all simulation-based optimization, including those of the \
applies to all simulation-based optimization, including thoses of the \
determinization algorithm.") },
{ DOC("simul-trans-pruning", "Number of equivalence classes above which \
simulation-based transition-pruning for non-deterministic automata is \
disabled. Defaults to 512. Set to 0 to disable. This applies to all \
simulation-based reductions, as well as to the simulation-based optimization \
of the determinization algorithm. Simulation-based reductions perform a \
number of BDD implication checks that is quadratic in the number of classes to \
implement transition pruning. The number of equivalence classes is equal to \
the number of output states of the simulation-based reduction when \
transition-pruning is disabled, it is just an upper bound otherwise.") },
simulation-based transition-pruning for non-deterministic automata is disabled \
automata. Defaults to 512. Set to 0 to disable. This applies to all \
simulation-based reduction, as well as to the simulation-based optimization of \
the determinization algorithm. Simulation-based reduction perform a number of \
BDD implication checks that is quadratic in the number of classes to implement \
transition pruning. The equivalence classes is equal to the number \
of output states of simulation-based reduction when transition-pruning is \
disabled, it is just an upper bound otherwise.") },
{ DOC("relabel-bool", "If set to a positive integer N, a formula \
with N atomic propositions or more will have its Boolean subformulas \
abstracted as atomic propositions during the translation to automaton. \
This relabeling can speeds the translation if a few Boolean subformulas \
use many atomic propositions. This relabeling make sure \
the subexpressions that are replaced do not share atomic propositions. \
By default N=4. Setting this value to 0 will disable the rewriting.") },
{ DOC("relabel-overlap", "If set to a positive integer N, a formula \
with N atomic propositions or more will have its Boolean subformulas \
abstracted as atomic propositions during the translation to automaton. \
This version does not care about overlapping atomic propositions, so \
it can cause the created temporary automata to have incompatible \
combinations of atomic propositions that will be eventually be removed. \
This relabeling is attempted after relabel-bool. By default, N=8. Setting \
use a large number of atomic propositions. By default N=4. Setting \
this value to 0 will disable the rewriting.") },
{ DOC("rde", "Disable (0), or enable (1) the 'restrict-dead-end-edges' \
optimization. A dead-end-edge is one that move to a state that has only \
itself as successors. The label of such edges can be simplified in some \
situtation, reducing non-determinism slightly. By default (-1), this is \
enabled only in --high mode, or if both --medium and --deterministic are \
used.") },
{ DOC("wdba-minimize", "Set to 0 to disable WDBA-minimization, to 1 to \
always try it, or 2 to attempt it only on syntactic obligations or on automata \
that are weak and deterministic. The default is 1 in --high mode, else 2 in \
@ -226,7 +197,7 @@ or when det-max-states is set.") },
if the TGBA is not already deterministic. Doing so will degeneralize \
the automaton. This is disabled by default, unless sat-minimize is set.") },
{ DOC("dba-simul", "Set to 1 to enable simulation-based reduction after \
running the powerset determinization enabled by \"tba-det\". By default, this \
running the powerset determinization enabled by \"tba-det\". By default this \
is disabled at low level or if parameter \"simul\" is set to 0.") },
{ DOC("sat-minimize",
"Set to a value between 1 and 4 to enable SAT-based minimization \
@ -261,11 +232,11 @@ sets sat-minimize to 1 if not set differently.") },
{ DOC("state-based",
"Set to 1 to instruct the SAT-minimization procedure to produce \
an automaton where all outgoing transition of a state have the same acceptance \
sets. By default, this is only enabled when options -B or -S are used.") },
sets. By default this is only enabled when options -B or -S are used.") },
{ DOC("simul-method",
"Chose which simulation based reduction to use: 1 force the \
signature-based BDD implementation, 2 force matrix-based and 0, the default, \
is a heuristic which chooses which implementation to use.") },
is a heristic wich choose which implementation to use.") },
{ nullptr, 0, nullptr, 0, nullptr, 0 }
};

View file

@ -1,5 +1,6 @@
// -*- coding: utf-8 -*-
// Copyright (C) by the Spot authors, see the AUTHORS file for details.
// Copyright (C) 2013-2018 Laboratoire de Recherche et Développement
// de l'Epita (LRDE).
//
// This file is part of Spot, a model checking library.
//
@ -23,8 +24,7 @@
#include <argp.h>
#include "common_setup.hh"
static const char argp_program_doc[] =
"Command-line tools installed by Spot.";
const char argp_program_doc[] ="Command-line tools installed by Spot.";
#define DOC(NAME, TXT) NAME, 0, nullptr, OPTION_DOC | OPTION_NO_USAGE, TXT, 0
@ -37,8 +37,6 @@ static const argp_option options[] =
{ DOC("ltlgrind", "Mutate LTL or PSL formulas to generate similar but "
"simpler ones. Use this when looking for shorter formula to "
"reproduce a bug.") },
{ DOC("ltlmix",
"Combine LTL/PSL formulas taken randomly from some input set.") },
{ nullptr, 0, nullptr, 0, "Tools that output automata or circuits:", 0 },
{ DOC("randaut", "Generate random ω-automata.") },
{ DOC("genaut", "Generate ω-automata from scalable patterns.") },

View file

@ -2300,7 +2300,7 @@ RETURN {* The quantified BDD. *}
*/
BDD bdd_forall(BDD r, BDD var)
{
return quantify(r, var, bddop_and, 0, CACHEID_FORALL);
return quantify(r, var, bddop_and, 0, CACHEID_EXIST);
}
/*
@ -2315,7 +2315,7 @@ RETURN {* The quantified BDD. *}
*/
BDD bdd_forallcomp(BDD r, BDD var)
{
return quantify(r, var, bddop_and, 1, CACHEID_FORALLC);
return quantify(r, var, bddop_and, 1, CACHEID_EXISTC);
}

View file

@ -1,5 +1,5 @@
/*========================================================================
Copyright (C) 1996-2003 by Jorn Lind-Nielsen
Copyright (C) 1996-2003, 2021 by Jorn Lind-Nielsen
All rights reserved
Permission is hereby granted, without written agreement and without
@ -501,8 +501,6 @@ BUDDY_API_VAR const BDD bddtrue;
*************************************************************************/
#ifdef CPLUSPLUS
#include <iostream>
#include <memory>
#include <type_traits>
/*=== User BDD class ===================================================*/
@ -1094,11 +1092,6 @@ inline bddxfalse bdd_false(void)
{ return bddxfalse(); }
template<>
struct std::default_delete<bddPair> {
void operator()(bddPair *p) const { bdd_freepair(p); };
};
/*=== Iostream printing ================================================*/
class BUDDY_API bdd_ioformat
@ -1135,18 +1128,7 @@ public:
class minterm_iterator
{
public:
typedef bdd value_type;
typedef value_type& reference;
typedef value_type* pointer;
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
explicit minterm_iterator() noexcept
: me_(nullptr)
{
}
minterm_iterator(minterms_of* me) noexcept
minterm_iterator(minterms_of* me)
: me_(me)
{
}

View file

@ -210,6 +210,7 @@ static BddTree *reorder_win2ite(BddTree *t)
{
BddTree *this, *first=t;
int lastsize;
int c=1;
if (t == NULL)
return t;
@ -245,6 +246,7 @@ static BddTree *reorder_win2ite(BddTree *t)
if (verbose > 1)
printf(" %d nodes\n", reorder_nodenum());
c++;
}
while (reorder_nodenum() != lastsize);

View file

@ -1,5 +1,9 @@
# -*- coding: utf-8 -*-
# Copyright (C) by the Spot authors, see the AUTHORS file for details.
# Copyright (C) 2008-2022, Laboratoire de Recherche et Développement
# de l'Epita (LRDE).
# Copyright (C) 2003-2007 Laboratoire d'Informatique de Paris 6
# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université
# Pierre et Marie Curie.
#
# This file is part of Spot, a model checking library.
#
@ -17,7 +21,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
AC_PREREQ([2.69])
AC_INIT([spot], [2.12.2.dev], [spot@lrde.epita.fr])
AC_INIT([spot], [2.10.4.dev], [spot@lrde.epita.fr])
AC_CONFIG_AUX_DIR([tools])
AC_CONFIG_MACRO_DIR([m4])
AM_INIT_AUTOMAKE([1.11 gnu tar-ustar color-tests parallel-tests])
@ -49,15 +53,6 @@ AC_ARG_ENABLE([c++20],
[Compile in C++20 mode.])],
[enable_20=$enableval], [enable_20=no])
AC_ARG_ENABLE([pthread],
[AS_HELP_STRING([--enable-pthread],
[Allow libspot to use POSIX threads.])],
[enable_pthread=$enableval], [enable_pthread=no])
if test "$enable_pthread" = yes; then
AC_DEFINE([ENABLE_PTHREAD], [1], [Whether Spot is compiled with -pthread.])
AC_SUBST([LIBSPOT_PTHREAD], [-pthread])
fi
AC_ARG_ENABLE([doxygen],
[AS_HELP_STRING([--enable-doxygen],
[enable generation of Doxygen documentation (requires Doxygen)])],
@ -75,7 +70,6 @@ if test 0 -eq `expr $enable_max_accsets % $default_max_accsets`
then
AC_DEFINE_UNQUOTED([MAX_ACCSETS], [$enable_max_accsets],
[The maximal number of acceptance sets supported (also known as acceptance marks)])
AC_SUBST([MAX_ACCSETS], [$enable_max_accsets])
else
AC_MSG_ERROR([The argument of --enable-max-accsets must be a multiple of $default_max_accsets])
fi
@ -156,7 +150,7 @@ AX_CHECK_BUDDY
AC_CHECK_FUNCS([times kill alarm sigaction sched_getcpu])
oLIBS=$LIBS
LIBS="$LIBS -pthread"
LIBS="$LIBS -lpthread"
AC_CHECK_FUNCS([pthread_setaffinity_np])
LIBS=$oLIBS
@ -185,14 +179,9 @@ if test "x${enable_python:-yes}" = xyes; then
AC_MSG_NOTICE([You may configure with --disable-python ]dnl
[if you do not need Python bindings.])
adl_CHECK_PYTHON
AC_ARG_WITH([pythondir],
[AS_HELP_STRING([--with-pythondir], [override the computed pythondir])],
[pythondir=$withval pyexecdir=$withval], [])
fi
adl_ENABLE_DEBUG
ad_GCC_OPTIM
adl_NDEBUG
@ -213,14 +202,14 @@ AC_CHECK_PROG([LTL3BA], [ltl3ba], [ltl3ba])
AC_CHECK_PROG([PERL], [perl], [perl])
AC_CHECK_PROG([SPIN], [spin], [spin])
AC_CHECK_PROG([LBTT], [lbtt], [lbtt])
AM_MISSING_PROG([EMACS], [emacs])
AC_CHECK_PROG([EMACS], [emacs], [emacs])
AC_CHECK_PROGS([IPYTHON], [ipython3 ipython], [ipython])
AC_CHECK_PROGS([JUPYTER], [jupyter], [jupyter])
AC_CHECK_PROG([LBTT_TRANSLATE], [lbtt-translate], [lbtt-translate])
AX_CHECK_VALGRIND
# Debian used to reserve the name 'swig' for swig-2.0. So prefer
# swig4.0 (available in Debian bullseye) to swig3.0 (available in Debian buster)
# to swig.
# ti swig.
AC_CHECK_PROGS([SWIG], [swig4.0 swig3.0 swig], [swig])
AC_SUBST([CROSS_COMPILING], [$cross_compiling])
@ -291,23 +280,3 @@ case $VERSION:$enable_devel in
echo '==================================================================='
;;
esac
case $enable_python in
yes)
pd=$pythondir
eval pd=$pd
eval pd=$pd
$PYTHON -c "
import sys
if '$pd' in sys.path:
exit()
else:
print('\nWARNING: Python bindings will be installed in $pd')
print(' however this path is not searched by default by $PYTHON.')
print('\n$PYTHON\'s sys.path contains the following paths:\n',
'\n'.join(sys.path))
print('\nUse --with-pythondir=... if you wish '
'to change this installation path.')
"
;;
esac

2
debian/control vendored
View file

@ -2,7 +2,7 @@ Source: spot
Section: science
Priority: optional
Maintainer: Alexandre Duret-Lutz <adl@lrde.epita.fr>
Build-Depends: debhelper (>= 12), python3-all-dev, python3-ipykernel, python3-nbconvert, libltdl-dev, dh-python, graphviz, jupyter-nbconvert, doxygen, gdb
Build-Depends: debhelper (>= 12), python3-all-dev, ipython3-notebook | python3-ipykernel, ipython3-notebook | python3-nbconvert, libltdl-dev, dh-python
Standards-Version: 4.5.1
Homepage: http://spot.lrde.epita.fr/

7
debian/copyright vendored
View file

@ -1,9 +1,10 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: spot
Source: http://www.lrde.epita.fr/dload/spot/
Source: http://spot.lrde.epita.fr/dload/spot/
Files: *
Copyright: 2003-2025 the Spot authors
Copyright: 2003-2007 Laboratoire d'Informatique de Paris 6 (LIP6)
2007-2022 Laboratoire de Recherche et Développement de l'Epita (LRDE)
License: GPL-3+
Spot is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
@ -160,7 +161,7 @@ License: GPL-3+
Public License version 3 can be found in "/usr/share/common-licenses/GPL-3".
Files: debian/*
Copyright: 2015-2023 the Spot authors
Copyright: 2015-2018 Laboratoire de Recherche et Développement de l'Epita (LRDE)
License: GPL-3+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

22
debian/rules vendored
View file

@ -20,16 +20,20 @@ include /usr/share/dpkg/default.mk
%:
dh $@ --with=python3
# Find the LTO plugin, which we need to pass to ar, nm, and ranlib.
LTOPLUG := $(shell gcc -v 2>&1 | \
sed -n 's:COLLECT_LTO_WRAPPER=\(/.*/\)[^/]*:\1:p')liblto_plugin.so
# ARFLAGS is for Automake
# AR_FLAGS is for Libtool, (but libtool 2.4.7 will now use ARFLAGS as well)
# The gcc-tools activate the LTO plugin.
LTOSETUP = \
LDFLAGS='-fuse-linker-plugin' \
NM='gcc-nm' \
AR='gcc-ar' \
ARFLAGS='cr' \
AR_FLAGS='cr' \
RANLIB='gcc-ranlib' \
# AR_FLAGS is for Libtool
# These activate the LTO pluggin, but also remove the 'u' option
# from ar, since its now ignored with Debian's default to 'D'.
LTOSETUP = \
LDFLAGS='-fuse-linker-plugin' \
NM='nm --plugin $(LTOPLUG)' \
ARFLAGS='cr --plugin $(LTOPLUG)' \
AR_FLAGS='cr --plugin $(LTOPLUG)' \
RANLIB='ranlib --plugin $(LTOPLUG)' \
VALGRIND=false
GCDADIR := $(shell pwd)/gcda

View file

@ -1,5 +1,9 @@
## -*- coding: utf-8 -*-
## Copyright (C) by the Spot authors, see the AUTHORS file for details.
## Copyright (C) 2010-2011, 2013-2021 Laboratoire de Recherche et
## Développement de l'Epita (LRDE).
## Copyright (C) 2003-2005 Laboratoire d'Informatique de Paris 6
## (LIP6), département Systèmes Répartis Coopératifs (SRC), Université
## Pierre et Marie Curie.
##
## This file is part of Spot, a model checking library.
##
@ -104,7 +108,6 @@ ORG_FILES = \
org/ltldo.org \
org/ltlfilt.org \
org/ltlgrind.org \
org/ltlmix.org \
org/ltlsynt.org \
org/ltlsynt.tex \
org/oaut.org \
@ -124,7 +127,6 @@ ORG_FILES = \
org/tut22.org \
org/tut23.org \
org/tut24.org \
org/tut25.org \
org/tut30.org \
org/tut31.org \
org/tut40.org \

View file

@ -27,9 +27,6 @@
(setenv "SPOT_DOTEXTRA" "node[fontsize=12] fontsize=12 stylesheet=\"spot.css\" edge[arrowhead=vee, arrowsize=.7, fontsize=12]")
(setq org-babel-temporary-directory "@abs_top_builddir@/doc/org/tmp")
(make-directory org-babel-temporary-directory t)
; has to be set globally, not buffer-local
(setq ess-ask-for-ess-directory nil)
(setq ess-startup-directory 'default-directory)
(org-babel-do-load-languages 'org-babel-load-languages
`((,(if (version< org-version "8.3") 'sh 'shell) . t)
(python . t)
@ -42,6 +39,7 @@
(org-babel-python-command . "@PYTHON@")
(org-babel-C++-compiler . "./g++wrap")
(shell-file-name . "@SHELL@")
(ess-ask-for-ess-directory . nil)
(org-export-html-postamble . nil)
(org-html-table-header-tags
"<th scope=\"%s\"%s><div><span>" . "</span></div></th>")

1
doc/org/.gitignore vendored
View file

@ -19,4 +19,3 @@ g++wrap
*.fls
sitemap.org
plantuml.jar
toba.py

View file

@ -22,19 +22,18 @@
usedby/.style={->,ultra thick,>={Stealth[length=5mm,round]},gray!50!black}}
\node[cppbox=14.12cm] (libspot) {\texttt{libspot\strut}};
\node[shbox=3cm,above right=2mm and 0mm of libspot.north west,align=center] (shcmd) {
\href{https://spot.lre.epita.fr/randltl.html}{\texttt{randltl}}\\
\href{https://spot.lre.epita.fr/ltlmix.html}{\texttt{ltlmix}}\\
\href{https://spot.lre.epita.fr/ltlfilt.html}{\texttt{ltlfilt}}\\
\href{https://spot.lre.epita.fr/randaut.html}{\texttt{randaut}}\\
\href{https://spot.lre.epita.fr/autfilt.html}{\texttt{autfilt}}\\
\href{https://spot.lre.epita.fr/ltl2tgba.html}{\texttt{ltl2tgba}}\\
\href{https://spot.lre.epita.fr/ltl2tgta.html}{\texttt{ltl2tgta}}\\
\href{https://spot.lre.epita.fr/dstar2tgba.html}{\texttt{dstar2tgba}}\\
\href{https://spot.lre.epita.fr/ltlcross.html}{\texttt{ltlcross}}\\
\href{https://spot.lre.epita.fr/ltlgrind.html}{\texttt{ltlgrind}}\\
\href{https://spot.lre.epita.fr/ltlsynt.html}{\texttt{ltlsynt}}\\
\href{https://spot.lre.epita.fr/ltldo.html}{\texttt{ltldo}}\\
\href{https://spot.lre.epita.fr/autcross.html}{\texttt{autcross}}
\href{https://spot.lrde.epita.fr/randltl.html}{\texttt{randltl}}\\
\href{https://spot.lrde.epita.fr/ltlfilt.html}{\texttt{ltlfilt}}\\
\href{https://spot.lrde.epita.fr/randaut.html}{\texttt{randaut}}\\
\href{https://spot.lrde.epita.fr/autfilt.html}{\texttt{autfilt}}\\
\href{https://spot.lrde.epita.fr/ltl2tgba.html}{\texttt{ltl2tgba}}\\
\href{https://spot.lrde.epita.fr/ltl2tgta.html}{\texttt{ltl2tgta}}\\
\href{https://spot.lrde.epita.fr/dstar2tgba.html}{\texttt{dstar2tgba}}\\
\href{https://spot.lrde.epita.fr/ltlcross.html}{\texttt{ltlcross}}\\
\href{https://spot.lrde.epita.fr/ltlgrind.html}{\texttt{ltlgrind}}\\
\href{https://spot.lrde.epita.fr/ltlsynt.html}{\texttt{ltlsynt}}\\
\href{https://spot.lrde.epita.fr/ltldo.html}{\texttt{ltldo}}\\
\href{https://spot.lrde.epita.fr/autcross.html}{\texttt{autcross}}
};
\node[cppbox=4.7cm,above right=0mm and 2mm of shcmd.south east] (libgen) {\texttt{libspotgen\strut}};
\node[cppbox=2.5cm,above right=0mm and 2mm of libgen.south east] (buddy) {\texttt{libbddx\strut}};
@ -42,8 +41,8 @@
\node[cppbox=4cm,above right=0mm and 2mm of pyspot.south east] (libltsmin) {\texttt{libspotltsmin\strut}};
\node[shbox=1.5cm,above right=2mm and 0mm of libgen.north west,align=center] (genaut) {
\href{https://www.lre.epita.fr/genaut.html}{\texttt{genaut\strut}}\\
\href{https://www.lre.epita.fr/genltl.html}{\texttt{genltl}}
\href{https://www.lrde.epita.fr/genaut.html}{\texttt{genaut\strut}}\\
\href{https://www.lrde.epita.fr/genltl.html}{\texttt{genltl}}
};
\node[pybox=3cm,above left=2mm and 0mm of libgen.north east] (pygen) {\texttt{import spot.gen\strut}};

View file

@ -16,7 +16,7 @@ The core of =autcross= is a loop that does the following steps:
will be named =A0=, =A1=, and =A2=.
- Ensure that all produced automata are equivalent.
Statistics about the results of each tool can optionally be saved in
Statistics about the results of each tools can optionally be saved in
a CSV file. And in case only those statistics matters, it is also
possible to disable the equivalence checks.
@ -46,7 +46,7 @@ following character sequences:
: %O filename for the automaton output in HOA, never
: claim, LBTT, or ltl2dstar's format
For instance, we can use =autfilt --complement %H >%O= to indicate that
For instance we can use =autfilt --complement %H >%O= to indicate that
=autfilt= reads one file (=%H=) in the HOA format, and to redirect the
output in file so that =autcross= can find it. The output format is
automatically detected, so a generic =%O= is used for the output file
@ -203,7 +203,7 @@ time) will appear with empty columns at the end of the CSV line.
Those lines with missing data can be omitted with the =--omit-missing=
option.
However, data for bogus automata are still included: as shown below
However data for bogus automata are still included: as shown below
=autcross= will report inconsistencies between automata as errors, but
it does not try to guess who is incorrect.
@ -249,11 +249,11 @@ EOF
| -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | AF | ok | 0 | 0.0211636 | 2 | 21 | 66 | 84 | 2 | 4 | 0 | 0 | 0 |
| -:95.1-140.7 | automaton 2 | 2 | 10 | 26 | 26 | 1 | 2 | 6 | 1 | 0 | L2D | ok | 0 | 0.0028508 | 2 | 24 | 74 | 96 | 2 | 4 | 0 | 0 | 0 |
* Transformation that preserve or complement languages
* Language preserving transformation
By default, =autcross= assumes that for a given input the automata
produced by all tools should be equivalent. However, it does not
assume that those languages should be equivalent to the input (it is
By default =autcross= assumes that for a given input the automata
produced by all tools should be equivalent. However it does not
assume that those language should be equivalent to the input (it is
clearly not the case in our complementation test above).
If the transformation being tested does preserve the language of an
@ -261,13 +261,6 @@ automaton, it is worth to pass the =--language-preserved= option to
=autfilt=. Doing so a bit like adding =cat %H>%O= as another tool: it
will also ensure that the output is equivalent to the input.
Similarly, if the tools being tested implement complementation
algorithm, adding the =--language-complemented= will additionally
compare the outputs using this own complementation algorithm. Using
this option is more efficient than passing =autfilt --complement= as a
tool, since =autcross= can save on complementation by using the input
automaton.
* Detecting problems
:PROPERTIES:
:CUSTOM_ID: checks
@ -277,19 +270,19 @@ If a translator exits with a non-zero status code, or fails to output
an automaton =autcross= can read, and error will be displayed and the
result of the tool will be discarded.
Otherwise, =autcross= performs equivalence checks between each pair of
Otherwise =autcross= performs equivalence checks between each pair of
automata. This is done in two steps. First, all produced automata
=A0=, =A1=, etc. are complemented: the complement automata are
named =Comp(A0)=, =Comp(A1)= etc. Second, =autcross= ensures
that =Ai*Comp(Aj)= is empty for all =i= and =j=.
If the =--language-preserved= option is passed, the =input= automaton
also participates to these equivalence checks.
also participate to these equivalence checks.
To simulate a problem, let's pretend we want to verify that =autfilt
--complement= preserves the input language (clearly it does not, since
it actually complements the language of the automaton).
To simulate a problem, let's compare pretend we want verify that
=autfilt --complement= preserves the input language (clearly it does
not, since it actually complement the language of the automaton).
#+BEGIN_SRC sh :prologue "exec 2>&1" :epilogue true
randaut -B -n 3 a b --name="automaton %L" |
@ -334,15 +327,15 @@ examples would not exit if the language was really preserved by the
tool.
Incoherence between the output of several tools (even with
=--language-preserved=) are reported similarly.
=--language-preserved=) are reported in a similar way.
* Miscellaneous options
** =--stop-on-error=
The =--stop-on-error= option will cause =autcross= to abort on the
first detected error. This includes failure to start some tool,
read its output, or failure to pass the sanity checks. Timeouts are
first detected error. This include failure to start some tool,
read its output, or failure to passe the sanity checks. Timeouts are
allowed unless =--fail-on-timeout= is also given.
One use for this option is when =autcross= is used in combination with
@ -472,7 +465,7 @@ Performing sanity checks and gathering statistics...
No problem detected.
#+end_example
However, in practice you could also use the =name:= field of the input
However in practice you could also use the =name:= field of the input
automaton, combined with =%M= in the tool specification, to designate
an alternate filename to load, or some key to look up somewhere.

View file

@ -29,7 +29,7 @@ process them in batch. (The only restriction is that inside a file an
automaton in LBTT's format may not follow an automaton in
=ltl2dstar='s format.)
By default, the output uses the HOA format. This can be changed using
By default the output uses the HOA format. This can be changed using
[[file:oaut.org][the common output options]] like =--spin=, =--lbtt=, =--dot=,
=--stats=...
@ -128,30 +128,27 @@ autfilt --stats='%s states, %e edges, %a acc-sets, %c SCCs, det=%d'
The following =%= sequences are available:
#+BEGIN_SRC sh :exports results
autfilt --help | sed -n '/ for output):/,/^$/p' | sed '1d;$d'
ltl2tgba --help | sed -n '/ sequences:/,/^$/p' | sed '1d;$d'
#+END_SRC
#+RESULTS:
#+begin_example
%< the part of the line before the formula if it
comes from a column extracted from a CSV file
%> the part of the line after the formula if it comes
from a column extracted from a CSV file
%% a single %
%< the part of the line before the automaton if it
comes from a column extracted from a CSV file
%> the part of the line after the automaton if it
comes from a column extracted from a CSV file
%A, %a number of acceptance sets
%C, %c, %[LETTERS]C, %[LETTERS]c
number of SCCs; you may filter the SCCs to count
%a number of acceptance sets
%c, %[LETTERS]c number of SCCs; you may filter the SCCs to count
using the following LETTERS, possibly
concatenated: (a) accepting, (r) rejecting, (c)
complete, (v) trivial, (t) terminal, (w) weak,
(iw) inherently weak. Use uppercase letters to
negate them.
%D, %d 1 if the automaton is deterministic, 0 otherwise
%E, %e, %[LETTER]E, %[LETTER]e number of edges (add one LETTER to select
(r) reachable [default], (u) unreachable, (a)
all).
%d 1 if the output is deterministic, 0 otherwise
%e number of reachable edges
%f the formula, in Spot's syntax
%F name of the input file
%G, %g, %[LETTERS]G, %[LETTERS]g
acceptance condition (in HOA syntax); add brackets
%g, %[LETTERS]g acceptance condition (in HOA syntax); add brackets
to print an acceptance name instead and LETTERS to
tweak the format: (0) no parameters, (a)
accentuated, (b) abbreviated, (d) style used in
@ -160,32 +157,28 @@ autfilt --help | sed -n '/ for output):/,/^$/p' | sed '1d;$d'
parameter, (p) no parity parameter, (o) name
unknown acceptance as 'other', (s) shorthand for
'lo0'.
%H, %h the automaton in HOA format on a single line (use
%[opt]H or %[opt]h to specify additional options
as in --hoa=opt)
%h the automaton in HOA format on a single line (use
%[opt]h to specify additional options as in
--hoa=opt)
%L location in the input file
%l serial number of the output automaton (0-based)
%M, %m name of the automaton
%N, %n number of nondeterministic states
%P, %p 1 if the automaton is complete, 0 otherwise
%m name of the automaton
%n number of nondeterministic states in output
%p 1 if the output is complete, 0 otherwise
%r wall-clock time elapsed in seconds (excluding
parsing)
%R, %[LETTERS]R CPU time (excluding parsing), in seconds; Add
LETTERS to restrict to (u) user time, (s) system
LETTERS to restrict to (u) user time, (s) system
time, (p) parent process, or (c) children
processes.
%S, %s, %[LETTER]S, %[LETTER]s number of states (add one LETTER to select
(r) reachable [default], (u) unreachable, (a)
all).
%T, %t, %[LETTER]T, %[LETTER]t number of transitions (add one LETTER to
select (r) reachable [default], (u) unreachable,
(a) all).
%U, %u, %[LETTER]U, %[LETTER]u 1 if the automaton contains some universal
%s number of reachable states
%t number of reachable transitions
%u, %[e]u number of states (or [e]dges) with universal
branching
%u, %[LETTER]u 1 if the automaton contains some universal
branching (or a number of [s]tates or [e]dges with
universal branching)
%W, %w one word accepted by the automaton
%X, %x, %[LETTERS]X, %[LETTERS]x
number of atomic propositions declared in the
%w one word accepted by the output automaton
%x, %[LETTERS]x number of atomic propositions declared in the
automaton; add LETTERS to list atomic
propositions with (n) no quoting, (s) occasional
double-quotes with C-style escape, (d)
@ -196,7 +189,7 @@ autfilt --help | sed -n '/ for output):/,/^$/p' | sed '1d;$d'
#+end_example
When a letter is available both as uppercase and lowercase, the
uppercase version refers to the input automaton, while the lowercase
uppercase version refers to the input automaton, while the lowercase
refers to the output automaton. Of course this distinction makes sense
only if =autfilt= was instructed to perform an operation on the input
automaton.
@ -237,7 +230,7 @@ autfilt --help | sed -n '/Filtering options.*:/,/^$/p' | sed '1d;$d'
inherently-weak SCCs is in RANGE. An accepting
SCC is inherently weak if it does not have a
rejecting cycle.
--intersect=FILENAME keep automata whose languages have a non-empty
--intersect=FILENAME keep automata whose languages have a non-empty
intersection with the automaton from FILENAME
--is-alternating keep only automata using universal branching
--is-colored keep colored automata (i.e., exactly one
@ -349,7 +342,7 @@ autfilt --help | sed -n '/Simplification level:/,/^$/p' | sed '1d;$d'
By default, =--any --low= is used, which cause all simplifications to
be skipped. However, if any goal is given, then the simplification level
be skipped. However, if any goal is given, then the simplification level
defaults to =--high= (unless specified otherwise). If a simplification
level is given without specifying any goal, then the goal default to =--small=.
@ -370,7 +363,7 @@ depending on the constraints on the acceptance conditions:
in the output, but it may not always succeed and may output
non-deterministic automata. Note that if =autfilt --deterministic
--tgba= fails to output a deterministic automaton, it does not
necessarily imply that a deterministic TGBA does not exist: it
necessarily imply that a deterministic TGBA does not exist: it
just implies that =autfilt= could not find it.
@ -442,27 +435,19 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d'
generalized Rabin definition from the HOA format;
the "share-inf" option allows clauses to share Inf
sets, therefore reducing the number of sets
--generalized-streett[=unique-fin|share-fin], --gsa[=unique-fin|share-fin]
rewrite the acceptance condition as generalized
Streett; the "share-fin" option allows clauses to
share Fin sets, therefore reducing the number of
sets; the default "unique-fin" does not
--generalized-streett[=unique-fin|share-fin], --gsa[=unique-fin|share-fin] rewrite the
acceptance condition as generalized Streett; the
"share-fin" option allows clauses to share Fin
sets, therefore reducing the number of sets; the
default "unique-fin" does not
--instut[=1|2] allow more stuttering (two possible algorithms)
--keep-states=NUM[,NUM...] only keep specified states. The first state
will be the new initial state. Implies
--remove-unreachable-states.
--kill-states=NUM[,NUM...] mark the specified states as dead (no
successor), and remove them. Implies
--remove-dead-states.
--mask-acc=NUM[,NUM...] remove all transitions in specified acceptance
sets
--merge-transitions merge transitions with same destination and
acceptance
--partial-degeneralize[=NUM1,NUM2,...]
Degeneralize automata according to sets
NUM1,NUM2,... If no sets are given, partial
degeneralization is performed for all conjunctions
of Inf and disjunctions of Fin.
--product=FILENAME, --product-and=FILENAME
build the product with the automaton in FILENAME
to intersect languages
@ -475,8 +460,8 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d'
quantification, or by assigning them 0 or 1
--remove-dead-states remove states that are unreachable, or that cannot
belong to an infinite path
--remove-fin rewrite the automaton without using Fin
acceptance
--remove-fin rewrite the automaton without using Fin acceptance
--remove-unreachable-states
remove states that are unreachable from the
initial state
@ -513,10 +498,6 @@ autfilt --help | sed -n '/Transformations:/,/^$/p' | sed '1d;$d'
sum languages
--sum-and=FILENAME build the sum with the automaton in FILENAME to
intersect languages
--to-finite[=alive] Convert an automaton with "alive" and "!alive"
propositions into a Büchi automaton interpretable
as a finite automaton. States with a outgoing
"!alive" edge are marked as accepting.
#+end_example
* Decorations

View file

@ -6,22 +6,18 @@
* Generic reference
If you need to cite the Spot project, the latest tool paper about
it is the following reference:
If you need to cite the Spot project in some academic paper, please
use the following reference:
- *From Spot 2.0 to Spot 2.10: What's new?*, /Alexandre Duret-Lutz/,
/Etienne Renault/, /Maximilien Colange/, /Florian Renkin/,
/Alexandre Gbaguidi Aisse/, /Philipp Schlehuber-Caissier/, /Thomas
Medioni/, /Antoine Martin/, /Jérôme Dubois/, /Clément Gillard/, and
/Henrich Lauko/. In Proc. of CAV'22, LNCS 13372, pp. 174--187.
Haifa, Israel, Aug. 2022.
([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.22.cav][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.22.cav.pdf][pdf]])
- *Spot 2.0 — a framework for LTL and ω-automata manipulation*,
/Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/,
/Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc.
of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016.
([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]])
#+begin_note
Tools evolve while published papers don't. Please always specify
the version of Spot (or any other tool) you are using when citing it
in a paper. Future versions might have different behaviors.
#+end_note
This provides a quick overview of the entire project (the features
of the library, [[file:tools.org][the tools]], the Python bindings), and provides many
references detailing more specific aspects.
* Other, more specific, references
@ -71,40 +67,21 @@ be more specific about a particular aspect of Spot.
Presents the automaton format [[file:hoa.org][supported by Spot]] and [[http://adl.github.io/hoaf/support.html][several other
tools]].
- *Reactive Synthesis from LTL Specification with Spot*,
/Thibaud Michaud/, /Maximilien Colange/.
In Proc. of SYNT@CAV'18. ([[https://www.lrde.epita.fr/~max/bibtexbrowser.php?key=michaud.18.synt&bib=perso.bib][bib]] | [[https://www.lrde.epita.fr/dload/papers/michaud.18.synt.pdf][pdf]])
Presents the tool [[file:ltlsynt.org][=ltlsynt=]].
- *Generic Emptiness Check for Fun and Profit*,
/Christel Baier/, /František Blahoudek/, /Alexandre Duret-Lutz/,
/Joachim Klein/, /David Müller/, and /Jan Strejček/.
In. Proc. of ATVA'19, LNCS 11781, pp. 445--461, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] |
In. Proc. of ATVA'19, LNCS 11781, pp. 445--461, Oct 2019. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#baier.19.atva][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.pdf][pdf]] |
[[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.mefosyloma.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/baier.19.atva.slides.pdf][slides2]])
Presents the generic emptiness-check implemented in Spot.
- *Practical Applications of the Alternating Cycle Decomposition*,
/Antonio Casares/, /Alexandre Duret-Lutz/, /Klara J. Meyer/, /Florian Renkin/,
and /Salomon Sickert/.
In. Proc. of TACAS'22, LNCS 13244, pp. 99--117, Apr 2022. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#casares.22.tacas][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.pdf][pdf]] |
[[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides.pdf][slides1]] | [[https://www.lrde.epita.fr/~adl/dl/adl/casares.22.tacas.slides2.pdf][slides2]])
Shows applications of the ACD implemented in Spot.
- *Dissecting ltlsynt*,
/Florian Renkin/, /Philipp Schlehuber-Caissier/, /Alexandre Duret-Lutz/,
and /Adrien Pommellet/.
In Formal Methods in System Design, 2023. ([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#renkin.23.scp][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/renkin.23.scp.pdf][pdf]])
Discuss the implementation of [[file:ltlsynt.org][=ltlsynt=]].
* Obsolete references
- *Spot 2.0 — a framework for LTL and ω-automata manipulation*,
/Alexandre Duret-Lutz/, /Alexandre Lewkowicz/, /Amaury Fauchille/,
/Thibaud Michaud/, /Etienne Renault/, and /Laurent Xu/. In Proc.
of ATVA'16, LNCS 9938, pp. 122--129. Chiba, Japan, Oct. 2016.
([[https://www.lrde.epita.fr/~adl/dl/adl_bib.html#duret.16.atva2][bib]] | [[https://www.lrde.epita.fr/~adl/dl/adl/duret.16.atva2.pdf][pdf]])
This provides a quick overview of the entire project (the features
of the library, [[file:tools.org][the tools]], the Python bindings), and provides many
references detailing more specific aspects.
* Obsolete reference
- *Spot: an extensible model checking library using transition-based
generalized Büchi automata*, /Alexandre Duret-Lutz/ and /Denis
@ -114,14 +91,6 @@ be more specific about a particular aspect of Spot.
For a while, this used to be the only paper presenting Spot as a
model-checking library.
- *Reactive Synthesis from LTL Specification with Spot*,
/Thibaud Michaud/, /Maximilien Colange/.
In Proc. of SYNT@CAV'18. ([[https://www.lrde.epita.fr/~max/bibtexbrowser.php?key=michaud.18.synt&bib=perso.bib][bib]] | [[https://www.lrde.epita.fr/dload/papers/michaud.18.synt.pdf][pdf]])
Was the first presentation of the tool [[file:ltlsynt.org][=ltlsynt=]]. The paper
*Dissecting ltlsynt*, mentioned earlier, is more up-to-date.
# LocalWords: utf html Alexandre Duret Lutz Lewkowicz Amaury Xu pdf
# LocalWords: Fauchille Thibaud Michaud Etienne Proc ATVA LNCS TGBA
# LocalWords: ltlfilt randltl ltlcross tgba Eddine Fabrice Kordon

Some files were not shown because too many files have changed in this diff Show more