python: cleanup with autopep8
* tests/python/341.py, tests/python/alarm.py, tests/python/bdddict.py, tests/python/bddnqueen.py, tests/python/bugdet.py, tests/python/dualize.py, tests/python/except.py, tests/python/gen.py, tests/python/genem.py, tests/python/implies.py, tests/python/interdep.py, tests/python/ipnbdoctest.py, tests/python/kripke.py, tests/python/ltl2tgba.py, tests/python/ltlf.py, tests/python/ltlparse.py, tests/python/ltlsimple.py, tests/python/relabel.py, tests/python/rs_like.py, tests/python/sccsplit.py, tests/python/semidet.py, tests/python/setacc.py, tests/python/setxor.py, tests/python/split.py, tests/python/streett_totgba.py, tests/python/stutter.py, tests/python/sum.py, tests/python/toparity.py, tests/python/toweak.py, tests/python/trival.py, python/spot/__init__.py, python/spot/aux.py, python/spot/jupyter.py: Reformat with autopep8.

fixup! * spot/tl/simplify.cc: Fix typos in tracing code.
parent 5b01ce32dd
commit 822fe77891
33 changed files with 444 additions and 356 deletions
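The hunks below are mechanical PEP 8 clean-ups: two blank lines around top-level definitions, statements split off trailing semicolons, and backslash continuations replaced by implicit continuation inside parentheses. As a rough illustration of how such a pass can be reproduced, here is a minimal sketch using autopep8's Python API; the file globs and the in-place loop are assumptions made for illustration, not the exact command used for this commit.

    import glob
    import autopep8   # third-party package providing fix_code()

    # Reformat the test scripts and the Python bindings in place.
    paths = glob.glob('tests/python/*.py') + glob.glob('python/spot/*.py')
    for path in paths:
        with open(path) as f:
            src = f.read()
        fixed = autopep8.fix_code(src)   # returns the PEP 8-formatted source
        if fixed != src:
            with open(path, 'w') as f:
                f.write(fixed)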
@@ -107,14 +107,17 @@ if 'op_ff' not in globals():
_bdd_dict = make_bdd_dict()

__om_init_tmp = option_map.__init__


def __om_init_new(self, str=None):
    __om_init_tmp(self)
    if str:
        res = self.parse_options(str)
        if res:
            raise RuntimeError("failed to parse option at: '" + str + "'")


option_map.__init__ = __om_init_new

@@ -162,6 +165,7 @@ class twa:
            self.highlight_edge(val, color)
        return self


@_extend(twa)
class twa:
    def to_str(a, format='hoa', opt=None):

@@ -192,6 +196,7 @@ class twa:
            f.write('\n')
        return a


@_extend(twa_graph)
class twa_graph:
    def show_storage(self, opt=None):

@@ -200,12 +205,14 @@ class twa_graph:
        from IPython.display import SVG
        return SVG(_ostream_to_svg(ostr))


def make_twa_graph(*args):
    from spot.impl import make_twa_graph as mtg
    if len(args) == 0:
        return mtg(_bdd_dict)
    return mtg(*args)


@_extend(formula)
class formula:
    def __init__(self, str):

@@ -475,9 +482,9 @@ def automata(*sources, timeout=None, ignore_abort=True,
            # universal_newlines for str output instead of bytes
            # when the pipe is read from Python (which happens
            # when timeout is set).
+           prefn = None if no_sid else os.setsid
            proc = subprocess.Popen(filename[:-1], shell=True,
-                                   preexec_fn=
-                                   None if no_sid else os.setsid,
+                                   preexec_fn=prefn,
                                    universal_newlines=True,
                                    stdout=subprocess.PIPE)
            if timeout is None:

@@ -815,6 +822,7 @@ def _add_twa_graph(meth, name = None):
    setattr(twa_graph, name or meth, (lambda self, *args, **kwargs:
                                      globals()[meth](self, *args, **kwargs)))


for meth in ('scc_filter', 'scc_filter_states',
             'is_deterministic', 'is_unambiguous',
             'contains'):

@@ -824,6 +832,8 @@ _add_twa_graph('are_equivalent', 'equivalent_to')
# Wrapper around a formula iterator to which we add some methods of formula
# (using _addfilter and _addmap), so that we can write things like
# formulas.simplify().is_X_free().


class formulaiterator:
    def __init__(self, formulas):
        self._formulas = formulas

@@ -1025,7 +1035,6 @@ for fun in ['remove_x', 'relabel', 'relabel_bse',
    _addmap(fun)


# Better interface to the corresponding C++ function.
def sat_minimize(aut, acc=None, colored=False,
                 state_based=False, states=0,

@@ -1079,16 +1088,19 @@ def parse_word(word, dic=_bdd_dict):
    from spot.impl import parse_word as pw
    return pw(word, dic)


def bdd_to_formula(b, dic=_bdd_dict):
    from spot.impl import bdd_to_formula as bf
    return bf(b, dic)


def language_containment_checker(dic=_bdd_dict):
    from spot.impl import language_containment_checker as c
    c.contains = lambda this, a, b: c.contained(this, b, a)
    c.are_equivalent = lambda this, a, b: c.equal(this, a, b)
    return c(dic)


def mp_hierarchy_svg(cl=None):
    """
    Return an some string containing an SVG picture of the Manna &

@@ -1150,8 +1162,10 @@ def show_mp_hierarchy(cl):
    from IPython.display import SVG
    return SVG(mp_hierarchy_svg(cl))


formula.show_mp_hierarchy = show_mp_hierarchy


@_extend(twa_word)
class twa_word:
    def _repr_latex_(self):

@@ -1162,8 +1176,8 @@ class twa_word:
                res += '; '
            res += bdd_to_formula(letter, bd).to_str('j')
        if len(res) > 1:
-           res += '; ';
-       res += '\\mathsf{cycle}\\{';
+           res += '; '
+       res += '\\mathsf{cycle}\\{'
        for idx, letter in enumerate(self.cycle):
            if idx:
                res += '; '

@@ -1263,5 +1277,6 @@ class twa_word:
class scc_and_mark_filter:
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.restore_acceptance()

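One functional detail visible in the hunks above: scc_and_mark_filter is usable as a context manager (__enter__ returns the filter, __exit__ calls restore_acceptance()). A small usage sketch, based only on the methods that appear in this diff; the formula and the printed statistic are illustrative assumptions.

    import spot

    aut = spot.translate('GFa & GFb')
    # Filter the automaton through scc_and_mark_filter; when the block exits,
    # __exit__ (shown above) restores the original acceptance condition.
    with spot.scc_and_mark_filter(aut, aut.acc().fin_unit()) as flt:
        si = spot.scc_info_with_options(flt, spot.scc_info_options_NONE)
        print(si.scc_count(), "SCCs seen through the filter")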
@@ -28,6 +28,7 @@ import os
import errno
import contextlib


def extend(*classes):
    """
    Decorator that extends all the given classes with the contents

@@ -89,6 +90,7 @@ def rm_f(filename):
        if e.errno != errno.ENOENT:
            raise


@contextlib.contextmanager
def tmpdir():
    cwd = os.getcwd()

@@ -23,6 +23,7 @@ Auxiliary functions for Spot's Python bindings.

from IPython.display import display, HTML


def display_inline(*args, per_row=None, show=None):
    """
    This is a wrapper around IPython's `display()` to display multiple

@@ -20,6 +20,7 @@
import spot
from subprocess import _active


def two_intersecting_automata():
    """return two random automata with a non-empty intersection"""
    g = spot.automata('randaut -A4 -Q5 -n-1 2 |')

@@ -27,9 +28,10 @@ def two_intersecting_automata():
        if a.intersects(b):
            return a, b


for i in range(5):
    two_intersecting_automata()

n = len(_active)
print(n, "active processes")
-assert(n == 0);
+assert(n == 0)

@@ -23,11 +23,13 @@ import signal
import sys
import os


def alarm_handler(signum, frame):
    sys.stdout.write("signaled\n")
    os.kill(child, signal.SIGTERM)
    exit(0)


f = """!(G(F(P_Rbt2.observe)&& F(P_Rbt3.observe) &&
F(P_rbt1.observe)&& F(P_Rbt1.plus || P_Rbt1.moins || P_Rbt1.stop)&&
F(P_Rbt3.plus || P_Rbt3.moins || P_Rbt3.stop) && F(P_Rbt2.plus ||

@@ -20,6 +20,10 @@
# Make sure we can leep track of BDD association in Python using bdd_dict, as
# discussed in issue #372.

+import spot


class bdd_holder:
    def __init__(self, aut):
        self.bdddict = d = aut.get_dict()

@@ -48,10 +52,10 @@ class bdd_holder3:
        self.bdddict.unregister_all_my_variables(self)


def check_ok():
    assert type(bdict.varnum(spot.formula.ap("a"))) is int


def check_nok():
    try:
        bdict.varnum(spot.formula.ap("a"))

@@ -60,12 +64,13 @@ def check_nok():
    else:
        raise RuntimeError("missing exception")


def debug(txt):
    # print(txt)
    # bdict.dump(spot.get_cout())
    pass

-import spot

aut = spot.translate("a U b")
bdict = aut.get_dict()
debug("aut")

@@ -1,5 +1,5 @@
# -*- mode: python; coding: utf-8 -*-
-# Copyright (C) 2010, 2011, 2012, 2014 Laboratoire de Recherche et
+# Copyright (C) 2010, 2011, 2012, 2014, 2019 Laboratoire de Recherche et
# Développement de l'EPITA.
# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6
# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université

@@ -26,9 +26,12 @@
import sys
from buddy import *

# Build the requirements for all other fields than (i,j) assuming
# that (i,j) has a queen.


def build(i, j):
    """
    Build the requirements for all other fields than (i,j) assuming
    that (i,j) has a queen.
    """
    a = b = c = d = bddtrue

    # No one in the same column.

@@ -59,7 +62,6 @@ def build(i, j):
    queen &= a & b & c & d


# Get the number of queens from the command-line, or default to 8.
if len(sys.argv) > 1:
    N = int(argv[1])

@@ -97,7 +99,6 @@ solution = bdd_satone(queen)
bdd_printset(solution)

from spot import nl_cout

nl_cout()

# Cleanup all BDD variables before calling bdd_done(), otherwise

@@ -73,7 +73,7 @@ State: 6 {0}
State: 7 {0}
[0&!1&2] 7
--END--
-""");
+""")

# In Reuben's report this first block built an incorrect deterministic
# automaton, which ultimately led to an non-empty product. The second

@@ -82,10 +82,10 @@ print("use_simulation=True")
b1 = spot.tgba_determinize(b, False, True, True, True)
assert b1.num_states() == 5
b1 = spot.remove_fin(spot.dualize(b1))
-assert not a.intersects(b1);
+assert not a.intersects(b1)

print("\nuse_simulation=False")
b2 = spot.tgba_determinize(b, False, True, False, True)
assert b2.num_states() == 5
b2 = spot.remove_fin(spot.dualize(b2))
-assert not a.intersects(b2);
+assert not a.intersects(b2)

@@ -21,32 +21,38 @@
import spot
import buddy

-match_strings = [('is_buchi', 'is_co_buchi'),\
-                 ('is_generalized_buchi', 'is_generalized_co_buchi'),\
-                 ('is_all', 'is_none'),\
+match_strings = [('is_buchi', 'is_co_buchi'),
+                 ('is_generalized_buchi', 'is_generalized_co_buchi'),
+                 ('is_all', 'is_none'),
                 ('is_all', 'is_all'),
                 ('is_buchi', 'is_all')]

# existential and universal are dual
# deterministic is self-dual


def dualtype(aut, dual):
-   if dual.acc().is_none(): return True
+   if dual.acc().is_none():
+       return True
    return (not spot.is_deterministic(aut) or spot.is_deterministic(dual))\
        and (spot.is_universal(dual) or not aut.is_existential())\
        and (dual.is_existential() or not spot.is_universal(aut))


def produce_phi(rg, n):
    phi = []
    while len(phi) < n:
        phi.append(rg.next())
    return phi


def produce_automaton(phi):
    aut = []
    for f in phi:
        aut.append(spot.translate(f))
    return aut


def test_aut(aut, d=None):
    if d is None:
        d = spot.dualize(aut)

@@ -61,7 +67,7 @@ def test_aut(aut, d = None):
    if not dualtype(aut, d):
        return (False, 'Incorrect transition mode resulting of dual')
    for p in match_strings:
-       if ((getattr(aa, p[0])() and getattr(da, p[1])())\
+       if ((getattr(aa, p[0])() and getattr(da, p[1])())
                or (getattr(aa, p[1])() and getattr(da, p[0])())):
            return (True, '')
    return (False, 'Incorrect acceptance type dual')

@@ -70,12 +76,15 @@ def test_aut(aut, d = None):
# languages.
# FIXME This test could be extended to non-deterministic automata with a
# dealternization procedure.


def test_complement(aut):
    assert aut.is_deterministic()
    d = spot.dualize(aut)
    s = spot.product_or(aut, d)
    assert spot.dualize(s).is_empty()


def test_assert(a, d=None):
    t = test_aut(a, d)
    if not t[0]:

@@ -84,6 +93,7 @@ def test_assert(a, d=None):
        print (spot.dualize(a).to_str('hoa'))
        assert False


aut = spot.translate('a')

test_assert(aut)

@@ -25,6 +25,7 @@
import spot
import buddy


def report_missing_exception():
    raise RuntimeError("missing exception")

@@ -139,8 +139,6 @@ State: 1 [0&1] 0 {4 6 7} [0&!1] 1 {3 6} [!0&1] 0 {4 7} [!0&!1] 1 {0}
--END--""")


def generic_emptiness2_rec(aut):
    spot.cleanup_acceptance_here(aut, False)
    # Catching 'false' acceptance here is an optimization that could be removed.

@@ -183,6 +181,8 @@ def generic_emptiness2_rec(aut):
    return True

# A very old python version of spot.generic_emptiness_check()


def generic_emptiness2(aut):
    old_a = spot.acc_cond(aut.acc())
    res = generic_emptiness2_rec(aut)

@@ -191,6 +191,8 @@ def generic_emptiness2(aut):
    return res

# A more modern python version of spot.generic_emptiness_check()


def is_empty1(g):
    si = spot.scc_info_with_options(g, spot.scc_info_options_NONE)
    for scc_num in range(si.scc_count()):

@@ -200,15 +202,19 @@ def is_empty1(g):
            return False
    return True


def is_scc_empty1(si, scc_num, acc=None):
    if acc is None:  # acceptance isn't forced, get it from the automaton
        acc = si.get_aut().acc()
    occur, common = si.acc_sets_of(scc_num), si.common_sets_of(scc_num)
    acc = acc.restrict_to(occur)
    acc = acc.remove(common, False)
-   if acc.is_t(): return False
-   if acc.is_f(): return True
-   if acc.accepting(occur): return False
+   if acc.is_t():
+       return False
+   if acc.is_f():
+       return True
+   if acc.accepting(occur):
+       return False
    for cl in acc.top_disjuncts():
        fu = cl.fin_unit()  # Is there Fin at the top level
        if fu:

@@ -229,9 +235,11 @@ def is_scc_empty1(si, scc_num, acc=None):
            return False
    return True


def is_empty2(g):
    return is_empty2_rec(spot.scc_and_mark_filter(g, g.acc().fin_unit()))


def is_empty2_rec(g):
    si = spot.scc_info_with_options(g, spot.scc_info_options_STOP_ON_ACC)
    if si.one_accepting_scc() >= 0:

@@ -243,6 +251,7 @@ def is_empty2_rec(g):
            return False
    return True


def is_scc_empty2(si, scc_num, acc=None):
    if acc is None:  # acceptance isn't forced, get it from the automaton
        acc = si.get_aut().acc()

@@ -271,6 +280,7 @@ def is_scc_empty2(si, scc_num, acc=None):
            return False
    return True


def run_bench(automata):
    for aut in automata:
        # Make sure our three implementation behave identically

@@ -287,4 +297,5 @@ def run_bench(automata):
        run3 = spot.generic_accepting_run(aut)
        assert run3.replay(spot.get_cout()) is True


run_bench([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a11, a360])

@@ -57,5 +57,5 @@ assert(bdd_implies(a,b))
# Cleanup all BDD variables before calling bdd_done(), otherwise
# bdd_delref will be called after bdd_done() and this is unsafe in
# optimized builds.
-V = a = b = c = d = e = f = g = 0;
+V = a = b = c = d = e = f = g = 0
bdd_done()

@@ -34,7 +34,8 @@ d = simp.get_dict()
a = spot.ltl_to_tgba_fm(pf.f, d)
g = spot.parse_infix_boolean('b&c', e)
b = simp.as_bdd(g.f)
-buddy.bdd_printset(b); spot.nl_cout()
+buddy.bdd_printset(b)
+spot.nl_cout()
del g

s0 = a.get_init_state()

@@ -45,9 +46,11 @@ while not it.done():
    sys.stdout.write("%s\n" % c)
    b &= c  # `&=' is defined only in buddy. So if this statement works
    # it means buddy can grok spot's objects.
-   buddy.bdd_printset(c); spot.nl_cout()
+   buddy.bdd_printset(c)
+   spot.nl_cout()
    it.next()
-buddy.bdd_printset(b); spot.nl_cout()
+buddy.bdd_printset(b)
+spot.nl_cout()
sys.stdout.write("%s\n" % b)
del it
del s0

@@ -12,7 +12,9 @@ with those stored in the notebook.

from __future__ import print_function

-import os,sys,time
+import os
+import sys
+import time
import base64
import re
import pprint

@@ -51,6 +53,7 @@ try:
except ImportError:
    from IPython.nbformat import v4 as nbformat


def compare_png(a64, b64):
    """compare two b64 PNGs (incomplete)"""
    try:

@@ -61,6 +64,7 @@ def compare_png(a64, b64):
        bdata = base64.decodestring(b64)
    return True


def canonicalize(s, type, ignores):
    """sanitize a string for comparison.

@@ -195,6 +199,7 @@ def compare_outputs(ref, test, ignores=[]):
                                        fromfile='expected', tofile='effective')))
        return False


def _wait_for_ready_backport(kc):
    """Backport BlockingKernelClient.wait_for_ready from IPython 3"""
    # Wait for kernel info reply on shell channel

@@ -210,6 +215,7 @@ def _wait_for_ready_backport(kc):
        except Empty:
            break


def run_cell(kc, cell):
    kc.execute(cell.source)
    # wait for finish, maximum 30s

@@ -295,7 +301,6 @@ def test_notebook(ipynb):
            print("OK")
            successes += 1

    print("tested notebook %s" % ipynb)
    print(" %3i cells successfully replicated" % successes)
    if failures:

@@ -308,6 +313,7 @@ def test_notebook(ipynb):
    if failures | errors:
        sys.exit(1)


if __name__ == '__main__':
    for ipynb in sys.argv[1:]:
        print("testing %s" % ipynb)

@@ -28,6 +28,7 @@ import sys
import getopt
import spot


def usage(prog):
    sys.stderr.write("""Usage: %s [OPTIONS...] formula

@@ -44,8 +44,8 @@ for str1, isl in l:
    assert isl == pf.f.is_leaf()
    del pf

-assert spot.formula('a').is_leaf();
-assert spot.formula('0').is_leaf();
+assert spot.formula('a').is_leaf()
+assert spot.formula('0').is_leaf()

for str1 in ['a * b', 'a xor b', 'a <-> b']:
    pf = spot.parse_infix_boolean(str1, e, False)

@@ -114,7 +114,8 @@ for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"),
                 ('{a;b b}', "ignoring this"),
                 ('{*', "missing closing brace"),
                 ('{(a', "missing closing parenthesis"),
-                ('{* a', "ignoring trailing garbage and missing closing brace"),
+                ('{* a',
+                  "ignoring trailing garbage and missing closing brace"),
                 ('F(a b)', "ignoring this"),
                 ('F(-)', "treating this parenthetical block as false"),
                 ('F(', "missing closing parenthesis"),

@@ -65,7 +65,8 @@ F = spot.formula.ff()
f1 = spot.formula.Equiv(c, a)
f2 = spot.formula.Implies(a, b)
f3 = spot.formula.Xor(b, c)
-f4 = spot.formula.Not(f3); del f3
+f4 = spot.formula.Not(f3)
+del f3
f5 = spot.formula.Xor(F, c)

del a, b, c, T, F, f1, f2, f4, f5

@@ -83,14 +84,19 @@ def switch_g_f(x):
        return spot.formula.G(switch_g_f(x[0]))
    return x.map(switch_g_f)


f = spot.formula('GFa & XFGb & Fc & G(a | b | Fd)')
assert str(switch_g_f(f)) == 'FGa & XGFb & Gc & F(a | b | Gd)'

x = 0


def count_g(f):
    global x
    if f._is(spot.op_G):
        x += 1


f.traverse(count_g)
assert x == 3

@@ -135,10 +141,13 @@ for (input, output) in [('(a&b)<->b', 'b->(a&b)'),
    assert(f == output)
    assert(spot.are_equivalent(input, output))


def myparse(input):
    env = spot.default_environment.instance()
    pf = spot.parse_infix_psl(input, env)
    return pf.f


# This used to fail, because myparse would return a pointer
# to pf.f inside the destroyed pf.
assert myparse('a U b') == spot.formula('a U b')

@@ -27,6 +27,7 @@ m2 = spot.mark_t([2])
m3 = spot.mark_t([3])
mall = spot.mark_t()


def test_rs(acc, rs, expected_res, expected_pairs):
    res, p = getattr(acc, 'is_' + rs + '_like')()
    assert res == expected_res

@@ -36,6 +37,7 @@ def test_rs(acc, rs, expected_res, expected_pairs):
    for a, b in zip(p, expected_pairs):
        assert a.fin == b.fin and a.inf == b.inf


def switch_pairs(pairs):
    if pairs == None:
        return None

@@ -44,15 +46,20 @@ def switch_pairs(pairs):
        r.append(spot.rs_pair(p.inf, p.fin))
    return r


def test_streett(acc, expected_streett_like, expected_pairs):
    test_rs(acc, 'streett', expected_streett_like, expected_pairs)
    o_acc = spot.acc_cond(acc.get_acceptance().complement())
-   test_rs(o_acc, 'rabin', expected_streett_like, switch_pairs(expected_pairs))
+   test_rs(o_acc, 'rabin', expected_streett_like,
+           switch_pairs(expected_pairs))


def test_rabin(acc, expected_rabin_like, expected_pairs):
    test_rs(acc, 'rabin', expected_rabin_like, expected_pairs)
    o_acc = spot.acc_cond(acc.get_acceptance().complement())
-   test_rs(o_acc, 'streett', expected_rabin_like, switch_pairs(expected_pairs))
+   test_rs(o_acc, 'streett', expected_rabin_like,
+           switch_pairs(expected_pairs))


acc = spot.acc_cond(spot.acc_code('Fin(0)'))
test_streett(acc, True, [spot.rs_pair(m0, mall)])

@@ -75,12 +82,12 @@ test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m0, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'))
test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2)])

-acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'\
+acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'
                                  '&(Fin(3)&Inf(3))'))
-test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),\
+test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),
                         spot.rs_pair(m3, mall), spot.rs_pair(mall, m3)])

-acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'\
+acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'
                                  '&(Fin(3)&Inf(3))&(Fin(4)|Inf(5)|Inf(6))'))
test_streett(acc, False, None)

@@ -102,13 +109,11 @@ test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m0, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'))
test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2)])

-acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'\
+acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'
                                  '|(Fin(3)|Inf(3))'))
-test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),\
+test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),
                       spot.rs_pair(m3, mall), spot.rs_pair(mall, m3)])

-acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'\
+acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'
                                  '|(Fin(3)|Inf(3))|(Fin(4)&Inf(5)&Inf(6))'))
test_rabin(acc, False, None)

@@ -26,5 +26,5 @@ s = ""
for aut2 in si.split_on_sets(0, [0]):
    # This call to to_str() used to fail because split_on_sets had not
    # registered the atomic propositions of aut
-   s += aut2.to_str();
+   s += aut2.to_str()
assert spot.automaton(s).num_states() == 8

@@ -24,9 +24,9 @@ import spot
# Test case reduced from a report from Juraj Major <major@fi.muni.cz>.
a = spot.make_twa_graph(spot._bdd_dict)
a.set_acceptance(0, spot.acc_code("t"))
-assert(a.prop_state_acc() == True);
+assert(a.prop_state_acc() == True)
a.set_acceptance(1, spot.acc_code("Fin(0)"))
-assert(a.prop_state_acc() == spot.trival.maybe());
+assert(a.prop_state_acc() == spot.trival.maybe())

# Some tests for used_inf_fin_sets(), which return a pair of mark_t.

@@ -45,5 +45,5 @@ assert(e == bdd_setxor(d,a))
# Cleanup all BDD variables before calling bdd_done(), otherwise
# bdd_delref will be called after bdd_done() and this is unsafe in
# optimized builds.
-V = a = b = c = d = e = 0;
+V = a = b = c = d = e = 0
bdd_done()

@@ -19,11 +19,15 @@

import spot


def incl(a, b):
    return not b.intersects(spot.dualize(spot.tgba_determinize(a)))


def equiv(a, b):
    return incl(a, b) and incl(b, a)


def do_split(f, in_list):
    aut = spot.translate(f)
    inputs = spot.buddy.bddtrue

@@ -32,6 +36,7 @@ def do_split(f, in_list):
    s = spot.split_2step(aut, inputs)
    return aut, s


aut, s = do_split('(FG !a) <-> (GF b)', ['a'])
assert equiv(aut, spot.unsplit_2step(s))

@@ -100,4 +105,3 @@ assert equiv(aut, spot.unsplit_2step(s))
aut, s = do_split('((G (((! g_0) || (! g_1)) && ((r_0 && (X r_1)) -> (F (g_0 \
&& g_1))))) && (G (r_0 -> F g_0))) && (G (r_1 -> F g_1))', ['r_0', 'r_1'])
assert equiv(aut, spot.unsplit_2step(s))

@@ -23,12 +23,14 @@ import os
import shutil
import sys


def tgba(a):
    if not a.is_existential():
        a = spot.remove_alternation(a)
    a = spot.to_generalized_buchi(a)
    return a


def test_aut(aut):
    stgba = tgba(aut)
    assert stgba.equivalent_to(aut)

@@ -51,6 +53,7 @@ def test_aut(aut):
# ltldo "ltl2dstar --automata=streett --output-format=hoa\
# --ltl2nba=spin:ltl2tgba@-s %L ->%O" -F- --name=%f -H"


if shutil.which('ltl2dstar') is None:
    sys.exit(77)
for a in spot.automata('genltl --eh-patterns --dac-patterns --hkrss-patterns\

@@ -24,6 +24,7 @@

import spot


def explain_stut(f):
    f = spot.formula(f)
    pos = spot.translate(f)

@@ -41,6 +42,7 @@ def explain_stut(f):
    word2.simplify()
    return(word, word2)


# Test from issue #388
w1, w2 = explain_stut('{(a:b) | (a;b)}|->Gc')
assert str(w1) == 'a & !b & !c; cycle{!a & b & !c}'

@@ -46,6 +46,7 @@ rg = spot.randltlgenerator(2, opts)

dict = spot.make_bdd_dict()


def produce_phi(rg, n):
    phi = []
    while len(phi) < n:

@@ -54,6 +55,7 @@ def produce_phi(rg, n):
            phi.append(f)
    return phi


phi1 = produce_phi(rg, 1000)
phi2 = produce_phi(rg, 1000)
inputres = []

@@ -62,7 +64,7 @@ for p in zip(phi1, phi2):
    inputres.append(spot.formula.Or(p))
    a1 = spot.ltl_to_tgba_fm(p[0], dict)
    a2 = spot.ltl_to_tgba_fm(p[1], dict)
-   aut.append(spot.to_generalized_buchi( \
+   aut.append(spot.to_generalized_buchi(
        spot.remove_alternation(spot.sum(a1, a2), True)))

for p in zip(aut, inputres):

@@ -75,7 +77,7 @@ for p in zip(phi1, phi2):
    inputres.append(spot.formula.And(p))
    a1 = spot.ltl_to_tgba_fm(p[0], dict)
    a2 = spot.ltl_to_tgba_fm(p[1], dict)
-   aut.append(spot.to_generalized_buchi( \
+   aut.append(spot.to_generalized_buchi(
        spot.remove_alternation(spot.sum_and(a1, a2), True)))

for p in zip(aut, inputres):

|
|||
assert spot.are_equivalent(n, p)
|
||||
|
||||
# Issue #390.
|
||||
a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det');
|
||||
b = spot.to_parity(a);
|
||||
a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det')
|
||||
b = spot.to_parity(a)
|
||||
assert a.equivalent_to(b)
|
||||
|
|
|
|||
|
|
@@ -29,11 +29,13 @@ GF!b
(b & GF!b) | (!b & FGb)
b | (a & XF(b R a)) | (!a & XG(!b U !a))"""


def test_phi(phi):
    a = spot.translate(phi, 'TGBA', 'SBAcc')
    res = spot.to_weak_alternating(spot.dualize(a))
    assert res.equivalent_to(spot.formula.Not(spot.formula(phi)))


for p in phi1.split('\n'):
    print(p)
    test_phi(p)

@@ -39,7 +39,7 @@ assert v4 == spot.trival(spot.trival.maybe_value)
assert v3
assert -v2
assert not -v1
-assert not v1;
+assert not v1
assert not -v3

for u in (v1, v2, v3):