python: cleanup with autopep8

* tests/python/341.py, tests/python/alarm.py, tests/python/bdddict.py,
tests/python/bddnqueen.py, tests/python/bugdet.py,
tests/python/dualize.py, tests/python/except.py, tests/python/gen.py,
tests/python/genem.py, tests/python/implies.py,
tests/python/interdep.py, tests/python/ipnbdoctest.py,
tests/python/kripke.py, tests/python/ltl2tgba.py,
tests/python/ltlf.py, tests/python/ltlparse.py,
tests/python/ltlsimple.py, tests/python/relabel.py,
tests/python/rs_like.py, tests/python/sccsplit.py,
tests/python/semidet.py, tests/python/setacc.py,
tests/python/setxor.py, tests/python/split.py,
tests/python/streett_totgba.py, tests/python/stutter.py,
tests/python/sum.py, tests/python/toparity.py, tests/python/toweak.py,
tests/python/trival.py, python/spot/__init__.py, python/spot/aux.py,
python/spot/jupyter.py: Reformat with autopep8.

fixup! * spot/tl/simplify.cc: Fix typos in tracing code.
Alexandre Duret-Lutz 2019-07-02 10:12:44 +02:00
parent 5b01ce32dd
commit 822fe77891
33 changed files with 444 additions and 356 deletions
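The commit does not record the exact autopep8 invocation used for the reformatting. As a rough, hypothetical sketch only (the paths come from the file list above; the use of autopep8's Python API with its default options is an assumption, not part of the commit), such a cleanup could be reproduced like this:

import glob
import autopep8  # assumed to be installed separately; not shipped with Spot

# Reformat the Python files under the test and binding directories, in place.
for path in glob.glob('tests/python/*.py') + glob.glob('python/spot/*.py'):
    with open(path) as f:
        src = f.read()
    fixed = autopep8.fix_code(src)  # PEP 8 fixes with default options
    if fixed != src:
        with open(path, 'w') as f:
            f.write(fixed)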


@ -107,14 +107,17 @@ if 'op_ff' not in globals():
_bdd_dict = make_bdd_dict()
__om_init_tmp = option_map.__init__
def __om_init_new(self, str=None):
__om_init_tmp(self)
if str:
res = self.parse_options(str)
if res:
raise RuntimeError("failed to parse option at: '" + str +"'")
raise RuntimeError("failed to parse option at: '" + str + "'")
option_map.__init__ = __om_init_new
@ -162,6 +165,7 @@ class twa:
self.highlight_edge(val, color)
return self
@_extend(twa)
class twa:
def to_str(a, format='hoa', opt=None):
@ -192,6 +196,7 @@ class twa:
f.write('\n')
return a
@_extend(twa_graph)
class twa_graph:
def show_storage(self, opt=None):
@ -200,12 +205,14 @@ class twa_graph:
from IPython.display import SVG
return SVG(_ostream_to_svg(ostr))
def make_twa_graph(*args):
from spot.impl import make_twa_graph as mtg
if len(args) == 0:
return mtg(_bdd_dict)
return mtg(*args)
@_extend(formula)
class formula:
def __init__(self, str):
@ -475,9 +482,9 @@ def automata(*sources, timeout=None, ignore_abort=True,
# universal_newlines for str output instead of bytes
# when the pipe is read from Python (which happens
# when timeout is set).
prefn = None if no_sid else os.setsid
proc = subprocess.Popen(filename[:-1], shell=True,
preexec_fn=
None if no_sid else os.setsid,
preexec_fn=prefn,
universal_newlines=True,
stdout=subprocess.PIPE)
if timeout is None:
@ -751,7 +758,7 @@ formula.translate = translate
# instance methods (i.e., self passed as first argument
# automatically), because only user-defined functions are converted as
# instance methods.
def _add_formula(meth, name = None):
def _add_formula(meth, name=None):
setattr(formula, name or meth, (lambda self, *args, **kwargs:
globals()[meth](self, *args, **kwargs)))
@ -811,10 +818,11 @@ twa.postprocess = postprocess
# instance methods (i.e., self passed as first argument
# automatically), because only user-defined functions are converted as
# instance methods.
def _add_twa_graph(meth, name = None):
def _add_twa_graph(meth, name=None):
setattr(twa_graph, name or meth, (lambda self, *args, **kwargs:
globals()[meth](self, *args, **kwargs)))
for meth in ('scc_filter', 'scc_filter_states',
'is_deterministic', 'is_unambiguous',
'contains'):
@ -824,6 +832,8 @@ _add_twa_graph('are_equivalent', 'equivalent_to')
# Wrapper around a formula iterator to which we add some methods of formula
# (using _addfilter and _addmap), so that we can write things like
# formulas.simplify().is_X_free().
class formulaiterator:
def __init__(self, formulas):
self._formulas = formulas
@ -1025,14 +1035,13 @@ for fun in ['remove_x', 'relabel', 'relabel_bse',
_addmap(fun)
# Better interface to the corresponding C++ function.
def sat_minimize(aut, acc=None, colored=False,
state_based=False, states=0,
max_states=0, sat_naive=False, sat_langmap=False,
sat_incr=0, sat_incr_steps=0,
display_log=False, return_log=False):
args=''
args = ''
if acc is not None:
if type(acc) is not str:
raise ValueError("argument 'acc' should be a string")
@ -1079,16 +1088,19 @@ def parse_word(word, dic=_bdd_dict):
from spot.impl import parse_word as pw
return pw(word, dic)
def bdd_to_formula(b, dic=_bdd_dict):
from spot.impl import bdd_to_formula as bf
return bf(b, dic)
def language_containment_checker(dic=_bdd_dict):
from spot.impl import language_containment_checker as c
c.contains = lambda this, a, b: c.contained(this, b, a)
c.are_equivalent = lambda this, a, b: c.equal(this, a, b)
return c(dic)
def mp_hierarchy_svg(cl=None):
"""
Return some string containing an SVG picture of the Manna &
@ -1099,7 +1111,7 @@ def mp_hierarchy_svg(cl=None):
`mp_class(cl)`.
"""
if type(cl)==formula:
if type(cl) == formula:
cl = mp_class(cl)
ch = None
coords = {
@ -1112,12 +1124,12 @@ def mp_hierarchy_svg(cl=None):
'B': '110,198',
}
if cl in coords:
highlight='''<g transform="translate({})">
highlight = '''<g transform="translate({})">
<line x1="-10" y1="-10" x2="10" y2="10" stroke="red" stroke-width="5" />
<line x1="-10" y1="10" x2="10" y2="-10" stroke="red" stroke-width="5" />
</g>'''.format(coords[cl])
else:
highlight=''
highlight = ''
return '''
<svg height="210" width="220" xmlns="http://www.w3.org/2000/svg" version="1.1">
<polygon points="20,0 200,120 200,210 20,210" fill="cyan" opacity=".2" />
@ -1150,8 +1162,10 @@ def show_mp_hierarchy(cl):
from IPython.display import SVG
return SVG(mp_hierarchy_svg(cl))
formula.show_mp_hierarchy = show_mp_hierarchy
@_extend(twa_word)
class twa_word:
def _repr_latex_(self):
@ -1162,8 +1176,8 @@ class twa_word:
res += '; '
res += bdd_to_formula(letter, bd).to_str('j')
if len(res) > 1:
res += '; ';
res += '\\mathsf{cycle}\\{';
res += '; '
res += '\\mathsf{cycle}\\{'
for idx, letter in enumerate(self.cycle):
if idx:
res += '; '
@ -1263,5 +1277,6 @@ class twa_word:
class scc_and_mark_filter:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_acceptance()


@ -28,6 +28,7 @@ import os
import errno
import contextlib
def extend(*classes):
"""
Decorator that extends all the given classes with the contents
@ -89,6 +90,7 @@ def rm_f(filename):
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def tmpdir():
cwd = os.getcwd()


@ -23,6 +23,7 @@ Auxiliary functions for Spot's Python bindings.
from IPython.display import display, HTML
def display_inline(*args, per_row=None, show=None):
"""
This is a wrapper around IPython's `display()` to display multiple


@ -20,6 +20,7 @@
import spot
from subprocess import _active
def two_intersecting_automata():
"""return two random automata with a non-empty intersection"""
g = spot.automata('randaut -A4 -Q5 -n-1 2 |')
@ -27,9 +28,10 @@ def two_intersecting_automata():
if a.intersects(b):
return a, b
for i in range(5):
two_intersecting_automata()
n = len(_active)
print(n, "active processes")
assert(n == 0);
assert(n == 0)


@ -23,11 +23,13 @@ import signal
import sys
import os
def alarm_handler(signum, frame):
sys.stdout.write("signaled\n")
os.kill(child, signal.SIGTERM)
exit(0)
f = """!(G(F(P_Rbt2.observe)&& F(P_Rbt3.observe) &&
F(P_rbt1.observe)&& F(P_Rbt1.plus || P_Rbt1.moins || P_Rbt1.stop)&&
F(P_Rbt3.plus || P_Rbt3.moins || P_Rbt3.stop) && F(P_Rbt2.plus ||


@ -20,6 +20,10 @@
# Make sure we can keep track of BDD association in Python using bdd_dict, as
# discussed in issue #372.
import spot
class bdd_holder:
def __init__(self, aut):
self.bdddict = d = aut.get_dict()
@ -48,10 +52,10 @@ class bdd_holder3:
self.bdddict.unregister_all_my_variables(self)
def check_ok():
assert type(bdict.varnum(spot.formula.ap("a"))) is int
def check_nok():
try:
bdict.varnum(spot.formula.ap("a"))
@ -60,12 +64,13 @@ def check_nok():
else:
raise RuntimeError("missing exception")
def debug(txt):
# print(txt)
# bdict.dump(spot.get_cout())
pass
import spot
aut = spot.translate("a U b")
bdict = aut.get_dict()
debug("aut")


@ -1,5 +1,5 @@
# -*- mode: python; coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012, 2014 Laboratoire de Recherche et
# Copyright (C) 2010, 2011, 2012, 2014, 2019 Laboratoire de Recherche et
# Développement de l'EPITA.
# Copyright (C) 2003, 2004 Laboratoire d'Informatique de Paris 6
# (LIP6), département Systèmes Répartis Coopératifs (SRC), Université
@ -26,9 +26,12 @@
import sys
from buddy import *
# Build the requirements for all other fields than (i,j) assuming
# that (i,j) has a queen.
def build(i, j):
"""
Build the requirements for all other fields than (i,j) assuming
that (i,j) has a queen.
"""
a = b = c = d = bddtrue
# No one in the same column.
@ -59,7 +62,6 @@ def build(i, j):
queen &= a & b & c & d
# Get the number of queens from the command-line, or default to 8.
if len(sys.argv) > 1:
N = int(argv[1])
@ -97,7 +99,6 @@ solution = bdd_satone(queen)
bdd_printset(solution)
from spot import nl_cout
nl_cout()
# Cleanup all BDD variables before calling bdd_done(), otherwise


@ -73,7 +73,7 @@ State: 6 {0}
State: 7 {0}
[0&!1&2] 7
--END--
""");
""")
# In Reuben's report this first block built an incorrect deterministic
# automaton, which ultimately led to a non-empty product. The second
@ -82,10 +82,10 @@ print("use_simulation=True")
b1 = spot.tgba_determinize(b, False, True, True, True)
assert b1.num_states() == 5
b1 = spot.remove_fin(spot.dualize(b1))
assert not a.intersects(b1);
assert not a.intersects(b1)
print("\nuse_simulation=False")
b2 = spot.tgba_determinize(b, False, True, False, True)
assert b2.num_states() == 5
b2 = spot.remove_fin(spot.dualize(b2))
assert not a.intersects(b2);
assert not a.intersects(b2)


@ -21,33 +21,39 @@
import spot
import buddy
match_strings = [('is_buchi', 'is_co_buchi'),\
('is_generalized_buchi', 'is_generalized_co_buchi'),\
('is_all', 'is_none'),\
match_strings = [('is_buchi', 'is_co_buchi'),
('is_generalized_buchi', 'is_generalized_co_buchi'),
('is_all', 'is_none'),
('is_all', 'is_all'),
('is_buchi', 'is_all')]
# existential and universal are dual
# deterministic is self-dual
def dualtype(aut, dual):
if dual.acc().is_none(): return True
if dual.acc().is_none():
return True
return (not spot.is_deterministic(aut) or spot.is_deterministic(dual))\
and (spot.is_universal(dual) or not aut.is_existential())\
and (dual.is_existential() or not spot.is_universal(aut))
def produce_phi(rg, n):
phi = []
while len(phi) < n:
phi.append(rg.next())
return phi
def produce_automaton(phi):
aut = []
for f in phi:
aut.append(spot.translate(f))
return aut
def test_aut(aut, d = None):
def test_aut(aut, d=None):
if d is None:
d = spot.dualize(aut)
aa = aut.acc()
@ -61,7 +67,7 @@ def test_aut(aut, d = None):
if not dualtype(aut, d):
return (False, 'Incorrect transition mode resulting of dual')
for p in match_strings:
if ((getattr(aa, p[0])() and getattr(da, p[1])())\
if ((getattr(aa, p[0])() and getattr(da, p[1])())
or (getattr(aa, p[1])() and getattr(da, p[0])())):
return (True, '')
return (False, 'Incorrect acceptance type dual')
@ -70,12 +76,15 @@ def test_aut(aut, d = None):
# languages.
# FIXME This test could be extended to non-deterministic automata with a
# dealternization procedure.
def test_complement(aut):
assert aut.is_deterministic()
d = spot.dualize(aut)
s = spot.product_or(aut, d)
assert spot.dualize(s).is_empty()
def test_assert(a, d=None):
t = test_aut(a, d)
if not t[0]:
@ -84,6 +93,7 @@ def test_assert(a, d=None):
print (spot.dualize(a).to_str('hoa'))
assert False
aut = spot.translate('a')
test_assert(aut)


@ -25,6 +25,7 @@
import spot
import buddy
def report_missing_exception():
raise RuntimeError("missing exception")
@ -147,7 +148,7 @@ else:
report_missing_exception()
try:
m = spot.mark_t([0,n,1])
m = spot.mark_t([0, n, 1])
except RuntimeError as e:
assert "bit index is out of bounds" in str(e)
else:


@ -139,8 +139,6 @@ State: 1 [0&1] 0 {4 6 7} [0&!1] 1 {3 6} [!0&1] 0 {4 7} [!0&!1] 1 {0}
--END--""")
def generic_emptiness2_rec(aut):
spot.cleanup_acceptance_here(aut, False)
# Catching 'false' acceptance here is an optimization that could be removed.
@ -183,6 +181,8 @@ def generic_emptiness2_rec(aut):
return True
# A very old python version of spot.generic_emptiness_check()
def generic_emptiness2(aut):
old_a = spot.acc_cond(aut.acc())
res = generic_emptiness2_rec(aut)
@ -191,6 +191,8 @@ def generic_emptiness2(aut):
return res
# A more modern python version of spot.generic_emptiness_check()
def is_empty1(g):
si = spot.scc_info_with_options(g, spot.scc_info_options_NONE)
for scc_num in range(si.scc_count()):
@ -200,15 +202,19 @@ def is_empty1(g):
return False
return True
def is_scc_empty1(si, scc_num, acc=None):
if acc is None: # acceptance isn't forced, get it from the automaton
acc = si.get_aut().acc()
occur, common = si.acc_sets_of(scc_num), si.common_sets_of(scc_num)
acc = acc.restrict_to(occur)
acc = acc.remove(common, False)
if acc.is_t(): return False
if acc.is_f(): return True
if acc.accepting(occur): return False
if acc.is_t():
return False
if acc.is_f():
return True
if acc.accepting(occur):
return False
for cl in acc.top_disjuncts():
fu = cl.fin_unit() # Is there Fin at the top level
if fu:
@ -229,9 +235,11 @@ def is_scc_empty1(si, scc_num, acc=None):
return False
return True
def is_empty2(g):
return is_empty2_rec(spot.scc_and_mark_filter(g, g.acc().fin_unit()))
def is_empty2_rec(g):
si = spot.scc_info_with_options(g, spot.scc_info_options_STOP_ON_ACC)
if si.one_accepting_scc() >= 0:
@ -243,6 +251,7 @@ def is_empty2_rec(g):
return False
return True
def is_scc_empty2(si, scc_num, acc=None):
if acc is None: # acceptance isn't forced, get it from the automaton
acc = si.get_aut().acc()
@ -271,6 +280,7 @@ def is_scc_empty2(si, scc_num, acc=None):
return False
return True
def run_bench(automata):
for aut in automata:
# Make sure our three implementations behave identically
@ -287,4 +297,5 @@ def run_bench(automata):
run3 = spot.generic_accepting_run(aut)
assert run3.replay(spot.get_cout()) is True
run_bench([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a11, a360])


@ -33,29 +33,29 @@ e = V[1] & V[2] & -V[3] & V[4]
f = V[0] & -V[3] & V[4]
g = -V[0] | V[1]
assert(bdd_implies(b,a))
assert(not bdd_implies(a,b))
assert(not bdd_implies(c,a))
assert(bdd_implies(a,d))
assert(bdd_implies(b,d))
assert(bdd_implies(c,d))
assert(bdd_implies(d,d))
assert(not bdd_implies(e,d))
assert(not bdd_implies(d,e))
assert(not bdd_implies(f,e))
assert(not bdd_implies(e,f))
assert(bdd_implies(bddfalse,f))
assert(not bdd_implies(bddtrue,f))
assert(bdd_implies(f,bddtrue))
assert(not bdd_implies(f,bddfalse))
assert(bdd_implies(a,g))
assert(bdd_implies(b, a))
assert(not bdd_implies(a, b))
assert(not bdd_implies(c, a))
assert(bdd_implies(a, d))
assert(bdd_implies(b, d))
assert(bdd_implies(c, d))
assert(bdd_implies(d, d))
assert(not bdd_implies(e, d))
assert(not bdd_implies(d, e))
assert(not bdd_implies(f, e))
assert(not bdd_implies(e, f))
assert(bdd_implies(bddfalse, f))
assert(not bdd_implies(bddtrue, f))
assert(bdd_implies(f, bddtrue))
assert(not bdd_implies(f, bddfalse))
assert(bdd_implies(a, g))
a = (-V[2] & (-V[1] | V[0])) | (-V[0] & V[1] & V[2])
b = V[1] | -V[2]
assert(bdd_implies(a,b))
assert(bdd_implies(a, b))
# Cleanup all BDD variables before calling bdd_done(), otherwise
# bdd_delref will be called after bdd_done() and this is unsafe in
# optimized builds.
V = a = b = c = d = e = f = g = 0;
V = a = b = c = d = e = f = g = 0
bdd_done()


@ -34,7 +34,8 @@ d = simp.get_dict()
a = spot.ltl_to_tgba_fm(pf.f, d)
g = spot.parse_infix_boolean('b&c', e)
b = simp.as_bdd(g.f)
buddy.bdd_printset(b); spot.nl_cout()
buddy.bdd_printset(b)
spot.nl_cout()
del g
s0 = a.get_init_state()
@ -45,9 +46,11 @@ while not it.done():
sys.stdout.write("%s\n" % c)
b &= c # `&=' is defined only in buddy. So if this statement works
# it means buddy can grok spot's objects.
buddy.bdd_printset(c); spot.nl_cout()
buddy.bdd_printset(c)
spot.nl_cout()
it.next()
buddy.bdd_printset(b); spot.nl_cout()
buddy.bdd_printset(b)
spot.nl_cout()
sys.stdout.write("%s\n" % b)
del it
del s0


@ -12,7 +12,9 @@ with those stored in the notebook.
from __future__ import print_function
import os,sys,time
import os
import sys
import time
import base64
import re
import pprint
@ -51,6 +53,7 @@ try:
except ImportError:
from IPython.nbformat import v4 as nbformat
def compare_png(a64, b64):
"""compare two b64 PNGs (incomplete)"""
try:
@ -61,6 +64,7 @@ def compare_png(a64, b64):
bdata = base64.decodestring(b64)
return True
def canonicalize(s, type, ignores):
"""sanitize a string for comparison.
@ -195,6 +199,7 @@ def compare_outputs(ref, test, ignores=[]):
fromfile='expected', tofile='effective')))
return False
def _wait_for_ready_backport(kc):
"""Backport BlockingKernelClient.wait_for_ready from IPython 3"""
# Wait for kernel info reply on shell channel
@ -210,6 +215,7 @@ def _wait_for_ready_backport(kc):
except Empty:
break
def run_cell(kc, cell):
kc.execute(cell.source)
# wait for finish, maximum 30s
@ -295,7 +301,6 @@ def test_notebook(ipynb):
print("OK")
successes += 1
print("tested notebook %s" % ipynb)
print(" %3i cells successfully replicated" % successes)
if failures:
@ -308,6 +313,7 @@ def test_notebook(ipynb):
if failures | errors:
sys.exit(1)
if __name__ == '__main__':
for ipynb in sys.argv[1:]:
print("testing %s" % ipynb)


@ -36,7 +36,7 @@ k.new_edge(s3, s3)
k.new_edge(s3, s2)
k.set_init_state(s1)
hoa="""HOA: v1
hoa = """HOA: v1
States: 3
Start: 0
AP: 2 "p1" "p2"


@ -28,6 +28,7 @@ import sys
import getopt
import spot
def usage(prog):
sys.stderr.write("""Usage: %s [OPTIONS...] formula


@ -44,8 +44,8 @@ for str1, isl in l:
assert isl == pf.f.is_leaf()
del pf
assert spot.formula('a').is_leaf();
assert spot.formula('0').is_leaf();
assert spot.formula('a').is_leaf()
assert spot.formula('0').is_leaf()
for str1 in ['a * b', 'a xor b', 'a <-> b']:
pf = spot.parse_infix_boolean(str1, e, False)
@ -114,7 +114,8 @@ for (x, msg) in [('{foo[->bug]}', "treating this goto block as [->]"),
('{a;b b}', "ignoring this"),
('{*', "missing closing brace"),
('{(a', "missing closing parenthesis"),
('{* a', "ignoring trailing garbage and missing closing brace"),
('{* a',
"ignoring trailing garbage and missing closing brace"),
('F(a b)', "ignoring this"),
('F(-)', "treating this parenthetical block as false"),
('F(', "missing closing parenthesis"),


@ -23,7 +23,7 @@
import spot
import sys
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
a = spot.formula.ap('a')
b = spot.formula.ap('b')
c = spot.formula.ap('c')
@ -55,7 +55,7 @@ assert op4 == op2
del op2, op3, op4
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
a = spot.formula.ap('a')
b = spot.formula.ap('b')
c = spot.formula.ap('c')
@ -65,14 +65,15 @@ F = spot.formula.ff()
f1 = spot.formula.Equiv(c, a)
f2 = spot.formula.Implies(a, b)
f3 = spot.formula.Xor(b, c)
f4 = spot.formula.Not(f3); del f3
f4 = spot.formula.Not(f3)
del f3
f5 = spot.formula.Xor(F, c)
del a, b, c, T, F, f1, f2, f4, f5
assert spot.fnode_instances_check()
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
assert str([str(x) for x in spot.formula('a &b & c')]) == "['a', 'b', 'c']"
@ -83,18 +84,23 @@ def switch_g_f(x):
return spot.formula.G(switch_g_f(x[0]))
return x.map(switch_g_f)
f = spot.formula('GFa & XFGb & Fc & G(a | b | Fd)')
assert str(switch_g_f(f)) == 'FGa & XGFb & Gc & F(a | b | Gd)'
x = 0
def count_g(f):
global x
if f._is(spot.op_G):
x += 1
f.traverse(count_g)
assert x == 3
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# The example from tut01.org
@ -106,7 +112,7 @@ Spin syntax: {f:s}
Default for shell: echo {f:q} | ...
LBT for shell: echo {f:lq} | ...
Default for CSV: ...,{f:c},...
Wring, centered: {f:w:~^50}""".format(f = formula)
Wring, centered: {f:w:~^50}""".format(f=formula)
assert res == """\
Default output: a U (b U "$strange[0]=name")
@ -135,10 +141,13 @@ for (input, output) in [('(a&b)<->b', 'b->(a&b)'),
assert(f == output)
assert(spot.are_equivalent(input, output))
def myparse(input):
env = spot.default_environment.instance()
pf = spot.parse_infix_psl(input, env)
return pf.f
# This used to fail, because myparse would return a pointer
# to pf.f inside the destroyed pf.
assert myparse('a U b') == spot.formula('a U b')


@ -27,15 +27,17 @@ m2 = spot.mark_t([2])
m3 = spot.mark_t([3])
mall = spot.mark_t()
def test_rs(acc, rs, expected_res, expected_pairs):
res, p = getattr(acc, 'is_' + rs + '_like')()
assert res == expected_res
if expected_res:
expected_pairs.sort()
p = sorted(p)
for a,b in zip (p, expected_pairs):
for a, b in zip(p, expected_pairs):
assert a.fin == b.fin and a.inf == b.inf
def switch_pairs(pairs):
if pairs == None:
return None
@ -44,15 +46,20 @@ def switch_pairs(pairs):
r.append(spot.rs_pair(p.inf, p.fin))
return r
def test_streett(acc, expected_streett_like, expected_pairs):
test_rs(acc, 'streett', expected_streett_like, expected_pairs)
o_acc = spot.acc_cond(acc.get_acceptance().complement())
test_rs(o_acc, 'rabin', expected_streett_like, switch_pairs(expected_pairs))
test_rs(o_acc, 'rabin', expected_streett_like,
switch_pairs(expected_pairs))
def test_rabin(acc, expected_rabin_like, expected_pairs):
test_rs(acc, 'rabin', expected_rabin_like, expected_pairs)
o_acc = spot.acc_cond(acc.get_acceptance().complement())
test_rs(o_acc, 'streett', expected_rabin_like, switch_pairs(expected_pairs))
test_rs(o_acc, 'streett', expected_rabin_like,
switch_pairs(expected_pairs))
acc = spot.acc_cond(spot.acc_code('Fin(0)'))
test_streett(acc, True, [spot.rs_pair(m0, mall)])
@ -75,12 +82,12 @@ test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m0, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'))
test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'\
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'
'&(Fin(3)&Inf(3))'))
test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),\
test_streett(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),
spot.rs_pair(m3, mall), spot.rs_pair(mall, m3)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'\
acc = spot.acc_cond(spot.acc_code('(Fin(0)|Inf(1))&(Fin(1)|Inf(2))'
'&(Fin(3)&Inf(3))&(Fin(4)|Inf(5)|Inf(6))'))
test_streett(acc, False, None)
@ -102,13 +109,11 @@ test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m0, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'))
test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'\
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'
'|(Fin(3)|Inf(3))'))
test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),\
test_rabin(acc, True, [spot.rs_pair(m0, m1), spot.rs_pair(m1, m2),
spot.rs_pair(m3, mall), spot.rs_pair(mall, m3)])
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'\
acc = spot.acc_cond(spot.acc_code('(Fin(0)&Inf(1))|(Fin(1)&Inf(2))'
'|(Fin(3)|Inf(3))|(Fin(4)&Inf(5)&Inf(6))'))
test_rabin(acc, False, None)


@ -26,5 +26,5 @@ s = ""
for aut2 in si.split_on_sets(0, [0]):
# This call to to_str() used to fail because split_on_sets had not
# registered the atomic propositions of aut
s += aut2.to_str();
s += aut2.to_str()
assert spot.automaton(s).num_states() == 8


@ -24,7 +24,7 @@ formulas = [('(Gp0 | Fp1) M 1', False, True),
('(p1 | (Fp0 R (p1 W p0))) M 1', True, True),
('!G(F(p1 & Fp0) W p1)', False, True),
('X(!p0 W Xp1)', False, False),
('FG(p0)', False, True) ]
('FG(p0)', False, True)]
for f, isd, issd in formulas:
print(f)


@ -24,9 +24,9 @@ import spot
# Test case reduced from a report from Juraj Major <major@fi.muni.cz>.
a = spot.make_twa_graph(spot._bdd_dict)
a.set_acceptance(0, spot.acc_code("t"))
assert(a.prop_state_acc() == True);
assert(a.prop_state_acc() == True)
a.set_acceptance(1, spot.acc_code("Fin(0)"))
assert(a.prop_state_acc() == spot.trival.maybe());
assert(a.prop_state_acc() == spot.trival.maybe())
# Some tests for used_inf_fin_sets(), which return a pair of mark_t.
@ -34,8 +34,8 @@ assert(a.prop_state_acc() == spot.trival.maybe());
assert inf == []
assert fin == [0]
(inf, fin) = spot.acc_code("(Fin(0)|Inf(1))&Fin(2)&Inf(0)").used_inf_fin_sets()
assert inf == [0,1]
assert fin == [0,2]
assert inf == [0, 1]
assert fin == [0, 2]
# is_rabin_like() returns (bool, [(inf, fin), ...])
(b, v) = spot.acc_cond("(Fin(0)&Inf(1))|(Fin(2)&Inf(0))").is_rabin_like()


@ -29,21 +29,21 @@ a = V[0] & -V[1] & V[2] & -V[3]
b = V[0] & V[1] & V[2] & -V[3]
c = -V[0] & V[1] & -V[2] & -V[3]
assert(c == bdd_setxor(a,b))
assert(c == bdd_setxor(b,a))
assert(a == bdd_setxor(b,c))
assert(a == bdd_setxor(c,b))
assert(b == bdd_setxor(a,c))
assert(b == bdd_setxor(c,a))
assert(c == bdd_setxor(a, b))
assert(c == bdd_setxor(b, a))
assert(a == bdd_setxor(b, c))
assert(a == bdd_setxor(c, b))
assert(b == bdd_setxor(a, c))
assert(b == bdd_setxor(c, a))
d = V[1] & V[2] & -V[3] & V[4]
e = V[0] & V[1] & -V[2] & -V[3] & V[4]
assert(e == bdd_setxor(a,d))
assert(e == bdd_setxor(d,a))
assert(e == bdd_setxor(a, d))
assert(e == bdd_setxor(d, a))
# Cleanup all BDD variables before calling bdd_done(), otherwise
# bdd_delref will be called after bdd_done() and this is unsafe in
# optimized builds.
V = a = b = c = d = e = 0;
V = a = b = c = d = e = 0
bdd_done()


@ -19,10 +19,14 @@
import spot
def incl(a,b):
def incl(a, b):
return not b.intersects(spot.dualize(spot.tgba_determinize(a)))
def equiv(a,b):
return incl(a,b) and incl(b,a)
def equiv(a, b):
return incl(a, b) and incl(b, a)
def do_split(f, in_list):
aut = spot.translate(f)
@ -30,7 +34,8 @@ def do_split(f, in_list):
for a in in_list:
inputs &= spot.buddy.bdd_ithvar(aut.get_dict().varnum(spot.formula(a)))
s = spot.split_2step(aut, inputs)
return aut,s
return aut, s
aut, s = do_split('(FG !a) <-> (GF b)', ['a'])
assert equiv(aut, spot.unsplit_2step(s))
@ -62,42 +67,41 @@ aut, s = do_split('! ((G (req -> (F ack))) && (G (go -> (F grant))))', ['go',
assert equiv(aut, spot.unsplit_2step(s))
# FIXME s.to_str() is NOT the same on Debian stable and on Debian unstable
# we should investigate this
#assert s.to_str() == """HOA: v1
#States: 9
#Start: 0
#AP: 4 "ack" "req" "go" "grant"
#acc-name: Buchi
#Acceptance: 1 Inf(0)
#properties: trans-labels explicit-labels state-acc
#--BODY--
#State: 0
#[1&!2] 3
#[!1&!2] 4
#[1&2] 5
#[!1&2] 6
#State: 1
#[t] 7
#State: 2
#[t] 8
#State: 3
#[t] 0
#[!0] 1
#State: 4
#[t] 0
#State: 5
#[t] 0
#[!0] 1
#[!3] 2
#State: 6
#[t] 0
#[!3] 2
#State: 7 {0}
#[!0] 1
#State: 8 {0}
#[!3] 2
#--END--"""
# assert s.to_str() == """HOA: v1
# States: 9
# Start: 0
# AP: 4 "ack" "req" "go" "grant"
# acc-name: Buchi
# Acceptance: 1 Inf(0)
# properties: trans-labels explicit-labels state-acc
# --BODY--
# State: 0
# [1&!2] 3
# [!1&!2] 4
# [1&2] 5
# [!1&2] 6
# State: 1
# [t] 7
# State: 2
# [t] 8
# State: 3
# [t] 0
# [!0] 1
# State: 4
# [t] 0
# State: 5
# [t] 0
# [!0] 1
# [!3] 2
# State: 6
# [t] 0
# [!3] 2
# State: 7 {0}
# [!0] 1
# State: 8 {0}
# [!3] 2
# --END--"""
aut, s = do_split('((G (((! g_0) || (! g_1)) && ((r_0 && (X r_1)) -> (F (g_0 \
&& g_1))))) && (G (r_0 -> F g_0))) && (G (r_1 -> F g_1))', ['r_0', 'r_1'])
assert equiv(aut, spot.unsplit_2step(s))


@ -23,12 +23,14 @@ import os
import shutil
import sys
def tgba(a):
if not a.is_existential():
a = spot.remove_alternation(a)
a = spot.to_generalized_buchi(a)
return a
def test_aut(aut):
stgba = tgba(aut)
assert stgba.equivalent_to(aut)
@ -51,6 +53,7 @@ def test_aut(aut):
# ltldo "ltl2dstar --automata=streett --output-format=hoa\
# --ltl2nba=spin:ltl2tgba@-s %L ->%O" -F- --name=%f -H"
if shutil.which('ltl2dstar') is None:
sys.exit(77)
for a in spot.automata('genltl --eh-patterns --dac-patterns --hkrss-patterns\


@ -24,6 +24,7 @@
import spot
def explain_stut(f):
f = spot.formula(f)
pos = spot.translate(f)
@ -41,6 +42,7 @@ def explain_stut(f):
word2.simplify()
return(word, word2)
# Test from issue #388
w1, w2 = explain_stut('{(a:b) | (a;b)}|->Gc')
assert str(w1) == 'a & !b & !c; cycle{!a & b & !c}'


@ -46,6 +46,7 @@ rg = spot.randltlgenerator(2, opts)
dict = spot.make_bdd_dict()
def produce_phi(rg, n):
phi = []
while len(phi) < n:
@ -54,6 +55,7 @@ def produce_phi(rg, n):
phi.append(f)
return phi
phi1 = produce_phi(rg, 1000)
phi2 = produce_phi(rg, 1000)
inputres = []
@ -62,7 +64,7 @@ for p in zip(phi1, phi2):
inputres.append(spot.formula.Or(p))
a1 = spot.ltl_to_tgba_fm(p[0], dict)
a2 = spot.ltl_to_tgba_fm(p[1], dict)
aut.append(spot.to_generalized_buchi( \
aut.append(spot.to_generalized_buchi(
spot.remove_alternation(spot.sum(a1, a2), True)))
for p in zip(aut, inputres):
@ -75,7 +77,7 @@ for p in zip(phi1, phi2):
inputres.append(spot.formula.And(p))
a1 = spot.ltl_to_tgba_fm(p[0], dict)
a2 = spot.ltl_to_tgba_fm(p[1], dict)
aut.append(spot.to_generalized_buchi( \
aut.append(spot.to_generalized_buchi(
spot.remove_alternation(spot.sum_and(a1, a2), True)))
for p in zip(aut, inputres):


@ -46,6 +46,6 @@ for f in spot.randltl(5, 2000):
assert spot.are_equivalent(n, p)
# Issue #390.
a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det');
b = spot.to_parity(a);
a = spot.translate('!(GFa -> (GFb & GF(!b & !Xb)))', 'gen', 'det')
b = spot.to_parity(a)
assert a.equivalent_to(b)


@ -29,11 +29,13 @@ GF!b
(b & GF!b) | (!b & FGb)
b | (a & XF(b R a)) | (!a & XG(!b U !a))"""
def test_phi(phi):
a = spot.translate(phi, 'TGBA', 'SBAcc')
res = spot.to_weak_alternating(spot.dualize(a))
assert res.equivalent_to(spot.formula.Not(spot.formula(phi)))
for p in phi1.split('\n'):
print(p)
test_phi(p)


@ -39,7 +39,7 @@ assert v4 == spot.trival(spot.trival.maybe_value)
assert v3
assert -v2
assert not -v1
assert not v1;
assert not v1
assert not -v3
for u in (v1, v2, v3):