various typos
* bench/dtgbasat/gen.py, spot/twaalgos/complement.hh: Fix looser->loser
  and loosing->losing.
* tests/sanity/style.test: Catch 'an uni[^n]'.
* spot/ta/ta.hh, spot/taalgos/tgba2ta.cc, spot/taalgos/tgba2ta.hh,
  spot/twa/twagraph.cc, spot/twaalgos/complement.hh,
  spot/twaalgos/sccinfo.cc: Fix various occurrences of this pattern.
parent b910330a78
commit 1ed6e518dd
8 changed files with 17 additions and 18 deletions
bench/dtgbasat/gen.py
@@ -115,7 +115,7 @@ def ne(string):
 
 
 # --------------------------------------------------------------SUMMARY
-def add_winner(res, winner, looser):
+def add_winner(res, winner, loser):
     """
     Each time this function is called, it increments the scrore
     of one method against another one.
@@ -125,7 +125,7 @@ def add_winner(res, winner, looser):
     for i in range(1, res_length): # except the first row (header)
         if winner in res[i]:
             for j in range(1, header_length):
-                if looser in res[0][j]:
+                if loser in res[0][j]:
                     if type(res[i][j]) is str:
                         res[i][j] = 1
                     else:
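For readers who do not know the benchmark: loser indexes a score table in which res[0] is a header row of method names and res[i][j] counts how often the method of row i beat the method of column j. Below is a toy reconstruction of that update, not code lifted from gen.py; the placeholder strings and the else branch are assumptions.

# Toy reconstruction of the add_winner() update (layout assumed, not
# taken verbatim from bench/dtgbasat/gen.py).
res = [[' ', 'm1', 'm2'],    # header row: method names
       ['m1', ' ', ' '],     # wins of m1 against each column's method
       ['m2', ' ', ' ']]     # wins of m2 against each column's method

def add_winner(res, winner, loser):
    for i in range(1, len(res)):            # skip the header row
        if winner in res[i]:                # row of the winning method
            for j in range(1, len(res[0])):
                if loser in res[0][j]:      # column of the losing method
                    if type(res[i][j]) is str:
                        res[i][j] = 1       # first recorded win
                    else:
                        res[i][j] += 1      # assumed: count further wins

add_winner(res, 'm1', 'm2')
add_winner(res, 'm1', 'm2')
print(res[1][2])  # 2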
spot/ta/ta.hh
@@ -1,5 +1,5 @@
 // -*- coding: utf-8 -*-
-// Copyright (C) 2010, 2012, 2013, 2014, 2015, 2016 Laboratoire de Recherche et
+// Copyright (C) 2010, 2012-2017 Laboratoire de Recherche et
 // Développement de l'Epita (LRDE).
 //
 // This file is part of Spot, a model checking library.
@@ -99,7 +99,7 @@ namespace spot
 /// \brief Get the artificial initial state set of the automaton.
 /// Return 0 if this artificial state is not implemented
 /// (in this case, use \c get_initial_states_set)
-/// The aim of adding this state is to have an unique initial state. This
+/// The aim of adding this state is to have a unique initial state. This
 /// artificial initial state have one transition to each real initial state,
 /// and this transition is labeled by the corresponding initial condition.
 /// (For more details, see the paper cited above)
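The construction described in this comment does not depend on Spot's TA types: given several real initial states, one fresh state with a transition to each of them, labeled by that state's initial condition, yields a single entry point. A minimal, Spot-independent sketch; the tuple/dict representation and the 'init*' name are purely illustrative.

# Spot-independent sketch: add an artificial initial state to an automaton
# that has several initial states.  The representation below is made up
# for illustration; it is not Spot's TA interface.
def add_artificial_initial_state(edges, initial):
    """edges: list of (src, label, dst); initial: {state: initial condition}."""
    art = 'init*'                       # fresh artificial initial state
    for state, cond in initial.items():
        # one transition per real initial state, labeled by its
        # initial condition
        edges.append((art, cond, state))
    return art                          # the unique initial state

edges = [('s0', 'a', 's1'), ('s1', 'b', 's0')]
initial = {'s0': 'a', 's1': '!a'}
print(add_artificial_initial_state(edges, initial))
print(edges)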
spot/taalgos/tgba2ta.cc
@@ -94,7 +94,7 @@ namespace spot
 && (!dest->is_accepting_state()) && (!dest_trans_empty))
 transitions_to_livelock_states->push_front(*it_trans);
 
-// optimization to have, after minimization, an unique
+// optimization to have, after minimization, a unique
 // livelock state which has no successors
 if (dest->is_livelock_accepting_state() && (dest_trans_empty))
 dest->set_accepting_state(false);
spot/taalgos/tgba2ta.hh
@@ -1,5 +1,5 @@
 // -*- coding: utf-8 -*-
-// Copyright (C) 2010, 2012, 2013, 2014, 2015 Laboratoire de Recherche et
+// Copyright (C) 2010, 2012-2015, 2017 Laboratoire de Recherche et
 // Développement de l'Epita (LRDE).
 //
 // This file is part of Spot, a model checking library.
@@ -58,7 +58,7 @@ namespace spot
 /// for TA (spot::ta_check::check) can also be used to check GTA.
 ///
 /// \param artificial_initial_state_mode When set, the algorithm will build
-/// a TA automaton with an unique initial state. This
+/// a TA automaton with a unique initial state. This
 /// artificial initial state have one transition to each real initial state,
 /// and this transition is labeled by the corresponding initial condition.
 /// (see spot::ta::get_artificial_initial_state())
spot/twa/twagraph.cc
@@ -449,7 +449,7 @@ namespace spot
 // calling graph::degrag_states() to finish with #1. We clear
 // the "dests vector" of the current automaton, recreate all
 // the new destination groups using a univ_dest_mapper to
-// simplify an unify them, and extend newst with some new
+// simplify and unify them, and extend newst with some new
 // entries that will point the those new universal destination
 // so that graph::defrag_states() does not have to deal with
 // universal destination in any way.
spot/twaalgos/complement.hh
@@ -1,6 +1,6 @@
 // -*- coding: utf-8 -*-
-// Copyright (C) 2013, 2014, 2015 Laboratoire de Recherche et Développement
-// de l'Epita.
+// Copyright (C) 2013-2015, 2017 Laboratoire de Recherche et
+// Développement de l'Epita.
 //
 // This file is part of Spot, a model checking library.
 //
@@ -35,7 +35,7 @@ namespace spot
 ///
 /// Functions like to_generalized_buchi() or remove_fin() are
 /// frequently called after dtwa_complement() to obtain an easier
-/// acceptance condition (maybe at the cost of loosing determinism.)
+/// acceptance condition (maybe at the cost of losing determinism.)
 SPOT_API twa_graph_ptr
 dtwa_complement(const const_twa_graph_ptr& aut);
 }
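The comment above describes the usual pipeline: complement a deterministic automaton by dualizing its acceptance, then simplify the acceptance afterwards. The sketch below goes through Spot's Python bindings and assumes they expose dtwa_complement() and remove_fin() like the C++ headers do, and that translate() accepts the 'deterministic' option; both should be checked against the installed version.

import spot

# Deterministic automaton for a small LTL formula.
aut = spot.translate('GFa', 'deterministic')

# Complement by dualizing the acceptance condition (stays deterministic,
# assuming dtwa_complement() is exposed by the bindings).
neg = spot.dtwa_complement(aut)
print(neg.get_acceptance())      # typically a Fin(...) condition

# Trade that acceptance for an easier one, maybe at the cost of
# losing determinism.
simpler = spot.remove_fin(neg)
print(simpler.get_acceptance())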
spot/twaalgos/sccinfo.cc
@@ -182,7 +182,7 @@ namespace spot
 unsigned dest = e.dst;
 if ((int) dest < 0)
   {
-    // Iterate over all destinations of an universal edge.
+    // Iterate over all destinations of a universal edge.
     if (todo_.top().univ_pos == 0)
       todo_.top().univ_pos = ~dest + 1;
     const auto& v = gr.dests_vector();
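The (int) dest < 0 test works because a universal edge does not store a single destination: the hunk suggests dst holds the bitwise complement of an index into dests_vector(), where each group is a size followed by its member states, so ~dest + 1 points at the group's first destination. A stand-alone sketch of that decoding, with the layout assumed from the hunk rather than copied from Spot's graph code:

# Decode a destination field that may be either an ordinary state number
# or a complemented index into a "dests vector" (assumed layout:
# [size, d1, d2, ..., size, d1, ...]).
def destinations(dst, dests_vector):
    if dst >= 0:                      # ordinary edge: one destination
        return [dst]
    pos = ~dst                        # index of the group's size entry
    size = dests_vector[pos]
    return dests_vector[pos + 1:pos + 1 + size]

dests_vector = [3, 4, 5, 6]            # one group: states 4, 5 and 6
print(destinations(2, dests_vector))   # [2]
print(destinations(~0, dests_vector))  # [4, 5, 6]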
tests/sanity/style.test
@@ -81,12 +81,11 @@ for dir in "$TOP/spot" "$TOP/bin" "$TOP/tests"; do
 fail=false
 
 # Check this before stripping comments and strings.
-$GREP -i 'accepting cond' $file &&
-diag 'accepting -> acceptance'
-
-# Check this before stripping comments and strings.
-$GREP -i 'dictionnar[yi]' $file &&
-diag 'dictionnary -> dictionary'
+$GREP -i 'accepting cond' $file && diag 'accepting -> acceptance'
+$GREP -i 'dictionnar[yi]' $file && diag 'dictionnary -> dictionary'
+# "an uninstalled" seems to be the exception so far, but we want
+# "a unique", "a universal", etc.
+$GREP -i 'an uni[^n]' $file && diag 'an uni... -> a uni...'
 
 $GREP -i 'version 2 of the License' $file &&
 diag 'license text should refer to version 2'
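The new rule relies on a character class instead of a word boundary: 'an uni[^n]' flags "an unique" and "an universal" but lets "an uninstalled" (the one legitimate phrase so far) through. A quick check of that behaviour, sketched with Python's re module rather than the script's grep:

import re

# Same pattern as the new style.test rule, case-insensitive like grep -i.
pattern = re.compile(r'an uni[^n]', re.IGNORECASE)

for phrase in ('to have an unique initial state',
               'an universal edge',
               'use an uninstalled copy of Spot'):
    hit = 'an uni... -> a uni...' if pattern.search(phrase) else 'ok'
    print(f'{phrase!r}: {hit}')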