ltlcross: report exit_status and exit_code columns in CSV and JSON

* src/bin/ltlcross.cc: Report exit_status and exit_code columns in CSV
and JSON files.  Also output lines for failed translations, and add
a --omit-missing option to disable that.  Move the time column right
after exit_status and exit_code.
* src/bin/man/ltlcross.x: Document each column of the output.
* bench/ltl2tgba/tools: Use the "{name}cmd" notation.
* bench/ltl2tgba/sum.py: Adjust to the new columns.
* bench/ltl2tgba/README: Update to point to the man page for a
description of the columns.
* bench/ltl2tgba/Makefile.am: Build results.pdf as announced in the
README.
* bench/spin13/html.bottom: Update code to ignore these two new
columns and lines with null values.
* src/tgbatest/ltlcross3.test: Add tests.
* doc/org/ltlcross.org: Adjust examples.
* NEWS: Mention this.
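
As a quick, hedged illustration of what the new columns make possible
(an editor's sketch, not part of this commit): the snippet below loads
an ltlcross CSV and splits rows into successful and failed
translations.  Only the column names exit_status and exit_code come
from the commit; the output file name, the "formula" column name, and
the exact status string "ok" are assumptions.

import csv

# Hypothetical output of "ltlcross ... --csv=results.csv" (run without
# --omit-missing, so failed translations still get a line with empty
# statistics and a non-"ok" exit_status).
with open("results.csv", newline="") as f:
    rows = list(csv.DictReader(f))

ok = [r for r in rows if r["exit_status"] == "ok"]
failed = [r for r in rows if r["exit_status"] != "ok"]

print(len(ok), "successful and", len(failed), "failed translations")
for r in failed:
    # Column names other than exit_status/exit_code are assumed here.
    print(r["formula"], r["exit_status"], r["exit_code"])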
Alexandre Duret-Lutz 2013-11-19 23:22:29 +01:00
parent 686a45484d
commit f65c621a55
10 changed files with 430 additions and 202 deletions

bench/ltl2tgba/Makefile.am

@@ -34,9 +34,11 @@ OUTLOG = $(OUTPUTS:=.log)
CLEANFILES = $(OUTCSV) $(OUTJSON) $(OUTLOG) \
results.pdf results.aux results.log results.tex
.PHONY = run
.PHONY = run json
run: $(OUTJSON)
run: results.pdf
json: $(OUTJSON)
deps = $(srcdir)/tools \
$(top_srcdir)/configure.ac \

bench/ltl2tgba/README

@@ -117,40 +117,16 @@ this benchmark.
Reading the summaries
=======================
The various outputs (CSV, JSON, our LaTeX) may use the following
column headers:
* input: formula translated (as a string in the CSV output, and as
an index into the input table in the JSON output)
* tool: tool used to translated this formula (idem)
* states: number of states of the resulting automaton
* edges: number of physical arcs between these states
* transitions: number of logical transitions from one state to the other
(for instance if the atomic propositions are 'a' and 'b', and
edge labeled by 'a' represents two transitions labeled by
'a&b' and 'a&!b')
* acc: number of acceptance sets used; it should always be one
in this automaton since we are producing (degeneralized)
Büchi automata.
* scc: number of strongly conncected components in the produced automaton
* nondetstates: number of nondeterministic states
* nondeterministic: 0 if the automaton is deterministic
(no nondeterministic state), 1 otherwise
* time: time required by the translation (although this is measured with
the highest-resolution clock available, it is "wall time", so it
can be affected by the machine's load).
* product_states: number of states in a product if the automaton with some
random Kripke structure (one unique Kripke structure is generated
for each formula and used with automata produced by all tools)
* product_transitions: number of transitions in that product
* product_scc: number of strongly connected componebts in that product
The various outputs (CSV, JSON, our LaTeX) use column headers
that are described in doc/userdoc/ltlcross.html and also
in the ltlcross man page.
The summary tables produced by sum.py accumulate all these results for
all formulae, tool by tool. They display an additional column, called
'count', giving the number of formulae successfully translated (the
missing formulae correspond to timeouts).
For all these values (except count), the sammler number the better.
For all these values (except count), smaller numbers are better.
More details about ltlcross (used to produce these outputs) can be

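To make the 'count' column and the per-tool accumulation concrete,
here is a rough Python sketch (an editor's illustration, not code
shipped with the benchmark).  It assumes the JSON layout that sum.py
reads below (fields/tool/results, with a tool index in each row and
null statistics for failed translations) and a hypothetical file name
results.json; exit_status and exit_code are skipped because they are
not meaningful to sum.

import json

with open("results.json") as f:   # hypothetical file name
    data = json.load(f)

fields = data["fields"]
toolcol = fields.index("tool")
statescol = fields.index("states")
# Identity and status columns should not be summed (this mirrors the
# datacols = range(4, ncols) change in sum.py).
datacols = [i for i, name in enumerate(fields)
            if name not in ("formula", "tool", "exit_status", "exit_code")]

sums = [{"count": 0} for _ in data["tool"]]
for row in data["results"]:
    if row[statescol] is None:    # failed translation: no statistics
        continue
    acc = sums[row[toolcol]]
    acc["count"] += 1
    for c in datacols:
        acc[fields[c]] = acc.get(fields[c], 0) + row[c]

for name, acc in zip(data["tool"], sums):
    print(name, acc)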
bench/ltl2tgba/sum.py

@@ -47,23 +47,26 @@ def process_file(filename):
ncols = len(data['fields'])
ntools = len(data['tool'])
datacols = range(2, ncols)
datacols = range(4, ncols)
fields = { name:index for index,name in enumerate(data["fields"]) }
toolcol = fields['tool']
inputcol = fields['formula']
statescol = fields['states']
inputs = data["inputs"]
# Index results by tool, then input
results = { t:{} for t in range(0, ntools) }
for l in data["results"]:
if l[statescol] == None:
continue
results[l[toolcol]][l[inputcol]] = l
for i in range(0, ntools):
# Remove any leading directory, and trailing %...
name = data["tool"][i]
name = name[name.rfind('/', 0, name.find(' ')) + 1:]
data["tool"][i] = latex_escape(name[0:name.find('%')])
data["tool"][i] = latex_escape(name[0:(name+'%').find('%')])
timecol = fields['time']
@@ -72,7 +75,7 @@ def process_file(filename):
\subsection*{Cumulative summary}''' % latex_escape(filename))
print('\\noindent\\begin{tabular}{l' + ('r' * (ncols - 1)) + '}\n',
" & ".join(rot(latex_escape(["tool", "count"] + data["fields"][2:]))),
" & ".join(rot(latex_escape(["tool", "count"] + data["fields"][4:]))),
"\\\\")
for i in range(0, ntools):
# Compute sums over each column.
@@ -96,7 +99,6 @@ states and more transitions.
header += 'c'
header += '}'
statescol = fields['states']
transcol = fields['transitions']
print(header)

bench/ltl2tgba/tools

@@ -25,17 +25,17 @@
set dummy
# Add third-party tools if they are available.
test -n "$SPIN" && set "$@" "$SPIN -f %s >%N"
test -n "$LTL2BA" && set "$@" "$LTL2BA -f %s >%N"
test -n "$LTL3BA" && set "$@" "$LTL3BA -f %s >%N" \
"$LTL3BA -M -f %s >%N" \
"$LTL3BA -S -f %s >%N" \
"$LTL3BA -S -M -f %s >%N"
test -n "$SPIN" && set "$@" "{spin} $SPIN -f %s >%N"
test -n "$LTL2BA" && set "$@" "{ltl2ba} $LTL2BA -f %s >%N"
test -n "$LTL3BA" && set "$@" "{ltl3ba} $LTL3BA -f %s >%N" \
"{ltl3ba-M} $LTL3BA -M -f %s >%N" \
"{ltl3ba-S} $LTL3BA -S -f %s >%N" \
"{ltl3ba-SM} $LTL3BA -S -M -f %s >%N"
# Use -s to output a neverclaim, like the other tools.
set "$@" \
"$LTL2TGBA --det -s %s >%N" \
"$LTL2TGBA --small -s %s >%N"
"{ltl2tgba-D} $LTL2TGBA --det -s %s >%N" \
"{ltl2tgba} $LTL2TGBA --small -s %s >%N"
# If you want to add your own tool, you can add it here.
# See 'man ltlcross' for the list of %-escapes you may use

bench/spin13/html.bottom

@@ -303,7 +303,7 @@ var tfield;
for (var m = 0; m < nfields; ++m)
{
if (m == ifield || m == tfield)
if (!is_datacol(m))
continue;
var tmp = [];
for (var t = 0; t < ntools; ++t)
@@ -352,16 +352,29 @@ var tfield;
for (var c = 0; c < nfields; ++c)
{
var name = rawdata.fields[c];
results.addColumn('number', name, name);
if (name == 'exit_status')
results.addColumn('string', name, name);
else
results.addColumn('number', name, name);
fields[name] = c;
}
// FIXME: we should uses rawdata.inputs to set ifield and tfield
tfield = fields['tool'];
ifield = fields['formula'];
sfield = fields['states'];
esfield = fields['exit_status'];
ecfield = fields['exit_code'];
is_datacol = function(c) {
return (c != ifield && c != tfield && c != esfield && c != ecfield);
}
nresults = rawdata.results.length;
for (var r = 0; r < nresults; ++r)
{
var row = rawdata.results[r];
if (row[sfield] == null)
continue;
results.addRow(row);
hashres[[row[tfield],row[ifield]]] = row;
}
@@ -412,7 +425,7 @@ var tfield;
var col = 2;
for (var c = 0; c < nfields; ++c)
{
if (c != ifield && c != tfield)
if (is_datacol(c))
{
aggreg.push({column:c, aggregation: google.visualization.data.sum, type: 'number'})
aggreg.push({column:c, aggregation: google.visualization.data.avg, type: 'number'})
@@ -469,7 +482,7 @@ var tfield;
var pos = 3;
for (var m = 0; m < nfields; ++m)
{
if (m != ifield && m != tfield)
if (is_datacol(m))
{
var row = [rawdata.fields[m]];
var max = sumresults.getColumnRange(pos + 2)['max'];
@@ -498,7 +511,7 @@ var tfield;
sel3.append($("<option>").attr('value', -1).text('(nothing)'));
for (var c = 0; c < nfields; ++c)
{
if (c != ifield && c != tfield)
if (is_datacol(c))
{
sel.append($("<option>").attr('value', c).text(rawdata.fields[c]));
sel2.append($("<option>").attr('value', c).text(rawdata.fields[c]));
@@ -525,7 +538,7 @@ var tfield;
var first = true;
for (var c = 0; c < nfields; ++c)
{
if (c != ifield && c != tfield)
if (is_datacol(c))
{
sel.append($("<option>").attr('value', c).text(rawdata.fields[c]));
if (first)