#!/usr/bin/env python3
"""
Usage:

    run_regression.py [ --proof | --dump ] [ wrapper ] cvc4-binary
        [ benchmark.cvc | benchmark.smt | benchmark.smt2 | benchmark.p | benchmark.sy ]

Runs a benchmark and checks for the correct exit status and output.
"""

import argparse
import difflib
import os
import re
import shlex
import subprocess
import sys
import threading

SCRUBBER = 'SCRUBBER: '
ERROR_SCRUBBER = 'ERROR-SCRUBBER: '
EXPECT = 'EXPECT: '
EXPECT_ERROR = 'EXPECT-ERROR: '
EXIT = 'EXIT: '
COMMAND_LINE = 'COMMAND-LINE: '
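
# These directives are read from comment lines of the benchmark file (or of
# its ".expect" file, see run_regression below). A minimal sketch for an
# .smt2 benchmark, whose comment character is ';' (the directive names are
# the ones above; the flag and the outputs are illustrative):
#
#   ; COMMAND-LINE: --incremental
#   ; EXPECT: sat
#   ; EXIT: 0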


def run_process(args, cwd, timeout, s_input=None):
    """Runs a process with a timeout `timeout` in seconds. `args` are the
    arguments to execute, `cwd` is the working directory and `s_input` is the
    input to be sent to the process over stdin. Returns the output, the error
    output and the exit code of the process. If the process times out, the
    output and the error output are empty and the exit code is 124."""

    proc = subprocess.Popen(
        args,
        cwd=cwd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)

    out = ''
    err = ''
    exit_status = 124
    timer = threading.Timer(timeout, lambda p: p.kill(), [proc])
    try:
        timer.start()
        out, err = proc.communicate(input=s_input)
        if timer.is_alive():
            exit_status = proc.returncode
        else:
            # The timer has fired, i.e. the process was killed because it
            # exceeded the timeout: discard the partial output and report the
            # conventional timeout exit code 124, as promised by the
            # docstring.
            out, err = b'', b''
    finally:
        timer.cancel()

    return out, err, exit_status
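
# A minimal usage sketch for run_process (the command is illustrative): run
# `echo` from the current directory with a 5-second timeout. On Python 3 the
# captured output is a bytes object.
#
#   out, err, status = run_process(['echo', 'hello'], '.', 5.0)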


def run_benchmark(dump, wrapper, scrubber, error_scrubber, cvc4_binary,
                  command_line, benchmark_dir, benchmark_filename, timeout):
    """Runs CVC4 on the file `benchmark_filename` in the directory
    `benchmark_dir` using the binary `cvc4_binary` with the command line
    options `command_line`. The output is scrubbed using `scrubber` and
    `error_scrubber` for stdout and stderr, respectively. If dump is true, the
    function first uses CVC4 to read in and dump the benchmark file and then
    uses that as input."""

    bin_args = wrapper[:]
    bin_args.append(cvc4_binary)

    output = None
    error = None
    exit_status = None
    if dump:
        dump_args = [
            '--preprocess-only', '--dump', 'raw-benchmark',
            '--output-lang=smt2', '-qq'
        ]
        dump_output, _, _ = run_process(
            bin_args + command_line + dump_args + [benchmark_filename],
            benchmark_dir, timeout)
        output, error, exit_status = run_process(
            bin_args + command_line + ['--lang=smt2', '-'], benchmark_dir,
            timeout, dump_output)
    else:
        output, error, exit_status = run_process(
            bin_args + command_line + [benchmark_filename], benchmark_dir,
            timeout)

    # If a scrubber command has been specified then apply it to the output.
    if scrubber:
        output, _, _ = run_process(
            shlex.split(scrubber), benchmark_dir, timeout, output)
    if error_scrubber:
        error, _, _ = run_process(
            shlex.split(error_scrubber), benchmark_dir, timeout, error)
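
    # A scrubber is an arbitrary command that reads the raw output on stdin
    # and writes the normalized output to stdout. For example, a benchmark
    # might declare (the sed program is an illustrative sketch):
    #
    #   ; SCRUBBER: sed -e 's/in [0-9]*\.[0-9]* seconds/in TIME seconds/'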

    # Popen in Python 3 returns a bytes object instead of a string for
    # stdout/stderr.
    if isinstance(output, bytes):
        output = output.decode()
    if isinstance(error, bytes):
        error = error.decode()
    return (output.strip(), error.strip(), exit_status)


def run_regression(proof, dump, wrapper, cvc4_binary, benchmark_path, timeout):
    """Determines the expected output for a benchmark, runs CVC4 on it and then
    checks whether the output corresponds to the expected output. Optionally
    uses a wrapper `wrapper`, tests proof generation (if proof is true), or
    dumps a benchmark and uses that as the input (if dump is true)."""

    if not os.access(cvc4_binary, os.X_OK):
        sys.exit(
            '"{}" does not exist or is not executable'.format(cvc4_binary))
    if not os.path.isfile(benchmark_path):
        sys.exit('"{}" does not exist or is not a file'.format(benchmark_path))

    basic_command_line_args = []

    benchmark_basename = os.path.basename(benchmark_path)
    benchmark_filename, benchmark_ext = os.path.splitext(benchmark_basename)
    benchmark_dir = os.path.dirname(benchmark_path)
    comment_char = '%'
    status_regex = None
    status_to_output = lambda s: s
    if benchmark_ext == '.smt':
        status_regex = r':status\s*(sat|unsat)'
        comment_char = ';'
    elif benchmark_ext == '.smt2':
        status_regex = r'set-info\s*:status\s*(sat|unsat)'
        comment_char = ';'
    elif benchmark_ext == '.cvc':
        pass
    elif benchmark_ext == '.p':
        basic_command_line_args.append('--finite-model-find')
        status_regex = r'% Status\s*:\s*(Theorem|Unsatisfiable|CounterSatisfiable|Satisfiable)'
        status_to_output = lambda s: '% SZS status {} for {}'.format(s, benchmark_filename)
    elif benchmark_ext == '.sy':
        comment_char = ';'
        # Do not use proofs/unsat-cores with .sy files
        proof = False
    else:
        sys.exit('"{}" must be *.cvc or *.smt or *.smt2 or *.p or *.sy'.format(
            benchmark_basename))
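
    # As an illustrative example of the .p mapping above: a TPTP header line
    # "% Status : Theorem" yields the expected output
    # "% SZS status Theorem for <benchmark name>".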

    # If there is an ".expect" file for the benchmark, read the metadata
    # from there, otherwise from the benchmark file.
    metadata_filename = benchmark_path + '.expect'
    if os.path.isfile(metadata_filename):
        comment_char = '%'
    else:
        metadata_filename = benchmark_path

    metadata_lines = None
    with open(metadata_filename, 'r') as metadata_file:
        metadata_lines = metadata_file.readlines()

    benchmark_content = None
    if metadata_filename == benchmark_path:
        benchmark_content = ''.join(metadata_lines)
    else:
        with open(benchmark_path, 'r') as benchmark_file:
            benchmark_content = benchmark_file.read()

    # Extract the metadata for the benchmark.
    scrubber = None
    error_scrubber = None
    expected_output = ''
    expected_error = ''
    expected_exit_status = None
    command_lines = []
    for line in metadata_lines:
        # Skip lines that do not start with a comment character.
        if line[0] != comment_char:
            continue
        line = line[1:].lstrip()

        if line.startswith(SCRUBBER):
            scrubber = line[len(SCRUBBER):]
        elif line.startswith(ERROR_SCRUBBER):
            error_scrubber = line[len(ERROR_SCRUBBER):]
        elif line.startswith(EXPECT):
            expected_output += line[len(EXPECT):]
        elif line.startswith(EXPECT_ERROR):
            expected_error += line[len(EXPECT_ERROR):]
        elif line.startswith(EXIT):
            expected_exit_status = int(line[len(EXIT):])
        elif line.startswith(COMMAND_LINE):
            command_lines.append(line[len(COMMAND_LINE):])
    expected_output = expected_output.strip()
    expected_error = expected_error.strip()

    # If the expected output/error was not defined in the metadata for the
    # benchmark, try to extract the information from the benchmark itself.
    if expected_output == '' and expected_error == '':
        match = None
        if status_regex:
            match = re.search(status_regex, benchmark_content)

        if match:
            expected_output = status_to_output(match.group(1))
        elif expected_exit_status is None:
            # If there is no expected output/error and the exit status has not
            # been set explicitly, the benchmark is invalid.
            sys.exit('Cannot determine status of "{}"'.format(benchmark_path))
    if expected_exit_status is None:
        expected_exit_status = 0

    if 'CVC4_REGRESSION_ARGS' in os.environ:
        basic_command_line_args += shlex.split(
            os.environ['CVC4_REGRESSION_ARGS'])
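
    # For example, invoking the tests with CVC4_REGRESSION_ARGS='--stats'
    # (an illustrative flag) appends that option to every configuration that
    # is run below.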

    if not proof and ('(get-unsat-core)' in benchmark_content
                      or '(get-unsat-assumptions)' in benchmark_content
                      or '--check-proofs' in basic_command_line_args
                      or '--dump-proofs' in basic_command_line_args):
        print(
            '1..0 # Skipped regression: unsat cores not supported without proof support'
        )
        return

    if not command_lines:
        command_lines.append('')

    command_line_args_configs = []
    for command_line in command_lines:
        args = shlex.split(command_line)
        if proof or ('--check-proofs' not in args
                     and '--dump-proofs' not in args):
            all_args = basic_command_line_args + args
            command_line_args_configs.append(all_args)

            extra_command_line_args = []
            if benchmark_ext == '.sy' and \
                '--no-check-synth-sol' not in all_args and \
                '--check-synth-sol' not in all_args:
                extra_command_line_args = ['--check-synth-sol']
            if re.search(r'^(sat|invalid|unknown)$', expected_output) and \
               '--no-check-models' not in all_args:
                extra_command_line_args = ['--check-models']
            if proof and re.search(r'^(unsat|valid)$', expected_output):
                if '--no-check-proofs' not in all_args and \
                   '--incremental' not in all_args and \
                   '--unconstrained-simp' not in all_args and \
                   not cvc4_binary.endswith('pcvc4'):
                    extra_command_line_args = [
                        '--check-proofs', '--no-bv-eq', '--no-bv-ineq',
                        '--no-bv-algebraic'
                    ]
                if '--no-check-unsat-cores' not in all_args and \
                   '--incremental' not in all_args and \
                   '--unconstrained-simp' not in all_args and \
                   not cvc4_binary.endswith('pcvc4'):
                    extra_command_line_args += ['--check-unsat-cores']
            if extra_command_line_args:
                command_line_args_configs.append(
                    all_args + extra_command_line_args)

    # Run CVC4 on the benchmark with the different option sets and check
    # whether the exit status and the output on stdout and stderr are as
    # expected.
    print('1..{}'.format(len(command_line_args_configs)))
    print('# Starting')
    for command_line_args in command_line_args_configs:
        output, error, exit_status = run_benchmark(
            dump, wrapper, scrubber, error_scrubber, cvc4_binary,
            command_line_args, benchmark_dir, benchmark_basename, timeout)
        if output != expected_output:
            print(
                'not ok - Differences between expected and actual output on stdout - Flags: {}'.
                format(command_line_args))
            for line in difflib.context_diff(output.splitlines(),
                                             expected_output.splitlines()):
                print(line)
        elif error != expected_error:
            print(
                'not ok - Differences between expected and actual output on stderr - Flags: {}'.
                format(command_line_args))
            for line in difflib.context_diff(error.splitlines(),
                                             expected_error.splitlines()):
                print(line)
        elif expected_exit_status != exit_status:
            print(
                'not ok - Expected exit status "{}" but got "{}" - Flags: {}'.
                format(expected_exit_status, exit_status, command_line_args))
        else:
            print('ok - Flags: {}'.format(command_line_args))


def main():
    """Parses the command line arguments and then calls the core of the
    script."""

    parser = argparse.ArgumentParser(
        description=
        'Runs a benchmark and checks for the correct exit status and output.')
    parser.add_argument('--proof', action='store_true')
    parser.add_argument('--dump', action='store_true')
    parser.add_argument('wrapper', nargs='*')
    parser.add_argument('cvc4_binary')
    parser.add_argument('benchmark')
    args = parser.parse_args()
    cvc4_binary = os.path.abspath(args.cvc4_binary)

    wrapper = args.wrapper
    if os.environ.get('VALGRIND') == '1' and not wrapper:
        wrapper = ['libtool', '--mode=execute', 'valgrind']
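
    # For example (a sketch; the paths are illustrative):
    #   VALGRIND=1 ./run_regression.py ./cvc4 regress0/simple.smt2
    # runs the solver under valgrind via libtool.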

    timeout = float(os.getenv('TEST_TIMEOUT', '600'))

    run_regression(args.proof, args.dump, wrapper, cvc4_binary, args.benchmark,
                   timeout)


if __name__ == "__main__":
    main()