summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndrew Reynolds <andrew.j.reynolds@gmail.com>2018-09-28 11:42:49 -0500
committerGitHub <noreply@github.com>2018-09-28 11:42:49 -0500
commit676fbb54488a995818773f05a6ce860916ee6667 (patch)
tree4a0d335b8438c593306da49cd930e95330ddc625
parent37f8a0a52fac6933af7bace33d8da648901a7bf9 (diff)
parent8c9e1ce5939737bac95cf16f59e6fc7fc856940b (diff)
Merge branch 'master' into emptyEqemptyEq
-rw-r--r--CMakeLists.txt27
-rw-r--r--INSTALL259
-rw-r--r--INSTALL.md325
-rw-r--r--cmake/ConfigDebug.cmake5
-rw-r--r--cmake/ConfigProduction.cmake5
-rw-r--r--cmake/ConfigTesting.cmake5
-rw-r--r--cmake/FindCxxTest.cmake45
-rwxr-xr-xconfigure.sh9
-rw-r--r--examples/README26
-rw-r--r--examples/hashsmt/sha1_inversion.cpp6
-rw-r--r--src/base/CMakeLists.txt13
-rw-r--r--src/expr/node_algorithm.cpp2
-rw-r--r--src/parser/smt2/Smt2.g54
-rw-r--r--src/parser/smt2/smt2.cpp62
-rw-r--r--src/parser/smt2/smt2.h28
-rw-r--r--src/proof/proof_manager.cpp17
-rw-r--r--src/proof/proof_manager.h6
-rw-r--r--src/theory/arith/nonlinear_extension.cpp142
-rw-r--r--src/theory/arith/nonlinear_extension.h35
-rw-r--r--src/theory/datatypes/datatypes_rewriter.h11
-rw-r--r--src/theory/datatypes/datatypes_sygus.cpp446
-rw-r--r--src/theory/datatypes/datatypes_sygus.h98
-rw-r--r--src/theory/quantifiers/extended_rewrite.cpp4
-rw-r--r--src/theory/quantifiers/quantifiers_rewriter.cpp42
-rw-r--r--src/theory/quantifiers/sygus/cegis.cpp71
-rw-r--r--src/theory/quantifiers/sygus/cegis.h10
-rw-r--r--src/theory/quantifiers/sygus/cegis_unif.cpp57
-rw-r--r--src/theory/quantifiers/sygus/sygus_module.h10
-rw-r--r--src/theory/quantifiers/sygus/sygus_pbe.cpp44
-rw-r--r--src/theory/quantifiers/sygus/sygus_pbe.h5
-rw-r--r--src/theory/quantifiers/sygus/sygus_unif_io.cpp25
-rw-r--r--src/theory/quantifiers/sygus/synth_conjecture.cpp80
-rw-r--r--src/theory/quantifiers/sygus/synth_conjecture.h18
-rw-r--r--src/theory/quantifiers/sygus/synth_engine.cpp3
-rw-r--r--src/theory/quantifiers/sygus/term_database_sygus.cpp225
-rw-r--r--src/theory/quantifiers/sygus/term_database_sygus.h79
-rw-r--r--test/CMakeLists.txt39
-rw-r--r--test/java/CMakeLists.txt8
-rw-r--r--test/regress/CMakeLists.txt12
-rw-r--r--test/regress/Makefile.tests4
-rw-r--r--test/regress/regress0/quantifiers/qarray-sel-over-store.smt232
-rw-r--r--test/regress/regress0/strings/hconst-092618.smt25
-rw-r--r--test/regress/regress0/sygus/inv-different-var-order.sy22
-rw-r--r--test/regress/regress1/nl/exp-approx.smt215
-rw-r--r--test/system/CMakeLists.txt13
-rw-r--r--test/unit/CMakeLists.txt26
46 files changed, 1824 insertions, 651 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 453f1bcf7..86007b39a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -90,6 +90,7 @@ set(ABC_DIR "" CACHE STRING "Set ABC install directory")
set(ANTLR_DIR "" CACHE STRING "Set ANTLR3 install directory")
set(CADICAL_DIR "" CACHE STRING "Set CaDiCaL install directory")
set(CRYPTOMINISAT_DIR "" CACHE STRING "Set CryptoMiniSat install directory")
+set(CXXTEST_DIR "" CACHE STRING "Set CxxTest install directory")
set(GLPK_DIR "" CACHE STRING "Set GLPK install directory")
set(GMP_DIR "" CACHE STRING "Set GMP install directory")
set(LFSC_DIR "" CACHE STRING "Set LFSC install directory")
@@ -217,6 +218,24 @@ endif()
enable_testing()
#-----------------------------------------------------------------------------#
+# Check GCC version.
+#
+# GCC version 4.5.1 builds MiniSat incorrectly with -O2, which results in
+# incorrect answers.
+
+if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
+ execute_process(
+ COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
+ OUTPUT_VARIABLE GCC_VERSION
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(GCC_VERSION VERSION_EQUAL "4.5.1")
+ message(FATAL_ERROR
+ "GCC 4.5.1's optimizer is known to build MiniSat incorrectly "
+ "(and by extension CVC4).")
+ endif()
+endif()
+
+#-----------------------------------------------------------------------------#
# Check options, find packages and configure build.
if(USE_PYTHON2)
@@ -252,8 +271,8 @@ if(ENABLE_COVERAGE)
add_definitions(-DCVC4_COVERAGE)
setup_target_for_coverage_lcov(
NAME coverage
- EXECUTABLE ctest -j${CTEST_NTHREADS} $(ARGS)
- DEPENDENCIES cvc4-bin)
+ EXECUTABLE
+ ctest -j${CTEST_NTHREADS} -LE "example" --output-on-failure $(ARGS))
endif()
if(ENABLE_DEBUG_CONTEXT_MM)
@@ -312,10 +331,6 @@ if(ENABLE_TRACING)
add_definitions(-DCVC4_TRACING)
endif()
-if(ENABLE_UNIT_TESTING)
- find_package(CxxTest REQUIRED)
-endif()
-
if(ENABLE_STATISTICS)
add_definitions(-DCVC4_STATISTICS_ON)
endif()
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index 655c0e005..000000000
--- a/INSTALL
+++ /dev/null
@@ -1,259 +0,0 @@
-CVC4 prerelease version 1.7.
-
-** Quick-start instructions
-
-*** Supported operating systems
-
-CVC4 can be built on Linux and macOS. For Windows, CVC4 can be cross-compiled
-using Mingw-w64. We recommend a 64-bit operating system.
-
-On macOS, we recommend using Homebrew (https://brew.sh/) to install the
-dependencies. We also have a Homebrew Tap available at
-https://github.com/CVC4/homebrew-cvc4 .
-
-*** Build dependencies
-
-The following tools and libraries are required to build and run CVC4.
-Versions given are minimum versions; more recent versions should be
-compatible.
-
- GNU C and C++ (gcc and g++) or Clang, reasonably recent versions
- GNU Make
- GNU Bash
- GMP v4.2 (GNU Multi-Precision arithmetic library)
- libantlr3c v3.2 or v3.4 (ANTLR parser generator C support library)
- The Boost C++ base libraries
-
-Some features, such as the theory of floating-point numbers, require optional
-dependencies, please refer to the section "Optional dependencies" below for
-details.
-
-If "make" is non-GNU on your system, make sure to invoke "gmake" (or
-whatever GNU Make is installed as). If your usual shell is not Bash,
-the configure script should auto-correct this. If it does not, you'll
-see strange shell syntax errors, and you may need to explicitly set
-SHELL or CONFIG_SHELL to the location of bash on your system.
-
-*** Installing libantlr3c: ANTLR parser generator C support library
-
-For libantlr3c, you can use the convenience script in
-contrib/get-antlr-3.4. This will download, patch, and install
-libantlr3c. On a 32-bit machine, or if you have difficulty building
-libantlr3c (or difficulty getting CVC4 to link against it later), you
-may need to remove the --enable-64bit part in the script. (If you're
-curious, the manual instructions are at
-http://cvc4.cs.stanford.edu/wiki/Developer%27s_Guide#ANTLR3 .)
-
-*** Installing the Boost C++ base libraries
-
-A Boost package is available on most Linux distributions and Homebrew; check
-yours for a package named something like libboost-dev or boost-devel. There
-are a number of additional Boost packages in some distributions, but this
-"basic" one should be sufficient for building CVC4.
-
-Should you want to install Boost manually, or to learn more about the
-Boost project, please visit http://www.boost.org/.
-
-*** Optional dependencies
-
-None of these is required, but can improve CVC4 as described below:
-
- Optional: SymFPU (an implementation of SMT-LIB/IEEE-754 floating-point
- operations in terms of bit-vector operations)
- Optional: CaDiCaL (optional SAT solver)
- Optional: CryptoMiniSat (optional SAT solver)
- Optional: LFSC (the LFSC proof checker)
- Optional: SWIG 3.0.x (Simplified Wrapper and Interface Generator)
- Optional: CLN v1.3 or newer (Class Library for Numbers)
- Optional: glpk-cut-log (A fork of the GNU Linear Programming Kit)
- Optional: ABC library (for improved bitvector support)
- Optional: GNU Readline library (for an improved interactive experience)
- Optional: The Boost C++ threading library (libboost_thread)
- Optional: CxxTest unit testing framework
-
-SymFPU is required for supporting the theory of floating-point numbers. It can
-be installed using the contrib/get-symfpu script. Use --with-symfpu when
-configuring CVC4 to build with the dependency.
-
-CaDiCaL is a SAT solver that can be used for solving non-incremental bit-vector
-problems when using eager bit-blasting. This dependency may improve
-performance. It can be installed using the contrib/get-cadical script. Use
---with-cadical when configuring CVC4 to build with this dependency.
-
-CryptoMiniSat is a SAT solver that can be used for solving bit-vector problems
-when using eager bit-blasting. This dependency may improve performance. It can
-be installed using the contrib/get-cryptominisat script. Use
---with-cryptominisat when configuring CVC4 to build with this dependency.
-
-LFSC is required to check proofs internally with --check-proofs. It can be
-installed using the contrib/get-lfsc script. Use --with-lfsc when configuring
-CVC4 to build with this dependency.
-
-SWIG is necessary to build the Java API (and of course a JDK is
-necessary, too). SWIG 1.x/2.x won't work; you'll need 3.0, and the more
-recent the better. See "Language bindings" below for build instructions.
-
-CLN is an alternative multiprecision arithmetic package that can offer
-better performance and memory footprint than GMP. CLN is covered by
-the GNU General Public License, version 3; so if you choose to use
-CVC4 with CLN support, you are licensing CVC4 under that same license.
-(Usually CVC4's license is more permissive than GPL is; see the file
-COPYING in the CVC4 source distribution for details.) Please visit
-http://www.ginac.de/CLN/ for more details about CLN.
-
-glpk-cut-log is a fork of GLPK (the GNU Linear Programming Kit).
-This can be used to speed up certain classes of problems for the arithmetic
-implementation in CVC4. (This is not recommended for most users.) The source
-code for glpk-cut-log is available at:
-https://github.com/timothy-king/glpk-cut-log/
-The only option for installation of glpk-cut-log is downloading the library,
-compiling and installing it manually. CVC4 is no longer compatible with the
-main GLPK library. GLPK and glpk-cut-log are covered by the GNU General Public
-License, version 3; so if you choose to use CVC4 with GLPK support, you are
-licensing CVC4 under that same license.
-(Usually CVC4's license is more permissive; see above discussion.)
-Please visit http://www.gnu.org/software/glpk/ for more details about GLPK.
-
-ABC: A System for Sequential Synthesis and Verification is a library
-for synthesis and verification of logic circuits. This can be used to
-speed up the eager bit-vector solver by first encoding the bit-blasted
-formula into AIG format and then using ABC to simplify the AIG. To
-install abc run the contrib/get-abc script which will download and
-install a compatible version of ABC in the cvc4 directory. To configure
-CVC4 to use abc configure with --with-abc and --with-abc-dir=PATH, where
-PATH corresponds to the install path of ABC. To run CVC4 using ABC use
-the --bitblast-aig command line argument.
-Please visit http://www.eecs.berkeley.edu/~alanmi/abc/ for more details
-on ABC.
-
-The GNU Readline library is optionally used to provide command
-editing, tab completion, and history functionality at the CVC prompt
-(when running in interactive mode). Check your distribution for a
-package named "libreadline-dev" or "readline-devel" or similar. This
-library is covered by the GNU General Public License, version 3. Like
-the above-mentioned libraries, if you choose to use CVC4 with readline
-support, you are licensing CVC4 under that same license. (Please visit
-http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html for more
-details about readline.)
-
-The Boost C++ threading library (often packaged independently of the
-Boost base library) is needed to run CVC4 in "portfolio"
-(multithreaded) mode. Check your distribution for a package named
-"libboost-thread-dev" or similar.
-
-CxxTest is necessary to run CVC4's unit tests (included with the
-distribution). Running these is not really required for users of
-CVC4; "make check" will skip unit tests if CxxTest isn't available,
-and go on to run the extensive system- and regression-tests in the
-source tree. However, if you're interested, you can download CxxTest
-at http://cxxtest.com/ .
-
-*** Building CVC4
-
-Once the build dependencies are satisfied, you should be able to configure,
-make, make check, and make install without incident:
-
- ./autogen.sh [not required when building from a source distribution]
- ./configure [use --prefix to specify a prefix; default /usr/local
- use --with-* to use optional dependencies]
- make
- make check [optional but a good idea!]
-
-To build from a repository checkout (rather than a distributed CVC4
-tarball), there are additional dependencies; see below.
-
-You can then "make install" to install in the prefix you gave to
-the configure script (/usr/local by default). ** You should run
-"make check" ** before installation to ensure that CVC4 has been
-built correctly. In particular, GCC version 4.5.1 seems to have a
-bug in the optimizer that results in incorrect behavior (and wrong
-results) in many builds. This is a known problem for MiniSat, and
-since MiniSat is at the core of CVC4, a problem for CVC4. "make check"
-easily detects this problem (by showing a number of FAILed test cases).
-It is ok if the unit tests aren't run as part of "make check", but all
-system tests and regression tests should pass without incident.
-
-To build API documentation, use "make doc". Documentation is produced
-under doc/ but is not installed by "make install".
-
-Examples and tutorials are not installed with "make install." You may
-want to "make install-examples". See below.
-
-For more information about the build system itself (probably not
-necessary for casual users), see the Appendix at the bottom of this
-file.
-
-*** Language bindings
-
-There are several options available for using CVC4 from the API.
-
-First, CVC4 offers a complete and flexible API for manipulating
-expressions, maintaining a stack of assertions, and checking
-satisfiability, and related things. The C++ libraries (libcvc4.so and
-libcvc4parser.so) and required headers are installed normally via a
-"make install". This API is also available from Java (via CVC4.jar
-and libcvc4jni.so) by configuring with --enable-language-bindings=java.
-You'll also need SWIG 2.0 installed (and you might need to help
-configure find it if you installed it in a nonstandard place with
---with-swig-dir=/path/to/swig/installation). You may also need to
-give the configure script the path to your Java headers (in
-particular, jni.h). You might do so with (for example):
-
- ./configure --enable-language-bindings=java \
- JAVA_CPPFLAGS=-I/usr/lib/jvm/java-6-openjdk-amd64/include
-
-The examples/ directory includes some basic examples (the "simple vc" family of
-examples) of all these interfaces.
-
-In principle, since we use SWIG to generate the native Java API, we
-could support other languages as well. However, using CVC4 from other
-languages is not supported, nor expected to work, at this time. If
-you're interested in helping to develop, maintain, and test a language
-binding, please contact one of the project leaders.
-
-*** Building CVC4 from a repository checkout
-
-CVC4's main repository is kept on GitHub at:
-
- https://github.com/CVC4/CVC4
-
-and there are numerous experimental forks housed on GitHub as well,
-by different developers, implementing various features.
-
-The following tools and libraries are additionally required to build
-CVC4 from from a repository checkout rather than from a prepared
-source tarball.
-
- Automake v1.11 or later
- Autoconf v2.61 or later
- Libtool v2.2 or later
- ANTLR3 v3.2 or v3.4
- Python (2.x or 3.x)
-
-First, use "./autogen.sh" to create the configure script. Then proceed as
-normal for any distribution tarball. The parsers are pre-generated for the
-tarballs, but don't exist in the repository; hence the extra ANTLR3 requirement
-to generate the source code for the parsers, when building from the repository.
-Similarly, Python is required for generating some of the code.
-
-*** Examples and tutorials are not built or installed
-
-Examples are not built by "make" or "make install". See
-examples/README for information on what to find in the examples/
-directory, as well as information about building and installing them.
-
-*** Appendix: Build architecture
-
-The build system is generated by automake, libtool, and autoconf. It
-is somewhat nonstandard, though, which (for one thing) requires that
-GNU Make be used. If you ./configure in the top-level source
-directory, the objects will actually all appear in
-builds/${arch}/${build_id}. This is to allow multiple, separate
-builds in the same place (e.g., an assertions-enabled debugging build
-alongside a production build), without changing directories at the
-shell. The "current" build is maintained until you re-configure.
-
-You can also create your own build directory inside or outside of the
-source tree and configure from there. All objects will then be built
-in that directory, and you'll ultimately find the "cvc4" binary in
-src/main/, and the libraries under src/ and src/parser/.
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 000000000..2f0c7a051
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,325 @@
+CVC4 prerelease version 1.7.
+============================
+
+## Supported Operating Systems
+
+CVC4 can be built on Linux and macOS. For Windows, CVC4 can be cross-compiled
+using Mingw-w64. We recommend a 64-bit operating system.
+
+On macOS, we recommend using Homebrew (https://brew.sh/) to install the
+dependencies. We also have a Homebrew Tap available at
+https://github.com/CVC4/homebrew-cvc4 .
+
+## Build dependencies
+
+The following tools and libraries are required to build and run CVC4.
+Versions given are minimum versions; more recent versions should be
+compatible.
+
+- [GNU C and C++ (gcc and g++)](https://gcc.gnu.org)
+ or [Clang](https://clang.llvm.org) (reasonably recent versions)
+- [CMake >= 3.1](https://cmake.org)
+- [GNU Bash](https://www.gnu.org/software/bash/)
+- [Python >= 2.7](https://www.python.org)
+- [GMP v4.2 (GNU Multi-Precision arithmetic library)](https://gmplib.org)
+- [libantlr3c v3.2 or v3.4 (ANTLR parser generator C support library)](http://www.antlr3.org/)
+
+Some features, such as the theory of floating-point numbers, require
+[optional dependencies](#optional-dependencies) (see below).
+
+### Installing libantlr3c: ANTLR parser generator C support library
+
+For libantlr3c, you can use the script contrib/get-antlr-3.4.
+This will download, patch, and install libantlr3c.
+
+If you're on a 32-bit machine, or if you have difficulty building
+libantlr3c (or difficulty getting CVC4 to link against it), you
+may need to remove the configure option `--enable-64bit` in the script.
+
+### Warning: GCC 4.5.1
+
+GCC version 4.5.1 seems to have a bug in the optimizer that may result in
+incorrect behavior (and wrong results) in many builds. This is a known problem
+for MiniSat, and since MiniSat is at the core of CVC4, a problem for CVC4.
+We recommend using a GCC version > 4.5.1.
+
+## Optional Dependencies
+
+### SymFPU (Support for the Theory of Floating Point Numbers)
+
+[SymFPU](https://github.com/martin-cs/symfpu/tree/CVC4)
+is an implementation of SMT-LIB/IEEE-754 floating-point operations in terms
+of bit-vector operations.
+It is required for supporting the theory of floating-point numbers and
+can be installed using the `contrib/get-symfpu` script.
+Configure CVC4 with `configure.sh --symfpu` to build with this dependency.
+
+### CaDiCaL (Optional SAT solver)
+
+[CaDiCaL](https://github.com/arminbiere/cadical)
+is a SAT solver that can be used for solving non-incremental bit-vector
+problems with eager bit-blasting. This dependency may improve performance.
+It can be installed using the `contrib/get-cadical` script.
+Configure CVC4 with `configure.sh --cadical` to build with this dependency.
+
+### CryptoMiniSat (Optional SAT solver)
+
+[CryptoMiniSat](https://github.com/msoos/cryptominisat)
+is a SAT solver that can be used for solving bit-vector problems with eager
+bit-blasting. This dependency may improve performance.
+It can be installed using the `contrib/get-cryptominisat` script.
+Configure CVC4 with `configure.sh --cryptominisat` to build with this
+dependency.
+
+### LFSC (The LFSC Proof Checker)
+
+[LFSC](https://github.com/CVC4/LFSC) is required to check proofs internally
+with --check-proofs. It can be installed using the `contrib/get-lfsc` script.
+Configure CVC4 with `configure.sh --lfsc` to build with this dependency.
+
+### SWIG >= 3.0.x (Simplified Wrapper and Interface Generator)
+
+SWIG 3.0.x (and a JDK) is necessary to build the Java API.
+See [Language Bindings](#language-bindings) below for build instructions.
+
+### CLN >= v1.3 (Class Library for Numbers)
+
+[CLN](http://www.ginac.de/CLN)
+is an alternative multiprecision arithmetic package that may offer better
+performance and memory footprint than GMP.
+Configure CVC4 with `configure.sh --cln` to build with this dependency.
+
+Note that CLN is covered by the [GNU General Public License, version 3](https://www.gnu.org/licenses/gpl-3.0.en.html).
+If you choose to use CVC4 with CLN support, you are licensing CVC4 under that
+same license.
+(Usually CVC4's license is more permissive than GPL, see the file `COPYING` in
+the CVC4 source distribution for details.)
+
+### glpk-cut-log (A fork of the GNU Linear Programming Kit)
+
+[glpk-cut-log](https://github.com/timothy-king/glpk-cut-log/) is a fork of
+[GLPK](http://www.gnu.org/software/glpk/) (the GNU Linear Programming Kit).
+This can be used to speed up certain classes of problems for the arithmetic
+implementation in CVC4. (This is not recommended for most users.)
+
+glpk-cut-log can be installed using the `contrib/get-glpk-cut-log` script.
+Note that the only installation option is manual installation via this script.
+CVC4 is no longer compatible with the main GLPK library.
+Configure CVC4 with `configure.sh --glpk` to build with this dependency.
+
+Note that GLPK and glpk-cut-log are covered by the [GNU General Public License, version 3](https://www.gnu.org/licenses/gpl-3.0.en.html).
+If you choose to use CVC4 with GLPK support, you are licensing CVC4 under that
+same license.
+(Usually CVC4's license is more permissive; see above discussion.)
+
+### ABC library (Improved Bit-Vector Support)
+
+[ABC](http://www.eecs.berkeley.edu/~alanmi/abc/) (A System for Sequential
+Synthesis and Verification) is a library for synthesis and verification of
+logic circuits. This dependency may improve performance of the eager
+bit-vector solver. When enabled, the bit-blasted formula is encoded into
+and-inverter-graphs (AIG) and ABC is used to simplify these AIGs.
+
+ABC can be installed using the `contrib/get-abc` script.
+Configure CVC4 with `configure.sh --abc` to build with this dependency.
+
+### GNU Readline library (Improved Interactive Experience)
+
+The [GNU Readline](http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html)
+library is optionally used to provide command editing, tab completion, and
+history functionality at the CVC4 prompt (when running in interactive mode).
+Check your distribution for a package named "libreadline-dev" or
+"readline-devel" or similar.
+
+Note that GNU Readline is covered by the [GNU General Public License, version 3](https://www.gnu.org/licenses/gpl-3.0.en.html).
+If you choose to use CVC4 with GNU Readline support, you are licensing CVC4
+under that same license.
+(Usually CVC4's license is more permissive; see above discussion.)
+
+### libboost_thread: The Boost C++ threading library (Portfolio Builds)
+
+The [Boost](http://www.boost.org) C++ threading library (often packaged
+independently of the Boost base library) is needed to run CVC4 in "portfolio"
+(multithreaded) mode.
+Check your distribution for a package named "libboost-thread-dev" or similar.
+
+### Boost C++ base libraries (Examples)
+
+The [Boost](http://www.boost.org) C++ base library is needed for some examples
+provided with CVC4.
+
+### CxxTest Unit Testing Framework (Unit Tests)
+
+[CxxTest](http://cxxtest.com) is required to optionally run CVC4's unit tests
+(included with the distribution). See [Testing CVC4](#testing-cvc4) below for more details.
+
+
+## Building CVC4
+
+ ./configure.sh # use --prefix to specify a prefix (default: /usr/local)
+ # use --name=<PATH> for custom build directory
+ cd <build_dir> # default is ./build
+ make # use -jN for parallel build with N threads
+ make check # to run default set of tests
+ make install # to install into the prefix specified above
+
+All binaries are built into `<build_dir>/bin`, the CVC4 library is built into
+`<build_dir>/src`.
+
+## Language bindings
+
+CVC4 provides a complete and flexible C++ API (see `examples/api` for examples).
+It further provides Java (see `examples/SimpleVC.java` and `examples/api/java`)
+and Python (see `examples/SimpleVC.py`) API bindings.
+
+Configure CVC4 with `configure.sh --language-bindings=[java,python,all]`
+to build with language bindings.
+Note that this requires SWIG >= 3.0.x.
+
+In principle, since we use SWIG to generate the native Java and Python API,
+we could support other languages as well. However, using CVC4 from other
+languages is not supported, nor expected to work, at this time.
+If you're interested in helping to develop, maintain, and test a language
+binding, please contact one of the project leaders.
+
+## Building the Examples
+
+The examples provided in directory `examples` are not built by default.
+
+ make examples # build all examples
+ make runexamples # build and run all examples
+ make <example> # build examples/<subdir>/<example>.<ext>
+ ctest example/<subdir>/<example> # run test example/<subdir>/<example>
+
+All examples binaries are built into `<build_dir>/bin/examples`.
+
+See `examples/README` for more detailed information on what to find in the
+`examples` directory.
+
+## Testing CVC4
+
+We use `ctest` as test infrastructure, for all command-line options of ctest,
+see `ctest -h`. Some useful options are:
+
+ ctest -R <regex> # run all tests with names matching <regex>
+ ctest -E <regex> # exclude tests with names matching <regex>
+ ctest -L <regex> # run all tests with labels matching <regex>
+ ctest -LE <regex> # exclude tests with labels matching <regex>
+ ctest # run all tests
+ ctest -jN # run all tests in parallel with N threads
+ ctest --output-on-failure # run all tests and print output of failed tests
+
+We have 4 categories of tests:
+- **examples** in directory `examples`
+ (label: **example**)
+- **regression tests** (5 levels) in directory `test/regress`
+ (label: **regressN** with N the regression level)
+- **system tests** in directory `test/system`
+ (label: **system**)
+- **unit tests** in directory `test/unit`
+ (label: **unit**)
+
+### Testing Examples
+
+For building instructions, see [Building the Examples](#building-the-examples).
+
+We use prefix `example/` + `<subdir>/` + `<example>` (for `<example>` in
+`example/<subdir>/`) as test target name.
+
+ make bitvectors # build example/api/bitvectors.cpp
+ ctest -R bitvectors # run all tests that match '*bitvectors*'
+ # > runs example/api/bitvectors
+ # > example/api/bitvectors_and_arrays
+ # > ...
+ ctest -R bitvectors$ # run all tests that match '*bitvectors'
+ # > runs example/api/bitvectors
+ ctest -R example/api/bitvectors$ # run all tests that match '*example/api/bitvectors'
+ # > runs example/api/bitvectors
+
+
+### Testing System Tests
+
+The system tests are not built by default.
+
+ make systemtests # build and run all system tests
+ make <system_test> # build test/system/<system_test>.<ext>
+ ctest system/<system_test> # run test/system/<system_test>.<ext>
+
+All system test binaries are built into `<build_dir>/bin/test/system`.
+
+We use prefix `system/` + `<system_test>` (for `<system_test>` in `test/system`)
+as test target name.
+
+ make ouroborous # build test/system/ouroborous.cpp
+ ctest -R ouroborous # run all tests that match '*ouroborous*'
+ # > runs system/ouroborous
+ ctest -R ouroborous$ # run all tests that match '*ouroborous'
+ # > runs system/ouroborous
+ ctest -R system/ouroborous$ # run all tests that match '*system/ouroborous'
+ # > runs system/ouroborous
+### Testing Unit Tests
+
+The unit tests are not built by default.
+
+ make units # build and run all unit tests
+ make <unit_test> # build test/unit/<subdir>/<unit_test>.<ext>
+ ctest unit/<subdir>/<unit_test> # run test/unit/<subdir>/<unit_test>.<ext>
+
+All unit test binaries are built into `<build_dir>/bin/test/unit`.
+
+We use prefix `unit/` + `<subdir>/` + `<unit_test>` (for `<unit_test>` in
+`test/unit/<subdir>`) as test target name.
+
+ make map_util_black # build test/unit/base/map_util_black.cpp
+ ctest -R map_util_black # run all tests that match '*map_util_black*'
+ # > runs unit/base/map_util_black
+ ctest -R base/map_util_black$ # run all tests that match '*base/map_util_black'
+ # > runs unit/base/map_util_black
+ ctest -R unit/base/map_util_black$ # run all tests that match '*unit/base/map_util_black'
+ # > runs unit/base/map_util_black
+
+### Testing Regression Tests
+
+We use prefix `regressN/` + `<subdir>/` + `<regress_test>` (for `<regress_test>`
+in level `N` in `test/regress/regressN/<subdir>`) as test target name.
+
+ ctest -L regress # run all regression tests
+ ctest -L regress0 # run all regression tests in level 0
+ ctest -L regress[0-1] # run all regression tests in level 0 and 1
+ ctest -R regress # run all regression tests
+ ctest -R regress0 # run all regression tests in level 0
+ ctest -R regress[0-1] # run all regression tests in level 0 and 1
+ ctest -R regress0/bug288b # run all tests that match '*regress0/bug288b*'
+ # > runs regress0/bug288b
+### Custom Targets
+
+All custom test targets build and run a preconfigured set of tests.
+
+- `make check [-jN] [ARGS=-jN]`
+ The default build-and-test target for CVC4, builds and runs all examples,
+ all system and unit tests, and regression tests from levels 0 and 1.
+
+- `make systemtests [-jN] [ARGS=-jN]`
+ Build and run all system tests.
+
+- `make units [-jN] [ARGS=-jN]`
+ Build and run all unit tests.
+
+- `make regress [-jN] [ARGS=-jN]`
+ Build and run all regression tests.
+
+- `make runexamples [-jN] [ARGS=-jN]`
+ Build and run all examples.
+
+- `make coverage [-jN] [ARGS=-jN]`
+ Build and run all tests (system and unit tests, regression tests level 0-4)
+ with gcov to determine code coverage.
+
+We use `ctest` as test infrastructure, and by default all test targets
+are configured to **run** in parallel with the maximum number of threads
+available on the system. Override with `ARGS=-jN`.
+
+Use `-jN` for parallel **building** with `N` threads.
+
+
+
diff --git a/cmake/ConfigDebug.cmake b/cmake/ConfigDebug.cmake
index 9c9f5ec10..a4331bfce 100644
--- a/cmake/ConfigDebug.cmake
+++ b/cmake/ConfigDebug.cmake
@@ -19,9 +19,8 @@ cvc4_set_option(ENABLE_PROOFS ON)
cvc4_set_option(ENABLE_TRACING ON)
# enable_dumping=yes
if(ENABLE_PORTFOLIO)
- if(ENABLE_DUMPING)
- message(FATAL_ERROR "Dumping not supported with a portfolio build.")
- else()
+  # Only print warning if dumping was not explicitly disabled by the user.
+ if(${ENABLE_DUMPING} STREQUAL "IGNORE")
message(WARNING
"Disabling dumping support, not supported with a portfolio build.")
endif()
diff --git a/cmake/ConfigProduction.cmake b/cmake/ConfigProduction.cmake
index db63507a7..99f2a7b93 100644
--- a/cmake/ConfigProduction.cmake
+++ b/cmake/ConfigProduction.cmake
@@ -16,9 +16,8 @@ cvc4_set_option(ENABLE_PROOFS ON)
cvc4_set_option(ENABLE_TRACING OFF)
# enable_dumping=yes
if(ENABLE_PORTFOLIO)
- if(ENABLE_DUMPING)
- message(FATAL_ERROR "Dumping not supported with a portfolio build.")
- else()
+  # Only print warning if dumping was not explicitly disabled by the user.
+ if(${ENABLE_DUMPING} STREQUAL "IGNORE")
message(WARNING
"Disabling dumping support, not supported with a portfolio build.")
endif()
diff --git a/cmake/ConfigTesting.cmake b/cmake/ConfigTesting.cmake
index 3c995e421..7feaf5129 100644
--- a/cmake/ConfigTesting.cmake
+++ b/cmake/ConfigTesting.cmake
@@ -16,9 +16,8 @@ cvc4_set_option(ENABLE_PROOFS ON)
cvc4_set_option(ENABLE_TRACING ON)
# enable_dumping=yes
if(ENABLE_PORTFOLIO)
- if(ENABLE_DUMPING)
- message(FATAL_ERROR "Dumping not supported with a portfolio build.")
- else()
+ # Only print warning if dumping was not explicitly disabled by the user.
+ if(${ENABLE_DUMPING} STREQUAL "IGNORE")
message(WARNING
"Disabling dumping support, not supported with a portfolio build.")
endif()
diff --git a/cmake/FindCxxTest.cmake b/cmake/FindCxxTest.cmake
new file mode 100644
index 000000000..cd7aed70d
--- /dev/null
+++ b/cmake/FindCxxTest.cmake
@@ -0,0 +1,45 @@
+# Find CxxTest
+# CxxTest_FOUND - system has CxxTest lib
+# CxxTest_INCLUDE_DIR - the CxxTest include directory
+# CxxTest_TESTGEN_EXECUTABLE - CxxTest executable
+# CxxTest_TESTGEN_INTERPRETER - Python/Perl interpreter for running executable
+
+find_package(PythonInterp QUIET)
+find_package(Perl QUIET)
+
+find_path(CxxTest_INCLUDE_DIR cxxtest/TestSuite.h
+ PATHS ${CxxTest_HOME}
+ NO_DEFAULT_PATH)
+find_program(CxxTest_PYTHON_TESTGEN_EXECUTABLE
+ NAMES cxxtestgen cxxtestgen.py
+ PATHS ${CxxTest_HOME}/bin
+ NO_DEFAULT_PATH)
+find_program(CxxTest_PERL_TESTGEN_EXECUTABLE cxxtestgen.pl
+ PATHS ${CxxTest_HOME}/bin
+ NO_DEFAULT_PATH)
+
+if(NOT CxxTest_HOME)
+ find_path(CxxTest_INCLUDE_DIR cxxtest/TestSuite.h)
+ find_program(CxxTest_PYTHON_TESTGEN_EXECUTABLE NAMES cxxtestgen cxxtestgen.py)
+ find_program(CxxTest_PERL_TESTGEN_EXECUTABLE cxxtestgen.pl)
+endif()
+
+if(PYTHONINTERP_FOUND AND CxxTest_PYTHON_TESTGEN_EXECUTABLE)
+ set(CxxTest_TESTGEN_EXECUTABLE ${CxxTest_PYTHON_TESTGEN_EXECUTABLE})
+ set(CxxTest_TESTGEN_INTERPRETER ${PYTHON_EXECUTABLE})
+elseif(PERL_FOUND AND CxxTest_PERL_TESTGEN_EXECUTABLE)
+ set(CxxTest_TESTGEN_EXECUTABLE ${CxxTest_PERL_TESTGEN_EXECUTABLE})
+ set(CxxTest_TESTGEN_INTERPRETER ${PERL_EXECUTABLE})
+elseif(NOT PYTHONINTERP_FOUND AND NOT PERL_FOUND AND CxxTest_FIND_REQUIRED)
+ message(FATAL_ERROR "Neither Python nor Perl found, cannot use CxxTest.")
+endif()
+
+if(NOT DEFINED CxxTest_TESTGEN_ARGS)
+ set(CxxTest_TESTGEN_ARGS --error-printer)
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(
+ CxxTest DEFAULT_MSG CxxTest_INCLUDE_DIR CxxTest_TESTGEN_EXECUTABLE)
+
+mark_as_advanced(CxxTest_INCLUDE_DIR CxxTest_TESTGEN_EXECUTABLE)
diff --git a/configure.sh b/configure.sh
index 16764dd8a..1cc104c94 100755
--- a/configure.sh
+++ b/configure.sh
@@ -50,7 +50,6 @@ The following options configure parameterized features.
Optional Packages:
The following flags enable optional packages (disable with --no-<option name>).
--cln use CLN instead of GMP
- --gmp use GMP instead of CLN
--glpk use GLPK simplex solver
--abc use the ABC AIG library
--cadical use the CaDiCaL SAT solver
@@ -66,7 +65,7 @@ Optional Path to Optional Packages:
--antlr-dir=PATH path to ANTLR C headers and libraries
--cadical-dir=PATH path to top level of CaDiCaL source tree
--cryptominisat-dir=PATH path to top level of CryptoMiniSat source tree
- --cxxtest-dir=DIR path to CxxTest installation
+ --cxxtest-dir=PATH path to CxxTest installation
--glpk-dir=PATH path to top level of GLPK installation
--gmp-dir=PATH path to top level of GMP installation
--lfsc-dir=PATH path to top level of LFSC source tree
@@ -140,6 +139,7 @@ abc_dir=default
antlr_dir=default
cadical_dir=default
cryptominisat_dir=default
+cxxtest_dir=default
glpk_dir=default
gmp_dir=default
lfsc_dir=default
@@ -289,6 +289,9 @@ do
--cryptominisat-dir) die "missing argument to $1 (try -h)" ;;
--cryptominisat-dir=*) cryptominisat_dir=${1##*=} ;;
+ --cxxtest-dir) die "missing argument to $1 (try -h)" ;;
+ --cxxtest-dir=*) cxxtest_dir=${1##*=} ;;
+
--glpk-dir) die "missing argument to $1 (try -h)" ;;
--glpk-dir=*) glpk_dir=${1##*=} ;;
@@ -396,6 +399,8 @@ cmake_opts=""
&& cmake_opts="$cmake_opts -DCADICAL_DIR=$cadical_dir"
[ "$cryptominisat_dir" != default ] \
&& cmake_opts="$cmake_opts -DCRYPTOMINISAT_DIR=$cryptominisat_dir"
+[ "$cxxtest_dir" != default ] \
+ && cmake_opts="$cmake_opts -DCXXTEST_DIR=$cxxtest_dir"
[ "$glpk_dir" != default ] \
&& cmake_opts="$cmake_opts -DGLPK_DIR=$glpk_dir"
[ "$gmp_dir" != default ] \
diff --git a/examples/README b/examples/README
index df8cffe73..cc3c23f26 100644
--- a/examples/README
+++ b/examples/README
@@ -1,8 +1,11 @@
+Examples
+--------
+
This directory contains usage examples of CVC4's different language
bindings, library APIs, and also tutorial examples from the tutorials
available at http://cvc4.cs.stanford.edu/wiki/Tutorials
-*** Files named SimpleVC*, simple_vc*
+### SimpleVC*, simple_vc*
These are examples of how to use CVC4 with each of its library
interfaces (APIs) and language bindings. They are essentially "hello
@@ -10,24 +13,19 @@ world" examples, and do not fully demonstrate the interfaces, but
function as a starting point to using simple expressions and solving
functionality through each library.
-*** Targeted examples
+### Targeted examples
The "api" directory contains some more specifically-targeted
examples (for bitvectors, for arithmetic, etc.). The "api/java"
directory contains the same examples in Java.
-*** Installing example source code
-
-Examples are not automatically installed by "make install". If you
-wish to install them, use "make install-examples" after you configure
-your CVC4 source tree. They'll appear in your documentation
-directory, under the "examples" subdirectory (so, by default,
-in /usr/local/share/doc/cvc4/examples).
+### Building Examples
-*** Building examples
+The examples provided in this directory are not built by default.
-Examples can be built as a separate step, after building CVC4 from
-source. After building CVC4, you can run "make examples". You'll
-find the built binaries in builds/examples (or just in "examples" if
-you configured a build directory outside of the source tree).
+ make examples # build all examples
+ make runexamples # build and run all examples
+ make <example> # build examples/<subdir>/<example>.<ext>
+ ctest example/<subdir>/<example> # run examples/<subdir>/<example>.<ext>
+The examples binaries are built into `<build_dir>/bin/examples`.
diff --git a/examples/hashsmt/sha1_inversion.cpp b/examples/hashsmt/sha1_inversion.cpp
index 652fc4e5b..ef5191cb7 100644
--- a/examples/hashsmt/sha1_inversion.cpp
+++ b/examples/hashsmt/sha1_inversion.cpp
@@ -22,7 +22,13 @@
* Author: dejan
*/
+#include <boost/version.hpp>
+#if BOOST_VERSION > 106700
+#include <boost/uuid/detail/sha1.hpp>
+#else
#include <boost/uuid/sha1.hpp>
+#endif
+
#include <fstream>
#include <iostream>
#include <sstream>
diff --git a/src/base/CMakeLists.txt b/src/base/CMakeLists.txt
index 94ef98126..532ed475d 100644
--- a/src/base/CMakeLists.txt
+++ b/src/base/CMakeLists.txt
@@ -73,17 +73,18 @@ file(GLOB_RECURSE source_files
${PROJECT_SOURCE_DIR}/src/*.g)
string(REPLACE ";" " " source_files_list "${source_files}")
-# Note: {Debug,Trace}_tags.tmp are targets since we always want to generate
-# the temporary tag files in order to check if anything changed.
+# Note: gen-tags-{debug,trace} are targets since we always want to generate
+# the temporary tag files {Debug,Trace}_tags.tmp in order to check if tags
+# were added/modified/deleted.
add_custom_target(
- Debug_tags.tmp
+ gen-tags-debug
COMMAND
${gentmptags_script} ${CMAKE_CURRENT_LIST_DIR} Debug ${source_files_list}
DEPENDS mktags
)
add_custom_target(
- Trace_tags.tmp
+ gen-tags-trace
COMMAND
${gentmptags_script} ${CMAKE_CURRENT_LIST_DIR} Trace ${source_files_list}
DEPENDS mktags
@@ -92,13 +93,13 @@ add_custom_target(
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/Debug_tags
COMMAND ${gentags_script} Debug
- DEPENDS Debug_tags.tmp
+ DEPENDS gen-tags-debug ${CMAKE_CURRENT_BINARY_DIR}/Debug_tags.tmp
)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/Trace_tags
COMMAND ${gentags_script} Trace
- DEPENDS Trace_tags.tmp
+ DEPENDS gen-tags-trace ${CMAKE_CURRENT_BINARY_DIR}/Trace_tags.tmp
)
add_custom_command(
diff --git a/src/expr/node_algorithm.cpp b/src/expr/node_algorithm.cpp
index 9240e4a8e..4bbfb5df8 100644
--- a/src/expr/node_algorithm.cpp
+++ b/src/expr/node_algorithm.cpp
@@ -169,7 +169,7 @@ bool hasFreeVar(TNode n)
void getSymbols(TNode n, std::unordered_set<Node, NodeHashFunction>& syms)
{
std::unordered_set<TNode, TNodeHashFunction> visited;
- getSymbols(n, syms);
+ getSymbols(n, syms, visited);
}
void getSymbols(TNode n,
diff --git a/src/parser/smt2/Smt2.g b/src/parser/smt2/Smt2.g
index 40166a641..ebf50283d 100644
--- a/src/parser/smt2/Smt2.g
+++ b/src/parser/smt2/Smt2.g
@@ -724,31 +724,39 @@ sygusCommand [std::unique_ptr<CVC4::Command>* cmd]
PARSER_STATE->parseError("Bad syntax for inv-constraint: expected 4 "
"arguments.");
}
- //get primed variables
- std::vector< Expr > primed[2];
- std::vector< Expr > all;
- for( unsigned i=0; i<2; i++ ){
- PARSER_STATE->getSygusPrimedVars( primed[i], i==1 );
- all.insert( all.end(), primed[i].begin(), primed[i].end() );
- }
- //make relevant terms
- for( unsigned i=0; i<4; i++ ){
+ // get variables (regular and their respective primed versions)
+ std::vector<Expr> vars, primed_vars;
+ PARSER_STATE->getSygusInvVars(terms[0].getType(), vars, primed_vars);
+ // make relevant terms; 0 -> Inv, 1 -> Pre, 2 -> Trans, 3 -> Post
+ for (unsigned i = 0; i < 4; ++i)
+ {
Expr op = terms[i];
- Debug("parser-sygus") << "Make inv-constraint term #" << i << " : " << op << "..." << std::endl;
- std::vector< Expr > children;
- children.push_back( op );
- if( i==2 ){
- children.insert( children.end(), all.begin(), all.end() );
- }else{
- children.insert( children.end(), primed[0].begin(), primed[0].end() );
+ Debug("parser-sygus")
+ << "Make inv-constraint term #" << i << " : " << op << " with type "
+ << op.getType() << "..." << std::endl;
+ std::vector<Expr> children;
+ children.push_back(op);
+ // transition relation applied over both variable lists
+ if (i == 2)
+ {
+ children.insert(children.end(), vars.begin(), vars.end());
+ children.insert(
+ children.end(), primed_vars.begin(), primed_vars.end());
}
- terms[i] = EXPR_MANAGER->mkExpr( i==0 ? kind::APPLY_UF : kind::APPLY,children);
- if( i==0 ){
- std::vector< Expr > children2;
- children2.push_back( op );
- children2.insert(children2.end(), primed[1].begin(),
- primed[1].end());
- terms.push_back( EXPR_MANAGER->mkExpr(kind::APPLY_UF,children2) );
+ else
+ {
+ children.insert(children.end(), vars.begin(), vars.end());
+ }
+ terms[i] = EXPR_MANAGER->mkExpr(i == 0 ? kind::APPLY_UF : kind::APPLY,
+ children);
+ // make application of Inv on primed variables
+ if (i == 0)
+ {
+ children.clear();
+ children.push_back(op);
+ children.insert(
+ children.end(), primed_vars.begin(), primed_vars.end());
+ terms.push_back(EXPR_MANAGER->mkExpr(kind::APPLY_UF, children));
}
}
//make constraints
diff --git a/src/parser/smt2/smt2.cpp b/src/parser/smt2/smt2.cpp
index dfeaca62b..c3a107bdf 100644
--- a/src/parser/smt2/smt2.cpp
+++ b/src/parser/smt2/smt2.cpp
@@ -629,20 +629,18 @@ void Smt2::includeFile(const std::string& filename) {
}
}
-Expr Smt2::mkSygusVar(const std::string& name, const Type& type, bool isPrimed) {
- Expr e = mkBoundVar(name, type);
- d_sygusVars.push_back(e);
- d_sygusVarPrimed[e] = false;
- if( isPrimed ){
- d_sygusInvVars.push_back(e);
- std::stringstream ss;
- ss << name << "'";
- Expr ep = mkBoundVar(ss.str(), type);
- d_sygusVars.push_back(ep);
- d_sygusInvVars.push_back(ep);
- d_sygusVarPrimed[ep] = true;
+void Smt2::mkSygusVar(const std::string& name, const Type& type, bool isPrimed)
+{
+ if (!isPrimed)
+ {
+ d_sygusVars.push_back(mkBoundVar(name, type));
}
- return e;
+#ifdef CVC4_ASSERTIONS
+ else
+ {
+ d_sygusVarPrimed.push_back(mkBoundVar(name, type));
+ }
+#endif
}
void Smt2::mkSygusConstantsForType( const Type& type, std::vector<CVC4::Expr>& ops ) {
@@ -1235,16 +1233,40 @@ Expr Smt2::makeSygusBoundVarList(Datatype& dt,
return getExprManager()->mkExpr(kind::BOUND_VAR_LIST, lvars);
}
-const void Smt2::getSygusPrimedVars( std::vector<Expr>& vars, bool isPrimed ) {
- for (unsigned i = 0, size = d_sygusInvVars.size(); i < size; i++)
+const void Smt2::getSygusInvVars(FunctionType t,
+ std::vector<Expr>& vars,
+ std::vector<Expr>& primed_vars)
+{
+ std::vector<Type> argTypes = t.getArgTypes();
+ ExprManager* em = getExprManager();
+ for (const Type& ti : argTypes)
{
- Expr v = d_sygusInvVars[i];
- std::map< Expr, bool >::iterator it = d_sygusVarPrimed.find( v );
- if( it!=d_sygusVarPrimed.end() ){
- if( it->second==isPrimed ){
- vars.push_back( v );
+ vars.push_back(em->mkBoundVar(ti));
+ d_sygusVars.push_back(vars.back());
+ std::stringstream ss;
+ ss << vars.back() << "'";
+ primed_vars.push_back(em->mkBoundVar(ss.str(), ti));
+ d_sygusVars.push_back(primed_vars.back());
+#ifdef CVC4_ASSERTIONS
+ bool find_new_declared_var = false;
+ for (const Expr& e : d_sygusVarPrimed)
+ {
+ if (e.getType() == ti)
+ {
+ d_sygusVarPrimed.erase(
+ std::find(d_sygusVarPrimed.begin(), d_sygusVarPrimed.end(), e));
+ find_new_declared_var = true;
+ break;
}
}
+ if (!find_new_declared_var)
+ {
+ ss.str("");
+ ss << "warning: decleared primed variables do not match invariant's "
+ "type\n";
+ warning(ss.str());
+ }
+#endif
}
}
diff --git a/src/parser/smt2/smt2.h b/src/parser/smt2/smt2.h
index 64a957402..80bd8df83 100644
--- a/src/parser/smt2/smt2.h
+++ b/src/parser/smt2/smt2.h
@@ -68,17 +68,16 @@ private:
std::unordered_map<std::string, Kind> operatorKindMap;
std::pair<Expr, std::string> d_lastNamedTerm;
// for sygus
- std::vector<Expr> d_sygusVars, d_sygusInvVars, d_sygusConstraints,
+ std::vector<Expr> d_sygusVars, d_sygusVarPrimed, d_sygusConstraints,
d_sygusFunSymbols;
- std::map< Expr, bool > d_sygusVarPrimed;
-protected:
- Smt2(api::Solver* solver,
- Input* input,
- bool strictMode = false,
- bool parseOnly = false);
+ protected:
+ Smt2(api::Solver* solver,
+ Input* input,
+ bool strictMode = false,
+ bool parseOnly = false);
-public:
+ public:
/**
* Add theory symbols to the parser state.
*
@@ -228,7 +227,9 @@ public:
return getExprManager()->mkConst(AbstractValue(Integer(name.substr(1))));
}
- Expr mkSygusVar(const std::string& name, const Type& type, bool isPrimed = false);
+ void mkSygusVar(const std::string& name,
+ const Type& type,
+ bool isPrimed = false);
void mkSygusConstantsForType( const Type& type, std::vector<CVC4::Expr>& ops );
@@ -287,7 +288,14 @@ public:
const std::vector<Expr>& getSygusVars() {
return d_sygusVars;
}
- const void getSygusPrimedVars( std::vector<Expr>& vars, bool isPrimed );
+ /** retrieves the invariant variables (both regular and primed)
+ *
+ * To ensure that the variable list represent the correct argument type order
+ * the type of the invariant predicate is used during the variable retrieval
+ */
+ const void getSygusInvVars(FunctionType t,
+ std::vector<Expr>& vars,
+ std::vector<Expr>& primed_vars);
const void addSygusFunSymbol( Type t, Expr synth_fun );
const std::vector<Expr>& getSygusFunSymbols() {
diff --git a/src/proof/proof_manager.cpp b/src/proof/proof_manager.cpp
index cc5332cfd..e7b00068a 100644
--- a/src/proof/proof_manager.cpp
+++ b/src/proof/proof_manager.cpp
@@ -808,8 +808,7 @@ void LFSCProof::printPreprocessedAssertions(const NodeSet& assertions,
ProofManager::currentPM()->getTheoryProofEngine()->printTheoryTerm(inputAssertion, os, globalLetMap);
os << " ";
- ProofManager::currentPM()->getTheoryProofEngine()->printTheoryTerm((*it).toExpr(), os, globalLetMap);
-
+ ProofManager::currentPM()->printTrustedTerm(*it, os, globalLetMap);
os << "))";
os << "(\\ "<< ProofManager::getPreprocessedAssertionName(*it, "") << "\n";
paren << "))";
@@ -832,9 +831,7 @@ void LFSCProof::printPreprocessedAssertions(const NodeSet& assertions,
//TODO
os << "(trust_f ";
- if (ProofManager::currentPM()->getTheoryProofEngine()->printsAsBool(*it)) os << "(p_app ";
- ProofManager::currentPM()->getTheoryProofEngine()->printTheoryTerm((*it).toExpr(), os, globalLetMap);
- if (ProofManager::currentPM()->getTheoryProofEngine()->printsAsBool(*it)) os << ")";
+ ProofManager::currentPM()->printTrustedTerm(*it, os, globalLetMap);
os << ") ";
os << "(\\ "<< ProofManager::getPreprocessedAssertionName(*it, "") << "\n";
@@ -1063,5 +1060,13 @@ void ProofManager::printGlobalLetMap(std::set<Node>& atoms,
void ProofManager::ensureLiteral(Node node) {
d_cnfProof->ensureLiteral(node);
}
-
+void ProofManager::printTrustedTerm(Node term,
+ std::ostream& os,
+ ProofLetMap& globalLetMap)
+{
+ TheoryProofEngine* tpe = ProofManager::currentPM()->getTheoryProofEngine();
+ if (tpe->printsAsBool(term)) os << "(p_app ";
+ tpe->printTheoryTerm(term.toExpr(), os, globalLetMap);
+ if (tpe->printsAsBool(term)) os << ")";
+}
} /* CVC4 namespace */
diff --git a/src/proof/proof_manager.h b/src/proof/proof_manager.h
index 89aa66c2d..0342288fe 100644
--- a/src/proof/proof_manager.h
+++ b/src/proof/proof_manager.h
@@ -239,6 +239,12 @@ public:
// for SMT variable names that have spaces and other things
static std::string sanitize(TNode var);
+ // wrap term with (p_app ... ) if the term is printed as a boolean, and print
+ // used for "trust" assertions
+ static void printTrustedTerm(Node term,
+ std::ostream& os,
+ ProofLetMap& globalLetMap);
+
/** Add proof assertion - unlike addCoreAssertion this is post definition expansion **/
void addAssertion(Expr formula);
diff --git a/src/theory/arith/nonlinear_extension.cpp b/src/theory/arith/nonlinear_extension.cpp
index 9f6608dc5..929c7808d 100644
--- a/src/theory/arith/nonlinear_extension.cpp
+++ b/src/theory/arith/nonlinear_extension.cpp
@@ -888,17 +888,25 @@ bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
Kind k = tfs.first;
for (const Node& tf : tfs.second)
{
+ bool success = true;
// tf is Figure 3 : tf( x )
Node atf = computeModelValue(tf, 0);
if (k == PI)
{
- addCheckModelBound(atf, d_pi_bound[0], d_pi_bound[1]);
+ success = addCheckModelBound(atf, d_pi_bound[0], d_pi_bound[1]);
}
else if (isRefineableTfFun(tf))
{
d_used_approx = true;
std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
- addCheckModelBound(atf, bounds.first, bounds.second);
+ success = addCheckModelBound(atf, bounds.first, bounds.second);
+ }
+ if (!success)
+ {
+ Trace("nl-ext-cm-debug")
+ << "...failed to set bound for transcendental function."
+ << std::endl;
+ return false;
}
if (Trace.isOn("nl-ext-cm"))
{
@@ -962,7 +970,8 @@ bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
<< "check-model-bound : exact : " << cur << " = ";
printRationalApprox("nl-ext-cm", curv);
Trace("nl-ext-cm") << std::endl;
- addCheckModelSubstitution(cur, curv);
+ bool ret = addCheckModelSubstitution(cur, curv);
+ AlwaysAssert(ret);
}
}
}
@@ -1036,15 +1045,31 @@ bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
return true;
}
-void NonlinearExtension::addCheckModelSubstitution(TNode v, TNode s)
+bool NonlinearExtension::addCheckModelSubstitution(TNode v, TNode s)
{
// should not substitute the same variable twice
Trace("nl-ext-model") << "* check model substitution : " << v << " -> " << s << std::endl;
// should not set exact bound more than once
if(std::find(d_check_model_vars.begin(),d_check_model_vars.end(),v)!=d_check_model_vars.end())
{
+ Trace("nl-ext-model") << "...ERROR: already has value." << std::endl;
+ // this should never happen since substitutions should be applied eagerly
Assert( false );
- return;
+ return false;
+ }
+ // if we previously had an approximate bound, the exact bound should be in its
+ // range
+ std::map<Node, std::pair<Node, Node> >::iterator itb =
+ d_check_model_bounds.find(v);
+ if (itb != d_check_model_bounds.end())
+ {
+ if (s.getConst<Rational>() >= itb->second.first.getConst<Rational>()
+ || s.getConst<Rational>() <= itb->second.second.getConst<Rational>())
+ {
+ Trace("nl-ext-model")
+ << "...ERROR: already has bound which is out of range." << std::endl;
+ return false;
+ }
}
for (unsigned i = 0, size = d_check_model_subs.size(); i < size; i++)
{
@@ -1058,23 +1083,32 @@ void NonlinearExtension::addCheckModelSubstitution(TNode v, TNode s)
}
d_check_model_vars.push_back(v);
d_check_model_subs.push_back(s);
+ return true;
}
-void NonlinearExtension::addCheckModelBound(TNode v, TNode l, TNode u)
+bool NonlinearExtension::addCheckModelBound(TNode v, TNode l, TNode u)
{
Trace("nl-ext-model") << "* check model bound : " << v << " -> [" << l << " " << u << "]" << std::endl;
if( l==u )
{
// bound is exact, can add as substitution
- addCheckModelSubstitution(v,l);
- return;
+ return addCheckModelSubstitution(v, l);
}
// should not set a bound for a value that is exact
- Assert(std::find(d_check_model_vars.begin(),d_check_model_vars.end(),v)==d_check_model_vars.end());
+ if (std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
+ != d_check_model_vars.end())
+ {
+ Trace("nl-ext-model")
+ << "...ERROR: setting bound for variable that already has exact value."
+ << std::endl;
+ Assert(false);
+ return false;
+ }
Assert(l.isConst());
Assert(u.isConst());
Assert(l.getConst<Rational>() <= u.getConst<Rational>());
d_check_model_bounds[v] = std::pair<Node, Node>(l, u);
+ return true;
}
bool NonlinearExtension::hasCheckModelAssignment(Node v) const
@@ -1203,11 +1237,14 @@ bool NonlinearExtension::solveEqualitySimple(Node eq)
{
Trace("nl-ext-cm")
<< "check-model-subs : " << uv << " -> " << slv << std::endl;
- addCheckModelSubstitution(uv, slv);
- Trace("nl-ext-cms") << "...success, model substitution " << uv
- << " -> " << slv << std::endl;
- d_check_model_solved[eq] = uv;
- return true;
+ bool ret = addCheckModelSubstitution(uv, slv);
+ if (ret)
+ {
+ Trace("nl-ext-cms") << "...success, model substitution " << uv
+ << " -> " << slv << std::endl;
+ d_check_model_solved[eq] = uv;
+ }
+ return ret;
}
}
}
@@ -1223,9 +1260,9 @@ bool NonlinearExtension::solveEqualitySimple(Node eq)
Trace("nl-ext-cm") << "check-model-bound : exact : " << uvf << " = ";
printRationalApprox("nl-ext-cm", uvfv);
Trace("nl-ext-cm") << std::endl;
- addCheckModelSubstitution(uvf, uvfv);
+ bool ret = addCheckModelSubstitution(uvf, uvfv);
// recurse
- return solveEqualitySimple(eq);
+ return ret ? solveEqualitySimple(eq) : false;
}
}
Trace("nl-ext-cms") << "...fail due to constrained invalid terms."
@@ -1252,10 +1289,13 @@ bool NonlinearExtension::solveEqualitySimple(Node eq)
Trace("nl-ext-cm") << "check-model-bound : exact : " << var << " = ";
printRationalApprox("nl-ext-cm", val);
Trace("nl-ext-cm") << std::endl;
- addCheckModelSubstitution(var, val);
- Trace("nl-ext-cms") << "...success, solved linear." << std::endl;
- d_check_model_solved[eq] = var;
- return true;
+ bool ret = addCheckModelSubstitution(var, val);
+ if (ret)
+ {
+ Trace("nl-ext-cms") << "...success, solved linear." << std::endl;
+ d_check_model_solved[eq] = var;
+ }
+ return ret;
}
Trace("nl-ext-quad") << "Solve quadratic : " << seq << std::endl;
Trace("nl-ext-quad") << " a : " << a << std::endl;
@@ -1352,10 +1392,14 @@ bool NonlinearExtension::solveEqualitySimple(Node eq)
Trace("nl-ext-cm") << " <= " << var << " <= ";
printRationalApprox("nl-ext-cm", bounds[r_use_index][1]);
Trace("nl-ext-cm") << std::endl;
- addCheckModelBound(var, bounds[r_use_index][0], bounds[r_use_index][1]);
- d_check_model_solved[eq] = var;
- Trace("nl-ext-cms") << "...success, solved quadratic." << std::endl;
- return true;
+ bool ret =
+ addCheckModelBound(var, bounds[r_use_index][0], bounds[r_use_index][1]);
+ if (ret)
+ {
+ d_check_model_solved[eq] = var;
+ Trace("nl-ext-cms") << "...success, solved quadratic." << std::endl;
+ }
+ return ret;
}
bool NonlinearExtension::simpleCheckModelLit(Node lit)
@@ -3622,7 +3666,7 @@ std::vector<Node> NonlinearExtension::checkFactoring(
sum = Rewriter::rewrite( sum );
Trace("nl-ext-factor")
<< "* Factored sum for " << x << " : " << sum << std::endl;
- Node kf = getFactorSkolem( sum, lemmas );
+ Node kf = getFactorSkolem(sum, lemmas);
std::vector< Node > poly;
poly.push_back(NodeManager::currentNM()->mkNode(MULT, x, kf));
std::map<Node, std::vector<Node> >::iterator itfo =
@@ -3672,7 +3716,7 @@ Node NonlinearExtension::getFactorSkolem( Node n, std::vector< Node >& lemmas )
return itf->second;
}
}
-
+
std::vector<Node> NonlinearExtension::checkMonomialInferResBounds() {
std::vector< Node > lemmas;
Trace("nl-ext") << "Get monomial resolution inferred bound lemmas..." << std::endl;
@@ -4444,6 +4488,8 @@ bool NonlinearExtension::checkTfTangentPlanesFun(Node tf,
Assert(rcoeff_n.isConst());
Rational rcoeff = rcoeff_n.getConst<Rational>();
Assert(rcoeff.sgn() != 0);
+ poly_approx_b = Rewriter::rewrite(poly_approx_b);
+ poly_approx_c = Rewriter::rewrite(poly_approx_c);
splane = nm->mkNode(
PLUS,
poly_approx_b,
@@ -4837,6 +4883,48 @@ void NonlinearExtension::getPolynomialApproximationBounds(
}
}
+void NonlinearExtension::getPolynomialApproximationBoundForArg(
+ Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
+{
+ getPolynomialApproximationBounds(k, d, pbounds);
+ Assert(c.isConst());
+ if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, d_zero);
+ bool success = false;
+ unsigned ds = d;
+ TNode ttrf = d_taylor_real_fv;
+ TNode tc = c;
+ do
+ {
+ success = true;
+ unsigned n = 2 * ds;
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ // check that 1-c^{n+1}/(n+1)! > 0
+ Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
+ Node rus = ru.substitute(ttrf, tc);
+ rus = Rewriter::rewrite(rus);
+ Assert(rus.isConst());
+ if (rus.getConst<Rational>() > d_one.getConst<Rational>())
+ {
+ success = false;
+ ds = ds + 1;
+ }
+ } while (!success);
+ if (ds > d)
+ {
+ Trace("nl-ext-exp-taylor")
+ << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
+ << k << " " << c << ")" << std::endl;
+ // must use sound upper bound
+ std::vector<Node> pboundss;
+ getPolynomialApproximationBounds(k, ds, pboundss);
+ pbounds[2] = pboundss[2];
+ }
+ }
+}
+
std::pair<Node, Node> NonlinearExtension::getTfModelBounds(Node tf, unsigned d)
{
// compute the model value of the argument
@@ -4847,7 +4935,7 @@ std::pair<Node, Node> NonlinearExtension::getTfModelBounds(Node tf, unsigned d)
bool isNeg = csign == -1;
std::vector<Node> pbounds;
- getPolynomialApproximationBounds(tf.getKind(), d, pbounds);
+ getPolynomialApproximationBoundForArg(tf.getKind(), c, d, pbounds);
std::vector<Node> bounds;
TNode tfv = d_taylor_real_fv;
diff --git a/src/theory/arith/nonlinear_extension.h b/src/theory/arith/nonlinear_extension.h
index c0af312b3..cb74502d6 100644
--- a/src/theory/arith/nonlinear_extension.h
+++ b/src/theory/arith/nonlinear_extension.h
@@ -317,8 +317,10 @@ class NonlinearExtension {
* Adds the model substitution v -> s. This applies the substitution
* { v -> s } to each term in d_check_model_subs and adds v,s to
* d_check_model_vars and d_check_model_subs respectively.
+ * If this method returns false, then the substitution v -> s is inconsistent
+ * with the current substitution and bounds.
*/
- void addCheckModelSubstitution(TNode v, TNode s);
+ bool addCheckModelSubstitution(TNode v, TNode s);
/** lower and upper bounds for check model
*
* For each term t in the domain of this map, if this stores the pair
@@ -335,9 +337,11 @@ class NonlinearExtension {
/** add check model bound
*
* Adds the bound x -> < l, u > to the map above, and records the
- * approximation ( x, l <= x <= u ) in the model.
+ * approximation ( x, l <= x <= u ) in the model. This method returns false
+ * if the bound is inconsistent with the current model substitution or
+ * bounds.
*/
- void addCheckModelBound(TNode v, TNode l, TNode u);
+ bool addCheckModelBound(TNode v, TNode l, TNode u);
/**
* The map from literals that our model construction solved, to the variable
* that was solved for. Examples of such literals are:
@@ -586,7 +590,7 @@ class NonlinearExtension {
// factor skolems
std::map< Node, Node > d_factor_skolem;
Node getFactorSkolem( Node n, std::vector< Node >& lemmas );
-
+
// tangent plane bounds
std::map< Node, std::map< Node, Node > > d_tangent_val_bound[4];
@@ -645,15 +649,32 @@ class NonlinearExtension {
unsigned d_taylor_degree;
/** polynomial approximation bounds
*
- * This adds P_l+, P_l-, P_u+, P_u- to pbounds, where these are polynomial
- * approximations of the Taylor series of <k>( 0 ) for degree 2*d where
- * k is SINE or EXPONENTIAL.
+ * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
+ * d_taylor_real_fv. These are polynomial approximations of the Taylor series
+ * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
* These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
* for positive/negative (+/-) values of the argument of <k>( 0 ).
+ *
+ * Notice that for certain bounds (e.g. upper bounds for exponential), the
+ * Taylor approximation for a fixed degree is only sound up to a given
+ * upper bound on the argument. To obtain sound lower/upper bounds for a
+ * given <k>( c ), use the function below.
*/
void getPolynomialApproximationBounds(Kind k,
unsigned d,
std::vector<Node>& pbounds);
+ /** polynomial approximation bounds
+ *
+ * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
+ * that are sound (lower, upper) bounds for <k>( c ). Notice that these
+ * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
+ * c>0, we return the P_u+[x] from the function above for the minimum degree
+ * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
+ */
+ void getPolynomialApproximationBoundForArg(Kind k,
+ Node c,
+ unsigned d,
+ std::vector<Node>& pbounds);
/** cache of the above function */
std::map<Kind, std::map<unsigned, std::vector<Node> > > d_poly_bounds;
/** get transcendental function model bounds
diff --git a/src/theory/datatypes/datatypes_rewriter.h b/src/theory/datatypes/datatypes_rewriter.h
index a6c95b893..05f1a9960 100644
--- a/src/theory/datatypes/datatypes_rewriter.h
+++ b/src/theory/datatypes/datatypes_rewriter.h
@@ -39,6 +39,17 @@ struct SygusAnyConstAttributeId
};
typedef expr::Attribute<SygusAnyConstAttributeId, bool> SygusAnyConstAttribute;
+/**
+ * Attribute true for enumerators whose current model values have been excluded
+ * by sygus symmetry breaking. This is set by the datatypes sygus solver during
+ * LAST_CALL effort checks for each active sygus enumerator.
+ */
+struct SygusSymBreakExcAttributeId
+{
+};
+typedef expr::Attribute<SygusSymBreakExcAttributeId, bool>
+ SygusSymBreakExcAttribute;
+
namespace datatypes {
class DatatypesRewriter {
diff --git a/src/theory/datatypes/datatypes_sygus.cpp b/src/theory/datatypes/datatypes_sygus.cpp
index db1ce1c05..227bd6170 100644
--- a/src/theory/datatypes/datatypes_sygus.cpp
+++ b/src/theory/datatypes/datatypes_sygus.cpp
@@ -299,15 +299,23 @@ void SygusSymBreakNew::assertTesterInternal( int tindex, TNode n, Node exp, std:
if( min_depth<=max_depth ){
TNode x = getFreeVar( ntn );
std::vector<Node> sb_lemmas;
+ // symmetry breaking lemmas requiring predicate elimination
+ std::map<Node, bool> sb_elim_pred;
bool usingSymCons = d_tds->usingSymbolicConsForEnumerator(m);
+ bool isVarAgnostic = d_tds->isVariableAgnosticEnumerator(m);
for (unsigned ds = 0; ds <= max_depth; ds++)
{
// static conjecture-independent symmetry breaking
Trace("sygus-sb-debug") << " simple symmetry breaking...\n";
- Node ipred = getSimpleSymBreakPred(ntn, tindex, ds, usingSymCons);
+ Node ipred = getSimpleSymBreakPred(
+ m, ntn, tindex, ds, usingSymCons, isVarAgnostic);
if (!ipred.isNull())
{
sb_lemmas.push_back(ipred);
+ if (ds == 0 && isVarAgnostic)
+ {
+ sb_elim_pred[ipred] = true;
+ }
}
// static conjecture-dependent symmetry breaking
Trace("sygus-sb-debug")
@@ -332,6 +340,15 @@ void SygusSymBreakNew::assertTesterInternal( int tindex, TNode n, Node exp, std:
for (const Node& slem : sb_lemmas)
{
Node sslem = slem.substitute(x, n, cache);
+ // if we require predicate elimination
+ if (sb_elim_pred.find(slem) != sb_elim_pred.end())
+ {
+ Trace("sygus-sb-tp") << "Eliminate traversal predicates: start "
+ << sslem << std::endl;
+ sslem = eliminateTraversalPredicates(sslem);
+ Trace("sygus-sb-tp") << "Eliminate traversal predicates: finish "
+ << sslem << std::endl;
+ }
if (!rlv.isNull())
{
sslem = nm->mkNode(OR, rlv, sslem);
@@ -407,14 +424,123 @@ Node SygusSymBreakNew::getRelevancyCondition( Node n ) {
}
}
-Node SygusSymBreakNew::getSimpleSymBreakPred(TypeNode tn,
+Node SygusSymBreakNew::getTraversalPredicate(TypeNode tn, Node n, bool isPre)
+{
+ unsigned index = isPre ? 0 : 1;
+ std::map<Node, Node>::iterator itt = d_traversal_pred[index][tn].find(n);
+ if (itt != d_traversal_pred[index][tn].end())
+ {
+ return itt->second;
+ }
+ NodeManager* nm = NodeManager::currentNM();
+ std::vector<TypeNode> types;
+ types.push_back(tn);
+ TypeNode ptn = nm->mkPredicateType(types);
+ Node pred = nm->mkSkolem(isPre ? "pre" : "post", ptn);
+ d_traversal_pred[index][tn][n] = pred;
+ return pred;
+}
+
+Node SygusSymBreakNew::eliminateTraversalPredicates(Node n)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ std::unordered_map<TNode, Node, TNodeHashFunction> visited;
+ std::unordered_map<TNode, Node, TNodeHashFunction>::iterator it;
+ std::map<Node, Node>::iterator ittb;
+ std::vector<TNode> visit;
+ TNode cur;
+ visit.push_back(n);
+ do
+ {
+ cur = visit.back();
+ visit.pop_back();
+ it = visited.find(cur);
+
+ if (it == visited.end())
+ {
+ if (cur.getKind() == APPLY_UF)
+ {
+ Assert(cur.getType().isBoolean());
+ Assert(cur.getNumChildren() == 1
+ && (cur[0].isVar() || cur[0].getKind() == APPLY_SELECTOR_TOTAL));
+ ittb = d_traversal_bool.find(cur);
+ Node ret;
+ if (ittb == d_traversal_bool.end())
+ {
+ std::stringstream ss;
+ ss << "v_" << cur;
+ ret = nm->mkSkolem(ss.str(), cur.getType());
+ d_traversal_bool[cur] = ret;
+ }
+ else
+ {
+ ret = ittb->second;
+ }
+ visited[cur] = ret;
+ }
+ else
+ {
+ visited[cur] = Node::null();
+ visit.push_back(cur);
+ for (const Node& cn : cur)
+ {
+ visit.push_back(cn);
+ }
+ }
+ }
+ else if (it->second.isNull())
+ {
+ Node ret = cur;
+ bool childChanged = false;
+ std::vector<Node> children;
+ if (cur.getMetaKind() == metakind::PARAMETERIZED)
+ {
+ children.push_back(cur.getOperator());
+ }
+ for (const Node& cn : cur)
+ {
+ it = visited.find(cn);
+ Assert(it != visited.end());
+ Assert(!it->second.isNull());
+ childChanged = childChanged || cn != it->second;
+ children.push_back(it->second);
+ }
+ if (childChanged)
+ {
+ ret = nm->mkNode(cur.getKind(), children);
+ }
+ visited[cur] = ret;
+ }
+ } while (!visit.empty());
+ Assert(visited.find(n) != visited.end());
+ Assert(!visited.find(n)->second.isNull());
+ return visited[n];
+}
+
+Node SygusSymBreakNew::getSimpleSymBreakPred(Node e,
+ TypeNode tn,
int tindex,
unsigned depth,
- bool usingSymCons)
+ bool usingSymCons,
+ bool isVarAgnostic)
{
+ // Compute the tuple of expressions we hash the predicate for.
+
+ // The hash value in d_simple_sb_pred for the given options
+ unsigned optHashVal = usingSymCons ? 1 : 0;
+ if (isVarAgnostic && depth == 0)
+ {
+ // variable agnostic symmetry breaking only applies at depth 0
+ optHashVal = optHashVal + 2;
+ }
+ else
+ {
+ // enumerator is only specific to variable agnostic symmetry breaking
+ e = Node::null();
+ }
std::map<unsigned, Node>::iterator it =
- d_simple_sb_pred[tn][tindex][usingSymCons].find(depth);
- if (it != d_simple_sb_pred[tn][tindex][usingSymCons].end())
+ d_simple_sb_pred[e][tn][tindex][optHashVal].find(depth);
+ if (it != d_simple_sb_pred[e][tn][tindex][optHashVal].end())
{
return it->second;
}
@@ -435,7 +561,7 @@ Node SygusSymBreakNew::getSimpleSymBreakPred(TypeNode tn,
Node sop = Node::fromExpr(dt[tindex].getSygusOp());
if (sop.getAttribute(SygusAnyConstAttribute()) || depth > 2)
{
- d_simple_sb_pred[tn][tindex][usingSymCons][depth] = Node::null();
+ d_simple_sb_pred[e][tn][tindex][optHashVal][depth] = Node::null();
return Node::null();
}
// conjunctive conclusion of lemma
@@ -493,6 +619,82 @@ Node SygusSymBreakNew::getSimpleSymBreakPred(TypeNode tn,
sbp_conj.push_back(DatatypesRewriter::mkTester(children[j], i, cdt));
}
}
+
+ if (isVarAgnostic && depth == 0)
+ {
+ // Enforce symmetry breaking lemma template for each x_i:
+ // template z.
+ // is-x_i( z ) => pre_{x_{i-1}}( z )
+ // for args a = 1...n
+ // // pre-definition
+ // pre_{x_i}( z.a ) = a=0 ? pre_{x_i}( z ) : post_{x_i}( z.{a-1} )
+ // post_{x_i}( z ) = post_{x_i}( z.n ) OR is-x_i( z )
+
+ // Notice that we are constructing a symmetry breaking template
+ // under the condition that is-C( z ) holds in this method, where C
+ // is the tindex^{th} constructor of dt. Thus, is-x_i( z ) is either
+ // true or false below.
+
+ Node svl = Node::fromExpr(dt.getSygusVarList());
+ // for each variable
+ Assert(!e.isNull());
+ TypeNode etn = e.getType();
+ // for each variable in the sygus type
+ for (const Node& var : svl)
+ {
+ unsigned sc = d_tds->getSubclassForVar(etn, var);
+ if (d_tds->getNumSubclassVars(etn, sc) == 1)
+ {
+ // unique variable in singleton subclass, skip
+ continue;
+ }
+ // Compute the "predecessor" variable in the subclass of var.
+ Node predVar;
+ unsigned scindex = 0;
+ if (d_tds->getIndexInSubclassForVar(etn, var, scindex))
+ {
+ if (scindex > 0)
+ {
+ predVar = d_tds->getVarSubclassIndex(etn, sc, scindex - 1);
+ }
+ }
+ Node preParentOp = getTraversalPredicate(tn, var, true);
+ Node preParent = nm->mkNode(APPLY_UF, preParentOp, n);
+ Node prev = preParent;
+ // for each child
+ for (const Node& child : children)
+ {
+ TypeNode ctn = child.getType();
+ // my pre is equal to the previous
+ Node preCurrOp = getTraversalPredicate(ctn, var, true);
+ Node preCurr = nm->mkNode(APPLY_UF, preCurrOp, child);
+ // definition of pre, for each argument
+ sbp_conj.push_back(preCurr.eqNode(prev));
+ Node postCurrOp = getTraversalPredicate(ctn, var, false);
+ prev = nm->mkNode(APPLY_UF, postCurrOp, child);
+ }
+ Node postParent = getTraversalPredicate(tn, var, false);
+ Node finish = nm->mkNode(APPLY_UF, postParent, n);
+ // check if we are constructing the symmetry breaking predicate for the
+ // variable in question. If so, is-{x_i}( z ) is true.
+ int varCn = d_tds->getOpConsNum(tn, var);
+ if (varCn == static_cast<int>(tindex))
+ {
+ // the post value is true
+ prev = d_true;
+ // requirement : If I am the variable, I must have seen
+ // the variable before this one in its type class.
+ if (!predVar.isNull())
+ {
+ Node preParentPredVarOp = getTraversalPredicate(tn, predVar, true);
+ Node preParentPredVar = nm->mkNode(APPLY_UF, preParentPredVarOp, n);
+ sbp_conj.push_back(preParentPredVar);
+ }
+ }
+ // definition of post
+ sbp_conj.push_back(finish.eqNode(prev));
+ }
+ }
// depth 1 symmetry breaking : talks about direct children
if (depth == 1)
{
@@ -721,7 +923,7 @@ Node SygusSymBreakNew::getSimpleSymBreakPred(TypeNode tn,
sb_pred = nm->mkNode(
kind::OR, DatatypesRewriter::mkTester(n, tindex, dt).negate(), sb_pred);
}
- d_simple_sb_pred[tn][tindex][usingSymCons][depth] = sb_pred;
+ d_simple_sb_pred[e][tn][tindex][optHashVal][depth] = sb_pred;
return sb_pred;
}
@@ -1062,70 +1264,115 @@ void SygusSymBreakNew::preRegisterTerm( TNode n, std::vector< Node >& lemmas )
}
void SygusSymBreakNew::registerSizeTerm( Node e, std::vector< Node >& lemmas ) {
- if( d_register_st.find( e )==d_register_st.end() ){
- if( e.getType().isDatatype() ){
- const Datatype& dt = ((DatatypeType)(e.getType()).toType()).getDatatype();
- if( dt.isSygus() ){
- if (d_tds->isEnumerator(e))
- {
- d_register_st[e] = true;
- Node ag = d_tds->getActiveGuardForEnumerator(e);
- if( !ag.isNull() ){
- d_anchor_to_active_guard[e] = ag;
- std::map<Node, std::unique_ptr<DecisionStrategy>>::iterator itaas =
- d_anchor_to_ag_strategy.find(e);
- if (itaas == d_anchor_to_ag_strategy.end())
- {
- d_anchor_to_ag_strategy[e].reset(
- new DecisionStrategySingleton("sygus_enum_active",
- ag,
- d_td->getSatContext(),
- d_td->getValuation()));
- }
- d_td->getDecisionManager()->registerStrategy(
- DecisionManager::STRAT_DT_SYGUS_ENUM_ACTIVE,
- d_anchor_to_ag_strategy[e].get());
- }
- Node m;
- if( !ag.isNull() ){
- // if it has an active guard (it is an enumerator), use itself as measure term. This will enforce fairness on it independently.
- m = e;
- }else{
- // otherwise we enforce fairness in a unified way for all
- if( d_generic_measure_term.isNull() ){
- // choose e as master for all future terms
- d_generic_measure_term = e;
- }
- m = d_generic_measure_term;
- }
- Trace("sygus-sb") << "Sygus : register size term : " << e << " with measure " << m << std::endl;
- registerMeasureTerm( m );
- d_szinfo[m]->d_anchors.push_back( e );
- d_anchor_to_measure_term[e] = m;
- if( options::sygusFair()==SYGUS_FAIR_DT_SIZE ){
- // update constraints on the measure term
- if( options::sygusFairMax() ){
- Node ds = NodeManager::currentNM()->mkNode(kind::DT_SIZE, e);
- Node slem = NodeManager::currentNM()->mkNode(
- kind::LEQ, ds, d_szinfo[m]->getOrMkMeasureValue(lemmas));
- lemmas.push_back(slem);
- }else{
- Node mt = d_szinfo[m]->getOrMkActiveMeasureValue(lemmas);
- Node new_mt =
- d_szinfo[m]->getOrMkActiveMeasureValue(lemmas, true);
- Node ds = NodeManager::currentNM()->mkNode(kind::DT_SIZE, e);
- lemmas.push_back(mt.eqNode(
- NodeManager::currentNM()->mkNode(kind::PLUS, new_mt, ds)));
- }
- }
- }else{
- // not sure if it is a size term or not (may be registered later?)
- }
- }else{
- d_register_st[e] = false;
- }
+ if (d_register_st.find(e) != d_register_st.end())
+ {
+ // already registered
+ return;
+ }
+ TypeNode etn = e.getType();
+ if (!etn.isDatatype())
+ {
+ // not a datatype term
+ d_register_st[e] = false;
+ return;
+ }
+ const Datatype& dt = etn.getDatatype();
+ if (!dt.isSygus())
+ {
+ // not a sygus datatype term
+ d_register_st[e] = false;
+ return;
+ }
+ if (!d_tds->isEnumerator(e))
+ {
+ // not sure if it is a size term or not (may be registered later?)
+ return;
+ }
+ d_register_st[e] = true;
+ Node ag = d_tds->getActiveGuardForEnumerator(e);
+ if (!ag.isNull())
+ {
+ d_anchor_to_active_guard[e] = ag;
+ std::map<Node, std::unique_ptr<DecisionStrategy>>::iterator itaas =
+ d_anchor_to_ag_strategy.find(e);
+ if (itaas == d_anchor_to_ag_strategy.end())
+ {
+ d_anchor_to_ag_strategy[e].reset(
+ new DecisionStrategySingleton("sygus_enum_active",
+ ag,
+ d_td->getSatContext(),
+ d_td->getValuation()));
+ }
+ d_td->getDecisionManager()->registerStrategy(
+ DecisionManager::STRAT_DT_SYGUS_ENUM_ACTIVE,
+ d_anchor_to_ag_strategy[e].get());
+ }
+ Node m;
+ if (!ag.isNull())
+ {
+ // if it has an active guard (it is an enumerator), use itself as measure
+ // term. This will enforce fairness on it independently.
+ m = e;
+ }
+ else
+ {
+ // otherwise we enforce fairness in a unified way for all
+ if (d_generic_measure_term.isNull())
+ {
+ // choose e as master for all future terms
+ d_generic_measure_term = e;
+ }
+ m = d_generic_measure_term;
+ }
+ Trace("sygus-sb") << "Sygus : register size term : " << e << " with measure "
+ << m << std::endl;
+ registerMeasureTerm(m);
+ d_szinfo[m]->d_anchors.push_back(e);
+ d_anchor_to_measure_term[e] = m;
+ NodeManager* nm = NodeManager::currentNM();
+ if (options::sygusFair() == SYGUS_FAIR_DT_SIZE)
+ {
+ // update constraints on the measure term
+ Node slem;
+ if (options::sygusFairMax())
+ {
+ Node ds = nm->mkNode(DT_SIZE, e);
+ slem = nm->mkNode(LEQ, ds, d_szinfo[m]->getOrMkMeasureValue(lemmas));
}else{
- d_register_st[e] = false;
+ Node mt = d_szinfo[m]->getOrMkActiveMeasureValue(lemmas);
+ Node new_mt = d_szinfo[m]->getOrMkActiveMeasureValue(lemmas, true);
+ Node ds = nm->mkNode(DT_SIZE, e);
+ slem = mt.eqNode(nm->mkNode(PLUS, new_mt, ds));
+ }
+ Trace("sygus-sb") << "...size lemma : " << slem << std::endl;
+ lemmas.push_back(slem);
+ }
+ if (d_tds->isVariableAgnosticEnumerator(e))
+ {
+ // if it is variable agnostic, enforce top-level constraint that says no
+ // variables occur pre-traversal at top-level
+ Node varList = Node::fromExpr(dt.getSygusVarList());
+ std::vector<Node> constraints;
+ for (const Node& v : varList)
+ {
+ unsigned sc = d_tds->getSubclassForVar(etn, v);
+ // no symmetry breaking occurs for variables in singleton subclasses
+ if (d_tds->getNumSubclassVars(etn, sc) > 1)
+ {
+ Node preRootOp = getTraversalPredicate(etn, v, true);
+ Node preRoot = nm->mkNode(APPLY_UF, preRootOp, e);
+ constraints.push_back(preRoot.negate());
+ }
+ }
+ if (!constraints.empty())
+ {
+ Node preNoVar = constraints.size() == 1 ? constraints[0]
+ : nm->mkNode(AND, constraints);
+ Node preNoVarProc = eliminateTraversalPredicates(preNoVar);
+ Trace("sygus-sb") << "...variable order : " << preNoVarProc << std::endl;
+ Trace("sygus-sb-tp") << "...variable order : " << preNoVarProc
+ << std::endl;
+ lemmas.push_back(preNoVarProc);
}
}
}
@@ -1223,8 +1470,8 @@ void SygusSymBreakNew::incrementCurrentSearchSize( Node m, std::vector< Node >&
for (const TNode& t : itt->second)
{
if (!options::sygusSymBreakLazy()
- || d_active_terms.find(t) != d_active_terms.end()
- && !it->second.empty())
+ || (d_active_terms.find(t) != d_active_terms.end()
+ && !it->second.empty()))
{
Node rlv = getRelevancyCondition(t);
std::unordered_map<TNode, TNode, TNodeHashFunction> cache;
@@ -1290,13 +1537,9 @@ void SygusSymBreakNew::check( std::vector< Node >& lemmas ) {
->toStreamSygus(ss, progv);
Trace("dt-sygus") << ss.str() << std::endl;
}
- // TODO : remove this step (ensure there is no way a sygus term cannot be assigned a tester before this point)
- if (!checkTesters(prog, progv, 0, lemmas))
+ // first check that the value progv for prog is what we expected
+ if (checkValue(prog, progv, 0, lemmas))
{
- Trace("sygus-sb") << " SygusSymBreakNew::check: ...WARNING: considered missing split for " << prog << "." << std::endl;
- // this should not happen generally, it is caused by a sygus term not being assigned a tester
- //Assert( false );
- }else{
//debugging : ensure fairness was properly handled
if( options::sygusFair()==SYGUS_FAIR_DT_SIZE ){
Node prog_sz = NodeManager::currentNM()->mkNode( kind::DT_SIZE, prog );
@@ -1316,7 +1559,11 @@ void SygusSymBreakNew::check( std::vector< Node >& lemmas ) {
// register the search value ( prog -> progv ), this may invoke symmetry breaking
if( options::sygusSymBreakDynamic() ){
+ // check that it is unique up to theory-specific rewriting and
+ // conjecture-specific symmetry breaking.
Node rsv = registerSearchValue(prog, prog, progv, 0, lemmas);
+ SygusSymBreakExcAttribute ssbea;
+ prog.setAttribute(ssbea, rsv.isNull());
if (rsv.isNull())
{
Trace("sygus-sb") << " SygusSymBreakNew::check: ...added new symmetry breaking lemma for " << prog << "." << std::endl;
@@ -1353,10 +1600,10 @@ void SygusSymBreakNew::check( std::vector< Node >& lemmas ) {
}
}
-bool SygusSymBreakNew::checkTesters(Node n,
- Node vn,
- int ind,
- std::vector<Node>& lemmas)
+bool SygusSymBreakNew::checkValue(Node n,
+ Node vn,
+ int ind,
+ std::vector<Node>& lemmas)
{
if (vn.getKind() != kind::APPLY_CONSTRUCTOR)
{
@@ -1364,16 +1611,22 @@ bool SygusSymBreakNew::checkTesters(Node n,
Assert(!vn.getType().isDatatype());
return true;
}
- if( Trace.isOn("sygus-sb-warn") ){
- Node prog_sz = NodeManager::currentNM()->mkNode( kind::DT_SIZE, n );
+ NodeManager* nm = NodeManager::currentNM();
+ if (Trace.isOn("sygus-sb-check-value"))
+ {
+ Node prog_sz = nm->mkNode(DT_SIZE, n);
Node prog_szv = d_td->getValuation().getModel()->getValue( prog_sz );
for( int i=0; i<ind; i++ ){
- Trace("sygus-sb-warn") << " ";
+ Trace("sygus-sb-check-value") << " ";
}
- Trace("sygus-sb-warn") << n << " : " << vn << " : " << prog_szv << std::endl;
+ Trace("sygus-sb-check-value") << n << " : " << vn << " : " << prog_szv
+ << std::endl;
}
TypeNode tn = n.getType();
- const Datatype& dt = ((DatatypeType)tn.toType()).getDatatype();
+ const Datatype& dt = tn.getDatatype();
+ Assert(dt.isSygus());
+
+ // ensure that the expected size bound is met
int cindex = DatatypesRewriter::indexOf(vn.getOperator());
Node tst = DatatypesRewriter::mkTester( n, cindex, dt );
bool hastst = d_td->getEqualityEngine()->hasTerm(tst);
@@ -1384,18 +1637,27 @@ bool SygusSymBreakNew::checkTesters(Node n,
}
if (!hastst || tstrep != d_true)
{
- Trace("sygus-sb-warn") << "- has tester : " << tst << " : " << ( hastst ? "true" : "false" );
- Trace("sygus-sb-warn") << ", value=" << tstrep << std::endl;
+ Trace("sygus-check-value") << "- has tester : " << tst << " : "
+ << (hastst ? "true" : "false");
+ Trace("sygus-check-value") << ", value=" << tstrep << std::endl;
if( !hastst ){
+ // This should not happen generally, it is caused by a sygus term not
+ // being assigned a tester.
Node split = DatatypesRewriter::mkSplit(n, dt);
+ Trace("sygus-sb") << " SygusSymBreakNew::check: ...WARNING: considered "
+ "missing split for "
+ << n << "." << std::endl;
Assert( !split.isNull() );
lemmas.push_back( split );
return false;
}
}
for( unsigned i=0; i<vn.getNumChildren(); i++ ){
- Node sel = NodeManager::currentNM()->mkNode( kind::APPLY_SELECTOR_TOTAL, Node::fromExpr( dt[cindex].getSelectorInternal( tn.toType(), i ) ), n );
- if (!checkTesters(sel, vn[i], ind + 1, lemmas))
+ Node sel = nm->mkNode(
+ APPLY_SELECTOR_TOTAL,
+ Node::fromExpr(dt[cindex].getSelectorInternal(tn.toType(), i)),
+ n);
+ if (!checkValue(sel, vn[i], ind + 1, lemmas))
{
return false;
}
@@ -1474,6 +1736,8 @@ Node SygusSymBreakNew::SygusSizeDecisionStrategy::mkLiteral(unsigned s)
}
Assert(!d_this.isNull());
NodeManager* nm = NodeManager::currentNM();
+ Trace("cegqi-engine") << "******* Sygus : allocate size literal " << s
+ << " for " << d_this << std::endl;
return nm->mkNode(DT_SYGUS_BOUND, d_this, nm->mkConst(Rational(s)));
}
diff --git a/src/theory/datatypes/datatypes_sygus.h b/src/theory/datatypes/datatypes_sygus.h
index 99f9672e7..a38e7c5b8 100644
--- a/src/theory/datatypes/datatypes_sygus.h
+++ b/src/theory/datatypes/datatypes_sygus.h
@@ -207,6 +207,82 @@ private:
};
/** An instance of the above cache, for each anchor */
std::map< Node, SearchCache > d_cache;
+ //-----------------------------------traversal predicates
+ /** pre/post traversal predicates for each type, variable
+ *
+ * This stores predicates (pre, post) whose semantics correspond to whether
+ * a variable has occurred by a (pre, post) traversal of a symbolic term,
+ * where index = 0 corresponds to pre, index = 1 corresponds to post. For
+ * details, see getTraversalPredicate below.
+ */
+ std::map<TypeNode, std::map<Node, Node>> d_traversal_pred[2];
+ /** traversal applications to Boolean variables
+ *
+ * This maps each application of a traversal predicate pre_x( t ) or
+ * post_x( t ) to a fresh Boolean variable.
+ */
+ std::map<Node, Node> d_traversal_bool;
+ /** get traversal predicate
+ *
+ * Get the predicates (pre, post) whose semantics correspond to whether
+ * a variable has occurred by this point in a (pre, post) traversal of a term.
+ * The type of getTraversalPredicate(tn, n, _) is tn -> Bool.
+ *
+ * For example, consider the term:
+ * f( x_1, g( x_2, x_3 ) )
+ * and a left-to-right, depth-first traversal of this term. Let e be
+ * a variable of the same type as this term. We say that for the above term:
+ * pre_{x_1} is false for e, e.1 and true for e.2, e.2.1, e.2.2
+ * pre_{x_2} is false for e, e.1, e.2, e.2.1, and true for e.2.2
+ * pre_{x_3} is false for e, e.1, e.2, e.2.1, e.2.2
+ * post_{x_1} is true for e.1, e.2.1, e.2.2, e.2, e
+ * post_{x_2} is false for e.1 and true for e.2.1, e.2.2, e.2, e
+ * post_{x_3} is false for e.1, e.2.1 and true for e.2.2, e.2, e
+ *
+ * We enforce a symmetry breaking scheme for each enumerator e that is
+ * "variable-agnostic" (see argument isVarAgnostic in registerEnumerator)
+ * that ensures the variables are ordered. This scheme makes use of these
+ * predicates, described in the following:
+ *
+ * Let x_1, ..., x_m be variables that occur in the same subclass in the type
+ * of e (see TermDbSygus::getSubclassForVar).
+ * For i = 1, ..., m:
+ * // each variable does not occur initially in a traversal of e
+ * ~pre_{x_i}( e ) AND
+ * // for each subterm of e
+ * template z.
+ * // if this is variable x_i, then x_{i-1} must have already occurred
+ * is-x_i( z ) => pre_{x_{i-1}}( z ) AND
+ * for args a = 1...n
+ * // pre-definition for each argument of this term
+ * pre_{x_i}( z.a ) = a=0 ? pre_{x_i}( z ) : post_{x_i}( z.{a-1} ) AND
+ * // post-definition for this term
+ * post_{x_i}( z ) = post_{x_i}( z.n ) OR is-x_i( z )
+ *
+ * For clarity, above we have written pre and post as first-order predicates.
+ * However, applications of pre/post should be seen as indexed Boolean
+ * variables. The reason for this is pre and post cannot be given a consistent
+ * semantics. For example, consider term f( x_1, x_1 ) and enumerator variable
+ * e of the same type over which we are encoding a traversal. We have that
+ * pre_{x_1}( e.1 ) is false and pre_{x_1}( e.2 ) is true, although the model
+ * values for e.1 and e.2 are equal. Instead, pre_{x_1}( e.1 ) should be seen
+ * as a Boolean variable V_pre_{x_1,e.1} indexed by x_1 and e.1, and likewise
+ * for e.2. We convert all applications of pre/post to Boolean variables in
+ * the method eliminateTraversalPredicates below. Nevertheless, it is
+ * important that applications pre and post are encoded as APPLY_UF
+ * applications so that they behave as expected under substitutions. For
+ * example, pre_{x_1}( z.1 ) { z -> e.2 } results in pre_{x_1}( e.2.1 ), which
+ * after eliminateTraversalPredicates is V_pre_{x_1, e.2.1}.
+ */
+ Node getTraversalPredicate(TypeNode tn, Node n, bool isPre);
+ /** eliminate traversal predicates
+ *
+ * This replaces all applications of traversal predicates P( x ) in n with
+ * unique Boolean variables, given by d_traversal_bool[ P( x ) ], and
+ * returns the result.
+ */
+ Node eliminateTraversalPredicates(Node n);
+ //-----------------------------------end traversal predicates
/** a sygus sampler object for each (anchor, sygus type) pair
*
* This is used for the sygusRewVerify() option to verify the correctness of
@@ -396,15 +472,23 @@ private:
* where t is a search term, see registerSearchTerm for definition of search
* term.
*
- * usingSymCons is whether we are using symbolic constructors for subterms in
- * the type tn. This may affect the form of the predicate we construct.
+ * usingSymCons: whether we are using symbolic constructors for subterms in
+ * the type tn,
+ * isVarAgnostic: whether the terms we are enumerating are agnostic to
+ * variables.
+ *
+ * The latter two options may affect the form of the predicate we construct.
*/
- Node getSimpleSymBreakPred(TypeNode tn,
+ Node getSimpleSymBreakPred(Node e,
+ TypeNode tn,
int tindex,
unsigned depth,
- bool usingSymCons);
+ bool usingSymCons,
+ bool isVarAgnostic);
/** Cache of the above function */
- std::map<TypeNode, std::map<int, std::map<bool, std::map<unsigned, Node>>>>
+ std::map<Node,
+ std::map<TypeNode,
+ std::map<int, std::map<bool, std::map<unsigned, Node>>>>>
d_simple_sb_pred;
/**
* For each search term, this stores the maximum depth for which we have added
@@ -579,7 +663,7 @@ private:
*/
Node getCurrentTemplate( Node n, std::map< TypeNode, int >& var_count );
//----------------------end search size information
- /** check testers
+ /** check value
*
* This is called when we have a model assignment vn for n, where n is
* a selector chain applied to an enumerator (a search term). This function
@@ -602,7 +686,7 @@ private:
* method should not ever add anything to lemmas. However, due to its
* importance, we check this regardless.
*/
- bool checkTesters(Node n, Node vn, int ind, std::vector<Node>& lemmas);
+ bool checkValue(Node n, Node vn, int ind, std::vector<Node>& lemmas);
/**
* Get the current SAT status of the guard g.
* In particular, this returns 1 if g is asserted true, -1 if it is asserted
diff --git a/src/theory/quantifiers/extended_rewrite.cpp b/src/theory/quantifiers/extended_rewrite.cpp
index 57549ba63..404bb850c 100644
--- a/src/theory/quantifiers/extended_rewrite.cpp
+++ b/src/theory/quantifiers/extended_rewrite.cpp
@@ -787,8 +787,8 @@ Node ExtendedRewriter::extendedRewriteBcp(
// also, treat it as clause if possible
if (cln.getNumChildren() > 0
- & (bcp_kinds.empty()
- || bcp_kinds.find(cln.getKind()) != bcp_kinds.end()))
+ && (bcp_kinds.empty()
+ || bcp_kinds.find(cln.getKind()) != bcp_kinds.end()))
{
if (std::find(clauses.begin(), clauses.end(), cn) == clauses.end()
&& prop_clauses.find(cn) == prop_clauses.end())
diff --git a/src/theory/quantifiers/quantifiers_rewriter.cpp b/src/theory/quantifiers/quantifiers_rewriter.cpp
index 1f94dd3df..e2a26f6e6 100644
--- a/src/theory/quantifiers/quantifiers_rewriter.cpp
+++ b/src/theory/quantifiers/quantifiers_rewriter.cpp
@@ -667,7 +667,28 @@ Node QuantifiersRewriter::computeProcessTerms2( Node body, bool hasPol, bool pol
}
}
}
- }else if( elimExtArith ){
+ }
+ else if (ret.getKind() == SELECT && ret[0].getKind() == STORE)
+ {
+ Node st = ret[0];
+ Node index = ret[1];
+ std::vector<Node> iconds;
+ std::vector<Node> elements;
+ while (st.getKind() == STORE)
+ {
+ iconds.push_back(index.eqNode(st[1]));
+ elements.push_back(st[2]);
+ st = st[0];
+ }
+ ret = nm->mkNode(SELECT, st, index);
+ // conditions
+ for (int i = (iconds.size() - 1); i >= 0; i--)
+ {
+ ret = nm->mkNode(ITE, iconds[i], elements[i], ret);
+ }
+ }
+ else if( elimExtArith )
+ {
if( ret.getKind()==INTS_DIVISION_TOTAL || ret.getKind()==INTS_MODULUS_TOTAL ){
Node num = ret[0];
Node den = ret[1];
@@ -711,25 +732,6 @@ Node QuantifiersRewriter::computeProcessTerms2( Node body, bool hasPol, bool pol
}
}
}
- else if (ret.getKind() == SELECT && ret[0].getKind() == STORE)
- {
- Node st = ret[0];
- Node index = ret[1];
- std::vector<Node> iconds;
- std::vector<Node> elements;
- while (st.getKind() == STORE)
- {
- iconds.push_back(index.eqNode(st[1]));
- elements.push_back(st[2]);
- st = st[0];
- }
- ret = nm->mkNode(SELECT, st, index);
- // conditions
- for (int i = (iconds.size() - 1); i >= 0; i--)
- {
- ret = nm->mkNode(ITE, iconds[i], elements[i], ret);
- }
- }
icache[prev] = ret;
return ret;
}
diff --git a/src/theory/quantifiers/sygus/cegis.cpp b/src/theory/quantifiers/sygus/cegis.cpp
index fbe0da7a8..db9af10b4 100644
--- a/src/theory/quantifiers/sygus/cegis.cpp
+++ b/src/theory/quantifiers/sygus/cegis.cpp
@@ -13,6 +13,7 @@
**/
#include "theory/quantifiers/sygus/cegis.h"
+#include "expr/node_algorithm.h"
#include "options/base_options.h"
#include "options/quantifiers_options.h"
#include "printer/printer.h"
@@ -100,15 +101,47 @@ void Cegis::getTermList(const std::vector<Node>& candidates,
bool Cegis::addEvalLemmas(const std::vector<Node>& candidates,
const std::vector<Node>& candidate_values)
{
+ // First, decide if this call will apply "conjecture-specific refinement".
+ // In other words, in some settings, the following method will identify and
+ // block a class of solutions {candidates -> S} that generalizes the current
+ // one (given by {candidates -> candidate_values}), such that for each
+ // candidate_values' in S, we have that {candidates -> candidate_values'} is
+ // also not a solution for the given conjecture. We may not
+ // apply this form of refinement if any (relevant) enumerator in candidates is
+ // "actively generated" (see TermDbSygus::isPassiveEnumerator), since its
+ // model values are themselves interpreted as classes of solutions.
+ bool doGen = true;
+ for (const Node& v : candidates)
+ {
+ // if it is relevant to refinement
+ if (d_refinement_lemma_vars.find(v) != d_refinement_lemma_vars.end())
+ {
+ if (!d_tds->isPassiveEnumerator(v))
+ {
+ doGen = false;
+ break;
+ }
+ }
+ }
NodeManager* nm = NodeManager::currentNM();
bool addedEvalLemmas = false;
if (options::sygusRefEval())
{
- Trace("cegqi-engine") << " *** Do refinement lemma evaluation..."
- << std::endl;
+ Trace("cegqi-engine") << " *** Do refinement lemma evaluation"
+ << (doGen ? " with conjecture-specific refinement"
+ : "")
+ << "..." << std::endl;
// see if any refinement lemma is refuted by evaluation
std::vector<Node> cre_lems;
- getRefinementEvalLemmas(candidates, candidate_values, cre_lems);
+ bool ret =
+ getRefinementEvalLemmas(candidates, candidate_values, cre_lems, doGen);
+ if (ret && !doGen)
+ {
+ Trace("cegqi-engine") << "...(actively enumerated) candidate failed "
+ "refinement lemma evaluation."
+ << std::endl;
+ return true;
+ }
if (!cre_lems.empty())
{
for (const Node& lem : cre_lems)
@@ -124,7 +157,8 @@ bool Cegis::addEvalLemmas(const std::vector<Node>& candidates,
add the lemmas below as well, in parallel. */
}
}
- if (d_eval_unfold != nullptr)
+ // we only do evaluation unfolding for passive enumerators
+ if (doGen && d_eval_unfold != nullptr)
{
Trace("cegqi-engine") << " *** Do evaluation unfolding..." << std::endl;
std::vector<Node> eager_terms, eager_vals, eager_exps;
@@ -281,6 +315,8 @@ void Cegis::addRefinementLemma(Node lem)
}
// rewrite with extended rewriter
slem = d_tds->getExtRewriter()->extendedRewrite(slem);
+ // collect all variables in slem
+ expr::getSymbols(slem, d_refinement_lemma_vars);
std::vector<Node> waiting;
waiting.push_back(lem);
unsigned wcounter = 0;
@@ -408,10 +444,10 @@ void Cegis::registerRefinementLemma(const std::vector<Node>& vars,
}
bool Cegis::usingRepairConst() { return true; }
-
-void Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
+bool Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
const std::vector<Node>& ms,
- std::vector<Node>& lems)
+ std::vector<Node>& lems,
+ bool doGen)
{
Trace("sygus-cref-eval") << "Cref eval : conjecture has "
<< d_refinement_lemma_unit.size() << " unit and "
@@ -424,6 +460,7 @@ void Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
Node nfalse = nm->mkConst(false);
Node neg_guard = d_parent->getGuard().negate();
+ bool ret = false;
for (unsigned r = 0; r < 2; r++)
{
std::unordered_set<Node, NodeHashFunction>& rlemmas =
@@ -447,6 +484,12 @@ void Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
<< "...after unfolding is : " << lemcsu << std::endl;
if (lemcsu.isConst() && !lemcsu.getConst<bool>())
{
+ if (!doGen)
+ {
+ // we are not generating the lemmas, instead just return
+ return true;
+ }
+ ret = true;
std::vector<Node> msu;
std::vector<Node> mexp;
msu.insert(msu.end(), ms.begin(), ms.end());
@@ -480,13 +523,12 @@ void Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
{
cre_lem = neg_guard;
}
- }
- if (!cre_lem.isNull()
- && std::find(lems.begin(), lems.end(), cre_lem) == lems.end())
- {
- Trace("sygus-cref-eval")
- << "...produced lemma : " << cre_lem << std::endl;
- lems.push_back(cre_lem);
+ if (std::find(lems.begin(), lems.end(), cre_lem) == lems.end())
+ {
+ Trace("sygus-cref-eval") << "...produced lemma : " << cre_lem
+ << std::endl;
+ lems.push_back(cre_lem);
+ }
}
}
if (!lems.empty())
@@ -494,6 +536,7 @@ void Cegis::getRefinementEvalLemmas(const std::vector<Node>& vs,
break;
}
}
+ return ret;
}
bool Cegis::sampleAddRefinementLemma(const std::vector<Node>& candidates,
diff --git a/src/theory/quantifiers/sygus/cegis.h b/src/theory/quantifiers/sygus/cegis.h
index c7392b378..7387bd468 100644
--- a/src/theory/quantifiers/sygus/cegis.h
+++ b/src/theory/quantifiers/sygus/cegis.h
@@ -104,6 +104,8 @@ class Cegis : public SygusModule
/** substitution entailed by d_refinement_lemma_unit */
std::vector<Node> d_rl_eval_hds;
std::vector<Node> d_rl_vals;
+ /** all variables appearing in refinement lemmas */
+ std::unordered_set<Node, NodeHashFunction> d_refinement_lemma_vars;
/** adds lem as a refinement lemma */
void addRefinementLemma(Node lem);
/** add refinement lemma conjunct
@@ -150,10 +152,14 @@ class Cegis : public SygusModule
* Given a candidate solution ms for candidates vs, this function adds lemmas
* to lems based on evaluating the conjecture, instantiated for ms, on lemmas
* for previous refinements (d_refinement_lemmas).
+ *
+ * Returns true if any such lemma exists. If doGen is false, then the
+ * lemmas are not generated or added to lems.
*/
- void getRefinementEvalLemmas(const std::vector<Node>& vs,
+ bool getRefinementEvalLemmas(const std::vector<Node>& vs,
const std::vector<Node>& ms,
- std::vector<Node>& lems);
+ std::vector<Node>& lems,
+ bool doGen);
/** sampler object for the option cegisSample()
*
* This samples points of the type of the inner variables of the synthesis
diff --git a/src/theory/quantifiers/sygus/cegis_unif.cpp b/src/theory/quantifiers/sygus/cegis_unif.cpp
index 456f44019..56edc55c6 100644
--- a/src/theory/quantifiers/sygus/cegis_unif.cpp
+++ b/src/theory/quantifiers/sygus/cegis_unif.cpp
@@ -86,6 +86,7 @@ void CegisUnif::getTermList(const std::vector<Node>& candidates,
// Non-unif candidate are themselves the enumerators
enums.insert(
enums.end(), d_non_unif_candidates.begin(), d_non_unif_candidates.end());
+ Valuation& valuation = d_qe->getValuation();
for (const Node& c : d_unif_candidates)
{
// Collect heads of candidates
@@ -95,6 +96,31 @@ void CegisUnif::getTermList(const std::vector<Node>& candidates,
<< "......cand " << c << " with enum hd " << hd << "\n";
enums.push_back(hd);
}
+ // get unification enumerators
+ for (const Node& e : d_cand_to_strat_pt[c])
+ {
+ for (unsigned index = 0; index < 2; index++)
+ {
+ std::vector<Node> uenums;
+ // get the current unification enumerators
+ d_u_enum_manager.getEnumeratorsForStrategyPt(e, uenums, index);
+ if (index == 1 && options::sygusUnifCondIndependent())
+ {
+ Assert(uenums.size() == 1);
+ Node eu = uenums[0];
+ Node g = d_u_enum_manager.getActiveGuardForEnumerator(eu);
+ // If active guard for this enumerator is not true, there are no more
+ // values for it, and hence we ignore it
+ Node gstatus = valuation.getSatValue(g);
+ if (gstatus.isNull() || !gstatus.getConst<bool>())
+ {
+ continue;
+ }
+ }
+ // get the model value of each enumerator
+ enums.insert(enums.end(), uenums.begin(), uenums.end());
+ }
+ }
}
}
@@ -118,10 +144,15 @@ bool CegisUnif::processConstructCandidates(const std::vector<Node>& enums,
// if we didn't satisfy the specification, there is no way to repair
return false;
}
+ // build model value map
+ std::map<Node, Node> mvMap;
+ for (unsigned i = 0, size = enums.size(); i < size; i++)
+ {
+ mvMap[enums[i]] = enum_values[i];
+ }
// the unification enumerators (return values, conditions) and their model
// values
NodeManager* nm = NodeManager::currentNM();
- Valuation& valuation = d_qe->getValuation();
bool addedUnifEnumSymBreakLemma = false;
Node cost_lit = d_u_enum_manager.getAssertedLiteral();
std::map<Node, std::vector<Node>> unif_enums[2];
@@ -143,11 +174,7 @@ bool CegisUnif::processConstructCandidates(const std::vector<Node>& enums,
{
Assert(unif_enums[index][e].size() == 1);
Node eu = unif_enums[index][e][0];
- Node g = d_u_enum_manager.getActiveGuardForEnumerator(eu);
- // If active guard for this enumerator is not true, there are no more
- // values for it, and hence we ignore it
- Node gstatus = valuation.getSatValue(g);
- if (gstatus.isNull() || !gstatus.getConst<bool>())
+ if (mvMap.find(eu) == mvMap.end())
{
Trace("cegis") << " " << eu << " -> N/A\n";
unif_enums[index][e].clear();
@@ -157,7 +184,8 @@ bool CegisUnif::processConstructCandidates(const std::vector<Node>& enums,
// get the model value of each enumerator
for (const Node& eu : unif_enums[index][e])
{
- Node m_eu = d_parent->getModelValue(eu);
+ Assert(mvMap.find(eu) != mvMap.end());
+ Node m_eu = mvMap[eu];
if (Trace.isOn("cegis"))
{
Trace("cegis") << " " << eu << " -> ";
@@ -232,11 +260,16 @@ bool CegisUnif::processConstructCandidates(const std::vector<Node>& enums,
if (options::sygusUnifCondIndependent() && !unif_enums[1][e].empty())
{
Node eu = unif_enums[1][e][0];
- Node g = d_u_enum_manager.getActiveGuardForEnumerator(eu);
- Node exp_exc = d_tds->getExplain()
- ->getExplanationForEquality(eu, unif_values[1][e][0])
- .negate();
- lems.push_back(nm->mkNode(OR, g.negate(), exp_exc));
+ Assert(d_tds->isEnumerator(eu));
+ if (d_tds->isPassiveEnumerator(eu))
+ {
+ Node g = d_u_enum_manager.getActiveGuardForEnumerator(eu);
+ Node exp_exc =
+ d_tds->getExplain()
+ ->getExplanationForEquality(eu, unif_values[1][e][0])
+ .negate();
+ lems.push_back(nm->mkNode(OR, g.negate(), exp_exc));
+ }
}
}
}
diff --git a/src/theory/quantifiers/sygus/sygus_module.h b/src/theory/quantifiers/sygus/sygus_module.h
index c1b12dfd0..02cf1cdf2 100644
--- a/src/theory/quantifiers/sygus/sygus_module.h
+++ b/src/theory/quantifiers/sygus/sygus_module.h
@@ -71,6 +71,16 @@ class SygusModule
*/
virtual void getTermList(const std::vector<Node>& candidates,
std::vector<Node>& terms) = 0;
+ /** allow partial model
+ *
+ * This method returns true if this module does not require that all
+ * terms returned by getTermList have "proper" model values when calling
+ * constructCandidates. A term may have a model value that is not proper
+ * if is excluded by symmetry breaking, e.g. x+0 is not proper. All model
+ * values that are not proper are replaced by "null" when calling
+ * constructCandidates.
+ */
+ virtual bool allowPartialModel() { return false; }
/** construct candidate
*
* This function takes as input:
diff --git a/src/theory/quantifiers/sygus/sygus_pbe.cpp b/src/theory/quantifiers/sygus/sygus_pbe.cpp
index 9a6645560..b7e6e0c65 100644
--- a/src/theory/quantifiers/sygus/sygus_pbe.cpp
+++ b/src/theory/quantifiers/sygus/sygus_pbe.cpp
@@ -415,6 +415,8 @@ void SygusPbe::getTermList(const std::vector<Node>& candidates,
}
}
+bool SygusPbe::allowPartialModel() { return !options::sygusPbeMultiFair(); }
+
bool SygusPbe::constructCandidates(const std::vector<Node>& enums,
const std::vector<Node>& enum_values,
const std::vector<Node>& candidates,
@@ -431,10 +433,18 @@ bool SygusPbe::constructCandidates(const std::vector<Node>& enums,
Trace("sygus-pbe-enum") << " " << enums[i] << " -> ";
TermDbSygus::toStreamSygus("sygus-pbe-enum", enum_values[i]);
Trace("sygus-pbe-enum") << std::endl;
- unsigned sz = d_tds->getSygusTermSize( enum_values[i] );
- szs.push_back(sz);
- if( i==0 || sz<min_term_size ){
- min_term_size = sz;
+ if (!enum_values[i].isNull())
+ {
+ unsigned sz = d_tds->getSygusTermSize(enum_values[i]);
+ szs.push_back(sz);
+ if (i == 0 || sz < min_term_size)
+ {
+ min_term_size = sz;
+ }
+ }
+ else
+ {
+ szs.push_back(0);
}
}
// Assume two enumerators of types T1 and T2.
@@ -448,11 +458,14 @@ bool SygusPbe::constructCandidates(const std::vector<Node>& enums,
std::vector<unsigned> enum_consider;
for (unsigned i = 0, esize = enums.size(); i < esize; i++)
{
- Assert(szs[i] >= min_term_size);
- int diff = szs[i] - min_term_size;
- if (!options::sygusPbeMultiFair() || diff <= diffAllow)
+ if (!enum_values[i].isNull())
{
- enum_consider.push_back( i );
+ Assert(szs[i] >= min_term_size);
+ int diff = szs[i] - min_term_size;
+ if (!options::sygusPbeMultiFair() || diff <= diffAllow)
+ {
+ enum_consider.push_back(i);
+ }
}
}
@@ -468,14 +481,17 @@ bool SygusPbe::constructCandidates(const std::vector<Node>& enums,
Node c = d_enum_to_candidate[e];
std::vector<Node> enum_lems;
d_sygus_unif[c].notifyEnumeration(e, v, enum_lems);
- // the lemmas must be guarded by the active guard of the enumerator
- Assert(d_enum_to_active_guard.find(e) != d_enum_to_active_guard.end());
- Node g = d_enum_to_active_guard[e];
- for (unsigned j = 0, size = enum_lems.size(); j < size; j++)
+ if (!enum_lems.empty())
{
- enum_lems[j] = nm->mkNode(OR, g.negate(), enum_lems[j]);
+ // the lemmas must be guarded by the active guard of the enumerator
+ Assert(d_enum_to_active_guard.find(e) != d_enum_to_active_guard.end());
+ Node g = d_enum_to_active_guard[e];
+ for (unsigned j = 0, size = enum_lems.size(); j < size; j++)
+ {
+ enum_lems[j] = nm->mkNode(OR, g.negate(), enum_lems[j]);
+ }
+ lems.insert(lems.end(), enum_lems.begin(), enum_lems.end());
}
- lems.insert(lems.end(), enum_lems.begin(), enum_lems.end());
}
}
for( unsigned i=0; i<candidates.size(); i++ ){
diff --git a/src/theory/quantifiers/sygus/sygus_pbe.h b/src/theory/quantifiers/sygus/sygus_pbe.h
index 66e19b6c9..b2ad5f63a 100644
--- a/src/theory/quantifiers/sygus/sygus_pbe.h
+++ b/src/theory/quantifiers/sygus/sygus_pbe.h
@@ -117,6 +117,11 @@ class SygusPbe : public SygusModule
*/
void getTermList(const std::vector<Node>& candidates,
std::vector<Node>& terms) override;
+ /**
+ * PBE allows partial models to handle multiple enumerators if we are not
+ * using a strictly fair enumeration strategy.
+ */
+ bool allowPartialModel() override;
/** construct candidates
*
* This function attempts to use unification-based approaches for constructing
diff --git a/src/theory/quantifiers/sygus/sygus_unif_io.cpp b/src/theory/quantifiers/sygus/sygus_unif_io.cpp
index eca88cab8..4fe3cfbed 100644
--- a/src/theory/quantifiers/sygus/sygus_unif_io.cpp
+++ b/src/theory/quantifiers/sygus/sygus_unif_io.cpp
@@ -562,7 +562,10 @@ void SygusUnifIo::notifyEnumeration(Node e, Node v, std::vector<Node>& lemmas)
// is it excluded for domain-specific reason?
std::vector<Node> exp_exc_vec;
- if (getExplanationForEnumeratorExclude(e, v, base_results, exp_exc_vec))
+ Assert(d_tds->isEnumerator(e));
+ bool isPassive = d_tds->isPassiveEnumerator(e);
+ if (isPassive
+ && getExplanationForEnumeratorExclude(e, v, base_results, exp_exc_vec))
{
Assert(!exp_exc_vec.empty());
exp_exc = exp_exc_vec.size() == 1
@@ -707,16 +710,20 @@ void SygusUnifIo::notifyEnumeration(Node e, Node v, std::vector<Node>& lemmas)
}
}
- // exclude this value on subsequent iterations
- if (exp_exc.isNull())
+ if (isPassive)
{
- // if we did not already explain why this should be excluded, use default
- exp_exc = d_tds->getExplain()->getExplanationForEquality(e, v);
+ // exclude this value on subsequent iterations
+ if (exp_exc.isNull())
+ {
+ Trace("sygus-sui-enum-lemma") << "Use basic exclusion." << std::endl;
+ // if we did not already explain why this should be excluded, use default
+ exp_exc = d_tds->getExplain()->getExplanationForEquality(e, v);
+ }
+ exp_exc = exp_exc.negate();
+ Trace("sygus-sui-enum-lemma")
+ << "SygusUnifIo : enumeration exclude lemma : " << exp_exc << std::endl;
+ lemmas.push_back(exp_exc);
}
- exp_exc = exp_exc.negate();
- Trace("sygus-sui-enum-lemma")
- << "SygusUnifIo : enumeration exclude lemma : " << exp_exc << std::endl;
- lemmas.push_back(exp_exc);
}
bool SygusUnifIo::constructSolution(std::vector<Node>& sols,
diff --git a/src/theory/quantifiers/sygus/synth_conjecture.cpp b/src/theory/quantifiers/sygus/synth_conjecture.cpp
index fde69d196..99f1131fe 100644
--- a/src/theory/quantifiers/sygus/synth_conjecture.cpp
+++ b/src/theory/quantifiers/sygus/synth_conjecture.cpp
@@ -23,6 +23,7 @@
#include "smt/smt_engine.h"
#include "smt/smt_engine_scope.h"
#include "smt/smt_statistics_registry.h"
+#include "theory/datatypes/datatypes_rewriter.h"
#include "theory/quantifiers/first_order_model.h"
#include "theory/quantifiers/instantiate.h"
#include "theory/quantifiers/quantifiers_attributes.h"
@@ -40,6 +41,7 @@ namespace quantifiers {
SynthConjecture::SynthConjecture(QuantifiersEngine* qe)
: d_qe(qe),
+ d_tds(qe->getTermDatabaseSygus()),
d_ceg_si(new CegSingleInv(qe, this)),
d_ceg_proc(new SynthConjectureProcess(qe)),
d_ceg_gc(new CegGrammarConstructor(qe, this)),
@@ -338,7 +340,14 @@ void SynthConjecture::doCheck(std::vector<Node>& lems)
// get the model value of the relevant terms from the master module
std::vector<Node> enum_values;
- getModelValues(terms, enum_values);
+ bool fullModel = getEnumeratedValues(terms, enum_values);
+
+ // if the master requires a full model and the model is partial, we fail
+ if (!d_master->allowPartialModel() && !fullModel)
+ {
+ Trace("cegqi-check") << "...partial model, fail." << std::endl;
+ return;
+ }
if (!constructed_cand)
{
@@ -442,7 +451,7 @@ void SynthConjecture::doCheck(std::vector<Node>& lems)
// eagerly unfold applications of evaluation function
Trace("cegqi-debug") << "pre-unfold counterexample : " << lem << std::endl;
std::map<Node, Node> visited_n;
- lem = d_qe->getTermDatabaseSygus()->getEagerUnfold(lem, visited_n);
+ lem = d_tds->getEagerUnfold(lem, visited_n);
// record the instantiation
// this is used for remembering the solution
recordInstantiation(candidate_values);
@@ -534,7 +543,12 @@ void SynthConjecture::doRefine(std::vector<Node>& lems)
if (d_ce_sk_var_mvs.empty())
{
std::vector<Node> model_values;
- getModelValues(d_ce_sk_vars, model_values);
+ for (const Node& v : d_ce_sk_vars)
+ {
+ Node mv = getModelValue(v);
+ Trace("cegqi-refine") << " " << v << " -> " << mv << std::endl;
+ model_values.push_back(mv);
+ }
sk_subs.insert(sk_subs.end(), model_values.begin(), model_values.end());
}
else
@@ -586,30 +600,59 @@ void SynthConjecture::preregisterConjecture(Node q)
d_ceg_si->preregisterConjecture(q);
}
-void SynthConjecture::getModelValues(std::vector<Node>& n, std::vector<Node>& v)
+bool SynthConjecture::getEnumeratedValues(std::vector<Node>& n,
+ std::vector<Node>& v)
{
+ bool ret = true;
Trace("cegqi-engine") << " * Value is : ";
for (unsigned i = 0; i < n.size(); i++)
{
- Node nv = getModelValue(n[i]);
+ Node nv = getEnumeratedValue(n[i]);
v.push_back(nv);
+ ret = ret && !nv.isNull();
if (Trace.isOn("cegqi-engine"))
{
- TypeNode tn = nv.getType();
- Trace("cegqi-engine") << n[i] << " -> ";
+ Node onv = nv.isNull() ? d_qe->getModel()->getValue(n[i]) : nv;
+ TypeNode tn = onv.getType();
std::stringstream ss;
- Printer::getPrinter(options::outputLanguage())->toStreamSygus(ss, nv);
- Trace("cegqi-engine") << ss.str() << " ";
- if (Trace.isOn("cegqi-engine-rr"))
+ Printer::getPrinter(options::outputLanguage())->toStreamSygus(ss, onv);
+ Trace("cegqi-engine") << n[i] << " -> ";
+ if (nv.isNull())
{
- Node bv = d_qe->getTermDatabaseSygus()->sygusToBuiltin(nv, tn);
- bv = Rewriter::rewrite(bv);
- Trace("cegqi-engine-rr") << " -> " << bv << std::endl;
+ Trace("cegqi-engine") << "[EXC: " << ss.str() << "] ";
+ }
+ else
+ {
+ Trace("cegqi-engine") << ss.str() << " ";
+ if (Trace.isOn("cegqi-engine-rr"))
+ {
+ Node bv = d_tds->sygusToBuiltin(nv, tn);
+ bv = Rewriter::rewrite(bv);
+ Trace("cegqi-engine-rr") << " -> " << bv << std::endl;
+ }
}
}
- Assert(!nv.isNull());
}
Trace("cegqi-engine") << std::endl;
+ return ret;
+}
+
+Node SynthConjecture::getEnumeratedValue(Node e)
+{
+ if (e.getAttribute(SygusSymBreakExcAttribute()))
+ {
+ // if the current model value of e was excluded by symmetry breaking, then
+ // it does not have a proper model value that we should consider, thus we
+ // return null.
+ return Node::null();
+ }
+ if (d_tds->isPassiveEnumerator(e))
+ {
+ return getModelValue(e);
+ }
+ Assert(false);
+ // management of actively generated enumerators goes here
+ return getModelValue(e);
}
Node SynthConjecture::getModelValue(Node n)
@@ -692,8 +735,7 @@ void SynthConjecture::printAndContinueStream()
{
sol = d_cinfo[cprog].d_inst.back();
// add to explanation of exclusion
- d_qe->getTermDatabaseSygus()->getExplain()->getExplanationForEquality(
- cprog, sol, exp);
+ d_tds->getExplain()->getExplanationForEquality(cprog, sol, exp);
}
}
Assert(!exp.empty());
@@ -791,7 +833,6 @@ void SynthConjecture::getSynthSolutions(std::map<Node, Node>& sol_map,
bool singleInvocation)
{
NodeManager* nm = NodeManager::currentNM();
- TermDbSygus* sygusDb = d_qe->getTermDatabaseSygus();
std::vector<Node> sols;
std::vector<int> statuses;
if (!getSynthSolutionsInternal(sols, statuses, singleInvocation))
@@ -807,7 +848,7 @@ void SynthConjecture::getSynthSolutions(std::map<Node, Node>& sol_map,
if (status != 0)
{
// convert sygus to builtin here
- bsol = sygusDb->sygusToBuiltin(sol, sol.getType());
+ bsol = d_tds->sygusToBuiltin(sol, sol.getType());
}
// convert to lambda
TypeNode tn = d_embed_quant[0][i].getType();
@@ -868,8 +909,7 @@ bool SynthConjecture::getSynthSolutionsInternal(std::vector<Node>& sols,
{
TNode templa = d_ceg_si->getTemplateArg(sf);
// make the builtin version of the full solution
- TermDbSygus* sygusDb = d_qe->getTermDatabaseSygus();
- sol = sygusDb->sygusToBuiltin(sol, sol.getType());
+ sol = d_tds->sygusToBuiltin(sol, sol.getType());
Trace("cegqi-inv") << "Builtin version of solution is : " << sol
<< ", type : " << sol.getType() << std::endl;
TNode tsol = sol;
diff --git a/src/theory/quantifiers/sygus/synth_conjecture.h b/src/theory/quantifiers/sygus/synth_conjecture.h
index 1cbd4e949..694e4a0cb 100644
--- a/src/theory/quantifiers/sygus/synth_conjecture.h
+++ b/src/theory/quantifiers/sygus/synth_conjecture.h
@@ -112,9 +112,19 @@ class SynthConjecture
void assign(Node q);
/** has a conjecture been assigned to this class */
bool isAssigned() { return !d_embed_quant.isNull(); }
- /** get model values for terms n, store in vector v */
- void getModelValues(std::vector<Node>& n, std::vector<Node>& v);
- /** get model value for term n */
+ /**
+ * Get model values for terms n, store in vector v. This method returns true
+ * if and only if all values added to v are non-null.
+ */
+ bool getEnumeratedValues(std::vector<Node>& n, std::vector<Node>& v);
+ /**
+ * Get model value for term n. If n has a value that was excluded by
+ * datatypes sygus symmetry breaking, this method returns null.
+ */
+ Node getEnumeratedValue(Node n);
+ /**
+ * Get model value for term n.
+ */
Node getModelValue(Node n);
/** get utility for static preprocessing and analysis of conjectures */
@@ -132,6 +142,8 @@ class SynthConjecture
private:
/** reference to quantifier engine */
QuantifiersEngine* d_qe;
+ /** term database sygus of d_qe */
+ TermDbSygus* d_tds;
/** The feasible guard. */
Node d_feasible_guard;
/** the decision strategy for the feasible guard */
diff --git a/src/theory/quantifiers/sygus/synth_engine.cpp b/src/theory/quantifiers/sygus/synth_engine.cpp
index 56844ec1f..296c10ff6 100644
--- a/src/theory/quantifiers/sygus/synth_engine.cpp
+++ b/src/theory/quantifiers/sygus/synth_engine.cpp
@@ -42,8 +42,7 @@ SynthEngine::~SynthEngine() { delete d_conj; }
bool SynthEngine::needsCheck(Theory::Effort e)
{
- return !d_quantEngine->getTheoryEngine()->needCheck()
- && e >= Theory::EFFORT_LAST_CALL;
+ return e >= Theory::EFFORT_LAST_CALL;
}
QuantifiersModule::QEffort SynthEngine::needsModel(Theory::Effort e)
diff --git a/src/theory/quantifiers/sygus/term_database_sygus.cpp b/src/theory/quantifiers/sygus/term_database_sygus.cpp
index 1f4e34c1f..23b35cfed 100644
--- a/src/theory/quantifiers/sygus/term_database_sygus.cpp
+++ b/src/theory/quantifiers/sygus/term_database_sygus.cpp
@@ -343,6 +343,7 @@ void TermDbSygus::registerSygusType( TypeNode tn ) {
}
}else{
// no arguments to synthesis functions
+ d_var_list[tn].clear();
}
// register connected types
for (unsigned i = 0, ncons = dt.getNumConstructors(); i < ncons; i++)
@@ -421,11 +422,51 @@ void TermDbSygus::registerSygusType( TypeNode tn ) {
}
}
+/** A trie indexed by types that assigns unique identifiers to nodes. */
+class TypeNodeIdTrie
+{
+ public:
+ /** children of this node */
+ std::map<TypeNode, TypeNodeIdTrie> d_children;
+ /** the data stored at this node */
+ std::vector<Node> d_data;
+ /** add v to this trie, indexed by types */
+ void add(Node v, std::vector<TypeNode>& types)
+ {
+ TypeNodeIdTrie* tnt = this;
+ for (unsigned i = 0, size = types.size(); i < size; i++)
+ {
+ tnt = &tnt->d_children[types[i]];
+ }
+ tnt->d_data.push_back(v);
+ }
+ /**
+ * Assign each node in this trie an identifier such that
+ * assign[v1] = assign[v2] iff v1 and v2 are indexed by the same values.
+ */
+ void assignIds(std::map<Node, unsigned>& assign, unsigned& idCount)
+ {
+ if (!d_data.empty())
+ {
+ for (const Node& v : d_data)
+ {
+ assign[v] = idCount;
+ }
+ idCount++;
+ }
+ for (std::pair<const TypeNode, TypeNodeIdTrie>& c : d_children)
+ {
+ c.second.assignIds(assign, idCount);
+ }
+ }
+};
+
void TermDbSygus::registerEnumerator(Node e,
Node f,
SynthConjecture* conj,
bool mkActiveGuard,
- bool useSymbolicCons)
+ bool useSymbolicCons,
+ bool isVarAgnostic)
{
if (d_enum_to_conjecture.find(e) != d_enum_to_conjecture.end())
{
@@ -441,15 +482,10 @@ void TermDbSygus::registerEnumerator(Node e,
NodeManager* nm = NodeManager::currentNM();
if( mkActiveGuard ){
// make the guard
- Node eg = Rewriter::rewrite(nm->mkSkolem("eG", nm->booleanType()));
- eg = d_quantEngine->getValuation().ensureLiteral( eg );
- AlwaysAssert( !eg.isNull() );
- d_quantEngine->getOutputChannel().requirePhase( eg, true );
- //add immediate lemma
- Node lem = nm->mkNode(OR, eg, eg.negate());
- Trace("cegqi-lemma") << "Cegqi::Lemma : enumerator : " << lem << std::endl;
- d_quantEngine->getOutputChannel().lemma( lem );
- d_enum_to_active_guard[e] = eg;
+ Node ag = nm->mkSkolem("eG", nm->booleanType());
+ // must ensure it is a literal immediately here
+ ag = d_quantEngine->getValuation().ensureLiteral(ag);
+ d_enum_to_active_guard[e] = ag;
}
Trace("sygus-db") << " registering symmetry breaking clauses..."
@@ -459,35 +495,47 @@ void TermDbSygus::registerEnumerator(Node e,
// breaking lemma templates for each relevant subtype of the grammar
std::vector<TypeNode> sf_types;
getSubfieldTypes(et, sf_types);
+ // maps variables to the list of subfield types they occur in
+ std::map<Node, std::vector<TypeNode> > type_occurs;
+ std::map<TypeNode, std::vector<Node> >::iterator itv = d_var_list.find(et);
+ Assert(itv != d_var_list.end());
+ for (const Node& v : itv->second)
+ {
+ type_occurs[v].clear();
+ }
// for each type of subfield type of this enumerator
for (unsigned i = 0, ntypes = sf_types.size(); i < ntypes; i++)
{
std::vector<unsigned> rm_indices;
TypeNode stn = sf_types[i];
Assert(stn.isDatatype());
- const Datatype& dt = static_cast<DatatypeType>(stn.toType()).getDatatype();
- std::map<TypeNode, unsigned>::iterator itsa =
- d_sym_cons_any_constant.find(stn);
- if (itsa != d_sym_cons_any_constant.end())
+ const Datatype& dt = stn.getDatatype();
+ int anyC = getAnyConstantConsNum(stn);
+ for (unsigned i = 0, ncons = dt.getNumConstructors(); i < ncons; i++)
{
- if (!useSymbolicCons)
+ Expr sop = dt[i].getSygusOp();
+ Assert(!sop.isNull());
+ bool isAnyC = static_cast<int>(i) == anyC;
+ Node sopn = Node::fromExpr(sop);
+ if (type_occurs.find(sopn) != type_occurs.end())
+ {
+ // if it is a variable, store that it occurs in stn
+ type_occurs[sopn].push_back(stn);
+ }
+ else if (isAnyC && !useSymbolicCons)
{
+ // if we are not using the any constant constructor
// do not use the symbolic constructor
- rm_indices.push_back(itsa->second);
+ rm_indices.push_back(i);
}
- else
+ else if (anyC != -1 && !isAnyC && useSymbolicCons)
{
- // can remove all other concrete constant constructors
- for (unsigned i = 0, ncons = dt.getNumConstructors(); i < ncons; i++)
+ // if we are using the any constant constructor, do not use any
+ // concrete constant
+ Node c_op = getConsNumConst(stn, i);
+ if (!c_op.isNull())
{
- if (i != itsa->second)
- {
- Node c_op = getConsNumConst(stn, i);
- if (!c_op.isNull())
- {
- rm_indices.push_back(i);
- }
- }
+ rm_indices.push_back(i);
}
}
}
@@ -515,6 +563,33 @@ void TermDbSygus::registerEnumerator(Node e,
}
}
Trace("sygus-db") << " ...finished" << std::endl;
+
+ d_enum_var_agnostic[e] = isVarAgnostic;
+ if (isVarAgnostic)
+ {
+ // if not done so already, compute type class identifiers for each variable
+ if (d_var_subclass_id.find(et) == d_var_subclass_id.end())
+ {
+ d_var_subclass_id[et].clear();
+ TypeNodeIdTrie tnit;
+ for (std::pair<const Node, std::vector<TypeNode> >& to : type_occurs)
+ {
+ tnit.add(to.first, to.second);
+ }
+ // 0 is reserved for "no type class id"
+ unsigned typeIdCount = 1;
+ tnit.assignIds(d_var_subclass_id[et], typeIdCount);
+ // assign the list and reverse map to index
+ for (std::pair<const Node, std::vector<TypeNode> >& to : type_occurs)
+ {
+ Node v = to.first;
+ unsigned sc = d_var_subclass_id[et][v];
+ Trace("sygus-db") << v << " has subclass id " << sc << std::endl;
+ d_var_subclass_list_index[et][v] = d_var_subclass_list[et][sc].size();
+ d_var_subclass_list[et][sc].push_back(v);
+ }
+ }
+ }
}
bool TermDbSygus::isEnumerator(Node e) const
@@ -561,6 +636,26 @@ bool TermDbSygus::usingSymbolicConsForEnumerator(Node e) const
return false;
}
+bool TermDbSygus::isVariableAgnosticEnumerator(Node e) const
+{
+ std::map<Node, bool>::const_iterator itus = d_enum_var_agnostic.find(e);
+ if (itus != d_enum_var_agnostic.end())
+ {
+ return itus->second;
+ }
+ return false;
+}
+
+bool TermDbSygus::isPassiveEnumerator(Node e) const
+{
+ if (isVariableAgnosticEnumerator(e))
+ {
+ return false;
+ }
+ // other criteria go here
+ return true;
+}
+
void TermDbSygus::getEnumerators(std::vector<Node>& mts)
{
for (std::map<Node, SynthConjecture*>::iterator itm =
@@ -881,6 +976,82 @@ bool TermDbSygus::hasSubtermSymbolicCons(TypeNode tn) const
return d_has_subterm_sym_cons.find(tn) != d_has_subterm_sym_cons.end();
}
+unsigned TermDbSygus::getSubclassForVar(TypeNode tn, Node n) const
+{
+ std::map<TypeNode, std::map<Node, unsigned> >::const_iterator itc =
+ d_var_subclass_id.find(tn);
+ if (itc == d_var_subclass_id.end())
+ {
+ Assert(false);
+ return 0;
+ }
+ std::map<Node, unsigned>::const_iterator itcc = itc->second.find(n);
+ if (itcc == itc->second.end())
+ {
+ Assert(false);
+ return 0;
+ }
+ return itcc->second;
+}
+
+unsigned TermDbSygus::getNumSubclassVars(TypeNode tn, unsigned sc) const
+{
+ std::map<TypeNode, std::map<unsigned, std::vector<Node> > >::const_iterator
+ itv = d_var_subclass_list.find(tn);
+ if (itv == d_var_subclass_list.end())
+ {
+ Assert(false);
+ return 0;
+ }
+ std::map<unsigned, std::vector<Node> >::const_iterator itvv =
+ itv->second.find(sc);
+ if (itvv == itv->second.end())
+ {
+ Assert(false);
+ return 0;
+ }
+ return itvv->second.size();
+}
+Node TermDbSygus::getVarSubclassIndex(TypeNode tn,
+ unsigned sc,
+ unsigned i) const
+{
+ std::map<TypeNode, std::map<unsigned, std::vector<Node> > >::const_iterator
+ itv = d_var_subclass_list.find(tn);
+ if (itv == d_var_subclass_list.end())
+ {
+ Assert(false);
+ return Node::null();
+ }
+ std::map<unsigned, std::vector<Node> >::const_iterator itvv =
+ itv->second.find(sc);
+ if (itvv == itv->second.end() || i >= itvv->second.size())
+ {
+ Assert(false);
+ return Node::null();
+ }
+ return itvv->second[i];
+}
+
+bool TermDbSygus::getIndexInSubclassForVar(TypeNode tn,
+ Node v,
+ unsigned& index) const
+{
+ std::map<TypeNode, std::map<Node, unsigned> >::const_iterator itv =
+ d_var_subclass_list_index.find(tn);
+ if (itv == d_var_subclass_list_index.end())
+ {
+ return false;
+ }
+ std::map<Node, unsigned>::const_iterator itvv = itv->second.find(v);
+ if (itvv == itv->second.end())
+ {
+ return false;
+ }
+ index = itvv->second;
+ return true;
+}
+
bool TermDbSygus::isSymbolicConsApp(Node n) const
{
if (n.getKind() != APPLY_CONSTRUCTOR)
diff --git a/src/theory/quantifiers/sygus/term_database_sygus.h b/src/theory/quantifiers/sygus/term_database_sygus.h
index b7bdba3ab..785e8731c 100644
--- a/src/theory/quantifiers/sygus/term_database_sygus.h
+++ b/src/theory/quantifiers/sygus/term_database_sygus.h
@@ -69,6 +69,10 @@ class TermDbSygus {
* (see d_enum_to_active_guard),
* useSymbolicCons : whether we want model values for e to include symbolic
* constructors like the "any constant" variable.
+ * isVarAgnostic : if this flag is true, the enumerator will only generate
+ * values whose variables are in canonical order (for example, only x1-x2
+ * and not x2-x1 will be generated, assuming x1 and x2 are in the same
+ * "subclass", see getSubclassForVar).
*
* Notice that enumerator e may not be one-to-one with f in
* synthesis-through-unification approaches (e.g. decision tree construction
@@ -78,7 +82,8 @@ class TermDbSygus {
Node f,
SynthConjecture* conj,
bool mkActiveGuard = false,
- bool useSymbolicCons = false);
+ bool useSymbolicCons = false,
+ bool isVarAgnostic = false);
/** is e an enumerator registered with this class? */
bool isEnumerator(Node e) const;
/** return the conjecture e is associated with */
@@ -89,6 +94,26 @@ class TermDbSygus {
Node getActiveGuardForEnumerator(Node e) const;
/** are we using symbolic constructors for enumerator e? */
bool usingSymbolicConsForEnumerator(Node e) const;
+ /** is this enumerator agnostic to variables? */
+ bool isVariableAgnosticEnumerator(Node e) const;
+ /** is this a "passively-generated" enumerator?
+ *
+ * A "passively-generated" enumerator is one for which the terms it enumerates
+ * are obtained by looking at its model value only. For passively-generated
+ * enumerators, it is the responsibility of the user of that enumerator (say
+ * a SygusModule) to block the current model value of it before asking for
+ * another value. By default, the Cegis module uses passively-generated
+ * enumerators and "conjecture-specific refinement" to rule out models
+ * for passively-generated enumerators.
+ *
+ * On the other hand, an "actively-generated" enumerator is one for which the
+ * terms it enumerates are not necessarily a subset of the model values the
+ * enumerator takes. Actively-generated enumerators are centrally managed by
+ * SynthConjecture. The user of actively-generated enumerators are prohibited
+ * from influencing its model value. For example, conjecture-specific
+ * refinement in Cegis is not applied to actively-generated enumerators.
+ */
+ bool isPassiveEnumerator(Node e) const;
/** get all registered enumerators */
void getEnumerators(std::vector<Node>& mts);
/** Register symmetry breaking lemma
@@ -273,6 +298,8 @@ class TermDbSygus {
std::map<Node, TypeNode> d_sb_lemma_to_type;
/** mapping from symmetry breaking lemmas to size */
std::map<Node, unsigned> d_sb_lemma_to_size;
+ /** enumerators to whether they are variable agnostic */
+ std::map<Node, bool> d_enum_var_agnostic;
//------------------------------end enumerators
//-----------------------------conversion from sygus to builtin
@@ -345,6 +372,17 @@ class TermDbSygus {
* for this type.
*/
std::map<TypeNode, bool> d_has_subterm_sym_cons;
+ /**
+ * Map from sygus types and bound variables to their type subclass id. Note
+ * type class identifiers are computed for each type of registered sygus
+ * enumerators, but not all sygus types. For details, see getSubclassIdForVar.
+ */
+ std::map<TypeNode, std::map<Node, unsigned> > d_var_subclass_id;
+ /** the list of variables with given subclass */
+ std::map<TypeNode, std::map<unsigned, std::vector<Node> > >
+ d_var_subclass_list;
+ /** the index of each variable in the above list */
+ std::map<TypeNode, std::map<Node, unsigned> > d_var_subclass_list_index;
public: // general sygus utilities
bool isRegistered(TypeNode tn) const;
@@ -390,6 +428,45 @@ class TermDbSygus {
* Returns true if any subterm of type tn can be a symbolic constructor.
*/
bool hasSubtermSymbolicCons(TypeNode tn) const;
+ //--------------------------------- variable subclasses
+ /** Get subclass id for variable
+ *
+ * This returns the "subclass" identifier for variable v in sygus
+ * type tn. A subclass identifier groups variables based on the sygus
+ * types they occur in:
+ * A -> A + B | C + C | x | y | z | w | u
+ * B -> y | z
+ * C -> u
+ * The variables in this grammar can be grouped according to the sygus types
+ * they appear in:
+ * { x,w } occur in A
+ * { y,z } occur in A,B
+ * { u } occurs in A,C
+ * We say that e.g. x, w are in the same subclass.
+ *
+ * If this method returns 0, then v is not a variable in sygus type tn.
+ * Otherwise, this method returns a positive value n, such that
+ * getSubclassIdForVar[v1] = getSubclassIdForVar[v2] iff v1 and v2 are in the
+ * same subclass.
+ *
+ * The type tn should be the type of an enumerator registered to this
+ * database, where notice that we do not compute this information for the
+ * subfield types of the enumerator.
+ */
+ unsigned getSubclassForVar(TypeNode tn, Node v) const;
+ /**
+ * Get the number of variable in the subclass with identifier sc for type tn.
+ */
+ unsigned getNumSubclassVars(TypeNode tn, unsigned sc) const;
+ /** Get the i^th variable in the subclass with identifier sc for type tn */
+ Node getVarSubclassIndex(TypeNode tn, unsigned sc, unsigned i) const;
+ /**
+ * Get the a variable's index in its subclass list. This method returns true
+ * iff variable v has been assigned a subclass in tn. It updates index to
+ * be v's index iff the method returns true.
+ */
+ bool getIndexInSubclassForVar(TypeNode tn, Node v, unsigned& index) const;
+ //--------------------------------- end variable subclasses
/** return whether n is an application of a symbolic constructor */
bool isSymbolicConsApp(Node n) const;
/** can construct kind
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 44cf5a651..a79e96c65 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,30 +1,29 @@
#-----------------------------------------------------------------------------#
-# Add subdirectories
-
-add_subdirectory(regress)
-add_subdirectory(system EXCLUDE_FROM_ALL)
-
-if(BUILD_BINDINGS_JAVA)
- add_subdirectory(java)
-endif()
-
-if(ENABLE_UNIT_TESTING)
- add_subdirectory(unit EXCLUDE_FROM_ALL)
-endif()
-
-#-----------------------------------------------------------------------------#
# Add target 'check', builds and runs
# > unit tests
# > regression tests of levels 0 and 1
# > system tests
+# Note: Do not add custom targets for running tests (regress, systemtests,
+# units) as dependencies to other run targets. This will result in executing
+# tests multiple times. For example, if check would depend on regress it would
+# first run the command of the regress target (i.e., run all regression tests)
+# and afterwards run the command specified for the check target.
+# Dependencies of check are added in the corresponding subdirectories.
add_custom_target(check
COMMAND
- ctest --output-on-failure -LE "regress[2-4]" -j${CTEST_NTHREADS} $(ARGS)
- DEPENDS regress systemtests)
-if(BUILD_BINDINGS_JAVA)
- add_dependencies(check cvc4javatests)
-endif()
+ ctest --output-on-failure -LE "regress[2-4]" -j${CTEST_NTHREADS} $(ARGS))
+
+#-----------------------------------------------------------------------------#
+# Add subdirectories
+
+add_subdirectory(regress)
+add_subdirectory(system EXCLUDE_FROM_ALL)
+
if(ENABLE_UNIT_TESTING)
- add_dependencies(check units)
+ add_subdirectory(unit EXCLUDE_FROM_ALL)
+
+ if(BUILD_BINDINGS_JAVA)
+ add_subdirectory(java)
+ endif()
endif()
diff --git a/test/java/CMakeLists.txt b/test/java/CMakeLists.txt
index 27042559a..b6f0d035a 100644
--- a/test/java/CMakeLists.txt
+++ b/test/java/CMakeLists.txt
@@ -10,16 +10,18 @@ set(java_test_src_files
LinearArith.java
)
-add_jar(cvc4javatests
+add_jar(build-javatests
SOURCES ${java_test_src_files}
INCLUDE_JARS
${CMAKE_BINARY_DIR}/src/bindings/java/CVC4.jar
${JUnit_JAR}
+ OUTPUT_NAME javatests
)
-add_dependencies(cvc4javatests cvc4jar)
+add_dependencies(build-javatests cvc4jar)
+add_dependencies(check build-javatests)
# Add java tests to ctest
-set(classpath "${CMAKE_CURRENT_BINARY_DIR}/cvc4javatests.jar")
+set(classpath "${CMAKE_CURRENT_BINARY_DIR}/javatests.jar")
set(classpath "${classpath}:${CMAKE_BINARY_DIR}/src/bindings/java/CVC4.jar")
set(classpath "${classpath}:${JUnit_JAR}:${JUnit_JAR_DEPS}")
diff --git a/test/regress/CMakeLists.txt b/test/regress/CMakeLists.txt
index 88be6b94b..4e8405d5d 100644
--- a/test/regress/CMakeLists.txt
+++ b/test/regress/CMakeLists.txt
@@ -626,6 +626,7 @@ set(regress_0_tests
regress0/quantifiers/nested-inf.smt2
regress0/quantifiers/partial-trigger.smt2
regress0/quantifiers/pure_dt_cbqi.smt2
+ regress0/quantifiers/qarray-sel-over-store.smt2
regress0/quantifiers/qbv-inequality2.smt2
regress0/quantifiers/qbv-simp.smt2
regress0/quantifiers/qbv-test-invert-bvadd-neq.smt2
@@ -796,6 +797,7 @@ set(regress_0_tests
regress0/strings/code-sat-neg-one.smt2
regress0/strings/escchar.smt2
regress0/strings/escchar_25.smt2
+ regress0/strings/hconst-092618.smt2
regress0/strings/idof-rewrites.smt2
regress0/strings/idof-sem.smt2
regress0/strings/ilc-like.smt2
@@ -825,6 +827,7 @@ set(regress_0_tests
regress0/sygus/check-generic-red.sy
regress0/sygus/const-var-test.sy
regress0/sygus/dt-no-syntax.sy
+ regress0/sygus/inv-different-var-order.sy
regress0/sygus/let-ringer.sy
regress0/sygus/let-simp.sy
regress0/sygus/no-syntax-test-bool.sy
@@ -1163,6 +1166,7 @@ set(regress_1_tests
regress1/nl/dist-big.smt2
regress1/nl/div-mod-partial.smt2
regress1/nl/dumortier_llibre_artes_ex_5_13.transcendental.k2.smt2
+ regress1/nl/exp-approx.smt2
regress1/nl/exp-4.5-lt.smt2
regress1/nl/exp1-lb.smt2
regress1/nl/exp_monotone.smt2
@@ -2074,10 +2078,16 @@ set(regression_disabled_tests
get_target_property(path_to_cvc4 cvc4-bin RUNTIME_OUTPUT_DIRECTORY)
set(run_regress_script ${CMAKE_CURRENT_LIST_DIR}/run_regression.py)
+add_custom_target(build-regress DEPENDS cvc4-bin)
+add_dependencies(check build-regress)
+if(ENABLE_COVERAGE)
+ add_dependencies(coverage build-regress)
+endif()
+
add_custom_target(regress
COMMAND
ctest --output-on-failure -L "regress[0-1]" -j${CTEST_NTHREADS} $(ARGS)
- DEPENDS cvc4-bin)
+ DEPENDS build-regress)
macro(cvc4_add_regression_test level file)
add_test(${file}
diff --git a/test/regress/Makefile.tests b/test/regress/Makefile.tests
index a1cba3b55..d6d8d1a68 100644
--- a/test/regress/Makefile.tests
+++ b/test/regress/Makefile.tests
@@ -635,6 +635,7 @@ REG0_TESTS = \
regress0/quantifiers/nested-inf.smt2 \
regress0/quantifiers/partial-trigger.smt2 \
regress0/quantifiers/pure_dt_cbqi.smt2 \
+ regress0/quantifiers/qarray-sel-over-store.smt2 \
regress0/quantifiers/qbv-inequality2.smt2 \
regress0/quantifiers/qbv-simp.smt2 \
regress0/quantifiers/qbv-test-invert-bvadd-neq.smt2 \
@@ -806,6 +807,7 @@ REG0_TESTS = \
regress0/strings/code-sat-neg-one.smt2 \
regress0/strings/escchar.smt2 \
regress0/strings/escchar_25.smt2 \
+ regress0/strings/hconst-092618.smt2 \
regress0/strings/idof-rewrites.smt2 \
regress0/strings/idof-sem.smt2 \
regress0/strings/ilc-like.smt2 \
@@ -838,6 +840,7 @@ REG0_TESTS = \
regress0/sygus/const-var-test.sy \
regress0/sygus/dt-no-syntax.sy \
regress0/sygus/hd-05-d1-prog-nogrammar.sy \
+ regress0/sygus/inv-different-var-order.sy \
regress0/sygus/let-ringer.sy \
regress0/sygus/let-simp.sy \
regress0/sygus/no-syntax-test-bool.sy \
@@ -1161,6 +1164,7 @@ REG1_TESTS = \
regress1/nl/div-mod-partial.smt2 \
regress1/nl/dumortier_llibre_artes_ex_5_13.transcendental.k2.smt2 \
regress1/nl/exp-4.5-lt.smt2 \
+ regress1/nl/exp-approx.smt2 \
regress1/nl/exp1-lb.smt2 \
regress1/nl/exp_monotone.smt2 \
regress1/nl/factor_agg_s.smt2 \
diff --git a/test/regress/regress0/quantifiers/qarray-sel-over-store.smt2 b/test/regress/regress0/quantifiers/qarray-sel-over-store.smt2
new file mode 100644
index 000000000..f7e5a3a04
--- /dev/null
+++ b/test/regress/regress0/quantifiers/qarray-sel-over-store.smt2
@@ -0,0 +1,32 @@
+; EXPECT: unsat
+(set-logic AUFBV)
+(set-info :status unsat)
+(set-info :category "crafted")
+(declare-sort Element 0)
+
+(declare-fun a1 () (Array (_ BitVec 8) Element))
+(declare-fun a2 () (Array (_ BitVec 8) Element))
+(declare-fun a3 () (Array (_ BitVec 8) Element))
+(declare-fun a4 () (Array (_ BitVec 8) Element))
+
+(declare-fun i2 () (_ BitVec 8))
+(declare-fun i3 () (_ BitVec 8))
+
+(declare-fun e1 () Element)
+(declare-fun e2 () Element)
+
+(assert (not (= e1 e2)))
+(assert (= a3 (store a1 (_ bv3 8) e1)))
+(assert (= a4 (store a2 (_ bv3 8) e1)))
+(assert (= (select a3 (_ bv2 8)) e1))
+(assert (= (select a4 (_ bv2 8)) e2))
+
+; (assert (eqrange a3 a4 (_ bv0 8) (_ bv2 8)))
+
+(assert (forall ((x (_ BitVec 8)))
+ (=> (and (bvule (_ bv0 8) x) (bvule x (_ bv2 8))) (= (select a3 x) (select a4 x)))
+ ))
+
+
+(check-sat)
+(exit)
diff --git a/test/regress/regress0/strings/hconst-092618.smt2 b/test/regress/regress0/strings/hconst-092618.smt2
new file mode 100644
index 000000000..c7c5206bd
--- /dev/null
+++ b/test/regress/regress0/strings/hconst-092618.smt2
@@ -0,0 +1,5 @@
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun x () String)
+(assert (str.contains "::" (str.++ x ":" x ":")))
+(check-sat) \ No newline at end of file
diff --git a/test/regress/regress0/sygus/inv-different-var-order.sy b/test/regress/regress0/sygus/inv-different-var-order.sy
new file mode 100644
index 000000000..c3f43fc07
--- /dev/null
+++ b/test/regress/regress0/sygus/inv-different-var-order.sy
@@ -0,0 +1,22 @@
+; COMMAND-LINE: --sygus-out=status
+;EXPECT: unsat
+(set-logic LIA)
+(synth-inv inv-f ((x Int) (y Int) (b Bool)))
+(declare-primed-var b Bool)
+(declare-primed-var x Int)
+(declare-primed-var y Int)
+(define-fun pre-f ((x Int) (y Int) (b Bool)) Bool
+(and
+(and (>= x 5) (<= x 9))
+(and (>= y 1) (<= y 3))
+)
+)
+(define-fun trans-f ((x Int) (y Int) (b Bool) (x! Int) (y! Int) (b! Bool)) Bool
+(and
+(and (= b! b) (= y! x))
+(ite b (= x! (+ x 10)) (= x! (+ x 12)))
+)
+)
+(define-fun post-f ((x Int) (y Int) (b Bool)) Bool true)
+(inv-constraint inv-f pre-f trans-f post-f)
+(check-synth) \ No newline at end of file
diff --git a/test/regress/regress1/nl/exp-approx.smt2 b/test/regress/regress1/nl/exp-approx.smt2
new file mode 100644
index 000000000..f3faceda5
--- /dev/null
+++ b/test/regress/regress1/nl/exp-approx.smt2
@@ -0,0 +1,15 @@
+; COMMAND-LINE: --no-check-models
+; EXPECT: sat
+(set-logic QF_NRAT)
+(set-info :status sat)
+
+(assert (and (<= 3.0 (exp 1.1)) (<= (exp 1.1) 3.1)))
+(assert (and (<= 8.1 (exp 2.1)) (<= (exp 2.1) 8.2)))
+(assert (and (<= 22.1 (exp 3.1)) (<= (exp 3.1) 22.2)))
+(assert (and (<= 60.3 (exp 4.1)) (<= (exp 4.1) 60.4)))
+(assert (and (<= 164.0 (exp 5.1)) (<= (exp 5.1) 164.1)))
+
+; this takes ~10 seconds in production
+;(assert (and (<= 536190464.4 (exp 20.1)) (<= (exp 20.1) 536190464.5)))
+
+(check-sat)
diff --git a/test/system/CMakeLists.txt b/test/system/CMakeLists.txt
index 1832217c3..36e6c4a37 100644
--- a/test/system/CMakeLists.txt
+++ b/test/system/CMakeLists.txt
@@ -7,9 +7,15 @@ include_directories(${CMAKE_BINARY_DIR}/src)
# Add target 'systemtests', builds and runs
# > system tests
+add_custom_target(build-systemtests)
+add_dependencies(check build-systemtests)
+if(ENABLE_COVERAGE)
+ add_dependencies(coverage build-systemtests)
+endif()
+
add_custom_target(systemtests
COMMAND ctest --output-on-failure -L "system" -j${CTEST_NTHREADS} $(ARGS)
- DEPENDS main-test)
+ DEPENDS build-systemtests)
set(CVC4_SYSTEM_TEST_FLAGS
-D__BUILDING_CVC4_SYSTEM_TEST -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS)
@@ -20,10 +26,7 @@ macro(cvc4_add_system_test name)
target_compile_definitions(${name} PRIVATE ${CVC4_SYSTEM_TEST_FLAGS})
add_test(system/${name} ${CMAKE_CURRENT_BINARY_DIR}/${name})
set_tests_properties(system/${name} PROPERTIES LABELS "system")
- add_dependencies(systemtests ${name})
- if(ENABLE_COVERAGE)
- add_dependencies(coverage ${name})
- endif()
+ add_dependencies(build-systemtests ${name})
endmacro()
cvc4_add_system_test(boilerplate)
diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt
index 3d1007929..d362870c1 100644
--- a/test/unit/CMakeLists.txt
+++ b/test/unit/CMakeLists.txt
@@ -1,3 +1,9 @@
+# Note: We use our custom CxxTest finder here to specify custom directories and
+# fail if it is not found in the specified directory (similar to the other
+# custom finder modules).
+set(CxxTest_HOME ${CXXTEST_DIR})
+find_package(CxxTest REQUIRED)
+
include_directories(.)
include_directories(${PROJECT_SOURCE_DIR}/src)
include_directories(${PROJECT_SOURCE_DIR}/src/include)
@@ -7,8 +13,15 @@ include_directories(${CMAKE_BINARY_DIR}/src)
# Add target 'units', builds and runs
# > unit tests
+add_custom_target(build-units)
+add_dependencies(check build-units)
+if(ENABLE_COVERAGE)
+ add_dependencies(coverage build-units)
+endif()
+
add_custom_target(units
- COMMAND ctest --output-on-failure -L "unit" -j${CTEST_NTHREADS} $(ARGS))
+ COMMAND ctest --output-on-failure -L "unit" -j${CTEST_NTHREADS} $(ARGS)
+ DEPENDS build-units)
set(CVC4_CXXTEST_FLAGS_BLACK
-D__BUILDING_CVC4LIB_UNIT_TEST -D__BUILDING_CVC4PARSERLIB_UNIT_TEST
@@ -26,9 +39,9 @@ macro(cvc4_add_unit_test is_white name output_dir)
OUTPUT ${test_src}
DEPENDS ${test_header}
COMMAND
- ${CXXTEST_TESTGEN_INTERPRETER}
- ${CXXTEST_TESTGEN_EXECUTABLE}
- ${CXXTEST_TESTGEN_ARGS} -o ${test_src} ${test_header}
+ ${CxxTest_TESTGEN_INTERPRETER}
+ ${CxxTest_TESTGEN_EXECUTABLE}
+ ${CxxTest_TESTGEN_ARGS} -o ${test_src} ${test_header}
)
set_source_files_properties(${test_src} PROPERTIES GENERATED true)
# The build target is created without the path prefix (not supported),
@@ -46,7 +59,7 @@ macro(cvc4_add_unit_test is_white name output_dir)
# Disable the Wsuggest-override warnings for the unit tests. CxxTest generates
# code that does not properly add the override keyword to runTest().
target_compile_options(${name} PRIVATE -Wno-suggest-override)
- add_dependencies(units ${name})
+ add_dependencies(build-units ${name})
# Generate into bin/test/unit/<output_dir>.
set(test_bin_dir ${CMAKE_BINARY_DIR}/bin/test/unit/${output_dir})
set_target_properties(${name}
@@ -62,9 +75,6 @@ macro(cvc4_add_unit_test is_white name output_dir)
endif()
add_test(${test_name} ${test_bin_dir}/${name})
set_tests_properties(${test_name} PROPERTIES LABELS "unit")
- if(ENABLE_COVERAGE)
- add_dependencies(coverage ${name})
- endif()
endmacro()
macro(cvc4_add_unit_test_black name output_dir)
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback