author    Gereon Kremer <nafur42@gmail.com>  2021-10-27 16:05:02 -0700
committer GitHub <noreply@github.com>  2021-10-27 16:05:02 -0700
commit    75f92b54a63f80aabf6591e9033f28c62d9ed030 (patch)
tree      893c423b4f38b6b2a57c6fd386be8e7f702b17df
parent    2519a0ba0491b8500799b56caf952a15bf2d0409 (diff)
parent    95685c06c1c3983bc50a5cf4b4196fc1c5ae2247 (diff)
Merge branch 'master' into issue7504
-rw-r--r-- .github/workflows/docs_upload.yml | 18
-rw-r--r-- cmake/FindANTLR3.cmake | 23
-rw-r--r-- cmake/FindGMP.cmake | 6
-rw-r--r-- cmake/FindPoly.cmake | 17
-rwxr-xr-x cmake/deps-utils/Poly-windows-patch.sh | 14
-rw-r--r-- docs/api/python/python.rst | 3
-rw-r--r-- docs/api/python/sort.rst | 6
-rw-r--r-- src/CMakeLists.txt | 61
-rw-r--r-- src/api/cpp/cvc5.cpp | 6
-rw-r--r-- src/api/cpp/cvc5.h | 24
-rw-r--r-- src/api/java/io/github/cvc5/api/Solver.java | 15
-rw-r--r-- src/api/java/jni/option_Info.cpp | 345
-rw-r--r-- src/api/java/jni/solver.cpp | 19
-rw-r--r-- src/api/java/jni/sort.cpp | 20
-rw-r--r-- src/api/python/cvc5.pxd | 2
-rw-r--r-- src/api/python/cvc5.pxi | 308
-rw-r--r-- src/base/configuration.cpp | 6
-rw-r--r-- src/expr/skolem_manager.cpp | 2
-rw-r--r-- src/expr/skolem_manager.h | 16
-rw-r--r-- src/options/main_options.toml | 1
-rw-r--r-- src/options/mkoptions.py | 3
-rw-r--r-- src/options/quantifiers_options.toml | 18
-rw-r--r-- src/parser/CMakeLists.txt | 10
-rw-r--r-- src/preprocessing/assertion_pipeline.cpp | 2
-rw-r--r-- src/proof/alethe/alethe_post_processor.cpp | 505
-rw-r--r-- src/proof/lazy_proof.cpp | 7
-rw-r--r-- src/proof/lazy_proof.h | 6
-rw-r--r-- src/proof/proof_node_algorithm.cpp | 88
-rw-r--r-- src/proof/proof_node_algorithm.h | 28
-rw-r--r-- src/prop/sat_proof_manager.cpp | 2
-rw-r--r-- src/smt/assertions.cpp | 27
-rw-r--r-- src/smt/assertions.h | 13
-rw-r--r-- src/smt/process_assertions.cpp | 2
-rw-r--r-- src/smt/proof_post_processor.cpp | 34
-rw-r--r-- src/smt/set_defaults.cpp | 37
-rw-r--r-- src/theory/bags/bag_solver.cpp | 41
-rw-r--r-- src/theory/bags/bag_solver.h | 17
-rw-r--r-- src/theory/bags/bags_rewriter.cpp | 3
-rw-r--r-- src/theory/bags/inference_generator.cpp | 175
-rw-r--r-- src/theory/bags/inference_generator.h | 53
-rw-r--r-- src/theory/bags/normal_form.cpp | 2
-rw-r--r-- src/theory/bags/theory_bags.cpp | 2
-rw-r--r-- src/theory/booleans/proof_checker.cpp | 136
-rw-r--r-- src/theory/booleans/theory_bool.cpp | 20
-rw-r--r-- src/theory/booleans/theory_bool.h | 2
-rw-r--r-- src/theory/inference_id.cpp | 29
-rw-r--r-- src/theory/inference_id.h | 32
-rw-r--r-- src/theory/quantifiers/expr_miner.cpp | 9
-rw-r--r-- src/theory/quantifiers/expr_miner_manager.cpp | 71
-rw-r--r-- src/theory/quantifiers/expr_miner_manager.h | 23
-rw-r--r-- src/theory/quantifiers/extended_rewrite.cpp | 46
-rw-r--r-- src/theory/quantifiers/extended_rewrite.h | 3
-rw-r--r-- src/theory/quantifiers/query_generator_unsat.cpp | 178
-rw-r--r-- src/theory/quantifiers/query_generator_unsat.h | 89
-rw-r--r-- src/theory/quantifiers/sygus/synth_conjecture.cpp | 27
-rw-r--r-- src/theory/sets/theory_sets_rels.cpp | 71
-rw-r--r-- src/theory/strings/arith_entail.cpp | 115
-rw-r--r-- src/theory/strings/arith_entail.h | 18
-rw-r--r-- src/theory/strings/core_solver.cpp | 11
-rw-r--r-- src/theory/strings/eager_solver.cpp | 175
-rw-r--r-- src/theory/strings/eager_solver.h | 23
-rw-r--r-- src/theory/strings/eqc_info.cpp | 58
-rw-r--r-- src/theory/strings/eqc_info.h | 25
-rw-r--r-- src/theory/strings/regexp_elim.cpp | 33
-rw-r--r-- src/theory/strings/sequences_rewriter.cpp | 45
-rw-r--r-- src/theory/strings/solver_state.cpp | 13
-rw-r--r-- src/theory/strings/solver_state.h | 6
-rw-r--r-- src/theory/strings/theory_strings.cpp | 8
-rw-r--r-- src/theory/strings/theory_strings.h | 8
-rw-r--r-- src/theory/theory_engine.cpp | 85
-rw-r--r-- src/theory/theory_model_builder.cpp | 8
-rw-r--r-- src/theory/uf/eq_proof.cpp | 2
-rw-r--r-- src/theory/uf/theory_uf.cpp | 42
-rw-r--r-- test/api/CMakeLists.txt | 1
-rw-r--r-- test/api/proj-issue306.cpp | 21
-rw-r--r-- test/python/unit/api/test_solver.py | 11
-rw-r--r-- test/regress/CMakeLists.txt | 28
-rw-r--r-- test/regress/regress0/cores/issue5234-uc-ua.smt2 | 12
-rw-r--r-- test/regress/regress0/ho/issue5741-1-cg-model.smt2 | 18
-rw-r--r-- test/regress/regress0/ho/issue5741-3-cg-model.smt2 | 5
-rw-r--r-- test/regress/regress0/ho/issue5744-cg-model.smt2 | 7
-rw-r--r-- test/regress/regress0/ho/qgu-fuzz-ho-1-dd.smt2 | 6
-rw-r--r-- test/regress/regress0/options/version.smt2 | 3
-rw-r--r-- test/regress/regress0/proofs/project-issue317-inc-sat-conflictlit.smt2 | 7
-rw-r--r-- test/regress/regress0/proofs/qgu-fuzz-3-chainres-checking.smt2 | 7
-rw-r--r-- test/regress/regress0/proofs/qgu-fuzz-4-bool-chainres-postprocessing-singleton.smt2 | 7
-rw-r--r-- test/regress/regress0/push-pop/issue7479-global-decls.smt2 | 8
-rw-r--r-- test/regress/regress0/rels/qgu-fuzz-relations-1-dd.smt2 | 5
-rw-r--r-- test/regress/regress0/rels/qgu-fuzz-relations-1.smt2 | 8
-rw-r--r-- test/regress/regress0/strings/proj-issue316-regexp-ite.smt2 | 10
-rw-r--r-- test/regress/regress0/unconstrained/issue4656-bool-term-vars.smt2 | 12
-rw-r--r-- test/regress/regress1/bags/fuzzy1.smt2 | 10
-rw-r--r-- test/regress/regress1/bags/fuzzy2.smt2 | 15
-rw-r--r-- test/regress/regress1/bags/map1.smt2 (renamed from test/regress/regress1/bags/map.smt2) | 0
-rw-r--r-- test/regress/regress1/bags/map2.smt2 | 9
-rw-r--r-- test/regress/regress1/bags/map3.smt2 | 10
-rw-r--r-- test/regress/regress1/ho/issue4758.smt2 | 6
-rw-r--r-- test/regress/regress1/ho/issue5078-small.smt2 | 8
-rw-r--r-- test/regress/regress1/ho/issue5201-1.smt2 | 20
-rw-r--r-- test/regress/regress1/proofs/qgu-fuzz-arrays-1-dd-te-auto.smt2 | 7
-rw-r--r-- test/regress/regress1/push-pop/arith_lra_01.smt2 | 2
-rw-r--r-- test/regress/regress1/push-pop/fuzz_3_6.smt2 | 2
-rw-r--r-- test/regress/regress1/quantifiers/issue7385-sygus-inst-i.smt2 | 9
-rw-r--r-- test/regress/regress1/quantifiers/symmetric_unsat_7.smt2 | 2
-rw-r--r-- test/regress/regress1/strings/issue6766-re-elim-bv.smt2 | 9
-rw-r--r-- test/regress/regress1/strings/non-terminating-rewrite-aent.smt2 | 11
-rw-r--r-- test/regress/regress1/strings/pattern1.smt2 | 73
-rw-r--r-- test/regress/regress2/sygus/qgu-bools.sy | 21
-rwxr-xr-x test/regress/run_regression.py | 4
-rw-r--r-- test/unit/api/java/SolverTest.java | 24
-rw-r--r-- test/unit/theory/sequences_rewriter_white.cpp | 11
111 files changed, 2873 insertions, 954 deletions
diff --git a/.github/workflows/docs_upload.yml b/.github/workflows/docs_upload.yml
index 7bdd186a1..9aadd6647 100644
--- a/.github/workflows/docs_upload.yml
+++ b/.github/workflows/docs_upload.yml
@@ -100,3 +100,21 @@ jobs:
else
echo "Ignored run"
fi
+
+ - name: Update docs for release
+ continue-on-error: true
+ env:
+ SSH_AUTH_SOCK: /tmp/ssh_agent.sock
+ run: |
+ if [ "$ISTAG" ]; then
+ git clone git@github.com:cvc5/docs-releases.git target-releases/
+ mv docs-new target-releases/$NAME
+ cd target-releases/
+
+ git add docs-$NAME
+
+ git commit -m "Update docs for $NAME"
+ git push
+ else
+ echo "Ignored run"
+ fi
diff --git a/cmake/FindANTLR3.cmake b/cmake/FindANTLR3.cmake
index 772f5f43e..e389e15f0 100644
--- a/cmake/FindANTLR3.cmake
+++ b/cmake/FindANTLR3.cmake
@@ -133,8 +133,7 @@ if(NOT ANTLR3_FOUND_SYSTEM)
set(ANTLR3_JAR "${DEPS_BASE}/share/java/antlr-3.4-complete.jar")
set(ANTLR3_INCLUDE_DIR "${DEPS_BASE}/include/")
- set(ANTLR3_LIBRARIES "${DEPS_BASE}/${CMAKE_INSTALL_LIBDIR}/libantlr3c${CMAKE_SHARED_LIBRARY_SUFFIX}")
- set(ANTLR3_STATIC_LIBRARIES "${DEPS_BASE}/${CMAKE_INSTALL_LIBDIR}/libantlr3c.a")
+ set(ANTLR3_LIBRARIES "${DEPS_BASE}/${CMAKE_INSTALL_LIBDIR}/libantlr3c.a")
endif()
find_package(Java COMPONENTS Runtime REQUIRED)
@@ -146,21 +145,13 @@ set(ANTLR3_FOUND TRUE)
set(ANTLR3_COMMAND ${Java_JAVA_EXECUTABLE} -cp "${ANTLR3_JAR}" org.antlr.Tool
CACHE STRING "run ANTLR3" FORCE)
-add_library(ANTLR3_SHARED SHARED IMPORTED GLOBAL)
-set_target_properties(ANTLR3_SHARED PROPERTIES
+add_library(ANTLR3 STATIC IMPORTED GLOBAL)
+set_target_properties(ANTLR3 PROPERTIES
IMPORTED_LOCATION "${ANTLR3_LIBRARIES}"
INTERFACE_INCLUDE_DIRECTORIES "${ANTLR3_INCLUDE_DIR}"
)
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
- set_target_properties(ANTLR3_SHARED PROPERTIES IMPORTED_IMPLIB "${ANTLR3_LIBRARIES}")
-endif()
-
-if(ENABLE_STATIC_LIBRARY)
- add_library(ANTLR3_STATIC STATIC IMPORTED GLOBAL)
- set_target_properties(ANTLR3_STATIC PROPERTIES
- IMPORTED_LOCATION "${ANTLR3_STATIC_LIBRARIES}"
- INTERFACE_INCLUDE_DIRECTORIES "${ANTLR3_INCLUDE_DIR}"
- )
+ set_target_properties(ANTLR3 PROPERTIES IMPORTED_IMPLIB "${ANTLR3_LIBRARIES}")
endif()
mark_as_advanced(ANTLR3_BINARY)
@@ -170,7 +161,6 @@ mark_as_advanced(ANTLR3_FOUND_SYSTEM)
mark_as_advanced(ANTLR3_INCLUDE_DIR)
mark_as_advanced(ANTLR3_JAR)
mark_as_advanced(ANTLR3_LIBRARIES)
-mark_as_advanced(ANTLR3_STATIC_LIBRARIES)
if(ANTLR3_FOUND_SYSTEM)
message(STATUS "Found ANTLR3 runtime: ${ANTLR3_LIBRARIES}")
@@ -178,8 +168,5 @@ if(ANTLR3_FOUND_SYSTEM)
else()
message(STATUS "Building ANTLR3 runtime: ${ANTLR3_LIBRARIES}")
message(STATUS "Downloading ANTLR3 JAR: ${ANTLR3_JAR}")
- add_dependencies(ANTLR3_SHARED ANTLR3-EP-runtime ANTLR3-EP-jar)
- if(ENABLE_STATIC_LIBRARY)
- add_dependencies(ANTLR3_STATIC ANTLR3-EP-runtime ANTLR3-EP-jar)
- endif()
+ add_dependencies(ANTLR3 ANTLR3-EP-runtime ANTLR3-EP-jar)
endif()
diff --git a/cmake/FindGMP.cmake b/cmake/FindGMP.cmake
index 6dad15954..5a849153b 100644
--- a/cmake/FindGMP.cmake
+++ b/cmake/FindGMP.cmake
@@ -76,7 +76,7 @@ if(NOT GMP_FOUND_SYSTEM)
<SOURCE_DIR>/configure --enable-shared --disable-static
--prefix=<INSTALL_DIR>/gmp-shared
--enable-cxx --with-pic --host=${TOOLCHAIN_PREFIX}
- BUILD_BYPRODUCTS <INSTALL_DIR>/lib/libgmp${CMAKE_SHARED_LIBRARY_SUFFIX}
+ BUILD_BYPRODUCTS <INSTALL_DIR>/gmp-shared/lib/libgmp.dll.a
)
ExternalProject_Add(
GMP-EP-static
@@ -88,14 +88,14 @@ if(NOT GMP_FOUND_SYSTEM)
<SOURCE_DIR>/configure --disable-shared --enable-static
--prefix=<INSTALL_DIR>/gmp-static
--enable-cxx --with-pic --host=${TOOLCHAIN_PREFIX}
- BUILD_BYPRODUCTS <INSTALL_DIR>/lib/libgmp.a
+ BUILD_BYPRODUCTS <INSTALL_DIR>/gmp-static/lib/libgmp.a
)
add_custom_target(GMP-EP DEPENDS GMP-EP-shared GMP-EP-static)
set(GMP_INCLUDE_DIR "${DEPS_BASE}/gmp-shared/include/")
set(GMP_STATIC_INCLUDE_DIR "${DEPS_BASE}/gmp-static/include/")
- set(GMP_LIBRARIES "${DEPS_BASE}/gmp-shared/bin/libgmp-10${CMAKE_SHARED_LIBRARY_SUFFIX}")
+ set(GMP_LIBRARIES "${DEPS_BASE}/gmp-shared/lib/libgmp.dll.a")
set(GMP_STATIC_LIBRARIES "${DEPS_BASE}/gmp-static/lib/libgmp.a")
file(MAKE_DIRECTORY "${GMP_INCLUDE_DIR}")
diff --git a/cmake/FindPoly.cmake b/cmake/FindPoly.cmake
index edba3663e..f363147cc 100644
--- a/cmake/FindPoly.cmake
+++ b/cmake/FindPoly.cmake
@@ -59,21 +59,8 @@ if(NOT Poly_FOUND_SYSTEM)
check_if_cross_compiling(CCWIN "Windows" "")
if(CCWIN)
- # Roughly following https://stackoverflow.com/a/44383330/2375725
- set(patchcmd
- # Avoid %z and %llu format specifiers
- COMMAND find <SOURCE_DIR>/ -type f ! -name "*.orig" -exec
- sed -i.orig "s/%z[diu]/%\\\" PRIu64 \\\"/g" {} +
- COMMAND find <SOURCE_DIR>/ -type f ! -name "*.orig" -exec
- sed -i.orig "s/%ll[du]/%\\\" PRIu64 \\\"/g" {} +
- # Make sure the new macros are available
- COMMAND find <SOURCE_DIR>/ -type f ! -name "*.orig" -exec
- sed -i.orig "s/#include <stdio.h>/#include <stdio.h>\\\\n#include <inttypes.h>/" {} +
- COMMAND find <SOURCE_DIR>/ -type f ! -name "*.orig" -exec
- sed -i.orig "s/#include <cstdio>/#include <cstdio>\\\\n#include <inttypes.h>/" {} +
- # Help with finding GMP
- COMMAND sed -i.orig "s/find_library(GMP_LIBRARY gmp)/find_library(GMP_LIBRARY gmp gmp-10)/"
- <SOURCE_DIR>/CMakeLists.txt
+ set(patchcmd COMMAND
+ ${CMAKE_SOURCE_DIR}/cmake/deps-utils/Poly-windows-patch.sh <SOURCE_DIR>
)
else()
unset(patchcmd)
diff --git a/cmake/deps-utils/Poly-windows-patch.sh b/cmake/deps-utils/Poly-windows-patch.sh
new file mode 100755
index 000000000..3b613bc0f
--- /dev/null
+++ b/cmake/deps-utils/Poly-windows-patch.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# Roughly following https://stackoverflow.com/a/44383330/2375725
+# Avoid %z and %llu format specifiers
+find $1/ -type f ! -name "*.orig" -exec \
+ sed -i.orig "s/%z[diu]/%\\\" PRIu64 \\\"/g" {} +
+find $1/ -type f ! -name "*.orig" -exec \
+ sed -i.orig "s/%ll[du]/%\\\" PRIu64 \\\"/g" {} +
+
+# Make sure the new macros are available
+find $1/ -type f ! -name "*.orig" -exec \
+ sed -i.orig "s/#include <stdio.h>/#include <stdio.h>\\n#include <inttypes.h>/" {} +
+find $1/ -type f ! -name "*.orig" -exec \
+ sed -i.orig "s/#include <cstdio>/#include <cstdio>\\n#include <inttypes.h>/" {} + \ No newline at end of file
diff --git a/docs/api/python/python.rst b/docs/api/python/python.rst
index a6aca2cf9..f6258af2d 100644
--- a/docs/api/python/python.rst
+++ b/docs/api/python/python.rst
@@ -13,7 +13,6 @@ Python API Documentation
:maxdepth: 1
quickstart
- solver
datatype
datatypeconstructor
datatypeconstructordecl
@@ -23,4 +22,6 @@ Python API Documentation
op
result
roundingmode
+ solver
+ sort
unknownexplanation
diff --git a/docs/api/python/sort.rst b/docs/api/python/sort.rst
new file mode 100644
index 000000000..270113e0c
--- /dev/null
+++ b/docs/api/python/sort.rst
@@ -0,0 +1,6 @@
+Sort
+================
+
+.. autoclass:: pycvc5.Sort
+ :members:
+ :undoc-members:
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5bde94a5d..2cb3457e1 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -887,6 +887,8 @@ libcvc5_add_sources(
theory/quantifiers/quantifiers_statistics.h
theory/quantifiers/query_generator.cpp
theory/quantifiers/query_generator.h
+ theory/quantifiers/query_generator_unsat.cpp
+ theory/quantifiers/query_generator_unsat.h
theory/quantifiers/relevant_domain.cpp
theory/quantifiers/relevant_domain.h
theory/quantifiers/single_inv_partition.cpp
@@ -1242,36 +1244,43 @@ if (BUILD_DOCS)
)
endif()
+# The mkoptions.py script only updates its output files if their content would
+# actually change. This mechanism makes sure that running the script does not
+# automatically trigger a full rebuild, but only a rebuild of those parts that
+# include files that did actually change.
+# This approach also means that the output files (which gen-options used to
+# depend on) may not actually be updated and thus cmake would keep re-running
+# mkoptions.py over and over again.
+# We thus have an artificial options.stamp file that mkoptions.py always writes
+# to, and can thus be used to communicate that all options files have been
+# properly updated.
add_custom_command(
- OUTPUT
- ${options_gen_cpp_files} ${options_gen_h_files}
- ${options_gen_doc_files}
- COMMAND
- ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/options
- COMMAND
- ${PYTHON_EXECUTABLE}
- ${CMAKE_CURRENT_LIST_DIR}/options/mkoptions.py
- ${CMAKE_CURRENT_LIST_DIR}
- ${CMAKE_BINARY_DIR}
- ${CMAKE_CURRENT_BINARY_DIR}
- ${abs_toml_files}
- DEPENDS
- options/mkoptions.py
- ${options_toml_files}
- main/options_template.cpp
- options/module_template.h
- options/module_template.cpp
- options/options_public_template.cpp
- options/options_template.h
- options/options_template.cpp
-)
-
-add_custom_target(gen-options
+ OUTPUT
+ options/options.stamp
+ COMMAND
+ ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/options
+ COMMAND
+ ${PYTHON_EXECUTABLE}
+ ${CMAKE_CURRENT_LIST_DIR}/options/mkoptions.py
+ ${CMAKE_CURRENT_LIST_DIR}
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${abs_toml_files}
+ BYPRODUCTS
+ ${options_gen_cpp_files} ${options_gen_h_files} ${options_gen_doc_files}
DEPENDS
- ${options_gen_cpp_files}
- ${options_gen_h_files}
+ options/mkoptions.py
+ ${options_toml_files}
+ main/options_template.cpp
+ options/module_template.h
+ options/module_template.cpp
+ options/options_public_template.cpp
+ options/options_template.h
+ options/options_template.cpp
)
+add_custom_target(gen-options DEPENDS options/options.stamp)
+
#-----------------------------------------------------------------------------#
diff --git a/src/api/cpp/cvc5.cpp b/src/api/cpp/cvc5.cpp
index 280b07bb2..5e38096c8 100644
--- a/src/api/cpp/cvc5.cpp
+++ b/src/api/cpp/cvc5.cpp
@@ -6054,16 +6054,16 @@ Term Solver::mkFloatingPoint(uint32_t exp, uint32_t sig, Term val) const
CVC5_API_TRY_CATCH_END;
}
-Term Solver::mkCardinalityConstraint(const Sort& sort, uint32_t ubound) const
+Term Solver::mkCardinalityConstraint(const Sort& sort, uint32_t upperBound) const
{
CVC5_API_TRY_CATCH_BEGIN;
CVC5_API_SOLVER_CHECK_SORT(sort);
CVC5_API_ARG_CHECK_EXPECTED(sort.isUninterpretedSort(), sort)
<< "an uninterpreted sort";
- CVC5_API_ARG_CHECK_EXPECTED(ubound > 0, ubound) << "a value > 0";
+ CVC5_API_ARG_CHECK_EXPECTED(upperBound > 0, upperBound) << "a value > 0";
//////// all checks before this line
Node cco =
- d_nodeMgr->mkConst(cvc5::CardinalityConstraint(*sort.d_type, ubound));
+ d_nodeMgr->mkConst(cvc5::CardinalityConstraint(*sort.d_type, upperBound));
Node cc = d_nodeMgr->mkNode(cvc5::Kind::CARDINALITY_CONSTRAINT, cco);
return Term(this, cc);
////////
diff --git a/src/api/cpp/cvc5.h b/src/api/cpp/cvc5.h
index 57c3f311d..f0a36b792 100644
--- a/src/api/cpp/cvc5.h
+++ b/src/api/cpp/cvc5.h
@@ -354,19 +354,19 @@ class CVC5_EXPORT Sort
/**
* Is this a Boolean sort?
- * @return true if the sort is a Boolean sort
+ * @return true if the sort is the Boolean sort
*/
bool isBoolean() const;
/**
* Is this a integer sort?
- * @return true if the sort is a integer sort
+ * @return true if the sort is the integer sort
*/
bool isInteger() const;
/**
* Is this a real sort?
- * @return true if the sort is a real sort
+ * @return true if the sort is the real sort
*/
bool isReal() const;
@@ -462,7 +462,7 @@ class CVC5_EXPORT Sort
/**
* Is this an array sort?
- * @return true if the sort is a array sort
+ * @return true if the sort is an array sort
*/
bool isArray() const;
@@ -485,8 +485,8 @@ class CVC5_EXPORT Sort
bool isSequence() const;
/**
- * Is this a sort kind?
- * @return true if this is a sort kind
+ * Is this an uninterpreted sort?
+ * @return true if this is an uninterpreted sort
*/
bool isUninterpretedSort() const;
@@ -499,9 +499,9 @@ class CVC5_EXPORT Sort
/**
* Is this a first-class sort?
* First-class sorts are sorts for which:
- * (1) we handle equalities between terms of that type, and
- * (2) they are allowed to be parameters of parametric sorts (e.g. index or
- * element sorts of arrays).
+ * 1. we handle equalities between terms of that type, and
+ * 2. they are allowed to be parameters of parametric sorts (e.g. index or
+ * element sorts of arrays).
*
* Examples of sorts that are not first-class include sort constructor sorts
* and regular expression sorts.
@@ -641,7 +641,7 @@ class CVC5_EXPORT Sort
Sort getArrayIndexSort() const;
/**
- * @return the array element sort of an array element sort
+ * @return the array element sort of an array sort
*/
Sort getArrayElementSort() const;
@@ -3600,10 +3600,10 @@ class CVC5_EXPORT Solver
/**
* Create a cardinality constraint for an uninterpreted sort.
* @param sort the sort the cardinality constraint is for
- * @param val the upper bound on the cardinality of the sort
+ * @param upperBound the upper bound on the cardinality of the sort
* @return the cardinality constraint
*/
- Term mkCardinalityConstraint(const Sort& sort, uint32_t ubound) const;
+ Term mkCardinalityConstraint(const Sort& sort, uint32_t upperBound) const;
/* .................................................................... */
/* Create Variables */
diff --git a/src/api/java/io/github/cvc5/api/Solver.java b/src/api/java/io/github/cvc5/api/Solver.java
index 670ab7cbd..3a9f450a4 100644
--- a/src/api/java/io/github/cvc5/api/Solver.java
+++ b/src/api/java/io/github/cvc5/api/Solver.java
@@ -1184,6 +1184,21 @@ public class Solver implements IPointer
private native long mkFloatingPoint(long pointer, int exp, int sig, long valPointer);
+ /**
+ * Create a cardinality constraint for an uninterpreted sort.
+ * @param sort the sort the cardinality constraint is for
+ * @param upperBound the upper bound on the cardinality of the sort
+ * @return the cardinality constraint
+ */
+ public Term mkCardinalityConstraint(Sort sort, int upperBound) throws CVC5ApiException
+ {
+ Utils.validateUnsigned(upperBound, "upperBound");
+ long termPointer = mkCardinalityConstraint(pointer, sort.getPointer(), upperBound);
+ return new Term(this, termPointer);
+ }
+
+ private native long mkCardinalityConstraint(long pointer, long sortPointer, int upperBound);
+
/* .................................................................... */
/* Create Variables */
/* .................................................................... */
diff --git a/src/api/java/jni/option_Info.cpp b/src/api/java/jni/option_Info.cpp
deleted file mode 100644
index 4c6025357..000000000
--- a/src/api/java/jni/option_Info.cpp
+++ /dev/null
@@ -1,345 +0,0 @@
-/******************************************************************************
- * Top contributors (to current version):
- * Mudathir Mohamed
- *
- * This file is part of the cvc5 project.
- *
- * Copyright (c) 2009-2021 by the authors listed in the file AUTHORS
- * in the top-level source directory and their institutional affiliations.
- * All rights reserved. See the file COPYING in the top-level source
- * directory for licensing information.
- * ****************************************************************************
- *
- * The cvc5 Java API.
- */
-
-#include "api/cpp/cvc5.h"
-#include "api_utilities.h"
-#include "io_github_cvc5_api_OptionInfo.h"
-
-using namespace cvc5::api;
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: deletePointer
- * Signature: (J)V
- */
-JNIEXPORT void JNICALL
-Java_io_github_cvc5_api_OptionInfo_deletePointer(JNIEnv*, jclass, jlong pointer)
-{
- delete reinterpret_cast<OptionInfo*>(pointer);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: toString
- * Signature: (J)Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL
-Java_io_github_cvc5_api_OptionInfo_toString(JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- std::stringstream ss;
- ss << *current;
- return env->NewStringUTF(ss.str().c_str());
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: getName
- * Signature: (J)Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL
-Java_io_github_cvc5_api_OptionInfo_getName(JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- return env->NewStringUTF(current->name.c_str());
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: getAliases
- * Signature: (J)[Ljava/lang/String;
- */
-JNIEXPORT jobjectArray JNICALL Java_io_github_cvc5_api_OptionInfo_getAliases(
- JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- jobjectArray ret = getStringArrayFromStringVector(env, current->aliases);
- return ret;
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: getSetByUser
- * Signature: (J)Z
- */
-JNIEXPORT jboolean JNICALL Java_io_github_cvc5_api_OptionInfo_getSetByUser(
- JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- return static_cast<jboolean>(current->setByUser);
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, static_cast<jboolean>(false));
-}
-
-/**
- * Convert OptionInfo::NumberInfo cpp object to OptionInfo.NumberInfo java
- * object
- * @tparam T cpp integer types int64_t, uint64_t, etc
- * @param env jni environment
- * @param optionInfo a java object for this OptionInfo
- * @param numberInfoClass the java class for OptionInfo.NumberInfo
- * @param methodId a constructor for OptionInfo.NumberInfo
- * @param info the cpp OptionInfo::NumberInfo object
- * @return a java object of class OptionInfo.NumberInfo<BigInteger>
- */
-template <typename T>
-jobject getNumberInfoFromInteger(JNIEnv* env,
- const _jobject* optionInfo,
- jclass numberInfoClass,
- jmethodID methodId,
- const OptionInfo::NumberInfo<T>& info)
-{
- jobject defaultValue = getBigIntegerObject<T>(env, info.defaultValue);
- jobject currentValue = getBigIntegerObject<T>(env, info.currentValue);
- jobject minimum = nullptr;
- if (info.minimum)
- {
- minimum = getBigIntegerObject<T>(env, *info.minimum);
- }
- jobject maximum = nullptr;
- if (info.maximum)
- {
- maximum = getBigIntegerObject<T>(env, *info.maximum);
- }
- jobject ret = env->NewObject(numberInfoClass,
- methodId,
- optionInfo,
- defaultValue,
- currentValue,
- minimum,
- maximum);
-
- return ret;
-}
-
-template <typename T>
-jobject getNumberInfoFromInteger(JNIEnv* env,
- const _jobject* optionInfo,
- jclass numberInfoClass,
- jmethodID methodId,
- const OptionInfo::NumberInfo<int64_t>& info);
-
-template <typename T>
-jobject getNumberInfoFromInteger(JNIEnv* env,
- const _jobject* optionInfo,
- jclass numberInfoClass,
- jmethodID methodId,
- const OptionInfo::NumberInfo<uint64_t>& info);
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: getBaseInfo
- * Signature: (J)Lio/github/cvc5/api/BaseInfo;
- */
-JNIEXPORT jobject JNICALL Java_io_github_cvc5_api_OptionInfo_getBaseInfo(
- JNIEnv* env, jobject optionInfo, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- std::variant<OptionInfo::VoidInfo,
- OptionInfo::ValueInfo<bool>,
- OptionInfo::ValueInfo<std::string>,
- OptionInfo::NumberInfo<int64_t>,
- OptionInfo::NumberInfo<uint64_t>,
- OptionInfo::NumberInfo<double>,
- OptionInfo::ModeInfo>
- v = current->valueInfo;
-
- if (std::holds_alternative<OptionInfo::VoidInfo>(v))
- {
- jclass voidInfoClass =
- env->FindClass("io/github/cvc5/api/OptionInfo$VoidInfo");
- jmethodID methodId = env->GetMethodID(
- voidInfoClass, "<init>", "(Lio/github/cvc5/api/OptionInfo;)V");
- jobject ret = env->NewObject(voidInfoClass, methodId, optionInfo);
- return ret;
- }
-
- if (std::holds_alternative<OptionInfo::ValueInfo<bool>>(v)
- || std::holds_alternative<OptionInfo::ValueInfo<std::string>>(v))
- {
- jclass valueInfoClass =
- env->FindClass("io/github/cvc5/api/OptionInfo$ValueInfo");
- jmethodID methodId =
- env->GetMethodID(valueInfoClass,
- "<init>",
- "(Lio/github/cvc5/api/OptionInfo;Ljava/lang/"
- "Object;Ljava/lang/Object;)V");
-
- if (std::holds_alternative<OptionInfo::ValueInfo<bool>>(v))
- {
- auto info = std::get<OptionInfo::ValueInfo<bool>>(v);
- jobject currentValue = getBooleanObject(env, info.currentValue);
- jobject defaultValue = getBooleanObject(env, info.defaultValue);
- jobject ret = env->NewObject(
- valueInfoClass, methodId, optionInfo, defaultValue, currentValue);
- return ret;
- }
-
- if (std::holds_alternative<OptionInfo::ValueInfo<std::string>>(v))
- {
- auto info = std::get<OptionInfo::ValueInfo<std::string>>(v);
- jstring defaultValue = env->NewStringUTF(info.defaultValue.c_str());
- jstring currentValue = env->NewStringUTF(info.currentValue.c_str());
- jobject ret = env->NewObject(
- valueInfoClass, methodId, optionInfo, defaultValue, currentValue);
- return ret;
- }
- }
-
- if (std::holds_alternative<OptionInfo::NumberInfo<int64_t>>(v)
- || std::holds_alternative<OptionInfo::NumberInfo<uint64_t>>(v)
- || std::holds_alternative<OptionInfo::NumberInfo<double>>(v))
- {
- jclass numberInfoClass =
- env->FindClass("io/github/cvc5/api/OptionInfo$NumberInfo");
- jmethodID methodId = env->GetMethodID(
- numberInfoClass,
- "<init>",
- "(Lio/github/cvc5/api/OptionInfo;Ljava/lang/Object;Ljava/lang/"
- "Object;Ljava/lang/Object;Ljava/lang/Object;)V");
-
- if (std::holds_alternative<OptionInfo::NumberInfo<int64_t>>(v))
- {
- auto info = std::get<OptionInfo::NumberInfo<int64_t>>(v);
- return getNumberInfoFromInteger(
- env, optionInfo, numberInfoClass, methodId, info);
- }
-
- if (std::holds_alternative<OptionInfo::NumberInfo<uint64_t>>(v))
- {
- auto info = std::get<OptionInfo::NumberInfo<uint64_t>>(v);
- return getNumberInfoFromInteger(
- env, optionInfo, numberInfoClass, methodId, info);
- }
-
- if (std::holds_alternative<OptionInfo::NumberInfo<double>>(v))
- {
- auto info = std::get<OptionInfo::NumberInfo<double>>(v);
- jobject defaultValue = getDoubleObject(env, info.defaultValue);
- jobject currentValue = getDoubleObject(env, info.currentValue);
- jobject minimum = nullptr;
- if (info.minimum)
- {
- minimum = getDoubleObject(env, *info.minimum);
- }
- jobject maximum = nullptr;
- if (info.maximum)
- {
- maximum = getDoubleObject(env, *info.maximum);
- }
- jobject ret = env->NewObject(numberInfoClass,
- methodId,
- optionInfo,
- defaultValue,
- currentValue,
- minimum,
- maximum);
- return ret;
- }
- }
-
- if (std::holds_alternative<OptionInfo::ModeInfo>(v))
- {
- jclass modeInfoClass =
- env->FindClass("io/github/cvc5/api/OptionInfo$ModeInfo");
- jmethodID methodId = env->GetMethodID(
- modeInfoClass,
- "<init>",
- "(Lio/github/cvc5/api/OptionInfo;Ljava/lang/String;Ljava/lang/"
- "String;[Ljava/lang/String;)V");
-
- auto info = std::get<OptionInfo::ModeInfo>(v);
- jstring defaultValue = env->NewStringUTF(info.defaultValue.c_str());
- jstring currentValue = env->NewStringUTF(info.currentValue.c_str());
- jobject stringArray = getStringArrayFromStringVector(env, info.modes);
- jobject ret = env->NewObject(modeInfoClass,
- methodId,
- optionInfo,
- defaultValue,
- currentValue,
- stringArray);
- return ret;
- }
-
- return nullptr;
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: booleanValue
- * Signature: (J)Z
- */
-JNIEXPORT jboolean JNICALL Java_io_github_cvc5_api_OptionInfo_booleanValue(
- JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- return static_cast<jboolean>(current->boolValue());
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, static_cast<jboolean>(false));
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: stringValue
- * Signature: (J)Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL Java_io_github_cvc5_api_OptionInfo_stringValue(
- JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- std::string ret = current->stringValue();
- return env->NewStringUTF(ret.c_str());
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: intValue
- * Signature: (J)Ljava/math/BigInteger;
- */
-JNIEXPORT jobject JNICALL
-Java_io_github_cvc5_api_OptionInfo_intValue(JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- std::int64_t value = current->intValue();
- jobject ret = getBigIntegerObject<std::int64_t>(env, value);
- return ret;
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, nullptr);
-}
-
-/*
- * Class: io_github_cvc5_api_OptionInfo
- * Method: doubleValue
- * Signature: (J)D
- */
-JNIEXPORT jdouble JNICALL Java_io_github_cvc5_api_OptionInfo_doubleValue(
- JNIEnv* env, jobject, jlong pointer)
-{
- CVC5_JAVA_API_TRY_CATCH_BEGIN;
- OptionInfo* current = reinterpret_cast<OptionInfo*>(pointer);
- double ret = current->doubleValue();
- return static_cast<jdouble>(ret);
- CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, static_cast<jdouble>(0.0));
-}
diff --git a/src/api/java/jni/solver.cpp b/src/api/java/jni/solver.cpp
index 882a74794..af3d7e59e 100644
--- a/src/api/java/jni/solver.cpp
+++ b/src/api/java/jni/solver.cpp
@@ -1333,6 +1333,23 @@ JNIEXPORT jlong JNICALL Java_io_github_cvc5_api_Solver_mkFloatingPoint(
/*
* Class: io_github_cvc5_api_Solver
+ * Method: mkCardinalityConstraint
+ * Signature: (JJI)J
+ */
+JNIEXPORT jlong JNICALL Java_io_github_cvc5_api_Solver_mkCardinalityConstraint(
+ JNIEnv* env, jobject, jlong pointer, jlong sortPointer, jint upperBound)
+{
+ CVC5_JAVA_API_TRY_CATCH_BEGIN;
+ Solver* solver = reinterpret_cast<Solver*>(pointer);
+ Sort* sort = reinterpret_cast<Sort*>(sortPointer);
+ Term* retPointer =
+ new Term(solver->mkCardinalityConstraint(*sort, (int32_t)upperBound));
+ return reinterpret_cast<jlong>(retPointer);
+ CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, 0);
+}
+
+/*
+ * Class: io_github_cvc5_api_Solver
* Method: mkConst
* Signature: (JJLjava/lang/String;)J
*/
@@ -2707,4 +2724,4 @@ Java_io_github_cvc5_api_Solver_getNullDatatypeDecl(JNIEnv* env, jobject, jlong)
DatatypeDecl* ret = new DatatypeDecl();
return reinterpret_cast<jlong>(ret);
CVC5_JAVA_API_TRY_CATCH_END_RETURN(env, 0);
-}
\ No newline at end of file
+}
diff --git a/src/api/java/jni/sort.cpp b/src/api/java/jni/sort.cpp
index fed1f3a41..e5b4f06fe 100644
--- a/src/api/java/jni/sort.cpp
+++ b/src/api/java/jni/sort.cpp
@@ -652,10 +652,10 @@ Java_io_github_cvc5_api_Sort_getConstructorDomainSorts(JNIEnv* env,
CVC5_JAVA_API_TRY_CATCH_BEGIN;
Sort* current = reinterpret_cast<Sort*>(pointer);
std::vector<Sort> sorts = current->getConstructorDomainSorts();
- std::vector<long> sortPointers(sorts.size());
+ std::vector<jlong> sortPointers(sorts.size());
for (size_t i = 0; i < sorts.size(); i++)
{
- sortPointers[i] = (long)new Sort(sorts[i]);
+ sortPointers[i] = reinterpret_cast<jlong> (new Sort(sorts[i]));
}
jlongArray ret = env->NewLongArray(sorts.size());
env->SetLongArrayRegion(ret, 0, sorts.size(), sortPointers.data());
@@ -765,10 +765,10 @@ Java_io_github_cvc5_api_Sort_getFunctionDomainSorts(JNIEnv* env,
CVC5_JAVA_API_TRY_CATCH_BEGIN;
Sort* current = reinterpret_cast<Sort*>(pointer);
std::vector<Sort> sorts = current->getFunctionDomainSorts();
- std::vector<long> sortPointers(sorts.size());
+ std::vector<jlong> sortPointers(sorts.size());
for (size_t i = 0; i < sorts.size(); i++)
{
- sortPointers[i] = (long)new Sort(sorts[i]);
+ sortPointers[i] = reinterpret_cast<jlong>(new Sort(sorts[i]));
}
jlongArray ret = env->NewLongArray(sorts.size());
env->SetLongArrayRegion(ret, 0, sorts.size(), sortPointers.data());
@@ -909,10 +909,10 @@ Java_io_github_cvc5_api_Sort_getUninterpretedSortParamSorts(JNIEnv* env,
CVC5_JAVA_API_TRY_CATCH_BEGIN;
Sort* current = reinterpret_cast<Sort*>(pointer);
std::vector<Sort> sorts = current->getUninterpretedSortParamSorts();
- std::vector<long> sortPointers(sorts.size());
+ std::vector<jlong> sortPointers(sorts.size());
for (size_t i = 0; i < sorts.size(); i++)
{
- sortPointers[i] = (long)new Sort(sorts[i]);
+ sortPointers[i] = reinterpret_cast<jlong>(new Sort(sorts[i]));
}
jlongArray ret = env->NewLongArray(sorts.size());
env->SetLongArrayRegion(ret, 0, sorts.size(), sortPointers.data());
@@ -1005,10 +1005,10 @@ JNIEXPORT jlongArray JNICALL Java_io_github_cvc5_api_Sort_getDatatypeParamSorts(
CVC5_JAVA_API_TRY_CATCH_BEGIN;
Sort* current = reinterpret_cast<Sort*>(pointer);
std::vector<Sort> sorts = current->getDatatypeParamSorts();
- std::vector<long> sortPointers(sorts.size());
+ std::vector<jlong> sortPointers(sorts.size());
for (size_t i = 0; i < sorts.size(); i++)
{
- sortPointers[i] = (long)new Sort(sorts[i]);
+ sortPointers[i] = reinterpret_cast<jlong>(new Sort(sorts[i]));
}
jlongArray ret = env->NewLongArray(sorts.size());
env->SetLongArrayRegion(ret, 0, sorts.size(), sortPointers.data());
@@ -1055,10 +1055,10 @@ Java_io_github_cvc5_api_Sort_getTupleSorts(JNIEnv* env, jobject, jlong pointer)
CVC5_JAVA_API_TRY_CATCH_BEGIN;
Sort* current = reinterpret_cast<Sort*>(pointer);
std::vector<Sort> sorts = current->getTupleSorts();
- std::vector<long> sortPointers(sorts.size());
+ std::vector<jlong> sortPointers(sorts.size());
for (size_t i = 0; i < sorts.size(); i++)
{
- sortPointers[i] = (long)new Sort(sorts[i]);
+ sortPointers[i] = reinterpret_cast<jlong>(new Sort(sorts[i]));
}
jlongArray ret = env->NewLongArray(sorts.size());
env->SetLongArrayRegion(ret, 0, sorts.size(), sortPointers.data());
diff --git a/src/api/python/cvc5.pxd b/src/api/python/cvc5.pxd
index 02b572120..42aee08b0 100644
--- a/src/api/python/cvc5.pxd
+++ b/src/api/python/cvc5.pxd
@@ -241,6 +241,7 @@ cdef extern from "api/cpp/cvc5.h" namespace "cvc5::api":
Term mkUninterpretedConst(Sort sort, int32_t index) except +
Term mkAbstractValue(const string& index) except +
Term mkFloatingPoint(uint32_t exp, uint32_t sig, Term val) except +
+ Term mkCardinalityConstraint(Sort sort, int32_t index) except +
Term mkConst(Sort sort, const string& symbol) except +
# default value for symbol defined in cpp/cvc5.h
Term mkConst(Sort sort) except +
@@ -340,6 +341,7 @@ cdef extern from "api/cpp/cvc5.h" namespace "cvc5::api":
bint isComparableTo(Sort s) except +
Datatype getDatatype() except +
Sort instantiate(const vector[Sort]& params) except +
+ Sort substitute(const vector[Sort] & es, const vector[Sort] & reps) except +
size_t getConstructorArity() except +
vector[Sort] getConstructorDomainSorts() except +
Sort getConstructorCodomainSort() except +
diff --git a/src/api/python/cvc5.pxi b/src/api/python/cvc5.pxi
index 4627859b9..9a7358bbf 100644
--- a/src/api/python/cvc5.pxi
+++ b/src/api/python/cvc5.pxi
@@ -609,7 +609,7 @@ cdef class Result:
:return: True if query was a :cpp:func:`Solver::checkEntailed() <cvc5::api::Solver::checkEntailed>` query query and cvc5 was not able to determine if it is entailed.
"""
return self.cr.isEntailmentUnknown()
-
+
def getUnknownExplanation(self):
"""
:return: an explanation for an unknown query result.
@@ -641,7 +641,7 @@ cdef class RoundingMode:
arithmetic, which in turn is based on IEEE Standard 754 :cite:`IEEE754`.
The rounding modes are specified in Sections 4.3.1 and 4.3.2 of the IEEE
Standard 754.
-
+
Wrapper class for :cpp:enum:`cvc5::api::RoundingMode`.
"""
cdef c_RoundingMode crm
@@ -1041,7 +1041,7 @@ cdef class Solver:
raise ValueError("Argument {} must fit in a uint32_t".format(a))
v.push_back((<uint32_t?> a))
- op.cop = self.csolver.mkOp(k.k, <const vector[uint32_t]&> v)
+ op.cop = self.csolver.mkOp(k.k, <const vector[uint32_t]&> v)
else:
raise ValueError("Unsupported signature"
" mkOp: {}".format(" X ".join([str(k), str(args[0])])))
@@ -1347,7 +1347,7 @@ cdef class Solver:
def mkRoundingMode(self, RoundingMode rm):
"""Create a roundingmode constant.
-
+
:param rm: the floating point rounding mode this constant represents
"""
cdef Term term = Term(self)
@@ -1391,6 +1391,16 @@ cdef class Solver:
term.cterm = self.csolver.mkFloatingPoint(exp, sig, val.cterm)
return term
+ def mkCardinalityConstraint(self, Sort sort, int index):
+ """Create cardinality constraint.
+
+ :param sort: Sort of the constraint
+ :param index: The upper bound for the cardinality of the sort
+ """
+ cdef Term term = Term(self)
+ term.cterm = self.csolver.mkCardinalityConstraint(sort.csort, index)
+ return term
+
def mkConst(self, Sort sort, symbol=None):
"""
Create (first-order) constant (0-arity function symbol).
@@ -2181,6 +2191,9 @@ cdef class Solver:
cdef class Sort:
+ """
+ The sort of a cvc5 term.
+ """
cdef c_Sort csort
cdef Solver solver
def __cinit__(self, Solver solver):
@@ -2215,98 +2228,266 @@ cdef class Sort:
return csorthash(self.csort)
def isNull(self):
+ """:return: True if this Sort is a null sort."""
return self.csort.isNull()
def isBoolean(self):
+ """
+ Is this a Boolean sort?
+
+ :return: True if the sort is the Boolean sort.
+ """
return self.csort.isBoolean()
def isInteger(self):
+ """
+ Is this an integer sort?
+
+ :return: True if the sort is the integer sort.
+ """
return self.csort.isInteger()
def isReal(self):
+ """
+ Is this a real sort?
+
+ :return: True if the sort is the real sort.
+ """
return self.csort.isReal()
def isString(self):
+ """
+ Is this a string sort?
+
+ :return: True if the sort is the string sort.
+ """
return self.csort.isString()
def isRegExp(self):
+ """
+ Is this a regexp sort?
+
+ :return: True if the sort is the regexp sort.
+ """
return self.csort.isRegExp()
def isRoundingMode(self):
+ """
+ Is this a rounding mode sort?
+
+ :return: True if the sort is the rounding mode sort.
+ """
return self.csort.isRoundingMode()
def isBitVector(self):
+ """
+ Is this a bit-vector sort?
+
+ :return: True if the sort is a bit-vector sort.
+ """
return self.csort.isBitVector()
def isFloatingPoint(self):
+ """
+ Is this a floating-point sort?
+
+ :return: True if the sort is a bit-vector sort.
+ """
return self.csort.isFloatingPoint()
def isDatatype(self):
+ """
+ Is this a datatype sort?
+
+ :return: True if the sort is a datatype sort.
+ """
return self.csort.isDatatype()
def isParametricDatatype(self):
+ """
+ Is this a parametric datatype sort?
+
+ :return: True if the sort is a parametric datatype sort.
+ """
return self.csort.isParametricDatatype()
def isConstructor(self):
+ """
+ Is this a constructor sort?
+
+ :return: True if the sort is a constructor sort.
+ """
return self.csort.isConstructor()
def isSelector(self):
+ """
+ Is this a selector sort?
+
+ :return: True if the sort is a selector sort.
+ """
return self.csort.isSelector()
def isTester(self):
+ """
+ Is this a tester sort?
+
+ :return: True if the sort is a selector sort.
+ """
return self.csort.isTester()
def isUpdater(self):
+ """
+ Is this a datatype updater sort?
+
+ :return: True if the sort is a datatype updater sort.
+ """
return self.csort.isUpdater()
def isFunction(self):
+ """
+ Is this a function sort?
+
+ :return: True if the sort is a function sort.
+ """
return self.csort.isFunction()
def isPredicate(self):
+ """
+ Is this a predicate sort?
+ That is, is this a function sort mapping to Boolean? All predicate
+ sorts are also function sorts.
+
+ :return: True if the sort is a predicate sort.
+ """
return self.csort.isPredicate()
def isTuple(self):
+ """
+ Is this a tuple sort?
+
+ :return: True if the sort is a tuple sort.
+ """
return self.csort.isTuple()
def isRecord(self):
+ """
+ Is this a record sort?
+
+ :return: True if the sort is a record sort.
+ """
return self.csort.isRecord()
def isArray(self):
+ """
+ Is this an array sort?
+
+ :return: True if the sort is an array sort.
+ """
return self.csort.isArray()
def isSet(self):
+ """
+ Is this a set sort?
+
+ :return: True if the sort is a set sort.
+ """
return self.csort.isSet()
def isBag(self):
+ """
+ Is this a bag sort?
+
+ :return: True if the sort is a bag sort.
+ """
return self.csort.isBag()
def isSequence(self):
+ """
+ Is this a sequence sort?
+
+ :return: True if the sort is a sequence sort.
+ """
return self.csort.isSequence()
def isUninterpretedSort(self):
+ """
+ Is this a sort uninterpreted?
+
+ :return: True if the sort is uninterpreted.
+ """
return self.csort.isUninterpretedSort()
def isSortConstructor(self):
+ """
+ Is this a sort constructor kind?
+
+ :return: True if this a sort constructor kind.
+ """
return self.csort.isSortConstructor()
def isFirstClass(self):
+ """
+ Is this a first-class sort?
+ First-class sorts are sorts for which:
+
+ 1. we handle equalities between terms of that type, and
+ 2. they are allowed to be parameters of parametric sorts
+ (e.g. index or element sorts of arrays).
+
+ Examples of sorts that are not first-class include sort constructor
+ sorts and regular expression sorts.
+
+ :return: True if the sort is a first-class sort.
+ """
return self.csort.isFirstClass()
def isFunctionLike(self):
+ """
+ Is this a function-LIKE sort?
+
+ Anything function-like except arrays (e.g., datatype selectors) is
+ considered a function here. Function-like terms can not be the argument
+ or return value for any term that is function-like.
+ This is mainly to avoid higher order.
+
+ Note that arrays are explicitly not considered function-like here.
+
+ :return: True if this is a function-like sort
+ """
return self.csort.isFunctionLike()
def isSubsortOf(self, Sort sort):
+ """
+ Is this sort a subsort of the given sort?
+
+ :return: True if this sort is a subsort of s
+ """
return self.csort.isSubsortOf(sort.csort)
def isComparableTo(self, Sort sort):
+ """
+ Is this sort comparable to the given sort
+ (i.e., do they share a common ancestor in the subsort tree)?
+
+ :return: True if this sort is comparable to s
+ """
return self.csort.isComparableTo(sort.csort)
def getDatatype(self):
+ """
+ :return: the underlying datatype of a datatype sort
+ """
cdef Datatype d = Datatype(self.solver)
d.cd = self.csort.getDatatype()
return d
def instantiate(self, params):
+ """
+ Instantiate a parameterized datatype/sort sort.
+ Create sorts parameter with :py:meth:`Solver.mkParamSort()`
+
+ :param params: the list of sort parameters to instantiate with
+ """
cdef Sort sort = Sort(self.solver)
cdef vector[c_Sort] v
for s in params:
@@ -2314,10 +2495,54 @@ cdef class Sort:
sort.csort = self.csort.instantiate(v)
return sort
+ def substitute(self, sort_or_list_1, sort_or_list_2):
+ """
+ Substitution of Sorts.
+
+ :param sort_or_list_1: the subsort or subsorts to be substituted within this sort.
+ :param sort_or_list_2: the sort or list of sorts replacing the substituted subsort.
+ """
+
+ # The resulting sort after substitution
+ cdef Sort sort = Sort(self.solver)
+ # lists for substitutions
+ cdef vector[c_Sort] ces
+ cdef vector[c_Sort] creplacements
+
+ # normalize the input parameters to be lists
+ if isinstance(sort_or_list_1, list):
+ assert isinstance(sort_or_list_2, list)
+ es = sort_or_list_1
+ replacements = sort_or_list_2
+ if len(es) != len(replacements):
+ raise RuntimeError("Expecting list inputs to substitute to "
+ "have the same length but got: "
+ "{} and {}".format(len(es), len(replacements)))
+
+ for e, r in zip(es, replacements):
+ ces.push_back((<Sort?> e).csort)
+ creplacements.push_back((<Sort?> r).csort)
+
+ else:
+ # add the single elements to the vectors
+ ces.push_back((<Sort?> sort_or_list_1).csort)
+ creplacements.push_back((<Sort?> sort_or_list_2).csort)
+
+ # call the API substitute method with lists
+ sort.csort = self.csort.substitute(ces, creplacements)
+ return sort
+
+
def getConstructorArity(self):
+ """
+ :return: the arity of a constructor sort.
+ """
return self.csort.getConstructorArity()
def getConstructorDomainSorts(self):
+ """
+ :return: the domain sorts of a constructor sort
+ """
domain_sorts = []
for s in self.csort.getConstructorDomainSorts():
sort = Sort(self.solver)
@@ -2326,34 +2551,55 @@ cdef class Sort:
return domain_sorts
def getConstructorCodomainSort(self):
+ """
+ :return: the codomain sort of a constructor sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getConstructorCodomainSort()
return sort
def getSelectorDomainSort(self):
+ """
+ :return: the domain sort of a selector sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getSelectorDomainSort()
return sort
def getSelectorCodomainSort(self):
+ """
+ :return: the codomain sort of a selector sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getSelectorCodomainSort()
return sort
def getTesterDomainSort(self):
+ """
+ :return: the domain sort of a tester sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getTesterDomainSort()
return sort
def getTesterCodomainSort(self):
+ """
+ :return: the codomain sort of a tester sort, which is the Boolean sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getTesterCodomainSort()
return sort
def getFunctionArity(self):
+ """
+ :return: the arity of a function sort
+ """
return self.csort.getFunctionArity()
def getFunctionDomainSorts(self):
+ """
+ :return: the domain sorts of a function sort
+ """
domain_sorts = []
for s in self.csort.getFunctionDomainSorts():
sort = Sort(self.solver)
@@ -2362,42 +2608,69 @@ cdef class Sort:
return domain_sorts
def getFunctionCodomainSort(self):
+ """
+ :return: the codomain sort of a function sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getFunctionCodomainSort()
return sort
def getArrayIndexSort(self):
+ """
+ :return: the array index sort of an array sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getArrayIndexSort()
return sort
def getArrayElementSort(self):
+ """
+ :return: the array element sort of an array sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getArrayElementSort()
return sort
def getSetElementSort(self):
+ """
+ :return: the element sort of a set sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getSetElementSort()
return sort
def getBagElementSort(self):
+ """
+ :return: the element sort of a bag sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getBagElementSort()
return sort
def getSequenceElementSort(self):
+ """
+ :return: the element sort of a sequence sort
+ """
cdef Sort sort = Sort(self.solver)
sort.csort = self.csort.getSequenceElementSort()
return sort
def getUninterpretedSortName(self):
+ """
+ :return: the name of an uninterpreted sort
+ """
return self.csort.getUninterpretedSortName().decode()
def isUninterpretedSortParameterized(self):
+ """
+ :return: True if an uninterpreted sort is parameterized
+ """
return self.csort.isUninterpretedSortParameterized()
def getUninterpretedSortParamSorts(self):
+ """
+ :return: the parameter sorts of an uninterpreted sort
+ """
param_sorts = []
for s in self.csort.getUninterpretedSortParamSorts():
sort = Sort(self.solver)
@@ -2406,21 +2679,39 @@ cdef class Sort:
return param_sorts
def getSortConstructorName(self):
+ """
+ :return: the name of a sort constructor sort
+ """
return self.csort.getSortConstructorName().decode()
def getSortConstructorArity(self):
+ """
+ :return: the arity of a sort constructor sort
+ """
return self.csort.getSortConstructorArity()
def getBitVectorSize(self):
+ """
+ :return: the bit-width of the bit-vector sort
+ """
return self.csort.getBitVectorSize()
def getFloatingPointExponentSize(self):
+ """
+ :return: the bit-width of the exponent of the floating-point sort
+ """
return self.csort.getFloatingPointExponentSize()
def getFloatingPointSignificandSize(self):
+ """
+ :return: the width of the significand of the floating-point sort
+ """
return self.csort.getFloatingPointSignificandSize()
def getDatatypeParamSorts(self):
+ """
+ :return: the parameter sorts of a datatype sort
+ """
param_sorts = []
for s in self.csort.getDatatypeParamSorts():
sort = Sort(self.solver)
@@ -2429,12 +2720,21 @@ cdef class Sort:
return param_sorts
def getDatatypeArity(self):
+ """
+ :return: the arity of a datatype sort
+ """
return self.csort.getDatatypeArity()
def getTupleLength(self):
+ """
+ :return: the length of a tuple sort
+ """
return self.csort.getTupleLength()
def getTupleSorts(self):
+ """
+ :return: the element sorts of a tuple sort
+ """
tuple_sorts = []
for s in self.csort.getTupleSorts():
sort = Sort(self.solver)
diff --git a/src/base/configuration.cpp b/src/base/configuration.cpp
index 35a899617..c9e650117 100644
--- a/src/base/configuration.cpp
+++ b/src/base/configuration.cpp
@@ -73,11 +73,7 @@ bool Configuration::isCompetitionBuild() {
bool Configuration::isStaticBuild()
{
-#if defined(CVC5_STATIC_BUILD)
- return true;
-#else
- return false;
-#endif
+ return CVC5_STATIC_BUILD;
}
string Configuration::getPackageName() { return CVC5_PACKAGE_NAME; }
diff --git a/src/expr/skolem_manager.cpp b/src/expr/skolem_manager.cpp
index c06f016c4..80626fbc6 100644
--- a/src/expr/skolem_manager.cpp
+++ b/src/expr/skolem_manager.cpp
@@ -54,6 +54,8 @@ const char* toString(SkolemFunId id)
case SkolemFunId::SHARED_SELECTOR: return "SHARED_SELECTOR";
case SkolemFunId::SEQ_NTH_OOB: return "SEQ_NTH_OOB";
case SkolemFunId::RE_UNFOLD_POS_COMPONENT: return "RE_UNFOLD_POS_COMPONENT";
+ case SkolemFunId::BAGS_MAP_PREIMAGE: return "BAGS_MAP_PREIMAGE";
+ case SkolemFunId::BAGS_MAP_SUM: return "BAGS_MAP_SUM";
case SkolemFunId::HO_TYPE_MATCH_PRED: return "HO_TYPE_MATCH_PRED";
default: return "?";
}
diff --git a/src/expr/skolem_manager.h b/src/expr/skolem_manager.h
index 1d35fa4b5..90e935767 100644
--- a/src/expr/skolem_manager.h
+++ b/src/expr/skolem_manager.h
@@ -51,6 +51,22 @@ enum class SkolemFunId
* i = 0, ..., n.
*/
RE_UNFOLD_POS_COMPONENT,
+ /** An uninterpreted function for bag.map operator:
+ * To compute (bag.count y (map f A)), we need to find the distinct
+ * elements in A that are mapped to y by function f (i.e., preimage of {y}).
+ * If n is the cardinality of this preimage, then
+ * the preimage is the set {uf(1), ..., uf(n)}
+ * where uf: Int -> E is a skolem function, and E is the type of elements of A
+ */
+ BAGS_MAP_PREIMAGE,
+ /** An uninterpreted function for bag.map operator:
+ * If the preimage of {y} in A is {uf(1), ..., uf(n)} (see BAGS_MAP_PREIMAGE},
+ * then the multiplicity of an element y in a bag (map f A) is sum(n),
+ * where sum: Int -> Int is a skolem function such that:
+ * sum(0) = 0
+ * sum(i) = sum (i-1) + (bag.count (uf i) A)
+ */
+ BAGS_MAP_SUM,
/** Higher-order type match predicate, see HoTermDb */
HO_TYPE_MATCH_PRED,
};
diff --git a/src/options/main_options.toml b/src/options/main_options.toml
index 974bdd155..0e3f63072 100644
--- a/src/options/main_options.toml
+++ b/src/options/main_options.toml
@@ -6,6 +6,7 @@ name = "Driver"
short = "V"
long = "version"
type = "void"
+ handler = "showVersion"
help = "identify this cvc5 binary"
[[option]]
diff --git a/src/options/mkoptions.py b/src/options/mkoptions.py
index badfd59f0..7288ede51 100644
--- a/src/options/mkoptions.py
+++ b/src/options/mkoptions.py
@@ -1047,6 +1047,9 @@ def mkoptions_main():
codegen_module(module, dst_dir, module_tpls)
codegen_all_modules(modules, build_dir, dst_dir, global_tpls)
+ # Generate output file to signal cmake when this script was run last
+ open(os.path.join(dst_dir, 'options/options.stamp'), 'w').write('')
+
if __name__ == "__main__":
mkoptions_main()
diff --git a/src/options/quantifiers_options.toml b/src/options/quantifiers_options.toml
index f3c89fd74..b15ed49a6 100644
--- a/src/options/quantifiers_options.toml
+++ b/src/options/quantifiers_options.toml
@@ -1448,10 +1448,20 @@ name = "Quantifiers"
[[option]]
name = "sygusQueryGen"
category = "regular"
- long = "sygus-query-gen"
- type = "bool"
- default = "false"
- help = "use sygus to enumerate interesting satisfiability queries"
+ long = "sygus-query-gen=MODE"
+ type = "SygusQueryGenMode"
+ default = "NONE"
+ help = "mode for generating interesting satisfiability queries using SyGuS, for internal fuzzing"
+ help_mode = "Modes for generating interesting satisfiability queries using SyGuS."
+[[option.mode.NONE]]
+ name = "none"
+ help = "Do not generate queries with SyGuS."
+[[option.mode.SAT]]
+ name = "sat"
+ help = "Generate interesting SAT queries, for e.g. soundness testing."
+[[option.mode.UNSAT]]
+ name = "unsat"
+ help = "Generate interesting UNSAT queries, for e.g. proof testing."
[[option]]
name = "sygusQueryGenThresh"
diff --git a/src/parser/CMakeLists.txt b/src/parser/CMakeLists.txt
index eeae442a4..e2cf3d64e 100644
--- a/src/parser/CMakeLists.txt
+++ b/src/parser/CMakeLists.txt
@@ -102,18 +102,14 @@ add_library(cvc5parser-objs OBJECT ${libcvc5parser_src_files})
set_target_properties(cvc5parser-objs PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(cvc5parser-objs PUBLIC -D__BUILDING_CVC5PARSERLIB -Dcvc5_obj_EXPORTS)
-add_dependencies(cvc5parser-objs ANTLR3_SHARED)
+add_dependencies(cvc5parser-objs ANTLR3)
target_include_directories(cvc5parser-objs PRIVATE ${ANTLR3_INCLUDE_DIR})
add_library(cvc5parser-shared SHARED $<TARGET_OBJECTS:cvc5parser-objs>)
set_target_properties(cvc5parser-shared PROPERTIES SOVERSION ${CVC5_SOVERSION})
set_target_properties(cvc5parser-shared PROPERTIES OUTPUT_NAME cvc5parser)
target_link_libraries(cvc5parser-shared PRIVATE cvc5-shared)
-if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
- target_link_libraries(cvc5parser-shared PRIVATE ANTLR3_STATIC)
-else()
- target_link_libraries(cvc5parser-shared PRIVATE ANTLR3_SHARED)
-endif()
+target_link_libraries(cvc5parser-shared PRIVATE ANTLR3)
install(TARGETS cvc5parser-shared
EXPORT cvc5-targets
@@ -123,7 +119,7 @@ if(ENABLE_STATIC_LIBRARY)
add_library(cvc5parser-static STATIC $<TARGET_OBJECTS:cvc5parser-objs>)
set_target_properties(cvc5parser-static PROPERTIES OUTPUT_NAME cvc5parser)
target_link_libraries(cvc5parser-static PRIVATE cvc5-static)
- target_link_libraries(cvc5parser-static PRIVATE ANTLR3_STATIC)
+ target_link_libraries(cvc5parser-static PRIVATE ANTLR3)
install(TARGETS cvc5parser-objs cvc5parser-static
EXPORT cvc5-targets
diff --git a/src/preprocessing/assertion_pipeline.cpp b/src/preprocessing/assertion_pipeline.cpp
index 665415772..f97c5bafb 100644
--- a/src/preprocessing/assertion_pipeline.cpp
+++ b/src/preprocessing/assertion_pipeline.cpp
@@ -186,7 +186,7 @@ void AssertionPipeline::conjoin(size_t i, Node n, ProofGenerator* pg)
lcp->addLazyStep(d_nodes[i], d_pppg);
lcp->addStep(newConj, PfRule::AND_INTRO, {d_nodes[i], n}, {});
}
- if (newConjr != newConj)
+ if (!CDProof::isSame(newConjr, newConj))
{
lcp->addStep(
newConjr, PfRule::MACRO_SR_PRED_TRANSFORM, {newConj}, {newConjr});
diff --git a/src/proof/alethe/alethe_post_processor.cpp b/src/proof/alethe/alethe_post_processor.cpp
index 920ca7dcb..2eb3d8d1d 100644
--- a/src/proof/alethe/alethe_post_processor.cpp
+++ b/src/proof/alethe/alethe_post_processor.cpp
@@ -18,6 +18,7 @@
#include "expr/node_algorithm.h"
#include "proof/proof.h"
#include "proof/proof_checker.h"
+#include "proof/proof_node_algorithm.h"
#include "theory/builtin/proof_checker.h"
#include "util/rational.h"
@@ -621,100 +622,7 @@ bool AletheProofPostprocessCallback::update(Node res,
}
}
- // If res is not an or node, then it's necessarily a singleton clause.
- bool isSingletonClause = res.getKind() != kind::OR;
- // Otherwise, we need to determine if res, which is of the form (or t1 ...
- // tn), corresponds to the clause (cl t1 ... tn) or to (cl (OR t1 ...
- // tn)). The only way in which the latter can happen is if res occurs as a
- // child in one of the premises, and is not eliminated afterwards. So we
- // search for res as a subterm of some children, which would mark its last
- // insertion into the resolution result. If res does not occur as the
- // pivot to be eliminated in a subsequent premise, then, and only then, it
- // is a singleton clause.
- if (!isSingletonClause)
- {
- size_t i;
- // Find out the last child to introduced res, if any. We only need to
- // look at the last one because any previous introduction would have
- // been eliminated.
- //
- // After the loop finishes i is the index of the child C_i that
- // introduced res. If i=0 none of the children introduced res as a
- // subterm and therefore it cannot be a singleton clause.
- for (i = children.size(); i > 0; --i)
- {
- // only non-singleton clauses may be introducing
- // res, so we only care about non-singleton or nodes. We check then
- // against the kind and whether the whole or node occurs as a pivot of
- // the respective resolution
- if (children[i - 1].getKind() != kind::OR)
- {
- continue;
- }
- size_t pivotIndex = (i != 1) ? 2 * (i - 1) - 1 : 1;
- if (args[pivotIndex] == children[i - 1]
- || args[pivotIndex].notNode() == children[i - 1])
- {
- continue;
- }
- // if res occurs as a subterm of a non-singleton premise
- if (std::find(children[i - 1].begin(), children[i - 1].end(), res)
- != children[i - 1].end())
- {
- break;
- }
- }
-
- // If res is a subterm of one of the children we still need to check if
- // that subterm is eliminated
- if (i > 0)
- {
- bool posFirst = (i == 1) ? (args[0] == trueNode)
- : (args[(2 * (i - 1)) - 2] == trueNode);
- Node pivot = (i == 1) ? args[1] : args[(2 * (i - 1)) - 1];
-
- // Check if it is eliminated by the previous resolution step
- if ((res == pivot && !posFirst)
- || (res.notNode() == pivot && posFirst)
- || (pivot.notNode() == res && posFirst))
- {
- // We decrease i by one, since it could have been the case that i
- // was equal to children.size(), so that isSingletonClause is set to
- // false
- --i;
- }
- else
- {
- // Otherwise check if any subsequent premise eliminates it
- for (; i < children.size(); ++i)
- {
- posFirst = args[(2 * i) - 2] == trueNode;
- pivot = args[(2 * i) - 1];
- // To eliminate res, the clause must contain it with opposite
- // polarity. There are three successful cases, according to the
- // pivot and its sign
- //
- // - res is the same as the pivot and posFirst is true, which
- // means that the clause contains its negation and eliminates it
- //
- // - res is the negation of the pivot and posFirst is false, so
- // the clause contains the node whose negation is res. Note that
- // this case may either be res.notNode() == pivot or res ==
- // pivot.notNode().
- if ((res == pivot && posFirst)
- || (res.notNode() == pivot && !posFirst)
- || (pivot.notNode() == res && !posFirst))
- {
- break;
- }
- }
- }
- }
- // if not eliminated (loop went to the end), then it's a singleton
- // clause
- isSingletonClause = i == children.size();
- }
- if (!isSingletonClause)
+ if (!expr::isSingletonClause(res, children, args))
{
return addAletheStepFromOr(
AletheRule::RESOLUTION, res, new_children, {}, *cdp);
@@ -760,6 +668,415 @@ bool AletheProofPostprocessCallback::update(Node res,
{},
*cdp);
}
+ // ======== Split
+ // See proof_rule.h for documentation on the SPLIT rule. This comment
+ // uses variable names as introduced there.
+ //
+ // --------- NOT_NOT --------- NOT_NOT
+ // VP1 VP2
+ // -------------------------------- RESOLUTION
+ // (cl F (not F))*
+ //
+ // VP1: (cl (not (not (not F))) F)
+ // VP2: (cl (not (not (not (not F)))) (not F))
+ //
+ // * the corresponding proof node is (or F (not F))
+ case PfRule::SPLIT:
+ {
+ Node vp1 = nm->mkNode(
+ kind::SEXPR, d_cl, args[0].notNode().notNode().notNode(), args[0]);
+ Node vp2 = nm->mkNode(kind::SEXPR,
+ d_cl,
+ args[0].notNode().notNode().notNode().notNode(),
+ args[0].notNode());
+
+ return addAletheStep(AletheRule::NOT_NOT, vp2, vp2, {}, {}, *cdp)
+ && addAletheStep(AletheRule::NOT_NOT, vp1, vp1, {}, {}, *cdp)
+ && addAletheStepFromOr(AletheRule::RESOLUTION, res, {vp1, vp2}, {}, *cdp);
+ }
+ // ======== Equality resolution
+ // See proof_rule.h for documentation on the EQ_RESOLVE rule. This
+ // comment uses variable names as introduced there.
+ //
+ // If F1 = (or G1 ... Gn), then P1 will be printed as (cl G1 ... Gn) but
+    //  needs to be printed as (cl (or G1 ... Gn)). The only exceptions to this
+    //  are ASSUME steps, which are always printed as (cl (or G1 ... Gn)), and
+    //  EQ_RESOLVE steps themselves.
+ //
+ // ------ ... ------ OR_NEG
+ // P1 VP21 ... VP2n
+ // ---------------------------- RESOLUTION
+ // VP3
+ // ---------------------------- DUPLICATED_LITERALS
+ // VP4
+ //
+ // for i=1 to n, VP2i: (cl (or G1 ... Gn) (not Gi))
+ // VP3: (cl (or G1 ... Gn)^n)
+    //  VP4: (cl (or G1 ... Gn))
+ //
+ // Let child1 = VP4.
+ //
+ //
+ // Otherwise, child1 = P1.
+ //
+ //
+ // Then, if F2 = false:
+ //
+ // ------ EQUIV_POS2
+ // VP1 P2 child1
+ // --------------------------------- RESOLUTION
+ // (cl)*
+ //
+ // Otherwise:
+ //
+ // ------ EQUIV_POS2
+ // VP1 P2 child1
+ // --------------------------------- RESOLUTION
+ // (cl F2)*
+ //
+ // VP1: (cl (not (= F1 F2)) (not F1) F2)
+ //
+ // * the corresponding proof node is F2
+ case PfRule::EQ_RESOLVE:
+ {
+ bool success = true;
+ Node vp1 =
+ nm->mkNode(kind::SEXPR,
+ {d_cl, children[1].notNode(), children[0].notNode(), res});
+ Node child1 = children[0];
+
+ // Transform (cl F1 ... Fn) into (cl (or F1 ... Fn))
+ if (children[0].notNode() != children[1].notNode()
+ && children[0].getKind() == kind::OR)
+ {
+ PfRule pr = cdp->getProofFor(child1)->getRule();
+ if (pr != PfRule::ASSUME && pr != PfRule::EQ_RESOLVE)
+ {
+ std::vector<Node> clauses{d_cl};
+ clauses.insert(clauses.end(),
+ children[0].begin(),
+ children[0].end()); //(cl G1 ... Gn)
+
+ std::vector<Node> vp2Nodes{children[0]};
+ std::vector<Node> resNodes{d_cl};
+ for (size_t i = 0, size = children[0].getNumChildren(); i < size; i++)
+ {
+ Node vp2i = nm->mkNode(
+ kind::SEXPR,
+ d_cl,
+ children[0],
+ children[0][i].notNode()); //(cl (or G1 ... Gn) (not Gi))
+ success &=
+ addAletheStep(AletheRule::OR_NEG, vp2i, vp2i, {}, {}, *cdp);
+ vp2Nodes.push_back(vp2i);
+ resNodes.push_back(children[0]);
+ }
+ Node vp3 = nm->mkNode(kind::SEXPR, resNodes);
+ success &= addAletheStep(
+ AletheRule::RESOLUTION, vp3, vp3, vp2Nodes, {}, *cdp);
+
+ Node vp4 = nm->mkNode(kind::SEXPR, d_cl, children[0]);
+ success &= addAletheStep(
+ AletheRule::DUPLICATED_LITERALS, vp4, vp4, {vp3}, {}, *cdp);
+ child1 = vp4;
+ }
+ }
+
+ success &= addAletheStep(AletheRule::EQUIV_POS2, vp1, vp1, {}, {}, *cdp);
+
+ return success &= addAletheStep(AletheRule::RESOLUTION,
+ res,
+ res == nm->mkConst(false)
+ ? nm->mkNode(kind::SEXPR, d_cl)
+ : nm->mkNode(kind::SEXPR, d_cl, res),
+ {vp1, children[1], child1},
+ {},
+ *cdp);
+ }
+ // ======== Modus ponens
+ // See proof_rule.h for documentation on the MODUS_PONENS rule. This comment
+ // uses variable names as introduced there.
+ //
+ // (P2:(=> F1 F2))
+ // ------------------------ IMPLIES
+ // (VP1:(cl (not F1) F2)) (P1:F1)
+ // -------------------------------------------- RESOLUTION
+ // (cl F2)*
+ //
+ // * the corresponding proof node is F2
+ case PfRule::MODUS_PONENS:
+ {
+ Node vp1 = nm->mkNode(kind::SEXPR, d_cl, children[0].notNode(), res);
+
+ return addAletheStep(
+ AletheRule::IMPLIES, vp1, vp1, {children[1]}, {}, *cdp)
+ && addAletheStep(AletheRule::RESOLUTION,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ {vp1, children[0]},
+ {},
+ *cdp);
+ }
+ // ======== Double negation elimination
+ // See proof_rule.h for documentation on the NOT_NOT_ELIM rule. This comment
+ // uses variable names as introduced there.
+ //
+ // ---------------------------------- NOT_NOT
+ // (VP1:(cl (not (not (not F))) F)) (P:(not (not F)))
+ // ------------------------------------------------------------- RESOLUTION
+ // (cl F)*
+ //
+ // * the corresponding proof node is F
+ case PfRule::NOT_NOT_ELIM:
+ {
+ Node vp1 = nm->mkNode(kind::SEXPR, d_cl, children[0].notNode(), res);
+
+ return addAletheStep(AletheRule::NOT_NOT, vp1, vp1, {}, {}, *cdp)
+ && addAletheStep(AletheRule::RESOLUTION,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ {vp1, children[0]},
+ {},
+ *cdp);
+ }
+ // ======== Contradiction
+ // See proof_rule.h for documentation on the CONTRA rule. This
+ // comment uses variable names as introduced there.
+ //
+ // P1 P2
+ // --------- RESOLUTION
+ // (cl)*
+ //
+ // * the corresponding proof node is false
+ case PfRule::CONTRA:
+ {
+ return addAletheStep(AletheRule::RESOLUTION,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl),
+ children,
+ {},
+ *cdp);
+ }
+ // ======== And elimination
+ // This rule is translated according to the singleton pattern.
+ case PfRule::AND_ELIM:
+ {
+ return addAletheStep(AletheRule::AND,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ children,
+ {},
+ *cdp);
+ }
+ // ======== And introduction
+ // See proof_rule.h for documentation on the AND_INTRO rule. This
+ // comment uses variable names as introduced there.
+ //
+ //
+ // ----- AND_NEG
+ // VP1 P1 ... Pn
+ // -------------------------- RESOLUTION
+ // (cl (and F1 ... Fn))*
+ //
+ // VP1:(cl (and F1 ... Fn) (not F1) ... (not Fn))
+ //
+ // * the corresponding proof node is (and F1 ... Fn)
+ case PfRule::AND_INTRO:
+ {
+ std::vector<Node> neg_Nodes = {d_cl,res};
+ for (size_t i = 0, size = children.size(); i < size; i++)
+ {
+ neg_Nodes.push_back(children[i].notNode());
+ }
+ Node vp1 = nm->mkNode(kind::SEXPR, neg_Nodes);
+
+ std::vector<Node> new_children = {vp1};
+ new_children.insert(new_children.end(), children.begin(), children.end());
+
+ return addAletheStep(AletheRule::AND_NEG, vp1, vp1, {}, {}, *cdp)
+ && addAletheStep(AletheRule::RESOLUTION,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ new_children,
+ {},
+ *cdp);
+ }
+ // ======== Not Or elimination
+ // This rule is translated according to the singleton pattern.
+ case PfRule::NOT_OR_ELIM:
+ {
+ return addAletheStep(AletheRule::NOT_OR,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ children,
+ {},
+ *cdp);
+ }
+ // ======== Implication elimination
+ // This rule is translated according to the clause pattern.
+ case PfRule::IMPLIES_ELIM:
+ {
+ return addAletheStepFromOr(AletheRule::IMPLIES, res, children, {}, *cdp);
+ }
+ // ======== Not Implication elimination version 1
+ // This rule is translated according to the singleton pattern.
+ case PfRule::NOT_IMPLIES_ELIM1:
+ {
+ return addAletheStep(AletheRule::NOT_IMPLIES1,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ children,
+ {},
+ *cdp);
+ }
+ // ======== Not Implication elimination version 2
+ // This rule is translated according to the singleton pattern.
+ case PfRule::NOT_IMPLIES_ELIM2:
+ {
+ return addAletheStep(AletheRule::NOT_IMPLIES2,
+ res,
+ nm->mkNode(kind::SEXPR, d_cl, res),
+ children,
+ {},
+ *cdp);
+ }
+ // ======== Various elimination rules
+ // The following rules are all translated according to the clause pattern.
+ case PfRule::EQUIV_ELIM1:
+ {
+ return addAletheStepFromOr(AletheRule::EQUIV1, res, children, {}, *cdp);
+ }
+ case PfRule::EQUIV_ELIM2:
+ {
+ return addAletheStepFromOr(AletheRule::EQUIV2, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_EQUIV_ELIM1:
+ {
+ return addAletheStepFromOr(
+ AletheRule::NOT_EQUIV1, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_EQUIV_ELIM2:
+ {
+ return addAletheStepFromOr(
+ AletheRule::NOT_EQUIV2, res, children, {}, *cdp);
+ }
+ case PfRule::XOR_ELIM1:
+ {
+ return addAletheStepFromOr(AletheRule::XOR1, res, children, {}, *cdp);
+ }
+ case PfRule::XOR_ELIM2:
+ {
+ return addAletheStepFromOr(AletheRule::XOR2, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_XOR_ELIM1:
+ {
+ return addAletheStepFromOr(AletheRule::NOT_XOR1, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_XOR_ELIM2:
+ {
+ return addAletheStepFromOr(AletheRule::NOT_XOR2, res, children, {}, *cdp);
+ }
+ case PfRule::ITE_ELIM1:
+ {
+ return addAletheStepFromOr(AletheRule::ITE2, res, children, {}, *cdp);
+ }
+ case PfRule::ITE_ELIM2:
+ {
+ return addAletheStepFromOr(AletheRule::ITE1, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_ITE_ELIM1:
+ {
+ return addAletheStepFromOr(AletheRule::NOT_ITE2, res, children, {}, *cdp);
+ }
+ case PfRule::NOT_ITE_ELIM2:
+ {
+ return addAletheStepFromOr(AletheRule::NOT_ITE1, res, children, {}, *cdp);
+ }
+ //================================================= De Morgan rules
+ // ======== Not And
+ // This rule is translated according to the clause pattern.
+ case PfRule::NOT_AND:
+ {
+ return addAletheStepFromOr(AletheRule::NOT_AND, res, children, {}, *cdp);
+ }
+
+ //================================================= CNF rules
+ // The following rules are all translated according to the clause pattern.
+ case PfRule::CNF_AND_POS:
+ {
+ return addAletheStepFromOr(AletheRule::AND_POS, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_AND_NEG:
+ {
+ return addAletheStepFromOr(AletheRule::AND_NEG, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_OR_POS:
+ {
+ return addAletheStepFromOr(AletheRule::OR_POS, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_OR_NEG:
+ {
+ return addAletheStepFromOr(AletheRule::OR_NEG, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_IMPLIES_POS:
+ {
+ return addAletheStepFromOr(
+ AletheRule::IMPLIES_POS, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_IMPLIES_NEG1:
+ {
+ return addAletheStepFromOr(
+ AletheRule::IMPLIES_NEG1, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_IMPLIES_NEG2:
+ {
+ return addAletheStepFromOr(
+ AletheRule::IMPLIES_NEG2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_EQUIV_POS1:
+ {
+ return addAletheStepFromOr(
+ AletheRule::EQUIV_POS2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_EQUIV_POS2:
+ {
+ return addAletheStepFromOr(
+ AletheRule::EQUIV_POS1, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_EQUIV_NEG1:
+ {
+ return addAletheStepFromOr(
+ AletheRule::EQUIV_NEG2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_EQUIV_NEG2:
+ {
+ return addAletheStepFromOr(
+ AletheRule::EQUIV_NEG1, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_XOR_POS1:
+ {
+ return addAletheStepFromOr(AletheRule::XOR_POS1, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_XOR_POS2:
+ {
+ return addAletheStepFromOr(AletheRule::XOR_POS2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_XOR_NEG1:
+ {
+ return addAletheStepFromOr(AletheRule::XOR_NEG2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_XOR_NEG2:
+ {
+ return addAletheStepFromOr(AletheRule::XOR_NEG1, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_ITE_POS1:
+ {
+ return addAletheStepFromOr(AletheRule::ITE_POS2, res, children, {}, *cdp);
+ }
+ case PfRule::CNF_ITE_POS2:
+ {
+ return addAletheStepFromOr(AletheRule::ITE_POS1, res, children, {}, *cdp);
+ }
default:
{
return addAletheStep(AletheRule::UNDEFINED,
diff --git a/src/proof/lazy_proof.cpp b/src/proof/lazy_proof.cpp
index eab452d49..f6c9d8eb2 100644
--- a/src/proof/lazy_proof.cpp
+++ b/src/proof/lazy_proof.cpp
@@ -26,8 +26,11 @@ namespace cvc5 {
LazyCDProof::LazyCDProof(ProofNodeManager* pnm,
ProofGenerator* dpg,
context::Context* c,
- const std::string& name)
- : CDProof(pnm, c, name), d_gens(c ? c : &d_context), d_defaultGen(dpg)
+ const std::string& name,
+ bool autoSym)
+ : CDProof(pnm, c, name, autoSym),
+ d_gens(c ? c : &d_context),
+ d_defaultGen(dpg)
{
}
diff --git a/src/proof/lazy_proof.h b/src/proof/lazy_proof.h
index bf5bc229c..e14dc8a1c 100644
--- a/src/proof/lazy_proof.h
+++ b/src/proof/lazy_proof.h
@@ -41,11 +41,15 @@ class LazyCDProof : public CDProof
* for facts that have no explicitly provided generator.
* @param c The context that this class depends on. If none is provided,
* this class is context-independent.
+ * @param name The name of this proof generator (for debugging)
+ * @param autoSym Whether symmetry steps are automatically added when adding
+ * steps to this proof
*/
LazyCDProof(ProofNodeManager* pnm,
ProofGenerator* dpg = nullptr,
context::Context* c = nullptr,
- const std::string& name = "LazyCDProof");
+ const std::string& name = "LazyCDProof",
+ bool autoSym = true);
~LazyCDProof();
/**
* Get lazy proof for fact, or nullptr if it does not exist. This may
diff --git a/src/proof/proof_node_algorithm.cpp b/src/proof/proof_node_algorithm.cpp
index 5f56d785e..ce8ca55c3 100644
--- a/src/proof/proof_node_algorithm.cpp
+++ b/src/proof/proof_node_algorithm.cpp
@@ -237,5 +237,93 @@ bool containsSubproof(ProofNode* pn,
return false;
}
+bool isSingletonClause(TNode res,
+ const std::vector<Node>& children,
+ const std::vector<Node>& args)
+{
+ if (res.getKind() != kind::OR)
+ {
+ return true;
+ }
+ size_t i;
+ Node trueNode = NodeManager::currentNM()->mkConst(true);
+  // Find out the last child to introduce res, if any. We only need to
+ // look at the last one because any previous introduction would have
+ // been eliminated.
+ //
+ // After the loop finishes i is the index of the child C_i that
+ // introduced res. If i=0 none of the children introduced res as a
+ // subterm and therefore it cannot be a singleton clause.
+ for (i = children.size(); i > 0; --i)
+ {
+    // Only non-singleton clauses may introduce res, so we only care about
+    // non-singleton OR nodes. We thus check the kind and whether the whole OR
+    // node occurs as a pivot of the respective resolution.
+ if (children[i - 1].getKind() != kind::OR)
+ {
+ continue;
+ }
+ size_t pivotIndex = (i != 1) ? 2 * (i - 1) - 1 : 1;
+ if (args[pivotIndex] == children[i - 1]
+ || args[pivotIndex].notNode() == children[i - 1])
+ {
+ continue;
+ }
+ // if res occurs as a subterm of a non-singleton premise
+ if (std::find(children[i - 1].begin(), children[i - 1].end(), res)
+ != children[i - 1].end())
+ {
+ break;
+ }
+ }
+
+ // If res is a subterm of one of the children we still need to check if
+ // that subterm is eliminated
+ if (i > 0)
+ {
+ bool posFirst = (i == 1) ? (args[0] == trueNode)
+ : (args[(2 * (i - 1)) - 2] == trueNode);
+ Node pivot = (i == 1) ? args[1] : args[(2 * (i - 1)) - 1];
+
+ // Check if it is eliminated by the previous resolution step
+ if ((res == pivot && !posFirst) || (res.notNode() == pivot && posFirst)
+ || (pivot.notNode() == res && posFirst))
+ {
+ // We decrease i by one, since it could have been the case that i
+ // was equal to children.size(), so that we return false in the end
+ --i;
+ }
+ else
+ {
+ // Otherwise check if any subsequent premise eliminates it
+ for (; i < children.size(); ++i)
+ {
+ posFirst = args[(2 * i) - 2] == trueNode;
+ pivot = args[(2 * i) - 1];
+ // To eliminate res, the clause must contain it with opposite
+ // polarity. There are three successful cases, according to the
+ // pivot and its sign
+ //
+ // - res is the same as the pivot and posFirst is true, which
+ // means that the clause contains its negation and eliminates it
+ //
+ // - res is the negation of the pivot and posFirst is false, so
+ // the clause contains the node whose negation is res. Note that
+ // this case may either be res.notNode() == pivot or res ==
+ // pivot.notNode().
+ if ((res == pivot && posFirst) || (res.notNode() == pivot && !posFirst)
+ || (pivot.notNode() == res && !posFirst))
+ {
+ break;
+ }
+ }
+ }
+ }
+ // if not eliminated (loop went to the end), then it's a singleton
+ // clause
+ return i == children.size();
+}
+
} // namespace expr
} // namespace cvc5
diff --git a/src/proof/proof_node_algorithm.h b/src/proof/proof_node_algorithm.h
index f35a84c87..b2e20c478 100644
--- a/src/proof/proof_node_algorithm.h
+++ b/src/proof/proof_node_algorithm.h
@@ -91,6 +91,34 @@ bool containsSubproof(ProofNode* pn,
ProofNode* pnc,
std::unordered_set<const ProofNode*>& visited);
+/** Whether the result of a resolution corresponds to a singleton clause
+ *
+ * Viewing a node as a clause (i.e., as a list of literals), whether a node of
+ * the form (or t1 ... tn) corresponds to the clause [t1, ..., tn] or to the
+ * clause [(or t1 ... tn)] can be ambiguous in different settings.
+ *
+ * This method determines whether a node `res`, corresponding to the result of a
+ * resolution inference with premises `children` and arguments `args` (see
+ * proof_rule.h for more details on the inference), is a singleton clause (i.e.,
+ * a clause with a single literal).
+ *
+ * It does so relying on the fact that `res` is only a singleton if it occurs as
+ * a child in one of the premises and is not eliminated afterwards. So we search
+ * for `res` as a subterm of some child, which would mark its last insertion
+ * into the resolution result. If `res` does not occur as the pivot to be
+ * eliminated in a subsequent premise, then, and only then, it is a singleton
+ * clause.
+ *
+ * @param res the result of a resolution inference
+ * @param children the premises for the resolution inference
+ * @param args the arguments, i.e., the pivots and their polarities, for the
+ * resolution inference
+ * @return whether `res` is a singleton clause
+ */
+bool isSingletonClause(TNode res,
+ const std::vector<Node>& children,
+ const std::vector<Node>& args);
+
} // namespace expr
} // namespace cvc5
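
To make the clause-vs-literal ambiguity concrete, the following is a minimal standalone sketch (not cvc5 code; all names are illustrative) of the reading problem that isSingletonClause settles: the same OR term may stand either for a multi-literal clause or for a single literal, and only the premises and pivots of the resolution can tell the two readings apart.

// Illustrative sketch only: clauses are vectors of string literals and the
// disjunction (or a b) is the single string "(or a b)". The real utility
// additionally checks that the introduced literal is not later eliminated as
// a pivot; that bookkeeping is omitted here.
#include <algorithm>
#include <string>
#include <vector>

using Clause = std::vector<std::string>;

// Reading 1: the resolution result (or a b) as the two-literal clause [a, b].
Clause asMultiLiteralClause() { return {"a", "b"}; }

// Reading 2: the same term as the singleton clause [(or a b)].
Clause asSingletonClause() { return {"(or a b)"}; }

// First half of the disambiguation: (or a b) can only be a singleton if some
// premise contains the whole disjunction as one of its literals.
bool introducedAsLiteral(const std::vector<Clause>& premises,
                         const std::string& res)
{
  for (const Clause& premise : premises)
  {
    if (std::find(premise.begin(), premise.end(), res) != premise.end())
    {
      return true;
    }
  }
  return false;
}
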
diff --git a/src/prop/sat_proof_manager.cpp b/src/prop/sat_proof_manager.cpp
index e650473b3..74617a521 100644
--- a/src/prop/sat_proof_manager.cpp
+++ b/src/prop/sat_proof_manager.cpp
@@ -722,6 +722,8 @@ void SatProofManager::finalizeProof()
<< "SatProofManager::finalizeProof: conflicting (lazy) satLit: "
<< d_conflictLit << "\n";
finalizeProof(getClauseNode(d_conflictLit), {d_conflictLit});
+  // reset, since in incremental mode this may be used again
+ d_conflictLit = undefSatVariable;
}
void SatProofManager::finalizeProof(Minisat::Lit inConflict, bool adding)
diff --git a/src/smt/assertions.cpp b/src/smt/assertions.cpp
index 77646c002..9c24dd690 100644
--- a/src/smt/assertions.cpp
+++ b/src/smt/assertions.cpp
@@ -40,6 +40,7 @@ Assertions::Assertions(Env& env, AbstractValues& absv)
d_produceAssertions(false),
d_assertionList(userContext()),
d_assertionListDefs(userContext()),
+ d_globalDefineFunLemmasIndex(userContext(), 0),
d_globalNegation(false),
d_assertions()
{
@@ -49,6 +50,22 @@ Assertions::~Assertions()
{
}
+void Assertions::refresh()
+{
+ if (d_globalDefineFunLemmas != nullptr)
+ {
+ // Global definitions are asserted now to ensure they always exist. This is
+ // done at the beginning of preprocessing, to ensure that definitions take
+ // priority over, e.g. solving during preprocessing. See issue #7479.
+ size_t numGlobalDefs = d_globalDefineFunLemmas->size();
+ for (size_t i = d_globalDefineFunLemmasIndex.get(); i < numGlobalDefs; i++)
+ {
+ addFormula((*d_globalDefineFunLemmas)[i], false, true, false);
+ }
+ d_globalDefineFunLemmasIndex = numGlobalDefs;
+ }
+}
+
void Assertions::finishInit()
{
// [MGD 10/20/2011] keep around in incremental mode, due to a
@@ -107,16 +124,6 @@ void Assertions::initializeCheckSat(const std::vector<Node>& assumptions,
ensureBoolean(n);
addFormula(n, true, false, false);
}
- if (d_globalDefineFunLemmas != nullptr)
- {
- // Global definitions are asserted at check-sat-time because we have to
- // make sure that they are always present (they are essentially level
- // zero assertions)
- for (const Node& lemma : *d_globalDefineFunLemmas)
- {
- addFormula(lemma, false, true, false);
- }
- }
}
void Assertions::assertFormula(const Node& n)
diff --git a/src/smt/assertions.h b/src/smt/assertions.h
index 15131be60..5cda366e6 100644
--- a/src/smt/assertions.h
+++ b/src/smt/assertions.h
@@ -21,6 +21,7 @@
#include <vector>
#include "context/cdlist.h"
+#include "context/cdo.h"
#include "expr/node.h"
#include "preprocessing/assertion_pipeline.h"
#include "smt/env_obj.h"
@@ -56,6 +57,16 @@ class Assertions : protected EnvObj
* without a check-sat.
*/
void clearCurrent();
+ /** refresh
+ *
+ * Ensures that all global declarations have been processed in the current
+ * context. This may trigger substitutions to be added to the top-level
+ * substitution and/or formulas added to the current set of assertions.
+ *
+   * If global declarations are enabled, this method must be called before
+ * processing assertions.
+ */
+ void refresh();
/*
* Initialize a call to check satisfiability. This adds assumptions to
* the list of assumptions maintained by this class, and finalizes the
@@ -163,6 +174,8 @@ class Assertions : protected EnvObj
* assert this list of definitions in each check-sat call.
*/
std::unique_ptr<std::vector<Node>> d_globalDefineFunLemmas;
+ /** The index of the above list that we have processed */
+ context::CDO<size_t> d_globalDefineFunLemmasIndex;
/**
* The list of assumptions from the previous call to checkSatisfiability.
* Note that if the last call to checkSatisfiability was an entailment check,
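
The bookkeeping above, remembering in a context-dependent index how many global define-fun lemmas have already been asserted and asserting only the new ones on each refresh, can be sketched in isolation as follows. This is a minimal sketch, not cvc5 code: a plain size_t and std::vector stand in for the CDO index and the lemma list, and assertFn stands in for addFormula.

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

// Illustrative sketch only: global definitions accumulate in a vector, and an
// index records how many of them have already been asserted. Each refresh()
// asserts only the suffix added since the last call, mirroring
// d_globalDefineFunLemmasIndex above (minus the context dependence).
class GlobalDefinitions
{
 public:
  void addDefinition(const std::string& lemma) { d_lemmas.push_back(lemma); }

  // assertFn plays the role of addFormula in the real code.
  void refresh(const std::function<void(const std::string&)>& assertFn)
  {
    for (size_t i = d_index; i < d_lemmas.size(); ++i)
    {
      assertFn(d_lemmas[i]);
    }
    d_index = d_lemmas.size();
  }

 private:
  std::vector<std::string> d_lemmas;
  size_t d_index = 0;
};
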
diff --git a/src/smt/process_assertions.cpp b/src/smt/process_assertions.cpp
index 67e83bcd1..473b53015 100644
--- a/src/smt/process_assertions.cpp
+++ b/src/smt/process_assertions.cpp
@@ -93,6 +93,8 @@ void ProcessAssertions::spendResource(Resource r)
bool ProcessAssertions::apply(Assertions& as)
{
+  // must first refresh the assertions, in case global declarations are enabled
+ as.refresh();
AssertionPipeline& assertions = as.getAssertionPipeline();
Assert(d_preprocessingPassContext != nullptr);
// Dump the assertions
diff --git a/src/smt/proof_post_processor.cpp b/src/smt/proof_post_processor.cpp
index a519ca6b5..17ea2c09c 100644
--- a/src/smt/proof_post_processor.cpp
+++ b/src/smt/proof_post_processor.cpp
@@ -18,6 +18,7 @@
#include "expr/skolem_manager.h"
#include "options/proof_options.h"
#include "preprocessing/assertion_pipeline.h"
+#include "proof/proof_node_algorithm.h"
#include "proof/proof_node_manager.h"
#include "smt/solver_engine.h"
#include "theory/arith/arith_utilities.h"
@@ -164,6 +165,8 @@ Node ProofPostprocessCallback::eliminateCrowdingLits(
CDProof* cdp)
{
Trace("smt-proof-pp-debug2") << push;
+ Trace("smt-proof-pp-debug2") << "Clause lits: " << clauseLits << "\n";
+ Trace("smt-proof-pp-debug2") << "Target lits: " << targetClauseLits << "\n\n";
NodeManager* nm = NodeManager::currentNM();
Node trueNode = nm->mkConst(true);
// get crowding lits and the position of the last clause that includes
@@ -683,6 +686,7 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
// literals, which could themselves add crowding literals.
if (chainConclusion == args[0])
{
+ Trace("smt-proof-pp-debug") << "..same conclusion, DONE.\n";
cdp->addStep(
chainConclusion, PfRule::CHAIN_RESOLUTION, children, chainResArgs);
return chainConclusion;
@@ -695,11 +699,29 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
chainConclusion.end()};
std::set<Node> chainConclusionLitsSet{chainConclusion.begin(),
chainConclusion.end()};
- // is args[0] a singleton clause? If it's not an OR node, then yes.
- // Otherwise, it's only a singleton if it occurs in chainConclusionLitsSet
+ Trace("smt-proof-pp-debug2")
+ << "..chainConclusionLits: " << chainConclusionLits << "\n";
+ Trace("smt-proof-pp-debug2")
+ << "..chainConclusionLitsSet: " << chainConclusionLitsSet << "\n";
std::vector<Node> conclusionLits;
- // whether conclusion is singleton
- if (chainConclusionLitsSet.count(args[0]))
+    // Is args[0] a singleton clause? Yes if it's not an OR node. One might
+    // also think that it is a singleton if args[0] occurs in
+    // chainConclusionLitsSet. However it's not possible to know this by
+    // looking at the sets alone. For example, with
+    //
+    // args[0]                : (or b c)
+    // chainConclusionLitsSet : {b, c, (or b c)}
+    //
+    // if args[0] occurs in the set but as a crowding literal, then args[0] is
+    // *not* a singleton clause. But if b and c were the crowding literals,
+    // then args[0] would be a singleton clause. Since our intention is to
+    // determine which literals are the crowding ones based precisely on
+    // whether args[0] is a singleton or not, we must determine in another way
+    // whether args[0] is a singleton.
+    //
+    // Thus we rely on the standard utility to determine if args[0] is a
+    // singleton based on the premises and arguments of the resolution.
+ if (expr::isSingletonClause(args[0], children, chainResArgs))
{
conclusionLits.push_back(args[0]);
}
@@ -716,6 +738,7 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
// chain.
if (chainConclusionLitsSet != conclusionLitsSet)
{
+ Trace("smt-proof-pp-debug") << "..need to eliminate crowding lits.\n";
chainConclusion = eliminateCrowdingLits(
chainConclusionLits, conclusionLits, children, args, cdp);
// update vector of lits. Note that the set is no longer used, so we don't
@@ -739,6 +762,7 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
}
else
{
+ Trace("smt-proof-pp-debug") << "..add chainRes step directly.\n";
cdp->addStep(
chainConclusion, PfRule::CHAIN_RESOLUTION, children, chainResArgs);
}
@@ -751,6 +775,7 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
// factoring
if (chainConclusionLits.size() != conclusionLits.size())
{
+ Trace("smt-proof-pp-debug") << "..add factoring step.\n";
// We build it rather than taking conclusionLits because the order may be
// different
std::vector<Node> factoredLits;
@@ -777,6 +802,7 @@ Node ProofPostprocessCallback::expandMacros(PfRule id,
// reordering
if (n != args[0])
{
+ Trace("smt-proof-pp-debug") << "..add reordering step.\n";
cdp->addStep(args[0], PfRule::REORDERING, {n}, {args[0]});
}
return args[0];
diff --git a/src/smt/set_defaults.cpp b/src/smt/set_defaults.cpp
index 7c813cee0..19eab3617 100644
--- a/src/smt/set_defaults.cpp
+++ b/src/smt/set_defaults.cpp
@@ -197,7 +197,8 @@ void SetDefaults::finalizeLogic(LogicInfo& logic, Options& opts) const
}
else if (!isSygus(opts) && logic.isQuantified()
&& (logic.isPure(THEORY_FP)
- || (logic.isPure(THEORY_ARITH) && !logic.isLinear())))
+ || (logic.isPure(THEORY_ARITH) && !logic.isLinear()))
+ && !opts.base.incrementalSolving)
{
opts.quantifiers.sygusInst = true;
}
@@ -301,7 +302,11 @@ void SetDefaults::finalizeLogic(LogicInfo& logic, Options& opts) const
// formulas at preprocess time.
//
// We don't want to set this option when we are in logics that contain ALL.
- if (!logic.hasEverything() && logic.isTheoryEnabled(THEORY_STRINGS))
+ //
+  // We must also enable stringExp if regExpElimAgg is true, since the latter
+  // introduces bounded quantifiers during preprocessing.
+ if ((!logic.hasEverything() && logic.isTheoryEnabled(THEORY_STRINGS))
+ || opts.strings.regExpElimAgg)
{
// If the user explicitly set a logic that includes strings, but is not
// the generic "ALL" logic, then enable stringsExp.
@@ -989,6 +994,18 @@ bool SetDefaults::incompatibleWithIncremental(const LogicInfo& logic,
<< std::endl;
opts.quantifiers.sygusInference = false;
}
+ if (opts.quantifiers.sygusInst)
+ {
+ if (opts.quantifiers.sygusInstWasSetByUser)
+ {
+ reason << "sygus inst";
+ return true;
+ }
+ Notice() << "SolverEngine: turning off sygus inst to support "
+ "incremental solving"
+ << std::endl;
+ opts.quantifiers.sygusInst = false;
+ }
if (opts.smt.solveIntAsBV > 0)
{
reason << "solveIntAsBV";
@@ -1340,20 +1357,6 @@ void SetDefaults::setDefaultsQuantifiers(const LogicInfo& logic,
{
opts.quantifiers.macrosQuant = false;
}
- // HOL is incompatible with fmfBound
- if (opts.quantifiers.fmfBound)
- {
- if (opts.quantifiers.fmfBoundWasSetByUser
- || opts.quantifiers.fmfBoundLazyWasSetByUser
- || opts.quantifiers.fmfBoundIntWasSetByUser)
- {
- Notice() << "Disabling bound finite-model finding since it is "
- "incompatible with HOL.\n";
- }
-
- opts.quantifiers.fmfBound = false;
- Trace("smt") << "turning off fmf-bound, since HOL\n";
- }
}
if (opts.quantifiers.fmfFunWellDefinedRelevant)
{
@@ -1628,7 +1631,7 @@ void SetDefaults::setDefaultsSygus(Options& opts) const
reqBasicSygus = true;
}
if (opts.quantifiers.sygusRewSynth || opts.quantifiers.sygusRewVerify
- || opts.quantifiers.sygusQueryGen)
+ || opts.quantifiers.sygusQueryGen != options::SygusQueryGenMode::NONE)
{
// rewrite rule synthesis implies that sygus stream must be true
opts.quantifiers.sygusStream = true;
diff --git a/src/theory/bags/bag_solver.cpp b/src/theory/bags/bag_solver.cpp
index 203903d88..341798eb2 100644
--- a/src/theory/bags/bag_solver.cpp
+++ b/src/theory/bags/bag_solver.cpp
@@ -31,8 +31,16 @@ namespace cvc5 {
namespace theory {
namespace bags {
-BagSolver::BagSolver(SolverState& s, InferenceManager& im, TermRegistry& tr)
- : d_state(s), d_ig(&s, &im), d_im(im), d_termReg(tr)
+BagSolver::BagSolver(Env& env,
+ SolverState& s,
+ InferenceManager& im,
+ TermRegistry& tr)
+ : EnvObj(env),
+ d_state(s),
+ d_ig(&s, &im),
+ d_im(im),
+ d_termReg(tr),
+ d_mapCache(userContext())
{
d_zero = NodeManager::currentNM()->mkConst(Rational(0));
d_one = NodeManager::currentNM()->mkConst(Rational(1));
@@ -69,6 +77,7 @@ void BagSolver::postCheck()
case kind::DIFFERENCE_SUBTRACT: checkDifferenceSubtract(n); break;
case kind::DIFFERENCE_REMOVE: checkDifferenceRemove(n); break;
case kind::DUPLICATE_REMOVAL: checkDuplicateRemoval(n); break;
+ case kind::BAG_MAP: checkMap(n); break;
default: break;
}
it++;
@@ -210,6 +219,34 @@ void BagSolver::checkDisequalBagTerms()
}
}
+void BagSolver::checkMap(Node n)
+{
+ Assert(n.getKind() == BAG_MAP);
+ const set<Node>& downwards = d_state.getElements(n);
+ const set<Node>& upwards = d_state.getElements(n[1]);
+ for (const Node& y : downwards)
+ {
+ if (d_mapCache.count(n) && d_mapCache[n].get()->contains(y))
+ {
+ continue;
+ }
+ auto [downInference, uf, preImageSize] = d_ig.mapDownwards(n, y);
+ d_im.lemmaTheoryInference(&downInference);
+ for (const Node& x : upwards)
+ {
+ InferInfo upInference = d_ig.mapUpwards(n, uf, preImageSize, y, x);
+ d_im.lemmaTheoryInference(&upInference);
+ }
+ if (!d_mapCache.count(n))
+ {
+ std::shared_ptr<context::CDHashSet<Node> > set =
+ std::make_shared<context::CDHashSet<Node> >(userContext());
+ d_mapCache.insert(n, set);
+ }
+ d_mapCache[n].get()->insert(y);
+ }
+}
+
} // namespace bags
} // namespace theory
} // namespace cvc5
diff --git a/src/theory/bags/bag_solver.h b/src/theory/bags/bag_solver.h
index 5fb49fab7..9155c93d0 100644
--- a/src/theory/bags/bag_solver.h
+++ b/src/theory/bags/bag_solver.h
@@ -13,7 +13,10 @@
* Solver for the theory of bags.
*/
+#include "context/cdhashmap.h"
+#include "context/cdhashset.h"
#include "cvc5_private.h"
+#include "smt/env_obj.h"
#ifndef CVC5__THEORY__BAG__SOLVER_H
#define CVC5__THEORY__BAG__SOLVER_H
@@ -31,10 +34,10 @@ class TermRegistry;
/** The solver for the theory of bags
*
*/
-class BagSolver
+class BagSolver : protected EnvObj
{
public:
- BagSolver(SolverState& s, InferenceManager& im, TermRegistry& tr);
+ BagSolver(Env& env, SolverState& s, InferenceManager& im, TermRegistry& tr);
~BagSolver();
void postCheck();
@@ -73,6 +76,8 @@ class BagSolver
void checkNonNegativeCountTerms(const Node& bag, const Node& element);
/** apply inference rules for disequal bag terms */
void checkDisequalBagTerms();
+ /** apply inference rules for map operator */
+ void checkMap(Node n);
/** The solver state object */
SolverState& d_state;
@@ -82,6 +87,14 @@ class BagSolver
InferenceManager& d_im;
/** Reference to the term registry of theory of bags */
TermRegistry& d_termReg;
+
+  /** A cache that stores bags of kind BAG_MAP along with the element
+   * representatives for which we have already generated inferences.
+   */
+ using BagElementsMap =
+ context::CDHashMap<Node, std::shared_ptr<context::CDHashSet<Node> > >;
+ BagElementsMap d_mapCache;
+
/** Commonly used constants */
Node d_true;
Node d_false;
diff --git a/src/theory/bags/bags_rewriter.cpp b/src/theory/bags/bags_rewriter.cpp
index 646bb28cf..0be83fb13 100644
--- a/src/theory/bags/bags_rewriter.cpp
+++ b/src/theory/bags/bags_rewriter.cpp
@@ -532,7 +532,8 @@ BagsRewriteResponse BagsRewriter::postRewriteMap(const TNode& n) const
case MK_BAG:
{
Node mappedElement = d_nm->mkNode(APPLY_UF, n[0], n[1][0]);
- Node ret = d_nm->mkNode(MK_BAG, mappedElement, n[1][0]);
+ Node ret =
+ d_nm->mkBag(n[0].getType().getRangeType(), mappedElement, n[1][1]);
return BagsRewriteResponse(ret, Rewrite::MAP_MK_BAG);
}
diff --git a/src/theory/bags/inference_generator.cpp b/src/theory/bags/inference_generator.cpp
index 556dc76d6..734572f7c 100644
--- a/src/theory/bags/inference_generator.cpp
+++ b/src/theory/bags/inference_generator.cpp
@@ -20,6 +20,7 @@
#include "expr/skolem_manager.h"
#include "theory/bags/inference_manager.h"
#include "theory/bags/solver_state.h"
+#include "theory/quantifiers/fmf/bounded_integers.h"
#include "theory/uf/equality_engine.h"
#include "util/rational.h"
@@ -42,7 +43,7 @@ InferInfo InferenceGenerator::nonNegativeCount(Node n, Node e)
Assert(n.getType().isBag());
Assert(e.getType() == n.getType().getBagElementType());
- InferInfo inferInfo(d_im, InferenceId::BAG_NON_NEGATIVE_COUNT);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_NON_NEGATIVE_COUNT);
Node count = d_nm->mkNode(kind::BAG_COUNT, e, n);
Node gte = d_nm->mkNode(kind::GEQ, count, d_zero);
@@ -59,7 +60,7 @@ InferInfo InferenceGenerator::mkBag(Node n, Node e)
{
// TODO issue #78: refactor this with BagRewriter
// (=> true (= (bag.count e (bag e c)) c))
- InferInfo inferInfo(d_im, InferenceId::BAG_MK_BAG_SAME_ELEMENT);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_MK_BAG_SAME_ELEMENT);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
inferInfo.d_conclusion = count.eqNode(n[1]);
@@ -70,7 +71,7 @@ InferInfo InferenceGenerator::mkBag(Node n, Node e)
// (=>
// true
// (= (bag.count e (bag x c)) (ite (= e x) c 0)))
- InferInfo inferInfo(d_im, InferenceId::BAG_MK_BAG);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_MK_BAG);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
Node same = d_nm->mkNode(kind::EQUAL, n[0], e);
@@ -81,6 +82,27 @@ InferInfo InferenceGenerator::mkBag(Node n, Node e)
}
}
+/**
+ * A bound variable corresponding to the universally quantified integer
+ * variable used to range over the distinct elements in a bag, used
+ * for axiomatizing the behavior of some term.
+ */
+struct FirstIndexVarAttributeId
+{
+};
+typedef expr::Attribute<FirstIndexVarAttributeId, Node> FirstIndexVarAttribute;
+
+/**
+ * A second bound variable corresponding to a universally quantified integer
+ * variable used to range over the distinct elements in a bag, used
+ * for axiomatizing the behavior of some term.
+ */
+struct SecondIndexVarAttributeId
+{
+};
+typedef expr::Attribute<SecondIndexVarAttributeId, Node>
+ SecondIndexVarAttribute;
+
struct BagsDeqAttributeId
{
};
@@ -93,7 +115,7 @@ InferInfo InferenceGenerator::bagDisequality(Node n)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_DISEQUALITY);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_DISEQUALITY);
TypeNode elementType = A.getType().getBagElementType();
BoundVarManager* bvm = d_nm->getBoundVarManager();
@@ -126,7 +148,7 @@ InferInfo InferenceGenerator::empty(Node n, Node e)
Assert(n.getKind() == kind::EMPTYBAG);
Assert(e.getType() == n.getType().getBagElementType());
- InferInfo inferInfo(d_im, InferenceId::BAG_EMPTY);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_EMPTY);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
@@ -142,7 +164,7 @@ InferInfo InferenceGenerator::unionDisjoint(Node n, Node e)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_UNION_DISJOINT);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_UNION_DISJOINT);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
@@ -164,7 +186,7 @@ InferInfo InferenceGenerator::unionMax(Node n, Node e)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_UNION_MAX);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_UNION_MAX);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
@@ -187,7 +209,7 @@ InferInfo InferenceGenerator::intersection(Node n, Node e)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_INTERSECTION_MIN);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_INTERSECTION_MIN);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
@@ -208,7 +230,7 @@ InferInfo InferenceGenerator::differenceSubtract(Node n, Node e)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_DIFFERENCE_SUBTRACT);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_DIFFERENCE_SUBTRACT);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
@@ -230,7 +252,7 @@ InferInfo InferenceGenerator::differenceRemove(Node n, Node e)
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(d_im, InferenceId::BAG_DIFFERENCE_REMOVE);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_DIFFERENCE_REMOVE);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
@@ -251,7 +273,7 @@ InferInfo InferenceGenerator::duplicateRemoval(Node n, Node e)
Assert(e.getType() == n[0].getType().getBagElementType());
Node A = n[0];
- InferInfo inferInfo(d_im, InferenceId::BAG_DUPLICATE_REMOVAL);
+ InferInfo inferInfo(d_im, InferenceId::BAGS_DUPLICATE_REMOVAL);
Node countA = getMultiplicityTerm(e, A);
Node skolem = getSkolem(n, inferInfo);
@@ -270,6 +292,137 @@ Node InferenceGenerator::getMultiplicityTerm(Node element, Node bag)
return count;
}
+std::tuple<InferInfo, Node, Node> InferenceGenerator::mapDownwards(Node n,
+ Node e)
+{
+ Assert(n.getKind() == kind::BAG_MAP && n[1].getType().isBag());
+ Assert(n[0].getType().isFunction()
+ && n[0].getType().getArgTypes().size() == 1);
+ Assert(e.getType() == n[0].getType().getRangeType());
+
+ InferInfo inferInfo(d_im, InferenceId::BAGS_MAP);
+
+ Node f = n[0];
+ Node A = n[1];
+ // declare an uninterpreted function uf: Int -> T
+ TypeNode domainType = f.getType().getArgTypes()[0];
+ TypeNode ufType = d_nm->mkFunctionType(d_nm->integerType(), domainType);
+ Node uf =
+ d_sm->mkSkolemFunction(SkolemFunId::BAGS_MAP_PREIMAGE, ufType, {n, e});
+
+ // declare uninterpreted function sum: Int -> Int
+ TypeNode sumType =
+ d_nm->mkFunctionType(d_nm->integerType(), d_nm->integerType());
+ Node sum = d_sm->mkSkolemFunction(SkolemFunId::BAGS_MAP_SUM, sumType, {n, e});
+
+ // (= (sum 0) 0)
+ Node sum_zero = d_nm->mkNode(kind::APPLY_UF, sum, d_zero);
+ Node baseCase = d_nm->mkNode(Kind::EQUAL, sum_zero, d_zero);
+
+ // guess the size of the preimage of e
+ Node preImageSize = d_sm->mkDummySkolem("preImageSize", d_nm->integerType());
+
+ // (= (sum preImageSize) (bag.count e skolem))
+ Node mapSkolem = getSkolem(n, inferInfo);
+ Node countE = getMultiplicityTerm(e, mapSkolem);
+ Node totalSum = d_nm->mkNode(kind::APPLY_UF, sum, preImageSize);
+ Node totalSumEqualCountE = d_nm->mkNode(kind::EQUAL, totalSum, countE);
+
+ // (forall ((i Int))
+ // (let ((uf_i (uf i)))
+ // (let ((count_uf_i (bag.count uf_i A)))
+ // (=>
+ // (and (>= i 1) (<= i preImageSize))
+ // (and
+ // (= (f uf_i) e)
+ // (>= count_uf_i 1)
+ // (= (sum i) (+ (sum (- i 1)) count_uf_i))
+ // (forall ((j Int))
+ // (=>
+ // (and (< i j) (<= j preImageSize))
+ // (not (= (uf i) (uf j))))))
+ // )))))
+
+ BoundVarManager* bvm = d_nm->getBoundVarManager();
+ Node i = bvm->mkBoundVar<FirstIndexVarAttribute>(n, "i", d_nm->integerType());
+ Node j =
+ bvm->mkBoundVar<SecondIndexVarAttribute>(n, "j", d_nm->integerType());
+ Node iList = d_nm->mkNode(kind::BOUND_VAR_LIST, i);
+ Node jList = d_nm->mkNode(kind::BOUND_VAR_LIST, j);
+ Node iPlusOne = d_nm->mkNode(kind::PLUS, i, d_one);
+ Node iMinusOne = d_nm->mkNode(kind::MINUS, i, d_one);
+ Node uf_i = d_nm->mkNode(kind::APPLY_UF, uf, i);
+ Node uf_j = d_nm->mkNode(kind::APPLY_UF, uf, j);
+ Node f_uf_i = d_nm->mkNode(kind::APPLY_UF, f, uf_i);
+ Node uf_iPlusOne = d_nm->mkNode(kind::APPLY_UF, uf, iPlusOne);
+ Node uf_iMinusOne = d_nm->mkNode(kind::APPLY_UF, uf, iMinusOne);
+ Node interval_i = d_nm->mkNode(kind::AND,
+ d_nm->mkNode(kind::GEQ, i, d_one),
+ d_nm->mkNode(kind::LEQ, i, preImageSize));
+ Node sum_i = d_nm->mkNode(kind::APPLY_UF, sum, i);
+ Node sum_iPlusOne = d_nm->mkNode(kind::APPLY_UF, sum, iPlusOne);
+ Node sum_iMinusOne = d_nm->mkNode(kind::APPLY_UF, sum, iMinusOne);
+ Node count_iMinusOne = d_nm->mkNode(kind::BAG_COUNT, uf_iMinusOne, A);
+ Node count_uf_i = d_nm->mkNode(kind::BAG_COUNT, uf_i, A);
+ Node inductiveCase = d_nm->mkNode(
+ Kind::EQUAL, sum_i, d_nm->mkNode(kind::PLUS, sum_iMinusOne, count_uf_i));
+ Node f_iEqualE = d_nm->mkNode(kind::EQUAL, f_uf_i, e);
+ Node geqOne = d_nm->mkNode(kind::GEQ, count_uf_i, d_one);
+
+ // i < j <= preImageSize
+ Node interval_j = d_nm->mkNode(kind::AND,
+ d_nm->mkNode(kind::LT, i, j),
+ d_nm->mkNode(kind::LEQ, j, preImageSize));
+ // uf(i) != uf(j)
+ Node uf_i_equals_uf_j = d_nm->mkNode(kind::EQUAL, uf_i, uf_j);
+ Node notEqual = d_nm->mkNode(kind::EQUAL, uf_i, uf_j).negate();
+ Node body_j = d_nm->mkNode(kind::OR, interval_j.negate(), notEqual);
+ Node forAll_j = quantifiers::BoundedIntegers::mkBoundedForall(jList, body_j);
+ Node andNode =
+ d_nm->mkNode(kind::AND, {f_iEqualE, geqOne, inductiveCase, forAll_j});
+ Node body_i = d_nm->mkNode(kind::OR, interval_i.negate(), andNode);
+ Node forAll_i = quantifiers::BoundedIntegers::mkBoundedForall(iList, body_i);
+ Node preImageGTE_zero = d_nm->mkNode(kind::GEQ, preImageSize, d_zero);
+ Node conclusion = d_nm->mkNode(
+ kind::AND, {baseCase, totalSumEqualCountE, forAll_i, preImageGTE_zero});
+ inferInfo.d_conclusion = conclusion;
+
+ return std::tuple(inferInfo, uf, preImageSize);
+}
+
+InferInfo InferenceGenerator::mapUpwards(
+ Node n, Node uf, Node preImageSize, Node y, Node x)
+{
+ Assert(n.getKind() == kind::BAG_MAP && n[1].getType().isBag());
+ Assert(n[0].getType().isFunction()
+ && n[0].getType().getArgTypes().size() == 1);
+
+ InferInfo inferInfo(d_im, InferenceId::BAGS_MAP);
+ Node f = n[0];
+ Node A = n[1];
+
+ Node countA = getMultiplicityTerm(x, A);
+ Node xInA = d_nm->mkNode(kind::GEQ, countA, d_one);
+ Node notEqual =
+ d_nm->mkNode(kind::EQUAL, d_nm->mkNode(kind::APPLY_UF, f, x), y).negate();
+
+ Node k = d_sm->mkDummySkolem("k", d_nm->integerType());
+ Node inRange = d_nm->mkNode(kind::AND,
+ d_nm->mkNode(kind::GEQ, k, d_one),
+ d_nm->mkNode(kind::LEQ, k, preImageSize));
+ Node equal =
+ d_nm->mkNode(kind::EQUAL, d_nm->mkNode(kind::APPLY_UF, uf, k), x);
+ Node andNode = d_nm->mkNode(kind::AND, inRange, equal);
+ Node orNode = d_nm->mkNode(kind::OR, notEqual, andNode);
+ Node implies = d_nm->mkNode(kind::IMPLIES, xInA, orNode);
+ inferInfo.d_conclusion = implies;
+ return inferInfo;
+}
+
} // namespace bags
} // namespace theory
} // namespace cvc5
diff --git a/src/theory/bags/inference_generator.h b/src/theory/bags/inference_generator.h
index 252b9641e..ab3a84b29 100644
--- a/src/theory/bags/inference_generator.h
+++ b/src/theory/bags/inference_generator.h
@@ -164,6 +164,59 @@ class InferenceGenerator
* where skolem is a fresh variable equals (duplicate_removal A)
*/
InferInfo duplicateRemoval(Node n, Node e);
+ /**
+ * @param n is (bag.map f A) where f is a function (-> E T), A a bag of type
+ * (Bag E)
+   * @param e is a node of type T
+ * @return an inference that represents the following implication
+ * (and
+ * (= (sum 0) 0)
+ * (= (sum preImageSize) (bag.count e skolem))
+ * (>= preImageSize 0)
+ * (forall ((i Int))
+ * (let ((uf_i (uf i)))
+ * (let ((count_uf_i (bag.count uf_i A)))
+ * (=>
+ * (and (>= i 1) (<= i preImageSize))
+ * (and
+ * (= (f uf_i) e)
+ * (>= count_uf_i 1)
+ * (= (sum i) (+ (sum (- i 1)) count_uf_i))
+ * (forall ((j Int))
+ * (or
+ * (not (and (< i j) (<= j preImageSize)))
+ * (not (= (uf i) (uf j)))) )
+ * ))))))
+ * where uf: Int -> E is an uninterpreted function from integers to the
+ * type of the elements of A
+ * preImageSize is the cardinality of the distinct elements in A that are
+ * mapped to e by function f (i.e., preimage of {e})
+ * sum: Int -> Int is a function that aggregates the multiplicities of the
+ * preimage of e,
+   *   and skolem is a fresh variable equal to (bag.map f A)
+ */
+ std::tuple<InferInfo, Node, Node> mapDownwards(Node n, Node e);
+
+ /**
+ * @param n is (bag.map f A) where f is a function (-> E T), A a bag of type
+ * (Bag E)
+ * @param uf is an uninterpreted function Int -> E
+ * @param preImageSize is the cardinality of the distinct elements in A that
+ * are mapped to y by function f (i.e., preimage of {y})
+ * @param y is an element of type T
+   * @param x is an element of type E
+ * @return an inference that represents the following implication
+ * (=>
+ * (>= (bag.count x A) 1)
+ * (or
+   *     (not (= (f x) y))
+   *     (and
+   *       (>= skolem 1)
+   *       (<= skolem preImageSize)
+   *       (= (uf skolem) x))))
+ * where skolem is a fresh variable
+ */
+ InferInfo mapUpwards(Node n, Node uf, Node preImageSize, Node y, Node x);
/**
* @param element of type T
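
The multiplicity bookkeeping that mapDownwards and mapUpwards axiomatize can be seen concretely in the following standalone sketch (not cvc5 code; all names are illustrative): the count of an element e in (bag.map f A) is the sum of the multiplicities of the elements of A that f maps to e, which is what the uf, sum and preImageSize terms encode symbolically.

#include <functional>
#include <iostream>
#include <map>

// Illustrative sketch only: a bag is modeled as a map from elements to
// multiplicities. countInMap computes the multiplicity of e in (bag.map f a)
// by summing the multiplicities of the preimage of e under f.
using Bag = std::map<int, int>;

int countInMap(const Bag& a, const std::function<int(int)>& f, int e)
{
  int sum = 0;
  for (const auto& [x, multiplicity] : a)
  {
    if (f(x) == e)  // x belongs to the preimage of e
    {
      sum += multiplicity;
    }
  }
  return sum;
}

int main()
{
  Bag a = {{1, 2}, {2, 3}, {5, 1}};      // the bag {1 : 2, 2 : 3, 5 : 1}
  auto parity = [](int x) { return x % 2; };
  // The preimage of 1 under parity is {1, 5}, so the multiplicity of 1 in
  // (bag.map parity a) is 2 + 1 = 3.
  std::cout << countInMap(a, parity, 1) << std::endl;  // prints 3
  return 0;
}
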
diff --git a/src/theory/bags/normal_form.cpp b/src/theory/bags/normal_form.cpp
index 58445de59..69401b3fa 100644
--- a/src/theory/bags/normal_form.cpp
+++ b/src/theory/bags/normal_form.cpp
@@ -159,7 +159,7 @@ Node NormalForm::evaluateBinaryOperation(const TNode& n,
// handle the remaining elements from A
remainderOfA(elements, elementsA, itA);
// handle the remaining elements from B
- remainderOfA(elements, elementsB, itB);
+ remainderOfB(elements, elementsB, itB);
Trace("bags-evaluate") << "elements: " << elements << std::endl;
Node bag = constructConstantBagFromElements(n.getType(), elements);
diff --git a/src/theory/bags/theory_bags.cpp b/src/theory/bags/theory_bags.cpp
index 4a6d345e9..f10144255 100644
--- a/src/theory/bags/theory_bags.cpp
+++ b/src/theory/bags/theory_bags.cpp
@@ -36,7 +36,7 @@ TheoryBags::TheoryBags(Env& env, OutputChannel& out, Valuation valuation)
d_statistics(),
d_rewriter(&d_statistics.d_rewrites),
d_termReg(env, d_state, d_im),
- d_solver(d_state, d_im, d_termReg)
+ d_solver(env, d_state, d_im, d_termReg)
{
// use the official theory state and inference manager objects
d_theoryState = &d_state;
diff --git a/src/theory/booleans/proof_checker.cpp b/src/theory/booleans/proof_checker.cpp
index eea179ad3..a7fefd47a 100644
--- a/src/theory/booleans/proof_checker.cpp
+++ b/src/theory/booleans/proof_checker.cpp
@@ -196,79 +196,89 @@ Node BoolProofRuleChecker::checkInternal(PfRule id,
NodeManager* nm = NodeManager::currentNM();
Node trueNode = nm->mkConst(true);
Node falseNode = nm->mkConst(false);
- std::vector<Node> clauseNodes;
- for (std::size_t i = 0, childrenSize = children.size(); i < childrenSize;
- ++i)
+ // The lhs and rhs clauses in a binary resolution step, respectively. Since
+ // children correspond to the premises in the resolution chain, the first
+ // lhs clause is the first premise, the first rhs clause is the second
+    // premise. Each subsequent lhs clause will be the result of the previous
+    // binary resolution step in the chain, while the subsequent rhs clauses
+    // will be the third, fourth, etc. premises, in that order.
+ std::vector<Node> lhsClause, rhsClause;
+ // The pivots to be eliminated to the lhs clause and rhs clause in a binary
+ // resolution step, respectively
+ Node lhsElim, rhsElim;
+ // Get lhsClause of first resolution.
+ //
+ // Since a Node cannot hold an OR with a single child we need to
+ // disambiguate singleton clauses that are OR nodes from non-singleton
+ // clauses (i.e. unit clauses in the SAT solver).
+ //
+ // If the child is not an OR, it is a singleton clause and we take the
+ // child itself as the clause. Otherwise the child can only be a singleton
+    // clause if the child itself is used as a resolution literal, i.e., if the
+    // first child is equal to the first pivot (which is args[1] or
+    // args[1].notNode(), depending on the polarity).
+ if (children[0].getKind() != kind::OR
+ || (args[0] == trueNode && children[0] == args[1])
+ || (args[0] == falseNode && children[0] == args[1].notNode()))
+ {
+ lhsClause.push_back(children[0]);
+ }
+ else
{
- // Literals to be removed from the current clause, according to this
- // clause being in the lhs or the rhs of a resolution. The first clause
- // has no rhsElim and the last clause has no lhsElim. The literal to be
- // eliminated depends ond the pivot and the polarity stored in the
- // arguments.
- Node lhsElim = Node::null();
- Node rhsElim = Node::null();
- if (i < childrenSize - 1)
+ lhsClause.insert(lhsClause.end(), children[0].begin(), children[0].end());
+ }
+    // Traverse the links, which amounts to, for each pair of args, removing a
+    // literal from the lhs clause and a literal from the rhs clause.
+ for (size_t i = 0, argsSize = args.size(); i < argsSize; i = i + 2)
+ {
+ // Polarity determines how the pivot occurs in lhs and rhs
+ if (args[i] == trueNode)
{
- std::size_t index = 2 * i;
- lhsElim = args[index] == trueNode ? args[index + 1]
- : args[index + 1].notNode();
- Trace("bool-pfcheck") << i << ": lhsElim: " << lhsElim << "\n";
+ lhsElim = args[i + 1];
+ rhsElim = args[i + 1].notNode();
}
- if (i > 0)
+ else
{
- std::size_t index = 2 * (i - 1);
- rhsElim = args[index] == trueNode ? args[index + 1].notNode()
- : args[index + 1];
- Trace("bool-pfcheck") << i << ": rhsElim: " << rhsElim << "\n";
+ Assert(args[i] == falseNode);
+ lhsElim = args[i + 1].notNode();
+ rhsElim = args[i + 1];
}
- // The current set of literals is what we had before plus those in the
- // current child.
- //
- // Since a Node cannot hold an OR with a single child we need to
- // disambiguate singleton clauses that are OR nodes from non-singleton
- // clauses (i.e. unit clauses in the SAT solver).
- //
- // If the child is not an OR, it is a singleton clause and we take the
- // child itself as the clause. Otherwise the child can only be a singleton
- // clause if the child itself is used as a resolution literal, i.e. if the
- // child equal to the lhsElim or to the rhsElim (which means that the
- // negation of the child is in lhsElim).
- std::vector<Node> lits{clauseNodes};
- if (children[i].getKind() == kind::OR && children[i] != lhsElim
- && children[i] != rhsElim)
+ // The index of the child corresponding to the current rhs clause
+ size_t childIndex = i / 2 + 1;
+ // Get rhs clause. It's a singleton if not an OR node or if equal to
+ // rhsElim
+ if (children[childIndex].getKind() != kind::OR
+ || children[childIndex] == rhsElim)
{
- lits.insert(lits.end(), children[i].begin(), children[i].end());
+ rhsClause.push_back(children[childIndex]);
}
else
{
- lits.push_back(children[i]);
+ rhsClause = {children[childIndex].begin(), children[childIndex].end()};
}
- Trace("bool-pfcheck") << i << ": clause lits: " << lits << "\n";
- // We now compute the set of literals minus those to be eliminated in this
- // step
- std::vector<Node> curr;
- for (std::size_t j = 0, size = lits.size(); j < size; ++j)
- {
- if (lits[j] == lhsElim)
- {
- lhsElim = Node::null();
- Trace("bool-pfcheck") << "\t removed lit: " << lits[j] << "\n";
- continue;
- }
- if (lits[j] == rhsElim)
- {
- rhsElim = Node::null();
- Trace("bool-pfcheck") << "\t removed lit: " << lits[j] << "\n";
- continue;
- }
- curr.push_back(lits[j]);
- }
- Trace("bool-pfcheck") << "\n";
- clauseNodes.clear();
- clauseNodes.insert(clauseNodes.end(), curr.begin(), curr.end());
- }
- Trace("bool-pfcheck") << "clause: " << clauseNodes << "\n" << pop;
- return nm->mkOr(clauseNodes);
+ Trace("bool-pfcheck") << i / 2 << "-th res link:\n";
+ Trace("bool-pfcheck") << "\t - lhsClause: " << lhsClause << "\n";
+ Trace("bool-pfcheck") << "\t\t - lhsElim: " << lhsElim << "\n";
+ Trace("bool-pfcheck") << "\t - rhsClause: " << rhsClause << "\n";
+ Trace("bool-pfcheck") << "\t\t - rhsElim: " << rhsElim << "\n";
+ // Compute the resulting clause, which will be the next lhsClause, as
+ // follows:
+ // - remove lhsElim from lhsClause
+ // - remove rhsElim from rhsClause and add the lits to lhsClause
+ auto itlhs = std::find(lhsClause.begin(), lhsClause.end(), lhsElim);
+ AlwaysAssert(itlhs != lhsClause.end());
+ lhsClause.erase(itlhs);
+ Trace("bool-pfcheck") << "\t.. after lhsClause: " << lhsClause << "\n";
+ auto itrhs = std::find(rhsClause.begin(), rhsClause.end(), rhsElim);
+ AlwaysAssert(itrhs != rhsClause.end());
+ lhsClause.insert(lhsClause.end(), rhsClause.begin(), itrhs);
+ lhsClause.insert(lhsClause.end(), itrhs + 1, rhsClause.end());
+ Trace("bool-pfcheck") << "\t.. after rhsClause: " << lhsClause << "\n";
+ rhsClause.clear();
+ }
+ Trace("bool-pfcheck") << "\n resulting clause: " << lhsClause << "\n"
+ << pop;
+ return nm->mkOr(lhsClause);
}
if (id == PfRule::MACRO_RESOLUTION_TRUST)
{
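
The chain of binary resolution steps that the reworked checker performs link by link can be sketched in a self-contained form as follows. This is a minimal sketch, not cvc5 code: literals are plain strings, a leading '~' marks negation, and the args[2k]/args[2k+1] polarity encoding is simplified to a list of pivots removed positively from the running clause.

#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

using Clause = std::vector<std::string>;

// A literal is negated by toggling a leading '~'.
std::string negateLit(const std::string& lit)
{
  return lit.rfind("~", 0) == 0 ? lit.substr(1) : "~" + lit;
}

// premises holds the clauses of the chain; pivots[i] is removed (positively)
// from the running lhs clause and (negatively) from premises[i + 1], and the
// surviving rhs literals are appended, mirroring the loop in the checker.
Clause chainResolve(const std::vector<Clause>& premises,
                    const std::vector<std::string>& pivots)
{
  assert(premises.size() == pivots.size() + 1);
  Clause lhs = premises[0];
  for (size_t i = 0; i < pivots.size(); ++i)
  {
    Clause rhs = premises[i + 1];
    auto itLhs = std::find(lhs.begin(), lhs.end(), pivots[i]);
    assert(itLhs != lhs.end());
    lhs.erase(itLhs);
    auto itRhs = std::find(rhs.begin(), rhs.end(), negateLit(pivots[i]));
    assert(itRhs != rhs.end());
    rhs.erase(itRhs);
    lhs.insert(lhs.end(), rhs.begin(), rhs.end());
  }
  return lhs;
}

int main()
{
  // Resolving (or a b), (or ~a c), (or ~b d) on pivots a and b yields (or c d).
  Clause res = chainResolve({{"a", "b"}, {"~a", "c"}, {"~b", "d"}}, {"a", "b"});
  for (const std::string& lit : res)
  {
    std::cout << lit << " ";  // prints: c d
  }
  std::cout << std::endl;
  return 0;
}
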
diff --git a/src/theory/booleans/theory_bool.cpp b/src/theory/booleans/theory_bool.cpp
index 33bb820b4..1327d1131 100644
--- a/src/theory/booleans/theory_bool.cpp
+++ b/src/theory/booleans/theory_bool.cpp
@@ -19,6 +19,7 @@
#include <vector>
#include "proof/proof_node_manager.h"
+#include "smt/logic_exception.h"
#include "smt_util/boolean_simplification.h"
#include "theory/booleans/circuit_propagator.h"
#include "theory/booleans/theory_bool_rewriter.h"
@@ -28,6 +29,8 @@
#include "theory/valuation.h"
#include "util/hash.h"
+using namespace cvc5::kind;
+
namespace cvc5 {
namespace theory {
namespace booleans {
@@ -66,6 +69,23 @@ Theory::PPAssertStatus TheoryBool::ppAssert(
return Theory::ppAssert(tin, outSubstitutions);
}
+TrustNode TheoryBool::ppRewrite(TNode n, std::vector<SkolemLemma>& lems)
+{
+ Trace("bool-ppr") << "TheoryBool::ppRewrite " << n << std::endl;
+ if (n.getKind() == ITE)
+ {
+ TypeNode tn = n.getType();
+ if (!tn.isFirstClass())
+ {
+ std::stringstream ss;
+ ss << "ITE branches of type " << tn << " are currently not supported."
+ << std::endl;
+ throw LogicException(ss.str());
+ }
+ }
+ return TrustNode::null();
+}
+
TheoryRewriter* TheoryBool::getTheoryRewriter() { return &d_rewriter; }
ProofRuleChecker* TheoryBool::getProofChecker() { return &d_checker; }
diff --git a/src/theory/booleans/theory_bool.h b/src/theory/booleans/theory_bool.h
index e0b7e6511..6c197d797 100644
--- a/src/theory/booleans/theory_bool.h
+++ b/src/theory/booleans/theory_bool.h
@@ -39,6 +39,8 @@ class TheoryBool : public Theory {
PPAssertStatus ppAssert(TrustNode tin,
TrustSubstitutionMap& outSubstitutions) override;
+ TrustNode ppRewrite(TNode n, std::vector<SkolemLemma>& lems) override;
+
std::string identify() const override;
private:
diff --git a/src/theory/inference_id.cpp b/src/theory/inference_id.cpp
index 28557e472..b50522834 100644
--- a/src/theory/inference_id.cpp
+++ b/src/theory/inference_id.cpp
@@ -103,18 +103,19 @@ const char* toString(InferenceId i)
return "ARRAYS_CONST_ARRAY_DEFAULT";
case InferenceId::ARRAYS_EQ_TAUTOLOGY: return "ARRAYS_EQ_TAUTOLOGY";
- case InferenceId::BAG_NON_NEGATIVE_COUNT: return "BAG_NON_NEGATIVE_COUNT";
- case InferenceId::BAG_MK_BAG_SAME_ELEMENT: return "BAG_MK_BAG_SAME_ELEMENT";
- case InferenceId::BAG_MK_BAG: return "BAG_MK_BAG";
- case InferenceId::BAG_EQUALITY: return "BAG_EQUALITY";
- case InferenceId::BAG_DISEQUALITY: return "BAG_DISEQUALITY";
- case InferenceId::BAG_EMPTY: return "BAG_EMPTY";
- case InferenceId::BAG_UNION_DISJOINT: return "BAG_UNION_DISJOINT";
- case InferenceId::BAG_UNION_MAX: return "BAG_UNION_MAX";
- case InferenceId::BAG_INTERSECTION_MIN: return "BAG_INTERSECTION_MIN";
- case InferenceId::BAG_DIFFERENCE_SUBTRACT: return "BAG_DIFFERENCE_SUBTRACT";
- case InferenceId::BAG_DIFFERENCE_REMOVE: return "BAG_DIFFERENCE_REMOVE";
- case InferenceId::BAG_DUPLICATE_REMOVAL: return "BAG_DUPLICATE_REMOVAL";
+ case InferenceId::BAGS_NON_NEGATIVE_COUNT: return "BAGS_NON_NEGATIVE_COUNT";
+ case InferenceId::BAGS_MK_BAG_SAME_ELEMENT: return "BAGS_MK_BAG_SAME_ELEMENT";
+ case InferenceId::BAGS_MK_BAG: return "BAGS_MK_BAG";
+ case InferenceId::BAGS_EQUALITY: return "BAGS_EQUALITY";
+ case InferenceId::BAGS_DISEQUALITY: return "BAGS_DISEQUALITY";
+ case InferenceId::BAGS_EMPTY: return "BAGS_EMPTY";
+ case InferenceId::BAGS_UNION_DISJOINT: return "BAGS_UNION_DISJOINT";
+ case InferenceId::BAGS_UNION_MAX: return "BAGS_UNION_MAX";
+ case InferenceId::BAGS_INTERSECTION_MIN: return "BAGS_INTERSECTION_MIN";
+ case InferenceId::BAGS_DIFFERENCE_SUBTRACT: return "BAGS_DIFFERENCE_SUBTRACT";
+ case InferenceId::BAGS_DIFFERENCE_REMOVE: return "BAGS_DIFFERENCE_REMOVE";
+ case InferenceId::BAGS_DUPLICATE_REMOVAL: return "BAGS_DUPLICATE_REMOVAL";
+ case InferenceId::BAGS_MAP: return "BAGS_MAP";
case InferenceId::BV_BITBLAST_CONFLICT: return "BV_BITBLAST_CONFLICT";
case InferenceId::BV_BITBLAST_INTERNAL_EAGER_LEMMA:
@@ -340,6 +341,8 @@ const char* toString(InferenceId i)
case InferenceId::SETS_RELS_JOIN_IMAGE_UP: return "SETS_RELS_JOIN_IMAGE_UP";
case InferenceId::SETS_RELS_JOIN_SPLIT_1: return "SETS_RELS_JOIN_SPLIT_1";
case InferenceId::SETS_RELS_JOIN_SPLIT_2: return "SETS_RELS_JOIN_SPLIT_2";
+ case InferenceId::SETS_RELS_JOIN_ELEM_SPLIT:
+ return "SETS_RELS_JOIN_ELEM_SPLIT";
case InferenceId::SETS_RELS_PRODUCE_COMPOSE:
return "SETS_RELS_PRODUCE_COMPOSE";
case InferenceId::SETS_RELS_PRODUCT_SPLIT: return "SETS_RELS_PRODUCT_SPLIT";
@@ -429,6 +432,8 @@ const char* toString(InferenceId i)
case InferenceId::STRINGS_CTN_POS: return "STRINGS_CTN_POS";
case InferenceId::STRINGS_REDUCTION: return "STRINGS_REDUCTION";
case InferenceId::STRINGS_PREFIX_CONFLICT: return "STRINGS_PREFIX_CONFLICT";
+ case InferenceId::STRINGS_ARITH_BOUND_CONFLICT:
+ return "STRINGS_ARITH_BOUND_CONFLICT";
case InferenceId::STRINGS_REGISTER_TERM_ATOMIC:
return "STRINGS_REGISTER_TERM_ATOMIC";
case InferenceId::STRINGS_REGISTER_TERM: return "STRINGS_REGISTER_TERM";
diff --git a/src/theory/inference_id.h b/src/theory/inference_id.h
index 50c063651..3c644e545 100644
--- a/src/theory/inference_id.h
+++ b/src/theory/inference_id.h
@@ -168,18 +168,19 @@ enum class InferenceId
// ---------------------------------- end arrays theory
// ---------------------------------- bags theory
- BAG_NON_NEGATIVE_COUNT,
- BAG_MK_BAG_SAME_ELEMENT,
- BAG_MK_BAG,
- BAG_EQUALITY,
- BAG_DISEQUALITY,
- BAG_EMPTY,
- BAG_UNION_DISJOINT,
- BAG_UNION_MAX,
- BAG_INTERSECTION_MIN,
- BAG_DIFFERENCE_SUBTRACT,
- BAG_DIFFERENCE_REMOVE,
- BAG_DUPLICATE_REMOVAL,
+ BAGS_NON_NEGATIVE_COUNT,
+ BAGS_MK_BAG_SAME_ELEMENT,
+ BAGS_MK_BAG,
+ BAGS_EQUALITY,
+ BAGS_DISEQUALITY,
+ BAGS_EMPTY,
+ BAGS_UNION_DISJOINT,
+ BAGS_UNION_MAX,
+ BAGS_INTERSECTION_MIN,
+ BAGS_DIFFERENCE_SUBTRACT,
+ BAGS_DIFFERENCE_REMOVE,
+ BAGS_DUPLICATE_REMOVAL,
+ BAGS_MAP,
// ---------------------------------- end bags theory
// ---------------------------------- bitvector theory
@@ -490,6 +491,7 @@ enum class InferenceId
SETS_RELS_JOIN_IMAGE_UP,
SETS_RELS_JOIN_SPLIT_1,
SETS_RELS_JOIN_SPLIT_2,
+ SETS_RELS_JOIN_ELEM_SPLIT,
SETS_RELS_PRODUCE_COMPOSE,
SETS_RELS_PRODUCT_SPLIT,
SETS_RELS_TCLOSURE_FWD,
@@ -772,9 +774,11 @@ enum class InferenceId
// f(x1, .., xn) and P is the reduction predicate for f
// (see theory_strings_preprocess).
STRINGS_REDUCTION,
- //-------------------- prefix conflict
- // prefix conflict (coarse-grained)
+ //-------------------- merge conflicts
+ // prefix conflict
STRINGS_PREFIX_CONFLICT,
+ // arithmetic bound conflict
+ STRINGS_ARITH_BOUND_CONFLICT,
//-------------------- other
// a lemma added during term registration for an atomic term
STRINGS_REGISTER_TERM_ATOMIC,
diff --git a/src/theory/quantifiers/expr_miner.cpp b/src/theory/quantifiers/expr_miner.cpp
index fad95612f..b0dd61d33 100644
--- a/src/theory/quantifiers/expr_miner.cpp
+++ b/src/theory/quantifiers/expr_miner.cpp
@@ -62,10 +62,13 @@ void ExprMiner::initializeChecker(std::unique_ptr<SolverEngine>& checker,
const LogicInfo& logicInfo)
{
Assert (!query.isNull());
- if (Options::current().quantifiers.sygusExprMinerCheckTimeoutWasSetByUser)
+ if (options().quantifiers.sygusExprMinerCheckTimeoutWasSetByUser)
{
- initializeSubsolver(
- checker, opts, logicInfo, true, options::sygusExprMinerCheckTimeout());
+ initializeSubsolver(checker,
+ opts,
+ logicInfo,
+ true,
+ options().quantifiers.sygusExprMinerCheckTimeout);
}
else
{
diff --git a/src/theory/quantifiers/expr_miner_manager.cpp b/src/theory/quantifiers/expr_miner_manager.cpp
index e53fd9424..d5fa7fbdf 100644
--- a/src/theory/quantifiers/expr_miner_manager.cpp
+++ b/src/theory/quantifiers/expr_miner_manager.cpp
@@ -30,10 +30,9 @@ ExpressionMinerManager::ExpressionMinerManager(Env& env)
d_use_sygus_type(false),
d_tds(nullptr),
d_crd(env,
- options::sygusRewSynthCheck(),
- options::sygusRewSynthAccel(),
+ options().quantifiers.sygusRewSynthCheck,
+ options().quantifiers.sygusRewSynthAccel,
false),
- d_qg(env),
d_sols(env),
d_sampler(env)
{
@@ -69,6 +68,32 @@ void ExpressionMinerManager::initializeSygus(TermDbSygus* tds,
d_sampler.initializeSygus(d_tds, f, nsamples, useSygusType);
}
+void ExpressionMinerManager::initializeMinersForOptions()
+{
+ if (options().quantifiers.sygusRewSynth)
+ {
+ enableRewriteRuleSynth();
+ }
+ if (options().quantifiers.sygusQueryGen != options::SygusQueryGenMode::NONE)
+ {
+ enableQueryGeneration(options().quantifiers.sygusQueryGenThresh);
+ }
+ if (options().quantifiers.sygusFilterSolMode
+ != options::SygusFilterSolMode::NONE)
+ {
+ if (options().quantifiers.sygusFilterSolMode
+ == options::SygusFilterSolMode::STRONG)
+ {
+ enableFilterStrongSolutions();
+ }
+ else if (options().quantifiers.sygusFilterSolMode
+ == options::SygusFilterSolMode::WEAK)
+ {
+ enableFilterWeakSolutions();
+ }
+ }
+}
+
void ExpressionMinerManager::enableRewriteRuleSynth()
{
if (d_doRewSynth)
@@ -101,18 +126,29 @@ void ExpressionMinerManager::enableQueryGeneration(unsigned deqThresh)
return;
}
d_doQueryGen = true;
+ options::SygusQueryGenMode mode = options().quantifiers.sygusQueryGen;
std::vector<Node> vars;
d_sampler.getVariables(vars);
- // must also enable rewrite rule synthesis
- if (!d_doRewSynth)
+ if (mode == options::SygusQueryGenMode::SAT)
{
- // initialize the candidate rewrite database, in silent mode
- enableRewriteRuleSynth();
- d_crd.setSilent(true);
+ // must also enable rewrite rule synthesis
+ if (!d_doRewSynth)
+ {
+ // initialize the candidate rewrite database, in silent mode
+ enableRewriteRuleSynth();
+ d_crd.setSilent(true);
+ }
+ d_qg = std::make_unique<QueryGenerator>(d_env);
+ // initialize the query generator
+ d_qg->initialize(vars, &d_sampler);
+ d_qg->setThreshold(deqThresh);
+ }
+ else if (mode == options::SygusQueryGenMode::UNSAT)
+ {
+ d_qgu = std::make_unique<QueryGeneratorUnsat>(d_env);
+ // initialize the query generator
+ d_qgu->initialize(vars, &d_sampler);
}
- // initialize the query generator
- d_qg.initialize(vars, &d_sampler);
- d_qg.setThreshold(deqThresh);
}
void ExpressionMinerManager::enableFilterWeakSolutions()
@@ -148,14 +184,23 @@ bool ExpressionMinerManager::addTerm(Node sol,
bool ret = true;
if (d_doRewSynth)
{
- Node rsol = d_crd.addTerm(sol, options::sygusRewSynthRec(), out, rew_print);
+ Node rsol = d_crd.addTerm(
+ sol, options().quantifiers.sygusRewSynthRec, out, rew_print);
ret = (sol == rsol);
}
// a unique term, let's try the query generator
if (ret && d_doQueryGen)
{
- d_qg.addTerm(solb, out);
+ options::SygusQueryGenMode mode = options().quantifiers.sygusQueryGen;
+ if (mode == options::SygusQueryGenMode::SAT)
+ {
+ d_qg->addTerm(solb, out);
+ }
+ else if (mode == options::SygusQueryGenMode::UNSAT)
+ {
+ d_qgu->addTerm(solb, out);
+ }
}
// filter based on logical strength
diff --git a/src/theory/quantifiers/expr_miner_manager.h b/src/theory/quantifiers/expr_miner_manager.h
index 43a615c97..9d0352408 100644
--- a/src/theory/quantifiers/expr_miner_manager.h
+++ b/src/theory/quantifiers/expr_miner_manager.h
@@ -22,6 +22,7 @@
#include "smt/env_obj.h"
#include "theory/quantifiers/candidate_rewrite_database.h"
#include "theory/quantifiers/query_generator.h"
+#include "theory/quantifiers/query_generator_unsat.h"
#include "theory/quantifiers/solution_filter.h"
#include "theory/quantifiers/sygus_sampler.h"
@@ -67,14 +68,8 @@ class ExpressionMinerManager : protected EnvObj
Node f,
unsigned nsamples,
bool useSygusType);
- /** enable rewrite rule synthesis (--sygus-rr-synth) */
- void enableRewriteRuleSynth();
- /** enable query generation (--sygus-query-gen) */
- void enableQueryGeneration(unsigned deqThresh);
- /** filter strong solutions (--sygus-filter-sol=strong) */
- void enableFilterStrongSolutions();
- /** filter weak solutions (--sygus-filter-sol=weak) */
- void enableFilterWeakSolutions();
+  /** initialize the expression miners enabled by the current options */
+ void initializeMinersForOptions();
/** add term
*
* Expression miners may print information on the output stream out, for
@@ -90,6 +85,14 @@ class ExpressionMinerManager : protected EnvObj
bool addTerm(Node sol, std::ostream& out, bool& rew_print);
private:
+ /** enable rewrite rule synthesis (--sygus-rr-synth) */
+ void enableRewriteRuleSynth();
+ /** enable query generation (--sygus-query-gen) */
+ void enableQueryGeneration(unsigned deqThresh);
+ /** filter strong solutions (--sygus-filter-sol=strong) */
+ void enableFilterStrongSolutions();
+ /** filter weak solutions (--sygus-filter-sol=weak) */
+ void enableFilterWeakSolutions();
/** whether we are doing rewrite synthesis */
bool d_doRewSynth;
/** whether we are doing query generation */
@@ -105,7 +108,9 @@ class ExpressionMinerManager : protected EnvObj
/** candidate rewrite database */
CandidateRewriteDatabase d_crd;
/** query generator */
- QueryGenerator d_qg;
+ std::unique_ptr<QueryGenerator> d_qg;
+  /** query generator for unsat queries */
+ std::unique_ptr<QueryGeneratorUnsat> d_qgu;
/** solution filter based on logical strength */
SolutionFilterStrength d_sols;
/** sygus sampler object */
diff --git a/src/theory/quantifiers/extended_rewrite.cpp b/src/theory/quantifiers/extended_rewrite.cpp
index f5883c265..3d20f66b3 100644
--- a/src/theory/quantifiers/extended_rewrite.cpp
+++ b/src/theory/quantifiers/extended_rewrite.cpp
@@ -22,7 +22,9 @@
#include "theory/datatypes/datatypes_rewriter.h"
#include "theory/quantifiers/term_util.h"
#include "theory/rewriter.h"
+#include "theory/strings/arith_entail.h"
#include "theory/strings/sequences_rewriter.h"
+#include "theory/strings/word.h"
#include "theory/theory.h"
using namespace cvc5::kind;
@@ -47,6 +49,7 @@ ExtendedRewriter::ExtendedRewriter(Rewriter& rew, bool aggr)
{
d_true = NodeManager::currentNM()->mkConst(true);
d_false = NodeManager::currentNM()->mkConst(false);
+ d_zero = NodeManager::currentNM()->mkConst(Rational(0));
}
void ExtendedRewriter::setCache(Node n, Node ret) const
@@ -1702,19 +1705,50 @@ bool ExtendedRewriter::inferSubstitution(Node n,
return false;
}
-Node ExtendedRewriter::extendedRewriteStrings(Node ret) const
+Node ExtendedRewriter::extendedRewriteStrings(Node node) const
{
- Node new_ret;
Trace("q-ext-rewrite-debug")
- << "Extended rewrite strings : " << ret << std::endl;
+ << "Extended rewrite strings : " << node << std::endl;
- if (ret.getKind() == EQUAL)
+ Kind k = node.getKind();
+ if (k == EQUAL)
{
strings::SequencesRewriter sr(&d_rew, nullptr);
- new_ret = sr.rewriteEqualityExt(ret);
+ return sr.rewriteEqualityExt(node);
}
+ else if (k == STRING_SUBSTR)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tot_len = d_rew.rewrite(nm->mkNode(STRING_LENGTH, node[0]));
+ strings::ArithEntail aent(&d_rew);
+ // (str.substr s x y) --> "" if x < len(s) |= 0 >= y
+ Node n1_lt_tot_len = d_rew.rewrite(nm->mkNode(LT, node[1], tot_len));
+ if (aent.checkWithAssumption(n1_lt_tot_len, d_zero, node[2], false))
+ {
+ Node ret = strings::Word::mkEmptyWord(node.getType());
+ debugExtendedRewrite(node, ret, "SS_START_ENTAILS_ZERO_LEN");
+ return ret;
+ }
- return new_ret;
+ // (str.substr s x y) --> "" if 0 < y |= x >= str.len(s)
+ Node non_zero_len = d_rew.rewrite(nm->mkNode(LT, d_zero, node[2]));
+ if (aent.checkWithAssumption(non_zero_len, node[1], tot_len, false))
+ {
+ Node ret = strings::Word::mkEmptyWord(node.getType());
+ debugExtendedRewrite(node, ret, "SS_NON_ZERO_LEN_ENTAILS_OOB");
+ return ret;
+ }
+ // (str.substr s x y) --> "" if x >= 0 |= 0 >= str.len(s)
+ Node geq_zero_start = d_rew.rewrite(nm->mkNode(GEQ, node[1], d_zero));
+ if (aent.checkWithAssumption(geq_zero_start, d_zero, tot_len, false))
+ {
+ Node ret = strings::Word::mkEmptyWord(node.getType());
+ debugExtendedRewrite(node, ret, "SS_GEQ_ZERO_START_ENTAILS_EMP_S");
+ return ret;
+ }
+ }
+
+ return Node::null();
}
void ExtendedRewriter::debugExtendedRewrite(Node n,
diff --git a/src/theory/quantifiers/extended_rewrite.h b/src/theory/quantifiers/extended_rewrite.h
index b4dcab041..1b9d0dae4 100644
--- a/src/theory/quantifiers/extended_rewrite.h
+++ b/src/theory/quantifiers/extended_rewrite.h
@@ -259,9 +259,10 @@ class ExtendedRewriter
* may be applied as a preprocessing step.
*/
bool d_aggr;
- /** true/false nodes */
+ /** Common constant nodes */
Node d_true;
Node d_false;
+ Node d_zero;
};
} // namespace quantifiers
diff --git a/src/theory/quantifiers/query_generator_unsat.cpp b/src/theory/quantifiers/query_generator_unsat.cpp
new file mode 100644
index 000000000..ae7288080
--- /dev/null
+++ b/src/theory/quantifiers/query_generator_unsat.cpp
@@ -0,0 +1,178 @@
+/******************************************************************************
+ * Top contributors (to current version):
+ * Andrew Reynolds
+ *
+ * This file is part of the cvc5 project.
+ *
+ * Copyright (c) 2009-2021 by the authors listed in the file AUTHORS
+ * in the top-level source directory and their institutional affiliations.
+ * All rights reserved. See the file COPYING in the top-level source
+ * directory for licensing information.
+ * ****************************************************************************
+ *
+ * A class for mining interesting unsat queries from a stream of generated
+ * expressions.
+ */
+
+#include "theory/quantifiers/query_generator_unsat.h"
+
+#include "options/smt_options.h"
+#include "smt/env.h"
+#include "util/random.h"
+
+namespace cvc5 {
+namespace theory {
+namespace quantifiers {
+
+QueryGeneratorUnsat::QueryGeneratorUnsat(Env& env)
+ : ExprMiner(env), d_queryCount(0)
+{
+ d_true = NodeManager::currentNM()->mkConst(true);
+ d_false = NodeManager::currentNM()->mkConst(false);
+ // determine the options to use for the verification subsolvers we spawn
+ // we start with the provided options
+ d_subOptions.copyValues(d_env.getOriginalOptions());
+ d_subOptions.smt.produceProofs = true;
+ d_subOptions.smt.checkProofs = true;
+ d_subOptions.smt.produceModels = true;
+ d_subOptions.smt.checkModels = true;
+}
+
+void QueryGeneratorUnsat::initialize(const std::vector<Node>& vars,
+ SygusSampler* ss)
+{
+ Assert(ss != nullptr);
+ d_queryCount = 0;
+ ExprMiner::initialize(vars, ss);
+}
+
+bool QueryGeneratorUnsat::addTerm(Node n, std::ostream& out)
+{
+ d_terms.push_back(n);
+ Trace("sygus-qgen") << "Add term: " << n << std::endl;
+ Assert(n.getType().isBoolean());
+
+  // The loop below conjoins a random subset of the predicates enumerated so
+  // far, C1 ^ ... ^ Cn, such that no subset of C1 ... Cn is a previously
+  // encountered unsat core, and each appended Ci is false in a model of
+  // C1 ^ ... ^ C_{i-1}.
+ std::vector<Node> currModel;
+ std::unordered_set<size_t> processed;
+ std::vector<Node> activeTerms;
+ // always start with the new term
+ processed.insert(d_terms.size() - 1);
+ activeTerms.push_back(n);
+ bool addSuccess = true;
+ size_t checkCount = 0;
+ while (checkCount < 10)
+ {
+ Assert(!activeTerms.empty());
+ // if we just successfully added a term, do a satisfiability check
+ if (addSuccess)
+ {
+ checkCount++;
+      // check the current set of active terms for satisfiability
+ currModel.clear();
+ // Shuffle active terms to maximize the different possible behaviors
+ // in the subsolver. This is instead of making multiple queries with
+ // the same assertion order for a subsequence.
+ std::shuffle(activeTerms.begin(), activeTerms.end(), Random::getRandom());
+ Result r = checkCurrent(activeTerms, out, currModel);
+ if (r.asSatisfiabilityResult().isSat() == Result::UNSAT)
+ {
+ // exclude the last active term
+ activeTerms.pop_back();
+ }
+ }
+ if (processed.size() == d_terms.size())
+ {
+ break;
+ }
+ // activeTerms is satisfiable, add a new term
+ size_t rindex = getNextRandomIndex(processed);
+ Assert(rindex < d_terms.size());
+ processed.insert(rindex);
+ Node nextTerm = d_terms[rindex];
+    // immediately check if it is satisfied by the current model using the
+    // evaluator; if so, don't conjoin the term.
+ Node newTermEval;
+ if (!currModel.empty())
+ {
+ Node nextTermSk = convertToSkolem(nextTerm);
+ newTermEval = evaluate(nextTermSk, d_skolems, currModel);
+ }
+ if (newTermEval == d_true)
+ {
+ Trace("sygus-qgen-check-debug")
+ << "...already satisfied " << convertToSkolem(nextTerm)
+ << " for model " << d_skolems << " " << currModel << std::endl;
+ addSuccess = false;
+ }
+ else
+ {
+ Trace("sygus-qgen-check-debug")
+ << "...not satisfied " << convertToSkolem(nextTerm) << " for model "
+ << d_skolems << " " << currModel << std::endl;
+ activeTerms.push_back(nextTerm);
+ addSuccess = !d_cores.hasSubset(activeTerms);
+ if (!addSuccess)
+ {
+ Trace("sygus-qgen-check-debug")
+ << "...already has unsat core " << nextTerm << std::endl;
+ activeTerms.pop_back();
+ }
+ }
+ }
+
+ return true;
+}
+
+Result QueryGeneratorUnsat::checkCurrent(const std::vector<Node>& activeTerms,
+ std::ostream& out,
+ std::vector<Node>& currModel)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Node qy = nm->mkAnd(activeTerms);
+ Trace("sygus-qgen-check") << "Check: " << qy << std::endl;
+ out << "(query " << qy << ")" << std::endl;
+ std::unique_ptr<SolverEngine> queryChecker;
+ initializeChecker(queryChecker, qy, d_subOptions, logicInfo());
+ Result r = queryChecker->checkSat();
+ Trace("sygus-qgen-check") << "..finished check got " << r << std::endl;
+ if (r.asSatisfiabilityResult().isSat() == Result::UNSAT)
+ {
+ // if unsat, get the unsat core
+ std::vector<Node> unsatCore;
+ getUnsatCoreFromSubsolver(*queryChecker.get(), unsatCore);
+ Assert(!unsatCore.empty());
+ Trace("sygus-qgen-check") << "...unsat core: " << unsatCore << std::endl;
+ d_cores.add(d_false, unsatCore);
+ }
+ else if (r.asSatisfiabilityResult().isSat() == Result::SAT)
+ {
+ getModelFromSubsolver(*queryChecker.get(), d_skolems, currModel);
+ Trace("sygus-qgen-check") << "...model: " << currModel << std::endl;
+ }
+ return r;
+}
+
+size_t QueryGeneratorUnsat::getNextRandomIndex(
+ const std::unordered_set<size_t>& processed) const
+{
+ Assert(!d_terms.empty());
+ Assert(processed.size() < d_terms.size());
+ size_t rindex = Random::getRandom().pick(0, d_terms.size() - 1);
+ while (processed.find(rindex) != processed.end())
+ {
+ rindex++;
+ if (rindex == d_terms.size())
+ {
+ rindex = 0;
+ }
+ }
+ return rindex;
+}
+
+} // namespace quantifiers
+} // namespace theory
+} // namespace cvc5
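Note: the subset test d_cores.hasSubset(activeTerms) above is what prevents regenerating a conjunction that is already known to be unsatisfiable: a candidate set of predicates is rejected if some previously recorded unsat core is contained in it. A self-contained sketch of that semantics (VariadicTrie stores the cores as a trie; here a plain vector of sets stands in, and ints stand in for Nodes):

```cpp
#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

using Core = std::set<int>;

// Returns true if some recorded unsat core is a subset of the candidate
// active set, i.e. conjoining these predicates is already known to be unsat.
bool hasSubset(const std::vector<Core>& cores, const Core& active)
{
  for (const Core& c : cores)
  {
    if (std::includes(active.begin(), active.end(), c.begin(), c.end()))
    {
      return true;
    }
  }
  return false;
}

int main()
{
  std::vector<Core> cores;
  cores.push_back({1, 3});  // a previous check found {C1, C3} unsatisfiable
  std::cout << hasSubset(cores, {1, 2, 3}) << "\n";  // 1: contains {1, 3}
  std::cout << hasSubset(cores, {1, 2}) << "\n";     // 0: worth checking
  return 0;
}
```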
diff --git a/src/theory/quantifiers/query_generator_unsat.h b/src/theory/quantifiers/query_generator_unsat.h
new file mode 100644
index 000000000..d03d8efb9
--- /dev/null
+++ b/src/theory/quantifiers/query_generator_unsat.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ * Top contributors (to current version):
+ * Andrew Reynolds
+ *
+ * This file is part of the cvc5 project.
+ *
+ * Copyright (c) 2009-2021 by the authors listed in the file AUTHORS
+ * in the top-level source directory and their institutional affiliations.
+ * All rights reserved. See the file COPYING in the top-level source
+ * directory for licensing information.
+ * ****************************************************************************
+ *
+ * A class for mining interesting unsat queries from a stream of generated
+ * expressions.
+ */
+
+#include "cvc5_private.h"
+
+#ifndef CVC5__THEORY__QUANTIFIERS__QUERY_GENERATOR_UNSAT_H
+#define CVC5__THEORY__QUANTIFIERS__QUERY_GENERATOR_UNSAT_H
+
+#include <map>
+#include <unordered_set>
+
+#include "expr/node.h"
+#include "expr/variadic_trie.h"
+#include "options/options.h"
+#include "theory/quantifiers/expr_miner.h"
+#include "theory/quantifiers/lazy_trie.h"
+#include "theory/quantifiers/sygus_sampler.h"
+
+namespace cvc5 {
+namespace theory {
+namespace quantifiers {
+
+/**
+ * QueryGeneratorUnsat
+ *
+ * A module for generating interesting unsatisfiable benchmarks using SyGuS
+ * enumeration. At a high level, this is based on conjoining predicates that
+ * refine models and avoid previously encountered unsat cores.
+ */
+class QueryGeneratorUnsat : public ExprMiner
+{
+ public:
+ QueryGeneratorUnsat(Env& env);
+ ~QueryGeneratorUnsat() {}
+ /** initialize */
+ void initialize(const std::vector<Node>& vars,
+ SygusSampler* ss = nullptr) override;
+ /**
+ * Add term to this module. This may trigger the printing and/or checking of
+ * new queries.
+ */
+ bool addTerm(Node n, std::ostream& out) override;
+
+ private:
+ /**
+ * Check current query, given by conjunction activeTerms. The generated
+ * query is printed on out. If this is UNSAT, we add its unsat core to
+ * d_cores. If it is satisfiable, we add its model to currModel for
+ * its free variables (which are ExprMiner::d_skolems).
+ */
+ Result checkCurrent(const std::vector<Node>& activeTerms,
+ std::ostream& out,
+ std::vector<Node>& currModel);
+ /**
+   * Get next random index, i.e. a random index in [0, d_terms.size()-1]
+ * that is not already in processed.
+ */
+ size_t getNextRandomIndex(const std::unordered_set<size_t>& processed) const;
+ /** Constant nodes */
+ Node d_true;
+ Node d_false;
+ /** cache of all terms registered to this generator */
+ std::vector<Node> d_terms;
+ /** total number of queries generated by this class */
+ size_t d_queryCount;
+  /** the trie containing the unsat cores encountered so far */
+ VariadicTrie d_cores;
+ /** The options for subsolver calls */
+ Options d_subOptions;
+};
+
+} // namespace quantifiers
+} // namespace theory
+} // namespace cvc5
+
+#endif /* CVC5__THEORY__QUANTIFIERS__QUERY_GENERATOR_UNSAT_H */
diff --git a/src/theory/quantifiers/sygus/synth_conjecture.cpp b/src/theory/quantifiers/sygus/synth_conjecture.cpp
index 0119f44aa..805f19b34 100644
--- a/src/theory/quantifiers/sygus/synth_conjecture.cpp
+++ b/src/theory/quantifiers/sygus/synth_conjecture.cpp
@@ -805,7 +805,9 @@ void SynthConjecture::printSynthSolutionInternal(std::ostream& out)
bool is_unique_term = true;
if (status != 0
- && (options::sygusRewSynth() || options::sygusQueryGen()
+ && (options::sygusRewSynth()
+ || options().quantifiers.sygusQueryGen
+ != options::SygusQueryGenMode::NONE
|| options::sygusFilterSolMode()
!= options::SygusFilterSolMode::NONE))
{
@@ -818,28 +820,7 @@ void SynthConjecture::printSynthSolutionInternal(std::ostream& out)
ExpressionMinerManager* emm = d_exprm[prog].get();
emm->initializeSygus(
d_tds, d_candidates[i], options::sygusSamples(), true);
- if (options::sygusRewSynth())
- {
- emm->enableRewriteRuleSynth();
- }
- if (options::sygusQueryGen())
- {
- emm->enableQueryGeneration(options::sygusQueryGenThresh());
- }
- if (options::sygusFilterSolMode()
- != options::SygusFilterSolMode::NONE)
- {
- if (options::sygusFilterSolMode()
- == options::SygusFilterSolMode::STRONG)
- {
- emm->enableFilterStrongSolutions();
- }
- else if (options::sygusFilterSolMode()
- == options::SygusFilterSolMode::WEAK)
- {
- emm->enableFilterWeakSolutions();
- }
- }
+ emm->initializeMinersForOptions();
its = d_exprm.find(prog);
}
bool rew_print = false;
diff --git a/src/theory/sets/theory_sets_rels.cpp b/src/theory/sets/theory_sets_rels.cpp
index f6ea88b75..a4028d8fb 100644
--- a/src/theory/sets/theory_sets_rels.cpp
+++ b/src/theory/sets/theory_sets_rels.cpp
@@ -1039,9 +1039,15 @@ void TheorySetsRels::check(Theory::Effort level)
}
/*
- * Explicitly compose the join or product relations of r1 and r2
- * e.g. If (a, b) in X and (b, c) in Y, (a, c) in (X JOIN Y)
+ * Explicitly compose the join or product relations of r1 and r2. For example,
+ * consider the case that (a, b) in r1, (c, d) in r2.
*
+ * For JOIN, we have three cases:
+ *    if b = c, we infer (a, d) in (join r1 r2)
+ *    if b != c, we do nothing
+ *    otherwise, if neither is known, we add the splitting lemma (b=c or b!=c)
+ *
+ * For PRODUCT, we infer (a, b, c, d) in (product r1 r2).
*/
void TheorySetsRels::composeMembersForRels( Node rel ) {
Trace("rels-debug") << "[Theory::Rels] Start composing members for relation = " << rel << std::endl;
@@ -1058,26 +1064,48 @@ void TheorySetsRels::check(Theory::Effort level)
std::vector<Node> r1_rep_exps = d_rReps_memberReps_exp_cache[r1_rep];
std::vector<Node> r2_rep_exps = d_rReps_memberReps_exp_cache[r2_rep];
- unsigned int r1_tuple_len = r1.getType().getSetElementType().getTupleLength();
- unsigned int r2_tuple_len = r2.getType().getSetElementType().getTupleLength();
+ size_t r1_tuple_len = r1.getType().getSetElementType().getTupleLength();
+ size_t r2_tuple_len = r2.getType().getSetElementType().getTupleLength();
+ Kind rk = rel.getKind();
+ TypeNode tn = rel.getType().getSetElementType();
for( unsigned int i = 0; i < r1_rep_exps.size(); i++ ) {
for( unsigned int j = 0; j < r2_rep_exps.size(); j++ ) {
std::vector<Node> tuple_elements;
- TypeNode tn = rel.getType().getSetElementType();
- Node r1_rmost = RelsUtils::nthElementOfTuple( r1_rep_exps[i][0], r1_tuple_len-1 );
- Node r2_lmost = RelsUtils::nthElementOfTuple( r2_rep_exps[j][0], 0 );
tuple_elements.push_back(tn.getDType()[0].getConstructor());
+ std::vector<Node> reasons;
+ if (rk == kind::JOIN)
+ {
+ Node r1_rmost =
+ RelsUtils::nthElementOfTuple(r1_rep_exps[i][0], r1_tuple_len - 1);
+ Node r2_lmost = RelsUtils::nthElementOfTuple(r2_rep_exps[j][0], 0);
+
+ Trace("rels-debug") << "[Theory::Rels] r1_rmost: " << r1_rmost
+ << " of type " << r1_rmost.getType() << std::endl;
+ Trace("rels-debug") << "[Theory::Rels] r2_lmost: " << r2_lmost
+ << " of type " << r2_lmost.getType() << std::endl;
+ if (!areEqual(r1_rmost, r2_lmost))
+ {
+ if (!d_state.areDisequal(r1_rmost, r2_lmost))
+ {
+              // If we have (a,b) in R1, (c,d) in R2, and we are considering
+              // join(R1, R2), we must split on b=c if b and c are neither
+              // known to be equal nor known to be disequal.
+ Node eq = r1_rmost.eqNode(r2_lmost);
+ Node lem = nm->mkNode(kind::OR, eq, eq.negate());
+ d_im.addPendingLemma(lem, InferenceId::SETS_RELS_JOIN_ELEM_SPLIT);
+ }
+ continue;
+ }
+ else if (r1_rmost != r2_lmost)
+ {
+ reasons.push_back(nm->mkNode(kind::EQUAL, r1_rmost, r2_lmost));
+ }
+ }
- Trace("rels-debug") << "[Theory::Rels] r1_rmost: " << r1_rmost
- << " of type " << r1_rmost.getType() << std::endl;
- Trace("rels-debug") << "[Theory::Rels] r2_lmost: " << r2_lmost
- << " of type " << r2_lmost.getType() << std::endl;
-
- if (rel.getKind() == kind::PRODUCT
- || (rel.getKind() == kind::JOIN && areEqual(r1_rmost, r2_lmost)))
+ if (rk == kind::PRODUCT || rk == kind::JOIN)
{
- bool isProduct = rel.getKind() == kind::PRODUCT;
+ bool isProduct = rk == kind::PRODUCT;
unsigned int k = 0;
unsigned int l = 1;
@@ -1092,25 +1120,22 @@ void TheorySetsRels::check(Theory::Effort level)
tuple_elements.push_back( RelsUtils::nthElementOfTuple( r2_rep_exps[j][0], l ) );
}
- Node composed_tuple = NodeManager::currentNM()->mkNode(kind::APPLY_CONSTRUCTOR, tuple_elements);
- Node fact = NodeManager::currentNM()->mkNode(kind::MEMBER, composed_tuple, rel);
- std::vector<Node> reasons;
+ Node composed_tuple =
+ nm->mkNode(kind::APPLY_CONSTRUCTOR, tuple_elements);
+ Node fact = nm->mkNode(kind::MEMBER, composed_tuple, rel);
reasons.push_back( r1_rep_exps[i] );
reasons.push_back( r2_rep_exps[j] );
if( r1 != r1_rep_exps[i][1] ) {
- reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r1, r1_rep_exps[i][1]) );
+ reasons.push_back(nm->mkNode(kind::EQUAL, r1, r1_rep_exps[i][1]));
}
if( r2 != r2_rep_exps[j][1] ) {
- reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r2, r2_rep_exps[j][1]) );
+ reasons.push_back(nm->mkNode(kind::EQUAL, r2, r2_rep_exps[j][1]));
}
if( isProduct ) {
sendInfer(
fact, InferenceId::SETS_RELS_PRODUCE_COMPOSE, nm->mkNode(kind::AND, reasons));
} else {
- if( r1_rmost != r2_lmost ) {
- reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r1_rmost, r2_lmost) );
- }
sendInfer(
fact, InferenceId::SETS_RELS_JOIN_COMPOSE, nm->mkNode(kind::AND, reasons));
}
diff --git a/src/theory/strings/arith_entail.cpp b/src/theory/strings/arith_entail.cpp
index 8950ea6fa..9e3d8ad38 100644
--- a/src/theory/strings/arith_entail.cpp
+++ b/src/theory/strings/arith_entail.cpp
@@ -30,7 +30,10 @@ namespace cvc5 {
namespace theory {
namespace strings {
-ArithEntail::ArithEntail(Rewriter* r) : d_rr(r) {}
+ArithEntail::ArithEntail(Rewriter* r) : d_rr(r)
+{
+ d_zero = NodeManager::currentNM()->mkConst(Rational(0));
+}
Node ArithEntail::rewrite(Node a) { return d_rr->rewrite(a); }
@@ -197,7 +200,7 @@ bool ArithEntail::checkApprox(Node ar)
}
// get the current "fixed" sum for the abstraction of ar
Node aar = aarSum.empty()
- ? nm->mkConst(Rational(0))
+ ? d_zero
: (aarSum.size() == 1 ? aarSum[0] : nm->mkNode(PLUS, aarSum));
aar = d_rr->rewrite(aar);
Trace("strings-ent-approx-debug")
@@ -709,10 +712,61 @@ bool ArithEntail::checkWithAssumptions(std::vector<Node> assumptions,
return res;
}
+struct ArithEntailConstantBoundLowerId
+{
+};
+typedef expr::Attribute<ArithEntailConstantBoundLowerId, Node>
+ ArithEntailConstantBoundLower;
+
+struct ArithEntailConstantBoundUpperId
+{
+};
+typedef expr::Attribute<ArithEntailConstantBoundUpperId, Node>
+ ArithEntailConstantBoundUpper;
+
+void ArithEntail::setConstantBoundCache(Node n, Node ret, bool isLower)
+{
+ if (isLower)
+ {
+ ArithEntailConstantBoundLower acbl;
+ n.setAttribute(acbl, ret);
+ }
+ else
+ {
+ ArithEntailConstantBoundUpper acbu;
+ n.setAttribute(acbu, ret);
+ }
+}
+
+Node ArithEntail::getConstantBoundCache(Node n, bool isLower)
+{
+ if (isLower)
+ {
+ ArithEntailConstantBoundLower acbl;
+ if (n.hasAttribute(acbl))
+ {
+ return n.getAttribute(acbl);
+ }
+ }
+ else
+ {
+ ArithEntailConstantBoundUpper acbu;
+ if (n.hasAttribute(acbu))
+ {
+ return n.getAttribute(acbu);
+ }
+ }
+ return Node::null();
+}
+
Node ArithEntail::getConstantBound(Node a, bool isLower)
{
Assert(d_rr->rewrite(a) == a);
- Node ret;
+ Node ret = getConstantBoundCache(a, isLower);
+ if (!ret.isNull())
+ {
+ return ret;
+ }
if (a.isConst())
{
ret = a;
@@ -721,7 +775,7 @@ Node ArithEntail::getConstantBound(Node a, bool isLower)
{
if (isLower)
{
- ret = NodeManager::currentNM()->mkConst(Rational(0));
+ ret = d_zero;
}
}
else if (a.getKind() == kind::PLUS || a.getKind() == kind::MULT)
@@ -767,7 +821,7 @@ Node ArithEntail::getConstantBound(Node a, bool isLower)
{
if (children.empty())
{
- ret = NodeManager::currentNM()->mkConst(Rational(0));
+ ret = d_zero;
}
else if (children.size() == 1)
{
@@ -789,6 +843,55 @@ Node ArithEntail::getConstantBound(Node a, bool isLower)
|| check(a, false));
Assert(!isLower || ret.isNull() || ret.getConst<Rational>().sgn() <= 0
|| check(a, true));
+ // cache
+ setConstantBoundCache(a, ret, isLower);
+ return ret;
+}
+
+Node ArithEntail::getConstantBoundLength(Node s, bool isLower)
+{
+ Assert(s.getType().isStringLike());
+ Node ret = getConstantBoundCache(s, isLower);
+ if (!ret.isNull())
+ {
+ return ret;
+ }
+ NodeManager* nm = NodeManager::currentNM();
+ if (s.isConst())
+ {
+ ret = nm->mkConst(Rational(Word::getLength(s)));
+ }
+ else if (s.getKind() == STRING_CONCAT)
+ {
+ Rational sum(0);
+ bool success = true;
+ for (const Node& sc : s)
+ {
+ Node b = getConstantBoundLength(sc, isLower);
+ if (b.isNull())
+ {
+ if (isLower)
+ {
+ // assume zero and continue
+ continue;
+ }
+ success = false;
+ break;
+ }
+ Assert(b.getKind() == CONST_RATIONAL);
+ sum = sum + b.getConst<Rational>();
+ }
+ if (success)
+ {
+ ret = nm->mkConst(sum);
+ }
+ }
+ else if (isLower)
+ {
+ ret = d_zero;
+ }
+ // cache
+ setConstantBoundCache(s, ret, isLower);
return ret;
}
@@ -853,7 +956,7 @@ bool ArithEntail::inferZerosInSumGeq(Node x,
}
else
{
- sum = ys.size() == 1 ? ys[0] : nm->mkConst(Rational(0));
+ sum = ys.size() == 1 ? ys[0] : d_zero;
}
if (check(sum, x))
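Note: getConstantBoundLength above computes constant length bounds structurally: a string constant contributes its word length, a concatenation sums the bounds of its children (a child with no known bound counts as 0 for lower bounds and makes the upper bound undefined), and any other term has lower bound 0 and no constant upper bound. A toy standalone illustration of that recursion, with a tiny stand-in term type replacing cvc5 Nodes and -1 encoding "no constant bound":

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in term type: a string literal, a concatenation, or an unknown term.
struct StrTerm
{
  bool isConst;
  std::string value;          // literal value if isConst
  std::vector<StrTerm> cat;   // children if this is a concatenation
};

// Constant bound on the length of t; -1 means "no constant bound".
long boundLength(const StrTerm& t, bool isLower)
{
  if (t.isConst)
  {
    return static_cast<long>(t.value.size());
  }
  if (!t.cat.empty())
  {
    long sum = 0;
    for (const StrTerm& c : t.cat)
    {
      long b = boundLength(c, isLower);
      if (b < 0)
      {
        if (isLower) continue;  // assume zero and continue
        return -1;              // no constant upper bound
      }
      sum += b;
    }
    return sum;
  }
  // unknown term: lower bound 0, no constant upper bound
  return isLower ? 0 : -1;
}

int main()
{
  StrTerm x{false, "", {}};         // a string variable
  StrTerm ab{true, "AB", {}};       // the constant "AB"
  StrTerm cat{false, "", {ab, x}};  // (str.++ "AB" x)
  std::cout << boundLength(cat, true) << "\n";   // 2  (lower bound)
  std::cout << boundLength(cat, false) << "\n";  // -1 (no upper bound)
  return 0;
}
```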
diff --git a/src/theory/strings/arith_entail.h b/src/theory/strings/arith_entail.h
index 2158b23b0..6529a81d1 100644
--- a/src/theory/strings/arith_entail.h
+++ b/src/theory/strings/arith_entail.h
@@ -96,6 +96,10 @@ class ArithEntail
* checkWithAssumption(x + (str.len y) = 0, 0, x, false) = true
*
* Because: x = -(str.len y), so 0 >= x --> 0 >= -(str.len y) --> true
+ *
+ * Since this method rewrites on rewriting and may introduce new variables
+   * Since this method relies on rewriting and may introduce new variables
+ * main rewriter of strings, or non-termination can occur.
*/
bool checkWithAssumption(Node assumption,
Node a,
@@ -114,6 +118,10 @@ class ArithEntail
* checkWithAssumptions([x + (str.len y) = 0], 0, x, false) = true
*
* Because: x = -(str.len y), so 0 >= x --> 0 >= -(str.len y) --> true
+ *
+   * Since this method relies on rewriting and may introduce new variables
+ * (slack variables for inequalities), it should *not* be called from the
+ * main rewriter of strings, or non-termination can occur.
*/
bool checkWithAssumptions(std::vector<Node> assumptions,
Node a,
@@ -136,6 +144,10 @@ class ArithEntail
Node getConstantBound(Node a, bool isLower = true);
/**
+   * get a constant bound on the length of s (lower if isLower, upper otherwise).
+ */
+ Node getConstantBoundLength(Node s, bool isLower = true);
+ /**
* Given an inequality y1 + ... + yn >= x, removes operands yi s.t. the
* original inequality still holds. Returns true if the original inequality
* holds and false otherwise. The list of ys is modified to contain a subset
@@ -179,8 +191,14 @@ class ArithEntail
void getArithApproximations(Node a,
std::vector<Node>& approx,
bool isOverApprox = false);
+ /** Set bound cache */
+ void setConstantBoundCache(Node n, Node ret, bool isLower);
+ /** Get bound cache */
+ Node getConstantBoundCache(Node n, bool isLower);
/** The underlying rewriter */
Rewriter* d_rr;
+ /** Constant zero */
+ Node d_zero;
};
} // namespace strings
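Note: the documented example for checkWithAssumption can be spelled out as below. This is a sketch only, not a standalone program: x (an Int Node), y (a String Node) and rewriter (a Rewriter) are placeholders assumed to exist in the surrounding context, mirroring how extendedRewriteStrings calls this utility above.

```cpp
NodeManager* nm = NodeManager::currentNM();
Node zero = nm->mkConst(Rational(0));
Node leny = nm->mkNode(kind::STRING_LENGTH, y);
// assumption: x + (str.len y) = 0
Node assumption = nm->mkNode(kind::PLUS, x, leny).eqNode(zero);
// callers rewrite the assumption first, as in extendedRewriteStrings
assumption = rewriter.rewrite(assumption);
strings::ArithEntail aent(&rewriter);
// under the assumption, 0 >= x holds (x equals -(str.len y)), so per the
// documentation above this check succeeds
bool entailed = aent.checkWithAssumption(assumption, zero, x, false);
```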
diff --git a/src/theory/strings/core_solver.cpp b/src/theory/strings/core_solver.cpp
index d4d3761d8..b5e15a129 100644
--- a/src/theory/strings/core_solver.cpp
+++ b/src/theory/strings/core_solver.cpp
@@ -2550,6 +2550,7 @@ void CoreSolver::checkNormalFormsDeq()
const context::CDList<Node>& deqs = d_state.getDisequalityList();
+ NodeManager* nm = NodeManager::currentNM();
//for each pair of disequal strings, must determine whether their lengths are equal or disequal
for (const Node& eq : deqs)
{
@@ -2564,9 +2565,8 @@ void CoreSolver::checkNormalFormsDeq()
EqcInfo* ei = d_state.getOrMakeEqcInfo(n[i], false);
lt[i] = ei ? ei->d_lengthTerm : Node::null();
if( lt[i].isNull() ){
- lt[i] = eq[i];
+ lt[i] = nm->mkNode(STRING_LENGTH, eq[i]);
}
- lt[i] = NodeManager::currentNM()->mkNode( kind::STRING_LENGTH, lt[i] );
}
if (!d_state.areEqual(lt[0], lt[1]) && !d_state.areDisequal(lt[0], lt[1]))
{
@@ -2642,14 +2642,13 @@ void CoreSolver::checkLengthsEqc() {
<< "Process length constraints for " << d_strings_eqc[i] << std::endl;
// check if there is a length term for this equivalence class
EqcInfo* ei = d_state.getOrMakeEqcInfo(d_strings_eqc[i], false);
- Node lt = ei ? ei->d_lengthTerm : Node::null();
- if (lt.isNull())
+ Node llt = ei ? ei->d_lengthTerm : Node::null();
+ if (llt.isNull())
{
Trace("strings-process-debug")
<< "No length term for eqc " << d_strings_eqc[i] << std::endl;
continue;
}
- Node llt = NodeManager::currentNM()->mkNode(kind::STRING_LENGTH, lt);
// now, check if length normalization has occurred
if (ei->d_normalizedLength.get().isNull())
{
@@ -2669,7 +2668,7 @@ void CoreSolver::checkLengthsEqc() {
// if not, add the lemma
std::vector<Node> ant;
ant.insert(ant.end(), nfi.d_exp.begin(), nfi.d_exp.end());
- ant.push_back(lt.eqNode(nfi.d_base));
+ ant.push_back(llt[0].eqNode(nfi.d_base));
Node lc = NodeManager::currentNM()->mkNode(kind::STRING_LENGTH, nf);
Node lcr = rewrite(lc);
Trace("strings-process-debug")
diff --git a/src/theory/strings/eager_solver.cpp b/src/theory/strings/eager_solver.cpp
index 829146f52..21fdd6fa2 100644
--- a/src/theory/strings/eager_solver.cpp
+++ b/src/theory/strings/eager_solver.cpp
@@ -16,6 +16,7 @@
#include "theory/strings/eager_solver.h"
#include "theory/strings/theory_strings_utils.h"
+#include "util/rational.h"
using namespace cvc5::kind;
@@ -23,7 +24,13 @@ namespace cvc5 {
namespace theory {
namespace strings {
-EagerSolver::EagerSolver(SolverState& state) : d_state(state) {}
+EagerSolver::EagerSolver(Env& env,
+ SolverState& state,
+ TermRegistry& treg,
+ ArithEntail& aent)
+ : EnvObj(env), d_state(state), d_treg(treg), d_aent(aent)
+{
+}
EagerSolver::~EagerSolver() {}
@@ -37,7 +44,30 @@ void EagerSolver::eqNotifyNewClass(TNode t)
EqcInfo* ei = d_state.getOrMakeEqcInfo(r);
if (k == STRING_LENGTH)
{
- ei->d_lengthTerm = t[0];
+ ei->d_lengthTerm = t;
+    // also record t as a lower/upper bound term, as applicable, in the
+    // equivalence class info of t.
+ EqcInfo* eil = nullptr;
+ for (size_t i = 0; i < 2; i++)
+ {
+ Node b = getBoundForLength(t, i == 0);
+ if (b.isNull())
+ {
+ continue;
+ }
+ if (eil == nullptr)
+ {
+ eil = d_state.getOrMakeEqcInfo(t);
+ }
+ if (i == 0)
+ {
+ eil->d_firstBound = t;
+ }
+ else if (i == 1)
+ {
+ eil->d_secondBound = t;
+ }
+ }
}
else
{
@@ -46,11 +76,12 @@ void EagerSolver::eqNotifyNewClass(TNode t)
}
else if (t.isConst())
{
- if (t.getType().isStringLike())
+ TypeNode tn = t.getType();
+ if (tn.isStringLike() || tn.isInteger())
{
EqcInfo* ei = d_state.getOrMakeEqcInfo(t);
- ei->d_prefixC = t;
- ei->d_suffixC = t;
+ ei->d_firstBound = t;
+ ei->d_secondBound = t;
}
}
else if (k == STRING_CONCAT)
@@ -66,8 +97,18 @@ void EagerSolver::eqNotifyMerge(TNode t1, TNode t2)
{
return;
}
- Assert(t1.getType().isStringLike());
+  // e2 is non-null here, so always create the eqc info for t1
EqcInfo* e1 = d_state.getOrMakeEqcInfo(t1);
+ // check for conflict
+ Node conf = checkForMergeConflict(t1, t2, e1, e2);
+ if (!conf.isNull())
+ {
+ InferenceId id = t1.getType().isStringLike()
+ ? InferenceId::STRINGS_PREFIX_CONFLICT
+ : InferenceId::STRINGS_ARITH_BOUND_CONFLICT;
+ d_state.setPendingMergeConflict(conf, id);
+ return;
+ }
// add information from e2 to e1
if (!e2->d_lengthTerm.get().isNull())
{
@@ -77,16 +118,6 @@ void EagerSolver::eqNotifyMerge(TNode t1, TNode t2)
{
e1->d_codeTerm.set(e2->d_codeTerm);
}
- if (!e2->d_prefixC.get().isNull())
- {
- d_state.setPendingPrefixConflictWhen(
- e1->addEndpointConst(e2->d_prefixC, Node::null(), false));
- }
- if (!e2->d_suffixC.get().isNull())
- {
- d_state.setPendingPrefixConflictWhen(
- e1->addEndpointConst(e2->d_suffixC, Node::null(), true));
- }
if (e2->d_cardinalityLemK.get() > e1->d_cardinalityLemK.get())
{
e1->d_cardinalityLemK.set(e2->d_cardinalityLemK);
@@ -126,9 +157,50 @@ void EagerSolver::addEndpointsToEqcInfo(Node t, Node concat, Node eqc)
Trace("strings-eager-pconf-debug")
<< "New term: " << concat << " for " << t << " with prefix " << c
<< " (" << (r == 1) << ")" << std::endl;
- d_state.setPendingPrefixConflictWhen(ei->addEndpointConst(t, c, r == 1));
+ Node conf = ei->addEndpointConst(t, c, r == 1);
+ if (!conf.isNull())
+ {
+ d_state.setPendingMergeConflict(conf,
+ InferenceId::STRINGS_PREFIX_CONFLICT);
+ return;
+ }
+ }
+ }
+}
+
+Node EagerSolver::checkForMergeConflict(Node a,
+ Node b,
+ EqcInfo* ea,
+ EqcInfo* eb)
+{
+ Assert(eb != nullptr && ea != nullptr);
+ Assert(a.getType() == b.getType());
+ Assert(a.getType().isStringLike() || a.getType().isInteger());
+  // check prefix/suffix bounds (strings) or lower/upper bounds (integers)
+ for (size_t i = 0; i < 2; i++)
+ {
+ Node n = i == 0 ? eb->d_firstBound.get() : eb->d_secondBound.get();
+ if (!n.isNull())
+ {
+ Node conf;
+ if (a.getType().isStringLike())
+ {
+ conf = ea->addEndpointConst(n, Node::null(), i == 1);
+ }
+ else
+ {
+ Trace("strings-eager-aconf-debug")
+ << "addArithmeticBound " << n << " into " << a << " from " << b
+ << std::endl;
+ conf = addArithmeticBound(ea, n, i == 0);
+ }
+ if (!conf.isNull())
+ {
+ return conf;
+ }
}
}
+ return Node::null();
}
void EagerSolver::notifyFact(TNode atom,
@@ -147,6 +219,75 @@ void EagerSolver::notifyFact(TNode atom,
}
}
+Node EagerSolver::addArithmeticBound(EqcInfo* e, Node t, bool isLower)
+{
+ Assert(e != nullptr);
+ Assert(!t.isNull());
+ Node tb = t.isConst() ? t : getBoundForLength(t, isLower);
+ Assert(!tb.isNull() && tb.getKind() == CONST_RATIONAL)
+ << "Unexpected bound " << tb << " from " << t;
+ Rational br = tb.getConst<Rational>();
+ Node prev = isLower ? e->d_firstBound : e->d_secondBound;
+ // check if subsumed
+ if (!prev.isNull())
+ {
+ // convert to bound
+ Node prevb = prev.isConst() ? prev : getBoundForLength(prev, isLower);
+ Assert(!prevb.isNull() && prevb.getKind() == CONST_RATIONAL);
+ Rational prevbr = prevb.getConst<Rational>();
+ if (prevbr == br || (br < prevbr) == isLower)
+ {
+ // subsumed
+ return Node::null();
+ }
+ }
+ Node prevo = isLower ? e->d_secondBound : e->d_firstBound;
+ Trace("strings-eager-aconf-debug")
+ << "Check conflict for bounds " << t << " " << prevo << std::endl;
+ if (!prevo.isNull())
+ {
+ // are we in conflict?
+ Node prevob = prevo.isConst() ? prevo : getBoundForLength(prevo, !isLower);
+ Assert(!prevob.isNull() && prevob.getKind() == CONST_RATIONAL);
+ Rational prevobr = prevob.getConst<Rational>();
+ if (prevobr != br && (prevobr < br) == isLower)
+ {
+ // conflict
+ Node ret = EqcInfo::mkMergeConflict(t, prevo);
+ Trace("strings-eager-aconf")
+ << "String: eager arithmetic bound conflict: " << ret << std::endl;
+ return ret;
+ }
+ }
+ if (isLower)
+ {
+ e->d_firstBound = t;
+ }
+ else
+ {
+ e->d_secondBound = t;
+ }
+ return Node::null();
+}
+
+Node EagerSolver::getBoundForLength(Node len, bool isLower)
+{
+ Assert(len.getKind() == STRING_LENGTH);
+ // it is prohibitively expensive to convert to original form and rewrite,
+ // since this may invoke the rewriter on lengths of complex terms. Instead,
+  // we convert the argument to its original form, then call the utility method
+ // for computing the length of the argument, implicitly under an application
+ // of length (ArithEntail::getConstantBoundLength).
+ // convert to original form
+ Node olent = SkolemManager::getOriginalForm(len[0]);
+ // get the bound
+ Node c = d_aent.getConstantBoundLength(olent, isLower);
+ Trace("strings-eager-aconf-debug")
+ << "Constant " << (isLower ? "lower" : "upper") << " bound for " << len
+ << " is " << c << ", from original form " << olent << std::endl;
+ return c;
+}
+
} // namespace strings
} // namespace theory
} // namespace cvc5
diff --git a/src/theory/strings/eager_solver.h b/src/theory/strings/eager_solver.h
index cf4062bfe..03fb0ff63 100644
--- a/src/theory/strings/eager_solver.h
+++ b/src/theory/strings/eager_solver.h
@@ -21,8 +21,11 @@
#include <map>
#include "expr/node.h"
+#include "smt/env_obj.h"
+#include "theory/strings/arith_entail.h"
#include "theory/strings/eqc_info.h"
#include "theory/strings/solver_state.h"
+#include "theory/strings/term_registry.h"
namespace cvc5 {
namespace theory {
@@ -32,10 +35,13 @@ namespace strings {
* Eager solver, which is responsible for tracking of eager information and
* reporting conflicts to the solver state.
*/
-class EagerSolver
+class EagerSolver : protected EnvObj
{
public:
- EagerSolver(SolverState& state);
+ EagerSolver(Env& env,
+ SolverState& state,
+ TermRegistry& treg,
+ ArithEntail& aent);
~EagerSolver();
/** called when a new equivalence class is created */
void eqNotifyNewClass(TNode t);
@@ -58,8 +64,21 @@ class EagerSolver
* for some eqc that is currently equal to z.
*/
void addEndpointsToEqcInfo(Node t, Node concat, Node eqc);
+ /**
+ * Check for conflict when merging equivalence classes with the given info,
+ * return the node corresponding to the conflict if so.
+ */
+ Node checkForMergeConflict(Node a, Node b, EqcInfo* ea, EqcInfo* eb);
+  /** add arithmetic bound; returns a conflict node if one arises */
+ Node addArithmeticBound(EqcInfo* ea, Node t, bool isLower);
+ /** get bound for length term */
+ Node getBoundForLength(Node len, bool isLower);
/** Reference to the solver state */
SolverState& d_state;
+ /** Reference to the term registry */
+ TermRegistry& d_treg;
+ /** Arithmetic entailment */
+ ArithEntail& d_aent;
};
} // namespace strings
diff --git a/src/theory/strings/eqc_info.cpp b/src/theory/strings/eqc_info.cpp
index 43636ecea..5fb5e91c3 100644
--- a/src/theory/strings/eqc_info.cpp
+++ b/src/theory/strings/eqc_info.cpp
@@ -31,15 +31,15 @@ EqcInfo::EqcInfo(context::Context* c)
d_codeTerm(c),
d_cardinalityLemK(c),
d_normalizedLength(c),
- d_prefixC(c),
- d_suffixC(c)
+ d_firstBound(c),
+ d_secondBound(c)
{
}
Node EqcInfo::addEndpointConst(Node t, Node c, bool isSuf)
{
// check conflict
- Node prev = isSuf ? d_suffixC : d_prefixC;
+ Node prev = isSuf ? d_secondBound : d_firstBound;
if (!prev.isNull())
{
Trace("strings-eager-pconf-debug") << "Check conflict " << prev << ", " << t
@@ -100,28 +100,7 @@ Node EqcInfo::addEndpointConst(Node t, Node c, bool isSuf)
{
Trace("strings-eager-pconf")
<< "Conflict for " << prevC << ", " << c << std::endl;
- std::vector<Node> ccs;
- Node r[2];
- for (unsigned i = 0; i < 2; i++)
- {
- Node tp = i == 0 ? t : prev;
- if (tp.getKind() == STRING_IN_REGEXP)
- {
- ccs.push_back(tp);
- r[i] = tp[0];
- }
- else
- {
- r[i] = tp;
- }
- }
- if (r[0] != r[1])
- {
- ccs.push_back(r[0].eqNode(r[1]));
- }
- Assert(!ccs.empty());
- Node ret =
- ccs.size() == 1 ? ccs[0] : NodeManager::currentNM()->mkNode(AND, ccs);
+ Node ret = mkMergeConflict(t, prev);
Trace("strings-eager-pconf")
<< "String: eager prefix conflict: " << ret << std::endl;
return ret;
@@ -129,15 +108,40 @@ Node EqcInfo::addEndpointConst(Node t, Node c, bool isSuf)
}
if (isSuf)
{
- d_suffixC = t;
+ d_secondBound = t;
}
else
{
- d_prefixC = t;
+ d_firstBound = t;
}
return Node::null();
}
+Node EqcInfo::mkMergeConflict(Node t, Node prev)
+{
+ std::vector<Node> ccs;
+ Node r[2];
+ for (unsigned i = 0; i < 2; i++)
+ {
+ Node tp = i == 0 ? t : prev;
+ if (tp.getKind() == STRING_IN_REGEXP)
+ {
+ ccs.push_back(tp);
+ r[i] = tp[0];
+ }
+ else
+ {
+ r[i] = tp;
+ }
+ }
+ if (r[0] != r[1])
+ {
+ ccs.push_back(r[0].eqNode(r[1]));
+ }
+ Assert(!ccs.empty());
+ return NodeManager::currentNM()->mkAnd(ccs);
+}
+
} // namespace strings
} // namespace theory
} // namespace cvc5
diff --git a/src/theory/strings/eqc_info.h b/src/theory/strings/eqc_info.h
index 1fd430c95..bfc753989 100644
--- a/src/theory/strings/eqc_info.h
+++ b/src/theory/strings/eqc_info.h
@@ -62,17 +62,34 @@ class EqcInfo
context::CDO<unsigned> d_cardinalityLemK;
context::CDO<Node> d_normalizedLength;
/**
- * A node that explains the longest constant prefix known of this
+ * If this is a string equivalence class, this is
+ * a node that explains the longest constant prefix known of this
* equivalence class. This can either be:
* (1) A term from this equivalence class, including a constant "ABC" or
* concatenation term (str.++ "ABC" ...), or
* (2) A membership of the form (str.in.re x R) where x is in this
* equivalence class and R is a regular expression of the form
* (str.to.re "ABC") or (re.++ (str.to.re "ABC") ...).
+ *
+ * If this is an integer equivalence class, this is the lower bound
+ * of the value of this equivalence class.
+ */
+ context::CDO<Node> d_firstBound;
+ /** same as above, for suffix and integer upper bounds. */
+ context::CDO<Node> d_secondBound;
+ /**
+ * Make merge conflict. Let "bound term" refer to a term that is set
+ * as the data member of this class for d_firstBound or d_secondBound.
+   * This method is called when a merge implies that two bound terms occur
+   * in the same equivalence class but have conflicting properties. For example,
+ * t may be (str.in_re x (re.++ (str.to_re "A") R2)) and prev may be
+ * (str.++ "B" y), where the equivalence class of x has merged into
+ * the equivalence class of (str.++ "B" y). This method would return
+ * the conflict:
+ * (and (= x (str.++ "B" y)) (str.in_re x (re.++ (str.to_re "A") R2)))
+ * for this input.
*/
- context::CDO<Node> d_prefixC;
- /** same as above, for suffix. */
- context::CDO<Node> d_suffixC;
+ static Node mkMergeConflict(Node t, Node prev);
};
} // namespace strings
diff --git a/src/theory/strings/regexp_elim.cpp b/src/theory/strings/regexp_elim.cpp
index dfe006a86..b3dc309d5 100644
--- a/src/theory/strings/regexp_elim.cpp
+++ b/src/theory/strings/regexp_elim.cpp
@@ -15,6 +15,7 @@
#include "theory/strings/regexp_elim.h"
+#include "expr/bound_var_manager.h"
#include "options/strings_options.h"
#include "proof/proof_node_manager.h"
#include "theory/rewriter.h"
@@ -29,6 +30,22 @@ namespace cvc5 {
namespace theory {
namespace strings {
+/**
+ * Attributes used for constructing unique bound variables. The following
+ * attributes are used to construct (deterministic) bound variables for
+ * eliminations within eliminateConcat and eliminateStar respectively.
+ */
+struct ReElimConcatIndexAttributeId
+{
+};
+typedef expr::Attribute<ReElimConcatIndexAttributeId, Node>
+ ReElimConcatIndexAttribute;
+struct ReElimStarIndexAttributeId
+{
+};
+typedef expr::Attribute<ReElimStarIndexAttributeId, Node>
+ ReElimStarIndexAttribute;
+
RegExpElimination::RegExpElimination(bool isAgg,
ProofNodeManager* pnm,
context::Context* c)
@@ -77,6 +94,7 @@ TrustNode RegExpElimination::eliminateTrusted(Node atom)
Node RegExpElimination::eliminateConcat(Node atom, bool isAgg)
{
NodeManager* nm = NodeManager::currentNM();
+ BoundVarManager* bvm = nm->getBoundVarManager();
Node x = atom[0];
Node lenx = nm->mkNode(STRING_LENGTH, x);
Node re = atom[1];
@@ -260,7 +278,11 @@ Node RegExpElimination::eliminateConcat(Node atom, bool isAgg)
}
// if the gap after this one is strict, we need a non-greedy find
// thus, we add a symbolic constant
- Node k = nm->mkBoundVar(nm->integerType());
+ Node cacheVal =
+ BoundVarManager::getCacheValue(atom, nm->mkConst(Rational(i)));
+ TypeNode intType = nm->integerType();
+ Node k =
+ bvm->mkBoundVar<ReElimConcatIndexAttribute>(cacheVal, intType);
non_greedy_find_vars.push_back(k);
prev_end = nm->mkNode(PLUS, prev_end, k);
}
@@ -452,7 +474,10 @@ Node RegExpElimination::eliminateConcat(Node atom, bool isAgg)
}
else
{
- k = nm->mkBoundVar(nm->integerType());
+ Node cacheVal =
+ BoundVarManager::getCacheValue(atom, nm->mkConst(Rational(i)));
+ TypeNode intType = nm->integerType();
+ k = bvm->mkBoundVar<ReElimConcatIndexAttribute>(cacheVal, intType);
Node bound =
nm->mkNode(AND,
nm->mkNode(LEQ, zero, k),
@@ -509,6 +534,7 @@ Node RegExpElimination::eliminateStar(Node atom, bool isAgg)
// only aggressive rewrites below here
NodeManager* nm = NodeManager::currentNM();
+ BoundVarManager* bvm = nm->getBoundVarManager();
Node x = atom[0];
Node lenx = nm->mkNode(STRING_LENGTH, x);
Node re = atom[1];
@@ -530,7 +556,8 @@ Node RegExpElimination::eliminateStar(Node atom, bool isAgg)
}
bool lenOnePeriod = true;
std::vector<Node> char_constraints;
- Node index = nm->mkBoundVar(nm->integerType());
+ TypeNode intType = nm->integerType();
+ Node index = bvm->mkBoundVar<ReElimStarIndexAttribute>(atom, intType);
Node substr_ch =
nm->mkNode(STRING_SUBSTR, x, index, nm->mkConst(Rational(1)));
substr_ch = Rewriter::rewrite(substr_ch);
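Note: the switch from nm->mkBoundVar to BoundVarManager above is intended to make the introduced integer variables deterministic: the same attribute and cache value always yield the same bound variable, so repeated eliminations of the same membership produce syntactically identical results. A short sketch of that property, assuming atom is the membership Node being eliminated:

```cpp
NodeManager* nm = NodeManager::currentNM();
BoundVarManager* bvm = nm->getBoundVarManager();
TypeNode intType = nm->integerType();
Node cacheVal = BoundVarManager::getCacheValue(atom, nm->mkConst(Rational(0)));
Node k1 = bvm->mkBoundVar<ReElimConcatIndexAttribute>(cacheVal, intType);
Node k2 = bvm->mkBoundVar<ReElimConcatIndexAttribute>(cacheVal, intType);
Assert(k1 == k2);  // same attribute and cache value, same variable
```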
diff --git a/src/theory/strings/sequences_rewriter.cpp b/src/theory/strings/sequences_rewriter.cpp
index 9887e7ef0..783e258fa 100644
--- a/src/theory/strings/sequences_rewriter.cpp
+++ b/src/theory/strings/sequences_rewriter.cpp
@@ -1813,6 +1813,13 @@ Node SequencesRewriter::rewriteSubstr(Node node)
}
}
+ // (str.substr s x x) ---> "" if (str.len s) <= 1
+ if (node[1] == node[2] && d_stringsEntail.checkLengthOne(node[0]))
+ {
+ Node ret = Word::mkEmptyWord(node.getType());
+ return returnRewrite(node, ret, Rewrite::SS_LEN_ONE_Z_Z);
+ }
+
// symbolic length analysis
for (unsigned r = 0; r < 2; r++)
{
@@ -1847,44 +1854,6 @@ Node SequencesRewriter::rewriteSubstr(Node node)
d_arithEntail.rewrite(nm->mkNode(kind::MINUS, tot_len, end_pt));
}
}
-
- // (str.substr s x y) --> "" if x < len(s) |= 0 >= y
- Node n1_lt_tot_len =
- d_arithEntail.rewrite(nm->mkNode(kind::LT, node[1], tot_len));
- if (d_arithEntail.checkWithAssumption(
- n1_lt_tot_len, zero, node[2], false))
- {
- Node ret = Word::mkEmptyWord(node.getType());
- return returnRewrite(node, ret, Rewrite::SS_START_ENTAILS_ZERO_LEN);
- }
-
- // (str.substr s x y) --> "" if 0 < y |= x >= str.len(s)
- Node non_zero_len =
- d_arithEntail.rewrite(nm->mkNode(kind::LT, zero, node[2]));
- if (d_arithEntail.checkWithAssumption(
- non_zero_len, node[1], tot_len, false))
- {
- Node ret = Word::mkEmptyWord(node.getType());
- return returnRewrite(node, ret, Rewrite::SS_NON_ZERO_LEN_ENTAILS_OOB);
- }
-
- // (str.substr s x y) --> "" if x >= 0 |= 0 >= str.len(s)
- Node geq_zero_start =
- d_arithEntail.rewrite(nm->mkNode(kind::GEQ, node[1], zero));
- if (d_arithEntail.checkWithAssumption(
- geq_zero_start, zero, tot_len, false))
- {
- Node ret = Word::mkEmptyWord(node.getType());
- return returnRewrite(
- node, ret, Rewrite::SS_GEQ_ZERO_START_ENTAILS_EMP_S);
- }
-
- // (str.substr s x x) ---> "" if (str.len s) <= 1
- if (node[1] == node[2] && d_stringsEntail.checkLengthOne(node[0]))
- {
- Node ret = Word::mkEmptyWord(node.getType());
- return returnRewrite(node, ret, Rewrite::SS_LEN_ONE_Z_Z);
- }
}
if (!curr.isNull())
{
diff --git a/src/theory/strings/solver_state.cpp b/src/theory/strings/solver_state.cpp
index 32ed6896c..28ab63d05 100644
--- a/src/theory/strings/solver_state.cpp
+++ b/src/theory/strings/solver_state.cpp
@@ -90,6 +90,10 @@ Node SolverState::getLengthExp(Node t, std::vector<Node>& exp, Node te)
// typically shouldnt be necessary
lengthTerm = t;
}
+ else
+ {
+ lengthTerm = lengthTerm[0];
+ }
Debug("strings") << "SolverState::getLengthTerm " << t << " is " << lengthTerm
<< std::endl;
if (te != lengthTerm)
@@ -135,13 +139,14 @@ bool SolverState::isEqualEmptyWord(Node s, Node& emps)
return false;
}
-void SolverState::setPendingPrefixConflictWhen(Node conf)
+void SolverState::setPendingMergeConflict(Node conf, InferenceId id)
{
- if (conf.isNull() || d_pendingConflictSet.get())
+ if (d_pendingConflictSet.get())
{
+ // already set conflict
return;
}
- InferInfo iiPrefixConf(InferenceId::STRINGS_PREFIX_CONFLICT);
+ InferInfo iiPrefixConf(id);
iiPrefixConf.d_conc = d_false;
utils::flattenOp(AND, conf, iiPrefixConf.d_premises);
setPendingConflict(iiPrefixConf);
@@ -188,7 +193,6 @@ void SolverState::separateByLength(
// not have an associated length in the mappings above, if the length of
// an equivalence class is unknown.
std::map<unsigned, std::vector<Node> > eqc_to_strings;
- NodeManager* nm = NodeManager::currentNM();
for (const Node& eqc : n)
{
Assert(d_ee->getRepresentative(eqc) == eqc);
@@ -197,7 +201,6 @@ void SolverState::separateByLength(
Node lt = ei ? ei->d_lengthTerm : Node::null();
if (!lt.isNull())
{
- lt = nm->mkNode(STRING_LENGTH, lt);
Node r = d_ee->getRepresentative(lt);
std::pair<Node, TypeNode> lkey(r, tnEqc);
if (eqc_to_leqc.find(lkey) == eqc_to_leqc.end())
diff --git a/src/theory/strings/solver_state.h b/src/theory/strings/solver_state.h
index bbeb470f7..061c5cf10 100644
--- a/src/theory/strings/solver_state.h
+++ b/src/theory/strings/solver_state.h
@@ -64,9 +64,9 @@ class SolverState : public TheoryState
void addDisequality(TNode t1, TNode t2);
//-------------------------------------- end disequality information
//------------------------------------------ conflicts
- /** set pending prefix conflict
+ /** set pending merge conflict
*
- * If conf is non-null, this is called when conf is a conjunction of literals
+ * This is called when conf is a conjunction of literals
* that hold in the current context that are unsatisfiable. It is set as the
* "pending conflict" to be processed as a conflict lemma on the output
* channel of this class. It is not sent out immediately since it may require
@@ -74,7 +74,7 @@ class SolverState : public TheoryState
* during a merge operation, when the equality engine is not in a state to
* provide explanations.
*/
- void setPendingPrefixConflictWhen(Node conf);
+ void setPendingMergeConflict(Node conf, InferenceId id);
/**
* Set pending conflict, infer info version. Called when we are in conflict
* based on the inference ii. This generalizes the above method.
diff --git a/src/theory/strings/theory_strings.cpp b/src/theory/strings/theory_strings.cpp
index 25d0e7336..9a793e14f 100644
--- a/src/theory/strings/theory_strings.cpp
+++ b/src/theory/strings/theory_strings.cpp
@@ -55,14 +55,14 @@ TheoryStrings::TheoryStrings(Env& env, OutputChannel& out, Valuation valuation)
d_notify(*this),
d_statistics(),
d_state(env, d_valuation),
- d_eagerSolver(d_state),
d_termReg(env, d_state, d_statistics, d_pnm),
- d_extTheoryCb(),
- d_im(env, *this, d_state, d_termReg, d_extTheory, d_statistics),
- d_extTheory(env, d_extTheoryCb, d_im),
d_rewriter(env.getRewriter(),
&d_statistics.d_rewrites,
d_termReg.getAlphabetCardinality()),
+ d_eagerSolver(env, d_state, d_termReg, d_rewriter.getArithEntail()),
+ d_extTheoryCb(),
+ d_im(env, *this, d_state, d_termReg, d_extTheory, d_statistics),
+ d_extTheory(env, d_extTheoryCb, d_im),
// the checker depends on the cardinality of the alphabet
d_checker(d_termReg.getAlphabetCardinality()),
d_bsolver(env, d_state, d_im),
diff --git a/src/theory/strings/theory_strings.h b/src/theory/strings/theory_strings.h
index f23b2449f..dbb04580f 100644
--- a/src/theory/strings/theory_strings.h
+++ b/src/theory/strings/theory_strings.h
@@ -254,18 +254,18 @@ class TheoryStrings : public Theory {
SequencesStatistics d_statistics;
/** The solver state object */
SolverState d_state;
- /** The eager solver */
- EagerSolver d_eagerSolver;
/** The term registry for this theory */
TermRegistry d_termReg;
+ /** The theory rewriter for this theory. */
+ StringsRewriter d_rewriter;
+ /** The eager solver */
+ EagerSolver d_eagerSolver;
/** The extended theory callback */
StringsExtfCallback d_extTheoryCb;
/** The (custom) output channel of the theory of strings */
InferenceManager d_im;
/** Extended theory, responsible for context-dependent simplification. */
ExtTheory d_extTheory;
- /** The theory rewriter for this theory. */
- StringsRewriter d_rewriter;
/** The proof rule checker */
StringProofRuleChecker d_checker;
/**
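The member reordering above is what makes the constructor change in theory_strings.cpp safe: C++ initializes non-static data members in declaration order, regardless of the order of the mem-initializer list, and d_eagerSolver's new constructor argument d_rewriter.getArithEntail() is only valid if d_rewriter is declared (and therefore initialized) first. A minimal standalone illustration of that rule, using stand-in class names rather than cvc5's:

#include <iostream>

// Stand-in for the arithmetic-entailment helper handed to the eager solver.
struct ArithEntail
{
  int d_tag = 42;
};

struct Rewriter
{
  ArithEntail& getArithEntail() { return d_ae; }
  ArithEntail d_ae;
};

struct EagerSolver
{
  // Reads from the rewriter's helper at construction time, so the rewriter
  // must already be initialized when this constructor runs.
  explicit EagerSolver(ArithEntail& ae) : d_tag(ae.d_tag) {}
  int d_tag;
};

struct Theory
{
  Theory() : d_rewriter(), d_eagerSolver(d_rewriter.getArithEntail()) {}
  // Members are initialized in *declaration* order, not in the order they
  // appear in the mem-initializer list. Declaring d_rewriter before
  // d_eagerSolver guarantees getArithEntail() refers to a live object.
  Rewriter d_rewriter;
  EagerSolver d_eagerSolver;
};

int main()
{
  Theory t;
  std::cout << t.d_eagerSolver.d_tag << std::endl;  // prints 42
  return 0;
}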
diff --git a/src/theory/theory_engine.cpp b/src/theory/theory_engine.cpp
index 0f3260920..a186c05a0 100644
--- a/src/theory/theory_engine.cpp
+++ b/src/theory/theory_engine.cpp
@@ -1428,14 +1428,14 @@ void TheoryEngine::conflict(TrustNode tconflict, TheoryId theoryId)
// Process the explanation
TrustNode tncExp = getExplanation(vec);
- Trace("te-proof-debug")
- << "Check closed conflict explained with sharing" << std::endl;
- tncExp.debugCheckClosed("te-proof-debug",
- "TheoryEngine::conflict_explained_sharing");
Node fullConflict = tncExp.getNode();
if (isProofEnabled())
{
+ Trace("te-proof-debug")
+ << "Check closed conflict explained with sharing" << std::endl;
+ tncExp.debugCheckClosed("te-proof-debug",
+ "TheoryEngine::conflict_explained_sharing");
Trace("te-proof-debug") << "Process conflict: " << conflict << std::endl;
Trace("te-proof-debug") << "Conflict " << tconflict << " from "
<< tconflict.identifyGenerator() << std::endl;
@@ -1498,7 +1498,10 @@ void TheoryEngine::conflict(TrustNode tconflict, TheoryId theoryId)
Assert(properConflict(fullConflict));
Trace("te-proof-debug")
<< "Check closed conflict with sharing" << std::endl;
- tconf.debugCheckClosed("te-proof-debug", "TheoryEngine::conflict:sharing");
+ if (isProofEnabled())
+ {
+ tconf.debugCheckClosed("te-proof-debug", "TheoryEngine::conflict:sharing");
+ }
lemma(tconf, LemmaProperty::REMOVABLE);
} else {
// When only one theory, the conflict should need no processing
@@ -1532,8 +1535,26 @@ TrustNode TheoryEngine::getExplanation(
{
Trace("te-proof-exp") << "=== TheoryEngine::getExplanation " << conclusion
<< std::endl;
- lcp.reset(new LazyCDProof(
- d_pnm, nullptr, nullptr, "TheoryEngine::LazyCDProof::getExplanation"));
+ // We do not use auto-symmetry in this proof, since in very rare cases, it
+ // is possible that the proof of explanations is cyclic when considering
+ // (dis)equalities modulo symmetry, where such a proof looks like:
+ // x = y
+ // -----
+ // A ...
+ // ----------
+ // y = x
+ // Notice that this complication arises since propagations consider
+ // equalities that are not in rewritten form. This complication would not
+ // exist otherwise. It is the shared term database that introduces these
+ // unrewritten equalities; it must do so since theory combination requires
+ // communicating arrangements between shared terms, and the rewriter
+ // for arithmetic equalities does not preserve terms, e.g. x=y may become
+ // x+-1*y=0.
+ lcp.reset(new LazyCDProof(d_pnm,
+ nullptr,
+ nullptr,
+ "TheoryEngine::LazyCDProof::getExplanation",
+ false));
}
unsigned i = 0; // Index of the current literal we are processing
@@ -1640,7 +1661,7 @@ TrustNode TheoryEngine::getExplanation(
if (lcp != nullptr)
{
- if (!CDProof::isSame(toExplain.d_node, (*find).second.d_node))
+ if (toExplain.d_node != (*find).second.d_node)
{
Trace("te-proof-exp")
<< "- t-explained cached: " << toExplain.d_node << " by "
@@ -1668,17 +1689,13 @@ TrustNode TheoryEngine::getExplanation(
// should prove the propagation we asked for
Assert(texplanation.getKind() == TrustNodeKind::PROP_EXP
&& texplanation.getProven()[1] == toExplain.d_node);
- // if not a trivial explanation
- if (!CDProof::isSame(texplanation.getNode(), toExplain.d_node))
- {
- // We add it to the list of theory explanations, to be processed at
- // the end of this method. We wait to explain here because it may
- // be that a later explanation may preempt the need for proving this
- // step. For instance, if the conclusion lit is later added as an
- // assumption in the final explanation. This avoids cyclic proofs.
- texplains.push_back(
- std::pair<TheoryId, TrustNode>(toExplain.d_theory, texplanation));
- }
+ // We add it to the list of theory explanations, to be processed at
+ // the end of this method. We wait to explain here because it may
+ // be that a later explanation may preempt the need for proving this
+ // step. For instance, if the conclusion lit is later added as an
+ // assumption in the final explanation. This avoids cyclic proofs.
+ texplains.push_back(
+ std::pair<TheoryId, TrustNode>(toExplain.d_theory, texplanation));
}
Node explanation = texplanation.getNode();
@@ -1756,16 +1773,6 @@ TrustNode TheoryEngine::getExplanation(
Trace("te-proof-exp") << "...already added" << std::endl;
continue;
}
- Node symTConc = CDProof::getSymmFact(tConc);
- if (!symTConc.isNull())
- {
- if (exp.find(symTConc) != exp.end())
- {
- // symmetric direction
- Trace("te-proof-exp") << "...already added (SYMM)" << std::endl;
- continue;
- }
- }
// remember that we've explained this formula, to avoid cycles in lcp
exp.insert(tConc);
TheoryId ttid = it->first;
@@ -1813,7 +1820,7 @@ TrustNode TheoryEngine::getExplanation(
{
Trace("te-proof-exp") << "...via trust THEORY_LEMMA" << std::endl;
// otherwise, trusted theory lemma
- Node tidn = builtin::BuiltinProofRuleChecker::mkTheoryIdNode(it->first);
+ Node tidn = builtin::BuiltinProofRuleChecker::mkTheoryIdNode(ttid);
lcp->addStep(proven, PfRule::THEORY_LEMMA, {}, {proven, tidn});
}
std::vector<Node> pfChildren;
@@ -1821,6 +1828,24 @@ TrustNode TheoryEngine::getExplanation(
pfChildren.push_back(proven);
lcp->addStep(tConc, PfRule::MODUS_PONENS, pfChildren, {});
}
+ // If we don't have a step and the conclusion is not part of the
+ // explanation (for unit T-conflicts), it must be by symmetry. We must do
+ // this manually since lcp does not have auto-symmetry enabled due to the
+ // complication mentioned above.
+ if (!lcp->hasStep(conclusion) && exp.find(conclusion) == exp.end())
+ {
+ Node sconc = CDProof::getSymmFact(conclusion);
+ if (!sconc.isNull())
+ {
+ lcp->addStep(conclusion, PfRule::SYMM, {sconc}, {});
+ }
+ else
+ {
+ Assert(false)
+ << "TheoryEngine::getExplanation: no step found for conclusion "
+ << conclusion;
+ }
+ }
// store in the proof generator
TrustNode trn = d_tepg->mkTrustExplain(conclusion, expNode, lcp);
// return the trust node
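Because the LazyCDProof above is now created without automatic symmetry handling, a unit conflict may end up with a proof only of the symmetric form of the required conclusion, which is why the patch adds the explicit PfRule::SYMM step. The toy sketch below models that fallback over a plain map of proof steps; ToyProof, symmFact, and the string encoding of equalities are hypothetical, not cvc5's CDProof API.

#include <cassert>
#include <map>
#include <string>

// Toy model of the fallback added above: when no step proves `conclusion`
// directly, but a step proves its symmetric form (t = s for a goal s = t),
// add an explicit SYMM step instead of relying on automatic symmetry.
struct ToyProof
{
  std::map<std::string, std::string> d_steps;  // conclusion -> rule

  bool hasStep(const std::string& fact) const
  {
    return d_steps.count(fact) != 0;
  }

  void addStep(const std::string& fact, const std::string& rule)
  {
    d_steps[fact] = rule;
  }
};

// Swap the two sides of an equality written as "lhs = rhs".
std::string symmFact(const std::string& eq)
{
  size_t pos = eq.find(" = ");
  return eq.substr(pos + 3) + " = " + eq.substr(0, pos);
}

int main()
{
  ToyProof lcp;
  lcp.addStep("y = x", "ASSUME");
  std::string conclusion = "x = y";
  // The explicit fallback: prove the conclusion from its symmetric form.
  if (!lcp.hasStep(conclusion) && lcp.hasStep(symmFact(conclusion)))
  {
    lcp.addStep(conclusion, "SYMM");
  }
  assert(lcp.hasStep("x = y"));
  return 0;
}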
diff --git a/src/theory/theory_model_builder.cpp b/src/theory/theory_model_builder.cpp
index 1a0549a0a..f50e7e0ee 100644
--- a/src/theory/theory_model_builder.cpp
+++ b/src/theory/theory_model_builder.cpp
@@ -452,7 +452,7 @@ bool TheoryEngineModelBuilder::buildModel(TheoryModel* tm)
for (; !eqcs_i.isFinished(); ++eqcs_i)
{
Node eqc = *eqcs_i;
-
+ Trace("model-builder") << " Processing EQC " << eqc << std::endl;
// Information computed for each equivalence class
// The assigned representative and constant representative
@@ -484,7 +484,7 @@ bool TheoryEngineModelBuilder::buildModel(TheoryModel* tm)
for (; !eqc_i.isFinished(); ++eqc_i)
{
Node n = *eqc_i;
- Trace("model-builder") << " Processing Term: " << n << endl;
+ Trace("model-builder") << " Processing Term: " << n << endl;
// For each term n in this equivalence class, below we register its
// assignable subterms, compute whether it is a constant or assigned
@@ -505,7 +505,7 @@ bool TheoryEngineModelBuilder::buildModel(TheoryModel* tm)
Assert(constRep.isNull());
constRep = n;
Trace("model-builder")
- << " ConstRep( " << eqc << " ) = " << constRep << std::endl;
+ << " ..ConstRep( " << eqc << " ) = " << constRep << std::endl;
// if we have a constant representative, nothing else matters
continue;
}
@@ -522,7 +522,7 @@ bool TheoryEngineModelBuilder::buildModel(TheoryModel* tm)
// these cases here.
rep = itm->second;
Trace("model-builder")
- << " Rep( " << eqc << " ) = " << rep << std::endl;
+ << " ..Rep( " << eqc << " ) = " << rep << std::endl;
}
// (3) Finally, process assignable information
diff --git a/src/theory/uf/eq_proof.cpp b/src/theory/uf/eq_proof.cpp
index 06750c7ed..45a00a620 100644
--- a/src/theory/uf/eq_proof.cpp
+++ b/src/theory/uf/eq_proof.cpp
@@ -1432,7 +1432,7 @@ Node EqProof::addToProof(CDProof* p,
// we obtained for example (= (f (f t1 t2 t3) t4) (f (f t5 t6) t7)), which is
// flattened into the original conclusion (= (f t1 t2 t3 t4) (f t5 t6 t7)) via
// rewriting
- if (conclusion != d_node)
+ if (!CDProof::isSame(conclusion, d_node))
{
Trace("eqproof-conv") << "EqProof::addToProof: add "
<< PfRule::MACRO_SR_PRED_TRANSFORM
diff --git a/src/theory/uf/theory_uf.cpp b/src/theory/uf/theory_uf.cpp
index 1e240a254..5e9cb0a1d 100644
--- a/src/theory/uf/theory_uf.cpp
+++ b/src/theory/uf/theory_uf.cpp
@@ -511,9 +511,8 @@ EqualityStatus TheoryUF::getEqualityStatus(TNode a, TNode b) {
return EQUALITY_FALSE_IN_MODEL;
}
-bool TheoryUF::areCareDisequal(TNode x, TNode y){
- Assert(d_equalityEngine->hasTerm(x));
- Assert(d_equalityEngine->hasTerm(y));
+bool TheoryUF::areCareDisequal(TNode x, TNode y)
+{
if (d_equalityEngine->isTriggerTerm(x, THEORY_UF)
&& d_equalityEngine->isTriggerTerm(y, THEORY_UF))
{
@@ -534,11 +533,14 @@ void TheoryUF::addCarePairs(const TNodeTrie* t1,
unsigned arity,
unsigned depth)
{
+ // Note we use d_state instead of d_equalityEngine in this method in several
+ // places to be robust to cases where the tries have terms that do not
+  // exist in the equality engine, which can be the case in higher-order logic.
if( depth==arity ){
if( t2!=NULL ){
Node f1 = t1->getData();
Node f2 = t2->getData();
- if (!d_equalityEngine->areEqual(f1, f2))
+ if (!d_state.areEqual(f1, f2))
{
Debug("uf::sharing") << "TheoryUf::computeCareGraph(): checking function " << f1 << " and " << f2 << std::endl;
vector< pair<TNode, TNode> > currentPairs;
@@ -546,11 +548,9 @@ void TheoryUF::addCarePairs(const TNodeTrie* t1,
{
TNode x = f1[k];
TNode y = f2[k];
- Assert(d_equalityEngine->hasTerm(x));
- Assert(d_equalityEngine->hasTerm(y));
- Assert(!d_equalityEngine->areDisequal(x, y, false));
+ Assert(!d_state.areDisequal(x, y));
Assert(!areCareDisequal(x, y));
- if (!d_equalityEngine->areEqual(x, y))
+ if (!d_state.areEqual(x, y))
{
if (d_equalityEngine->isTriggerTerm(x, THEORY_UF)
&& d_equalityEngine->isTriggerTerm(y, THEORY_UF))
@@ -586,7 +586,7 @@ void TheoryUF::addCarePairs(const TNodeTrie* t1,
std::map<TNode, TNodeTrie>::const_iterator it2 = it;
++it2;
for( ; it2 != t1->d_data.end(); ++it2 ){
- if (!d_equalityEngine->areDisequal(it->first, it2->first, false))
+ if (!d_state.areDisequal(it->first, it2->first))
{
if( !areCareDisequal(it->first, it2->first) ){
addCarePairs( &it->second, &it2->second, arity, depth+1 );
@@ -600,7 +600,7 @@ void TheoryUF::addCarePairs(const TNodeTrie* t1,
{
for (const std::pair<const TNode, TNodeTrie>& tt2 : t2->d_data)
{
- if (!d_equalityEngine->areDisequal(tt1.first, tt2.first, false))
+ if (!d_state.areDisequal(tt1.first, tt2.first))
{
if (!areCareDisequal(tt1.first, tt2.first))
{
@@ -618,11 +618,14 @@ void TheoryUF::computeCareGraph() {
{
return;
}
+ NodeManager* nm = NodeManager::currentNM();
// Use term indexing. We build separate indices for APPLY_UF and HO_APPLY.
// We maintain indices per operator for the former, and indices per
// function type for the latter.
Debug("uf::sharing") << "TheoryUf::computeCareGraph(): Build term indices..."
<< std::endl;
+ // temporary keep set for higher-order indexing below
+ std::vector<Node> keep;
std::map<Node, TNodeTrie> index;
std::map<TypeNode, TNodeTrie> hoIndex;
std::map<Node, size_t> arity;
@@ -645,6 +648,25 @@ void TheoryUF::computeCareGraph() {
Node op = app.getOperator();
index[op].addTerm(app, reps);
arity[op] = reps.size();
+ if (logicInfo().isHigherOrder() && d_equalityEngine->hasTerm(op))
+ {
+ // Since we use a lazy app-completion scheme for equating fully
+ // and partially applied versions of terms, we must add all
+ // sub-chains to the HO index if the operator of this term occurs
+ // in a higher-order context in the equality engine. In other words,
+ // for (f a b c), this will add the terms:
+ // (HO_APPLY f a), (HO_APPLY (HO_APPLY f a) b),
+ // (HO_APPLY (HO_APPLY (HO_APPLY f a) b) c) to the higher-order
+ // term index for consideration when computing care pairs.
+ Node curr = op;
+ for (const Node& c : app)
+ {
+ Node happ = nm->mkNode(kind::HO_APPLY, curr, c);
+ hoIndex[curr.getType()].addTerm(happ, {curr, c});
+ curr = happ;
+ keep.push_back(happ);
+ }
+ }
}
else
{
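The comment in the hunk above describes how, under higher-order logic, a fully applied term such as (f a b c) contributes all of its partial applications to the HO_APPLY term index. The small sketch below spells out that chain construction over plain strings; it is only an illustration, not cvc5's node representation.

#include <iostream>
#include <string>
#include <vector>

// App-completion chain: a fully applied term f(a, b, c) induces the curried
// partial applications (HO_APPLY f a), (HO_APPLY (HO_APPLY f a) b), ...,
// each of which is registered for care-pair computation.
std::vector<std::string> hoApplyChain(const std::string& op,
                                      const std::vector<std::string>& args)
{
  std::vector<std::string> chain;
  std::string curr = op;
  for (const std::string& a : args)
  {
    curr = "(HO_APPLY " + curr + " " + a + ")";
    chain.push_back(curr);
  }
  return chain;
}

int main()
{
  for (const std::string& t : hoApplyChain("f", {"a", "b", "c"}))
  {
    std::cout << t << std::endl;
  }
  // (HO_APPLY f a)
  // (HO_APPLY (HO_APPLY f a) b)
  // (HO_APPLY (HO_APPLY (HO_APPLY f a) b) c)
  return 0;
}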
diff --git a/test/api/CMakeLists.txt b/test/api/CMakeLists.txt
index 4811cca1b..f6c1cf8df 100644
--- a/test/api/CMakeLists.txt
+++ b/test/api/CMakeLists.txt
@@ -52,6 +52,7 @@ cvc5_add_api_test(two_solvers)
cvc5_add_api_test(issue5074)
cvc5_add_api_test(issue4889)
cvc5_add_api_test(issue6111)
+cvc5_add_api_test(proj-issue306)
# if we've built using libedit, then we want the interactive shell tests
if (USE_EDITLINE)
diff --git a/test/api/proj-issue306.cpp b/test/api/proj-issue306.cpp
new file mode 100644
index 000000000..664536a0b
--- /dev/null
+++ b/test/api/proj-issue306.cpp
@@ -0,0 +1,21 @@
+#include "api/cpp/cvc5.h"
+
+using namespace cvc5::api;
+
+int main(void)
+{
+ Solver slv;
+ slv.setOption("check-proofs", "true");
+ slv.setOption("proof-check", "eager");
+ Sort s1 = slv.getBooleanSort();
+ Sort s3 = slv.getStringSort();
+ Term t1 = slv.mkConst(s3, "_x0");
+ Term t3 = slv.mkConst(s1, "_x2");
+ Term t11 = slv.mkString("");
+ Term t14 = slv.mkConst(s3, "_x11");
+ Term t42 = slv.mkTerm(Kind::ITE, t3, t14, t1);
+ Term t58 = slv.mkTerm(Kind::STRING_LEQ, t14, t11);
+ Term t95 = slv.mkTerm(Kind::EQUAL, t14, t42);
+ slv.assertFormula(t95);
+ slv.checkSatAssuming({t58});
+}
diff --git a/test/python/unit/api/test_solver.py b/test/python/unit/api/test_solver.py
index 6052a057f..04a275741 100644
--- a/test/python/unit/api/test_solver.py
+++ b/test/python/unit/api/test_solver.py
@@ -422,6 +422,17 @@ def test_mk_floating_point(solver):
with pytest.raises(RuntimeError):
slv.mkFloatingPoint(3, 5, t1)
+def test_mk_cardinality_constraint(solver):
+ su = solver.mkUninterpretedSort("u")
+ si = solver.getIntegerSort()
+ solver.mkCardinalityConstraint(su, 3)
+ with pytest.raises(RuntimeError):
+ solver.mkEmptySet(solver.mkCardinalityConstraint(si, 3))
+ with pytest.raises(RuntimeError):
+ solver.mkEmptySet(solver.mkCardinalityConstraint(su, 0))
+ slv = pycvc5.Solver()
+ with pytest.raises(RuntimeError):
+ slv.mkCardinalityConstraint(su, 3)
def test_mk_empty_set(solver):
slv = pycvc5.Solver()
diff --git a/test/regress/CMakeLists.txt b/test/regress/CMakeLists.txt
index 5dd456b9f..a3fd70683 100644
--- a/test/regress/CMakeLists.txt
+++ b/test/regress/CMakeLists.txt
@@ -449,6 +449,7 @@ set(regress_0_tests
regress0/cores/issue4971-2.smt2
regress0/cores/issue4971-3.smt2
regress0/cores/issue5079.smt2
+ regress0/cores/issue5234-uc-ua.smt2
regress0/cores/issue5238.smt2
regress0/cores/issue5902.smt2
regress0/cores/issue5908.smt2
@@ -650,12 +651,16 @@ set(regress_0_tests
regress0/ho/issue4990-care-graph.smt2
regress0/ho/issue5233-part1-usort-owner.smt2
regress0/ho/issue5371.smt2
+ regress0/ho/issue5741-1-cg-model.smt2
+ regress0/ho/issue5741-3-cg-model.smt2
+ regress0/ho/issue5744-cg-model.smt2
regress0/ho/issue6526.smt2
regress0/ho/issue6536.smt2
regress0/ho/ite-apply-eq.smt2
regress0/ho/lambda-equality-non-canon.smt2
regress0/ho/match-middle.smt2
regress0/ho/modulo-func-equality.smt2
+ regress0/ho/qgu-fuzz-ho-1-dd.smt2
regress0/ho/shadowing-defs.smt2
regress0/ho/simple-matching-partial.smt2
regress0/ho/simple-matching.smt2
@@ -774,6 +779,7 @@ set(regress_0_tests
regress0/options/set-and-get-options.smt2
regress0/options/statistics.smt2
regress0/options/stream-printing.smt2
+ regress0/options/version.smt2
regress0/parallel-let.smt2
regress0/parser/as.smt2
regress0/parser/bv_arity_smt2.6.smt2
@@ -856,8 +862,11 @@ set(regress_0_tests
regress0/proofs/open-pf-datatypes.smt2
regress0/proofs/open-pf-if-unordered-iff.smt2
regress0/proofs/open-pf-rederivation.smt2
+ regress0/proofs/project-issue317-inc-sat-conflictlit.smt2
regress0/proofs/qgu-fuzz-1-bool-sat.smt2
regress0/proofs/qgu-fuzz-2-bool-chainres-checking.smt2
+ regress0/proofs/qgu-fuzz-3-chainres-checking.smt2
+ regress0/proofs/qgu-fuzz-4-bool-chainres-postprocessing-singleton.smt2
regress0/proofs/scope.smt2
regress0/proofs/trust-subs-eq-open.smt2
regress0/push-pop/boolean/fuzz_12.smt2
@@ -890,6 +899,7 @@ set(regress_0_tests
regress0/push-pop/issue1986.smt2
regress0/push-pop/issue2137.min.smt2
regress0/push-pop/issue6535-inc-solve.smt2
+ regress0/push-pop/issue7479-global-decls.smt2
regress0/push-pop/quant-fun-proc-unfd.smt2
regress0/push-pop/real-as-int-incremental.smt2
regress0/push-pop/simple_unsat_cores.smt2
@@ -992,6 +1002,8 @@ set(regress_0_tests
regress0/rels/join-eq-u.cvc.smt2
regress0/rels/joinImg_0.cvc.smt2
regress0/rels/oneLoc_no_quant-int_0_1.cvc.smt2
+ regress0/rels/qgu-fuzz-relations-1.smt2
+ regress0/rels/qgu-fuzz-relations-1-dd.smt2
regress0/rels/rel_1tup_0.cvc.smt2
regress0/rels/rel_complex_0.cvc.smt2
regress0/rels/rel_complex_1.cvc.smt2
@@ -1234,6 +1246,7 @@ set(regress_0_tests
regress0/strings/norn-31.smt2
regress0/strings/norn-simp-rew.smt2
regress0/strings/parser-syms.cvc.smt2
+ regress0/strings/proj-issue316-regexp-ite.smt2
regress0/strings/re_diff.smt2
regress0/strings/re-in-rewrite.smt2
regress0/strings/re-syntax.smt2
@@ -1452,6 +1465,7 @@ set(regress_0_tests
regress0/unconstrained/geq.smt2
regress0/unconstrained/gt.smt2
regress0/unconstrained/issue4644.smt2
+ regress0/unconstrained/issue4656-bool-term-vars.smt2
regress0/unconstrained/ite.smt2
regress0/unconstrained/leq.smt2
regress0/unconstrained/lt.smt2
@@ -1569,9 +1583,14 @@ set(regress_1_tests
regress1/bags/duplicate_removal1.smt2
regress1/bags/duplicate_removal2.smt2
regress1/bags/emptybag1.smt2
+ regress1/bags/fuzzy1.smt2
+ regress1/bags/fuzzy2.smt2
regress1/bags/intersection_min1.smt2
regress1/bags/intersection_min2.smt2
regress1/bags/issue5759.smt2
+ regress1/bags/map1.smt2
+ regress1/bags/map2.smt2
+ regress1/bags/map3.smt2
regress1/bags/subbag1.smt2
regress1/bags/subbag2.smt2
regress1/bags/union_disjoint.smt2
@@ -1700,6 +1719,9 @@ set(regress_1_tests
regress1/ho/issue4065-no-rep.smt2
regress1/ho/issue4092-sinf.smt2
regress1/ho/issue4134-sinf.smt2
+ regress1/ho/issue4758.smt2
+ regress1/ho/issue5078-small.smt2
+ regress1/ho/issue5201-1.smt2
regress1/ho/issue5741-3.smt2
regress1/ho/NUM638^1.smt2
regress1/ho/NUM925^1.p
@@ -1796,6 +1818,7 @@ set(regress_1_tests
regress1/proofs/macro-res-exp-crowding-lit-inside-unit.smt2
regress1/proofs/macro-res-exp-singleton-after-elimCrowd.smt2
regress1/proofs/qgu-fuzz-1-strings-pp.smt2
+ regress1/proofs/qgu-fuzz-arrays-1-dd-te-auto.smt2
regress1/proofs/quant-alpha-eq.smt2
regress1/proofs/sat-trivial-cycle.smt2
regress1/proofs/unsat-cores-proofs.smt2
@@ -1954,6 +1977,7 @@ set(regress_1_tests
regress1/quantifiers/issue6699-nc-shadow.smt2
regress1/quantifiers/issue6775-vts-int.smt2
regress1/quantifiers/issue6845-nl-lemma-tc.smt2
+ regress1/quantifiers/issue7385-sygus-inst-i.smt2
regress1/quantifiers/issue993.smt2
regress1/quantifiers/javafe.ast.StmtVec.009.smt2
regress1/quantifiers/lia-witness-div-pp.smt2
@@ -2257,6 +2281,7 @@ set(regress_1_tests
regress1/strings/issue6653-4-rre.smt2
regress1/strings/issue6653-rre.smt2
regress1/strings/issue6653-rre-small.smt2
+ regress1/strings/issue6766-re-elim-bv.smt2
regress1/strings/issue6777-seq-nth-eval-cm.smt2
regress1/strings/issue6913.smt2
regress1/strings/issue6973-dup-lemma-conc.smt2
@@ -2272,6 +2297,7 @@ set(regress_1_tests
regress1/strings/nf-ff-contains-abs.smt2
regress1/strings/no-lazy-pp-quant.smt2
regress1/strings/non_termination_regular_expression4.smt2
+ regress1/strings/non-terminating-rewrite-aent.smt2
regress1/strings/norn-13.smt2
regress1/strings/norn-360.smt2
regress1/strings/norn-ab.smt2
@@ -2280,6 +2306,7 @@ set(regress_1_tests
regress1/strings/nt6-dd.smt2
regress1/strings/nterm-re-inter-sigma.smt2
regress1/strings/open-pf-merge.smt2
+ regress1/strings/pattern1.smt2
regress1/strings/pierre150331.smt2
regress1/strings/policy_variable.smt2
regress1/strings/pre_ctn_no_skolem_share.smt2
@@ -2631,6 +2658,7 @@ set(regress_2_tests
regress2/sygus/pbe_bvurem.sy
regress2/sygus/process-10-vars-2fun.sy
regress2/sygus/process-arg-invariance.sy
+ regress2/sygus/qgu-bools.sy
regress2/sygus/real-grammar-neg.sy
regress2/sygus/sets-fun-test.sy
regress2/sygus/sumn_recur_synth.sy
diff --git a/test/regress/regress0/cores/issue5234-uc-ua.smt2 b/test/regress/regress0/cores/issue5234-uc-ua.smt2
new file mode 100644
index 000000000..ab31ec71f
--- /dev/null
+++ b/test/regress/regress0/cores/issue5234-uc-ua.smt2
@@ -0,0 +1,12 @@
+; EXPECT: unsat
+; EXPECT: ()
+(set-option :incremental true)
+(set-option :check-unsat-cores true)
+(set-option :produce-unsat-assumptions true)
+(set-logic ALL)
+(declare-const a Bool)
+(declare-const b Bool)
+(declare-const c Bool)
+(assert (distinct a b c))
+(check-sat-assuming (c))
+(get-unsat-assumptions)
diff --git a/test/regress/regress0/ho/issue5741-1-cg-model.smt2 b/test/regress/regress0/ho/issue5741-1-cg-model.smt2
new file mode 100644
index 000000000..8989acdc4
--- /dev/null
+++ b/test/regress/regress0/ho/issue5741-1-cg-model.smt2
@@ -0,0 +1,18 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(declare-fun a ((_ BitVec 32) (_ BitVec 32)) (_ BitVec 32))
+(declare-fun b () (_ BitVec 32))
+(declare-fun c () (_ BitVec 1))
+(declare-fun d () (_ BitVec 32))
+(declare-fun e () (_ BitVec 32))
+(declare-fun f () (_ BitVec 32))
+(declare-fun g ((_ BitVec 32) (_ BitVec 32)) (_ BitVec 32))
+(declare-fun h () (_ BitVec 32))
+(declare-fun i () (_ BitVec 32))
+(declare-fun j () (_ BitVec 1))
+(declare-fun k () (_ BitVec 32))
+(assert (= b (a d e)))
+(assert (= f (g h i)))
+(assert (distinct j (ite (= i k) (_ bv1 1) (_ bv0 1))))
+(assert (= (ite (= i b) (_ bv1 1) (_ bv0 1)) j (ite (= c (_ bv0 1)) (_ bv1 1) (_ bv0 1))))
+(check-sat)
diff --git a/test/regress/regress0/ho/issue5741-3-cg-model.smt2 b/test/regress/regress0/ho/issue5741-3-cg-model.smt2
new file mode 100644
index 000000000..abc4b76a6
--- /dev/null
+++ b/test/regress/regress0/ho/issue5741-3-cg-model.smt2
@@ -0,0 +1,5 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(declare-fun p ((_ BitVec 17) (_ BitVec 16)) Bool)
+(assert (p (_ bv0 17) ((_ sign_extend 15) (ite (p (_ bv0 17) (_ bv0 16)) (_ bv1 1) (_ bv0 1)))))
+(check-sat)
diff --git a/test/regress/regress0/ho/issue5744-cg-model.smt2 b/test/regress/regress0/ho/issue5744-cg-model.smt2
new file mode 100644
index 000000000..5650351cd
--- /dev/null
+++ b/test/regress/regress0/ho/issue5744-cg-model.smt2
@@ -0,0 +1,7 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(declare-fun r4 () Real)
+(declare-fun ufrb5 (Real Real Real Real Real) Bool)
+(assert (ufrb5 0.0 0.0 0.0 0.0 0))
+(assert (ufrb5 (+ r4 r4) 0.0 1 0.0 0.0))
+(check-sat)
diff --git a/test/regress/regress0/ho/qgu-fuzz-ho-1-dd.smt2 b/test/regress/regress0/ho/qgu-fuzz-ho-1-dd.smt2
new file mode 100644
index 000000000..840db92a9
--- /dev/null
+++ b/test/regress/regress0/ho/qgu-fuzz-ho-1-dd.smt2
@@ -0,0 +1,6 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(declare-const b (-> Int Int Int))
+(declare-const c (-> Int Int))
+(assert (and (= c (b 1)) (= c (b 0)) (> (b 1 0) 0) (> 0 (b 0 1)) (= 0 (c (b 0 0)))))
+(check-sat)
diff --git a/test/regress/regress0/options/version.smt2 b/test/regress/regress0/options/version.smt2
new file mode 100644
index 000000000..755b6ccca
--- /dev/null
+++ b/test/regress/regress0/options/version.smt2
@@ -0,0 +1,3 @@
+; COMMAND-LINE: --version
+; EXPECT: This is cvc5 version
+; SCRUBBER: grep -o "This is cvc5 version" \ No newline at end of file
diff --git a/test/regress/regress0/proofs/project-issue317-inc-sat-conflictlit.smt2 b/test/regress/regress0/proofs/project-issue317-inc-sat-conflictlit.smt2
new file mode 100644
index 000000000..0f7f89651
--- /dev/null
+++ b/test/regress/regress0/proofs/project-issue317-inc-sat-conflictlit.smt2
@@ -0,0 +1,7 @@
+; EXPECT: unsat
+; EXPECT: unsat
+(set-logic ALL)
+(declare-const __ (_ BitVec 1))
+(set-option :incremental true)
+(check-sat-assuming ((bvsgt ((_ sign_extend 42) (bvcomp ((_ zero_extend 10) __) ((_ zero_extend 10) __))) (_ bv0 43))))
+(check-sat-assuming ((bvsgt ((_ sign_extend 42) (bvcomp ((_ zero_extend 10) __) ((_ zero_extend 10) __))) (_ bv0 43)))) \ No newline at end of file
diff --git a/test/regress/regress0/proofs/qgu-fuzz-3-chainres-checking.smt2 b/test/regress/regress0/proofs/qgu-fuzz-3-chainres-checking.smt2
new file mode 100644
index 000000000..27836734b
--- /dev/null
+++ b/test/regress/regress0/proofs/qgu-fuzz-3-chainres-checking.smt2
@@ -0,0 +1,7 @@
+; EXPECT: unsat
+(set-logic ALL)
+(declare-fun a () Bool)
+(declare-fun c () Bool)
+(declare-fun d () Bool)
+(assert (and (= a (ite (or c d) d a)) (not (ite d a false)) (ite c a d)))
+(check-sat) \ No newline at end of file
diff --git a/test/regress/regress0/proofs/qgu-fuzz-4-bool-chainres-postprocessing-singleton.smt2 b/test/regress/regress0/proofs/qgu-fuzz-4-bool-chainres-postprocessing-singleton.smt2
new file mode 100644
index 000000000..8eef0674b
--- /dev/null
+++ b/test/regress/regress0/proofs/qgu-fuzz-4-bool-chainres-postprocessing-singleton.smt2
@@ -0,0 +1,7 @@
+; EXPECT: unsat
+(set-logic ALL)
+(declare-fun b () Bool)
+(declare-fun c () Bool)
+(declare-fun d () Bool)
+(assert (and (or d (ite c false true)) (not (= d (or b c))) (= d (ite c d (not d)))))
+(check-sat)
diff --git a/test/regress/regress0/push-pop/issue7479-global-decls.smt2 b/test/regress/regress0/push-pop/issue7479-global-decls.smt2
new file mode 100644
index 000000000..ddf89960e
--- /dev/null
+++ b/test/regress/regress0/push-pop/issue7479-global-decls.smt2
@@ -0,0 +1,8 @@
+; COMMAND-LINE: -i
+; EXPECT: unsat
+(set-logic ALL)
+(set-option :global-declarations true)
+(define-fun y () Bool (> 0 0))
+(assert y)
+(push)
+(check-sat)
diff --git a/test/regress/regress0/rels/qgu-fuzz-relations-1-dd.smt2 b/test/regress/regress0/rels/qgu-fuzz-relations-1-dd.smt2
new file mode 100644
index 000000000..52ee0b1c0
--- /dev/null
+++ b/test/regress/regress0/rels/qgu-fuzz-relations-1-dd.smt2
@@ -0,0 +1,5 @@
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun d () (Tuple Int Int))
+(assert (= (as emptyset (Set (Tuple Int Int))) (join (singleton (tuple 1 0)) (singleton d))))
+(check-sat)
diff --git a/test/regress/regress0/rels/qgu-fuzz-relations-1.smt2 b/test/regress/regress0/rels/qgu-fuzz-relations-1.smt2
new file mode 100644
index 000000000..b489ce65b
--- /dev/null
+++ b/test/regress/regress0/rels/qgu-fuzz-relations-1.smt2
@@ -0,0 +1,8 @@
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun a () (Set (Tuple Int Int)))
+(declare-fun b () (Set (Tuple Int Int)))
+(declare-fun c () Int)
+(declare-fun d () (Tuple Int Int))
+(assert (and (= b (singleton (tuple 1 0))) (= a (join b (transpose a))) (= a (join b (tclosure a))) (= a (join b (singleton d)))))
+(check-sat)
diff --git a/test/regress/regress0/strings/proj-issue316-regexp-ite.smt2 b/test/regress/regress0/strings/proj-issue316-regexp-ite.smt2
new file mode 100644
index 000000000..e19accd36
--- /dev/null
+++ b/test/regress/regress0/strings/proj-issue316-regexp-ite.smt2
@@ -0,0 +1,10 @@
+; COMMAND-LINE: --theoryof-mode=type
+; COMMAND-LINE: --theoryof-mode=term
+; SCRUBBER: grep -o "ITE branches of type RegLan are currently not supported"
+; EXPECT: ITE branches of type RegLan are currently not supported
+; EXIT: 1
+(set-logic QF_SLIA)
+(declare-const b Bool)
+(declare-const x String)
+(assert (str.in_re x (ite b re.none re.allchar)))
+(check-sat)
diff --git a/test/regress/regress0/unconstrained/issue4656-bool-term-vars.smt2 b/test/regress/regress0/unconstrained/issue4656-bool-term-vars.smt2
new file mode 100644
index 000000000..cd6154464
--- /dev/null
+++ b/test/regress/regress0/unconstrained/issue4656-bool-term-vars.smt2
@@ -0,0 +1,12 @@
+(set-logic QF_AUFBVLIA)
+(set-info :status unsat)
+(declare-fun a (Bool) Bool)
+(declare-fun b (Bool) Bool)
+(declare-fun c (Bool) Bool)
+(declare-fun d () Bool)
+(declare-fun e () Bool)
+(declare-fun f () Bool)
+(assert (distinct (a d) (a e)))
+(assert (distinct (b e) (b f)))
+(assert (distinct (c f) (c d)))
+(check-sat)
diff --git a/test/regress/regress1/bags/fuzzy1.smt2 b/test/regress/regress1/bags/fuzzy1.smt2
new file mode 100644
index 000000000..f9fee0ec4
--- /dev/null
+++ b/test/regress/regress1/bags/fuzzy1.smt2
@@ -0,0 +1,10 @@
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun a () (Bag (Tuple Int Int)))
+(declare-fun b () (Bag (Tuple Int Int)))
+(declare-fun c () Int) ; c here is zero
+(assert
+ (and
+ (= b (difference_subtract b a)) ; b is empty
+ (= a (bag (tuple c 0) 1)))) ; a = {|(<0, 0>, 1)|}
+(check-sat)
diff --git a/test/regress/regress1/bags/fuzzy2.smt2 b/test/regress/regress1/bags/fuzzy2.smt2
new file mode 100644
index 000000000..31da47f53
--- /dev/null
+++ b/test/regress/regress1/bags/fuzzy2.smt2
@@ -0,0 +1,15 @@
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun a () (Bag (Tuple Int Int)))
+(declare-fun b () (Bag (Tuple Int Int)))
+(declare-fun c () Int)
+(declare-fun d () (Tuple Int Int))
+(assert
+ (let ((D (bag d c))) ; when c = zero, then D is empty
+ (and
+ (= a (bag (tuple 1 1) c)) ; when c = zero, then a is empty
+ (= a (union_max a D))
+ (= a (difference_subtract a (bag d 1)))
+ (= a (union_disjoint a D))
+ (= a (intersection_min a D)))))
+(check-sat)
diff --git a/test/regress/regress1/bags/map.smt2 b/test/regress/regress1/bags/map1.smt2
index 54d671415..54d671415 100644
--- a/test/regress/regress1/bags/map.smt2
+++ b/test/regress/regress1/bags/map1.smt2
diff --git a/test/regress/regress1/bags/map2.smt2 b/test/regress/regress1/bags/map2.smt2
new file mode 100644
index 000000000..faed04caa
--- /dev/null
+++ b/test/regress/regress1/bags/map2.smt2
@@ -0,0 +1,9 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(set-option :fmf-bound true)
+(declare-fun A () (Bag Int))
+(declare-fun B () (Bag Int))
+(declare-fun f (Int) Int)
+(assert (= B (bag.map f A)))
+(assert (= (bag.count (- 2) B) 57))
+(check-sat)
diff --git a/test/regress/regress1/bags/map3.smt2 b/test/regress/regress1/bags/map3.smt2
new file mode 100644
index 000000000..637815fa5
--- /dev/null
+++ b/test/regress/regress1/bags/map3.smt2
@@ -0,0 +1,10 @@
+(set-logic HO_ALL)
+(set-info :status unsat)
+(set-option :fmf-bound true)
+(declare-fun A () (Bag Int))
+(declare-fun B () (Bag Int))
+(define-fun f ((x Int)) Int (+ x 1))
+(assert (= B (bag.map f A)))
+(assert (= (bag.count (- 2) B) 57))
+(assert (= A (as emptybag (Bag Int)) ))
+(check-sat)
diff --git a/test/regress/regress1/ho/issue4758.smt2 b/test/regress/regress1/ho/issue4758.smt2
new file mode 100644
index 000000000..dab284c11
--- /dev/null
+++ b/test/regress/regress1/ho/issue4758.smt2
@@ -0,0 +1,6 @@
+(set-logic HO_ALL)
+(set-info :status sat)
+(declare-fun a () Real)
+(declare-fun b (Real Real) Real)
+(assert (> (b a 0) (b (- a) 1)))
+(check-sat)
diff --git a/test/regress/regress1/ho/issue5078-small.smt2 b/test/regress/regress1/ho/issue5078-small.smt2
new file mode 100644
index 000000000..21077434a
--- /dev/null
+++ b/test/regress/regress1/ho/issue5078-small.smt2
@@ -0,0 +1,8 @@
+(set-logic HO_QF_UFLIA)
+(set-info :status sat)
+(declare-fun a (Int Int) Int)
+(declare-fun d () Int)
+(declare-fun e () Int)
+(assert (= (a d 0) (a 0 1)))
+(assert (= d (mod e 3)))
+(check-sat)
diff --git a/test/regress/regress1/ho/issue5201-1.smt2 b/test/regress/regress1/ho/issue5201-1.smt2
new file mode 100644
index 000000000..9f6e058da
--- /dev/null
+++ b/test/regress/regress1/ho/issue5201-1.smt2
@@ -0,0 +1,20 @@
+(set-logic HO_QF_UFLIA)
+(set-info :status sat)
+(declare-fun a () Int)
+(declare-fun b (Int Int) Int)
+(declare-fun c (Int Int) Int)
+(declare-fun d () Int)
+(declare-fun e () Int)
+(declare-fun f () Int)
+(declare-fun g () Int)
+(declare-fun i () Int)
+(declare-fun j () Int)
+(declare-fun k () Int)
+(assert (= d (b j d) a))
+(assert (= e (c e i)))
+(assert (= (b k f) a))
+(assert (= d (+ g 4)))
+(assert (= j (* g 5)))
+(assert (= e (+ g 5)))
+(assert (= k 0))
+(check-sat)
diff --git a/test/regress/regress1/proofs/qgu-fuzz-arrays-1-dd-te-auto.smt2 b/test/regress/regress1/proofs/qgu-fuzz-arrays-1-dd-te-auto.smt2
new file mode 100644
index 000000000..b55fb3d49
--- /dev/null
+++ b/test/regress/regress1/proofs/qgu-fuzz-arrays-1-dd-te-auto.smt2
@@ -0,0 +1,7 @@
+(set-logic ALL)
+(set-info :status unsat)
+(declare-const x Int)
+(declare-fun b () (Array Int Int))
+(declare-fun y () Int)
+(assert (and (= b (store b x y)) (= b (store b y y)) (= y (ite (> y 0) 0 y)) (= (store b 0 0) (store (store b y 1) 1 1))))
+(check-sat)
diff --git a/test/regress/regress1/push-pop/arith_lra_01.smt2 b/test/regress/regress1/push-pop/arith_lra_01.smt2
index 02e22d685..b16bbdda0 100644
--- a/test/regress/regress1/push-pop/arith_lra_01.smt2
+++ b/test/regress/regress1/push-pop/arith_lra_01.smt2
@@ -1,4 +1,4 @@
-; COMMAND-LINE: --incremental --no-produce-proofs
+; COMMAND-LINE: --incremental
; EXPECT: sat
; EXPECT: sat
; EXPECT: sat
diff --git a/test/regress/regress1/push-pop/fuzz_3_6.smt2 b/test/regress/regress1/push-pop/fuzz_3_6.smt2
index 4ad684402..1901016c2 100644
--- a/test/regress/regress1/push-pop/fuzz_3_6.smt2
+++ b/test/regress/regress1/push-pop/fuzz_3_6.smt2
@@ -1,4 +1,4 @@
-; COMMAND-LINE: --incremental --no-produce-proofs
+; COMMAND-LINE: --incremental
; EXPECT: sat
; EXPECT: sat
; EXPECT: unsat
diff --git a/test/regress/regress1/quantifiers/issue7385-sygus-inst-i.smt2 b/test/regress/regress1/quantifiers/issue7385-sygus-inst-i.smt2
new file mode 100644
index 000000000..26fc1cf7e
--- /dev/null
+++ b/test/regress/regress1/quantifiers/issue7385-sygus-inst-i.smt2
@@ -0,0 +1,9 @@
+; COMMAND-LINE: -i
+; EXPECT: sat
+(set-logic NIRA)
+(push)
+(assert (exists ((q29 Int) (q30 Bool) (q35 Bool)) (= (= 0 q29) (= q35 q30))))
+(push)
+(pop)
+(pop)
+(check-sat)
diff --git a/test/regress/regress1/quantifiers/symmetric_unsat_7.smt2 b/test/regress/regress1/quantifiers/symmetric_unsat_7.smt2
index 7249e87aa..465408cc5 100644
--- a/test/regress/regress1/quantifiers/symmetric_unsat_7.smt2
+++ b/test/regress/regress1/quantifiers/symmetric_unsat_7.smt2
@@ -1,4 +1,4 @@
-; COMMAND-LINE: --no-check-unsat-cores --no-produce-proofs
+; COMMAND-LINE: --no-check-unsat-cores
; EXPECT: unsat
; Note we require disabling proofs/unsat cores due to timeouts in nightlies
diff --git a/test/regress/regress1/strings/issue6766-re-elim-bv.smt2 b/test/regress/regress1/strings/issue6766-re-elim-bv.smt2
new file mode 100644
index 000000000..13677838b
--- /dev/null
+++ b/test/regress/regress1/strings/issue6766-re-elim-bv.smt2
@@ -0,0 +1,9 @@
+; COMMAND-LINE: --strings-exp --re-elim --re-elim-agg
+; EXPECT: sat
+(set-logic ALL)
+(declare-fun a () String)
+(declare-fun b () String)
+(assert
+ (str.in_re (str.++ a (ite (str.in_re (str.++ a "BA" b) (re.++ (re.* (str.to_re "A")) (str.to_re "B"))) b a))
+ (re.diff (re.* (str.to_re "A")) (str.to_re ""))))
+(check-sat)
diff --git a/test/regress/regress1/strings/non-terminating-rewrite-aent.smt2 b/test/regress/regress1/strings/non-terminating-rewrite-aent.smt2
new file mode 100644
index 000000000..211209255
--- /dev/null
+++ b/test/regress/regress1/strings/non-terminating-rewrite-aent.smt2
@@ -0,0 +1,11 @@
+; COMMAND-LINE: --strings-exp
+; EXPECT: sat
+(set-logic ALL)
+(set-info :status sat)
+(declare-fun a () String)
+(declare-fun b () String)
+(declare-fun c () String)
+(assert
+(let ((_let_1 (str.len a))) (let ((_let_2 (str.len b))) (let ((_let_3 (+ _let_1 (* (- 1) _let_2)))) (let ((_let_4 (ite (>= _let_3 1) (str.substr a 0 _let_3) (str.substr b 0 (+ (* (- 1) _let_1) _let_2))))) (let ((_let_5 (str.len _let_4))) (let ((_let_6 (str.len c))) (let ((_let_7 (+ _let_6 (* (- 1) _let_5)))) (let ((_let_8 (ite (>= _let_7 0) (str.substr c _let_5 _let_7) (str.substr _let_4 _let_6 (+ (* (- 1) _let_6) _let_5))))) (let ((_let_9 (str.len _let_8))) (let ((_let_10 (ite (>= _let_1 _let_9) (str.substr a _let_9 (- _let_1 _let_9)) (str.substr _let_8 _let_1 (- _let_9 _let_1))))) (and (= _let_8 (str.++ a _let_10)) (not (= "" _let_10)) (>= (str.len _let_10) 1))))))))))))
+)
+(check-sat)
diff --git a/test/regress/regress1/strings/pattern1.smt2 b/test/regress/regress1/strings/pattern1.smt2
new file mode 100644
index 000000000..b75fdb9be
--- /dev/null
+++ b/test/regress/regress1/strings/pattern1.smt2
@@ -0,0 +1,73 @@
+(set-option :produce-models true)
+(set-logic QF_SLIA)
+(set-info :status sat)
+(declare-const x String)
+
+(assert
+ (str.in_re
+ x
+ (re.++
+ (str.to_re "pref")
+ (re.* re.allchar)
+ (str.to_re "a")
+ (re.* re.allchar)
+ (str.to_re "b")
+ (re.* re.allchar)
+ (str.to_re "c")
+ (re.* re.allchar)
+ (str.to_re "d")
+ (re.* re.allchar)
+ (str.to_re "e")
+ (re.* re.allchar)
+ (str.to_re "f")
+ (re.* re.allchar)
+ (str.to_re "g")
+ (re.* re.allchar)
+ (str.to_re "h")
+ (re.* re.allchar)
+ (str.to_re "i")
+ (re.* re.allchar)
+ (str.to_re "j")
+ (re.* re.allchar)
+ (str.to_re "k")
+ (re.* re.allchar)
+ (str.to_re "l")
+ (re.* re.allchar)
+ (str.to_re "m")
+ (re.* re.allchar)
+ (str.to_re "n")
+ (re.* re.allchar)
+ (str.to_re "o")
+ (re.* re.allchar)
+ (str.to_re "p")
+ (re.* re.allchar)
+ (str.to_re "q")
+ (re.* re.allchar)
+ (str.to_re "r")
+ (re.* re.allchar)
+ (str.to_re "s")
+ (re.* re.allchar)
+ (str.to_re "t")
+ (re.* re.allchar)
+ (str.to_re "u")
+ (re.* re.allchar)
+ (str.to_re "v")
+ (re.* re.allchar)
+ (str.to_re "w")
+ (re.* re.allchar)
+ (str.to_re "x")
+ (re.* re.allchar)
+ (str.to_re "y")
+ (re.* re.allchar)
+ (str.to_re "z")
+ (re.* re.allchar))))
+
+(assert
+ (or
+ (= x "pref0a-b-c-de")
+ (str.in_re x (re.++ (str.to_re "prefix") (re.* re.allchar)))
+ (str.in_re x (re.++ (re.* re.allchar) (str.to_re "test") (re.* re.allchar)))))
+
+(check-sat)
+
+
diff --git a/test/regress/regress2/sygus/qgu-bools.sy b/test/regress/regress2/sygus/qgu-bools.sy
new file mode 100644
index 000000000..35445e927
--- /dev/null
+++ b/test/regress/regress2/sygus/qgu-bools.sy
@@ -0,0 +1,21 @@
+; COMMAND-LINE: --sygus-query-gen=unsat --sygus-abort-size=2
+; EXPECT: (error "Maximum term size (2) for enumerative SyGuS exceeded.")
+; SCRUBBER: grep -v -E '(\(define-fun|\(query)'
+; EXIT: 1
+(set-logic ALL)
+(synth-fun P ((a Bool) (b Bool) (c Bool)) Bool
+((Start Bool))
+(
+(Start Bool (
+a
+b
+c
+(not Start)
+(= Start Start)
+(or Start Start)
+(ite Start Start Start)
+))
+))
+
+
+(check-synth)
diff --git a/test/regress/run_regression.py b/test/regress/run_regression.py
index 35e718b0f..8e22b04a8 100755
--- a/test/regress/run_regression.py
+++ b/test/regress/run_regression.py
@@ -260,7 +260,7 @@ class DumpTester(Tester):
"--parse-only",
"-o",
"raw-benchmark",
- f"--output-lang={ext_to_lang[benchmark_info.benchmark_ext]}",
+ "--output-lang={}".format(ext_to_lang[benchmark_info.benchmark_ext]),
]
dump_output, _, _ = run_process(
[benchmark_info.cvc5_binary]
@@ -282,7 +282,7 @@ class DumpTester(Tester):
command_line_args=benchmark_info.command_line_args
+ [
"--parse-only",
- f"--lang={ext_to_lang[benchmark_info.benchmark_ext]}",
+ "--lang={}".format(ext_to_lang[benchmark_info.benchmark_ext]),
],
benchmark_basename=tmpf.name,
expected_output="",
diff --git a/test/unit/api/java/SolverTest.java b/test/unit/api/java/SolverTest.java
index 8511827a8..1f88add2d 100644
--- a/test/unit/api/java/SolverTest.java
+++ b/test/unit/api/java/SolverTest.java
@@ -387,16 +387,6 @@ class SolverTest
assertDoesNotThrow(() -> d_solver.mkRoundingMode(RoundingMode.ROUND_TOWARD_ZERO));
}
- @Test void mkUninterpretedConst() throws CVC5ApiException
- {
- assertDoesNotThrow(() -> d_solver.mkUninterpretedConst(d_solver.getBooleanSort(), 1));
- assertThrows(
- CVC5ApiException.class, () -> d_solver.mkUninterpretedConst(d_solver.getNullSort(), 1));
- Solver slv = new Solver();
- assertThrows(
- CVC5ApiException.class, () -> slv.mkUninterpretedConst(d_solver.getBooleanSort(), 1));
- }
-
@Test void mkAbstractValue() throws CVC5ApiException
{
assertDoesNotThrow(() -> d_solver.mkAbstractValue(("1")));
@@ -434,6 +424,20 @@ class SolverTest
assertThrows(CVC5ApiException.class, () -> slv.mkFloatingPoint(3, 5, t1));
}
+ @Test void mkCardinalityConstraint() throws CVC5ApiException
+ {
+ Sort su = d_solver.mkUninterpretedSort("u");
+ Sort si = d_solver.getIntegerSort();
+ assertDoesNotThrow(() -> d_solver.mkCardinalityConstraint(su, 3));
+ assertThrows(
+ CVC5ApiException.class, () -> d_solver.mkCardinalityConstraint(si, 3));
+ assertThrows(
+ CVC5ApiException.class, () -> d_solver.mkCardinalityConstraint(su, 0));
+ Solver slv = new Solver();
+ assertThrows(
+ CVC5ApiException.class, () -> slv.mkCardinalityConstraint(su, 3));
+ }
+
@Test void mkEmptySet() throws CVC5ApiException
{
Solver slv = new Solver();
diff --git a/test/unit/theory/sequences_rewriter_white.cpp b/test/unit/theory/sequences_rewriter_white.cpp
index 32122e619..bcac315a7 100644
--- a/test/unit/theory/sequences_rewriter_white.cpp
+++ b/test/unit/theory/sequences_rewriter_white.cpp
@@ -248,8 +248,7 @@ TEST_F(TestTheoryWhiteSequencesRewriter, rewrite_substr)
a,
d_nodeManager->mkNode(kind::PLUS, x, d_nodeManager->mkConst(Rational(1))),
x);
- res = sr.rewriteSubstr(n);
- ASSERT_EQ(res, empty);
+ sameNormalForm(n, empty);
// (str.substr "A" (+ x (str.len s2)) x) -> ""
n = d_nodeManager->mkNode(
@@ -258,8 +257,7 @@ TEST_F(TestTheoryWhiteSequencesRewriter, rewrite_substr)
d_nodeManager->mkNode(
kind::PLUS, x, d_nodeManager->mkNode(kind::STRING_LENGTH, s)),
x);
- res = sr.rewriteSubstr(n);
- ASSERT_EQ(res, empty);
+ sameNormalForm(n, empty);
// (str.substr "A" x y) -> (str.substr "A" x y)
n = d_nodeManager->mkNode(kind::STRING_SUBSTR, a, x, y);
@@ -271,14 +269,13 @@ TEST_F(TestTheoryWhiteSequencesRewriter, rewrite_substr)
abcd,
d_nodeManager->mkNode(kind::PLUS, x, three),
x);
- res = sr.rewriteSubstr(n);
- ASSERT_EQ(res, empty);
+ sameNormalForm(n, empty);
// (str.substr "ABCD" (+ x 2) x) -> (str.substr "ABCD" (+ x 2) x)
n = d_nodeManager->mkNode(
kind::STRING_SUBSTR, abcd, d_nodeManager->mkNode(kind::PLUS, x, two), x);
res = sr.rewriteSubstr(n);
- ASSERT_EQ(res, n);
+ sameNormalForm(res, n);
// (str.substr (str.substr s x x) x x) -> ""
n = d_nodeManager->mkNode(kind::STRING_SUBSTR,