author     Andres Noetzli <andres.noetzli@gmail.com>  2020-11-29 17:06:17 -0800
committer  Andres Noetzli <andres.noetzli@gmail.com>  2020-11-29 17:06:17 -0800
commit     54c025e22f1424a4dceb83cea014917cac67a2fd (patch)
tree       d99549134dd1d0f83300904969c0df0d9dd2f897
parent     5ddfd0dc28eb21008fdef36909b751ae8e810ba3 (diff)
parent     865d1ee48de8e4a21d1e97c707be46c34918367d (diff)
Merge remote-tracking branch 'origin/master' into fixClangWarnings
-rw-r--r--  src/CMakeLists.txt | 14
-rw-r--r--  src/api/cvc4cpp.cpp | 2
-rw-r--r--  src/expr/symbol_manager.cpp | 63
-rw-r--r--  src/expr/symbol_manager.h | 17
-rw-r--r--  src/expr/term_conversion_proof_generator.cpp | 4
-rw-r--r--  src/expr/type.h | 2
-rw-r--r--  src/expr/type_node.cpp | 4
-rw-r--r--  src/parser/parser.cpp | 15
-rw-r--r--  src/printer/ast/ast_printer.cpp | 19
-rw-r--r--  src/printer/ast/ast_printer.h | 20
-rw-r--r--  src/printer/cvc/cvc_printer.cpp | 85
-rw-r--r--  src/printer/cvc/cvc_printer.h | 20
-rw-r--r--  src/printer/let_binding.cpp | 4
-rw-r--r--  src/printer/printer.cpp | 30
-rw-r--r--  src/printer/printer.h | 27
-rw-r--r--  src/printer/smt2/smt2_printer.cpp | 173
-rw-r--r--  src/printer/smt2/smt2_printer.h | 21
-rw-r--r--  src/printer/tptp/tptp_printer.cpp | 19
-rw-r--r--  src/printer/tptp/tptp_printer.h | 18
-rw-r--r--  src/smt/check_models.cpp | 14
-rw-r--r--  src/smt/command.cpp | 41
-rw-r--r--  src/smt/command.h | 5
-rw-r--r--  src/smt/dump_manager.cpp | 76
-rw-r--r--  src/smt/dump_manager.h | 30
-rw-r--r--  src/smt/expand_definitions.cpp | 278
-rw-r--r--  src/smt/expand_definitions.h | 76
-rw-r--r--  src/smt/model.cpp | 28
-rw-r--r--  src/smt/model.h | 44
-rw-r--r--  src/smt/node_command.cpp | 23
-rw-r--r--  src/smt/node_command.h | 5
-rw-r--r--  src/smt/preprocessor.cpp | 10
-rw-r--r--  src/smt/preprocessor.h | 3
-rw-r--r--  src/smt/process_assertions.cpp | 234
-rw-r--r--  src/smt/process_assertions.h | 17
-rw-r--r--  src/smt/smt_engine.cpp | 42
-rw-r--r--  src/smt/smt_engine.h | 22
-rw-r--r--  src/theory/arith/nl/cad_solver.cpp | 8
-rw-r--r--  src/theory/arith/nl/nl_lemma_utils.h | 19
-rw-r--r--  src/theory/arith/nl/nonlinear_extension.h | 4
-rw-r--r--  src/theory/arith/nl/transcendental/exponential_solver.cpp | 206
-rw-r--r--  src/theory/arith/nl/transcendental/exponential_solver.h | 111
-rw-r--r--  src/theory/arith/nl/transcendental/sine_solver.cpp | 333
-rw-r--r--  src/theory/arith/nl/transcendental/sine_solver.h | 181
-rw-r--r--  src/theory/arith/nl/transcendental/taylor_generator.cpp | 317
-rw-r--r--  src/theory/arith/nl/transcendental/taylor_generator.h | 118
-rw-r--r--  src/theory/arith/nl/transcendental/transcendental_solver.cpp | 402
-rw-r--r--  src/theory/arith/nl/transcendental/transcendental_solver.h | 204
-rw-r--r--  src/theory/arith/nl/transcendental/transcendental_state.cpp | 394
-rw-r--r--  src/theory/arith/nl/transcendental/transcendental_state.h | 214
-rw-r--r--  src/theory/arith/nl/transcendental_solver.cpp | 1474
-rw-r--r--  src/theory/arith/nl/transcendental_solver.h | 423
-rw-r--r--  src/theory/datatypes/sygus_datatype_utils.cpp | 19
-rw-r--r--  src/theory/datatypes/sygus_datatype_utils.h | 20
-rw-r--r--  src/theory/datatypes/theory_datatypes.cpp | 17
-rw-r--r--  src/theory/quantifiers/sygus/sygus_eval_unfold.cpp | 3
-rw-r--r--  src/theory/quantifiers/sygus/sygus_repair_const.cpp | 4
-rw-r--r--  src/theory/smt_engine_subsolver.cpp | 2
-rw-r--r--  src/theory/strings/regexp_entail.cpp | 12
-rw-r--r--  src/theory/strings/theory_strings_preprocess.cpp | 4
-rw-r--r--  src/theory/theory_preprocessor.cpp | 26
-rw-r--r--  src/theory/uf/ho_extension.cpp | 4
-rw-r--r--  src/theory/uf/ho_extension.h | 4
-rw-r--r--  src/theory/uf/theory_uf.cpp | 12
-rw-r--r--  src/theory/uf/theory_uf.h | 2
-rw-r--r--  test/regress/CMakeLists.txt | 10
-rw-r--r--  test/regress/regress0/datatypes/dt-param-2.6-print.smt2 | 1
-rw-r--r--  test/regress/regress0/nl/issue5534-no-assertions.smt2 | 12
-rw-r--r--  test/regress/regress1/arith/issue4985-model-success.smt2 | 7
-rw-r--r--  test/regress/regress1/arith/issue4985b-model-success.smt2 | 6
-rw-r--r--  test/regress/regress1/ho/NUM925^1.p | 2
-rw-r--r--  test/regress/regress1/ho/nested_lambdas-AGT034^2.smt2 | 2
-rw-r--r--  test/regress/regress1/quantifiers/issue5378-witness.smt2 | 5
-rw-r--r--  test/regress/regress1/strings/issue5483-pp-leq.smt2 | 9
-rw-r--r--  test/regress/regress1/strings/issue5510-re-consume.smt2 | 9
-rw-r--r--  test/regress/regress1/strings/issue5520-re-consume.smt2 | 12
-rw-r--r--  test/regress/regress2/ho/involved_parsing_ALG248^3.p | 2
-rw-r--r--  test/unit/expr/attribute_white.h | 2
-rw-r--r--  test/unit/expr/type_node_white.h | 2
-rw-r--r--  test/unit/parser/parser_black.h | 2
-rw-r--r--  test/unit/preprocessing/pass_bv_gauss_white.h | 2
-rw-r--r--  test/unit/prop/cnf_stream_white.h | 4
-rw-r--r--  test/unit/theory/evaluator_white.h | 2
-rw-r--r--  test/unit/theory/sequences_rewriter_white.h | 5
-rw-r--r--  test/unit/theory/theory_arith_white.h | 2
-rw-r--r--  test/unit/theory/theory_bags_normal_form_white.h | 2
-rw-r--r--  test/unit/theory/theory_bags_rewriter_white.h | 2
-rw-r--r--  test/unit/theory/theory_bags_type_rules_white.h | 2
-rw-r--r--  test/unit/theory/theory_bv_rewriter_white.h | 5
-rw-r--r--  test/unit/theory/theory_bv_white.h | 2
-rw-r--r--  test/unit/theory/theory_engine_white.h | 2
-rw-r--r--  test/unit/theory/theory_quantifiers_bv_instantiator_white.h | 2
-rw-r--r--  test/unit/theory/theory_quantifiers_bv_inverter_white.h | 2
-rw-r--r--  test/unit/theory/theory_sets_type_enumerator_white.h | 2
-rw-r--r--  test/unit/theory/theory_sets_type_rules_white.h | 2
-rw-r--r--  test/unit/theory/theory_strings_skolem_cache_black.h | 4
-rw-r--r--  test/unit/theory/theory_strings_word_white.h | 4
-rw-r--r--  test/unit/theory/theory_white.h | 2
-rw-r--r--  test/unit/theory/type_enumerator_white.h | 2
98 files changed, 3484 insertions, 2691 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 366e72af0..869699ac5 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -219,6 +219,8 @@ libcvc4_add_sources(
smt/dump.h
smt/dump_manager.cpp
smt/dump_manager.h
+ smt/expand_definitions.cpp
+ smt/expand_definitions.h
smt/listeners.cpp
smt/listeners.h
smt/logic_exception.h
@@ -387,8 +389,16 @@ libcvc4_add_sources(
theory/arith/nl/stats.h
theory/arith/nl/strategy.cpp
theory/arith/nl/strategy.h
- theory/arith/nl/transcendental_solver.cpp
- theory/arith/nl/transcendental_solver.h
+ theory/arith/nl/transcendental/exponential_solver.cpp
+ theory/arith/nl/transcendental/exponential_solver.h
+ theory/arith/nl/transcendental/sine_solver.cpp
+ theory/arith/nl/transcendental/sine_solver.h
+ theory/arith/nl/transcendental/taylor_generator.cpp
+ theory/arith/nl/transcendental/taylor_generator.h
+ theory/arith/nl/transcendental/transcendental_solver.cpp
+ theory/arith/nl/transcendental/transcendental_solver.h
+ theory/arith/nl/transcendental/transcendental_state.cpp
+ theory/arith/nl/transcendental/transcendental_state.h
theory/arith/normal_form.cpp
theory/arith/normal_form.h
theory/arith/operator_elim.cpp
diff --git a/src/api/cvc4cpp.cpp b/src/api/cvc4cpp.cpp
index 9b79b5c45..5eabcfe62 100644
--- a/src/api/cvc4cpp.cpp
+++ b/src/api/cvc4cpp.cpp
@@ -3093,7 +3093,7 @@ size_t RoundingModeHashFunction::operator()(const RoundingMode& rm) const
Solver::Solver(Options* opts)
{
d_exprMgr.reset(new ExprManager);
- d_smtEngine.reset(new SmtEngine(d_exprMgr.get(), opts));
+ d_smtEngine.reset(new SmtEngine(d_exprMgr->getNodeManager(), opts));
d_smtEngine->setSolver(this);
Options& o = d_smtEngine->getOptions();
d_rng.reset(new Random(o[options::seed]));
diff --git a/src/expr/symbol_manager.cpp b/src/expr/symbol_manager.cpp
index 9a689a380..283682ccd 100644
--- a/src/expr/symbol_manager.cpp
+++ b/src/expr/symbol_manager.cpp
@@ -16,6 +16,7 @@
#include "context/cdhashmap.h"
#include "context/cdhashset.h"
+#include "context/cdlist.h"
#include "context/cdo.h"
using namespace CVC4::context;
@@ -28,12 +29,16 @@ class SymbolManager::Implementation
{
using TermStringMap = CDHashMap<api::Term, std::string, api::TermHashFunction>;
using TermSet = CDHashSet<api::Term, api::TermHashFunction>;
+ using SortList = CDList<api::Sort>;
+ using TermList = CDList<api::Term>;
public:
Implementation()
: d_context(),
d_names(&d_context),
d_namedAsserts(&d_context),
+ d_declareSorts(&d_context),
+ d_declareTerms(&d_context),
d_hasPushedScope(&d_context, false)
{
}
@@ -53,6 +58,14 @@ class SymbolManager::Implementation
bool areAssertions = false) const;
/** get expression names */
std::map<api::Term, std::string> getExpressionNames(bool areAssertions) const;
+ /** get model declare sorts */
+ std::vector<api::Sort> getModelDeclareSorts() const;
+ /** get model declare terms */
+ std::vector<api::Term> getModelDeclareTerms() const;
+ /** Add declared sort to the list of model declarations. */
+ void addModelDeclarationSort(api::Sort s);
+ /** Add declared term to the list of model declarations. */
+ void addModelDeclarationTerm(api::Term t);
/** reset */
void reset();
/** Push a scope in the expression names. */
@@ -67,6 +80,10 @@ class SymbolManager::Implementation
TermStringMap d_names;
/** The set of terms with assertion names */
TermSet d_namedAsserts;
+ /** Declared sorts (for model printing) */
+ SortList d_declareSorts;
+ /** Declared terms (for model printing) */
+ TermList d_declareTerms;
/**
* Have we pushed a scope (e.g. a let or quantifier) in the current context?
*/
@@ -150,6 +167,34 @@ SymbolManager::Implementation::getExpressionNames(bool areAssertions) const
return emap;
}
+std::vector<api::Sort> SymbolManager::Implementation::getModelDeclareSorts()
+ const
+{
+ std::vector<api::Sort> declareSorts(d_declareSorts.begin(),
+ d_declareSorts.end());
+ return declareSorts;
+}
+
+std::vector<api::Term> SymbolManager::Implementation::getModelDeclareTerms()
+ const
+{
+ std::vector<api::Term> declareTerms(d_declareTerms.begin(),
+ d_declareTerms.end());
+ return declareTerms;
+}
+
+void SymbolManager::Implementation::addModelDeclarationSort(api::Sort s)
+{
+ Trace("sym-manager") << "addModelDeclarationSort " << s << std::endl;
+ d_declareSorts.push_back(s);
+}
+
+void SymbolManager::Implementation::addModelDeclarationTerm(api::Term t)
+{
+ Trace("sym-manager") << "addModelDeclarationTerm " << t << std::endl;
+ d_declareTerms.push_back(t);
+}
+
void SymbolManager::Implementation::pushScope(bool isUserContext)
{
Trace("sym-manager") << "pushScope, isUserContext = " << isUserContext
@@ -218,6 +263,24 @@ std::map<api::Term, std::string> SymbolManager::getExpressionNames(
{
return d_implementation->getExpressionNames(areAssertions);
}
+std::vector<api::Sort> SymbolManager::getModelDeclareSorts() const
+{
+ return d_implementation->getModelDeclareSorts();
+}
+std::vector<api::Term> SymbolManager::getModelDeclareTerms() const
+{
+ return d_implementation->getModelDeclareTerms();
+}
+
+void SymbolManager::addModelDeclarationSort(api::Sort s)
+{
+ d_implementation->addModelDeclarationSort(s);
+}
+
+void SymbolManager::addModelDeclarationTerm(api::Term t)
+{
+ d_implementation->addModelDeclarationTerm(t);
+}
size_t SymbolManager::scopeLevel() const
{
diff --git a/src/expr/symbol_manager.h b/src/expr/symbol_manager.h
index a5dac1067..37dd51ef8 100644
--- a/src/expr/symbol_manager.h
+++ b/src/expr/symbol_manager.h
@@ -92,6 +92,23 @@ class CVC4_PUBLIC SymbolManager
*/
std::map<api::Term, std::string> getExpressionNames(
bool areAssertions = false) const;
+ /**
+ * @return The sorts we have declared that should be printed in the model.
+ */
+ std::vector<api::Sort> getModelDeclareSorts() const;
+ /**
+ * @return The terms we have declared that should be printed in the model.
+ */
+ std::vector<api::Term> getModelDeclareTerms() const;
+ /**
+ * Add declared sort to the list of model declarations.
+ */
+ void addModelDeclarationSort(api::Sort s);
+ /**
+ * Add declared term to the list of model declarations.
+ */
+ void addModelDeclarationTerm(api::Term t);
+
//---------------------------- end named expressions
/**
* Get the scope level of the symbol table.
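
As a rough illustration of the SymbolManager model-declaration API introduced above, a front end could record user declarations and later read them back when assembling a model response. This is only a sketch; the helper functions and the userSort/userConst symbols are hypothetical and not part of this change.

#include "expr/symbol_manager.h"

// Record the symbols a user declares so that a later get-model can print them.
// (Hypothetical helper; userSort/userConst stand in for symbols coming from
// declare-sort / declare-fun commands.)
void recordDeclarations(CVC4::SymbolManager& sm,
                        CVC4::api::Sort userSort,
                        CVC4::api::Term userConst)
{
  sm.addModelDeclarationSort(userSort);
  sm.addModelDeclarationTerm(userConst);
}

// When building the model output, the declarations are read back in
// declaration order:
void collectDeclarations(const CVC4::SymbolManager& sm)
{
  for (const CVC4::api::Sort& s : sm.getModelDeclareSorts())
  {
    // ... hand s to the printer / smt::Model ...
  }
  for (const CVC4::api::Term& t : sm.getModelDeclareTerms())
  {
    // ... hand t to the printer / smt::Model ...
  }
}
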
diff --git a/src/expr/term_conversion_proof_generator.cpp b/src/expr/term_conversion_proof_generator.cpp
index 02613345f..858ca9f64 100644
--- a/src/expr/term_conversion_proof_generator.cpp
+++ b/src/expr/term_conversion_proof_generator.cpp
@@ -138,7 +138,9 @@ Node TConvProofGenerator::registerRewriteStep(Node t, Node s, uint32_t tctx)
// should not rewrite term to two different things
if (!getRewriteStepInternal(thash).isNull())
{
- Assert(getRewriteStepInternal(thash) == s);
+ Assert(getRewriteStepInternal(thash) == s)
+ << identify() << " rewriting " << t << " to both " << s << " and "
+ << getRewriteStepInternal(thash);
return Node::null();
}
d_rewriteMap[thash] = s;
diff --git a/src/expr/type.h b/src/expr/type.h
index 69a8363dc..0b923f027 100644
--- a/src/expr/type.h
+++ b/src/expr/type.h
@@ -33,7 +33,7 @@ class Expr;
class TypeNode;
struct CVC4_PUBLIC ExprManagerMapCollection;
-class CVC4_PUBLIC SmtEngine;
+class SmtEngine;
class CVC4_PUBLIC Datatype;
class Record;
diff --git a/src/expr/type_node.cpp b/src/expr/type_node.cpp
index 0818ca8c1..49fbe73ef 100644
--- a/src/expr/type_node.cpp
+++ b/src/expr/type_node.cpp
@@ -589,13 +589,11 @@ TypeNode TypeNode::commonTypeNode(TypeNode t0, TypeNode t1, bool isLeast) {
case kind::SEQUENCE_TYPE:
case kind::SET_TYPE:
case kind::BAG_TYPE:
+ case kind::SEXPR_TYPE:
{
// we don't support subtyping except for built in types Int and Real.
return TypeNode(); // return null type
}
- case kind::SEXPR_TYPE:
- Unimplemented()
- << "haven't implemented leastCommonType for symbolic expressions yet";
default:
Unimplemented() << "don't have a commonType for types `" << t0
<< "' and `" << t1 << "'";
diff --git a/src/parser/parser.cpp b/src/parser/parser.cpp
index 1fc995fd6..1ca2e1c01 100644
--- a/src/parser/parser.cpp
+++ b/src/parser/parser.cpp
@@ -333,8 +333,7 @@ void Parser::defineParameterizedType(const std::string& name,
api::Sort Parser::mkSort(const std::string& name, uint32_t flags)
{
Debug("parser") << "newSort(" << name << ")" << std::endl;
- api::Sort type =
- api::Sort(d_solver, d_solver->getExprManager()->mkSort(name, flags));
+ api::Sort type = d_solver->mkUninterpretedSort(name);
bool globalDecls = d_symman->getGlobalDeclarations();
defineType(
name, type, globalDecls && !(flags & ExprManager::SORT_FLAG_PLACEHOLDER));
@@ -347,9 +346,7 @@ api::Sort Parser::mkSortConstructor(const std::string& name,
{
Debug("parser") << "newSortConstructor(" << name << ", " << arity << ")"
<< std::endl;
- api::Sort type = api::Sort(
- d_solver,
- d_solver->getExprManager()->mkSortConstructor(name, arity, flags));
+ api::Sort type = d_solver->mkSortConstructorSort(name, arity);
bool globalDecls = d_symman->getGlobalDeclarations();
defineType(name,
vector<api::Sort>(arity),
@@ -379,10 +376,7 @@ api::Sort Parser::mkUnresolvedTypeConstructor(
{
Debug("parser") << "newSortConstructor(P)(" << name << ", " << params.size()
<< ")" << std::endl;
- api::Sort unresolved =
- api::Sort(d_solver,
- d_solver->getExprManager()->mkSortConstructor(
- name, params.size(), ExprManager::SORT_FLAG_PLACEHOLDER));
+ api::Sort unresolved = d_solver->mkSortConstructorSort(name, params.size());
defineType(name, params, unresolved);
api::Sort t = getSort(name, params);
d_unresolved.insert(unresolved);
@@ -644,8 +638,7 @@ api::Term Parser::mkVar(const std::string& name,
const api::Sort& type,
uint32_t flags)
{
- return api::Term(
- d_solver, d_solver->getExprManager()->mkVar(name, type.getType(), flags));
+ return d_solver->mkConst(type, name);
}
//!!!!!!!!!!! temporary
diff --git a/src/printer/ast/ast_printer.cpp b/src/printer/ast/ast_printer.cpp
index 8bf3bd24e..4b9371181 100644
--- a/src/printer/ast/ast_printer.cpp
+++ b/src/printer/ast/ast_printer.cpp
@@ -148,9 +148,17 @@ void AstPrinter::toStream(std::ostream& out, const smt::Model& m) const
out << "Model()";
}
-void AstPrinter::toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const
+void AstPrinter::toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const
+{
+ // shouldn't be called; only the non-Command* version above should be
+ Unreachable();
+}
+
+void AstPrinter::toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const
{
// shouldn't be called; only the non-Command* version above should be
Unreachable();
@@ -272,12 +280,9 @@ void AstPrinter::toStreamCmdDefineFunction(std::ostream& out,
}
void AstPrinter::toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const
{
- out << "DeclareType(" << id << "," << arity << "," << type << ')'
- << std::endl;
+ out << "DeclareType(" << type << ')' << std::endl;
}
void AstPrinter::toStreamCmdDefineType(std::ostream& out,
diff --git a/src/printer/ast/ast_printer.h b/src/printer/ast/ast_printer.h
index ad20ffb79..e4251eba0 100644
--- a/src/printer/ast/ast_printer.h
+++ b/src/printer/ast/ast_printer.h
@@ -62,8 +62,6 @@ class AstPrinter : public CVC4::Printer
/** Print declare-sort command */
void toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const override;
/** Print define-sort command */
@@ -165,9 +163,21 @@ class AstPrinter : public CVC4::Printer
private:
void toStream(std::ostream& out, TNode n, int toDepth) const;
- void toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const override;
+ /**
+ * To stream model sort. This prints the appropriate output for type
+ * tn declared via declare-sort or declare-datatype.
+ */
+ void toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const override;
+
+ /**
+ * To stream model term. This prints the appropriate output for term
+ * n declared via declare-fun.
+ */
+ void toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const override;
}; /* class AstPrinter */
} // namespace ast
diff --git a/src/printer/cvc/cvc_printer.cpp b/src/printer/cvc/cvc_printer.cpp
index 44ff7be10..be530099b 100644
--- a/src/printer/cvc/cvc_printer.cpp
+++ b/src/printer/cvc/cvc_printer.cpp
@@ -1067,20 +1067,23 @@ void CvcPrinter::toStream(std::ostream& out, const CommandStatus* s) const
}/* CvcPrinter::toStream(CommandStatus*) */
-namespace {
-
-void DeclareTypeNodeCommandToStream(std::ostream& out,
- const theory::TheoryModel& model,
- const DeclareTypeNodeCommand& command)
+void CvcPrinter::toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const
{
- TypeNode type_node = command.getType();
- const std::vector<Node>* type_reps =
- model.getRepSet()->getTypeRepsOrNull(type_node);
+ if (!tn.isSort())
+ {
+ out << "ERROR: don't know how to print a non uninterpreted sort in model: "
+ << tn << std::endl;
+ return;
+ }
+ const theory::TheoryModel* tm = m.getTheoryModel();
+ const std::vector<Node>* type_reps = tm->getRepSet()->getTypeRepsOrNull(tn);
if (options::modelUninterpPrint() == options::ModelUninterpPrintMode::DtEnum
- && type_node.isSort() && type_reps != nullptr)
+ && type_reps != nullptr)
{
out << "DATATYPE" << std::endl;
- out << " " << command.getSymbol() << " = ";
+ out << " " << tn << " = ";
for (size_t i = 0; i < type_reps->size(); i++)
{
if (i > 0)
@@ -1091,16 +1094,16 @@ void DeclareTypeNodeCommandToStream(std::ostream& out,
}
out << std::endl << "END;" << std::endl;
}
- else if (type_node.isSort() && type_reps != nullptr)
+ else if (type_reps != nullptr)
{
- out << "% cardinality of " << type_node << " is " << type_reps->size()
+ out << "% cardinality of " << tn << " is " << type_reps->size()
<< std::endl;
- out << command << std::endl;
+ toStreamCmdDeclareType(out, tn);
for (Node type_rep : *type_reps)
{
if (type_rep.isVar())
{
- out << type_rep << " : " << type_node << ";" << std::endl;
+ out << type_rep << " : " << tn << ";" << std::endl;
}
else
{
@@ -1110,21 +1113,15 @@ void DeclareTypeNodeCommandToStream(std::ostream& out,
}
else
{
- out << command << std::endl;
+ toStreamCmdDeclareType(out, tn);
}
}
-void DeclareFunctionNodeCommandToStream(
- std::ostream& out,
- const theory::TheoryModel& model,
- const DeclareFunctionNodeCommand& command)
+void CvcPrinter::toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const
{
- Node n = command.getFunction();
- if (n.getKind() == kind::SKOLEM)
- {
- // don't print out internal stuff
- return;
- }
+ const theory::TheoryModel* tm = m.getTheoryModel();
TypeNode tn = n.getType();
out << n << " : ";
if (tn.isFunction() || tn.isPredicate())
@@ -1146,15 +1143,16 @@ void DeclareFunctionNodeCommandToStream(
}
// We get the value from the theory model directly, which notice
// does not have to go through the standard SmtEngine::getValue interface.
- Node val = model.getValue(n);
+ Node val = tm->getValue(n);
if (options::modelUninterpPrint() == options::ModelUninterpPrintMode::DtEnum
&& val.getKind() == kind::STORE)
{
TypeNode type_node = val[1].getType();
if (tn.isSort())
{
- if (const std::vector<Node>* type_reps =
- model.getRepSet()->getTypeRepsOrNull(type_node))
+ const std::vector<Node>* type_reps =
+ tm->getRepSet()->getTypeRepsOrNull(type_node);
+ if (type_reps != nullptr)
{
Cardinality indexCard(type_reps->size());
val = theory::arrays::TheoryArraysRewriter::normalizeConstant(
@@ -1165,8 +1163,6 @@ void DeclareFunctionNodeCommandToStream(
out << " = " << val << ";" << std::endl;
}
-} // namespace
-
void CvcPrinter::toStream(std::ostream& out, const smt::Model& m) const
{
const theory::TheoryModel* tm = m.getTheoryModel();
@@ -1185,28 +1181,6 @@ void CvcPrinter::toStream(std::ostream& out, const smt::Model& m) const
out << "MODEL END;" << std::endl;
}
-void CvcPrinter::toStream(std::ostream& out,
- const smt::Model& model,
- const NodeCommand* command) const
-{
- const auto* theory_model = model.getTheoryModel();
- AlwaysAssert(theory_model != nullptr);
- if (const auto* declare_type_command =
- dynamic_cast<const DeclareTypeNodeCommand*>(command))
- {
- DeclareTypeNodeCommandToStream(out, *theory_model, *declare_type_command);
- }
- else if (const auto* dfc =
- dynamic_cast<const DeclareFunctionNodeCommand*>(command))
- {
- DeclareFunctionNodeCommandToStream(out, *theory_model, *dfc);
- }
- else
- {
- out << *command << std::endl;
- }
-}
-
void CvcPrinter::toStreamCmdAssert(std::ostream& out, Node n) const
{
out << "ASSERT " << n << ';' << std::endl;
@@ -1322,6 +1296,7 @@ void CvcPrinter::toStreamCmdDeclarationSequence(
{
DeclarationDefinitionCommand* dd =
static_cast<DeclarationDefinitionCommand*>(*i++);
+ Assert(dd != nullptr);
if (i != sequence.cend())
{
out << dd->getSymbol() << ", ";
@@ -1376,20 +1351,18 @@ void CvcPrinter::toStreamCmdDefineFunction(std::ostream& out,
}
void CvcPrinter::toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const
{
+ size_t arity = type.isSortConstructor() ? type.getSortConstructorArity() : 0;
if (arity > 0)
{
- // TODO?
out << "ERROR: Don't know how to print parameterized type declaration "
"in CVC language."
<< std::endl;
}
else
{
- out << id << " : TYPE;" << std::endl;
+ out << type << " : TYPE;" << std::endl;
}
}
diff --git a/src/printer/cvc/cvc_printer.h b/src/printer/cvc/cvc_printer.h
index ee4750a61..b0328bc3c 100644
--- a/src/printer/cvc/cvc_printer.h
+++ b/src/printer/cvc/cvc_printer.h
@@ -63,8 +63,6 @@ class CvcPrinter : public CVC4::Printer
/** Print declare-sort command */
void toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const override;
/** Print define-sort command */
@@ -166,9 +164,21 @@ class CvcPrinter : public CVC4::Printer
private:
void toStream(std::ostream& out, TNode n, int toDepth, bool bracket) const;
- void toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const override;
+ /**
+ * To stream model sort. This prints the appropriate output for type
+ * tn declared via declare-sort or declare-datatype.
+ */
+ void toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const override;
+
+ /**
+ * To stream model term. This prints the appropriate output for term
+ * n declared via declare-fun.
+ */
+ void toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const override;
bool d_cvc3Mode;
}; /* class CvcPrinter */
diff --git a/src/printer/let_binding.cpp b/src/printer/let_binding.cpp
index 439037b64..2efc98318 100644
--- a/src/printer/let_binding.cpp
+++ b/src/printer/let_binding.cpp
@@ -30,8 +30,7 @@ uint32_t LetBinding::getThreshold() const { return d_thresh; }
void LetBinding::process(Node n)
{
- Assert(!n.isNull());
- if (d_thresh == 0)
+ if (n.isNull() || d_thresh == 0)
{
// value of 0 means do not introduce let
return;
@@ -42,7 +41,6 @@ void LetBinding::process(Node n)
void LetBinding::letify(Node n, std::vector<Node>& letList)
{
- Assert(!n.isNull());
// first, push the context
pushScope();
// process the node
diff --git a/src/printer/printer.cpp b/src/printer/printer.cpp
index b24025124..7225721c0 100644
--- a/src/printer/printer.cpp
+++ b/src/printer/printer.cpp
@@ -74,18 +74,34 @@ unique_ptr<Printer> Printer::makePrinter(OutputLanguage lang)
void Printer::toStream(std::ostream& out, const smt::Model& m) const
{
- for(size_t i = 0; i < m.getNumCommands(); ++i) {
- const NodeCommand* cmd = m.getCommand(i);
- const DeclareFunctionNodeCommand* dfc =
- dynamic_cast<const DeclareFunctionNodeCommand*>(cmd);
- if (dfc != NULL && !m.isModelCoreSymbol(dfc->getFunction()))
+ // print the declared sorts
+ const std::vector<TypeNode>& dsorts = m.getDeclaredSorts();
+ for (const TypeNode& tn : dsorts)
+ {
+ toStreamModelSort(out, m, tn);
+ }
+
+ // print the declared terms
+ const std::vector<Node>& dterms = m.getDeclaredTerms();
+ for (const Node& n : dterms)
+ {
+ // take into account model core, independently of the format
+ if (!m.isModelCoreSymbol(n))
{
continue;
}
- toStream(out, m, cmd);
+ toStreamModelTerm(out, m, n);
}
+
}/* Printer::toStream(Model) */
+void Printer::toStreamUsing(OutputLanguage lang,
+ std::ostream& out,
+ const smt::Model& m) const
+{
+ getPrinter(lang)->toStream(out, m);
+}
+
void Printer::toStream(std::ostream& out, const UnsatCore& core) const
{
for(UnsatCore::iterator i = core.begin(); i != core.end(); ++i) {
@@ -160,8 +176,6 @@ void Printer::toStreamCmdDeclareFunction(std::ostream& out,
}
void Printer::toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const
{
printUnknownCommand(out, "declare-sort");
diff --git a/src/printer/printer.h b/src/printer/printer.h
index d32418deb..5bcccedb8 100644
--- a/src/printer/printer.h
+++ b/src/printer/printer.h
@@ -86,8 +86,6 @@ class Printer
/** Print declare-sort command */
virtual void toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const;
/** Print define-sort command */
@@ -266,19 +264,26 @@ class Printer
/** Derived classes can construct, but no one else. */
Printer() {}
- /** write model response to command */
- virtual void toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const = 0;
+ /**
+ * To stream model sort. This prints the appropriate output for type
+ * tn declared via declare-sort or declare-datatype.
+ */
+ virtual void toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const = 0;
+
+ /**
+ * To stream model term. This prints the appropriate output for term
+ * n declared via declare-fun.
+ */
+ virtual void toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const = 0;
/** write model response to command using another language printer */
void toStreamUsing(OutputLanguage lang,
std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const
- {
- getPrinter(lang)->toStream(out, m, c);
- }
+ const smt::Model& m) const;
/**
* Write an error to `out` stating that command `name` is not supported by
diff --git a/src/printer/smt2/smt2_printer.cpp b/src/printer/smt2/smt2_printer.cpp
index 747873bee..9e9500bdb 100644
--- a/src/printer/smt2/smt2_printer.cpp
+++ b/src/printer/smt2/smt2_printer.cpp
@@ -1364,124 +1364,91 @@ void Smt2Printer::toStream(std::ostream& out, const smt::Model& m) const
}
}
-void Smt2Printer::toStream(std::ostream& out,
- const smt::Model& model,
- const NodeCommand* command) const
+void Smt2Printer::toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const
{
- const theory::TheoryModel* theory_model = model.getTheoryModel();
- AlwaysAssert(theory_model != nullptr);
- if (const DeclareTypeNodeCommand* dtc =
- dynamic_cast<const DeclareTypeNodeCommand*>(command))
+ if (!tn.isSort())
{
- // print out the DeclareTypeCommand
- TypeNode tn = dtc->getType();
- if (!tn.isSort())
+ out << "ERROR: don't know how to print non uninterpreted sort in model: "
+ << tn << std::endl;
+ return;
+ }
+ const theory::TheoryModel* tm = m.getTheoryModel();
+ std::vector<Node> elements = tm->getDomainElements(tn);
+ if (options::modelUninterpPrint() == options::ModelUninterpPrintMode::DtEnum)
+ {
+ if (isVariant_2_6(d_variant))
{
- out << (*dtc) << endl;
+ out << "(declare-datatypes ((" << tn << " 0)) (";
}
else
{
- std::vector<Node> elements = theory_model->getDomainElements(tn);
- if (options::modelUninterpPrint()
- == options::ModelUninterpPrintMode::DtEnum)
- {
- if (isVariant_2_6(d_variant))
- {
- out << "(declare-datatypes ((" << (*dtc).getSymbol() << " 0)) (";
- }
- else
- {
- out << "(declare-datatypes () ((" << (*dtc).getSymbol() << " ";
- }
- for (const Node& type_ref : elements)
- {
- out << "(" << type_ref << ")";
- }
- out << ")))" << endl;
- }
- else
- {
- // print the cardinality
- out << "; cardinality of " << tn << " is " << elements.size() << endl;
- if (options::modelUninterpPrint()
- == options::ModelUninterpPrintMode::DeclSortAndFun)
- {
- out << (*dtc) << endl;
- }
- // print the representatives
- for (const Node& trn : elements)
- {
- if (trn.isVar())
- {
- out << "(declare-fun " << quoteSymbol(trn) << " () " << tn << ")"
- << endl;
- }
- else
- {
- out << "; rep: " << trn << endl;
- }
- }
- }
- }
- }
- else if (const DeclareFunctionNodeCommand* dfc =
- dynamic_cast<const DeclareFunctionNodeCommand*>(command))
- {
- // print out the DeclareFunctionCommand
- Node n = dfc->getFunction();
- if ((*dfc).getPrintInModelSetByUser())
- {
- if (!(*dfc).getPrintInModel())
- {
- return;
- }
+ out << "(declare-datatypes () ((" << tn << " ";
}
- else if (n.getKind() == kind::SKOLEM)
+ for (const Node& type_ref : elements)
{
- // don't print out internal stuff
- return;
+ out << "(" << type_ref << ")";
}
- // We get the value from the theory model directly, which notice
- // does not have to go through the standard SmtEngine::getValue interface.
- Node val = theory_model->getValue(n);
- if (val.getKind() == kind::LAMBDA)
+ out << ")))" << endl;
+ return;
+ }
+ // print the cardinality
+ out << "; cardinality of " << tn << " is " << elements.size() << endl;
+ if (options::modelUninterpPrint()
+ == options::ModelUninterpPrintMode::DeclSortAndFun)
+ {
+ toStreamCmdDeclareType(out, tn);
+ }
+ // print the representatives
+ for (const Node& trn : elements)
+ {
+ if (trn.isVar())
{
- TypeNode rangeType = n.getType().getRangeType();
- out << "(define-fun " << n << " " << val[0] << " " << rangeType << " ";
- // call toStream and force its type to be proper
- toStreamCastToType(out, val[1], -1, rangeType);
- out << ")" << endl;
+ out << "(declare-fun " << quoteSymbol(trn) << " () " << tn << ")" << endl;
}
else
{
- if (options::modelUninterpPrint()
- == options::ModelUninterpPrintMode::DtEnum
- && val.getKind() == kind::STORE)
- {
- TypeNode tn = val[1].getType();
- const std::vector<Node>* type_refs =
- theory_model->getRepSet()->getTypeRepsOrNull(tn);
- if (tn.isSort() && type_refs != nullptr)
- {
- Cardinality indexCard(type_refs->size());
- val = theory::arrays::TheoryArraysRewriter::normalizeConstant(
- val, indexCard);
- }
- }
- out << "(define-fun " << n << " () " << n.getType() << " ";
- // call toStream and force its type to be proper
- toStreamCastToType(out, val, -1, n.getType());
- out << ")" << endl;
+ out << "; rep: " << trn << endl;
}
}
- else if (const DeclareDatatypeNodeCommand* declare_datatype_command =
- dynamic_cast<const DeclareDatatypeNodeCommand*>(command))
+}
+
+void Smt2Printer::toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const
+{
+ const theory::TheoryModel* tm = m.getTheoryModel();
+ // We get the value from the theory model directly, which notice
+ // does not have to go through the standard SmtEngine::getValue interface.
+ Node val = tm->getValue(n);
+ if (val.getKind() == kind::LAMBDA)
{
- out << *declare_datatype_command;
+ TypeNode rangeType = n.getType().getRangeType();
+ out << "(define-fun " << n << " " << val[0] << " " << rangeType << " ";
+ // call toStream and force its type to be proper
+ toStreamCastToType(out, val[1], -1, rangeType);
+ out << ")" << endl;
}
else
{
- Unreachable();
+ if (options::modelUninterpPrint() == options::ModelUninterpPrintMode::DtEnum
+ && val.getKind() == kind::STORE)
+ {
+ TypeNode tn = val[1].getType();
+ const std::vector<Node>* type_refs =
+ tm->getRepSet()->getTypeRepsOrNull(tn);
+ if (tn.isSort() && type_refs != nullptr)
+ {
+ Cardinality indexCard(type_refs->size());
+ val = theory::arrays::TheoryArraysRewriter::normalizeConstant(
+ val, indexCard);
+ }
+ }
+ out << "(define-fun " << n << " () " << n.getType() << " ";
+ // call toStream and force its type to be proper
+ toStreamCastToType(out, val, -1, n.getType());
+ out << ")" << endl;
}
}
@@ -1694,11 +1661,13 @@ void Smt2Printer::toStreamCmdDefineFunctionRec(
}
void Smt2Printer::toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const
{
- out << "(declare-sort " << CVC4::quoteSymbol(id) << " " << arity << ")"
+ Assert(type.isSort() || type.isSortConstructor());
+ std::stringstream id;
+ id << type;
+ size_t arity = type.isSortConstructor() ? type.getSortConstructorArity() : 0;
+ out << "(declare-sort " << CVC4::quoteSymbol(id.str()) << " " << arity << ")"
<< std::endl;
}
diff --git a/src/printer/smt2/smt2_printer.h b/src/printer/smt2/smt2_printer.h
index c83a74d97..3d90cee06 100644
--- a/src/printer/smt2/smt2_printer.h
+++ b/src/printer/smt2/smt2_printer.h
@@ -78,8 +78,6 @@ class Smt2Printer : public CVC4::Printer
/** Print declare-sort command */
void toStreamCmdDeclareType(std::ostream& out,
- const std::string& id,
- size_t arity,
TypeNode type) const override;
/** Print define-sort command */
@@ -243,12 +241,25 @@ class Smt2Printer : public CVC4::Printer
TNode n,
int toDepth,
TypeNode tn) const;
- void toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const override;
void toStream(std::ostream& out, const SExpr& sexpr) const;
void toStream(std::ostream& out, const DType& dt) const;
/**
+ * To stream model sort. This prints the appropriate output for type
+ * tn declared via declare-sort or declare-datatype.
+ */
+ void toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const override;
+
+ /**
+ * To stream model term. This prints the appropriate output for term
+ * n declared via declare-fun.
+ */
+ void toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const override;
+
+ /**
* To stream with let binding. This prints n, possibly in the scope
* of letification generated by this method based on lbind.
*/
diff --git a/src/printer/tptp/tptp_printer.cpp b/src/printer/tptp/tptp_printer.cpp
index c93f3593a..f9384b0cb 100644
--- a/src/printer/tptp/tptp_printer.cpp
+++ b/src/printer/tptp/tptp_printer.cpp
@@ -54,20 +54,27 @@ void TptpPrinter::toStream(std::ostream& out, const smt::Model& m) const
: "CandidateFiniteModel");
out << "% SZS output start " << statusName << " for " << m.getInputName()
<< endl;
- for(size_t i = 0; i < m.getNumCommands(); ++i) {
- this->Printer::toStreamUsing(language::output::LANG_SMTLIB_V2_5, out, m, m.getCommand(i));
- }
+ this->Printer::toStreamUsing(language::output::LANG_SMTLIB_V2_5, out, m);
out << "% SZS output end " << statusName << " for " << m.getInputName()
<< endl;
}
-void TptpPrinter::toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const
+void TptpPrinter::toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const
+{
+ // shouldn't be called; only the non-Command* version above should be
+ Unreachable();
+}
+
+void TptpPrinter::toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const
{
// shouldn't be called; only the non-Command* version above should be
Unreachable();
}
+
void TptpPrinter::toStream(std::ostream& out, const UnsatCore& core) const
{
out << "% SZS output start UnsatCore " << std::endl;
diff --git a/src/printer/tptp/tptp_printer.h b/src/printer/tptp/tptp_printer.h
index 449fe409c..38a56bcb5 100644
--- a/src/printer/tptp/tptp_printer.h
+++ b/src/printer/tptp/tptp_printer.h
@@ -44,9 +44,21 @@ class TptpPrinter : public CVC4::Printer
void toStream(std::ostream& out, const UnsatCore& core) const override;
private:
- void toStream(std::ostream& out,
- const smt::Model& m,
- const NodeCommand* c) const override;
+ /**
+ * To stream model sort. This prints the appropriate output for type
+ * tn declared via declare-sort or declare-datatype.
+ */
+ void toStreamModelSort(std::ostream& out,
+ const smt::Model& m,
+ TypeNode tn) const override;
+
+ /**
+ * To stream model term. This prints the appropriate output for term
+ * n declared via declare-fun.
+ */
+ void toStreamModelTerm(std::ostream& out,
+ const smt::Model& m,
+ Node n) const override;
}; /* class TptpPrinter */
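
Across the printers above, the NodeCommand-based model printing hook is replaced by the two pure-virtual methods declared in printer.h. A minimal sketch of what a (hypothetical) Printer subclass now provides could look like this; the other printing overrides a real printer needs are elided, and the output format shown is arbitrary:

#include "printer/printer.h"

class MyPrinter : public CVC4::Printer
{
 private:
  // Called by Printer::toStream(out, Model) for each sort recorded via
  // declare-sort or declare-datatype.
  void toStreamModelSort(std::ostream& out,
                         const CVC4::smt::Model& m,
                         CVC4::TypeNode tn) const override
  {
    out << "; sort " << tn << std::endl;
  }

  // Called for each term recorded via declare-fun; filtering against the
  // model core already happens in the base class.
  void toStreamModelTerm(std::ostream& out,
                         const CVC4::smt::Model& m,
                         CVC4::Node n) const override
  {
    out << "; " << n << " = " << m.getValue(n) << std::endl;
  }
};
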
diff --git a/src/smt/check_models.cpp b/src/smt/check_models.cpp
index 612084de2..56d54eec9 100644
--- a/src/smt/check_models.cpp
+++ b/src/smt/check_models.cpp
@@ -67,24 +67,14 @@ void CheckModels::checkModel(Model* m,
/* substituteUnderQuantifiers = */ false);
Trace("check-model") << "checkModel: Collect substitution..." << std::endl;
- for (size_t k = 0, ncmd = m->getNumCommands(); k < ncmd; ++k)
+ const std::vector<Node>& decTerms = m->getDeclaredTerms();
+ for (const Node& func : decTerms)
{
- const DeclareFunctionNodeCommand* c =
- dynamic_cast<const DeclareFunctionNodeCommand*>(m->getCommand(k));
- Notice() << "SmtEngine::checkModel(): model command " << k << " : "
- << m->getCommand(k)->toString() << std::endl;
- if (c == nullptr)
- {
- // we don't care about DECLARE-DATATYPES, DECLARE-SORT, ...
- Notice() << "SmtEngine::checkModel(): skipping..." << std::endl;
- continue;
- }
// We have a DECLARE-FUN:
//
// We'll first do some checks, then add to our substitution map
// the mapping: function symbol |-> value
- Node func = c->getFunction();
Node val = m->getValue(func);
Notice() << "SmtEngine::checkModel(): adding substitution: " << func
diff --git a/src/smt/command.cpp b/src/smt/command.cpp
index 717d423fe..e6361be9e 100644
--- a/src/smt/command.cpp
+++ b/src/smt/command.cpp
@@ -1069,28 +1069,17 @@ DeclareFunctionCommand::DeclareFunctionCommand(const std::string& id,
api::Sort sort)
: DeclarationDefinitionCommand(id),
d_func(func),
- d_sort(sort),
- d_printInModel(true),
- d_printInModelSetByUser(false)
+ d_sort(sort)
{
}
api::Term DeclareFunctionCommand::getFunction() const { return d_func; }
api::Sort DeclareFunctionCommand::getSort() const { return d_sort; }
-bool DeclareFunctionCommand::getPrintInModel() const { return d_printInModel; }
-bool DeclareFunctionCommand::getPrintInModelSetByUser() const
-{
- return d_printInModelSetByUser;
-}
-
-void DeclareFunctionCommand::setPrintInModel(bool p)
-{
- d_printInModel = p;
- d_printInModelSetByUser = true;
-}
void DeclareFunctionCommand::invoke(api::Solver* solver, SymbolManager* sm)
{
+ // mark that it will be printed in the model
+ sm->addModelDeclarationTerm(d_func);
d_commandStatus = CommandSuccess::instance();
}
@@ -1098,8 +1087,6 @@ Command* DeclareFunctionCommand::clone() const
{
DeclareFunctionCommand* dfc =
new DeclareFunctionCommand(d_symbol, d_func, d_sort);
- dfc->d_printInModel = d_printInModel;
- dfc->d_printInModelSetByUser = d_printInModelSetByUser;
return dfc;
}
@@ -1132,6 +1119,8 @@ size_t DeclareSortCommand::getArity() const { return d_arity; }
api::Sort DeclareSortCommand::getSort() const { return d_sort; }
void DeclareSortCommand::invoke(api::Solver* solver, SymbolManager* sm)
{
+ // mark that it will be printed in the model
+ sm->addModelDeclarationSort(d_sort);
d_commandStatus = CommandSuccess::instance();
}
@@ -1150,8 +1139,8 @@ void DeclareSortCommand::toStream(std::ostream& out,
size_t dag,
OutputLanguage language) const
{
- Printer::getPrinter(language)->toStreamCmdDeclareType(
- out, d_sort.toString(), d_arity, d_sort.getTypeNode());
+ Printer::getPrinter(language)->toStreamCmdDeclareType(out,
+ d_sort.getTypeNode());
}
/* -------------------------------------------------------------------------- */
@@ -1438,8 +1427,8 @@ void SetUserAttributeCommand::invoke(api::Solver* solver, SymbolManager* sm)
{
solver->getSmtEngine()->setUserAttribute(
d_attr,
- d_term.getExpr(),
- api::termVectorToExprs(d_termValues),
+ d_term.getNode(),
+ api::termVectorToNodes(d_termValues),
d_strValue);
}
d_commandStatus = CommandSuccess::instance();
@@ -1693,6 +1682,18 @@ void GetModelCommand::invoke(api::Solver* solver, SymbolManager* sm)
try
{
d_result = solver->getSmtEngine()->getModel();
+ // set the model declarations, which determines what is printed in the model
+ d_result->clearModelDeclarations();
+ std::vector<api::Sort> declareSorts = sm->getModelDeclareSorts();
+ for (const api::Sort& s : declareSorts)
+ {
+ d_result->addDeclarationSort(s.getTypeNode());
+ }
+ std::vector<api::Term> declareTerms = sm->getModelDeclareTerms();
+ for (const api::Term& t : declareTerms)
+ {
+ d_result->addDeclarationTerm(t.getNode());
+ }
d_commandStatus = CommandSuccess::instance();
}
catch (RecoverableModalException& e)
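
For context, a sketch of how these commands interact when a driver answers get-model; the x, intSort, solver and symman objects are assumed to exist, and error handling is omitted:

// Declarations register themselves with the SymbolManager when invoked, so a
// later get-model knows what to print.
CVC4::DeclareFunctionCommand decl("x", x, intSort);
decl.invoke(solver, symman);  // calls symman->addModelDeclarationTerm(x)

// GetModelCommand::invoke copies the recorded sorts/terms into the smt::Model
// via clearModelDeclarations()/addDeclarationSort()/addDeclarationTerm(), so
// the printer only iterates getDeclaredSorts()/getDeclaredTerms().
CVC4::GetModelCommand getModel;
getModel.invoke(solver, symman);
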
diff --git a/src/smt/command.h b/src/smt/command.h
index 96a6938d6..0b86f3539 100644
--- a/src/smt/command.h
+++ b/src/smt/command.h
@@ -387,16 +387,11 @@ class CVC4_PUBLIC DeclareFunctionCommand : public DeclarationDefinitionCommand
protected:
api::Term d_func;
api::Sort d_sort;
- bool d_printInModel;
- bool d_printInModelSetByUser;
public:
DeclareFunctionCommand(const std::string& id, api::Term func, api::Sort sort);
api::Term getFunction() const;
api::Sort getSort() const;
- bool getPrintInModel() const;
- bool getPrintInModelSetByUser() const;
- void setPrintInModel(bool p);
void invoke(api::Solver* solver, SymbolManager* sm) override;
Command* clone() const override;
diff --git a/src/smt/dump_manager.cpp b/src/smt/dump_manager.cpp
index 9b7fba5a2..9d3031b4d 100644
--- a/src/smt/dump_manager.cpp
+++ b/src/smt/dump_manager.cpp
@@ -24,8 +24,6 @@ namespace smt {
DumpManager::DumpManager(context::UserContext* u)
: d_fullyInited(false),
- d_modelGlobalCommands(),
- d_modelCommands(u),
d_dumpCommands()
{
}
@@ -33,8 +31,6 @@ DumpManager::DumpManager(context::UserContext* u)
DumpManager::~DumpManager()
{
d_dumpCommands.clear();
- d_modelCommandsAlloc.clear();
- d_modelGlobalCommands.clear();
}
void DumpManager::finishInit()
@@ -49,8 +45,10 @@ void DumpManager::finishInit()
d_fullyInited = true;
}
-
-void DumpManager::resetAssertions() { d_modelGlobalCommands.clear(); }
+void DumpManager::resetAssertions()
+{
+ // currently, do nothing
+}
void DumpManager::addToModelCommandAndDump(const NodeCommand& c,
uint32_t flags,
@@ -58,29 +56,6 @@ void DumpManager::addToModelCommandAndDump(const NodeCommand& c,
const char* dumpTag)
{
Trace("smt") << "SMT addToModelCommandAndDump(" << c << ")" << std::endl;
- // If we aren't yet fully inited, the user might still turn on
- // produce-models. So let's keep any commands around just in
- // case. This is useful in two cases: (1) SMT-LIBv1 auto-declares
- // sort "U" in QF_UF before setLogic() is run and we still want to
- // support finding card(U) with --finite-model-find, and (2) to
- // decouple SmtEngine and ExprManager if the user does a few
- // ExprManager::mkSort() before SmtEngine::setOption("produce-models")
- // and expects to find their cardinalities in the model.
- if ((!d_fullyInited || options::produceModels())
- && (flags & ExprManager::VAR_FLAG_DEFINED) == 0)
- {
- if (flags & ExprManager::VAR_FLAG_GLOBAL)
- {
- d_modelGlobalCommands.push_back(std::unique_ptr<NodeCommand>(c.clone()));
- }
- else
- {
- NodeCommand* cc = c.clone();
- d_modelCommands.push_back(cc);
- // also remember for memory management purposes
- d_modelCommandsAlloc.push_back(std::unique_ptr<NodeCommand>(cc));
- }
- }
if (Dump.isOn(dumpTag))
{
if (d_fullyInited)
@@ -97,48 +72,7 @@ void DumpManager::addToModelCommandAndDump(const NodeCommand& c,
void DumpManager::setPrintFuncInModel(Node f, bool p)
{
Trace("setp-model") << "Set printInModel " << f << " to " << p << std::endl;
- for (std::unique_ptr<NodeCommand>& c : d_modelGlobalCommands)
- {
- DeclareFunctionNodeCommand* dfc =
- dynamic_cast<DeclareFunctionNodeCommand*>(c.get());
- if (dfc != NULL)
- {
- Node df = dfc->getFunction();
- if (df == f)
- {
- dfc->setPrintInModel(p);
- }
- }
- }
- for (NodeCommand* c : d_modelCommands)
- {
- DeclareFunctionNodeCommand* dfc =
- dynamic_cast<DeclareFunctionNodeCommand*>(c);
- if (dfc != NULL)
- {
- Node df = dfc->getFunction();
- if (df == f)
- {
- dfc->setPrintInModel(p);
- }
- }
- }
-}
-
-size_t DumpManager::getNumModelCommands() const
-{
- return d_modelCommands.size() + d_modelGlobalCommands.size();
-}
-
-const NodeCommand* DumpManager::getModelCommand(size_t i) const
-{
- Assert(i < getNumModelCommands());
- // index the global commands first, then the locals
- if (i < d_modelGlobalCommands.size())
- {
- return d_modelGlobalCommands[i].get();
- }
- return d_modelCommands[i - d_modelGlobalCommands.size()];
+ // TODO (cvc4-wishues/issues/75): implement
}
} // namespace smt
diff --git a/src/smt/dump_manager.h b/src/smt/dump_manager.h
index 0ba8e0b8b..eaedf39a1 100644
--- a/src/smt/dump_manager.h
+++ b/src/smt/dump_manager.h
@@ -31,14 +31,10 @@ namespace smt {
/**
* This utility is responsible for:
- * (1) storing information required for SMT-LIB queries such as get-model,
- * which requires knowing what symbols are declared and should be printed in
- * the model.
- * (2) implementing some dumping traces e.g. --dump=declarations.
+ * implementing some dumping traces e.g. --dump=declarations.
*/
class DumpManager
{
- typedef context::CDList<NodeCommand*> CommandList;
public:
DumpManager(context::UserContext* u);
@@ -65,34 +61,10 @@ class DumpManager
* Set that function f should print in the model if and only if p is true.
*/
void setPrintFuncInModel(Node f, bool p);
- /** get number of commands to report in a model */
- size_t getNumModelCommands() const;
- /** get model command at index i */
- const NodeCommand* getModelCommand(size_t i) const;
private:
/** Fully inited */
bool d_fullyInited;
-
- /**
- * A list of commands that should be in the Model globally (i.e.,
- * regardless of push/pop). Only maintained if produce-models option
- * is on.
- */
- std::vector<std::unique_ptr<NodeCommand>> d_modelGlobalCommands;
-
- /**
- * A list of commands that should be in the Model locally (i.e.,
- * it is context-dependent on push/pop). Only maintained if
- * produce-models option is on.
- */
- CommandList d_modelCommands;
- /**
- * A list of model commands allocated to d_modelCommands at any time. This
- * is maintained for memory management purposes.
- */
- std::vector<std::unique_ptr<NodeCommand>> d_modelCommandsAlloc;
-
/**
* A vector of declaration commands waiting to be dumped out.
* Once the SmtEngine is fully initialized, we'll dump them.
diff --git a/src/smt/expand_definitions.cpp b/src/smt/expand_definitions.cpp
new file mode 100644
index 000000000..20c4f8ef6
--- /dev/null
+++ b/src/smt/expand_definitions.cpp
@@ -0,0 +1,278 @@
+/********************* */
+/*! \file expand_definitions.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Haniel Barbosa
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of expand definitions for an SMT engine.
+ **/
+
+#include "smt/expand_definitions.h"
+
+#include <stack>
+#include <utility>
+
+#include "expr/node_manager_attributes.h"
+#include "smt/defined_function.h"
+#include "smt/smt_engine.h"
+#include "theory/theory_engine.h"
+
+using namespace CVC4::preprocessing;
+using namespace CVC4::theory;
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace smt {
+
+ExpandDefs::ExpandDefs(SmtEngine& smt,
+ ResourceManager& rm,
+ SmtEngineStatistics& stats)
+ : d_smt(smt), d_resourceManager(rm), d_smtStats(stats)
+{
+}
+
+ExpandDefs::~ExpandDefs() {}
+
+Node ExpandDefs::expandDefinitions(
+ TNode n,
+ std::unordered_map<Node, Node, NodeHashFunction>& cache,
+ bool expandOnly)
+{
+ NodeManager* nm = d_smt.getNodeManager();
+ std::stack<std::tuple<Node, Node, bool>> worklist;
+ std::stack<Node> result;
+ worklist.push(std::make_tuple(Node(n), Node(n), false));
+ // The worklist is made of triples, each is input / original node then the
+ // output / rewritten node and finally a flag tracking whether the children
+ // have been explored (i.e. if this is a downward or upward pass).
+
+ do
+ {
+ d_resourceManager.spendResource(ResourceManager::Resource::PreprocessStep);
+
+ // n is the input / original
+ // node is the output / result
+ Node node;
+ bool childrenPushed;
+ std::tie(n, node, childrenPushed) = worklist.top();
+ worklist.pop();
+
+ // Working downwards
+ if (!childrenPushed)
+ {
+ Kind k = n.getKind();
+
+ // we can short circuit (variable) leaves
+ if (n.isVar())
+ {
+ SmtEngine::DefinedFunctionMap* dfuns = d_smt.getDefinedFunctionMap();
+ SmtEngine::DefinedFunctionMap::const_iterator i = dfuns->find(n);
+ if (i != dfuns->end())
+ {
+ Node f = (*i).second.getFormula();
+ // must expand its definition
+ Node fe = expandDefinitions(f, cache, expandOnly);
+ // replacement must be closed
+ if ((*i).second.getFormals().size() > 0)
+ {
+ result.push(
+ nm->mkNode(LAMBDA,
+ nm->mkNode(BOUND_VAR_LIST, (*i).second.getFormals()),
+ fe));
+ continue;
+ }
+ // don't bother putting in the cache
+ result.push(fe);
+ continue;
+ }
+ // don't bother putting in the cache
+ result.push(n);
+ continue;
+ }
+
+ // maybe it's in the cache
+ std::unordered_map<Node, Node, NodeHashFunction>::iterator cacheHit =
+ cache.find(n);
+ if (cacheHit != cache.end())
+ {
+ TNode ret = (*cacheHit).second;
+ result.push(ret.isNull() ? n : ret);
+ continue;
+ }
+
+ // otherwise expand it
+ bool doExpand = false;
+ if (k == APPLY_UF)
+ {
+ // Always do beta-reduction here. The reason is that there may be
+ // operators such as INTS_MODULUS in the body of the lambda that would
+ // otherwise be introduced by beta-reduction via the rewriter, but are
+ // not expanded here since the traversal in this function does not
+ // traverse the operators of nodes. Hence, we beta-reduce here to
+ // ensure terms in the body of the lambda are expanded during this
+ // call.
+ if (n.getOperator().getKind() == LAMBDA)
+ {
+ doExpand = true;
+ }
+ else
+ {
+ // We always check if this operator corresponds to a defined function.
+ doExpand = d_smt.isDefinedFunction(n.getOperator().toExpr());
+ }
+ }
+ if (doExpand)
+ {
+ std::vector<Node> formals;
+ TNode fm;
+ if (n.getOperator().getKind() == LAMBDA)
+ {
+ TNode op = n.getOperator();
+ // lambda
+ for (unsigned i = 0; i < op[0].getNumChildren(); i++)
+ {
+ formals.push_back(op[0][i]);
+ }
+ fm = op[1];
+ }
+ else
+ {
+ // application of a user-defined symbol
+ TNode func = n.getOperator();
+ SmtEngine::DefinedFunctionMap* dfuns = d_smt.getDefinedFunctionMap();
+ SmtEngine::DefinedFunctionMap::const_iterator i = dfuns->find(func);
+ if (i == dfuns->end())
+ {
+ throw TypeCheckingException(
+ n.toExpr(),
+ std::string("Undefined function: `") + func.toString() + "'");
+ }
+ DefinedFunction def = (*i).second;
+ formals = def.getFormals();
+
+ if (Debug.isOn("expand"))
+ {
+ Debug("expand") << "found: " << n << std::endl;
+ Debug("expand") << " func: " << func << std::endl;
+ std::string name = func.getAttribute(expr::VarNameAttr());
+ Debug("expand") << " : \"" << name << "\"" << std::endl;
+ }
+ if (Debug.isOn("expand"))
+ {
+ Debug("expand") << " defn: " << def.getFunction() << std::endl
+ << " [";
+ if (formals.size() > 0)
+ {
+ copy(formals.begin(),
+ formals.end() - 1,
+ std::ostream_iterator<Node>(Debug("expand"), ", "));
+ Debug("expand") << formals.back();
+ }
+ Debug("expand")
+ << "]" << std::endl
+ << " " << def.getFunction().getType() << std::endl
+ << " " << def.getFormula() << std::endl;
+ }
+
+ fm = def.getFormula();
+ }
+
+ Node instance = fm.substitute(formals.begin(),
+ formals.end(),
+ n.begin(),
+ n.begin() + formals.size());
+ Debug("expand") << "made : " << instance << std::endl;
+
+ Node expanded = expandDefinitions(instance, cache, expandOnly);
+ cache[n] = (n == expanded ? Node::null() : expanded);
+ result.push(expanded);
+ continue;
+ }
+ else if (!expandOnly)
+ {
+ // do not do any theory stuff if expandOnly is true
+
+ theory::Theory* t = d_smt.getTheoryEngine()->theoryOf(node);
+
+ Assert(t != NULL);
+ TrustNode trn = t->expandDefinition(n);
+ node = trn.isNull() ? Node(n) : trn.getNode();
+ }
+
+ // the partial functions can fall through, in which case we still
+ // consider their children
+ worklist.push(std::make_tuple(
+ Node(n), node, true)); // Original and rewritten result
+
+ for (size_t i = 0; i < node.getNumChildren(); ++i)
+ {
+ worklist.push(
+ std::make_tuple(node[i],
+ node[i],
+ false)); // Rewrite the children of the result only
+ }
+ }
+ else
+ {
+ // Working upwards
+ // Reconstruct the node from its (now rewritten) children on the stack
+
+ Debug("expand") << "cons : " << node << std::endl;
+ if (node.getNumChildren() > 0)
+ {
+ // cout << "cons : " << node << std::endl;
+ NodeBuilder<> nb(node.getKind());
+ if (node.getMetaKind() == metakind::PARAMETERIZED)
+ {
+ Debug("expand") << "op : " << node.getOperator() << std::endl;
+ // cout << "op : " << node.getOperator() << std::endl;
+ nb << node.getOperator();
+ }
+ for (size_t i = 0, nchild = node.getNumChildren(); i < nchild; ++i)
+ {
+ Assert(!result.empty());
+ Node expanded = result.top();
+ result.pop();
+ // cout << "exchld : " << expanded << std::endl;
+ Debug("expand") << "exchld : " << expanded << std::endl;
+ nb << expanded;
+ }
+ node = nb;
+ }
+ // Only cache once all subterms are expanded
+ cache[n] = n == node ? Node::null() : node;
+ result.push(node);
+ }
+ } while (!worklist.empty());
+
+ AlwaysAssert(result.size() == 1);
+
+ return result.top();
+}
+
+void ExpandDefs::expandAssertions(AssertionPipeline& assertions,
+ bool expandOnly)
+{
+ Chat() << "expanding definitions in assertions..." << std::endl;
+ Trace("simplify") << "ExpandDefs::simplify(): expanding definitions"
+ << std::endl;
+ TimerStat::CodeTimer codeTimer(d_smtStats.d_definitionExpansionTime);
+ std::unordered_map<Node, Node, NodeHashFunction> cache;
+ for (size_t i = 0, nasserts = assertions.size(); i < nasserts; ++i)
+ {
+ Node assert = assertions[i];
+ Node expd = expandDefinitions(assert, cache, expandOnly);
+ if (expd != assert)
+ {
+ assertions.replace(i, expd);
+ }
+ }
+}
+
+} // namespace smt
+} // namespace CVC4
diff --git a/src/smt/expand_definitions.h b/src/smt/expand_definitions.h
new file mode 100644
index 000000000..f40ee4a4e
--- /dev/null
+++ b/src/smt/expand_definitions.h
@@ -0,0 +1,76 @@
+/********************* */
+/*! \file expand_definitions.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Morgan Deters
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief The module for processing assertions for an SMT engine.
+ **/
+
+#include "cvc4_private.h"
+
+#ifndef CVC4__SMT__EXPAND_DEFINITIONS_H
+#define CVC4__SMT__EXPAND_DEFINITIONS_H
+
+#include <unordered_map>
+
+#include "expr/node.h"
+#include "preprocessing/assertion_pipeline.h"
+#include "smt/smt_engine_stats.h"
+#include "util/resource_manager.h"
+
+namespace CVC4 {
+
+class SmtEngine;
+
+namespace smt {
+
+/**
+ * Module in charge of expanding definitions for an SMT engine.
+ *
+ * Its main feature is expandDefinitions(TNode, ...), which returns the
+ * expanded formula of a term.
+ */
+class ExpandDefs
+{
+ public:
+ ExpandDefs(SmtEngine& smt, ResourceManager& rm, SmtEngineStatistics& stats);
+ ~ExpandDefs();
+ /**
+ * Expand definitions in term n. Return the expanded form of n.
+ *
+ * @param n The node to expand
+ * @param cache Cache of previous results
+ * @param expandOnly if true, then the expandDefinitions function of
+ * TheoryEngine is not called on subterms of n.
+ * @return The expanded term.
+ */
+ Node expandDefinitions(
+ TNode n,
+ std::unordered_map<Node, Node, NodeHashFunction>& cache,
+ bool expandOnly = false);
+ /**
+   * Expand definitions in assertions. This applies the above method to each
+ * assertion in the given pipeline.
+ */
+ void expandAssertions(preprocessing::AssertionPipeline& assertions,
+ bool expandOnly = false);
+
+ private:
+ /** Reference to the SMT engine */
+ SmtEngine& d_smt;
+ /** Reference to resource manager */
+ ResourceManager& d_resourceManager;
+ /** Reference to the SMT stats */
+ SmtEngineStatistics& d_smtStats;
+};
+
+} // namespace smt
+} // namespace CVC4
+
+#endif
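
The intended usage mirrors the call sites in smt/preprocessor.cpp further down in this patch: construct the module once and thread a shared cache through repeated calls so that common subterms are expanded only once. A minimal illustrative sketch, assuming the CVC4 namespace is open and that an initialized SmtEngine, its ResourceManager and the SmtEngineStatistics object are available:

    #include <unordered_map>

    #include "smt/expand_definitions.h"

    // Illustrative sketch only; the three references are assumed to be the
    // same objects that the Preprocessor already holds.
    Node expandOnce(SmtEngine& smtEngine,
                    ResourceManager& rm,
                    smt::SmtEngineStatistics& stats,
                    TNode n)
    {
      smt::ExpandDefs exDefs(smtEngine, rm, stats);
      // A shared cache lets repeated subterms be expanded only once.
      std::unordered_map<Node, Node, NodeHashFunction> cache;
      return exDefs.expandDefinitions(n, cache, /* expandOnly = */ false);
    }
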
diff --git a/src/smt/model.cpp b/src/smt/model.cpp
index fc9ea8fbb..8a9f944d2 100644
--- a/src/smt/model.cpp
+++ b/src/smt/model.cpp
@@ -26,29 +26,17 @@
namespace CVC4 {
namespace smt {
-Model::Model(SmtEngine& smt, theory::TheoryModel* tm)
- : d_smt(smt), d_isKnownSat(false), d_tmodel(tm)
+Model::Model(theory::TheoryModel* tm) : d_isKnownSat(false), d_tmodel(tm)
{
Assert(d_tmodel != nullptr);
}
std::ostream& operator<<(std::ostream& out, const Model& m) {
- smt::SmtScope smts(&m.d_smt);
expr::ExprDag::Scope scope(out, false);
Printer::getPrinter(options::outputLanguage())->toStream(out, m);
return out;
}
-size_t Model::getNumCommands() const
-{
- return d_smt.getDumpManager()->getNumModelCommands();
-}
-
-const NodeCommand* Model::getCommand(size_t i) const
-{
- return d_smt.getDumpManager()->getModelCommand(i);
-}
-
theory::TheoryModel* Model::getTheoryModel() { return d_tmodel; }
const theory::TheoryModel* Model::getTheoryModel() const { return d_tmodel; }
@@ -61,5 +49,19 @@ Node Model::getValue(TNode n) const { return d_tmodel->getValue(n); }
bool Model::hasApproximations() const { return d_tmodel->hasApproximations(); }
+void Model::clearModelDeclarations() { d_declareSorts.clear(); }
+
+void Model::addDeclarationSort(TypeNode tn) { d_declareSorts.push_back(tn); }
+
+void Model::addDeclarationTerm(Node n) { d_declareTerms.push_back(n); }
+const std::vector<TypeNode>& Model::getDeclaredSorts() const
+{
+ return d_declareSorts;
+}
+const std::vector<Node>& Model::getDeclaredTerms() const
+{
+ return d_declareTerms;
+}
+
} // namespace smt
}/* CVC4 namespace */
diff --git a/src/smt/model.h b/src/smt/model.h
index dc36b5d29..18675040a 100644
--- a/src/smt/model.h
+++ b/src/smt/model.h
@@ -27,7 +27,6 @@
namespace CVC4 {
class SmtEngine;
-class NodeCommand;
namespace smt {
@@ -39,6 +38,9 @@ std::ostream& operator<<(std::ostream&, const Model&);
* This is the SMT-level model object, that is responsible for maintaining
* the necessary information for how to print the model, as well as
* holding a pointer to the underlying implementation of the theory model.
+ *
+ * The model declarations maintained by this class are context-independent
+ * and should be updated when this model is printed.
*/
class Model {
friend std::ostream& operator<<(std::ostream&, const Model&);
@@ -46,17 +48,9 @@ class Model {
public:
/** construct */
- Model(SmtEngine& smt, theory::TheoryModel* tm);
+ Model(theory::TheoryModel* tm);
/** virtual destructor */
~Model() {}
- /** get number of commands to report */
- size_t getNumCommands() const;
- /** get command */
- const NodeCommand* getCommand(size_t i) const;
- /** get the smt engine that this model is hooked up to */
- SmtEngine* getSmtEngine() { return &d_smt; }
- /** get the smt engine (as a pointer-to-const) that this model is hooked up to */
- const SmtEngine* getSmtEngine() const { return &d_smt; }
/** get the input name (file name, etc.) this model is associated to */
std::string getInputName() const { return d_inputName; }
/**
@@ -78,9 +72,25 @@ class Model {
/** Does this model have approximations? */
bool hasApproximations() const;
//----------------------- end helper methods
+ //----------------------- model declarations
+ /** Clear the current model declarations. */
+ void clearModelDeclarations();
+ /**
+ * Set that tn is a sort that should be printed in the model, when applicable,
+ * based on the output language.
+ */
+ void addDeclarationSort(TypeNode tn);
+ /**
+ * Set that n is a variable that should be printed in the model, when
+ * applicable, based on the output language.
+ */
+ void addDeclarationTerm(Node n);
+ /** get declared sorts */
+ const std::vector<TypeNode>& getDeclaredSorts() const;
+ /** get declared terms */
+ const std::vector<Node>& getDeclaredTerms() const;
+ //----------------------- end model declarations
protected:
- /** The SmtEngine we're associated with */
- SmtEngine& d_smt;
/** the input name (file name, etc.) this model is associated to */
std::string d_inputName;
/**
@@ -93,6 +103,16 @@ class Model {
* the values of sorts and terms.
*/
theory::TheoryModel* d_tmodel;
+ /**
+ * The list of types to print, generally corresponding to declare-sort
+ * commands.
+ */
+ std::vector<TypeNode> d_declareSorts;
+ /**
+   * The list of terms to print, typically in one-to-one correspondence with
+   * declare-fun commands.
+ */
+ std::vector<Node> d_declareTerms;
};
} // namespace smt
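
Since the model no longer exposes dump-manager commands, printers are expected to read the declared sorts and terms directly from the Model object. An illustrative sketch of such a consumer, assuming the CVC4 namespace is open; printDeclarations is a hypothetical helper, and the real printers in src/printer implement their own formatting:

    #include <ostream>

    #include "smt/model.h"

    // Hypothetical helper: walk the declarations recorded on the model.
    void printDeclarations(std::ostream& out, const smt::Model& m)
    {
      for (const TypeNode& tn : m.getDeclaredSorts())
      {
        // typically one entry per declare-sort command
        out << "sort: " << tn << std::endl;
      }
      for (const Node& n : m.getDeclaredTerms())
      {
        // typically one entry per declare-fun command
        out << "term: " << n << " : " << n.getType() << std::endl;
      }
    }
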
diff --git a/src/smt/node_command.cpp b/src/smt/node_command.cpp
index eb2493c87..815f99132 100644
--- a/src/smt/node_command.cpp
+++ b/src/smt/node_command.cpp
@@ -51,9 +51,7 @@ DeclareFunctionNodeCommand::DeclareFunctionNodeCommand(const std::string& id,
TypeNode type)
: d_id(id),
d_fun(expr),
- d_type(type),
- d_printInModel(true),
- d_printInModelSetByUser(false)
+ d_type(type)
{
}
@@ -72,22 +70,6 @@ NodeCommand* DeclareFunctionNodeCommand::clone() const
const Node& DeclareFunctionNodeCommand::getFunction() const { return d_fun; }
-bool DeclareFunctionNodeCommand::getPrintInModel() const
-{
- return d_printInModel;
-}
-
-bool DeclareFunctionNodeCommand::getPrintInModelSetByUser() const
-{
- return d_printInModelSetByUser;
-}
-
-void DeclareFunctionNodeCommand::setPrintInModel(bool p)
-{
- d_printInModel = p;
- d_printInModelSetByUser = true;
-}
-
/* -------------------------------------------------------------------------- */
/* class DeclareTypeNodeCommand */
/* -------------------------------------------------------------------------- */
@@ -104,8 +86,7 @@ void DeclareTypeNodeCommand::toStream(std::ostream& out,
size_t dag,
OutputLanguage language) const
{
- Printer::getPrinter(language)->toStreamCmdDeclareType(
- out, d_id, d_arity, d_type);
+ Printer::getPrinter(language)->toStreamCmdDeclareType(out, d_type);
}
NodeCommand* DeclareTypeNodeCommand::clone() const
diff --git a/src/smt/node_command.h b/src/smt/node_command.h
index 8cf9a5e10..e1a15e62c 100644
--- a/src/smt/node_command.h
+++ b/src/smt/node_command.h
@@ -68,16 +68,11 @@ class DeclareFunctionNodeCommand : public NodeCommand
OutputLanguage language = language::output::LANG_AUTO) const override;
NodeCommand* clone() const override;
const Node& getFunction() const;
- bool getPrintInModel() const;
- bool getPrintInModelSetByUser() const;
- void setPrintInModel(bool p);
private:
std::string d_id;
Node d_fun;
TypeNode d_type;
- bool d_printInModel;
- bool d_printInModelSetByUser;
};
/**
diff --git a/src/smt/preprocessor.cpp b/src/smt/preprocessor.cpp
index bf7009081..bd6988645 100644
--- a/src/smt/preprocessor.cpp
+++ b/src/smt/preprocessor.cpp
@@ -35,7 +35,8 @@ Preprocessor::Preprocessor(SmtEngine& smt,
d_absValues(abs),
d_propagator(true, true),
d_assertionsProcessed(u, false),
- d_processor(smt, *smt.getResourceManager(), stats),
+ d_exDefs(smt, *smt.getResourceManager(), stats),
+ d_processor(smt, d_exDefs, *smt.getResourceManager(), stats),
d_rtf(u),
d_pnm(nullptr)
{
@@ -107,7 +108,7 @@ RemoveTermFormulas& Preprocessor::getTermFormulaRemover() { return d_rtf; }
Node Preprocessor::expandDefinitions(const Node& n, bool expandOnly)
{
std::unordered_map<Node, Node, NodeHashFunction> cache;
- return expandDefinitions(n, cache, expandOnly);
+ return d_exDefs.expandDefinitions(n, cache, expandOnly);
}
Node Preprocessor::expandDefinitions(
@@ -124,7 +125,7 @@ Node Preprocessor::expandDefinitions(
n.getType(true);
}
// expand only = true
- return d_processor.expandDefinitions(n, cache, expandOnly);
+ return d_exDefs.expandDefinitions(n, cache, expandOnly);
}
Node Preprocessor::simplify(const Node& node, bool removeItes)
@@ -142,7 +143,7 @@ Node Preprocessor::simplify(const Node& node, bool removeItes)
nas.getType(true);
}
std::unordered_map<Node, Node, NodeHashFunction> cache;
- Node n = d_processor.expandDefinitions(nas, cache);
+ Node n = d_exDefs.expandDefinitions(nas, cache);
TrustNode ts = d_ppContext->getTopLevelSubstitutions().apply(n);
Node ns = ts.isNull() ? n : ts.getNode();
if (removeItes)
@@ -157,6 +158,7 @@ void Preprocessor::setProofGenerator(PreprocessProofGenerator* pppg)
{
Assert(pppg != nullptr);
d_pnm = pppg->getManager();
+ d_propagator.setProof(d_pnm, d_context, pppg);
d_rtf.setProofNodeManager(d_pnm);
}
diff --git a/src/smt/preprocessor.h b/src/smt/preprocessor.h
index 8700c3885..696261d9e 100644
--- a/src/smt/preprocessor.h
+++ b/src/smt/preprocessor.h
@@ -20,6 +20,7 @@
#include <vector>
#include "preprocessing/preprocessing_pass_context.h"
+#include "smt/expand_definitions.h"
#include "smt/process_assertions.h"
#include "smt/term_formula_removal.h"
#include "theory/booleans/circuit_propagator.h"
@@ -123,6 +124,8 @@ class Preprocessor
context::CDO<bool> d_assertionsProcessed;
/** The preprocessing pass context */
std::unique_ptr<preprocessing::PreprocessingPassContext> d_ppContext;
+ /** Expand definitions module, responsible for expanding definitions */
+ ExpandDefs d_exDefs;
/**
* Process assertions module, responsible for implementing the preprocessing
* passes.
diff --git a/src/smt/process_assertions.cpp b/src/smt/process_assertions.cpp
index 2011e7b83..c68b73336 100644
--- a/src/smt/process_assertions.cpp
+++ b/src/smt/process_assertions.cpp
@@ -53,9 +53,11 @@ class ScopeCounter
};
ProcessAssertions::ProcessAssertions(SmtEngine& smt,
+ ExpandDefs& exDefs,
ResourceManager& rm,
SmtEngineStatistics& stats)
: d_smt(smt),
+ d_exDefs(exDefs),
d_resourceManager(rm),
d_smtStats(stats),
d_preprocessingPassContext(nullptr)
@@ -128,21 +130,7 @@ bool ProcessAssertions::apply(Assertions& as)
<< "ProcessAssertions::processAssertions() : pre-definition-expansion"
<< endl;
dumpAssertions("pre-definition-expansion", assertions);
- {
- Chat() << "expanding definitions..." << endl;
- Trace("simplify") << "ProcessAssertions::simplify(): expanding definitions"
- << endl;
- TimerStat::CodeTimer codeTimer(d_smtStats.d_definitionExpansionTime);
- unordered_map<Node, Node, NodeHashFunction> cache;
- for (size_t i = 0, nasserts = assertions.size(); i < nasserts; ++i)
- {
- Node expd = expandDefinitions(assertions[i], cache);
- if (expd != assertions[i])
- {
- assertions.replace(i, expd);
- }
- }
- }
+ d_exDefs.expandAssertions(assertions, false);
Trace("smt-proc")
<< "ProcessAssertions::processAssertions() : post-definition-expansion"
<< endl;
@@ -550,222 +538,6 @@ void ProcessAssertions::dumpAssertions(const char* key,
}
}
-Node ProcessAssertions::expandDefinitions(
- TNode n,
- unordered_map<Node, Node, NodeHashFunction>& cache,
- bool expandOnly)
-{
- NodeManager* nm = d_smt.getNodeManager();
- std::stack<std::tuple<Node, Node, bool>> worklist;
- std::stack<Node> result;
- worklist.push(std::make_tuple(Node(n), Node(n), false));
- // The worklist is made of triples, each is input / original node then the
- // output / rewritten node and finally a flag tracking whether the children
- // have been explored (i.e. if this is a downward or upward pass).
-
- do
- {
- spendResource(ResourceManager::Resource::PreprocessStep);
-
- // n is the input / original
- // node is the output / result
- Node node;
- bool childrenPushed;
- std::tie(n, node, childrenPushed) = worklist.top();
- worklist.pop();
-
- // Working downwards
- if (!childrenPushed)
- {
- Kind k = n.getKind();
-
- // we can short circuit (variable) leaves
- if (n.isVar())
- {
- SmtEngine::DefinedFunctionMap* dfuns = d_smt.getDefinedFunctionMap();
- SmtEngine::DefinedFunctionMap::const_iterator i = dfuns->find(n);
- if (i != dfuns->end())
- {
- Node f = (*i).second.getFormula();
- // must expand its definition
- Node fe = expandDefinitions(f, cache, expandOnly);
- // replacement must be closed
- if ((*i).second.getFormals().size() > 0)
- {
- result.push(
- nm->mkNode(LAMBDA,
- nm->mkNode(BOUND_VAR_LIST, (*i).second.getFormals()),
- fe));
- continue;
- }
- // don't bother putting in the cache
- result.push(fe);
- continue;
- }
- // don't bother putting in the cache
- result.push(n);
- continue;
- }
-
- // maybe it's in the cache
- unordered_map<Node, Node, NodeHashFunction>::iterator cacheHit =
- cache.find(n);
- if (cacheHit != cache.end())
- {
- TNode ret = (*cacheHit).second;
- result.push(ret.isNull() ? n : ret);
- continue;
- }
-
- // otherwise expand it
- bool doExpand = false;
- if (k == APPLY_UF)
- {
- // Always do beta-reduction here. The reason is that there may be
- // operators such as INTS_MODULUS in the body of the lambda that would
- // otherwise be introduced by beta-reduction via the rewriter, but are
- // not expanded here since the traversal in this function does not
- // traverse the operators of nodes. Hence, we beta-reduce here to
- // ensure terms in the body of the lambda are expanded during this
- // call.
- if (n.getOperator().getKind() == LAMBDA)
- {
- doExpand = true;
- }
- else
- {
- // We always check if this operator corresponds to a defined function.
- doExpand = d_smt.isDefinedFunction(n.getOperator().toExpr());
- }
- }
- if (doExpand)
- {
- vector<Node> formals;
- TNode fm;
- if (n.getOperator().getKind() == LAMBDA)
- {
- TNode op = n.getOperator();
- // lambda
- for (unsigned i = 0; i < op[0].getNumChildren(); i++)
- {
- formals.push_back(op[0][i]);
- }
- fm = op[1];
- }
- else
- {
- // application of a user-defined symbol
- TNode func = n.getOperator();
- SmtEngine::DefinedFunctionMap* dfuns = d_smt.getDefinedFunctionMap();
- SmtEngine::DefinedFunctionMap::const_iterator i = dfuns->find(func);
- if (i == dfuns->end())
- {
- throw TypeCheckingException(
- n.toExpr(),
- string("Undefined function: `") + func.toString() + "'");
- }
- DefinedFunction def = (*i).second;
- formals = def.getFormals();
-
- if (Debug.isOn("expand"))
- {
- Debug("expand") << "found: " << n << endl;
- Debug("expand") << " func: " << func << endl;
- string name = func.getAttribute(expr::VarNameAttr());
- Debug("expand") << " : \"" << name << "\"" << endl;
- }
- if (Debug.isOn("expand"))
- {
- Debug("expand") << " defn: " << def.getFunction() << endl
- << " [";
- if (formals.size() > 0)
- {
- copy(formals.begin(),
- formals.end() - 1,
- ostream_iterator<Node>(Debug("expand"), ", "));
- Debug("expand") << formals.back();
- }
- Debug("expand") << "]" << endl
- << " " << def.getFunction().getType() << endl
- << " " << def.getFormula() << endl;
- }
-
- fm = def.getFormula();
- }
-
- Node instance = fm.substitute(formals.begin(),
- formals.end(),
- n.begin(),
- n.begin() + formals.size());
- Debug("expand") << "made : " << instance << endl;
-
- Node expanded = expandDefinitions(instance, cache, expandOnly);
- cache[n] = (n == expanded ? Node::null() : expanded);
- result.push(expanded);
- continue;
- }
- else if (!expandOnly)
- {
- // do not do any theory stuff if expandOnly is true
-
- theory::Theory* t = d_smt.getTheoryEngine()->theoryOf(node);
-
- Assert(t != NULL);
- TrustNode trn = t->expandDefinition(n);
- node = trn.isNull() ? Node(n) : trn.getNode();
- }
-
- // the partial functions can fall through, in which case we still
- // consider their children
- worklist.push(std::make_tuple(
- Node(n), node, true)); // Original and rewritten result
-
- for (size_t i = 0; i < node.getNumChildren(); ++i)
- {
- worklist.push(
- std::make_tuple(node[i],
- node[i],
- false)); // Rewrite the children of the result only
- }
- }
- else
- {
- // Working upwards
- // Reconstruct the node from it's (now rewritten) children on the stack
-
- Debug("expand") << "cons : " << node << endl;
- if (node.getNumChildren() > 0)
- {
- // cout << "cons : " << node << endl;
- NodeBuilder<> nb(node.getKind());
- if (node.getMetaKind() == metakind::PARAMETERIZED)
- {
- Debug("expand") << "op : " << node.getOperator() << endl;
- // cout << "op : " << node.getOperator() << endl;
- nb << node.getOperator();
- }
- for (size_t i = 0, nchild = node.getNumChildren(); i < nchild; ++i)
- {
- Assert(!result.empty());
- Node expanded = result.top();
- result.pop();
- // cout << "exchld : " << expanded << endl;
- Debug("expand") << "exchld : " << expanded << endl;
- nb << expanded;
- }
- node = nb;
- }
- // Only cache once all subterms are expanded
- cache[n] = n == node ? Node::null() : node;
- result.push(node);
- }
- } while (!worklist.empty());
-
- AlwaysAssert(result.size() == 1);
-
- return result.top();
-}
-
void ProcessAssertions::collectSkolems(
IteSkolemMap& iskMap,
TNode n,
diff --git a/src/smt/process_assertions.h b/src/smt/process_assertions.h
index d260edf14..072603e7d 100644
--- a/src/smt/process_assertions.h
+++ b/src/smt/process_assertions.h
@@ -26,6 +26,7 @@
#include "preprocessing/preprocessing_pass.h"
#include "preprocessing/preprocessing_pass_context.h"
#include "smt/assertions.h"
+#include "smt/expand_definitions.h"
#include "smt/smt_engine_stats.h"
#include "util/resource_manager.h"
@@ -53,11 +54,11 @@ class ProcessAssertions
{
/** The types for the recursive function definitions */
typedef context::CDList<Node> NodeList;
- typedef unordered_map<Node, Node, NodeHashFunction> NodeToNodeHashMap;
typedef unordered_map<Node, bool, NodeHashFunction> NodeToBoolHashMap;
public:
ProcessAssertions(SmtEngine& smt,
+ ExpandDefs& exDefs,
ResourceManager& rm,
SmtEngineStatistics& stats);
~ProcessAssertions();
@@ -76,22 +77,12 @@ class ProcessAssertions
* processing the assertions.
*/
bool apply(Assertions& as);
- /**
- * Expand definitions in term n. Return the expanded form of n.
- *
- * @param n The node to expand
- * @param cache Cache of previous results
- * @param expandOnly if true, then the expandDefinitions function of
- * TheoryEngine is not called on subterms of n.
- * @return The expanded term.
- */
- Node expandDefinitions(TNode n,
- NodeToNodeHashMap& cache,
- bool expandOnly = false);
private:
/** Reference to the SMT engine */
SmtEngine& d_smt;
+ /** Reference to expand definitions module */
+ ExpandDefs& d_exDefs;
/** Reference to resource manager */
ResourceManager& d_resourceManager;
/** Reference to the SMT stats */
diff --git a/src/smt/smt_engine.cpp b/src/smt/smt_engine.cpp
index 2a0cde015..0f40db530 100644
--- a/src/smt/smt_engine.cpp
+++ b/src/smt/smt_engine.cpp
@@ -73,10 +73,9 @@ using namespace CVC4::theory;
namespace CVC4 {
-SmtEngine::SmtEngine(ExprManager* em, Options* optr)
+SmtEngine::SmtEngine(NodeManager* nm, Options* optr)
: d_state(new SmtEngineState(*this)),
- d_exprManager(em),
- d_nodeManager(d_exprManager->getNodeManager()),
+ d_nodeManager(nm),
d_absValues(new AbstractValues(d_nodeManager)),
d_asserts(new Assertions(getUserContext(), *d_absValues.get())),
d_dumpm(new DumpManager(getUserContext())),
@@ -244,7 +243,7 @@ void SmtEngine::finishInit()
TheoryModel* tm = te->getModel();
if (tm != nullptr)
{
- d_model.reset(new Model(*this, tm));
+ d_model.reset(new Model(tm));
// make the check models utility
d_checkModels.reset(new CheckModels(*d_smtSolver.get()));
}
@@ -506,7 +505,7 @@ bool SmtEngine::isValidGetInfoFlag(const std::string& key) const
if (key == "all-statistics" || key == "error-behavior" || key == "name"
|| key == "version" || key == "authors" || key == "status"
|| key == "reason-unknown" || key == "assertion-stack-levels"
- || key == "all-options")
+ || key == "all-options" || key == "time")
{
return true;
}
@@ -525,14 +524,8 @@ CVC4::SExpr SmtEngine::getInfo(const std::string& key) const
if (key == "all-statistics")
{
vector<SExpr> stats;
- for (StatisticsRegistry::const_iterator i =
- NodeManager::fromExprManager(d_exprManager)
- ->getStatisticsRegistry()
- ->begin();
- i
- != NodeManager::fromExprManager(d_exprManager)
- ->getStatisticsRegistry()
- ->end();
+ StatisticsRegistry* sr = d_nodeManager->getStatisticsRegistry();
+ for (StatisticsRegistry::const_iterator i = sr->begin(); i != sr->end();
++i)
{
vector<SExpr> v;
@@ -578,6 +571,10 @@ CVC4::SExpr SmtEngine::getInfo(const std::string& key) const
default: return SExpr(SExpr::Keyword("unknown"));
}
}
+ if (key == "time")
+ {
+ return SExpr(std::clock());
+ }
if (key == "reason-unknown")
{
Result status = d_state->getStatus();
@@ -1633,7 +1630,6 @@ void SmtEngine::pop() {
void SmtEngine::reset()
{
SmtScope smts(this);
- ExprManager *em = d_exprManager;
Trace("smt") << "SMT reset()" << endl;
if(Dump.isOn("benchmark")) {
getOutputManager().getPrinter().toStreamCmdReset(
@@ -1643,7 +1639,7 @@ void SmtEngine::reset()
Options opts;
opts.copyValues(d_originalOptions);
this->~SmtEngine();
- new (this) SmtEngine(em, &opts);
+ new (this) SmtEngine(d_nodeManager, &opts);
// Restore data set after creation
notifyStartParsing(filename);
}
@@ -1709,10 +1705,7 @@ unsigned long SmtEngine::getResourceRemaining() const
return d_resourceManager->getResourceRemaining();
}
-NodeManager* SmtEngine::getNodeManager() const
-{
- return d_exprManager->getNodeManager();
-}
+NodeManager* SmtEngine::getNodeManager() const { return d_nodeManager; }
Statistics SmtEngine::getStatistics() const
{
@@ -1729,20 +1722,15 @@ void SmtEngine::safeFlushStatistics(int fd) const {
}
void SmtEngine::setUserAttribute(const std::string& attr,
- Expr expr,
- const std::vector<Expr>& expr_values,
+ Node expr,
+ const std::vector<Node>& expr_values,
const std::string& str_value)
{
SmtScope smts(this);
finishInit();
- std::vector<Node> node_values;
- for (std::size_t i = 0, n = expr_values.size(); i < n; i++)
- {
- node_values.push_back( expr_values[i].getNode() );
- }
TheoryEngine* te = getTheoryEngine();
Assert(te != nullptr);
- te->setUserAttribute(attr, expr.getNode(), node_values, str_value);
+ te->setUserAttribute(attr, expr, expr_values, str_value);
}
void SmtEngine::setOption(const std::string& key, const CVC4::SExpr& value)
diff --git a/src/smt/smt_engine.h b/src/smt/smt_engine.h
index 1c83fa61f..a55428b55 100644
--- a/src/smt/smt_engine.h
+++ b/src/smt/smt_engine.h
@@ -21,13 +21,12 @@
#include <string>
#include <vector>
+#include <map>
#include "base/modal_exception.h"
#include "context/cdhashmap_forward.h"
#include "context/cdhashset_forward.h"
#include "context/cdlist_forward.h"
-#include "expr/expr.h"
-#include "expr/expr_manager.h"
#include "options/options.h"
#include "smt/logic_exception.h"
#include "smt/output_manager.h"
@@ -48,9 +47,10 @@ namespace CVC4 {
template <bool ref_count> class NodeTemplate;
typedef NodeTemplate<true> Node;
typedef NodeTemplate<false> TNode;
+class TypeNode;
struct NodeHashFunction;
-class SmtEngine;
+class NodeManager;
class DecisionEngine;
class TheoryEngine;
class ProofManager;
@@ -58,6 +58,7 @@ class UnsatCore;
class LogicRequest;
class StatisticsRegistry;
class Printer;
+class ResourceManager;
/* -------------------------------------------------------------------------- */
@@ -147,7 +148,7 @@ class CVC4_PUBLIC SmtEngine
* If provided, optr is a pointer to a set of options that should initialize the values
* of the options object owned by this class.
*/
- SmtEngine(ExprManager* em, Options* optr = nullptr);
+ SmtEngine(NodeManager* nm, Options* optr = nullptr);
/** Destruct the SMT engine. */
~SmtEngine();
@@ -691,7 +692,7 @@ class CVC4_PUBLIC SmtEngine
/**
* Completely reset the state of the solver, as though destroyed and
* recreated. The result is as if newly constructed (so it still
- * retains the same options structure and ExprManager).
+ * retains the same options structure and NodeManager).
*/
void reset();
@@ -785,9 +786,6 @@ class CVC4_PUBLIC SmtEngine
*/
unsigned long getResourceRemaining() const;
- /** Permit access to the underlying ExprManager. */
- ExprManager* getExprManager() const { return d_exprManager; }
-
/** Permit access to the underlying NodeManager. */
NodeManager* getNodeManager() const;
@@ -806,8 +804,8 @@ class CVC4_PUBLIC SmtEngine
* In SMT-LIBv2 this is done via the syntax (! expr :attr)
*/
void setUserAttribute(const std::string& attr,
- Expr expr,
- const std::vector<Expr>& expr_values,
+ Node expr,
+ const std::vector<Node>& expr_values,
const std::string& str_value);
/** Get the options object (const and non-const versions) */
@@ -1013,9 +1011,7 @@ class CVC4_PUBLIC SmtEngine
*/
std::unique_ptr<smt::SmtEngineState> d_state;
- /** Our expression manager */
- ExprManager* d_exprManager;
- /** Our internal expression/node manager */
+ /** Our internal node manager */
NodeManager* d_nodeManager;
/** Abstract values */
std::unique_ptr<smt::AbstractValues> d_absValues;
diff --git a/src/theory/arith/nl/cad_solver.cpp b/src/theory/arith/nl/cad_solver.cpp
index d80272475..2c3c55f45 100644
--- a/src/theory/arith/nl/cad_solver.cpp
+++ b/src/theory/arith/nl/cad_solver.cpp
@@ -75,6 +75,10 @@ void CadSolver::initLastCall(const std::vector<Node>& assertions)
void CadSolver::checkFull()
{
#ifdef CVC4_POLY_IMP
+ if (d_CAC.getConstraints().getConstraints().empty()) {
+ Trace("nl-cad") << "No constraints. Return." << std::endl;
+ return;
+ }
auto covering = d_CAC.getUnsatCover();
if (covering.empty())
{
@@ -105,6 +109,10 @@ void CadSolver::checkFull()
void CadSolver::checkPartial()
{
#ifdef CVC4_POLY_IMP
+ if (d_CAC.getConstraints().getConstraints().empty()) {
+ Trace("nl-cad") << "No constraints. Return." << std::endl;
+ return;
+ }
auto covering = d_CAC.getUnsatCover(0, true);
if (covering.empty())
{
diff --git a/src/theory/arith/nl/nl_lemma_utils.h b/src/theory/arith/nl/nl_lemma_utils.h
index ac52e7d52..24302339c 100644
--- a/src/theory/arith/nl/nl_lemma_utils.h
+++ b/src/theory/arith/nl/nl_lemma_utils.h
@@ -98,6 +98,25 @@ struct SortNlModel
bool operator()(Node i, Node j);
};
+/**
+ * Wrapper for std::sort that uses SortNlModel to sort an iterator range.
+ */
+template <typename It>
+void sortByNlModel(It begin,
+ It end,
+ NlModel* model,
+ bool concrete = true,
+ bool absolute = false,
+ bool reverse = false)
+{
+ SortNlModel smv;
+ smv.d_nlm = model;
+ smv.d_isConcrete = concrete;
+ smv.d_isAbsolute = absolute;
+ smv.d_reverse_order = reverse;
+ std::sort(begin, end, smv);
+}
+
struct SortNonlinearDegree
{
SortNonlinearDegree(const std::map<Node, unsigned>& m) : d_mdegree(m) {}
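
The sortByNlModel wrapper added above packages the flag setup that the transcendental solvers later in this patch repeat at each call site. An illustrative usage sketch; sortArgs is a hypothetical helper, and the flag values match the calls in exponential_solver.cpp and sine_solver.cpp:

    // Sort argument terms by their concrete model value (not by absolute
    // value), with the reverse-order flag set, as the solvers below do.
    void sortArgs(std::vector<Node>& args, NlModel& model)
    {
      sortByNlModel(args.begin(), args.end(), &model,
                    /* concrete = */ true,
                    /* absolute = */ false,
                    /* reverse  = */ true);
    }
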
diff --git a/src/theory/arith/nl/nonlinear_extension.h b/src/theory/arith/nl/nonlinear_extension.h
index ca97d6a9b..89370608a 100644
--- a/src/theory/arith/nl/nonlinear_extension.h
+++ b/src/theory/arith/nl/nonlinear_extension.h
@@ -38,7 +38,7 @@
#include "theory/arith/nl/nl_model.h"
#include "theory/arith/nl/stats.h"
#include "theory/arith/nl/strategy.h"
-#include "theory/arith/nl/transcendental_solver.h"
+#include "theory/arith/nl/transcendental/transcendental_solver.h"
#include "theory/ext_theory.h"
#include "theory/uf/equality_engine.h"
@@ -251,7 +251,7 @@ class NonlinearExtension
* This is the subsolver responsible for running the procedure for
* transcendental functions.
*/
- TranscendentalSolver d_trSlv;
+ transcendental::TranscendentalSolver d_trSlv;
/**
* Holds common lookup data for the checks implemented in the "nl-ext"
* solvers (from Cimatti et al., TACAS 2017).
diff --git a/src/theory/arith/nl/transcendental/exponential_solver.cpp b/src/theory/arith/nl/transcendental/exponential_solver.cpp
new file mode 100644
index 000000000..7d7d0c23c
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/exponential_solver.cpp
@@ -0,0 +1,206 @@
+/********************* */
+/*! \file exponential_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the solver for handling the exponential function.
+ **/
+
+#include "theory/arith/nl/transcendental/exponential_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+ExponentialSolver::ExponentialSolver(TranscendentalState* tstate)
+ : d_data(tstate)
+{
+}
+
+ExponentialSolver::~ExponentialSolver() {}
+
+void ExponentialSolver::checkInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_data->d_funcMap)
+ {
+ if (tfl.first != Kind::EXPONENTIAL)
+ {
+ continue;
+ }
+ Assert(tfl.first == Kind::EXPONENTIAL);
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
+ // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
+ lem = nm->mkNode(
+ Kind::AND,
+ nm->mkNode(Kind::GT, t, d_data->d_zero),
+ nm->mkNode(Kind::EQUAL,
+ t[0].eqNode(d_data->d_zero),
+ t.eqNode(d_data->d_one)),
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, d_data->d_one)),
+ nm->mkNode(
+ Kind::OR,
+ nm->mkNode(Kind::LEQ, t[0], d_data->d_zero),
+ nm->mkNode(
+ Kind::GT, t, nm->mkNode(Kind::PLUS, t[0], d_data->d_one))));
+ if (!lem.isNull())
+ {
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
+ }
+ }
+ }
+ }
+}
+
+void ExponentialSolver::checkMonotonic()
+{
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ auto it = d_data->d_funcMap.find(Kind::EXPONENTIAL);
+ if (it == d_data->d_funcMap.end())
+ {
+ Trace("nl-ext-exp") << "No exponential terms" << std::endl;
+ return;
+ }
+
+ // sort arguments of all transcendentals
+ std::vector<Node> tf_args;
+ std::map<Node, Node> tf_arg_to_term;
+
+ for (const Node& tf : it->second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_data->d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-exp") << "...tf term : " << a << std::endl;
+ tf_args.push_back(a);
+ tf_arg_to_term[a] = tf;
+ }
+ }
+
+ if (tf_args.empty())
+ {
+ return;
+ }
+
+ sortByNlModel(
+ tf_args.begin(), tf_args.end(), &d_data->d_model, true, false, true);
+
+ Node targ, targval, t, tval;
+ for (const auto& sarg : tf_args)
+ {
+ Node sargval = d_data->d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[sarg];
+ Node sval = d_data->d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // store the concavity region
+ d_data->d_tf_region[s] = 1;
+ Trace("nl-ext-concavity") << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull() && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, targ, sarg),
+ nm->mkNode(Kind::GEQ, t, s));
+ Trace("nl-ext-exp") << "Monotonicity lemma : " << mono_lem << std::endl;
+
+ d_data->d_im.addPendingArithLemma(mono_lem,
+ InferenceId::NL_T_MONOTONICITY);
+ }
+ // store the previous values
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+}
+
+void ExponentialSolver::doTangentLemma(TNode e, TNode c, TNode poly_approx)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ // Tangent plane is valid in the interval [c,u).
+ Node lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, e[0], c),
+ nm->mkNode(Kind::GEQ, e, poly_approx));
+ Trace("nl-ext-exp") << "*** Tangent plane lemma (pre-rewrite): " << lem
+ << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-exp") << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
+ // Figure 3 : line 9
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
+}
+
+void ExponentialSolver::doSecantLemmas(
+ TNode e, TNode poly_approx, TNode c, TNode poly_approx_c, unsigned d)
+{
+ d_data->doSecantLemmas(
+ getSecantBounds(e, c, d), poly_approx, c, poly_approx_c, e, d, 1);
+}
+
+std::pair<Node, Node> ExponentialSolver::getSecantBounds(TNode e,
+ TNode c,
+ unsigned d)
+{
+ std::pair<Node, Node> bounds = d_data->getClosestSecantPoints(e, c, d);
+
+ // Check if we already have neighboring secant points
+ if (bounds.first.isNull())
+ {
+ // pick c-1
+ bounds.first = Rewriter::rewrite(
+ NodeManager::currentNM()->mkNode(Kind::MINUS, c, d_data->d_one));
+ }
+ if (bounds.second.isNull())
+ {
+ // pick c+1
+ bounds.second = Rewriter::rewrite(
+ NodeManager::currentNM()->mkNode(Kind::PLUS, c, d_data->d_one));
+ }
+ return bounds;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl/transcendental/exponential_solver.h b/src/theory/arith/nl/transcendental/exponential_solver.h
new file mode 100644
index 000000000..b20c23e56
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/exponential_solver.h
@@ -0,0 +1,111 @@
+/********************* */
+/*! \file exponential_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling the exponential function.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Exponential solver class
+ *
+ * This class implements model-based refinement schemes
+ * for the exponential function, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of methods that implement the lemma schemas
+ * below, which return sets of lemmas to be sent on the output channel.
+ */
+class ExponentialSolver
+{
+ public:
+ ExponentialSolver(TranscendentalState* tstate);
+ ~ExponentialSolver();
+
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+
+ /**
+ * check initial refine
+ *
+ * Constructs a set of valid theory lemmas, based on
+ * simple facts about the exponential function.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * exp( x )>0
+ * x<0 => exp( x )<1
+ */
+ void checkInitialRefine();
+
+ /**
+ * check monotonicity
+ *
+ * Constructs a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of the exponential function respect monotonicity.
+ *
+ * Examples:
+ *
+ * x > y => exp( x ) > exp( y )
+ */
+ void checkMonotonic();
+
+  /** Send a tangent lemma around c for e */
+ void doTangentLemma(TNode e, TNode c, TNode poly_approx);
+
+  /** Send secant lemmas around c for e */
+ void doSecantLemmas(
+ TNode e, TNode poly_approx, TNode c, TNode poly_approx_c, unsigned d);
+
+ private:
+ /** Generate bounds for secant lemmas */
+ std::pair<Node, Node> getSecantBounds(TNode e, TNode c, unsigned d);
+
+ /** Holds common state for transcendental solvers */
+ TranscendentalState* d_data;
+
+ /** The transcendental functions we have done initial refinements on */
+ std::map<Node, bool> d_tf_initial_refine;
+
+}; /* class ExponentialSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H */
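
For reference, the full initial-refinement lemma that checkInitialRefine instantiates in exponential_solver.cpp for each application exp(x) is the single conjunction

    \exp(x) > 0 \;\wedge\; \bigl(x = 0 \leftrightarrow \exp(x) = 1\bigr) \;\wedge\;
    \bigl(x < 0 \leftrightarrow \exp(x) < 1\bigr) \;\wedge\;
    \bigl(x \le 0 \,\vee\, \exp(x) > x + 1\bigr)

of which the two examples listed above are the first conjunct and one direction of the third.
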
diff --git a/src/theory/arith/nl/transcendental/sine_solver.cpp b/src/theory/arith/nl/transcendental/sine_solver.cpp
new file mode 100644
index 000000000..31fd47503
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/sine_solver.cpp
@@ -0,0 +1,333 @@
+/********************* */
+/*! \file sine_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the solver for handling the sine function.
+ **/
+
+#include "theory/arith/nl/transcendental/sine_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+SineSolver::SineSolver(TranscendentalState* tstate) : d_data(tstate) {}
+
+SineSolver::~SineSolver() {}
+
+void SineSolver::checkInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_data->d_funcMap)
+ {
+ if (tfl.first != Kind::SINE)
+ {
+ continue;
+ }
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ Node symn = nm->mkNode(Kind::SINE,
+ nm->mkNode(Kind::MULT, d_data->d_neg_one, t[0]));
+ symn = Rewriter::rewrite(symn);
+ // Can assume it is its own master since phase is split over 0,
+ // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
+ d_data->d_trMaster[symn] = symn;
+ d_data->d_trSlaves[symn].insert(symn);
+ Assert(d_data->d_trSlaves.find(t) != d_data->d_trSlaves.end());
+ std::vector<Node> children;
+
+ lem =
+ nm->mkNode(Kind::AND,
+ // bounds
+ nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::LEQ, t, d_data->d_one),
+ nm->mkNode(Kind::GEQ, t, d_data->d_neg_one)),
+ // symmetry
+ nm->mkNode(Kind::PLUS, t, symn).eqNode(d_data->d_zero),
+ // sign
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, d_data->d_zero)),
+ // zero val
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::GT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::GT, t, d_data->d_zero)));
+ lem = nm->mkNode(
+ Kind::AND,
+ lem,
+ // zero tangent
+ nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, t[0])),
+ nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::GT, t, t[0]))),
+ // pi tangent
+ nm->mkNode(
+ Kind::AND,
+ nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::LT, t[0], d_data->d_pi),
+ nm->mkNode(Kind::LT,
+ t,
+ nm->mkNode(Kind::MINUS, d_data->d_pi, t[0]))),
+ nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::GT, t[0], d_data->d_pi_neg),
+ nm->mkNode(
+ Kind::GT,
+ t,
+ nm->mkNode(Kind::MINUS, d_data->d_pi_neg, t[0])))));
+ if (!lem.isNull())
+ {
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
+ }
+ }
+ }
+ }
+}
+
+void SineSolver::checkMonotonic()
+{
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ auto it = d_data->d_funcMap.find(Kind::SINE);
+ if (it == d_data->d_funcMap.end())
+ {
+ Trace("nl-ext-exp") << "No sine terms" << std::endl;
+ return;
+ }
+
+ // sort arguments of all transcendentals
+ std::vector<Node> tf_args;
+ std::map<Node, Node> tf_arg_to_term;
+
+ for (const Node& tf : it->second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_data->d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
+ tf_args.push_back(a);
+ tf_arg_to_term[a] = tf;
+ }
+ }
+
+ if (tf_args.empty())
+ {
+ return;
+ }
+
+ sortByNlModel(
+ tf_args.begin(), tf_args.end(), &d_data->d_model, true, false, true);
+
+ std::vector<Node> mpoints = {d_data->d_pi,
+ d_data->d_pi_2,
+ d_data->d_zero,
+ d_data->d_pi_neg_2,
+ d_data->d_pi_neg};
+ std::vector<Node> mpoints_vals;
+
+ // get model values for points
+ for (const auto& point : mpoints)
+ {
+ mpoints_vals.emplace_back(d_data->d_model.computeAbstractModelValue(point));
+ Assert(mpoints_vals.back().isConst());
+ }
+
+ unsigned mdir_index = 0;
+ int monotonic_dir = -1;
+ Node mono_bounds[2];
+ Node targ, targval, t, tval;
+ for (const auto& sarg : tf_args)
+ {
+ Node sargval = d_data->d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[sarg];
+ Node sval = d_data->d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // increment to the proper monotonicity region
+ bool increment = true;
+ while (increment && mdir_index < mpoints.size())
+ {
+ increment = false;
+ Node pval = mpoints_vals[mdir_index];
+ Assert(pval.isConst());
+ if (sargval.getConst<Rational>() < pval.getConst<Rational>())
+ {
+ increment = true;
+ Trace("nl-ext-tf-mono")
+ << "...increment at " << sarg << " since model value is less than "
+ << mpoints[mdir_index] << std::endl;
+ }
+ if (increment)
+ {
+ tval = Node::null();
+ mono_bounds[1] = mpoints[mdir_index];
+ mdir_index++;
+ monotonic_dir = regionToMonotonicityDir(mdir_index);
+ if (mdir_index < mpoints.size())
+ {
+ mono_bounds[0] = mpoints[mdir_index];
+ }
+ else
+ {
+ mono_bounds[0] = Node::null();
+ }
+ }
+ }
+ // store the concavity region
+ d_data->d_tf_region[s] = mdir_index;
+ Trace("nl-ext-concavity")
+ << "Transcendental function " << s << " is in region #" << mdir_index;
+ Trace("nl-ext-concavity") << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem;
+ if (monotonic_dir == 1
+ && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, targ, sarg),
+ nm->mkNode(Kind::GEQ, t, s));
+ }
+ else if (monotonic_dir == -1
+ && sval.getConst<Rational>() < tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::LEQ, targ, sarg),
+ nm->mkNode(Kind::LEQ, t, s));
+ }
+ if (!mono_lem.isNull())
+ {
+ if (!mono_bounds[0].isNull())
+ {
+ Assert(!mono_bounds[1].isNull());
+ mono_lem = nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::AND,
+ mkBounded(mono_bounds[0], targ, mono_bounds[1]),
+ mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
+ mono_lem);
+ }
+ Trace("nl-ext-tf-mono")
+ << "Monotonicity lemma : " << mono_lem << std::endl;
+
+ d_data->d_im.addPendingArithLemma(mono_lem,
+ InferenceId::NL_T_MONOTONICITY);
+ }
+ }
+ // store the previous values
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+}
+
+void SineSolver::doTangentLemma(TNode e, TNode c, TNode poly_approx, int region)
+{
+ NodeManager* nm = NodeManager::currentNM();
+
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ int concavity = regionToConcavity(region);
+ int mdir = regionToMonotonicityDir(region);
+ Node lem = nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(
+ Kind::AND,
+ nm->mkNode(Kind::GEQ,
+ e[0],
+ mdir == concavity ? Node(c) : regionToLowerBound(region)),
+ nm->mkNode(Kind::LEQ,
+ e[0],
+ mdir != concavity ? Node(c) : regionToUpperBound(region))),
+ nm->mkNode(concavity == 1 ? Kind::GEQ : Kind::LEQ, e, poly_approx));
+
+ Trace("nl-ext-sine") << "*** Tangent plane lemma (pre-rewrite): " << lem
+ << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-sine") << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
+ // Figure 3 : line 9
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
+}
+
+void SineSolver::doSecantLemmas(TNode e,
+ TNode poly_approx,
+ TNode c,
+ TNode poly_approx_c,
+ unsigned d,
+ int region)
+{
+ d_data->doSecantLemmas(getSecantBounds(e, c, d, region),
+ poly_approx,
+ c,
+ poly_approx_c,
+ e,
+ d,
+ regionToConcavity(region));
+}
+
+std::pair<Node, Node> SineSolver::getSecantBounds(TNode e,
+ TNode c,
+ unsigned d,
+ int region)
+{
+ std::pair<Node, Node> bounds = d_data->getClosestSecantPoints(e, c, d);
+
+ // Check if we already have neighboring secant points
+ if (bounds.first.isNull())
+ {
+ // lower boundary point for this concavity region
+ bounds.first = regionToLowerBound(region);
+ }
+ if (bounds.second.isNull())
+ {
+ // upper boundary point for this concavity region
+ bounds.second = regionToUpperBound(region);
+ }
+ return bounds;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl/transcendental/sine_solver.h b/src/theory/arith/nl/transcendental/sine_solver.h
new file mode 100644
index 000000000..9f9102a53
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/sine_solver.h
@@ -0,0 +1,181 @@
+/********************* */
+/*! \file sine_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling the sine function.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Sine solver class
+ *
+ * This class implements model-based refinement schemes
+ * for the sine function, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of methods that implement the lemma schemas
+ * below, which return sets of lemmas to be sent on the output channel.
+ */
+class SineSolver
+{
+ public:
+ SineSolver(TranscendentalState* tstate);
+ ~SineSolver();
+
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+
+ /**
+ * check initial refine
+ *
+ * Constructs a set of valid theory lemmas, based on
+ * simple facts about the sine function.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * sin( x ) = -sin( -x )
+ *   ( PI > x > 0 ) => 0 < sin( x ) <= 1
+ */
+ void checkInitialRefine();
+
+ /**
+ * check monotonicity
+ *
+ * Constructs a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of the sine function respect monotonicity.
+ *
+ * Examples:
+ *
+ * PI/2 > x > y > 0 => sin( x ) > sin( y )
+ * PI > x > y > PI/2 => sin( x ) < sin( y )
+ */
+ void checkMonotonic();
+
+  /** Send a tangent lemma around c for e */
+ void doTangentLemma(TNode e, TNode c, TNode poly_approx, int region);
+
+  /** Send secant lemmas around c for e */
+ void doSecantLemmas(TNode e,
+ TNode poly_approx,
+ TNode c,
+ TNode poly_approx_c,
+ unsigned d,
+ int region);
+
+ private:
+ std::pair<Node, Node> getSecantBounds(TNode e,
+ TNode c,
+ unsigned d,
+ int region);
+
+  /** region to lower bound
+   *
+   * Returns the term corresponding to the lower bound of the given
+   * concavity region of the sine function. Returns the null node if the
+   * region is invalid or has no lower bound.
+   */
+ Node regionToLowerBound(int region)
+ {
+ switch (region)
+ {
+ case 1: return d_data->d_pi_2;
+ case 2: return d_data->d_zero;
+ case 3: return d_data->d_pi_neg_2;
+ case 4: return d_data->d_pi_neg;
+ default: return Node();
+ }
+ }
+
+  /** region to upper bound
+   *
+   * Returns the term corresponding to the upper bound of the given
+   * concavity region of the sine function. Returns the null node if the
+   * region is invalid or has no upper bound.
+   */
+ Node regionToUpperBound(int region)
+ {
+ switch (region)
+ {
+ case 1: return d_data->d_pi;
+ case 2: return d_data->d_pi_2;
+ case 3: return d_data->d_zero;
+ case 4: return d_data->d_pi_neg_2;
+ default: return Node();
+ }
+ }
+
+ int regionToMonotonicityDir(int region)
+ {
+ switch (region)
+ {
+ case 1:
+ case 4: return -1;
+ case 2:
+ case 3: return 1;
+ default: return 0;
+ }
+ }
+ int regionToConcavity(int region)
+ {
+ switch (region)
+ {
+ case 1:
+ case 2: return -1;
+ case 3:
+ case 4: return 1;
+ default: return 0;
+ }
+ }
+
+ /** Holds common state for transcendental solvers */
+ TranscendentalState* d_data;
+
+ /** The transcendental functions we have done initial refinements on */
+ std::map<Node, bool> d_tf_initial_refine;
+
+}; /* class SineSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H */
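
The region indices used by the private helpers above can be summarized as follows; the intervals are read off regionToLowerBound/regionToUpperBound, and the table assumes the usual reading of the return values, with -1 standing for decreasing (respectively concave) and +1 for increasing (respectively convex):

    \begin{array}{c|c|c|c}
    \text{region} & \text{interval} & \text{monotonicity} & \text{concavity} \\
    \hline
    1 & [\pi/2,\ \pi]   & -1 & -1 \\
    2 & [0,\ \pi/2]     & +1 & -1 \\
    3 & [-\pi/2,\ 0]    & +1 & +1 \\
    4 & [-\pi,\ -\pi/2] & -1 & +1
    \end{array}
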
diff --git a/src/theory/arith/nl/transcendental/taylor_generator.cpp b/src/theory/arith/nl/transcendental/taylor_generator.cpp
new file mode 100644
index 000000000..a373339e9
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/taylor_generator.cpp
@@ -0,0 +1,317 @@
+/********************* */
+/*! \file taylor_generator.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Generate Taylor approximations for transcendental lemmas.
+ **/
+
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+TaylorGenerator::TaylorGenerator()
+ : d_nm(NodeManager::currentNM()),
+ d_taylor_real_fv(d_nm->mkBoundVar("x", d_nm->realType())),
+ d_taylor_real_fv_base(d_nm->mkBoundVar("a", d_nm->realType())),
+ d_taylor_real_fv_base_rem(d_nm->mkBoundVar("b", d_nm->realType()))
+{
+}
+
+TNode TaylorGenerator::getTaylorVariable() { return d_taylor_real_fv; }
+
+std::pair<Node, Node> TaylorGenerator::getTaylor(TNode fa, std::uint64_t n)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Node d_zero = nm->mkConst(Rational(0));
+
+ Assert(n > 0);
+ Node fac; // what term we cache for fa
+ if (fa[0] == d_zero)
+ {
+ // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
+ fac = fa;
+ }
+ else
+ {
+ // otherwise we use a standard factor a in (x-a)^n
+ fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
+ }
+ Node taylor_rem;
+ Node taylor_sum;
+ // check if we have already computed this Taylor series
+ auto itt = s_taylor_sum[fac].find(n);
+ if (itt == s_taylor_sum[fac].end())
+ {
+ Node i_exp_base;
+ if (fa[0] == d_zero)
+ {
+ i_exp_base = d_taylor_real_fv;
+ }
+ else
+ {
+ i_exp_base = Rewriter::rewrite(
+ nm->mkNode(Kind::MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
+ }
+ Node i_derv = fac;
+ Node i_fact = nm->mkConst(Rational(1));
+ Node i_exp = i_fact;
+ int i_derv_status = 0;
+ unsigned counter = 0;
+ std::vector<Node> sum;
+ do
+ {
+ counter++;
+ if (fa.getKind() == Kind::EXPONENTIAL)
+ {
+ // unchanged
+ }
+ else if (fa.getKind() == Kind::SINE)
+ {
+ if (i_derv_status % 2 == 1)
+ {
+ Node pi = nm->mkNullaryOperator(nm->realType(), Kind::PI);
+ Node pi_2 = Rewriter::rewrite(nm->mkNode(
+ Kind::MULT, pi, nm->mkConst(Rational(1) / Rational(2))));
+
+ Node arg = nm->mkNode(Kind::PLUS, pi_2, d_taylor_real_fv_base);
+ i_derv = nm->mkNode(Kind::SINE, arg);
+ }
+ else
+ {
+ i_derv = fa;
+ }
+ if (i_derv_status >= 2)
+ {
+ i_derv = nm->mkNode(Kind::MINUS, d_zero, i_derv);
+ }
+ i_derv = Rewriter::rewrite(i_derv);
+ i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
+ }
+ if (counter == (n + 1))
+ {
+ TNode x = d_taylor_real_fv_base;
+ i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
+ }
+ Node curr = nm->mkNode(
+ Kind::MULT, nm->mkNode(Kind::DIVISION, i_derv, i_fact), i_exp);
+ if (counter == (n + 1))
+ {
+ taylor_rem = curr;
+ }
+ else
+ {
+ sum.push_back(curr);
+ i_fact = Rewriter::rewrite(
+ nm->mkNode(Kind::MULT, nm->mkConst(Rational(counter)), i_fact));
+ i_exp = Rewriter::rewrite(nm->mkNode(Kind::MULT, i_exp_base, i_exp));
+ }
+ } while (counter <= n);
+ taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(Kind::PLUS, sum);
+
+ if (fac[0] != d_taylor_real_fv_base)
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fac[0]);
+ }
+
+ // cache
+ s_taylor_sum[fac][n] = taylor_sum;
+ d_taylor_rem[fac][n] = taylor_rem;
+ }
+ else
+ {
+ taylor_sum = itt->second;
+ Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
+ taylor_rem = d_taylor_rem[fac][n];
+ }
+
+ // must substitute for the argument if we were using a different lookup
+ if (fa[0] != fac[0])
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fa[0]);
+ }
+ return std::pair<Node, Node>(taylor_sum, taylor_rem);
+}
+
+void TaylorGenerator::getPolynomialApproximationBounds(
+ Kind k, unsigned d, std::vector<Node>& pbounds)
+{
+ if (d_poly_bounds[k][d].empty())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, nm->mkConst(Rational(0)));
+ // n is the Taylor degree we are currently considering
+ unsigned n = 2 * d;
+ // n must be even
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is : " << taylor.first << std::endl;
+ Node taylor_sum = Rewriter::rewrite(taylor.first);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
+ << std::endl;
+ Assert(taylor.second.getKind() == Kind::MULT);
+ Assert(taylor.second.getNumChildren() == 2);
+ Assert(taylor.second[0].getKind() == Kind::DIVISION);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
+ // ru is x^{n+1}/(n+1)!
+ Node ru = nm->mkNode(Kind::DIVISION, taylor.second[1], taylor.second[0][1]);
+ ru = Rewriter::rewrite(ru);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
+ if (k == Kind::EXPONENTIAL)
+ {
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(Rewriter::rewrite(
+ nm->mkNode(Kind::MULT,
+ taylor_sum,
+ nm->mkNode(Kind::PLUS, nm->mkConst(Rational(1)), ru))));
+ pbounds.push_back(
+ Rewriter::rewrite(nm->mkNode(Kind::PLUS, taylor_sum, ru)));
+ }
+ else
+ {
+ Assert(k == Kind::SINE);
+ Node l = Rewriter::rewrite(nm->mkNode(Kind::MINUS, taylor_sum, ru));
+ Node u = Rewriter::rewrite(nm->mkNode(Kind::PLUS, taylor_sum, ru));
+ pbounds.push_back(l);
+ pbounds.push_back(l);
+ pbounds.push_back(u);
+ pbounds.push_back(u);
+ }
+ Trace("nl-ext-tf-tplanes")
+ << "Polynomial approximation for " << k << " is: " << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
+ d_poly_bounds[k][d].insert(
+ d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
+ }
+ else
+ {
+ pbounds.insert(
+ pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
+ }
+}
+
+void TaylorGenerator::getPolynomialApproximationBoundForArg(
+ Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
+{
+ getPolynomialApproximationBounds(k, d, pbounds);
+ Assert(c.isConst());
+ if (k == Kind::EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, nm->mkConst(Rational(0)));
+ bool success = false;
+ unsigned ds = d;
+ TNode ttrf = getTaylorVariable();
+ TNode tc = c;
+ do
+ {
+ success = true;
+ unsigned n = 2 * ds;
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ // check that 1-c^{n+1}/(n+1)! > 0
+ Node ru =
+ nm->mkNode(Kind::DIVISION, taylor.second[1], taylor.second[0][1]);
+ Node rus = ru.substitute(ttrf, tc);
+ rus = Rewriter::rewrite(rus);
+ Assert(rus.isConst());
+ if (rus.getConst<Rational>() > 1)
+ {
+ success = false;
+ ds = ds + 1;
+ }
+ } while (!success);
+ if (ds > d)
+ {
+ Trace("nl-ext-exp-taylor")
+ << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
+ << k << " " << c << ")" << std::endl;
+ // must use sound upper bound
+ std::vector<Node> pboundss;
+ getPolynomialApproximationBounds(k, ds, pboundss);
+ pbounds[2] = pboundss[2];
+ }
+ }
+}
+
+std::pair<Node, Node> TaylorGenerator::getTfModelBounds(Node tf,
+                                                        unsigned d,
+                                                        NlModel& model)
+{
+ // compute the model value of the argument
+ Node c = model.computeAbstractModelValue(tf[0]);
+ Assert(c.isConst());
+ int csign = c.getConst<Rational>().sgn();
+ Kind k = tf.getKind();
+ if (csign == 0)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+    // at zero, it's trivial
+ if (k == Kind::SINE)
+ {
+ Node zero = nm->mkConst(Rational(0));
+ return std::pair<Node, Node>(zero, zero);
+ }
+ Assert(k == Kind::EXPONENTIAL);
+ Node one = nm->mkConst(Rational(1));
+ return std::pair<Node, Node>(one, one);
+ }
+ bool isNeg = csign == -1;
+
+ std::vector<Node> pbounds;
+ getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+
+ std::vector<Node> bounds;
+ TNode tfv = getTaylorVariable();
+ TNode tfs = tf[0];
+ for (unsigned d2 = 0; d2 < 2; d2++)
+ {
+ int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
+ Node pab = pbounds[index];
+ if (!pab.isNull())
+ {
+ // { x -> M_A(tf[0]) }
+ // Notice that we compute the model value of tfs first, so that
+ // the call to rewrite below does not modify the term, where notice that
+ // rewrite( x*x { x -> M_A(t) } ) = M_A(t)*M_A(t)
+ // is not equal to
+ // M_A( x*x { x -> t } ) = M_A( t*t )
+ // where M_A denotes the abstract model.
+ Node mtfs = model.computeAbstractModelValue(tfs);
+ pab = pab.substitute(tfv, mtfs);
+ pab = Rewriter::rewrite(pab);
+ Assert(pab.isConst());
+ bounds.push_back(pab);
+ }
+ else
+ {
+ bounds.push_back(Node::null());
+ }
+ }
+ return std::pair<Node, Node>(bounds[0], bounds[1]);
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl/transcendental/taylor_generator.h b/src/theory/arith/nl/transcendental/taylor_generator.h
new file mode 100644
index 000000000..c647f7fd2
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/taylor_generator.h
@@ -0,0 +1,118 @@
+/********************* */
+/*! \file taylor_generator.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Generate Taylor approximations for transcendental lemmas.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H
+
+#include "expr/node.h"
+#include "theory/arith/nl/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+class TaylorGenerator
+{
+ public:
+ TaylorGenerator();
+
+ /**
+ * Return the variable used as x in getTaylor().
+ */
+ TNode getTaylorVariable();
+
+ /**
+ * Get Taylor series of degree n for function fa centered around point fa[0].
+ *
+ * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
+ * the first part of the pair is the Taylor series expansion :
+ * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
+ * and the second part of the pair is the Taylor series remainder :
+ * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
+ *
+ * The above values are cached for each (f,n) for a fixed variable "a".
+ * To compute the Taylor series for fa, we compute the Taylor series
+ * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
+ * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
+ * if fa[0]=0.
+ * In the latter case, note we compute the power x^{n+1}
+ * instead of (x-a)^{n+1}, which can be done faster.
+ */
+ std::pair<Node, Node> getTaylor(TNode fa, std::uint64_t n);
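+
+  /*
+   * As a concrete illustration of the formulas above (values chosen only for
+   * exposition): for fa = (EXPONENTIAL 0) and n = 2, every derivative of the
+   * exponential is the exponential itself, so the returned pair is
+   *   P_{2,exp(0)}( x )   = 1 + x + (1/2)*x^2
+   *   R_{3,exp(0),b}( x ) = (exp( b )/6)*x^3
+   * modulo rewriting.
+   */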
+
+ /**
+ * polynomial approximation bounds
+ *
+ * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
+ * d_taylor_real_fv. These are polynomial approximations of the Taylor series
+ * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
+ * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
+ * for positive/negative (+/-) values of the argument of <k>( 0 ).
+ *
+ * Notice that for certain bounds (e.g. upper bounds for exponential), the
+ * Taylor approximation for a fixed degree is only sound up to a given
+ * upper bound on the argument. To obtain sound lower/upper bounds for a
+ * given <k>( c ), use the function below.
+ */
+ void getPolynomialApproximationBounds(Kind k,
+ unsigned d,
+ std::vector<Node>& pbounds);
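+
+  /*
+   * Worked instance of the scheme above (for exposition only; the exact
+   * rewritten forms may differ): for EXPONENTIAL and d = 1 (Taylor degree
+   * n = 2), with p = 1 + x + (1/2)*x^2 and remainder factor r = (1/6)*x^3,
+   * the four entries added to pbounds are
+   *   P_l+[x] = p,          P_l-[x] = p,
+   *   P_u+[x] = p*(1 + r),  P_u-[x] = p + r.
+   */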
+
+ /**
+ * polynomial approximation bounds
+ *
+ * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
+ * that are sound (lower, upper) bounds for <k>( c ). Notice that these
+ * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
+ * c>0, we return the P_u+[x] from the function above for the minimum degree
+ * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
+ */
+ void getPolynomialApproximationBoundForArg(Kind k,
+ Node c,
+ unsigned d,
+ std::vector<Node>& pbounds);
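+
+  /*
+   * Small numeric sketch of the degree bump above (numbers chosen only to
+   * illustrate the condition): for c = 2 and d = 1 we have
+   * c^{2*d+1}/(2*d+1)! = 8/6 > 1, so degree d does not give a sound upper
+   * bound, whereas d' = 2 gives 2^5/5! = 32/120 < 1 and its P_u+[x] is
+   * returned instead.
+   */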
+
+ /** get transcendental function model bounds
+ *
+ * This returns the current lower and upper bounds of transcendental
+ * function application tf based on Taylor of degree 2*d, which is dependent
+ * on the model value of its argument.
+ */
+ std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d, NlModel& model);
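+
+  /*
+   * For example (mirroring the implementation): if the model value of tf[0]
+   * is 0, the returned bounds are (0, 0) for SINE and (1, 1) for EXPONENTIAL;
+   * otherwise the appropriate lower/upper polynomial bounds are evaluated at
+   * that model value.
+   */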
+
+ private:
+ NodeManager* d_nm;
+ const Node d_taylor_real_fv;
+ const Node d_taylor_real_fv_base;
+ const Node d_taylor_real_fv_base_rem;
+ std::unordered_map<Node,
+ std::unordered_map<std::uint64_t, Node>,
+ NodeHashFunction>
+ s_taylor_sum;
+ std::unordered_map<Node,
+ std::unordered_map<std::uint64_t, Node>,
+ NodeHashFunction>
+ d_taylor_rem;
+ std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
+};
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H */
diff --git a/src/theory/arith/nl/transcendental/transcendental_solver.cpp b/src/theory/arith/nl/transcendental/transcendental_solver.cpp
new file mode 100644
index 000000000..8eb69e50b
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/transcendental_solver.cpp
@@ -0,0 +1,402 @@
+/********************* */
+/*! \file transcendental_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of solver for handling transcendental functions.
+ **/
+
+#include "theory/arith/nl/transcendental/transcendental_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+TranscendentalSolver::TranscendentalSolver(InferenceManager& im, NlModel& m)
+ : d_tstate(im, m), d_expSlv(&d_tstate), d_sineSlv(&d_tstate)
+{
+ d_taylor_degree = options::nlExtTfTaylorDegree();
+}
+
+TranscendentalSolver::~TranscendentalSolver() {}
+
+void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts)
+{
+ d_tstate.init(assertions, false_asserts, xts);
+}
+
+bool TranscendentalSolver::preprocessAssertionsCheckModel(
+ std::vector<Node>& assertions)
+{
+ std::vector<Node> pvars;
+ std::vector<Node> psubs;
+ for (const std::pair<const Node, Node>& tb : d_tstate.d_trMaster)
+ {
+ pvars.push_back(tb.first);
+ psubs.push_back(tb.second);
+ }
+
+ // initialize representation of assertions
+ std::vector<Node> passertions;
+ for (const Node& a : assertions)
+
+ {
+ Node pa = a;
+ if (!pvars.empty())
+ {
+ pa = arithSubstitute(pa, pvars, psubs);
+ pa = Rewriter::rewrite(pa);
+ }
+ if (!pa.isConst() || !pa.getConst<bool>())
+ {
+ Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
+ passertions.push_back(pa);
+ }
+ }
+ // get model bounds for all transcendental functions
+ Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfs : d_tstate.d_funcMap)
+ {
+ for (const Node& tf : tfs.second)
+ {
+ Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
+ bool success = true;
+ // tf is Figure 3 : tf( x )
+ std::pair<Node, Node> bounds;
+ if (tfs.first == Kind::PI)
+ {
+ bounds = {d_tstate.d_pi_bound[0], d_tstate.d_pi_bound[1]};
+ }
+ else
+ {
+ bounds = d_tstate.d_taylor.getTfModelBounds(
+ tf, d_taylor_degree, d_tstate.d_model);
+ if (bounds.first != bounds.second)
+ {
+ d_tstate.d_model.setUsedApproximate();
+ }
+ }
+ if (!bounds.first.isNull() && !bounds.second.isNull())
+ {
+        // for each function in the congruence class
+ for (const Node& ctf : d_tstate.d_funcCongClass[tf])
+ {
+ // each term in congruence classes should be master terms
+ Assert(d_tstate.d_trSlaves.find(ctf) != d_tstate.d_trSlaves.end());
+ // we set the bounds for each slave of tf
+ for (const Node& stf : d_tstate.d_trSlaves[ctf])
+ {
+ Trace("nl-ext-cm")
+ << "...bound for " << stf << " : [" << bounds.first << ", "
+ << bounds.second << "]" << std::endl;
+ success = d_tstate.d_model.addCheckModelBound(
+ stf, bounds.first, bounds.second);
+ }
+ }
+ }
+ else
+ {
+ Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
+ }
+ if (!success)
+ {
+ // a bound was conflicting
+ Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
+ Trace("nl-ext-cm") << "-----" << std::endl;
+ return false;
+ }
+ }
+ }
+ // replace the assertions
+ assertions = std::move(passertions);
+ return true;
+}
+
+void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
+unsigned TranscendentalSolver::getTaylorDegree() const
+{
+ return d_taylor_degree;
+}
+
+void TranscendentalSolver::processSideEffect(const NlLemma& se)
+{
+ for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
+ {
+ Node tf = std::get<0>(sp);
+ unsigned d = std::get<1>(sp);
+ Node c = std::get<2>(sp);
+ d_tstate.d_secant_points[tf][d].push_back(c);
+ }
+}
+
+void TranscendentalSolver::checkTranscendentalInitialRefine()
+{
+ d_expSlv.checkInitialRefine();
+ d_sineSlv.checkInitialRefine();
+}
+
+void TranscendentalSolver::checkTranscendentalMonotonic()
+{
+ d_expSlv.checkMonotonic();
+ d_sineSlv.checkMonotonic();
+}
+
+void TranscendentalSolver::checkTranscendentalTangentPlanes()
+{
+ Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
+ << std::endl;
+ // this implements Figure 3 of "Satisfiaility Modulo Transcendental Functions
+ // via Incremental Linearization" by Cimatti et al
+ for (const std::pair<const Kind, std::vector<Node> >& tfs :
+ d_tstate.d_funcMap)
+ {
+ Kind k = tfs.first;
+ if (k == PI)
+ {
+ // We do not use Taylor approximation for PI currently.
+ // This is because the convergence is extremely slow, and hence an
+ // initial approximation is superior.
+ continue;
+ }
+
+ // we substitute into the Taylor sum P_{n,f(0)}( x )
+
+ for (const Node& tf : tfs.second)
+ {
+ // tf is Figure 3 : tf( x )
+ Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
+ // go until max degree is reached, or we don't meet bound criteria
+ for (unsigned d = 1; d <= d_taylor_degree; d++)
+ {
+ Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
+ unsigned prev =
+ d_tstate.d_im.numPendingLemmas() + d_tstate.d_im.numWaitingLemmas();
+ if (checkTfTangentPlanesFun(tf, d))
+ {
+ Trace("nl-ext-tftp") << "...fail, #lemmas = "
+ << (d_tstate.d_im.numPendingLemmas()
+ + d_tstate.d_im.numWaitingLemmas() - prev)
+ << std::endl;
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp") << "...success" << std::endl;
+ }
+ }
+ }
+ }
+}
+
+bool TranscendentalSolver::checkTfTangentPlanesFun(Node tf, unsigned d)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Kind k = tf.getKind();
+ // this should only be run on master applications
+ Assert(d_tstate.d_trSlaves.find(tf) != d_tstate.d_trSlaves.end());
+
+ // Figure 3 : c
+ Node c = d_tstate.d_model.computeAbstractModelValue(tf[0]);
+ int csign = c.getConst<Rational>().sgn();
+ if (csign == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+ Assert(csign == 1 || csign == -1);
+
+ // Figure 3: P_l, P_u
+ // mapped to for signs of c
+ std::map<int, Node> poly_approx_bounds[2];
+ std::vector<Node> pbounds;
+ d_tstate.d_taylor.getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+ poly_approx_bounds[0][1] = pbounds[0];
+ poly_approx_bounds[0][-1] = pbounds[1];
+ poly_approx_bounds[1][1] = pbounds[2];
+ poly_approx_bounds[1][-1] = pbounds[3];
+
+ // Figure 3 : v
+ Node v = d_tstate.d_model.computeAbstractModelValue(tf);
+
+ // check value of tf
+ Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
+ << ", degree " << d << "..." << std::endl;
+ Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
+ Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
+
+ // compute the concavity
+ int region = -1;
+ std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
+ d_tstate.d_tf_region.find(tf);
+ if (itr != d_tstate.d_tf_region.end())
+ {
+ region = itr->second;
+ Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
+ }
+ // Figure 3 : conc
+  int concavity = regionToConcavity(k, region);
+ Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
+ if (concavity == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+
+ // Figure 3: P
+ Node poly_approx;
+
+ // compute whether this is a tangent refinement or a secant refinement
+ bool is_tangent = false;
+ bool is_secant = false;
+ std::pair<Node, Node> mvb =
+ d_tstate.d_taylor.getTfModelBounds(tf, d, d_tstate.d_model);
+ // this is the approximated value of tf(c), which is a value such that:
+  //   M_A(tf(c)) >= poly_approx_c >= tf(c) or
+  //   M_A(tf(c)) <= poly_approx_c <= tf(c)
+ // In other words, it is a better approximation of the true value of tf(c)
+ // in the case that we add a refinement lemma. We use this value in the
+ // refinement schemas below.
+ Node poly_approx_c;
+ for (unsigned r = 0; r < 2; r++)
+ {
+ Node pab = poly_approx_bounds[r][csign];
+ Node v_pab = r == 0 ? mvb.first : mvb.second;
+ if (!v_pab.isNull())
+ {
+ Trace("nl-ext-tftp-debug2")
+ << "...model value of " << pab << " is " << v_pab << std::endl;
+
+ Assert(v_pab.isConst());
+ Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
+ Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
+ Node compr = Rewriter::rewrite(comp);
+ Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
+ if (compr == d_tstate.d_true)
+ {
+ poly_approx_c = Rewriter::rewrite(v_pab);
+ // beyond the bounds
+ if (r == 0)
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == 1;
+ is_secant = concavity == -1;
+ }
+ else
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == -1;
+ is_secant = concavity == 1;
+ }
+ if (Trace.isOn("nl-ext-tftp"))
+ {
+ Trace("nl-ext-tftp") << "*** Outside boundary point (";
+ Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << ", will refine..." << std::endl;
+ Trace("nl-ext-tftp")
+ << " poly_approx = " << poly_approx << std::endl;
+ Trace("nl-ext-tftp")
+ << " is_tangent = " << is_tangent << std::endl;
+ Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
+ }
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp")
+ << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << std::endl;
+ }
+ }
+ }
+
+ // Figure 3: P( c )
+ if (is_tangent || is_secant)
+ {
+ Trace("nl-ext-tftp-debug2")
+ << "...poly approximation at c is " << poly_approx_c << std::endl;
+ }
+ else
+ {
+ // we may want to continue getting better bounds
+ return false;
+ }
+
+ if (is_tangent)
+ {
+ if (k == Kind::EXPONENTIAL)
+ {
+ d_expSlv.doTangentLemma(tf, c, poly_approx_c);
+ }
+ else if (k == Kind::SINE)
+ {
+ d_sineSlv.doTangentLemma(tf, c, poly_approx_c, region);
+ }
+ }
+ else if (is_secant)
+ {
+ if (k == EXPONENTIAL)
+ {
+ d_expSlv.doSecantLemmas(tf, poly_approx, c, poly_approx_c, d);
+ }
+ else if (k == Kind::SINE)
+ {
+ d_sineSlv.doSecantLemmas(tf, poly_approx, c, poly_approx_c, d, region);
+ }
+ }
+ return true;
+}
+
+int TranscendentalSolver::regionToConcavity(Kind k, int region)
+{
+ if (k == EXPONENTIAL)
+ {
+ if (region == 1)
+ {
+ return 1;
+ }
+ }
+ else if (k == SINE)
+ {
+ if (region == 1 || region == 2)
+ {
+ return -1;
+ }
+ else if (region == 3 || region == 4)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl/transcendental/transcendental_solver.h b/src/theory/arith/nl/transcendental/transcendental_solver.h
new file mode 100644
index 000000000..b0db79b6a
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/transcendental_solver.h
@@ -0,0 +1,204 @@
+
+/********************* */
+/*! \file transcendental_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling transcendental functions.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/exponential_solver.h"
+#include "theory/arith/nl/transcendental/sine_solver.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Transcendental solver class
+ *
+ * This class implements model-based refinement schemes
+ * for transcendental functions, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of methods that implement the lemma schemas
+ * below, which return a set of lemmas that should be sent on the output
+ * channel.
+ */
+class TranscendentalSolver
+{
+ public:
+ TranscendentalSolver(InferenceManager& im, NlModel& m);
+ ~TranscendentalSolver();
+
+ /** init last call
+ *
+ * This is called at the beginning of last call effort check, where
+ * assertions are the set of assertions belonging to arithmetic,
+ * false_asserts is the subset of assertions that are false in the current
+ * model, and xts is the set of extended function terms that are active in
+ * the current context.
+ *
+ * This call may add lemmas to lems based on registering term
+ * information (for example, purification of sine terms).
+ */
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+ /** increment taylor degree */
+ void incrementTaylorDegree();
+ /** get taylor degree */
+ unsigned getTaylorDegree() const;
+ /** preprocess assertions check model
+ *
+ * This modifies the given assertions in preparation for running a call
+ * to check model.
+ *
+ * This method returns false if a bound for a transcendental function
+ * was conflicting.
+ */
+ bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
+ /** Process side effects in lemma se */
+ void processSideEffect(const NlLemma& se);
+ //-------------------------------------------- lemma schemas
+ // Relays to exp and sine solvers.
+ void checkTranscendentalInitialRefine();
+
+ // Relays to exp and sine solvers.
+ void checkTranscendentalMonotonic();
+
+ /** check transcendental tangent planes
+ *
+ * Constructs a set of valid theory lemmas, based on
+ * computing an "incremental linearization" of
+ * transcendental functions based on the model values
+ * of transcendental functions and their arguments.
+ * It is based on Figure 3 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ * This schema is not terminating in general.
+ * It is not enabled by default, and can
+ * be enabled by --nl-ext-tf-tplanes.
+ *
+ * Example:
+ *
+ * Assume we have a term sin(y) where M( y ) = 1 where M is the current model.
+ * Note that:
+ * sin(1) ~= .841471
+ *
+ * The Taylor series and remainder of sin(y) of degree 7 is
+ *   P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/120)*x^5
+ * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
+ *
+ * This gives us lower and upper bounds :
+ * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
+ * ...where note P_u( 1 ) = 4243/5040 ~= .841865
+ * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
+ * ...where note P_l( 1 ) = 4241/5040 ~= .841468
+ *
+ * Assume that M( sin(y) ) > P_u( 1 ).
+ * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+ * we add a tangent plane refinement.
+ * The tangent plane at the point 1 in P_u is
+ * given by the formula:
+ * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
+ * We add the lemma:
+ * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
+ * which is:
+ *     ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y + 1506/2737)
+ *
+ * Assume that M( sin(y) ) < P_l( 1 ).
+ * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+ * we add a secant plane refinement for some constants ( l, u )
+ * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
+ * l = 0 and u = M( PI/2 ) = 150517/95824.
+ * The secant planes at point 1 for P_l
+ * are given by the formulas:
+ * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
+ * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
+ * We add the lemmas:
+ * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
+ * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
+ * which are:
+ *     ( 0 < y < 1 ) => (sin y) >= 4241/5040*y
+ * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
+ * where c1, c2 are rationals (for brevity, omitted here)
+ * such that c1 ~= .277 and c2 ~= 2.032.
+ */
+ void checkTranscendentalTangentPlanes();
+
+ private:
+ /** check transcendental function refinement for tf
+ *
+ * This method is called by the above method for each "master"
+ * transcendental function application that occurs in an assertion in the
+ * current context. For example, an application like sin(t) is not a master
+ * if we have introduced the constraints:
+ * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
+ * See d_trMaster/d_trSlaves for more detail.
+ *
+ * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
+ * function application tf for Taylor degree d. It may add a secant or
+ * tangent plane lemma to lems, which includes the side effect of the lemma
+ * (if one exists).
+ *
+ * It returns false if the bounds are not precise enough to add a
+ * secant or tangent plane lemma.
+ */
+ bool checkTfTangentPlanesFun(Node tf, unsigned d);
+ //-------------------------------------------- end lemma schemas
+
+ /** get concavity
+ *
+   * Returns whether we are convex (+1) or concave (-1)
+ * in region of transcendental function with kind k,
+ * where region is defined above.
+ * Returns 0 if region is invalid.
+ */
+ int regionToConcavity(Kind k, int region);
+
+ /** taylor degree
+ *
+ * Indicates that the degree of the polynomials in the Taylor approximation of
+ * all transcendental functions is 2*d_taylor_degree. This value is set
+ * initially to options::nlExtTfTaylorDegree() and may be incremented
+ * if the option options::nlExtTfIncPrecision() is enabled.
+ */
+ unsigned d_taylor_degree;
+
+ /** Common state for transcendental solver */
+ transcendental::TranscendentalState d_tstate;
+ /** The solver responsible for the exponential function */
+ transcendental::ExponentialSolver d_expSlv;
+ /** The solver responsible for the sine function */
+ transcendental::SineSolver d_sineSlv;
+}; /* class TranscendentalSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H */
diff --git a/src/theory/arith/nl/transcendental/transcendental_state.cpp b/src/theory/arith/nl/transcendental/transcendental_state.cpp
new file mode 100644
index 000000000..41ed2c53d
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/transcendental_state.cpp
@@ -0,0 +1,394 @@
+/********************* */
+/*! \file transcendental_state.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of utilities for transcendental lemmas.
+ **/
+
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+namespace {
+
+/**
+ * Ensure a is in the main phase:
+ * -pi <= a <= pi
+ */
+inline Node mkValidPhase(TNode a, TNode pi)
+{
+ return mkBounded(
+ NodeManager::currentNM()->mkNode(Kind::MULT, mkRationalNode(-1), pi),
+ a,
+ pi);
+}
+} // namespace
+
+TranscendentalState::TranscendentalState(InferenceManager& im, NlModel& model)
+ : d_im(im), d_model(model)
+{
+ d_true = NodeManager::currentNM()->mkConst(true);
+ d_false = NodeManager::currentNM()->mkConst(false);
+ d_zero = NodeManager::currentNM()->mkConst(Rational(0));
+ d_one = NodeManager::currentNM()->mkConst(Rational(1));
+ d_neg_one = NodeManager::currentNM()->mkConst(Rational(-1));
+}
+
+void TranscendentalState::init(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts)
+{
+ d_funcCongClass.clear();
+ d_funcMap.clear();
+ d_tf_region.clear();
+
+ NodeManager* nm = NodeManager::currentNM();
+
+ // register the extended function terms
+ std::vector<Node> trNeedsMaster;
+ bool needPi = false;
+ // for computing congruence
+ std::map<Kind, ArgTrie> argTrie;
+ for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
+ {
+ Node a = xts[i];
+ Kind ak = a.getKind();
+ bool consider = true;
+    // if it is an unpurified application of SINE, or it is a transcendental
+    // function applied to a transcendental function, purify it.
+ if (isTranscendentalKind(ak))
+ {
+ // if we've already computed master for a
+ if (d_trMaster.find(a) != d_trMaster.end())
+ {
+ // a master has at least one slave
+ consider = (d_trSlaves.find(a) != d_trSlaves.end());
+ }
+ else
+ {
+ if (ak == Kind::SINE)
+ {
+ // always not a master
+ consider = false;
+ }
+ else
+ {
+ for (const Node& ac : a)
+ {
+ if (isTranscendentalKind(ac.getKind()))
+ {
+ consider = false;
+ break;
+ }
+ }
+ }
+ if (!consider)
+ {
+ // wait to assign a master below
+ trNeedsMaster.push_back(a);
+ }
+ else
+ {
+ d_trMaster[a] = a;
+ d_trSlaves[a].insert(a);
+ }
+ }
+ }
+ if (ak == Kind::EXPONENTIAL || ak == Kind::SINE)
+ {
+ needPi = needPi || (ak == Kind::SINE);
+ // if we didn't indicate that it should be purified above
+ if (consider)
+ {
+ std::vector<Node> repList;
+ for (const Node& ac : a)
+ {
+ Node r = d_model.computeConcreteModelValue(ac);
+ repList.push_back(r);
+ }
+ Node aa = argTrie[ak].add(a, repList);
+ if (aa != a)
+ {
+ // apply congruence to pairs of terms that are disequal and congruent
+ Assert(aa.getNumChildren() == a.getNumChildren());
+ Node mvaa = d_model.computeAbstractModelValue(a);
+ Node mvaaa = d_model.computeAbstractModelValue(aa);
+ if (mvaa != mvaaa)
+ {
+ std::vector<Node> exp;
+ for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
+ {
+ exp.push_back(a[j].eqNode(aa[j]));
+ }
+ Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(Kind::AND, exp);
+ Node cong_lemma = nm->mkNode(Kind::OR, expn.negate(), a.eqNode(aa));
+ d_im.addPendingArithLemma(cong_lemma, InferenceId::NL_CONGRUENCE);
+ }
+ }
+ else
+ {
+ // new representative of congruence class
+ d_funcMap[ak].push_back(a);
+ }
+ // add to congruence class
+ d_funcCongClass[aa].push_back(a);
+ }
+ }
+ else if (ak == Kind::PI)
+ {
+ Assert(consider);
+ needPi = true;
+ d_funcMap[ak].push_back(a);
+ d_funcCongClass[a].push_back(a);
+ }
+ }
+ // initialize pi if necessary
+ if (needPi && d_pi.isNull())
+ {
+ mkPi();
+ getCurrentPiBounds();
+ }
+
+ if (d_im.hasUsed())
+ {
+ return;
+ }
+
+ // process SINE phase shifting
+ for (const Node& a : trNeedsMaster)
+ {
+ // should not have processed this already
+ Assert(d_trMaster.find(a) == d_trMaster.end());
+ Kind k = a.getKind();
+ Assert(k == Kind::SINE || k == Kind::EXPONENTIAL);
+ Node y =
+ nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
+ Node new_a = nm->mkNode(k, y);
+ d_trSlaves[new_a].insert(new_a);
+ d_trSlaves[new_a].insert(a);
+ d_trMaster[a] = new_a;
+ d_trMaster[new_a] = new_a;
+ Node lem;
+ if (k == Kind::SINE)
+ {
+ Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
+ << std::endl;
+ Assert(!d_pi.isNull());
+ Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
+ // TODO : do not introduce shift here, instead needs model-based
+ // refinement for constant shifts (cvc4-projects #1284)
+ lem = nm->mkNode(
+ Kind::AND,
+ transcendental::mkValidPhase(y, d_pi),
+ nm->mkNode(
+ Kind::ITE,
+ transcendental::mkValidPhase(a[0], d_pi),
+ a[0].eqNode(y),
+ a[0].eqNode(nm->mkNode(
+ Kind::PLUS,
+ y,
+ nm->mkNode(
+ Kind::MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
+ new_a.eqNode(a));
+ }
+ else
+ {
+ // do both equalities to ensure that new_a becomes a preregistered term
+ lem = nm->mkNode(Kind::AND, a.eqNode(new_a), a[0].eqNode(y));
+ }
+ // note we must do preprocess on this lemma
+ Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
+ << std::endl;
+ NlLemma nlem(
+ lem, LemmaProperty::PREPROCESS, nullptr, InferenceId::NL_T_PURIFY_ARG);
+ d_im.addPendingArithLemma(nlem);
+ }
+
+ if (Trace.isOn("nl-ext-mv"))
+ {
+ Trace("nl-ext-mv") << "Arguments of trancendental functions : "
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (k == Kind::SINE || k == Kind::EXPONENTIAL)
+ {
+ for (const Node& tf : tfl.second)
+ {
+ Node v = tf[0];
+ d_model.computeConcreteModelValue(v);
+ d_model.computeAbstractModelValue(v);
+ d_model.printModelValue("nl-ext-mv", v);
+ }
+ }
+ }
+ }
+}
+
+void TranscendentalState::mkPi()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ if (d_pi.isNull())
+ {
+ d_pi = nm->mkNullaryOperator(nm->realType(), Kind::PI);
+ d_pi_2 = Rewriter::rewrite(
+ nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
+ d_pi_neg_2 = Rewriter::rewrite(
+ nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
+ d_pi_neg = Rewriter::rewrite(
+ nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(-1))));
+ // initialize bounds
+ d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
+ d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
+ }
+}
+
+void TranscendentalState::getCurrentPiBounds()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Node pi_lem = nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::GEQ, d_pi, d_pi_bound[0]),
+ nm->mkNode(Kind::LEQ, d_pi, d_pi_bound[1]));
+ d_im.addPendingArithLemma(pi_lem, InferenceId::NL_T_PI_BOUND);
+}
+
+std::pair<Node, Node> TranscendentalState::getClosestSecantPoints(TNode e,
+ TNode c,
+ unsigned d)
+{
+ // bounds are the minimum and maximum previous secant points
+ // should not repeat secant points: secant lemmas should suffice to
+ // rule out previous assignment
+ Assert(
+ std::find(d_secant_points[e][d].begin(), d_secant_points[e][d].end(), c)
+ == d_secant_points[e][d].end());
+ // Insert into the (temporary) vector. We do not update this vector
+ // until we are sure this secant plane lemma has been processed. We do
+ // this by mapping the lemma to a side effect below.
+ std::vector<Node> spoints = d_secant_points[e][d];
+ spoints.push_back(c);
+
+ // sort
+ sortByNlModel(spoints.begin(), spoints.end(), &d_model);
+ // get the resulting index of c
+ unsigned index =
+ std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
+
+ // bounds are the next closest upper/lower bound values
+ return {index > 0 ? spoints[index - 1] : Node(),
+ index < spoints.size() - 1 ? spoints[index + 1] : Node()};
+}
+
+Node TranscendentalState::mkSecantPlane(
+ TNode arg, TNode b, TNode c, TNode approx_b, TNode approx_c)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // Figure 3: S_l( x ), S_u( x ) for s = 0,1
+ Node rcoeff_n = Rewriter::rewrite(nm->mkNode(Kind::MINUS, b, c));
+ Assert(rcoeff_n.isConst());
+ Rational rcoeff = rcoeff_n.getConst<Rational>();
+ Assert(rcoeff.sgn() != 0);
+ return nm->mkNode(Kind::PLUS,
+ approx_b,
+ nm->mkNode(Kind::MULT,
+ nm->mkNode(Kind::MINUS, approx_b, approx_c),
+ nm->mkConst(rcoeff.inverse()),
+ nm->mkNode(Kind::MINUS, arg, b)));
+}
+
+NlLemma TranscendentalState::mkSecantLemma(
+ TNode lower, TNode upper, int concavity, TNode tf, TNode splane)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // With respect to Figure 3, this is slightly different.
+ // In particular, we chose b to be the model value of bounds[s],
+ // which is a constant although bounds[s] may not be (e.g. if it
+ // contains PI).
+ // To ensure that c...b does not cross an inflection point,
+ // we guard with the symbolic version of bounds[s].
+ // This leads to lemmas e.g. of this form:
+ // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
+ // b ) + P( b ) )
+ // where b = (PI/2)^M, the current value of PI/2 in the model.
+ // This is sound since we are guarded by the symbolic
+ // representation of PI/2.
+ Node antec_n = nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::GEQ, tf[0], lower),
+ nm->mkNode(Kind::LEQ, tf[0], upper));
+ Node lem = nm->mkNode(
+ Kind::IMPLIES,
+ antec_n,
+ nm->mkNode(concavity == 1 ? Kind::LEQ : Kind::GEQ, tf, splane));
+ Trace("nl-ext-tftp-debug2")
+ << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma") << "*** Secant plane lemma : " << lem << std::endl;
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ return NlLemma(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_SECANT);
+}
+
+void TranscendentalState::doSecantLemmas(const std::pair<Node, Node>& bounds,
+ TNode poly_approx,
+ TNode c,
+ TNode approx_c,
+ TNode tf,
+ unsigned d,
+ int concavity)
+{
+ Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds.first
+ << " ... " << bounds.second << std::endl;
+ // take the model value of l or u (since may contain PI)
+ // Make secant from bounds.first to c
+ Node lval = d_model.computeAbstractModelValue(bounds.first);
+ Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds.first
+ << " is " << lval << std::endl;
+ if (lval != c)
+ {
+ // Figure 3 : P(l), P(u), for s = 0
+ Node approx_l = Rewriter::rewrite(
+ poly_approx.substitute(d_taylor.getTaylorVariable(), lval));
+ Node splane = mkSecantPlane(tf[0], lval, c, approx_l, approx_c);
+ NlLemma nlem = mkSecantLemma(lval, c, concavity, tf, splane);
+ // The side effect says that if lem is added, then we should add the
+ // secant point c for (tf,d).
+ nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
+ d_im.addPendingArithLemma(nlem, true);
+ }
+
+ // Make secant from c to bounds.second
+ Node uval = d_model.computeAbstractModelValue(bounds.second);
+ Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds.second
+ << " is " << uval << std::endl;
+ if (c != uval)
+ {
+ // Figure 3 : P(l), P(u), for s = 1
+ Node approx_u = Rewriter::rewrite(
+ poly_approx.substitute(d_taylor.getTaylorVariable(), uval));
+ Node splane = mkSecantPlane(tf[0], c, uval, approx_c, approx_u);
+ NlLemma nlem = mkSecantLemma(c, uval, concavity, tf, splane);
+ // The side effect says that if lem is added, then we should add the
+ // secant point c for (tf,d).
+ nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
+ d_im.addPendingArithLemma(nlem, true);
+ }
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl/transcendental/transcendental_state.h b/src/theory/arith/nl/transcendental/transcendental_state.h
new file mode 100644
index 000000000..0a4e46eca
--- /dev/null
+++ b/src/theory/arith/nl/transcendental/transcendental_state.h
@@ -0,0 +1,214 @@
+/********************* */
+/*! \file transcendental_state.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Utilities for transcendental lemmas.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/**
+ * Holds common state and utilities for transcendental solvers.
+ *
+ * This includes common lookups and caches as well as generic utilities for
+ * secant plane lemmas and taylor approximations.
+ */
+struct TranscendentalState
+{
+ TranscendentalState(InferenceManager& im, NlModel& model);
+
+ /** init last call
+ *
+ * This is called at the beginning of last call effort check, where
+ * assertions are the set of assertions belonging to arithmetic,
+ * false_asserts is the subset of assertions that are false in the current
+ * model, and xts is the set of extended function terms that are active in
+ * the current context.
+ *
+ * This call may add lemmas to lems based on registering term
+ * information (for example, purification of sine terms).
+ */
+ void init(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+
+ /** Initialize members for pi-related values */
+ void mkPi();
+ /** Get current bounds for pi as a lemma */
+ void getCurrentPiBounds();
+
+ /**
+   * Get the two closest secant points from the ones stored already.
+ * "closest" is determined according to the current model.
+ * @param e The transcendental term (like (exp t))
+ * @param c The point currently under consideration (probably the model of t)
+ * @param d The taylor degree.
+ */
+ std::pair<Node, Node> getClosestSecantPoints(TNode e, TNode c, unsigned d);
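+
+  /*
+   * Hypothetical example (assuming the stored points are numeral constants
+   * ordered by their values): if the points stored for (e, d) are { 1/2, 2 }
+   * and c = 1, the result is the pair ( 1/2, 2 ); if c is below all stored
+   * points, the first component is the null node.
+   */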
+
+ /**
+ * Construct a secant plane between b and c
+ * @param arg The argument of the transcendental term
+ * @param b Left secant point
+ * @param c Right secant point
+ * @param approx Approximation for b (not yet substituted)
+ * @param approx_c Approximation for c (already substituted)
+ */
+ Node mkSecantPlane(TNode arg, TNode b, TNode c, TNode approx, TNode approx_c);
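+
+  /*
+   * Concretely, the returned plane is
+   *   approx + ((approx - approx_c)/(b - c)) * (arg - b),
+   * i.e. the line through (b, approx) and (c, approx_c) evaluated at arg.
+   */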
+
+ /**
+ * Construct a secant lemma between lower and upper for tf.
+ * @param lower Left secant point
+ * @param upper Right secant point
+ * @param concavity Concavity of the current regions
+ * @param tf Current transcendental term
+ * @param splane Secant plane as computed by mkSecantPlane()
+ */
+ NlLemma mkSecantLemma(
+ TNode lower, TNode upper, int concavity, TNode tf, TNode splane);
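+
+  /*
+   * The constructed lemma has the shape (prior to rewriting)
+   *   ( lower <= tf[0] <= upper ) => tf <= splane    if concavity = 1
+   *   ( lower <= tf[0] <= upper ) => tf >= splane    if concavity = -1.
+   */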
+
+ /**
+ * Construct and send secant lemmas (if appropriate)
+ * @param bounds Secant bounds
+ * @param poly_approx Polynomial approximation
+ * @param c Current point
+ * @param approx_c Approximation for c
+ * @param tf Current transcendental term
+ * @param d Current taylor degree
+ * @param concavity Concavity in region of c
+ */
+ void doSecantLemmas(const std::pair<Node, Node>& bounds,
+ TNode poly_approx,
+ TNode c,
+ TNode approx_c,
+ TNode tf,
+ unsigned d,
+ int concavity);
+
+ Node d_true;
+ Node d_false;
+ Node d_zero;
+ Node d_one;
+ Node d_neg_one;
+
+ /** The inference manager that we push conflicts and lemmas to. */
+ InferenceManager& d_im;
+ /** Reference to the non-linear model object */
+ NlModel& d_model;
+ /** Utility to compute taylor approximations */
+ TaylorGenerator d_taylor;
+
+ /**
+ * Some transcendental functions f(t) are "purified", e.g. we add
+ * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
+ * purified we call "master terms".
+ *
+ * The maps below maintain a master/slave relationship over
+ * transcendental functions (SINE, EXPONENTIAL, PI), where above
+ * f(y) is the master of itself and of f(t).
+ *
+ * This is used for ensuring that the argument y of SINE we process is on
+ * the interval [-pi .. pi], and that exponentials are not applied to
+ * arguments that contain transcendental functions.
+ */
+ std::map<Node, Node> d_trMaster;
+ std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
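+
+  /*
+   * For instance (illustrative only): after purification of an application
+   * sin(t), a fresh variable y is introduced and sin(y) becomes the master
+   * of both sin(y) and sin(t), i.e.
+   *   d_trMaster[sin(t)] = sin(y),  d_trMaster[sin(y)] = sin(y),
+   *   d_trSlaves[sin(y)] = { sin(y), sin(t) },
+   * with y constrained to lie in [-pi, pi].
+   */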
+
+ /** concavity region for transcendental functions
+ *
+ * This stores an integer that identifies an interval in
+ * which the current model value for an argument of an
+ * application of a transcendental function resides.
+ *
+ * For exp( x ):
+ * region #1 is -infty < x < infty
+ * For sin( x ):
+ * region #0 is pi < x < infty (this is an invalid region)
+ * region #1 is pi/2 < x <= pi
+ * region #2 is 0 < x <= pi/2
+ * region #3 is -pi/2 < x <= 0
+ * region #4 is -pi < x <= -pi/2
+ * region #5 is -infty < x <= -pi (this is an invalid region)
+ * All regions not listed above, as well as regions 0 and 5
+ * for SINE are "invalid". We only process applications
+ * of transcendental functions whose arguments have model
+ * values that reside in valid regions.
+ */
+ std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
+ /**
+   * Maps representatives of a congruence class to the members of that class.
+ *
+ * In detail, a congruence class is a set of terms of the form
+ * { f(t1), ..., f(tn) }
+ * such that t1 = ... = tn in the current context. We choose an arbitrary
+   * term among these to be the representative of this congruence class.
+ *
+ * Moreover, notice we compute congruence classes only over terms that
+ * are transcendental function applications that are "master terms",
+ * see d_trMaster/d_trSlave.
+ */
+ std::map<Node, std::vector<Node>> d_funcCongClass;
+ /**
+ * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
+   * that are representatives of their congruence class.
+ */
+ std::map<Kind, std::vector<Node>> d_funcMap;
+
+ /** secant points (sorted list) for transcendental functions
+ *
+ * This is used for tangent plane refinements for
+ * transcendental functions. This is the set
+ * "get-previous-secant-points" in "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017, for
+ * each transcendental function application. We store this set for each
+ * Taylor degree.
+ */
+ std::unordered_map<Node,
+ std::map<unsigned, std::vector<Node>>,
+ NodeHashFunction>
+ d_secant_points;
+
+ /** PI
+ *
+ * Note that PI is a (symbolic, non-constant) nullary operator. This is
+   * because its value cannot be computed exactly. We constrain PI to
+ * concrete lower and upper bounds stored in d_pi_bound below.
+ */
+ Node d_pi;
+ /** PI/2 */
+ Node d_pi_2;
+ /** -PI/2 */
+ Node d_pi_neg_2;
+ /** -PI */
+ Node d_pi_neg;
+ /** the concrete lower and upper bounds for PI */
+ Node d_pi_bound[2];
+};
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H */
diff --git a/src/theory/arith/nl/transcendental_solver.cpp b/src/theory/arith/nl/transcendental_solver.cpp
deleted file mode 100644
index 2e25f1642..000000000
--- a/src/theory/arith/nl/transcendental_solver.cpp
+++ /dev/null
@@ -1,1474 +0,0 @@
-/********************* */
-/*! \file transcendental_solver.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King, Gereon Kremer
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of solver for handling transcendental functions.
- **/
-
-#include "theory/arith/nl/transcendental_solver.h"
-
-#include <cmath>
-#include <set>
-
-#include "expr/node_algorithm.h"
-#include "expr/node_builder.h"
-#include "options/arith_options.h"
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/rewriter.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-namespace nl {
-
-TranscendentalSolver::TranscendentalSolver(InferenceManager& im, NlModel& m) : d_im(im), d_model(m)
-{
- NodeManager* nm = NodeManager::currentNM();
- d_true = nm->mkConst(true);
- d_false = nm->mkConst(false);
- d_zero = nm->mkConst(Rational(0));
- d_one = nm->mkConst(Rational(1));
- d_neg_one = nm->mkConst(Rational(-1));
- d_taylor_real_fv = nm->mkBoundVar("x", nm->realType());
- d_taylor_real_fv_base = nm->mkBoundVar("a", nm->realType());
- d_taylor_real_fv_base_rem = nm->mkBoundVar("b", nm->realType());
- d_taylor_degree = options::nlExtTfTaylorDegree();
-}
-
-TranscendentalSolver::~TranscendentalSolver() {}
-
-void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts)
-{
- d_funcCongClass.clear();
- d_funcMap.clear();
- d_tf_region.clear();
-
- NodeManager* nm = NodeManager::currentNM();
-
- // register the extended function terms
- std::vector<Node> trNeedsMaster;
- bool needPi = false;
- // for computing congruence
- std::map<Kind, ArgTrie> argTrie;
- for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
- {
- Node a = xts[i];
- Kind ak = a.getKind();
- bool consider = true;
- // if is an unpurified application of SINE, or it is a transcendental
- // applied to a trancendental, purify.
- if (isTranscendentalKind(ak))
- {
- // if we've already computed master for a
- if (d_trMaster.find(a) != d_trMaster.end())
- {
- // a master has at least one slave
- consider = (d_trSlaves.find(a) != d_trSlaves.end());
- }
- else
- {
- if (ak == SINE)
- {
- // always not a master
- consider = false;
- }
- else
- {
- for (const Node& ac : a)
- {
- if (isTranscendentalKind(ac.getKind()))
- {
- consider = false;
- break;
- }
- }
- }
- if (!consider)
- {
- // wait to assign a master below
- trNeedsMaster.push_back(a);
- }
- else
- {
- d_trMaster[a] = a;
- d_trSlaves[a].insert(a);
- }
- }
- }
- if (ak == EXPONENTIAL || ak == SINE)
- {
- needPi = needPi || (ak == SINE);
- // if we didn't indicate that it should be purified above
- if (consider)
- {
- std::vector<Node> repList;
- for (const Node& ac : a)
- {
- Node r = d_model.computeConcreteModelValue(ac);
- repList.push_back(r);
- }
- Node aa = argTrie[ak].add(a, repList);
- if (aa != a)
- {
- // apply congruence to pairs of terms that are disequal and congruent
- Assert(aa.getNumChildren() == a.getNumChildren());
- Node mvaa = d_model.computeAbstractModelValue(a);
- Node mvaaa = d_model.computeAbstractModelValue(aa);
- if (mvaa != mvaaa)
- {
- std::vector<Node> exp;
- for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
- {
- exp.push_back(a[j].eqNode(aa[j]));
- }
- Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
- Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
- d_im.addPendingArithLemma(cong_lemma, InferenceId::NL_CONGRUENCE);
- }
- }
- else
- {
- // new representative of congruence class
- d_funcMap[ak].push_back(a);
- }
- // add to congruence class
- d_funcCongClass[aa].push_back(a);
- }
- }
- else if (ak == PI)
- {
- Assert(consider);
- needPi = true;
- d_funcMap[ak].push_back(a);
- d_funcCongClass[a].push_back(a);
- }
- }
- // initialize pi if necessary
- if (needPi && d_pi.isNull())
- {
- mkPi();
- getCurrentPiBounds();
- }
-
- if (d_im.hasUsed())
- {
- return;
- }
-
- // process SINE phase shifting
- for (const Node& a : trNeedsMaster)
- {
- // should not have processed this already
- Assert(d_trMaster.find(a) == d_trMaster.end());
- Kind k = a.getKind();
- Assert(k == SINE || k == EXPONENTIAL);
- Node y =
- nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
- Node new_a = nm->mkNode(k, y);
- d_trSlaves[new_a].insert(new_a);
- d_trSlaves[new_a].insert(a);
- d_trMaster[a] = new_a;
- d_trMaster[new_a] = new_a;
- Node lem;
- if (k == SINE)
- {
- Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
- << std::endl;
- Assert(!d_pi.isNull());
- Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
- // TODO : do not introduce shift here, instead needs model-based
- // refinement for constant shifts (cvc4-projects #1284)
- lem = nm->mkNode(
- AND,
- mkValidPhase(y, d_pi),
- nm->mkNode(
- ITE,
- mkValidPhase(a[0], d_pi),
- a[0].eqNode(y),
- a[0].eqNode(nm->mkNode(
- PLUS,
- y,
- nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
- new_a.eqNode(a));
- }
- else
- {
- // do both equalities to ensure that new_a becomes a preregistered term
- lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
- }
- // note we must do preprocess on this lemma
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
- << std::endl;
- NlLemma nlem(lem, LemmaProperty::PREPROCESS, nullptr, InferenceId::NL_T_PURIFY_ARG);
- d_im.addPendingArithLemma(nlem);
- }
-
- if (Trace.isOn("nl-ext-mv"))
- {
- Trace("nl-ext-mv") << "Arguments of trancendental functions : "
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == SINE || k == EXPONENTIAL)
- {
- for (const Node& tf : tfl.second)
- {
- Node v = tf[0];
- d_model.computeConcreteModelValue(v);
- d_model.computeAbstractModelValue(v);
- d_model.printModelValue("nl-ext-mv", v);
- }
- }
- }
- }
-}
-
-bool TranscendentalSolver::preprocessAssertionsCheckModel(
- std::vector<Node>& assertions)
-{
- std::vector<Node> pvars;
- std::vector<Node> psubs;
- for (const std::pair<const Node, Node>& tb : d_trMaster)
- {
- pvars.push_back(tb.first);
- psubs.push_back(tb.second);
- }
-
- // initialize representation of assertions
- std::vector<Node> passertions;
- for (const Node& a : assertions)
-
- {
- Node pa = a;
- if (!pvars.empty())
- {
- pa = arithSubstitute(pa, pvars, psubs);
- pa = Rewriter::rewrite(pa);
- }
- if (!pa.isConst() || !pa.getConst<bool>())
- {
- Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
- passertions.push_back(pa);
- }
- }
- // get model bounds for all transcendental functions
- Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- for (const Node& tf : tfs.second)
- {
- Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
- bool success = true;
- // tf is Figure 3 : tf( x )
- Node bl;
- Node bu;
- if (k == PI)
- {
- bl = d_pi_bound[0];
- bu = d_pi_bound[1];
- }
- else
- {
- std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
- bl = bounds.first;
- bu = bounds.second;
- if (bl != bu)
- {
- d_model.setUsedApproximate();
- }
- }
- if (!bl.isNull() && !bu.isNull())
- {
- // for each function in the congruence classe
- for (const Node& ctf : d_funcCongClass[tf])
- {
- // each term in congruence classes should be master terms
- Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
- // we set the bounds for each slave of tf
- for (const Node& stf : d_trSlaves[ctf])
- {
- Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
- << bu << "]" << std::endl;
- success = d_model.addCheckModelBound(stf, bl, bu);
- }
- }
- }
- else
- {
- Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
- }
- if (!success)
- {
- // a bound was conflicting
- Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
- Trace("nl-ext-cm") << "-----" << std::endl;
- return false;
- }
- }
- }
- // replace the assertions
- assertions = passertions;
- return true;
-}
-
-void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
-unsigned TranscendentalSolver::getTaylorDegree() const
-{
- return d_taylor_degree;
-}
-
-void TranscendentalSolver::processSideEffect(const NlLemma& se)
-{
- for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
- {
- Node tf = std::get<0>(sp);
- unsigned d = std::get<1>(sp);
- Node c = std::get<2>(sp);
- d_secant_points[tf][d].push_back(c);
- }
-}
-
-void TranscendentalSolver::mkPi()
-{
- NodeManager* nm = NodeManager::currentNM();
- if (d_pi.isNull())
- {
- d_pi = nm->mkNullaryOperator(nm->realType(), PI);
- d_pi_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
- d_pi_neg_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
- d_pi_neg =
- Rewriter::rewrite(nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1))));
- // initialize bounds
- d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
- d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
- }
-}
-
-void TranscendentalSolver::getCurrentPiBounds()
-{
- NodeManager* nm = NodeManager::currentNM();
- Node pi_lem = nm->mkNode(AND,
- nm->mkNode(GEQ, d_pi, d_pi_bound[0]),
- nm->mkNode(LEQ, d_pi, d_pi_bound[1]));
- d_im.addPendingArithLemma(pi_lem, InferenceId::NL_T_PI_BOUND);
-}
-
-void TranscendentalSolver::checkTranscendentalInitialRefine()
-{
- NodeManager* nm = NodeManager::currentNM();
- Trace("nl-ext")
- << "Get initial refinement lemmas for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- for (const Node& t : tfl.second)
- {
- // initial refinements
- if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
- {
- d_tf_initial_refine[t] = true;
- Node lem;
- if (k == SINE)
- {
- Node symn = nm->mkNode(SINE, nm->mkNode(MULT, d_neg_one, t[0]));
- symn = Rewriter::rewrite(symn);
- // Can assume it is its own master since phase is split over 0,
- // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
- d_trMaster[symn] = symn;
- d_trSlaves[symn].insert(symn);
- Assert(d_trSlaves.find(t) != d_trSlaves.end());
- std::vector<Node> children;
-
- lem = nm->mkNode(AND,
- // bounds
- nm->mkNode(AND,
- nm->mkNode(LEQ, t, d_one),
- nm->mkNode(GEQ, t, d_neg_one)),
- // symmetry
- nm->mkNode(PLUS, t, symn).eqNode(d_zero),
- // sign
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_zero)),
- // zero val
- nm->mkNode(EQUAL,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(GT, t, d_zero)));
- lem = nm->mkNode(
- AND,
- lem,
- // zero tangent
- nm->mkNode(AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(LT, t, t[0])),
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(GT, t, t[0]))),
- // pi tangent
- nm->mkNode(
- AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_pi),
- nm->mkNode(LT, t, nm->mkNode(MINUS, d_pi, t[0]))),
- nm->mkNode(
- IMPLIES,
- nm->mkNode(GT, t[0], d_pi_neg),
- nm->mkNode(GT, t, nm->mkNode(MINUS, d_pi_neg, t[0])))));
- }
- else if (k == EXPONENTIAL)
- {
- // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
- // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
- lem = nm->mkNode(
- AND,
- nm->mkNode(GT, t, d_zero),
- nm->mkNode(EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_one)),
- nm->mkNode(OR,
- nm->mkNode(LEQ, t[0], d_zero),
- nm->mkNode(GT, t, nm->mkNode(PLUS, t[0], d_one))));
- }
- if (!lem.isNull())
- {
- d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
- }
- }
- }
- }
-}
-
-void TranscendentalSolver::checkTranscendentalMonotonic()
-{
- Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
- << std::endl;
-
- // sort arguments of all transcendentals
- std::map<Kind, std::vector<Node> > sorted_tf_args;
- std::map<Kind, std::map<Node, Node> > tf_arg_to_term;
-
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == EXPONENTIAL || k == SINE)
- {
- for (const Node& tf : tfl.second)
- {
- Node a = tf[0];
- Node mvaa = d_model.computeAbstractModelValue(a);
- if (mvaa.isConst())
- {
- Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
- sorted_tf_args[k].push_back(a);
- tf_arg_to_term[k][a] = tf;
- }
- }
- }
- }
-
- SortNlModel smv;
- smv.d_nlm = &d_model;
- // sort by concrete values
- smv.d_isConcrete = true;
- smv.d_reverse_order = true;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (!sorted_tf_args[k].empty())
- {
- std::sort(sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv);
- Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k
- << " : " << std::endl;
- for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
- {
- Node targ = sorted_tf_args[k][i];
- Node mvatarg = d_model.computeAbstractModelValue(targ);
- Trace("nl-ext-tf-mono")
- << " " << targ << " -> " << mvatarg << std::endl;
- Node t = tf_arg_to_term[k][targ];
- Node mvat = d_model.computeAbstractModelValue(t);
- Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
- }
- std::vector<Node> mpoints;
- std::vector<Node> mpoints_vals;
- if (k == SINE)
- {
- mpoints.push_back(d_pi);
- mpoints.push_back(d_pi_2);
- mpoints.push_back(d_zero);
- mpoints.push_back(d_pi_neg_2);
- mpoints.push_back(d_pi_neg);
- }
- else if (k == EXPONENTIAL)
- {
- mpoints.push_back(Node::null());
- }
- if (!mpoints.empty())
- {
- // get model values for points
- for (unsigned i = 0; i < mpoints.size(); i++)
- {
- Node mpv;
- if (!mpoints[i].isNull())
- {
- mpv = d_model.computeAbstractModelValue(mpoints[i]);
- Assert(mpv.isConst());
- }
- mpoints_vals.push_back(mpv);
- }
-
- unsigned mdir_index = 0;
- int monotonic_dir = -1;
- Node mono_bounds[2];
- Node targ, targval, t, tval;
- for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
- {
- Node sarg = sorted_tf_args[k][i];
- Node sargval = d_model.computeAbstractModelValue(sarg);
- Assert(sargval.isConst());
- Node s = tf_arg_to_term[k][sarg];
- Node sval = d_model.computeAbstractModelValue(s);
- Assert(sval.isConst());
-
- // increment to the proper monotonicity region
- bool increment = true;
- while (increment && mdir_index < mpoints.size())
- {
- increment = false;
- if (mpoints[mdir_index].isNull())
- {
- increment = true;
- }
- else
- {
- Node pval = mpoints_vals[mdir_index];
- Assert(pval.isConst());
- if (sargval.getConst<Rational>() < pval.getConst<Rational>())
- {
- increment = true;
- Trace("nl-ext-tf-mono") << "...increment at " << sarg
- << " since model value is less than "
- << mpoints[mdir_index] << std::endl;
- }
- }
- if (increment)
- {
- tval = Node::null();
- mono_bounds[1] = mpoints[mdir_index];
- mdir_index++;
- monotonic_dir = regionToMonotonicityDir(k, mdir_index);
- if (mdir_index < mpoints.size())
- {
- mono_bounds[0] = mpoints[mdir_index];
- }
- else
- {
- mono_bounds[0] = Node::null();
- }
- }
- }
- // store the concavity region
- d_tf_region[s] = mdir_index;
- Trace("nl-ext-concavity") << "Transcendental function " << s
- << " is in region #" << mdir_index;
- Trace("nl-ext-concavity")
- << ", arg model value = " << sargval << std::endl;
-
- if (!tval.isNull())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node mono_lem;
- if (monotonic_dir == 1
- && sval.getConst<Rational>() > tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(GEQ, targ, sarg), nm->mkNode(GEQ, t, s));
- }
- else if (monotonic_dir == -1
- && sval.getConst<Rational>() < tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(LEQ, targ, sarg), nm->mkNode(LEQ, t, s));
- }
- if (!mono_lem.isNull())
- {
- if (!mono_bounds[0].isNull())
- {
- Assert(!mono_bounds[1].isNull());
- mono_lem = nm->mkNode(
- IMPLIES,
- nm->mkNode(AND,
- mkBounded(mono_bounds[0], targ, mono_bounds[1]),
- mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
- mono_lem);
- }
- Trace("nl-ext-tf-mono")
- << "Monotonicity lemma : " << mono_lem << std::endl;
-
- d_im.addPendingArithLemma(mono_lem, InferenceId::NL_T_MONOTONICITY);
- }
- }
- // store the previous values
- targ = sarg;
- targval = sargval;
- t = s;
- tval = sval;
- }
- }
- }
- }
-}
-
-void TranscendentalSolver::checkTranscendentalTangentPlanes()
-{
- Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
- << std::endl;
- // this implements Figure 3 of "Satisfiability Modulo Transcendental Functions
- // via Incremental Linearization" by Cimatti et al
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- if (k == PI)
- {
- // We do not use Taylor approximation for PI currently.
- // This is because the convergence is extremely slow, and hence an
- // initial approximation is superior.
- continue;
- }
- Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
- << std::endl;
- Trace("nl-ext-tftp-debug2") << std::endl;
-
- // we substitute into the Taylor sum P_{n,f(0)}( x )
-
- for (const Node& tf : tfs.second)
- {
- // tf is Figure 3 : tf( x )
- Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
- // go until max degree is reached, or we don't meet bound criteria
- for (unsigned d = 1; d <= d_taylor_degree; d++)
- {
- Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
- unsigned prev = d_im.numPendingLemmas() + d_im.numWaitingLemmas();
- if (checkTfTangentPlanesFun(tf, d))
- {
- Trace("nl-ext-tftp")
- << "...fail, #lemmas = "
- << (d_im.numPendingLemmas() + d_im.numWaitingLemmas() - prev)
- << std::endl;
- break;
- }
- else
- {
- Trace("nl-ext-tftp") << "...success" << std::endl;
- }
- }
- }
- }
-}
-
-bool TranscendentalSolver::checkTfTangentPlanesFun(Node tf,
- unsigned d)
-{
- NodeManager* nm = NodeManager::currentNM();
- Kind k = tf.getKind();
- // this should only be run on master applications
- Assert(d_trSlaves.find(tf) != d_trSlaves.end());
-
- // Figure 3 : c
- Node c = d_model.computeAbstractModelValue(tf[0]);
- int csign = c.getConst<Rational>().sgn();
- if (csign == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- Assert(csign == 1 || csign == -1);
-
- // Figure 3: P_l, P_u
- // mapped for each sign of c
- std::map<int, Node> poly_approx_bounds[2];
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
- poly_approx_bounds[0][1] = pbounds[0];
- poly_approx_bounds[0][-1] = pbounds[1];
- poly_approx_bounds[1][1] = pbounds[2];
- poly_approx_bounds[1][-1] = pbounds[3];
-
- // Figure 3 : v
- Node v = d_model.computeAbstractModelValue(tf);
-
- // check value of tf
- Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
- << ", degree " << d << "..." << std::endl;
- Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
- Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
-
- std::vector<Node> taylor_vars;
- taylor_vars.push_back(d_taylor_real_fv);
-
- // compute the concavity
- int region = -1;
- std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
- d_tf_region.find(tf);
- if (itr != d_tf_region.end())
- {
- region = itr->second;
- Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
- }
- // Figure 3 : conc
- int concavity = regionToConcavity(k, itr->second);
- Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
- if (concavity == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- // bounds for which we are this concavity
- // Figure 3: < l, u >
- Node bounds[2];
- if (k == SINE)
- {
- bounds[0] = regionToLowerBound(k, region);
- Assert(!bounds[0].isNull());
- bounds[1] = regionToUpperBound(k, region);
- Assert(!bounds[1].isNull());
- }
-
- // Figure 3: P
- Node poly_approx;
-
- // compute whether this is a tangent refinement or a secant refinement
- bool is_tangent = false;
- bool is_secant = false;
- std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
- // this is the approximated value of tf(c), which is a value such that:
- // M_A(tf(c)) >= poly_approx_c >= tf(c) or
- // M_A(tf(c)) <= poly_approx_c <= tf(c)
- // In other words, it is a better approximation of the true value of tf(c)
- // in the case that we add a refinement lemma. We use this value in the
- // refinement schemas below.
- Node poly_approx_c;
- for (unsigned r = 0; r < 2; r++)
- {
- Node pab = poly_approx_bounds[r][csign];
- Node v_pab = r == 0 ? mvb.first : mvb.second;
- if (!v_pab.isNull())
- {
- Trace("nl-ext-tftp-debug2")
- << "...model value of " << pab << " is " << v_pab << std::endl;
-
- Assert(v_pab.isConst());
- Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
- Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
- Node compr = Rewriter::rewrite(comp);
- Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
- if (compr == d_true)
- {
- poly_approx_c = v_pab;
- // beyond the bounds
- if (r == 0)
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == 1;
- is_secant = concavity == -1;
- }
- else
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == -1;
- is_secant = concavity == 1;
- }
- if (Trace.isOn("nl-ext-tftp"))
- {
- Trace("nl-ext-tftp") << "*** Outside boundary point (";
- Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << ", will refine..." << std::endl;
- Trace("nl-ext-tftp")
- << " poly_approx = " << poly_approx << std::endl;
- Trace("nl-ext-tftp")
- << " is_tangent = " << is_tangent << std::endl;
- Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
- }
- break;
- }
- else
- {
- Trace("nl-ext-tftp")
- << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << std::endl;
- }
- }
- }
-
- // Figure 3: P( c )
- if (is_tangent || is_secant)
- {
- Trace("nl-ext-tftp-debug2")
- << "...poly approximation at c is " << poly_approx_c << std::endl;
- }
- else
- {
- // we may want to continue getting better bounds
- return false;
- }
-
- if (is_tangent)
- {
- // compute tangent plane
- // Figure 3: T( x )
- // We use zero slope tangent planes, since the concavity of the Taylor
- // approximation cannot be easily established.
- Node tplane = poly_approx_c;
-
- Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
- std::vector<Node> antec;
- int mdir = regionToMonotonicityDir(k, region);
- for (unsigned i = 0; i < 2; i++)
- {
- // Tangent plane is valid in the interval [c,u) if the slope of the
- // function matches its concavity, and is valid in (l, c] otherwise.
- Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
- if (!use_bound.isNull())
- {
- Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
- antec.push_back(ant);
- }
- }
- if (!antec.empty())
- {
- Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- }
- Trace("nl-ext-tftp-debug2")
- << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Tangent plane lemma : " << lem << std::endl;
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- // Figure 3 : line 9
- d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
- }
- else if (is_secant)
- {
- // bounds are the minimum and maximum previous secant points
- // should not repeat secant points: secant lemmas should suffice to
- // rule out previous assignment
- Assert(std::find(
- d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
- == d_secant_points[tf][d].end());
- // Insert into the (temporary) vector. We do not update this vector
- // until we are sure this secant plane lemma has been processed. We do
- // this by mapping the lemma to a side effect below.
- std::vector<Node> spoints = d_secant_points[tf][d];
- spoints.push_back(c);
-
- // sort
- SortNlModel smv;
- smv.d_nlm = &d_model;
- smv.d_isConcrete = true;
- std::sort(spoints.begin(), spoints.end(), smv);
- // get the resulting index of c
- unsigned index =
- std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
- // bounds are the next closest upper/lower bound values
- if (index > 0)
- {
- bounds[0] = spoints[index - 1];
- }
- else
- {
- // otherwise, we use the lower boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[0].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c-1
- bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
- }
- }
- if (index < spoints.size() - 1)
- {
- bounds[1] = spoints[index + 1];
- }
- else
- {
- // otherwise, we use the upper boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[1].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c+1
- bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
- }
- }
- Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
- << " ... " << bounds[1] << std::endl;
-
- // the secant plane may be conjunction of 1-2 guarded inequalities
- std::vector<Node> lemmaConj;
- for (unsigned s = 0; s < 2; s++)
- {
- // compute secant plane
- Assert(!poly_approx.isNull());
- Assert(!bounds[s].isNull());
- // take the model value of l or u (since may contain PI)
- Node b = d_model.computeAbstractModelValue(bounds[s]);
- Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
- << " is " << b << std::endl;
- Assert(b.isConst());
- if (c != b)
- {
- // Figure 3 : P(l), P(u), for s = 0,1
- Node poly_approx_b;
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(b);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- // Figure 3: S_l( x ), S_u( x ) for s = 0,1
- Node splane;
- Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
- Assert(rcoeff_n.isConst());
- Rational rcoeff = rcoeff_n.getConst<Rational>();
- Assert(rcoeff.sgn() != 0);
- poly_approx_b = Rewriter::rewrite(poly_approx_b);
- poly_approx_c = Rewriter::rewrite(poly_approx_c);
- splane = nm->mkNode(
- PLUS,
- poly_approx_b,
- nm->mkNode(MULT,
- nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
- nm->mkConst(Rational(1) / rcoeff),
- nm->mkNode(MINUS, tf[0], b)));
-
- Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
- // With respect to Figure 3, this is slightly different.
- // In particular, we chose b to be the model value of bounds[s],
- // which is a constant although bounds[s] may not be (e.g. if it
- // contains PI).
- // To ensure that c...b does not cross an inflection point,
- // we guard with the symbolic version of bounds[s].
- // This leads to lemmas e.g. of this form:
- // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
- // b ) + P( b ) )
- // where b = (PI/2)^M, the current value of PI/2 in the model.
- // This is sound since we are guarded by the symbolic
- // representation of PI/2.
- Node antec_n =
- nm->mkNode(AND,
- nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
- nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- Trace("nl-ext-tftp-debug2")
- << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Secant plane lemma : " << lem << std::endl;
- lemmaConj.push_back(lem);
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- }
- }
- // Figure 3 : line 22
- Assert(!lemmaConj.empty());
- Node lem =
- lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
- NlLemma nlem(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_SECANT);
- // The side effect says that if lem is added, then we should add the
- // secant point c for (tf,d).
- nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
- d_im.addPendingArithLemma(nlem, true);
- }
- return true;
-}
-
-int TranscendentalSolver::regionToMonotonicityDir(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 4)
- {
- return -1;
- }
- else if (region == 2 || region == 3)
- {
- return 1;
- }
- }
- return 0;
-}
-
-int TranscendentalSolver::regionToConcavity(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 2)
- {
- return -1;
- }
- else if (region == 3 || region == 4)
- {
- return 1;
- }
- }
- return 0;
-}
-
-Node TranscendentalSolver::regionToLowerBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi_2;
- }
- else if (region == 2)
- {
- return d_zero;
- }
- else if (region == 3)
- {
- return d_pi_neg_2;
- }
- else if (region == 4)
- {
- return d_pi_neg;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::regionToUpperBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi;
- }
- else if (region == 2)
- {
- return d_pi_2;
- }
- else if (region == 3)
- {
- return d_zero;
- }
- else if (region == 4)
- {
- return d_pi_neg_2;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::getDerivative(Node n, Node x)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(x.isVar());
- // only handle the cases that occur in the Taylor expansions we construct
- if (n.getKind() == EXPONENTIAL)
- {
- if (n[0] == x)
- {
- return n;
- }
- }
- else if (n.getKind() == SINE)
- {
- if (n[0] == x)
- {
- Node na = nm->mkNode(MINUS, d_pi_2, n[0]);
- Node ret = nm->mkNode(SINE, na);
- ret = Rewriter::rewrite(ret);
- return ret;
- }
- }
- else if (n.getKind() == PLUS)
- {
- std::vector<Node> dchildren;
- for (unsigned i = 0; i < n.getNumChildren(); i++)
- {
- // PLUS is flattened in rewriter, recursion depth is bounded by 1
- Node dc = getDerivative(n[i], x);
- if (dc.isNull())
- {
- return dc;
- }
- else
- {
- dchildren.push_back(dc);
- }
- }
- return nm->mkNode(PLUS, dchildren);
- }
- else if (n.getKind() == MULT)
- {
- Assert(n[0].isConst());
- Node dc = getDerivative(n[1], x);
- if (!dc.isNull())
- {
- return nm->mkNode(MULT, n[0], dc);
- }
- }
- else if (n.getKind() == NONLINEAR_MULT)
- {
- unsigned xcount = 0;
- std::vector<Node> children;
- unsigned xindex = 0;
- for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
- {
- if (n[i] == x)
- {
- xcount++;
- xindex = i;
- }
- children.push_back(n[i]);
- }
- if (xcount == 0)
- {
- return d_zero;
- }
- else
- {
- children[xindex] = nm->mkConst(Rational(xcount));
- }
- return nm->mkNode(MULT, children);
- }
- else if (n.isVar())
- {
- return n == x ? d_one : d_zero;
- }
- else if (n.isConst())
- {
- return d_zero;
- }
- Trace("nl-ext-debug") << "No derivative computed for " << n;
- Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
- return Node::null();
-}
-
-std::pair<Node, Node> TranscendentalSolver::getTaylor(Node fa, unsigned n)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(n > 0);
- Node fac; // what term we cache for fa
- if (fa[0] == d_zero)
- {
- // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
- fac = fa;
- }
- else
- {
- // otherwise we use a standard factor a in (x-a)^n
- fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
- }
- Node taylor_rem;
- Node taylor_sum;
- // check if we have already computed this Taylor series
- std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
- if (itt == d_taylor_sum[fac].end())
- {
- Node i_exp_base;
- if (fa[0] == d_zero)
- {
- i_exp_base = d_taylor_real_fv;
- }
- else
- {
- i_exp_base = Rewriter::rewrite(
- nm->mkNode(MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
- }
- Node i_derv = fac;
- Node i_fact = d_one;
- Node i_exp = d_one;
- int i_derv_status = 0;
- unsigned counter = 0;
- std::vector<Node> sum;
- do
- {
- counter++;
- if (fa.getKind() == EXPONENTIAL)
- {
- // unchanged
- }
- else if (fa.getKind() == SINE)
- {
- if (i_derv_status % 2 == 1)
- {
- Node arg = nm->mkNode(PLUS, d_pi_2, d_taylor_real_fv_base);
- i_derv = nm->mkNode(SINE, arg);
- }
- else
- {
- i_derv = fa;
- }
- if (i_derv_status >= 2)
- {
- i_derv = nm->mkNode(MINUS, d_zero, i_derv);
- }
- i_derv = Rewriter::rewrite(i_derv);
- i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
- }
- if (counter == (n + 1))
- {
- TNode x = d_taylor_real_fv_base;
- i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
- }
- Node curr = nm->mkNode(MULT, nm->mkNode(DIVISION, i_derv, i_fact), i_exp);
- if (counter == (n + 1))
- {
- taylor_rem = curr;
- }
- else
- {
- sum.push_back(curr);
- i_fact = Rewriter::rewrite(
- nm->mkNode(MULT, nm->mkConst(Rational(counter)), i_fact));
- i_exp = Rewriter::rewrite(nm->mkNode(MULT, i_exp_base, i_exp));
- }
- } while (counter <= n);
- taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(PLUS, sum);
-
- if (fac[0] != d_taylor_real_fv_base)
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fac[0]);
- }
-
- // cache
- d_taylor_sum[fac][n] = taylor_sum;
- d_taylor_rem[fac][n] = taylor_rem;
- }
- else
- {
- taylor_sum = itt->second;
- Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
- taylor_rem = d_taylor_rem[fac][n];
- }
-
- // must substitute for the argument if we were using a different lookup
- if (fa[0] != fac[0])
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fa[0]);
- }
- return std::pair<Node, Node>(taylor_sum, taylor_rem);
-}
-
-void TranscendentalSolver::getPolynomialApproximationBounds(
- Kind k, unsigned d, std::vector<Node>& pbounds)
-{
- if (d_poly_bounds[k][d].empty())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- // n is the Taylor degree we are currently considering
- unsigned n = 2 * d;
- // n must be even
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is : " << taylor.first << std::endl;
- Node taylor_sum = Rewriter::rewrite(taylor.first);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
- << std::endl;
- Assert(taylor.second.getKind() == MULT);
- Assert(taylor.second.getNumChildren() == 2);
- Assert(taylor.second[0].getKind() == DIVISION);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
- // ru is x^{n+1}/(n+1)!
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- ru = Rewriter::rewrite(ru);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
- if (k == EXPONENTIAL)
- {
- pbounds.push_back(taylor_sum);
- pbounds.push_back(taylor_sum);
- pbounds.push_back(Rewriter::rewrite(
- nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
- pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
- }
- else
- {
- Assert(k == SINE);
- Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
- Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
- pbounds.push_back(l);
- pbounds.push_back(l);
- pbounds.push_back(u);
- pbounds.push_back(u);
- }
- Trace("nl-ext-tf-tplanes")
- << "Polynomial approximation for " << k << " is: " << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
- d_poly_bounds[k][d].insert(
- d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
- }
- else
- {
- pbounds.insert(
- pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
- }
-}
-
-void TranscendentalSolver::getPolynomialApproximationBoundForArg(
- Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
-{
- getPolynomialApproximationBounds(k, d, pbounds);
- Assert(c.isConst());
- if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- bool success = false;
- unsigned ds = d;
- TNode ttrf = d_taylor_real_fv;
- TNode tc = c;
- do
- {
- success = true;
- unsigned n = 2 * ds;
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- // check that 1-c^{n+1}/(n+1)! > 0
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- Node rus = ru.substitute(ttrf, tc);
- rus = Rewriter::rewrite(rus);
- Assert(rus.isConst());
- if (rus.getConst<Rational>() > d_one.getConst<Rational>())
- {
- success = false;
- ds = ds + 1;
- }
- } while (!success);
- if (ds > d)
- {
- Trace("nl-ext-exp-taylor")
- << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
- << k << " " << c << ")" << std::endl;
- // must use sound upper bound
- std::vector<Node> pboundss;
- getPolynomialApproximationBounds(k, ds, pboundss);
- pbounds[2] = pboundss[2];
- }
- }
-}
-
-std::pair<Node, Node> TranscendentalSolver::getTfModelBounds(Node tf,
- unsigned d)
-{
- // compute the model value of the argument
- Node c = d_model.computeAbstractModelValue(tf[0]);
- Assert(c.isConst());
- int csign = c.getConst<Rational>().sgn();
- Kind k = tf.getKind();
- if (csign == 0)
- {
- // at zero, it is trivial
- if (k == SINE)
- {
- return std::pair<Node, Node>(d_zero, d_zero);
- }
- Assert(k == EXPONENTIAL);
- return std::pair<Node, Node>(d_one, d_one);
- }
- bool isNeg = csign == -1;
-
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
-
- std::vector<Node> bounds;
- TNode tfv = d_taylor_real_fv;
- TNode tfs = tf[0];
- for (unsigned d2 = 0; d2 < 2; d2++)
- {
- int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
- Node pab = pbounds[index];
- if (!pab.isNull())
- {
- // { x -> M_A(tf[0]) }
- // Notice that we compute the model value of tfs first, so that
- // the call to rewrite below does not modify the term, where notice that
- // rewrite( x*x { x -> M_A(t) } ) = M_A(t)*M_A(t)
- // is not equal to
- // M_A( x*x { x -> t } ) = M_A( t*t )
- // where M_A denotes the abstract model.
- Node mtfs = d_model.computeAbstractModelValue(tfs);
- pab = pab.substitute(tfv, mtfs);
- pab = Rewriter::rewrite(pab);
- Assert (pab.isConst());
- bounds.push_back(pab);
- }
- else
- {
- bounds.push_back(Node::null());
- }
- }
- return std::pair<Node, Node>(bounds[0], bounds[1]);
-}
-
-Node TranscendentalSolver::mkValidPhase(Node a, Node pi)
-{
- return mkBounded(
- NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
-}
-
-} // namespace nl
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
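To make the bound construction in the deleted file above concrete, here is a small self-contained sketch that evaluates the same kind of enclosure numerically. It only mirrors the shape of the bounds built symbolically by the deleted getPolynomialApproximationBounds/getTfModelBounds, uses the textbook Lagrange remainder magnitude in place of the exact symbolic remainder produced by getTaylor, and works in double arithmetic rather than exact rationals; the names Bounds and taylorBounds are illustrative only and do not appear in the codebase.

#include <cmath>
#include <cstdio>

// Numeric lower/upper enclosure of exp(x) or sin(x): Maclaurin sum truncated
// at degree n, widened by the Lagrange remainder magnitude |x|^{n+1}/(n+1)!.
// The bounds are combined in the same shape as in the deleted
// getPolynomialApproximationBounds: [sum-ru, sum+ru] for sine, and
// [sum, sum*(1+ru)] (x>0) or [sum, sum+ru] (x<0) for exp.
struct Bounds
{
  double lower;
  double upper;
};

static Bounds taylorBounds(bool isExp, double x, unsigned n)
{
  double sum = 0.0, fact = 1.0, xpow = 1.0;
  for (unsigned i = 0; i <= n; ++i)
  {
    if (i > 0)
    {
      fact *= i;
      xpow *= x;
    }
    if (isExp)
    {
      sum += xpow / fact;
    }
    else if (i % 2 == 1)
    {
      // sine: odd powers only, with alternating signs
      sum += ((i / 2) % 2 == 0 ? 1.0 : -1.0) * xpow / fact;
    }
  }
  // remainder factor ru = |x|^{n+1}/(n+1)!
  double ru = std::abs(xpow * x) / (fact * (n + 1));
  if (isExp)
  {
    // The multiplicative upper bound is only sound for moderate positive x;
    // the solver bumps the degree for large arguments, as done in the deleted
    // getPolynomialApproximationBoundForArg.
    return x > 0 ? Bounds{sum, sum * (1 + ru)} : Bounds{sum, sum + ru};
  }
  return Bounds{sum - ru, sum + ru};
}

int main()
{
  Bounds b = taylorBounds(false, 1.0, 6);  // enclose sin(1) at degree 6
  std::printf("sin(1) in [%.6f, %.6f]\n", b.lower, b.upper);
  return 0;
}

For sin at 1 with degree 6 this prints the interval [0.841468, 0.841865], matching the 4241/5040 and 4243/5040 bounds quoted in the worked example of the header file deleted below.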
diff --git a/src/theory/arith/nl/transcendental_solver.h b/src/theory/arith/nl/transcendental_solver.h
deleted file mode 100644
index 004221be2..000000000
--- a/src/theory/arith/nl/transcendental_solver.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/********************* */
-/*! \file transcendental_solver.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Solving for handling transcendental functions.
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
-#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
-
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "expr/node.h"
-#include "theory/arith/inference_manager.h"
-#include "theory/arith/nl/nl_model.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-namespace nl {
-
-/** Transcendental solver class
- *
- * This class implements model-based refinement schemes
- * for transcendental functions, described in:
- *
- * - "Satisfiability Modulo Transcendental
- * Functions via Incremental Linearization" by Cimatti
- * et al., CADE 2017.
- *
- * Its main functionality is a set of methods that implement the lemma schemas
- * below, which return a set of lemmas that should be sent on the output
- * channel.
- */
-class TranscendentalSolver
-{
- public:
- TranscendentalSolver(InferenceManager& im, NlModel& m);
- ~TranscendentalSolver();
-
- /** init last call
- *
- * This is called at the beginning of last call effort check, where
- * assertions are the set of assertions belonging to arithmetic,
- * false_asserts is the subset of assertions that are false in the current
- * model, and xts is the set of extended function terms that are active in
- * the current context.
- *
- * This call may add lemmas to lems based on registering term
- * information (for example, purification of sine terms).
- */
- void initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts);
- /** increment taylor degree */
- void incrementTaylorDegree();
- /** get taylor degree */
- unsigned getTaylorDegree() const;
- /** preprocess assertions check model
- *
- * This modifies the given assertions in preparation for running a call
- * to check model.
- *
- * This method returns false if a bound for a transcendental function
- * was conflicting.
- */
- bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
- /** Process side effects in lemma se */
- void processSideEffect(const NlLemma& se);
- //-------------------------------------------- lemma schemas
- /** check transcendental initial refine
- *
- * Constructs a set of valid theory lemmas, based on
- * simple facts about transcendental functions.
- * This mostly follows the initial axioms described in
- * Section 4 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- *
- * Examples:
- *
- * sin( x ) = -sin( -x )
- * ( PI > x > 0 ) => 0 < sin( x ) < 1
- * exp( x )>0
- * x<0 => exp( x )<1
- */
- void checkTranscendentalInitialRefine();
-
- /** check transcendental monotonic
- *
- * Constructs a set of valid theory lemmas, based on a
- * lemma scheme that ensures that applications
- * of transcendental functions respect monotonicity.
- *
- * Examples:
- *
- * x > y => exp( x ) > exp( y )
- * PI/2 > x > y > 0 => sin( x ) > sin( y )
- * PI > x > y > PI/2 => sin( x ) < sin( y )
- */
- void checkTranscendentalMonotonic();
-
- /** check transcendental tangent planes
- *
- * Constructs a set of valid theory lemmas, based on
- * computing an "incremental linearization" of
- * transcendental functions based on the model values
- * of transcendental functions and their arguments.
- * It is based on Figure 3 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- * This schema is not terminating in general.
- * It is not enabled by default, and can
- * be enabled by --nl-ext-tf-tplanes.
- *
- * Example:
- *
- * Assume we have a term sin(y) with M( y ) = 1, where M is the current model.
- * Note that:
- * sin(1) ~= .841471
- *
- * The Taylor series and remainder of sin(y) of degree 7 are
- * P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/120)*x^5
- * R_{7,sin(0),b}( x ) = (1/5040)*x^7
- *
- * This gives us lower and upper bounds :
- * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
- * ...where note P_u( 1 ) = 4243/5040 ~= .841865
- * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
- * ...where note P_l( 1 ) = 4241/5040 ~= .841468
- *
- * Assume that M( sin(y) ) > P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a tangent plane refinement.
- * The tangent plane at the point 1 in P_u is
- * given by the formula:
- * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
- * We add the lemma:
- * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
- * which is:
- * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y + 1506/2737)
- *
- * Assume instead that M( sin(y) ) < P_l( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a secant plane refinement for some constants ( l, u )
- * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
- * l = 0 and u = M( PI/2 ) = 150517/47912.
- * The secant planes at point 1 for P_l
- * are given by the formulas:
- * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
- * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
- * We add the lemmas:
- * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
- * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
- * which are:
- * ( 0 < y < 1 ) => (sin y) >= 4241/5040*y
- * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
- * where c1, c2 are rationals (for brevity, omitted here)
- * such that c1 ~= .277 and c2 ~= 2.032.
- */
- void checkTranscendentalTangentPlanes();
- private:
- /** check transcendental function refinement for tf
- *
- * This method is called by the above method for each "master"
- * transcendental function application that occurs in an assertion in the
- * current context. For example, an application like sin(t) is not a master
- * if we have introduced the constraints:
- * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
- * See d_trMaster/d_trSlaves for more detail.
- *
- * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
- * function application tf for Taylor degree d. It may add a secant or
- * tangent plane lemma to lems, which includes the side effect of the lemma
- * (if one exists).
- *
- * It returns false if the bounds are not precise enough to add a
- * secant or tangent plane lemma.
- */
- bool checkTfTangentPlanesFun(Node tf, unsigned d);
- //-------------------------------------------- end lemma schemas
- /** polynomial approximation bounds
- *
- * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
- * d_taylor_real_fv. These are polynomial approximations of the Taylor series
- * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
- * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
- * for positive/negative (+/-) values of the argument of <k>( 0 ).
- *
- * Notice that for certain bounds (e.g. upper bounds for exponential), the
- * Taylor approximation for a fixed degree is only sound up to a given
- * upper bound on the argument. To obtain sound lower/upper bounds for a
- * given <k>( c ), use the function below.
- */
- void getPolynomialApproximationBounds(Kind k,
- unsigned d,
- std::vector<Node>& pbounds);
- /** polynomial approximation bounds for a given argument
- *
- * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
- * that are sound (lower, upper) bounds for <k>( c ). Notice that these
- * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
- * c>0, we return the P_u+[x] from the function above for the minimum degree
- * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
- */
- void getPolynomialApproximationBoundForArg(Kind k,
- Node c,
- unsigned d,
- std::vector<Node>& pbounds);
- /** get transcendental function model bounds
- *
- * This returns the current lower and upper bounds of transcendental
- * function application tf based on Taylor of degree 2*d, which is dependent
- * on the model value of its argument.
- */
- std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
- /** get monotonicity direction
- *
- * Returns whether the slope is positive (+1) or negative (-1)
- * in region of transcendental function with kind k.
- * Returns 0 if region is invalid.
- */
- int regionToMonotonicityDir(Kind k, int region);
- /** get concavity
- *
- * Returns whether we are concave (+1) or convex (-1)
- * in region of transcendental function with kind k,
- * where region is defined above.
- * Returns 0 if region is invalid.
- */
- int regionToConcavity(Kind k, int region);
- /** region to lower bound
- *
- * Returns the term corresponding to the lower
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no lower bound for the
- * region.
- */
- Node regionToLowerBound(Kind k, int region);
- /** region to upper bound
- *
- * Returns the term corresponding to the upper
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no upper bound for the
- * region.
- */
- Node regionToUpperBound(Kind k, int region);
- /** get derivative
- *
- * Returns d/dx n. Supports cases of n
- * for transcendental functions applied to x,
- * multiplication, addition, constants and variables.
- * Returns Node::null() if derivative is an
- * unhandled case.
- */
- Node getDerivative(Node n, Node x);
-
- void mkPi();
- void getCurrentPiBounds();
- /** Make the node -pi <= a <= pi */
- static Node mkValidPhase(Node a, Node pi);
-
- /** The inference manager that we push conflicts and lemmas to. */
- InferenceManager& d_im;
- /** Reference to the non-linear model object */
- NlModel& d_model;
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_neg_one;
- Node d_true;
- Node d_false;
- /**
- * Some transcendental functions f(t) are "purified", e.g. we add
- * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
- * purified we call "master terms".
- *
- * The maps below maintain a master/slave relationship over
- * transcendental functions (SINE, EXPONENTIAL, PI), where above
- * f(y) is the master of itself and of f(t).
- *
- * This is used for ensuring that the argument y of SINE we process is on the
- * interval [-pi .. pi], and that exponentials are not applied to arguments
- * that contain transcendental functions.
- */
- std::map<Node, Node> d_trMaster;
- std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
- /** The transcendental functions we have done initial refinements on */
- std::map<Node, bool> d_tf_initial_refine;
-
- /** concavity region for transcendental functions
- *
- * This stores an integer that identifies an interval in
- * which the current model value for an argument of an
- * application of a transcendental function resides.
- *
- * For exp( x ):
- * region #1 is -infty < x < infty
- * For sin( x ):
- * region #0 is pi < x < infty (this is an invalid region)
- * region #1 is pi/2 < x <= pi
- * region #2 is 0 < x <= pi/2
- * region #3 is -pi/2 < x <= 0
- * region #4 is -pi < x <= -pi/2
- * region #5 is -infty < x <= -pi (this is an invalid region)
- * All regions not listed above, as well as regions 0 and 5
- * for SINE are "invalid". We only process applications
- * of transcendental functions whose arguments have model
- * values that reside in valid regions.
- */
- std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
- /** cache of the results of getPolynomialApproximationBounds */
- std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
-
- /**
- * Maps representatives of a congruence class to the members of that class.
- *
- * In detail, a congruence class is a set of terms of the form
- * { f(t1), ..., f(tn) }
- * such that t1 = ... = tn in the current context. We choose an arbitrary
- * term among these to be the representative of this congruence class.
- *
- * Moreover, notice we compute congruence classes only over terms that
- * are transcendental function applications that are "master terms",
- * see d_trMaster/d_trSlave.
- */
- std::map<Node, std::vector<Node>> d_funcCongClass;
- /**
- * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
- * that are representatives of their congruence class.
- */
- std::map<Kind, std::vector<Node>> d_funcMap;
-
- // tangent plane bounds
- std::map<Node, std::map<Node, Node>> d_tangent_val_bound[4];
-
- /** secant points (sorted list) for transcendental functions
- *
- * This is used for tangent plane refinements for
- * transcendental functions. This is the set
- * "get-previous-secant-points" in "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017, for
- * each transcendental function application. We store this set for each
- * Taylor degree.
- */
- std::unordered_map<Node,
- std::map<unsigned, std::vector<Node>>,
- NodeHashFunction>
- d_secant_points;
-
- /** get Taylor series of degree n for function fa centered around point fa[0].
- *
- * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
- * the first part of the pair is the Taylor series expansion :
- * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
- * and the second part of the pair is the Taylor series remainder :
- * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
- *
- * The above values are cached for each (f,n) for a fixed variable "a".
- * To compute the Taylor series for fa, we compute the Taylor series
- * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
- * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
- * if fa[0]=0.
- * In the latter case, note we compute the power x^{n+1}
- * instead of (x-a)^{n+1}, which can be done faster.
- */
- std::pair<Node, Node> getTaylor(Node fa, unsigned n);
-
- /** internal variables used for constructing (cached) versions of the Taylor
- * series above.
- */
- Node d_taylor_real_fv; // x above
- Node d_taylor_real_fv_base; // a above
- Node d_taylor_real_fv_base_rem; // b above
-
- /** cache of sum and remainder terms for getTaylor */
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_sum;
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_rem;
- /** taylor degree
- *
- * Indicates that the degree of the polynomials in the Taylor approximation of
- * all transcendental functions is 2*d_taylor_degree. This value is set
- * initially to options::nlExtTfTaylorDegree() and may be incremented
- * if the option options::nlExtTfIncPrecision() is enabled.
- */
- unsigned d_taylor_degree;
- /** PI
- *
- * Note that PI is a (symbolic, non-constant) nullary operator. This is
- * because its value cannot be computed exactly. We constrain PI to concrete
- * lower and upper bounds stored in d_pi_bound below.
- */
- Node d_pi;
- /** PI/2 */
- Node d_pi_2;
- /** -PI/2 */
- Node d_pi_neg_2;
- /** -PI */
- Node d_pi_neg;
- /** the concrete lower and upper bounds for PI */
- Node d_pi_bound[2];
-}; /* class TranscendentalSolver */
-
-} // namespace nl
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H */
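The rational constants in the worked sine example documented for checkTranscendentalTangentPlanes in the header deleted above can be checked with elementary Taylor arithmetic; the derivation below is supplementary material rather than text from the commit:

\begin{align*}
P_{7,\sin(0)}(x) &= x - \tfrac{1}{6}x^3 + \tfrac{1}{120}x^5, \qquad R_{7,\sin(0),b}(x) = \tfrac{1}{5040}x^7,\\
P_u(1) &= \tfrac{101}{120} + \tfrac{1}{5040} = \tfrac{4243}{5040} \approx 0.841865, \qquad
P_l(1) = \tfrac{101}{120} - \tfrac{1}{5040} = \tfrac{4241}{5040} \approx 0.841468,\\
P_u'(1) &= 1 - \tfrac{1}{2} + \tfrac{1}{24} + \tfrac{1}{720} = \tfrac{391}{720},\\
T(x) &= \tfrac{4243}{5040} + \tfrac{391}{720}\,(x - 1) = \tfrac{391}{720}\Big(x + \tfrac{1506}{2737}\Big),\\
S_l(x) &= \frac{P_l(1) - P_l(0)}{1 - 0}\,x = \tfrac{4241}{5040}\,x.
\end{align*}

These values agree with sin(1) ~= .841471 and with the tangent and secant coefficients quoted in that example.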
diff --git a/src/theory/datatypes/sygus_datatype_utils.cpp b/src/theory/datatypes/sygus_datatype_utils.cpp
index 612722d48..f99f58969 100644
--- a/src/theory/datatypes/sygus_datatype_utils.cpp
+++ b/src/theory/datatypes/sygus_datatype_utils.cpp
@@ -37,12 +37,19 @@ Node applySygusArgs(const DType& dt,
Node n,
const std::vector<Node>& args)
{
+ // optimization: if n is just a sygus bound variable, return immediately
+ // by replacing with the proper argument, or returning unchanged if it is
+ // a bound variable not corresponding to a formal argument.
if (n.getKind() == BOUND_VARIABLE)
{
- Assert(n.hasAttribute(SygusVarNumAttribute()));
- int vn = n.getAttribute(SygusVarNumAttribute());
- Assert(dt.getSygusVarList()[vn] == n);
- return args[vn];
+ if (n.hasAttribute(SygusVarNumAttribute()))
+ {
+ int vn = n.getAttribute(SygusVarNumAttribute());
+ Assert(dt.getSygusVarList()[vn] == n);
+ return args[vn];
+ }
+ // it is a different bound variable; return it unchanged
+ return n;
}
// n is an application of operator op.
// We must compute the free variables in op to determine if there are
@@ -551,8 +558,10 @@ Node sygusToBuiltinEval(Node n, const std::vector<Node>& args)
children.push_back(it->second);
}
index = indexOf(cur.getOperator());
- // apply to arguments
+ // apply to children, which constructs the builtin term
ret = mkSygusTerm(dt, index, children);
+ // now apply it to arguments in args
+ ret = applySygusArgs(dt, dt[index].getSygusOp(), ret, args);
}
visited[cur] = ret;
}
diff --git a/src/theory/datatypes/sygus_datatype_utils.h b/src/theory/datatypes/sygus_datatype_utils.h
index 3f833702c..fabb295eb 100644
--- a/src/theory/datatypes/sygus_datatype_utils.h
+++ b/src/theory/datatypes/sygus_datatype_utils.h
@@ -113,7 +113,7 @@ Node mkSygusTerm(Node op,
const std::vector<Node>& children,
bool doBetaReduction = true);
/**
- * n is a builtin term that is an application of operator op.
+ * n is a builtin term that is a (rewritten) application of operator op.
*
* This returns an n' such that (eval n args) is n', where n' is a instance of
* n for the appropriate substitution.
@@ -122,18 +122,18 @@ Node mkSygusTerm(Node op,
* say we have grammar:
* A -> A+A | A+x | A+(x+y) | y
* These lead to constructors with sygus ops:
- * C1 / (lambda w1 w2. w1+w2)
- * C2 / (lambda w1. w1+x)
- * C3 / (lambda w1. w1+(x+y))
- * C4 / y
+ * C1 / L1 where L1 is (lambda w1 w2. w1+w2)
+ * C2 / L2 where L2 is (lambda w1. w1+x)
+ * C3 / L3 where L3 is (lambda w1. w1+(x+y))
+ * C4 / L4 where L4 is y
* Examples of calling this function:
- * applySygusArgs( dt, C1, (APPLY_UF (lambda w1 w2. w1+w2) t1 t2), { 3, 5 } )
+ * applySygusArgs( dt, L1, (APPLY_UF L1 t1 t2), { 3, 5 } )
* ... returns (APPLY_UF (lambda w1 w2. w1+w2) t1 t2).
- * applySygusArgs( dt, C2, (APPLY_UF (lambda w1. w1+x) t1), { 3, 5 } )
+ * applySygusArgs( dt, L2, (APPLY_UF L2 t1), { 3, 5 } )
* ... returns (APPLY_UF (lambda w1. w1+3) t1).
- * applySygusArgs( dt, C3, (APPLY_UF (lambda w1. w1+(x+y)) t1), { 3, 5 } )
+ * applySygusArgs( dt, L3, (APPLY_UF L3 t1), { 3, 5 } )
* ... returns (APPLY_UF (lambda w1. w1+(3+5)) t1).
- * applySygusArgs( dt, C4, y, { 3, 5 } )
+ * applySygusArgs( dt, L4, L4, { 3, 5 } )
* ... returns 5.
* Notice the attribute SygusVarFreeAttribute is applied to C1, C2, C3, C4,
* to cache the results of whether the evaluation of this constructor needs
@@ -185,7 +185,7 @@ Node builtinVarToSygus(Node v);
* where n' is a variable, then this method returns:
* 12 + (DT_SYGUS_EVAL n' 3 4)
* Notice that the subterm C_*( C_x(), C_y() ) is converted to its builtin
- * equivalent x*y and evaluated under the substitution { x -> 3, x -> 4 } giving
+ * equivalent x*y and evaluated under the substitution { x -> 3, y -> 4 } giving
* 12. The subterm n' is non-constant and thus we return its evaluation under
* 3,4, giving the term (DT_SYGUS_EVAL n' 3 4). Since the top-level constructor
* is C_+, these terms are added together to give the result.
diff --git a/src/theory/datatypes/theory_datatypes.cpp b/src/theory/datatypes/theory_datatypes.cpp
index 6bfe136ce..74c822215 100644
--- a/src/theory/datatypes/theory_datatypes.cpp
+++ b/src/theory/datatypes/theory_datatypes.cpp
@@ -1551,13 +1551,18 @@ void TheoryDatatypes::instantiate( EqcInfo* eqc, Node n ){
}
eq = tt.eqNode(tt_cons);
// Determine if the equality must be sent out as a lemma. Notice that
- // we can keep new equalities from the instantiate rule internal as long as
- // they are for datatype constructors that have no arguments that have
- // finite external type. Such equalities must be sent because they introduce
- // selector terms that may contribute to conflicts due to cardinality (good
- // examples of this are regress0/datatypes/dt-param-card4-bool-sat.smt2 and
+ // we could potentially keep new equalities from the instantiate rule internal
+ // as long as they are for datatype constructors that have no arguments that
+ // have finite external type, which would correspond to:
+ // forceLemma = dt[index].hasFiniteExternalArgType(ttn);
+ // Equalities for constructors that do have such arguments must be sent,
+ // since they introduce selector terms that may contribute to conflicts due
+ // to cardinality (good examples of this are
+ // regress0/datatypes/dt-param-card4-bool-sat.smt2 and
// regress0/datatypes/list-bool.smt2).
- bool forceLemma = dt[index].hasFiniteExternalArgType(ttn);
+ // For now, to be conservative, we always send lemmas out, since otherwise
+ // we may encounter conflicts during model building when datatypes adds its
+ // equality engine to the theory model.
+ bool forceLemma = true;
Trace("datatypes-infer-debug") << "DtInstantiate : " << eqc << " " << eq
<< " forceLemma = " << forceLemma << std::endl;
d_im.addPendingInference(eq, exp, forceLemma, InferId::INST);
diff --git a/src/theory/quantifiers/sygus/sygus_eval_unfold.cpp b/src/theory/quantifiers/sygus/sygus_eval_unfold.cpp
index 9f3ed4f4f..882c9ca77 100644
--- a/src/theory/quantifiers/sygus/sygus_eval_unfold.cpp
+++ b/src/theory/quantifiers/sygus/sygus_eval_unfold.cpp
@@ -314,8 +314,11 @@ Node SygusEvalUnfold::unfold(Node en,
Node ret = d_tds->mkGeneric(dt, i, pre);
// apply the appropriate substitution to ret
ret = datatypes::utils::applySygusArgs(dt, sop, ret, args);
+ Trace("sygus-eval-unfold-debug")
+ << "Applied sygus args : " << ret << std::endl;
// rewrite
ret = Rewriter::rewrite(ret);
+ Trace("sygus-eval-unfold-debug") << "Rewritten : " << ret << std::endl;
return ret;
}
diff --git a/src/theory/quantifiers/sygus/sygus_repair_const.cpp b/src/theory/quantifiers/sygus/sygus_repair_const.cpp
index 989f7b758..81120da28 100644
--- a/src/theory/quantifiers/sygus/sygus_repair_const.cpp
+++ b/src/theory/quantifiers/sygus/sygus_repair_const.cpp
@@ -15,6 +15,7 @@
#include "theory/quantifiers/sygus/sygus_repair_const.h"
#include "api/cvc4cpp.h"
+#include "expr/node_algorithm.h"
#include "options/base_options.h"
#include "options/quantifiers_options.h"
#include "printer/printer.h"
@@ -200,7 +201,10 @@ bool SygusRepairConst::repairSolution(Node sygusBody,
candidate_skeletons,
sk_vars,
sk_vars_to_subs);
+ Trace("sygus-repair-const-debug")
+ << "...after fit-to-logic : " << fo_body << std::endl;
}
+ Assert(!expr::hasFreeVar(fo_body));
if (fo_body.isNull() || sk_vars.empty())
{
diff --git a/src/theory/smt_engine_subsolver.cpp b/src/theory/smt_engine_subsolver.cpp
index ed10e85ae..ee1ae198f 100644
--- a/src/theory/smt_engine_subsolver.cpp
+++ b/src/theory/smt_engine_subsolver.cpp
@@ -48,7 +48,7 @@ void initializeSubsolver(std::unique_ptr<SmtEngine>& smte,
NodeManager* nm = NodeManager::currentNM();
SmtEngine* smtCurr = smt::currentSmtEngine();
// must copy the options
- smte.reset(new SmtEngine(nm->toExprManager(), &smtCurr->getOptions()));
+ smte.reset(new SmtEngine(nm, &smtCurr->getOptions()));
smte->setIsInternalSubsolver();
smte->setLogic(smtCurr->getLogicInfo());
// set the options
diff --git a/src/theory/strings/regexp_entail.cpp b/src/theory/strings/regexp_entail.cpp
index 0ab634c88..4a6df2fd8 100644
--- a/src/theory/strings/regexp_entail.cpp
+++ b/src/theory/strings/regexp_entail.cpp
@@ -252,9 +252,11 @@ Node RegExpEntail::simpleRegexpConsume(std::vector<Node>& mchildren,
{
if (children_s.empty())
{
- // check if beyond this, we can't do it or there is nothing
- // left, if so, repeat
- bool can_skip = false;
+ // Check if beyond this, we hit a conflict. In this case, we
+ // must repeat. Notice that we do not treat the case where
+ // there are no more strings to consume as a failure, since
+ // we may be within a recursive call, see issue #5510.
+ bool can_skip = true;
if (children.size() > 1)
{
std::vector<Node> mchildren_ss;
@@ -273,9 +275,9 @@ Node RegExpEntail::simpleRegexpConsume(std::vector<Node>& mchildren,
Trace("regexp-ext-rewrite-debug") << push;
Node rets = simpleRegexpConsume(mchildren_ss, children_ss, t);
Trace("regexp-ext-rewrite-debug") << pop;
- if (rets.isNull())
+ if (!rets.isNull())
{
- can_skip = true;
+ can_skip = false;
}
}
if (!can_skip)
diff --git a/src/theory/strings/theory_strings_preprocess.cpp b/src/theory/strings/theory_strings_preprocess.cpp
index 81ec79327..87ab533f4 100644
--- a/src/theory/strings/theory_strings_preprocess.cpp
+++ b/src/theory/strings/theory_strings_preprocess.cpp
@@ -965,7 +965,9 @@ Node StringsPreprocess::simplifyRec(Node t,
Node retNode = t;
if( t.getNumChildren()==0 ){
retNode = simplify(t, asserts);
- }else if( t.getKind()!=kind::FORALL ){
+ }
+ else if (!t.isClosure())
+ {
bool changed = false;
std::vector< Node > cc;
if( t.getMetaKind() == kind::metakind::PARAMETERIZED ){
diff --git a/src/theory/theory_preprocessor.cpp b/src/theory/theory_preprocessor.cpp
index 3b68a33ca..f5e5fa505 100644
--- a/src/theory/theory_preprocessor.cpp
+++ b/src/theory/theory_preprocessor.cpp
@@ -350,30 +350,32 @@ Node TheoryPreprocessor::ppTheoryRewrite(TNode term)
{
return (*find).second;
}
- unsigned nc = term.getNumChildren();
- if (nc == 0)
+ if (term.getNumChildren() == 0)
{
return preprocessWithProof(term);
}
Trace("theory-pp") << "ppTheoryRewrite { " << term << endl;
-
- Node newTerm = term;
+ // We must rewrite before preprocessing, because some terms when rewritten
+ // may introduce new terms that are not top-level and require preprocessing.
+ // An example of this is (forall ((x Int)) (and (tail L) (P x))) which
+ // rewrites to (and (tail L) (forall ((x Int)) (P x))). The subterm (tail L)
+ // must be preprocessed as a child here.
+ Node newTerm = rewriteWithProof(term);
// do not rewrite inside quantifiers
- if (!term.isClosure())
+ if (newTerm.getNumChildren() > 0 && !newTerm.isClosure())
{
- NodeBuilder<> newNode(term.getKind());
- if (term.getMetaKind() == kind::metakind::PARAMETERIZED)
+ NodeBuilder<> newNode(newTerm.getKind());
+ if (newTerm.getMetaKind() == kind::metakind::PARAMETERIZED)
{
- newNode << term.getOperator();
+ newNode << newTerm.getOperator();
}
- unsigned i;
- for (i = 0; i < nc; ++i)
+ for (const Node& nt : newTerm)
{
- newNode << ppTheoryRewrite(term[i]);
+ newNode << ppTheoryRewrite(nt);
}
newTerm = Node(newNode);
+ newTerm = rewriteWithProof(newTerm);
}
- newTerm = rewriteWithProof(newTerm);
newTerm = preprocessWithProof(newTerm);
d_ppCache[term] = newTerm;
Trace("theory-pp") << "ppTheoryRewrite returning " << newTerm << "}" << endl;
diff --git a/src/theory/uf/ho_extension.cpp b/src/theory/uf/ho_extension.cpp
index d7de0a025..66bcbc76f 100644
--- a/src/theory/uf/ho_extension.cpp
+++ b/src/theory/uf/ho_extension.cpp
@@ -36,14 +36,14 @@ HoExtension::HoExtension(TheoryState& state, TheoryInferenceManager& im)
d_true = NodeManager::currentNM()->mkConst(true);
}
-Node HoExtension::expandDefinition(Node node)
+Node HoExtension::ppRewrite(Node node)
{
// convert HO_APPLY to APPLY_UF if fully applied
if (node[0].getType().getNumChildren() == 2)
{
Trace("uf-ho") << "uf-ho : expanding definition : " << node << std::endl;
Node ret = getApplyUfForHoApply(node);
- Trace("uf-ho") << "uf-ho : expandDefinition : " << node << " to " << ret
+ Trace("uf-ho") << "uf-ho : ppRewrite : " << node << " to " << ret
<< std::endl;
return ret;
}
diff --git a/src/theory/uf/ho_extension.h b/src/theory/uf/ho_extension.h
index fa9c5c612..0e82dbda6 100644
--- a/src/theory/uf/ho_extension.h
+++ b/src/theory/uf/ho_extension.h
@@ -54,7 +54,7 @@ class HoExtension
public:
HoExtension(TheoryState& state, TheoryInferenceManager& im);
- /** expand definition
+ /** ppRewrite
*
* This returns the expanded form of node.
*
@@ -63,7 +63,7 @@ class HoExtension
* function variables for function heads that are not variables via the
* getApplyUfForHoApply method below.
*/
- Node expandDefinition(Node node);
+ Node ppRewrite(Node node);
/** check higher order
*
diff --git a/src/theory/uf/theory_uf.cpp b/src/theory/uf/theory_uf.cpp
index 099b56a33..6fdc969a4 100644
--- a/src/theory/uf/theory_uf.cpp
+++ b/src/theory/uf/theory_uf.cpp
@@ -204,21 +204,21 @@ void TheoryUF::notifyFact(TNode atom, bool pol, TNode fact, bool isInternal)
}
//--------------------------------- end standard check
-TrustNode TheoryUF::expandDefinition(Node node)
+TrustNode TheoryUF::ppRewrite(TNode node)
{
- Trace("uf-exp-def") << "TheoryUF::expandDefinition: expanding definition : "
- << node << std::endl;
+ Trace("uf-exp-def") << "TheoryUF::ppRewrite: expanding definition : " << node
+ << std::endl;
if( node.getKind()==kind::HO_APPLY ){
if( !options::ufHo() ){
std::stringstream ss;
ss << "Partial function applications are not supported in default mode, try --uf-ho.";
throw LogicException(ss.str());
}
- Node ret = d_ho->expandDefinition(node);
+ Node ret = d_ho->ppRewrite(node);
if (ret != node)
{
- Trace("uf-exp-def") << "TheoryUF::expandDefinition: higher-order: "
- << node << " to " << ret << std::endl;
+ Trace("uf-exp-def") << "TheoryUF::ppRewrite: higher-order: " << node
+ << " to " << ret << std::endl;
return TrustNode::mkTrustRewrite(node, ret, nullptr);
}
}
diff --git a/src/theory/uf/theory_uf.h b/src/theory/uf/theory_uf.h
index 4a63d9584..3f76919eb 100644
--- a/src/theory/uf/theory_uf.h
+++ b/src/theory/uf/theory_uf.h
@@ -142,7 +142,7 @@ private:
bool collectModelValues(TheoryModel* m,
const std::set<Node>& termSet) override;
- TrustNode expandDefinition(Node node) override;
+ TrustNode ppRewrite(TNode node) override;
void preRegisterTerm(TNode term) override;
TrustNode explain(TNode n) override;
diff --git a/test/regress/CMakeLists.txt b/test/regress/CMakeLists.txt
index af238db18..816202fa9 100644
--- a/test/regress/CMakeLists.txt
+++ b/test/regress/CMakeLists.txt
@@ -622,6 +622,7 @@ set(regress_0_tests
regress0/nl/issue3971.smt2
regress0/nl/issue3991.smt2
regress0/nl/issue4007-rint-uf.smt2
+ regress0/nl/issue5534-no-assertions.smt2
regress0/nl/magnitude-wrong-1020-m.smt2
regress0/nl/mult-po.smt2
regress0/nl/nia-wrong-tl.smt2
@@ -769,7 +770,6 @@ set(regress_0_tests
regress0/quantifiers/issue4086-infs.smt2
regress0/quantifiers/issue4275-qcf-cegqi-rep.smt2
regress0/quantifiers/issue4437-unc-quant.smt2
- regress0/quantifiers/issue4476-ext-rew.smt2
regress0/quantifiers/issue4576.smt2
regress0/quantifiers/lra-triv-gn.smt2
regress0/quantifiers/macros-int-real.smt2
@@ -1283,6 +1283,8 @@ set(regress_1_tests
regress1/arith/div.08.smt2
regress1/arith/div.09.smt2
regress1/arith/issue3952-rew-eq.smt2
+ regress1/arith/issue4985-model-success.smt2
+ regress1/arith/issue4985b-model-success.smt2
regress1/arith/issue789.smt2
regress1/arith/miplib3.cvc
regress1/arith/mod.02.smt2
@@ -1613,6 +1615,7 @@ set(regress_1_tests
regress1/quantifiers/issue5019-cegqi-i.smt2
regress1/quantifiers/issue5279-nqe.smt2
regress1/quantifiers/issue5365-nqe.smt2
+ regress1/quantifiers/issue5378-witness.smt2
regress1/quantifiers/issue5469-aext.smt2
regress1/quantifiers/issue5470-aext.smt2
regress1/quantifiers/issue5471-aext.smt2
@@ -1862,6 +1865,9 @@ set(regress_1_tests
regress1/strings/issue5330.smt2
regress1/strings/issue5330_2.smt2
regress1/strings/issue5374-proxy-i.smt2
+ regress1/strings/issue5483-pp-leq.smt2
+ regress1/strings/issue5510-re-consume.smt2
+ regress1/strings/issue5520-re-consume.smt2
regress1/strings/kaluza-fl.smt2
regress1/strings/loop002.smt2
regress1/strings/loop003.smt2
@@ -2412,6 +2418,8 @@ set(regression_disabled_tests
regress0/ite_arith.smt2
regress0/lemmas/fischer3-mutex-16.smtv1.smt2
regress0/nl/all-logic.smt2
+ # timeout after change to datatypes, impacting sygus-inst
+ regress0/quantifiers/issue4476-ext-rew.smt2
regress0/quantifiers/qbv-multi-lit-uge.smt2
regress0/quantifiers/qbv-test-invert-bvshl-0-neq.smt2
regress0/quantifiers/qbv-test-invert-concat-0-neq.smt2
diff --git a/test/regress/regress0/datatypes/dt-param-2.6-print.smt2 b/test/regress/regress0/datatypes/dt-param-2.6-print.smt2
index 2b706478f..ce92821c1 100644
--- a/test/regress/regress0/datatypes/dt-param-2.6-print.smt2
+++ b/test/regress/regress0/datatypes/dt-param-2.6-print.smt2
@@ -1,6 +1,5 @@
; EXPECT: sat
; EXPECT: (
-; EXPECT: (declare-datatypes ((Pair 2)) ((par (X Y)((mkPair (first X) (second Y))))))
; EXPECT: (define-fun x () (Pair Int Real) ((as mkPair (Pair Int Real)) 2 (/ 3 2)))
; EXPECT: )
diff --git a/test/regress/regress0/nl/issue5534-no-assertions.smt2 b/test/regress/regress0/nl/issue5534-no-assertions.smt2
new file mode 100644
index 000000000..e7a9f0cd1
--- /dev/null
+++ b/test/regress/regress0/nl/issue5534-no-assertions.smt2
@@ -0,0 +1,12 @@
+; COMMAND-LINE: --incremental
+; EXPECT: unsat
+; EXPECT: sat
+(set-logic QF_UFNRA)
+(declare-fun r () Real)
+(declare-fun u (Real Real) Bool)
+(assert (u 0.0 (* r r)))
+(push)
+(assert (and (< r 0.0) (< 0.0 r)))
+(check-sat)
+(pop)
+(check-sat)
\ No newline at end of file
diff --git a/test/regress/regress1/arith/issue4985-model-success.smt2 b/test/regress/regress1/arith/issue4985-model-success.smt2
new file mode 100644
index 000000000..794eefb37
--- /dev/null
+++ b/test/regress/regress1/arith/issue4985-model-success.smt2
@@ -0,0 +1,7 @@
+(set-logic QF_AUFNRA)
+(set-info :status sat)
+(declare-const arr0 (Array Real Real))
+(declare-const r5 Real)
+(declare-const r19 Real)
+(assert (! (<= 0.0 0.0 48107.0 (- 6.7954749 0.0 6.7954749 0.0 (select arr0 (/ 40.87941 r5))) r19) :named IP_174))
+(check-sat)
diff --git a/test/regress/regress1/arith/issue4985b-model-success.smt2 b/test/regress/regress1/arith/issue4985b-model-success.smt2
new file mode 100644
index 000000000..eae8d369d
--- /dev/null
+++ b/test/regress/regress1/arith/issue4985b-model-success.smt2
@@ -0,0 +1,6 @@
+(set-logic QF_AUFNRA)
+(set-info :status sat)
+(declare-const a (Array Real Real))
+(declare-const r Real)
+(assert (= 1.0 (select a (/ 2 r))))
+(check-sat)
diff --git a/test/regress/regress1/ho/NUM925^1.p b/test/regress/regress1/ho/NUM925^1.p
index d2977ddb8..e162a63a4 100644
--- a/test/regress/regress1/ho/NUM925^1.p
+++ b/test/regress/regress1/ho/NUM925^1.p
@@ -1,4 +1,4 @@
-% COMMAND-LINE: --uf-ho
+% COMMAND-LINE: --uf-ho --ho-elim
% EXPECT: % SZS status Theorem for NUM925^1
%------------------------------------------------------------------------------
diff --git a/test/regress/regress1/ho/nested_lambdas-AGT034^2.smt2 b/test/regress/regress1/ho/nested_lambdas-AGT034^2.smt2
index edb55d756..313ff68a1 100644
--- a/test/regress/regress1/ho/nested_lambdas-AGT034^2.smt2
+++ b/test/regress/regress1/ho/nested_lambdas-AGT034^2.smt2
@@ -1,4 +1,4 @@
-; COMMAND-LINE: --uf-ho --no-check-unsat-cores --no-produce-models
+; COMMAND-LINE: --uf-ho --no-check-unsat-cores --no-produce-models --ho-elim
; EXPECT: unsat
(set-logic ALL)
diff --git a/test/regress/regress1/quantifiers/issue5378-witness.smt2 b/test/regress/regress1/quantifiers/issue5378-witness.smt2
new file mode 100644
index 000000000..b8628a432
--- /dev/null
+++ b/test/regress/regress1/quantifiers/issue5378-witness.smt2
@@ -0,0 +1,5 @@
+; COMMAND-LINE: --sygus-inst --strings-exp
+; EXPECT: unsat
+(set-logic ALL)
+(assert (forall ((a Int) (b Int)) (or (> a (/ 0 b)) (exists ((c Int)) (< a c b)))))
+(check-sat)
diff --git a/test/regress/regress1/strings/issue5483-pp-leq.smt2 b/test/regress/regress1/strings/issue5483-pp-leq.smt2
new file mode 100644
index 000000000..9e9900b21
--- /dev/null
+++ b/test/regress/regress1/strings/issue5483-pp-leq.smt2
@@ -0,0 +1,9 @@
+; COMMAND-LINE: -i
+; EXPECT: sat
+(set-logic QF_SLIA)
+(declare-fun _substvar_21_ () String)
+(declare-fun _substvar_29_ () String)
+(set-option :strings-lazy-pp false)
+(assert (xor true true true true (str.<= _substvar_21_ _substvar_29_) true true))
+(push 1)
+(check-sat)
diff --git a/test/regress/regress1/strings/issue5510-re-consume.smt2 b/test/regress/regress1/strings/issue5510-re-consume.smt2
new file mode 100644
index 000000000..3b7aaf344
--- /dev/null
+++ b/test/regress/regress1/strings/issue5510-re-consume.smt2
@@ -0,0 +1,9 @@
+; COMMAND-LINE: --strings-exp
+; EXPECT: sat
+(set-logic QF_S)
+(set-info :status sat)
+(declare-fun x () String)
+(declare-fun y () String)
+(assert (str.in_re (str.++ x "B" y) (re.* (re.++ (str.to_re "A") (re.union (re.* (str.to_re "A")) (str.to_re "B"))))))
+(assert (str.in_re y (str.to_re "A")))
+(check-sat)
diff --git a/test/regress/regress1/strings/issue5520-re-consume.smt2 b/test/regress/regress1/strings/issue5520-re-consume.smt2
new file mode 100644
index 000000000..3fc4fe8e5
--- /dev/null
+++ b/test/regress/regress1/strings/issue5520-re-consume.smt2
@@ -0,0 +1,12 @@
+; COMMAND-LINE: --strings-exp
+; EXPECT: sat
+(set-logic QF_S)
+(set-info :status sat)
+(declare-fun x () String)
+(assert
+ (not
+ (str.in_re (str.++ "a" x "ca")
+ (re.*
+ (re.++ (re.union (str.to_re "a") (str.to_re "b"))
+ (re.union (re.* (str.to_re "a")) (str.to_re "b") (str.to_re "c")))))))
+(check-sat)
diff --git a/test/regress/regress2/ho/involved_parsing_ALG248^3.p b/test/regress/regress2/ho/involved_parsing_ALG248^3.p
index 5ad693629..d0577dd1f 100644
--- a/test/regress/regress2/ho/involved_parsing_ALG248^3.p
+++ b/test/regress/regress2/ho/involved_parsing_ALG248^3.p
@@ -1,4 +1,4 @@
-% COMMAND-LINE: --uf-ho
+% COMMAND-LINE: --uf-ho --ho-elim
% EXPECT: % SZS status Theorem for involved_parsing_ALG248^3
%------------------------------------------------------------------------------
diff --git a/test/unit/expr/attribute_white.h b/test/unit/expr/attribute_white.h
index f1b782be2..de1795264 100644
--- a/test/unit/expr/attribute_white.h
+++ b/test/unit/expr/attribute_white.h
@@ -66,7 +66,7 @@ class AttributeWhite : public CxxTest::TestSuite {
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smtEngine = new SmtEngine(d_em);
+ d_smtEngine = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smtEngine);
d_booleanType = new TypeNode(d_nm->booleanType());
}
diff --git a/test/unit/expr/type_node_white.h b/test/unit/expr/type_node_white.h
index 22d3f0d84..8ac17bbbd 100644
--- a/test/unit/expr/type_node_white.h
+++ b/test/unit/expr/type_node_white.h
@@ -42,7 +42,7 @@ class TypeNodeWhite : public CxxTest::TestSuite {
{
d_em = new ExprManager();
d_nm = d_em->getNodeManager();
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new NodeManagerScope(d_nm);
}
diff --git a/test/unit/parser/parser_black.h b/test/unit/parser/parser_black.h
index 3b0bbb139..ef8f2e3cf 100644
--- a/test/unit/parser/parser_black.h
+++ b/test/unit/parser/parser_black.h
@@ -259,8 +259,6 @@ public:
tryGoodInput("a : INT = 5; a: INT;"); // decl after define, compatible
tryGoodInput("a : TYPE; a : INT;"); // ok, sort and variable symbol spaces distinct
tryGoodInput("a : TYPE; a : INT; b : a;"); // ok except a is both INT and sort `a'
- //tryGoodInput("a : [0..0]; b : [-5..5]; c : [-1..1]; d : [ _ .._];"); // subranges
- tryGoodInput("a : [ _..1]; b : [_.. 0]; c :[_..-1];");
tryGoodInput("DATATYPE list = nil | cons(car:INT,cdr:list) END; DATATYPE cons = null END;");
tryGoodInput("DATATYPE tree = node(data:list), list = cons(car:tree,cdr:list) | nil END;");
//tryGoodInput("DATATYPE tree = node(data:[list,list,ARRAY tree OF list]), list = cons(car:ARRAY list OF tree,cdr:BITVECTOR(32)) END;");
diff --git a/test/unit/preprocessing/pass_bv_gauss_white.h b/test/unit/preprocessing/pass_bv_gauss_white.h
index 0ff00a7d5..a0a708c5b 100644
--- a/test/unit/preprocessing/pass_bv_gauss_white.h
+++ b/test/unit/preprocessing/pass_bv_gauss_white.h
@@ -174,7 +174,7 @@ class TheoryBVGaussWhite : public CxxTest::TestSuite
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
diff --git a/test/unit/prop/cnf_stream_white.h b/test/unit/prop/cnf_stream_white.h
index ae79e1517..495097a79 100644
--- a/test/unit/prop/cnf_stream_white.h
+++ b/test/unit/prop/cnf_stream_white.h
@@ -131,9 +131,9 @@ class CnfStreamWhite : public CxxTest::TestSuite {
void setUp() override
{
d_exprManager = new ExprManager();
- d_smt = new SmtEngine(d_exprManager);
- d_smt->d_logic.lock();
d_nodeManager = NodeManager::fromExprManager(d_exprManager);
+ d_smt = new SmtEngine(d_nodeManager);
+ d_smt->d_logic.lock();
d_scope = new SmtScope(d_smt);
// Notice that this unit test uses the theory engine of a created SMT
diff --git a/test/unit/theory/evaluator_white.h b/test/unit/theory/evaluator_white.h
index 421987e3c..1f33d0fbf 100644
--- a/test/unit/theory/evaluator_white.h
+++ b/test/unit/theory/evaluator_white.h
@@ -50,7 +50,7 @@ class TheoryEvaluatorWhite : public CxxTest::TestSuite
opts.setOutputLanguage(language::output::LANG_SMTLIB_V2);
d_em = new ExprManager;
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em, &opts);
+ d_smt = new SmtEngine(d_nm, &opts);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
}
diff --git a/test/unit/theory/sequences_rewriter_white.h b/test/unit/theory/sequences_rewriter_white.h
index 1215b4a31..09cb925a3 100644
--- a/test/unit/theory/sequences_rewriter_white.h
+++ b/test/unit/theory/sequences_rewriter_white.h
@@ -47,12 +47,11 @@ class SequencesRewriterWhite : public CxxTest::TestSuite
Options opts;
opts.setOutputLanguage(language::output::LANG_SMTLIB_V2);
d_em = new ExprManager;
- d_smt = new SmtEngine(d_em, &opts);
+ d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm, &opts);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
d_rewriter = new ExtendedRewriter(true);
-
- d_nm = NodeManager::currentNM();
}
void tearDown() override
diff --git a/test/unit/theory/theory_arith_white.h b/test/unit/theory/theory_arith_white.h
index f0b3f18fe..3475b1dc8 100644
--- a/test/unit/theory/theory_arith_white.h
+++ b/test/unit/theory/theory_arith_white.h
@@ -101,7 +101,7 @@ public:
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_smt->setOption("incremental", CVC4::SExpr(false));
d_ctxt = d_smt->getContext();
d_uctxt = d_smt->getUserContext();
diff --git a/test/unit/theory/theory_bags_normal_form_white.h b/test/unit/theory/theory_bags_normal_form_white.h
index 4e19ba90e..72fc4284e 100644
--- a/test/unit/theory/theory_bags_normal_form_white.h
+++ b/test/unit/theory/theory_bags_normal_form_white.h
@@ -35,8 +35,8 @@ class BagsNormalFormWhite : public CxxTest::TestSuite
void setUp() override
{
d_em.reset(new ExprManager());
- d_smt.reset(new SmtEngine(d_em.get()));
d_nm.reset(NodeManager::fromExprManager(d_em.get()));
+ d_smt.reset(new SmtEngine(d_nm.get()));
d_smt->finishInit();
d_rewriter.reset(new BagsRewriter(nullptr));
}
diff --git a/test/unit/theory/theory_bags_rewriter_white.h b/test/unit/theory/theory_bags_rewriter_white.h
index e47e32784..a58e2cdad 100644
--- a/test/unit/theory/theory_bags_rewriter_white.h
+++ b/test/unit/theory/theory_bags_rewriter_white.h
@@ -34,8 +34,8 @@ class BagsTypeRuleWhite : public CxxTest::TestSuite
void setUp() override
{
d_em.reset(new ExprManager());
- d_smt.reset(new SmtEngine(d_em.get()));
d_nm.reset(NodeManager::fromExprManager(d_em.get()));
+ d_smt.reset(new SmtEngine(d_nm.get()));
d_smt->finishInit();
d_rewriter.reset(new BagsRewriter(nullptr));
}
diff --git a/test/unit/theory/theory_bags_type_rules_white.h b/test/unit/theory/theory_bags_type_rules_white.h
index e454dfa28..8d1db858d 100644
--- a/test/unit/theory/theory_bags_type_rules_white.h
+++ b/test/unit/theory/theory_bags_type_rules_white.h
@@ -34,8 +34,8 @@ class BagsTypeRuleWhite : public CxxTest::TestSuite
void setUp() override
{
d_em.reset(new ExprManager());
- d_smt.reset(new SmtEngine(d_em.get()));
d_nm.reset(NodeManager::fromExprManager(d_em.get()));
+ d_smt.reset(new SmtEngine(d_nm.get()));
d_smt->finishInit();
}
diff --git a/test/unit/theory/theory_bv_rewriter_white.h b/test/unit/theory/theory_bv_rewriter_white.h
index fae90918a..738a51831 100644
--- a/test/unit/theory/theory_bv_rewriter_white.h
+++ b/test/unit/theory/theory_bv_rewriter_white.h
@@ -41,11 +41,10 @@ class TheoryBvRewriterWhite : public CxxTest::TestSuite
Options opts;
opts.setOutputLanguage(language::output::LANG_SMTLIB_V2);
d_em = new ExprManager;
- d_smt = new SmtEngine(d_em, &opts);
+ d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm, &opts);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
-
- d_nm = NodeManager::currentNM();
}
void tearDown() override
diff --git a/test/unit/theory/theory_bv_white.h b/test/unit/theory/theory_bv_white.h
index bfde7866e..f568dc779 100644
--- a/test/unit/theory/theory_bv_white.h
+++ b/test/unit/theory/theory_bv_white.h
@@ -54,7 +54,7 @@ public:
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
}
diff --git a/test/unit/theory/theory_engine_white.h b/test/unit/theory/theory_engine_white.h
index 8a99946e5..97da7eb8b 100644
--- a/test/unit/theory/theory_engine_white.h
+++ b/test/unit/theory/theory_engine_white.h
@@ -192,8 +192,8 @@ public:
void setUp() override {
d_em = new ExprManager();
- d_smt = new SmtEngine(d_em);
d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
d_ctxt = d_smt->getContext();
diff --git a/test/unit/theory/theory_quantifiers_bv_instantiator_white.h b/test/unit/theory/theory_quantifiers_bv_instantiator_white.h
index f2c5f0e1d..e19459f15 100644
--- a/test/unit/theory/theory_quantifiers_bv_instantiator_white.h
+++ b/test/unit/theory/theory_quantifiers_bv_instantiator_white.h
@@ -63,7 +63,7 @@ void BvInstantiatorWhite::setUp()
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
}
diff --git a/test/unit/theory/theory_quantifiers_bv_inverter_white.h b/test/unit/theory/theory_quantifiers_bv_inverter_white.h
index bb44656bc..ac25f60a9 100644
--- a/test/unit/theory/theory_quantifiers_bv_inverter_white.h
+++ b/test/unit/theory/theory_quantifiers_bv_inverter_white.h
@@ -222,7 +222,7 @@ class TheoryQuantifiersBvInverter : public CxxTest::TestSuite
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_smt->setOption("cegqi-full", CVC4::SExpr(true));
d_smt->setOption("produce-models", CVC4::SExpr(true));
d_scope = new SmtScope(d_smt);
diff --git a/test/unit/theory/theory_sets_type_enumerator_white.h b/test/unit/theory/theory_sets_type_enumerator_white.h
index 965e59267..1b5ba1dcb 100644
--- a/test/unit/theory/theory_sets_type_enumerator_white.h
+++ b/test/unit/theory/theory_sets_type_enumerator_white.h
@@ -36,8 +36,8 @@ class SetEnumeratorWhite : public CxxTest::TestSuite
void setUp() override
{
d_em = new ExprManager();
- d_smt = new SmtEngine(d_em);
d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
}
diff --git a/test/unit/theory/theory_sets_type_rules_white.h b/test/unit/theory/theory_sets_type_rules_white.h
index 66c8dc123..00b012745 100644
--- a/test/unit/theory/theory_sets_type_rules_white.h
+++ b/test/unit/theory/theory_sets_type_rules_white.h
@@ -28,8 +28,8 @@ class SetsTypeRuleWhite : public CxxTest::TestSuite
{
d_slv.reset(new Solver());
d_em.reset(new ExprManager());
- d_smt.reset(new SmtEngine(d_em.get()));
d_nm.reset(NodeManager::fromExprManager(d_em.get()));
+ d_smt.reset(new SmtEngine(d_nm.get()));
d_smt->finishInit();
}
diff --git a/test/unit/theory/theory_strings_skolem_cache_black.h b/test/unit/theory/theory_strings_skolem_cache_black.h
index 4135adb0c..dd70dd5b9 100644
--- a/test/unit/theory/theory_strings_skolem_cache_black.h
+++ b/test/unit/theory/theory_strings_skolem_cache_black.h
@@ -35,13 +35,13 @@ class TheoryStringsSkolemCacheBlack : public CxxTest::TestSuite
void setUp() override
{
d_em.reset(new ExprManager());
- d_smt.reset(new SmtEngine(d_em.get()));
+ d_nm = NodeManager::fromExprManager(d_em.get());
+ d_smt.reset(new SmtEngine(d_nm));
d_scope.reset(new SmtScope(d_smt.get()));
// Ensure that the SMT engine is fully initialized (required for the
// rewriter)
d_smt->push();
- d_nm = NodeManager::fromExprManager(d_em.get());
}
void tearDown() override {}
diff --git a/test/unit/theory/theory_strings_word_white.h b/test/unit/theory/theory_strings_word_white.h
index 69143a69c..ce432d385 100644
--- a/test/unit/theory/theory_strings_word_white.h
+++ b/test/unit/theory/theory_strings_word_white.h
@@ -38,10 +38,10 @@ class TheoryStringsWordWhite : public CxxTest::TestSuite
Options opts;
opts.setOutputLanguage(language::output::LANG_SMTLIB_V2);
d_em = new ExprManager;
- d_smt = new SmtEngine(d_em, &opts);
+ d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm, &opts);
d_scope = new SmtScope(d_smt);
- d_nm = NodeManager::currentNM();
}
void tearDown() override
diff --git a/test/unit/theory/theory_white.h b/test/unit/theory/theory_white.h
index 552aeb3c6..f69830fd8 100644
--- a/test/unit/theory/theory_white.h
+++ b/test/unit/theory/theory_white.h
@@ -175,7 +175,7 @@ class TheoryBlack : public CxxTest::TestSuite {
{
d_em = new ExprManager();
d_nm = NodeManager::fromExprManager(d_em);
- d_smt = new SmtEngine(d_em);
+ d_smt = new SmtEngine(d_nm);
d_ctxt = d_smt->getContext();
d_uctxt = d_smt->getUserContext();
d_scope = new SmtScope(d_smt);
diff --git a/test/unit/theory/type_enumerator_white.h b/test/unit/theory/type_enumerator_white.h
index ea5a38913..24bf9922e 100644
--- a/test/unit/theory/type_enumerator_white.h
+++ b/test/unit/theory/type_enumerator_white.h
@@ -42,8 +42,8 @@ class TypeEnumeratorWhite : public CxxTest::TestSuite {
void setUp() override
{
d_em = new ExprManager();
- d_smt = new SmtEngine(d_em);
d_nm = NodeManager::fromExprManager(d_em);
+ d_smt = new SmtEngine(d_nm);
d_scope = new SmtScope(d_smt);
d_smt->finishInit();
}