summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/CMakeLists.txt3
-rw-r--r--src/theory/arith/arith_utilities.cpp6
-rw-r--r--src/theory/arith/arith_utilities.h3
-rw-r--r--src/theory/arith/nl_lemma_utils.cpp63
-rw-r--r--src/theory/arith/nl_lemma_utils.h52
-rw-r--r--src/theory/arith/nonlinear_extension.cpp1506
-rw-r--r--src/theory/arith/nonlinear_extension.h333
-rw-r--r--src/theory/arith/transcendental_solver.cpp1475
-rw-r--r--src/theory/arith/transcendental_solver.h419
9 files changed, 2051 insertions, 1809 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 809b00b04..760c3fba7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -293,6 +293,7 @@ libcvc4_add_sources(
theory/arith/linear_equality.h
theory/arith/matrix.cpp
theory/arith/matrix.h
+ theory/arith/nl_lemma_utils.cpp
theory/arith/nl_lemma_utils.h
theory/arith/nl_model.cpp
theory/arith/nl_model.h
@@ -318,6 +319,8 @@ libcvc4_add_sources(
theory/arith/theory_arith_private.h
theory/arith/theory_arith_private_forward.h
theory/arith/theory_arith_type_rules.h
+ theory/arith/transcendental_solver.cpp
+ theory/arith/transcendental_solver.h
theory/arith/type_enumerator.h
theory/arrays/array_info.cpp
theory/arrays/array_info.h
diff --git a/src/theory/arith/arith_utilities.cpp b/src/theory/arith/arith_utilities.cpp
index cbb27197f..cb8524a58 100644
--- a/src/theory/arith/arith_utilities.cpp
+++ b/src/theory/arith/arith_utilities.cpp
@@ -272,6 +272,12 @@ Node arithSubstitute(Node n, std::vector<Node>& vars, std::vector<Node>& subs)
return visited[n];
}
+Node mkBounded(Node l, Node a, Node u)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ return nm->mkNode(AND, nm->mkNode(GEQ, a, l), nm->mkNode(LEQ, a, u));
+}
+
} // namespace arith
} // namespace theory
} // namespace CVC4
diff --git a/src/theory/arith/arith_utilities.h b/src/theory/arith/arith_utilities.h
index f87a908b4..2d466f52f 100644
--- a/src/theory/arith/arith_utilities.h
+++ b/src/theory/arith/arith_utilities.h
@@ -335,6 +335,9 @@ void printRationalApprox(const char* c, Node cr, unsigned prec = 5);
*/
Node arithSubstitute(Node n, std::vector<Node>& vars, std::vector<Node>& subs);
+/** Make the node u >= a ^ a >= l */
+Node mkBounded(Node l, Node a, Node u);
+
}/* CVC4::theory::arith namespace */
}/* CVC4::theory namespace */
}/* CVC4 namespace */
diff --git a/src/theory/arith/nl_lemma_utils.cpp b/src/theory/arith/nl_lemma_utils.cpp
new file mode 100644
index 000000000..e43a77b06
--- /dev/null
+++ b/src/theory/arith/nl_lemma_utils.cpp
@@ -0,0 +1,63 @@
+/********************* */
+/*! \file nl_lemma_utils.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of utilities for the non-linear solver
+ **/
+
+#include "theory/arith/nl_lemma_utils.h"
+
+#include "theory/arith/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+bool SortNlModel::operator()(Node i, Node j)
+{
+ int cv = d_nlm->compare(i, j, d_isConcrete, d_isAbsolute);
+ if (cv == 0)
+ {
+ return i < j;
+ }
+ return d_reverse_order ? cv < 0 : cv > 0;
+}
+
+bool SortNonlinearDegree::operator()(Node i, Node j)
+{
+ unsigned i_count = getDegree(i);
+ unsigned j_count = getDegree(j);
+ return i_count == j_count ? (i < j) : (i_count < j_count ? true : false);
+}
+
+unsigned SortNonlinearDegree::getDegree(Node n) const
+{
+ std::map<Node, unsigned>::const_iterator it = d_mdegree.find(n);
+ Assert(it != d_mdegree.end());
+ return it->second;
+}
+
+Node ArgTrie::add(Node d, const std::vector<Node>& args)
+{
+ ArgTrie* at = this;
+ for (const Node& a : args)
+ {
+ at = &(at->d_children[a]);
+ }
+ if (at->d_data.isNull())
+ {
+ at->d_data = d;
+ }
+ return at->d_data;
+}
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/nl_lemma_utils.h b/src/theory/arith/nl_lemma_utils.h
index 74886a1fb..9ad9f2ca5 100644
--- a/src/theory/arith/nl_lemma_utils.h
+++ b/src/theory/arith/nl_lemma_utils.h
@@ -23,6 +23,8 @@ namespace CVC4 {
namespace theory {
namespace arith {
+class NlModel;
+
/**
* A side effect of adding a lemma in the non-linear solver. This is used
* to specify how the state of the non-linear solver should update. This
@@ -46,6 +48,56 @@ struct NlLemmaSideEffect
std::vector<std::tuple<Node, unsigned, Node> > d_secantPoint;
};
+struct SortNlModel
+{
+ SortNlModel()
+ : d_nlm(nullptr),
+ d_isConcrete(true),
+ d_isAbsolute(false),
+ d_reverse_order(false)
+ {
+ }
+ /** pointer to the model */
+ NlModel* d_nlm;
+ /** are we comparing concrete model values? */
+ bool d_isConcrete;
+ /** are we comparing absolute values? */
+ bool d_isAbsolute;
+ /** are we in reverse order? */
+ bool d_reverse_order;
+ /** the comparison */
+ bool operator()(Node i, Node j);
+};
+
+struct SortNonlinearDegree
+{
+ SortNonlinearDegree(std::map<Node, unsigned>& m) : d_mdegree(m) {}
+ /** pointer to the non-linear extension */
+ std::map<Node, unsigned>& d_mdegree;
+ /** Get the degree of n in d_mdegree */
+ unsigned getDegree(Node n) const;
+ /**
+ * Sorts by degree of the monomials, where lower degree monomials come
+ * first.
+ */
+ bool operator()(Node i, Node j);
+};
+
+/** An argument trie, for computing congruent terms */
+class ArgTrie
+{
+ public:
+ /** children of this node */
+ std::map<Node, ArgTrie> d_children;
+ /** the data of this node */
+ Node d_data;
+ /**
+ * Set d as the data on the node whose path is [args], return either d if
+ * that node has no data, or the data that already occurs there.
+ */
+ Node add(Node d, const std::vector<Node>& args);
+};
+
} // namespace arith
} // namespace theory
} // namespace CVC4
diff --git a/src/theory/arith/nonlinear_extension.cpp b/src/theory/arith/nonlinear_extension.cpp
index f11d55855..61a8b18b0 100644
--- a/src/theory/arith/nonlinear_extension.cpp
+++ b/src/theory/arith/nonlinear_extension.cpp
@@ -113,49 +113,6 @@ void debugPrintBound(const char* c, Node coeff, Node x, Kind type, Node rhs) {
Trace(c) << t << " " << type << " " << rhs;
}
-struct SortNlModel
-{
- SortNlModel()
- : d_nlm(nullptr),
- d_isConcrete(true),
- d_isAbsolute(false),
- d_reverse_order(false)
- {
- }
- /** pointer to the model */
- NlModel* d_nlm;
- /** are we comparing concrete model values? */
- bool d_isConcrete;
- /** are we comparing absolute values? */
- bool d_isAbsolute;
- /** are we in reverse order? */
- bool d_reverse_order;
- /** the comparison */
- bool operator()(Node i, Node j) {
- int cv = d_nlm->compare(i, j, d_isConcrete, d_isAbsolute);
- if (cv == 0) {
- return i < j;
- }
- return d_reverse_order ? cv < 0 : cv > 0;
- }
-};
-struct SortNonlinearDegree
-{
- SortNonlinearDegree(NodeMultiset& m) : d_mdegree(m) {}
- /** pointer to the non-linear extension */
- NodeMultiset& d_mdegree;
- /**
- * Sorts by degree of the monomials, where lower degree monomials come
- * first.
- */
- bool operator()(Node i, Node j)
- {
- unsigned i_count = getCount(d_mdegree, i);
- unsigned j_count = getCount(d_mdegree, j);
- return i_count == j_count ? (i < j) : (i_count < j_count ? true : false);
- }
-};
-
bool hasNewMonomials(Node n, const std::vector<Node>& existing) {
std::set<Node> visited;
@@ -189,6 +146,7 @@ NonlinearExtension::NonlinearExtension(TheoryArith& containing,
d_ee(ee),
d_needsLastCall(false),
d_model(containing.getSatContext()),
+ d_trSlv(d_model),
d_builtModel(containing.getSatContext(), false)
{
d_true = NodeManager::currentNM()->mkConst(true);
@@ -200,13 +158,6 @@ NonlinearExtension::NonlinearExtension(TheoryArith& containing,
d_order_points.push_back(d_neg_one);
d_order_points.push_back(d_zero);
d_order_points.push_back(d_one);
- d_taylor_real_fv = NodeManager::currentNM()->mkBoundVar(
- "x", NodeManager::currentNM()->realType());
- d_taylor_real_fv_base = NodeManager::currentNM()->mkBoundVar(
- "a", NodeManager::currentNM()->realType());
- d_taylor_real_fv_base_rem = NodeManager::currentNM()->mkBoundVar(
- "b", NodeManager::currentNM()->realType());
- d_taylor_degree = options::nlExtTfTaylorDegree();
}
NonlinearExtension::~NonlinearExtension() {}
@@ -509,13 +460,6 @@ Node NonlinearExtension::mkValidPhase(Node a, Node pi) {
NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
}
-Node NonlinearExtension::mkBounded( Node l, Node a, Node u ) {
- return NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(GEQ, a, l),
- NodeManager::currentNM()->mkNode(LEQ, a, u));
-}
-
Node NonlinearExtension::mkMonomialRemFactor(
Node n, const NodeMultiset& n_exp_rem) const {
std::vector<Node> children;
@@ -566,13 +510,7 @@ void NonlinearExtension::sendLemmas(const std::vector<Node>& out,
void NonlinearExtension::processSideEffect(const NlLemmaSideEffect& se)
{
- for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
- {
- Node tf = std::get<0>(sp);
- unsigned d = std::get<1>(sp);
- Node c = std::get<2>(sp);
- d_secant_points[tf][d].push_back(c);
- }
+ d_trSlv.processSideEffect(se);
}
unsigned NonlinearExtension::filterLemma(Node lem, std::vector<Node>& out)
@@ -779,90 +717,18 @@ bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
// get the presubstitution
Trace("nl-ext-cm-debug") << " apply pre-substitution..." << std::endl;
- std::vector<Node> pvars;
- std::vector<Node> psubs;
- for (std::pair<const Node, Node>& tb : d_trMaster)
- {
- pvars.push_back(tb.first);
- psubs.push_back(tb.second);
- }
- // initialize representation of assertions
- std::vector<Node> passertions;
- for (const Node& a : assertions)
- {
- Node pa = a;
- if (!pvars.empty())
- {
- pa = arithSubstitute(pa, pvars, psubs);
- pa = Rewriter::rewrite(pa);
- }
- if (!pa.isConst() || !pa.getConst<bool>())
- {
- Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
- passertions.push_back(pa);
- }
- }
+ std::vector<Node> passertions = assertions;
- // get model bounds for all transcendental functions
- Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
+  // preprocess the assertions with the transcendental solver
+ if (!d_trSlv.preprocessAssertionsCheckModel(passertions))
{
- Kind k = tfs.first;
- for (const Node& tf : tfs.second)
- {
- Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
- bool success = true;
- // tf is Figure 3 : tf( x )
- Node bl;
- Node bu;
- if (k == PI)
- {
- bl = d_pi_bound[0];
- bu = d_pi_bound[1];
- }
- else
- {
- std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
- bl = bounds.first;
- bu = bounds.second;
- if (bl != bu)
- {
- d_model.setUsedApproximate();
- }
- }
- if (!bl.isNull() && !bu.isNull())
- {
- // for each function in the congruence classe
- for (const Node& ctf : d_funcCongClass[tf])
- {
- // each term in congruence classes should be master terms
- Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
- // we set the bounds for each slave of tf
- for (const Node& stf : d_trSlaves[ctf])
- {
- Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
- << bu << "]" << std::endl;
- success = d_model.addCheckModelBound(stf, bl, bu);
- }
- }
- }
- else
- {
- Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
- }
- if (!success)
- {
- // a bound was conflicting
- Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
- Trace("nl-ext-cm") << "-----" << std::endl;
- return false;
- }
- }
+ return false;
}
+
Trace("nl-ext-cm") << "-----" << std::endl;
- bool ret = d_model.checkModel(
- passertions, false_asserts, d_taylor_degree, lemmas, gs);
+ unsigned tdegree = d_trSlv.getTaylorDegree();
+ bool ret =
+ d_model.checkModel(passertions, false_asserts, tdegree, lemmas, gs);
return ret;
}
@@ -882,33 +748,6 @@ std::vector<Node> NonlinearExtension::checkSplitZero() {
return lemmas;
}
-/** An argument trie, for computing congruent terms */
-class ArgTrie
-{
- public:
- /** children of this node */
- std::map<Node, ArgTrie> d_children;
- /** the data of this node */
- Node d_data;
- /**
- * Set d as the data on the node whose path is [args], return either d if
- * that node has no data, or the data that already occurs there.
- */
- Node add(Node d, const std::vector<Node>& args)
- {
- ArgTrie* at = this;
- for (const Node& a : args)
- {
- at = &(at->d_children[a]);
- }
- if (at->d_data.isNull())
- {
- at->d_data = d;
- }
- return at->d_data;
- }
-};
-
int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
const std::vector<Node>& false_asserts,
const std::vector<Node>& xts,
@@ -926,18 +765,8 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
d_ci.clear();
d_ci_exp.clear();
d_ci_max.clear();
- d_funcCongClass.clear();
- d_funcMap.clear();
- d_tf_region.clear();
-
- std::vector<Node> lemmas;
- NodeManager* nm = NodeManager::currentNM();
Trace("nl-ext-mv") << "Extended terms : " << std::endl;
- // register the extended function terms
- std::map< Node, Node > mvarg_to_term;
- std::vector<Node> trNeedsMaster;
- bool needPi = false;
// for computing congruence
std::map<Kind, ArgTrie> argTrie;
for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
@@ -947,47 +776,6 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
d_model.computeAbstractModelValue(a);
d_model.printModelValue("nl-ext-mv", a);
Kind ak = a.getKind();
- bool consider = true;
- // if is an unpurified application of SINE, or it is a transcendental
- // applied to a trancendental, purify.
- if (isTranscendentalKind(ak))
- {
- // if we've already computed master for a
- if (d_trMaster.find(a) != d_trMaster.end())
- {
- // a master has at least one slave
- consider = (d_trSlaves.find(a) != d_trSlaves.end());
- }
- else
- {
- if (ak == SINE)
- {
- // always not a master
- consider = false;
- }
- else
- {
- for (const Node& ac : a)
- {
- if (isTranscendentalKind(ac.getKind()))
- {
- consider = false;
- break;
- }
- }
- }
- if (!consider)
- {
- // wait to assign a master below
- trNeedsMaster.push_back(a);
- }
- else
- {
- d_trMaster[a] = a;
- d_trSlaves[a].insert(a);
- }
- }
- }
if (ak == NONLINEAR_MULT)
{
d_ms.push_back( a );
@@ -1008,126 +796,19 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
}
// mark processed if has a "one" factor (will look at reduced monomial)?
}
- else if (a.getNumChildren() > 0)
- {
- if (ak == SINE)
- {
- needPi = true;
- }
- // if we didn't indicate that it should be purified above
- if( consider ){
- std::vector<Node> repList;
- for (const Node& ac : a)
- {
- Node r = d_model.computeConcreteModelValue(ac);
- repList.push_back(r);
- }
- Node aa = argTrie[ak].add(a, repList);
- if (aa != a)
- {
- // apply congruence to pairs of terms that are disequal and congruent
- Assert(aa.getNumChildren() == a.getNumChildren());
- Node mvaa = d_model.computeAbstractModelValue(a);
- Node mvaaa = d_model.computeAbstractModelValue(aa);
- if (mvaa != mvaaa)
- {
- std::vector<Node> exp;
- for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
- {
- exp.push_back(a[j].eqNode(aa[j]));
- }
- Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
- Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
- lemmas.push_back( cong_lemma );
- }
- }
- else
- {
- // new representative of congruence class
- d_funcMap[ak].push_back(a);
- }
- // add to congruence class
- d_funcCongClass[aa].push_back(a);
- }
- }
- else if (ak == PI)
- {
- Assert(consider);
- needPi = true;
- d_funcMap[ak].push_back(a);
- d_funcCongClass[a].push_back(a);
- }
- else
- {
- Assert(false);
- }
- }
- // initialize pi if necessary
- if (needPi && d_pi.isNull())
- {
- mkPi();
- getCurrentPiBounds(lemmas);
}
+  // initialize the transcendental function solver
+ std::vector<Node> lemmas;
+ d_trSlv.initLastCall(assertions, false_asserts, xts, lemmas, lemsPp);
+
+ // process lemmas that may have been generated by the transcendental solver
filterLemmas(lemmas, lems);
- if (!lems.empty())
+ if (!lems.empty() || !lemsPp.empty())
{
Trace("nl-ext") << " ...finished with " << lems.size()
<< " new lemmas during registration." << std::endl;
- return lems.size();
- }
-
- // process SINE phase shifting
- for (const Node& a : trNeedsMaster)
- {
- // should not have processed this already
- Assert(d_trMaster.find(a) == d_trMaster.end());
- Kind k = a.getKind();
- Assert(k == SINE || k == EXPONENTIAL);
- Node y =
- nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
- Node new_a = nm->mkNode(k, y);
- d_trSlaves[new_a].insert(new_a);
- d_trSlaves[new_a].insert(a);
- d_trMaster[a] = new_a;
- d_trMaster[new_a] = new_a;
- Node lem;
- if (k == SINE)
- {
- Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
- << std::endl;
- Assert(!d_pi.isNull());
- Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
- // TODO : do not introduce shift here, instead needs model-based
- // refinement for constant shifts (cvc4-projects #1284)
- lem = nm->mkNode(
- AND,
- mkValidPhase(y, d_pi),
- nm->mkNode(
- ITE,
- mkValidPhase(a[0], d_pi),
- a[0].eqNode(y),
- a[0].eqNode(nm->mkNode(
- PLUS,
- y,
- nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
- new_a.eqNode(a));
- }
- else
- {
- // do both equalities to ensure that new_a becomes a preregistered term
- lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
- }
- // note we must do preprocess on this lemma
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
- << std::endl;
- lemsPp.push_back(lem);
- }
- if (!lemsPp.empty())
- {
- Trace("nl-ext") << " ...finished with " << lemsPp.size()
- << " new lemmas SINE phase shifting." << std::endl;
- return lemsPp.size();
+ return lems.size() + lemsPp.size();
}
Trace("nl-ext") << "We have " << d_ms.size() << " monomials." << std::endl;
@@ -1148,25 +829,6 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
d_model.computeAbstractModelValue(v);
d_model.printModelValue("nl-ext-mv", v);
}
- if (Trace.isOn("nl-ext-mv"))
- {
- Trace("nl-ext-mv") << "Arguments of trancendental functions : "
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == SINE || k == EXPONENTIAL)
- {
- for (const Node& tf : tfl.second)
- {
- Node v = tf[0];
- d_model.computeConcreteModelValue(v);
- d_model.computeAbstractModelValue(v);
- d_model.printModelValue("nl-ext-mv", v);
- }
- }
- }
- }
//----------------------------------- possibly split on zero
if (options::nlExtSplitZero()) {
@@ -1182,7 +844,7 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
}
//-----------------------------------initial lemmas for transcendental functions
- lemmas = checkTranscendentalInitialRefine();
+ lemmas = d_trSlv.checkTranscendentalInitialRefine();
filterLemmas(lemmas, lems);
if (!lems.empty())
{
@@ -1202,7 +864,7 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
}
//-----------------------------------monotonicity of transdental functions
- lemmas = checkTranscendentalMonotonic();
+ lemmas = d_trSlv.checkTranscendentalMonotonic();
filterLemmas(lemmas, lems);
if (!lems.empty())
{
@@ -1313,7 +975,7 @@ int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
}
if (options::nlExtTfTangentPlanes())
{
- lemmas = checkTranscendentalTangentPlanes(lemSE);
+ lemmas = d_trSlv.checkTranscendentalTangentPlanes(lemSE);
filterLemmas(lemmas, wlems);
}
Trace("nl-ext") << " ...finished with " << wlems.size() << " waiting lemmas."
@@ -1543,12 +1205,12 @@ bool NonlinearExtension::modelBasedRefinement(
// we are incomplete
if (options::nlExtIncPrecision() && d_model.usedApproximate())
{
- d_taylor_degree++;
+ d_trSlv.incrementTaylorDegree();
needsRecheck = true;
// increase precision for PI?
// Difficult since Taylor series is very slow to converge
- Trace("nl-ext") << "...increment Taylor degree to " << d_taylor_degree
- << std::endl;
+ Trace("nl-ext") << "...increment Taylor degree to "
+ << d_trSlv.getTaylorDegree() << std::endl;
}
else
{
@@ -1660,36 +1322,6 @@ void NonlinearExtension::assignOrderIds(std::vector<Node>& vars,
}
}
-void NonlinearExtension::mkPi(){
- if( d_pi.isNull() ){
- d_pi = NodeManager::currentNM()->mkNullaryOperator(
- NodeManager::currentNM()->realType(), PI);
- d_pi_2 = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
- MULT,
- d_pi,
- NodeManager::currentNM()->mkConst(Rational(1) / Rational(2))));
- d_pi_neg_2 = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
- MULT,
- d_pi,
- NodeManager::currentNM()->mkConst(Rational(-1) / Rational(2))));
- d_pi_neg = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
- MULT, d_pi, NodeManager::currentNM()->mkConst(Rational(-1))));
- //initialize bounds
- d_pi_bound[0] =
- NodeManager::currentNM()->mkConst(Rational(103993) / Rational(33102));
- d_pi_bound[1] =
- NodeManager::currentNM()->mkConst(Rational(104348) / Rational(33215));
- }
-}
-
-void NonlinearExtension::getCurrentPiBounds( std::vector< Node >& lemmas ) {
- Node pi_lem = NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(GEQ, d_pi, d_pi_bound[0]),
- NodeManager::currentNM()->mkNode(LEQ, d_pi, d_pi_bound[1]));
- lemmas.push_back( pi_lem );
-}
-
bool NonlinearExtension::getApproximateSqrt(Node c,
Node& l,
Node& u,
@@ -2807,1098 +2439,6 @@ std::vector<Node> NonlinearExtension::checkMonomialInferResBounds() {
}
return lemmas;
}
-
-std::vector<Node> NonlinearExtension::checkTranscendentalInitialRefine() {
- std::vector< Node > lemmas;
- Trace("nl-ext") << "Get initial refinement lemmas for transcendental functions..." << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- for (const Node& t : tfl.second)
- {
- //initial refinements
- if( d_tf_initial_refine.find( t )==d_tf_initial_refine.end() ){
- d_tf_initial_refine[t] = true;
- Node lem;
- if (k == SINE)
- {
- Node symn = NodeManager::currentNM()->mkNode(
- SINE, NodeManager::currentNM()->mkNode(MULT, d_neg_one, t[0]));
- symn = Rewriter::rewrite( symn );
- // Can assume it is its own master since phase is split over 0,
- // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
- d_trMaster[symn] = symn;
- d_trSlaves[symn].insert(symn);
- Assert(d_trSlaves.find(t) != d_trSlaves.end());
- std::vector< Node > children;
-
- lem = NodeManager::currentNM()->mkNode(
- AND,
- // bounds
- NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(LEQ, t, d_one),
- NodeManager::currentNM()->mkNode(GEQ, t, d_neg_one)),
- // symmetry
- NodeManager::currentNM()->mkNode(PLUS, t, symn).eqNode(d_zero),
- // sign
- NodeManager::currentNM()->mkNode(
- EQUAL,
- NodeManager::currentNM()->mkNode(LT, t[0], d_zero),
- NodeManager::currentNM()->mkNode(LT, t, d_zero)),
- // zero val
- NodeManager::currentNM()->mkNode(
- EQUAL,
- NodeManager::currentNM()->mkNode(GT, t[0], d_zero),
- NodeManager::currentNM()->mkNode(GT, t, d_zero)));
- lem = NodeManager::currentNM()->mkNode(
- AND,
- lem,
- // zero tangent
- NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(GT, t[0], d_zero),
- NodeManager::currentNM()->mkNode(LT, t, t[0])),
- NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(LT, t[0], d_zero),
- NodeManager::currentNM()->mkNode(GT, t, t[0]))),
- // pi tangent
- NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(LT, t[0], d_pi),
- NodeManager::currentNM()->mkNode(
- LT,
- t,
- NodeManager::currentNM()->mkNode(MINUS, d_pi, t[0]))),
- NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(GT, t[0], d_pi_neg),
- NodeManager::currentNM()->mkNode(
- GT,
- t,
- NodeManager::currentNM()->mkNode(
- MINUS, d_pi_neg, t[0])))));
- }
- else if (k == EXPONENTIAL)
- {
- // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
- // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
- lem = NodeManager::currentNM()->mkNode(
- AND,
- NodeManager::currentNM()->mkNode(GT, t, d_zero),
- NodeManager::currentNM()->mkNode(
- EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
- NodeManager::currentNM()->mkNode(
- EQUAL,
- NodeManager::currentNM()->mkNode(LT, t[0], d_zero),
- NodeManager::currentNM()->mkNode(LT, t, d_one)),
- NodeManager::currentNM()->mkNode(
- OR,
- NodeManager::currentNM()->mkNode(LEQ, t[0], d_zero),
- NodeManager::currentNM()->mkNode(
- GT,
- t,
- NodeManager::currentNM()->mkNode(PLUS, t[0], d_one))));
- }
- if( !lem.isNull() ){
- lemmas.push_back( lem );
- }
- }
- }
- }
-
- return lemmas;
-}
-
-std::vector<Node> NonlinearExtension::checkTranscendentalMonotonic() {
- std::vector< Node > lemmas;
- Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..." << std::endl;
-
- //sort arguments of all transcendentals
- std::map< Kind, std::vector< Node > > sorted_tf_args;
- std::map< Kind, std::map< Node, Node > > tf_arg_to_term;
-
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == EXPONENTIAL || k == SINE)
- {
- for (const Node& tf : tfl.second)
- {
- Node a = tf[0];
- Node mvaa = d_model.computeAbstractModelValue(a);
- if (mvaa.isConst())
- {
- Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
- sorted_tf_args[k].push_back(a);
- tf_arg_to_term[k][a] = tf;
- }
- }
- }
- }
-
- SortNlModel smv;
- smv.d_nlm = &d_model;
- //sort by concrete values
- smv.d_isConcrete = true;
- smv.d_reverse_order = true;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if( !sorted_tf_args[k].empty() ){
- std::sort( sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv );
- Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k << " : " << std::endl;
- for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
- {
- Node targ = sorted_tf_args[k][i];
- Node mvatarg = d_model.computeAbstractModelValue(targ);
- Trace("nl-ext-tf-mono")
- << " " << targ << " -> " << mvatarg << std::endl;
- Node t = tf_arg_to_term[k][targ];
- Node mvat = d_model.computeAbstractModelValue(t);
- Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
- }
- std::vector< Node > mpoints;
- std::vector< Node > mpoints_vals;
- if (k == SINE)
- {
- mpoints.push_back( d_pi );
- mpoints.push_back( d_pi_2 );
- mpoints.push_back(d_zero);
- mpoints.push_back( d_pi_neg_2 );
- mpoints.push_back( d_pi_neg );
- }
- else if (k == EXPONENTIAL)
- {
- mpoints.push_back( Node::null() );
- }
- if( !mpoints.empty() ){
- //get model values for points
- for( unsigned i=0; i<mpoints.size(); i++ ){
- Node mpv;
- if( !mpoints[i].isNull() ){
- mpv = d_model.computeAbstractModelValue(mpoints[i]);
- Assert(mpv.isConst());
- }
- mpoints_vals.push_back( mpv );
- }
-
- unsigned mdir_index = 0;
- int monotonic_dir = -1;
- Node mono_bounds[2];
- Node targ, targval, t, tval;
- for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
- {
- Node sarg = sorted_tf_args[k][i];
- Node sargval = d_model.computeAbstractModelValue(sarg);
- Assert(sargval.isConst());
- Node s = tf_arg_to_term[k][ sarg ];
- Node sval = d_model.computeAbstractModelValue(s);
- Assert(sval.isConst());
-
- //increment to the proper monotonicity region
- bool increment = true;
- while (increment && mdir_index < mpoints.size())
- {
- increment = false;
- if( mpoints[mdir_index].isNull() ){
- increment = true;
- }else{
- Node pval = mpoints_vals[mdir_index];
- Assert(pval.isConst());
- if( sargval.getConst<Rational>() < pval.getConst<Rational>() ){
- increment = true;
- Trace("nl-ext-tf-mono") << "...increment at " << sarg << " since model value is less than " << mpoints[mdir_index] << std::endl;
- }
- }
- if( increment ){
- tval = Node::null();
- mono_bounds[1] = mpoints[mdir_index];
- mdir_index++;
- monotonic_dir = regionToMonotonicityDir(k, mdir_index);
- if (mdir_index < mpoints.size())
- {
- mono_bounds[0] = mpoints[mdir_index];
- }else{
- mono_bounds[0] = Node::null();
- }
- }
- }
- // store the concavity region
- d_tf_region[s] = mdir_index;
- Trace("nl-ext-concavity") << "Transcendental function " << s
- << " is in region #" << mdir_index;
- Trace("nl-ext-concavity") << ", arg model value = " << sargval
- << std::endl;
-
- if( !tval.isNull() ){
- Node mono_lem;
- if( monotonic_dir==1 && sval.getConst<Rational>() > tval.getConst<Rational>() ){
- mono_lem = NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(GEQ, targ, sarg),
- NodeManager::currentNM()->mkNode(GEQ, t, s));
- }else if( monotonic_dir==-1 && sval.getConst<Rational>() < tval.getConst<Rational>() ){
- mono_lem = NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(LEQ, targ, sarg),
- NodeManager::currentNM()->mkNode(LEQ, t, s));
- }
- if( !mono_lem.isNull() ){
- if( !mono_bounds[0].isNull() ){
- Assert(!mono_bounds[1].isNull());
- mono_lem = NodeManager::currentNM()->mkNode(
- IMPLIES,
- NodeManager::currentNM()->mkNode(
- AND,
- mkBounded(mono_bounds[0], targ, mono_bounds[1]),
- mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
- mono_lem);
- }
- Trace("nl-ext-tf-mono") << "Monotonicity lemma : " << mono_lem << std::endl;
- lemmas.push_back( mono_lem );
- }
- }
- // store the previous values
- targ = sarg;
- targval = sargval;
- t = s;
- tval = sval;
- }
- }
- }
- }
- return lemmas;
-}
-
-std::vector<Node> NonlinearExtension::checkTranscendentalTangentPlanes(
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- std::vector<Node> lemmas;
- Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
- << std::endl;
- // this implements Figure 3 of "Satisfiaility Modulo Transcendental Functions
- // via Incremental Linearization" by Cimatti et al
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- if (k == PI)
- {
- // We do not use Taylor approximation for PI currently.
- // This is because the convergence is extremely slow, and hence an
- // initial approximation is superior.
- continue;
- }
- Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
- << std::endl;
- Trace("nl-ext-tftp-debug2") << std::endl;
-
- // we substitute into the Taylor sum P_{n,f(0)}( x )
-
- for (const Node& tf : tfs.second)
- {
- // tf is Figure 3 : tf( x )
- Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
- // go until max degree is reached, or we don't meet bound criteria
- for (unsigned d = 1; d <= d_taylor_degree; d++)
- {
- Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
- unsigned prev = lemmas.size();
- if (checkTfTangentPlanesFun(tf, d, lemmas, lemSE))
- {
- Trace("nl-ext-tftp")
- << "...fail, #lemmas = " << (lemmas.size() - prev) << std::endl;
- break;
- }
- else
- {
- Trace("nl-ext-tftp") << "...success" << std::endl;
- }
- }
- }
- }
-
- return lemmas;
-}
-
-bool NonlinearExtension::checkTfTangentPlanesFun(
- Node tf,
- unsigned d,
- std::vector<Node>& lemmas,
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- NodeManager* nm = NodeManager::currentNM();
- Kind k = tf.getKind();
- // this should only be run on master applications
- Assert(d_trSlaves.find(tf) != d_trSlaves.end());
-
- // Figure 3 : c
- Node c = d_model.computeAbstractModelValue(tf[0]);
- int csign = c.getConst<Rational>().sgn();
- if (csign == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- Assert(csign == 1 || csign == -1);
-
- // Figure 3: P_l, P_u
- // mapped to for signs of c
- std::map<int, Node> poly_approx_bounds[2];
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
- poly_approx_bounds[0][1] = pbounds[0];
- poly_approx_bounds[0][-1] = pbounds[1];
- poly_approx_bounds[1][1] = pbounds[2];
- poly_approx_bounds[1][-1] = pbounds[3];
-
- // Figure 3 : v
- Node v = d_model.computeAbstractModelValue(tf);
-
- // check value of tf
- Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
- << ", degree " << d << "..." << std::endl;
- Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
- Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
-
- std::vector<Node> taylor_vars;
- taylor_vars.push_back(d_taylor_real_fv);
-
- // compute the concavity
- int region = -1;
- std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
- d_tf_region.find(tf);
- if (itr != d_tf_region.end())
- {
- region = itr->second;
- Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
- }
- // Figure 3 : conc
- int concavity = regionToConcavity(k, itr->second);
- Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
- if (concavity == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- // bounds for which we are this concavity
- // Figure 3: < l, u >
- Node bounds[2];
- if (k == SINE)
- {
- bounds[0] = regionToLowerBound(k, region);
- Assert(!bounds[0].isNull());
- bounds[1] = regionToUpperBound(k, region);
- Assert(!bounds[1].isNull());
- }
-
- // Figure 3: P
- Node poly_approx;
-
- // compute whether this is a tangent refinement or a secant refinement
- bool is_tangent = false;
- bool is_secant = false;
- std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
- for (unsigned r = 0; r < 2; r++)
- {
- Node pab = poly_approx_bounds[r][csign];
- Node v_pab = r == 0 ? mvb.first : mvb.second;
- if (!v_pab.isNull())
- {
- Trace("nl-ext-tftp-debug2") << "...model value of " << pab << " is "
- << v_pab << std::endl;
-
- Assert(v_pab.isConst());
- Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
- Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
- Node compr = Rewriter::rewrite(comp);
- Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
- if (compr == d_true)
- {
- // beyond the bounds
- if (r == 0)
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == 1;
- is_secant = concavity == -1;
- }
- else
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == -1;
- is_secant = concavity == 1;
- }
- if (Trace.isOn("nl-ext-tftp"))
- {
- Trace("nl-ext-tftp") << "*** Outside boundary point (";
- Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << ", will refine..." << std::endl;
- Trace("nl-ext-tftp") << " poly_approx = " << poly_approx
- << std::endl;
- Trace("nl-ext-tftp") << " is_tangent = " << is_tangent
- << std::endl;
- Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
- }
- break;
- }
- else
- {
- Trace("nl-ext-tftp") << " ...within " << (r == 0 ? "low" : "high")
- << " bound : ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << std::endl;
- }
- }
- }
-
- // Figure 3: P( c )
- Node poly_approx_c;
- if (is_tangent || is_secant)
- {
- Assert(!poly_approx.isNull());
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(c);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_c = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- Trace("nl-ext-tftp-debug2") << "...poly approximation at c is "
- << poly_approx_c << std::endl;
- }
- else
- {
- // we may want to continue getting better bounds
- return false;
- }
-
- if (is_tangent)
- {
- // compute tangent plane
- // Figure 3: T( x )
- // We use zero slope tangent planes, since the concavity of the Taylor
- // approximation cannot be easily established.
- Node tplane = poly_approx_c;
-
- Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
- std::vector<Node> antec;
- int mdir = regionToMonotonicityDir(k, region);
- for (unsigned i = 0; i < 2; i++)
- {
- // Tangent plane is valid in the interval [c,u) if the slope of the
- // function matches its concavity, and is valid in (l, c] otherwise.
- Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
- if (!use_bound.isNull())
- {
- Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
- antec.push_back(ant);
- }
- }
- if (!antec.empty())
- {
- Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- }
- Trace("nl-ext-tftp-debug2")
- << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma") << "*** Tangent plane lemma : " << lem
- << std::endl;
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- // Figure 3 : line 9
- lemmas.push_back(lem);
- }
- else if (is_secant)
- {
- // bounds are the minimum and maximum previous secant points
- // should not repeat secant points: secant lemmas should suffice to
- // rule out previous assignment
- Assert(std::find(
- d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
- == d_secant_points[tf][d].end());
- // Insert into the (temporary) vector. We do not update this vector
- // until we are sure this secant plane lemma has been processed. We do
- // this by mapping the lemma to a side effect below.
- std::vector<Node> spoints = d_secant_points[tf][d];
- spoints.push_back(c);
-
- // sort
- SortNlModel smv;
- smv.d_nlm = &d_model;
- smv.d_isConcrete = true;
- std::sort(spoints.begin(), spoints.end(), smv);
- // get the resulting index of c
- unsigned index =
- std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
- // bounds are the next closest upper/lower bound values
- if (index > 0)
- {
- bounds[0] = spoints[index - 1];
- }
- else
- {
- // otherwise, we use the lower boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[0].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c-1
- bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
- }
- }
- if (index < spoints.size() - 1)
- {
- bounds[1] = spoints[index + 1];
- }
- else
- {
- // otherwise, we use the upper boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[1].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c+1
- bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
- }
- }
- Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
- << " ... " << bounds[1] << std::endl;
-
- // the secant plane may be conjunction of 1-2 guarded inequalities
- std::vector<Node> lemmaConj;
- for (unsigned s = 0; s < 2; s++)
- {
- // compute secant plane
- Assert(!poly_approx.isNull());
- Assert(!bounds[s].isNull());
- // take the model value of l or u (since may contain PI)
- Node b = d_model.computeAbstractModelValue(bounds[s]);
- Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
- << " is " << b << std::endl;
- Assert(b.isConst());
- if (c != b)
- {
- // Figure 3 : P(l), P(u), for s = 0,1
- Node poly_approx_b;
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(b);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- // Figure 3: S_l( x ), S_u( x ) for s = 0,1
- Node splane;
- Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
- Assert(rcoeff_n.isConst());
- Rational rcoeff = rcoeff_n.getConst<Rational>();
- Assert(rcoeff.sgn() != 0);
- poly_approx_b = Rewriter::rewrite(poly_approx_b);
- poly_approx_c = Rewriter::rewrite(poly_approx_c);
- splane = nm->mkNode(
- PLUS,
- poly_approx_b,
- nm->mkNode(MULT,
- nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
- nm->mkConst(Rational(1) / rcoeff),
- nm->mkNode(MINUS, tf[0], b)));
-
- Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
- // With respect to Figure 3, this is slightly different.
- // In particular, we chose b to be the model value of bounds[s],
- // which is a constant although bounds[s] may not be (e.g. if it
- // contains PI).
- // To ensure that c...b does not cross an inflection point,
- // we guard with the symbolic version of bounds[s].
- // This leads to lemmas e.g. of this form:
- // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
- // b ) + P( b ) )
- // where b = (PI/2)^M, the current value of PI/2 in the model.
- // This is sound since we are guarded by the symbolic
- // representation of PI/2.
- Node antec_n =
- nm->mkNode(AND,
- nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
- nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- Trace("nl-ext-tftp-debug2")
- << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma") << "*** Secant plane lemma : " << lem
- << std::endl;
- lemmaConj.push_back(lem);
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- }
- }
- // Figure 3 : line 22
- Assert(!lemmaConj.empty());
- Node lem =
- lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
- lemmas.push_back(lem);
- // The side effect says that if lem is added, then we should add the
- // secant point c for (tf,d).
- lemSE[lem].d_secantPoint.push_back(std::make_tuple(tf, d, c));
- }
- return true;
-}
-
-int NonlinearExtension::regionToMonotonicityDir(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 4)
- {
- return -1;
- }
- else if (region == 2 || region == 3)
- {
- return 1;
- }
- }
- return 0;
-}
-
-int NonlinearExtension::regionToConcavity(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 2)
- {
- return -1;
- }
- else if (region == 3 || region == 4)
- {
- return 1;
- }
- }
- return 0;
-}
-
-Node NonlinearExtension::regionToLowerBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi_2;
- }
- else if (region == 2)
- {
- return d_zero;
- }
- else if (region == 3)
- {
- return d_pi_neg_2;
- }
- else if (region == 4)
- {
- return d_pi_neg;
- }
- }
- return Node::null();
-}
-
-Node NonlinearExtension::regionToUpperBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi;
- }
- else if (region == 2)
- {
- return d_pi_2;
- }
- else if (region == 3)
- {
- return d_zero;
- }
- else if (region == 4)
- {
- return d_pi_neg_2;
- }
- }
- return Node::null();
-}
-
-Node NonlinearExtension::getDerivative(Node n, Node x)
-{
- Assert(x.isVar());
- // only handle the cases of the taylor expansion of d
- if (n.getKind() == EXPONENTIAL)
- {
- if (n[0] == x)
- {
- return n;
- }
- }
- else if (n.getKind() == SINE)
- {
- if (n[0] == x)
- {
- Node na = NodeManager::currentNM()->mkNode(MINUS, d_pi_2, n[0]);
- Node ret = NodeManager::currentNM()->mkNode(SINE, na);
- ret = Rewriter::rewrite(ret);
- return ret;
- }
- }
- else if (n.getKind() == PLUS)
- {
- std::vector<Node> dchildren;
- for (unsigned i = 0; i < n.getNumChildren(); i++)
- {
- // PLUS is flattened in rewriter, recursion depth is bounded by 1
- Node dc = getDerivative(n[i], x);
- if (dc.isNull())
- {
- return dc;
- }else{
- dchildren.push_back(dc);
- }
- }
- return NodeManager::currentNM()->mkNode(PLUS, dchildren);
- }
- else if (n.getKind() == MULT)
- {
- Assert(n[0].isConst());
- Node dc = getDerivative(n[1], x);
- if (!dc.isNull())
- {
- return NodeManager::currentNM()->mkNode(MULT, n[0], dc);
- }
- }
- else if (n.getKind() == NONLINEAR_MULT)
- {
- unsigned xcount = 0;
- std::vector<Node> children;
- unsigned xindex = 0;
- for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
- {
- if (n[i] == x)
- {
- xcount++;
- xindex = i;
- }
- children.push_back(n[i]);
- }
- if (xcount == 0)
- {
- return d_zero;
- }
- else
- {
- children[xindex] = NodeManager::currentNM()->mkConst(Rational(xcount));
- }
- return NodeManager::currentNM()->mkNode(MULT, children);
- }
- else if (n.isVar())
- {
- return n == x ? d_one : d_zero;
- }
- else if (n.isConst())
- {
- return d_zero;
- }
- Trace("nl-ext-debug") << "No derivative computed for " << n;
- Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
- return Node::null();
-}
-
-std::pair<Node, Node> NonlinearExtension::getTaylor(Node fa, unsigned n)
-{
- Assert(n > 0);
- Node fac; // what term we cache for fa
- if (fa[0] == d_zero)
- {
- // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
- fac = fa;
- }
- else
- {
- // otherwise we use a standard factor a in (x-a)^n
- fac = NodeManager::currentNM()->mkNode(fa.getKind(), d_taylor_real_fv_base);
- }
- Node taylor_rem;
- Node taylor_sum;
- // check if we have already computed this Taylor series
- std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
- if (itt == d_taylor_sum[fac].end())
- {
- Node i_exp_base;
- if (fa[0] == d_zero)
- {
- i_exp_base = d_taylor_real_fv;
- }
- else
- {
- i_exp_base = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
- MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
- }
- Node i_derv = fac;
- Node i_fact = d_one;
- Node i_exp = d_one;
- int i_derv_status = 0;
- unsigned counter = 0;
- std::vector<Node> sum;
- do
- {
- counter++;
- if (fa.getKind() == EXPONENTIAL)
- {
- // unchanged
- }
- else if (fa.getKind() == SINE)
- {
- if (i_derv_status % 2 == 1)
- {
- Node arg = NodeManager::currentNM()->mkNode(
- PLUS, d_pi_2, d_taylor_real_fv_base);
- i_derv = NodeManager::currentNM()->mkNode(SINE, arg);
- }
- else
- {
- i_derv = fa;
- }
- if (i_derv_status >= 2)
- {
- i_derv = NodeManager::currentNM()->mkNode(MINUS, d_zero, i_derv);
- }
- i_derv = Rewriter::rewrite(i_derv);
- i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
- }
- if (counter == (n + 1))
- {
- TNode x = d_taylor_real_fv_base;
- i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
- }
- Node curr = NodeManager::currentNM()->mkNode(
- MULT,
- NodeManager::currentNM()->mkNode(DIVISION, i_derv, i_fact),
- i_exp);
- if (counter == (n + 1))
- {
- taylor_rem = curr;
- }
- else
- {
- sum.push_back(curr);
- i_fact = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
- MULT,
- NodeManager::currentNM()->mkConst(Rational(counter)),
- i_fact));
- i_exp = Rewriter::rewrite(
- NodeManager::currentNM()->mkNode(MULT, i_exp_base, i_exp));
- }
- } while (counter <= n);
- taylor_sum =
- sum.size() == 1 ? sum[0] : NodeManager::currentNM()->mkNode(PLUS, sum);
-
- if (fac[0] != d_taylor_real_fv_base)
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fac[0]);
- }
-
- // cache
- d_taylor_sum[fac][n] = taylor_sum;
- d_taylor_rem[fac][n] = taylor_rem;
- }
- else
- {
- taylor_sum = itt->second;
- Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
- taylor_rem = d_taylor_rem[fac][n];
- }
-
- // must substitute for the argument if we were using a different lookup
- if (fa[0] != fac[0])
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fa[0]);
- }
- return std::pair<Node, Node>(taylor_sum, taylor_rem);
-}
-
-void NonlinearExtension::getPolynomialApproximationBounds(
- Kind k, unsigned d, std::vector<Node>& pbounds)
-{
- if (d_poly_bounds[k][d].empty())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- // n is the Taylor degree we are currently considering
- unsigned n = 2 * d;
- // n must be even
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- Trace("nl-ext-tftp-debug2") << "Taylor for " << k
- << " is : " << taylor.first << std::endl;
- Node taylor_sum = Rewriter::rewrite(taylor.first);
- Trace("nl-ext-tftp-debug2") << "Taylor for " << k
- << " is (post-rewrite) : " << taylor_sum
- << std::endl;
- Assert(taylor.second.getKind() == MULT);
- Assert(taylor.second.getNumChildren() == 2);
- Assert(taylor.second[0].getKind() == DIVISION);
- Trace("nl-ext-tftp-debug2") << "Taylor remainder for " << k << " is "
- << taylor.second << std::endl;
- // ru is x^{n+1}/(n+1)!
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- ru = Rewriter::rewrite(ru);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
- if (k == EXPONENTIAL)
- {
- pbounds.push_back(taylor_sum);
- pbounds.push_back(taylor_sum);
- pbounds.push_back(Rewriter::rewrite(
- nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
- pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
- }
- else
- {
- Assert(k == SINE);
- Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
- Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
- pbounds.push_back(l);
- pbounds.push_back(l);
- pbounds.push_back(u);
- pbounds.push_back(u);
- }
- Trace("nl-ext-tf-tplanes") << "Polynomial approximation for " << k
- << " is: " << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
- d_poly_bounds[k][d].insert(
- d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
- }
- else
- {
- pbounds.insert(
- pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
- }
-}
-
-void NonlinearExtension::getPolynomialApproximationBoundForArg(
- Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
-{
- getPolynomialApproximationBounds(k, d, pbounds);
- Assert(c.isConst());
- if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- bool success = false;
- unsigned ds = d;
- TNode ttrf = d_taylor_real_fv;
- TNode tc = c;
- do
- {
- success = true;
- unsigned n = 2 * ds;
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- // check that 1-c^{n+1}/(n+1)! > 0
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- Node rus = ru.substitute(ttrf, tc);
- rus = Rewriter::rewrite(rus);
- Assert(rus.isConst());
- if (rus.getConst<Rational>() > d_one.getConst<Rational>())
- {
- success = false;
- ds = ds + 1;
- }
- } while (!success);
- if (ds > d)
- {
- Trace("nl-ext-exp-taylor")
- << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
- << k << " " << c << ")" << std::endl;
- // must use sound upper bound
- std::vector<Node> pboundss;
- getPolynomialApproximationBounds(k, ds, pboundss);
- pbounds[2] = pboundss[2];
- }
- }
-}
-
-std::pair<Node, Node> NonlinearExtension::getTfModelBounds(Node tf, unsigned d)
-{
- // compute the model value of the argument
- Node c = d_model.computeAbstractModelValue(tf[0]);
- Assert(c.isConst());
- int csign = c.getConst<Rational>().sgn();
- Kind k = tf.getKind();
- if (csign == 0)
- {
- // at zero, its trivial
- if (k == SINE)
- {
- return std::pair<Node, Node>(d_zero, d_zero);
- }
- Assert(k == EXPONENTIAL);
- return std::pair<Node, Node>(d_one, d_one);
- }
- bool isNeg = csign == -1;
-
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
-
- std::vector<Node> bounds;
- TNode tfv = d_taylor_real_fv;
- TNode tfs = tf[0];
- for (unsigned d2 = 0; d2 < 2; d2++)
- {
- int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
- Node pab = pbounds[index];
- if (!pab.isNull())
- {
- // { x -> tf[0] }
- pab = pab.substitute(tfv, tfs);
- pab = Rewriter::rewrite(pab);
- Node v_pab = d_model.computeAbstractModelValue(pab);
- bounds.push_back(v_pab);
- }
- else
- {
- bounds.push_back(Node::null());
- }
- }
- return std::pair<Node, Node>(bounds[0], bounds[1]);
-}
} // namespace arith
} // namespace theory
diff --git a/src/theory/arith/nonlinear_extension.h b/src/theory/arith/nonlinear_extension.h
index 19810730f..bfc713b12 100644
--- a/src/theory/arith/nonlinear_extension.h
+++ b/src/theory/arith/nonlinear_extension.h
@@ -37,6 +37,7 @@
#include "theory/arith/nl_lemma_utils.h"
#include "theory/arith/nl_model.h"
#include "theory/arith/theory_arith.h"
+#include "theory/arith/transcendental_solver.h"
#include "theory/uf/equality_engine.h"
namespace CVC4 {
@@ -253,7 +254,6 @@ class NonlinearExtension {
static Node mkLit(Node a, Node b, int status, bool isAbsolute = false);
static Node mkAbs(Node a);
static Node mkValidPhase(Node a, Node pi);
- static Node mkBounded( Node l, Node a, Node u );
Node mkMonomialRemFactor(Node n, const NodeMultiset& n_exp_rem) const;
//---------------------------------------end term utilities
@@ -449,21 +449,6 @@ class NonlinearExtension {
Node d_two;
Node d_true;
Node d_false;
- /** PI
- *
- * Note that PI is a (symbolic, non-constant) nullary operator. This is
- * because its value cannot be computed exactly. We constraint PI to concrete
- * lower and upper bounds stored in d_pi_bound below.
- */
- Node d_pi;
- /** PI/2 */
- Node d_pi_2;
- /** -PI/2 */
- Node d_pi_neg_2;
- /** -PI */
- Node d_pi_neg;
- /** the concrete lower and upper bounds for PI */
- Node d_pi_bound[2];
// The theory of arithmetic containing this extension.
TheoryArith& d_containing;
@@ -488,6 +473,12 @@ class NonlinearExtension {
* and for establishing when we are able to answer "SAT".
*/
NlModel d_model;
+ /** The transcendental extension object
+ *
+ * This is the subsolver responsible for running the procedure for
+ * transcendental functions.
+ */
+ TranscendentalSolver d_trSlv;
/**
* The lemmas we computed during collectModelInfo. We store two vectors of
* lemmas to be sent out on the output channel of TheoryArith. The first
@@ -509,27 +500,6 @@ class NonlinearExtension {
std::map<Node, unsigned> d_order_vars;
std::vector<Node> d_order_points;
- //transcendental functions
- /**
- * Some transcendental functions f(t) are "purified", e.g. we add
- * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
- * purified we call "master terms".
- *
- * The maps below maintain a master/slave relationship over
- * transcendental functions (SINE, EXPONENTIAL, PI), where above
- * f(y) is the master of itself and of f(t).
- *
- * This is used for ensuring that the argument y of SINE we process is on the
- * interval [-pi .. pi], and that exponentials are not applied to arguments
- * that contain transcendental functions.
- */
- std::map<Node, Node> d_trMaster;
- std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
- /** The transcendental functions we have done initial refinements on */
- std::map< Node, bool > d_tf_initial_refine;
-
- void mkPi();
- void getCurrentPiBounds( std::vector< Node >& lemmas );
private:
//per last-call effort check
@@ -552,25 +522,6 @@ class NonlinearExtension {
std::map<Node, std::map<Node, std::map<Node, Node> > > d_ci_exp;
std::map<Node, std::map<Node, std::map<Node, bool> > > d_ci_max;
- /**
- * Maps representives of a congruence class to the members of that class.
- *
- * In detail, a congruence class is a set of terms of the form
- * { f(t1), ..., f(tn) }
- * such that t1 = ... = tn in the current context. We choose an arbitrary
- * term among these to be the repesentative of this congruence class.
- *
- * Moreover, notice we compute congruence classes only over terms that
- * are transcendental function applications that are "master terms",
- * see d_trMaster/d_trSlave.
- */
- std::map<Node, std::vector<Node> > d_funcCongClass;
- /**
- * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
- * that are representives of their congruence class.
- */
- std::map<Kind, std::vector<Node> > d_funcMap;
-
// factor skolems
std::map< Node, Node > d_factor_skolem;
Node getFactorSkolem( Node n, std::vector< Node >& lemmas );
@@ -578,96 +529,6 @@ class NonlinearExtension {
// tangent plane bounds
std::map< Node, std::map< Node, Node > > d_tangent_val_bound[4];
- /** secant points (sorted list) for transcendental functions
- *
- * This is used for tangent plane refinements for
- * transcendental functions. This is the set
- * "get-previous-secant-points" in "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017, for
- * each transcendental function application. We store this set for each
- * Taylor degree.
- */
- std::unordered_map<Node,
- std::map<unsigned, std::vector<Node> >,
- NodeHashFunction>
- d_secant_points;
-
- /** get Taylor series of degree n for function fa centered around point fa[0].
- *
- * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
- * the first part of the pair is the Taylor series expansion :
- * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
- * and the second part of the pair is the Taylor series remainder :
- * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
- *
- * The above values are cached for each (f,n) for a fixed variable "a".
- * To compute the Taylor series for fa, we compute the Taylor series
- * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
- * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
- * if fa[0]=0.
- * In the latter case, note we compute the exponential x^{n+1}
- * instead of (x-a)^{n+1}, which can be done faster.
- */
- std::pair<Node, Node> getTaylor(Node fa, unsigned n);
-
- /** internal variables used for constructing (cached) versions of the Taylor
- * series above.
- */
- Node d_taylor_real_fv; // x above
- Node d_taylor_real_fv_base; // a above
- Node d_taylor_real_fv_base_rem; // b above
-
- /** cache of sum and remainder terms for getTaylor */
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_sum;
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_rem;
- /** taylor degree
- *
- * Indicates that the degree of the polynomials in the Taylor approximation of
- * all transcendental functions is 2*d_taylor_degree. This value is set
- * initially to options::nlExtTfTaylorDegree() and may be incremented
- * if the option options::nlExtTfIncPrecision() is enabled.
- */
- unsigned d_taylor_degree;
- /** polynomial approximation bounds
- *
- * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
- * d_taylor_real_fv. These are polynomial approximations of the Taylor series
- * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
- * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
- * for positive/negative (+/-) values of the argument of <k>( 0 ).
- *
- * Notice that for certain bounds (e.g. upper bounds for exponential), the
- * Taylor approximation for a fixed degree is only sound up to a given
- * upper bound on the argument. To obtain sound lower/upper bounds for a
- * given <k>( c ), use the function below.
- */
- void getPolynomialApproximationBounds(Kind k,
- unsigned d,
- std::vector<Node>& pbounds);
- /** polynomial approximation bounds
- *
- * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
- * that are sound (lower, upper) bounds for <k>( c ). Notice that these
- * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
- * c>0, we return the P_u+[x] from the function above for the minimum degree
- * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
- */
- void getPolynomialApproximationBoundForArg(Kind k,
- Node c,
- unsigned d,
- std::vector<Node>& pbounds);
- /** cache of the above function */
- std::map<Kind, std::map<unsigned, std::vector<Node> > > d_poly_bounds;
- /** get transcendental function model bounds
- *
- * This returns the current lower and upper bounds of transcendental
- * function application tf based on Taylor of degree 2*d, which is dependent
- * on the model value of its argument.
- */
- std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
/** get approximate sqrt
*
* This approximates the square root of positive constant c. If this method
@@ -680,70 +541,6 @@ class NonlinearExtension {
*/
bool getApproximateSqrt(Node c, Node& l, Node& u, unsigned iter = 15) const;
- /** concavity region for transcendental functions
- *
- * This stores an integer that identifies an interval in
- * which the current model value for an argument of an
- * application of a transcendental function resides.
- *
- * For exp( x ):
- * region #1 is -infty < x < infty
- * For sin( x ):
- * region #0 is pi < x < infty (this is an invalid region)
- * region #1 is pi/2 < x <= pi
- * region #2 is 0 < x <= pi/2
- * region #3 is -pi/2 < x <= 0
- * region #4 is -pi < x <= -pi/2
- * region #5 is -infty < x <= -pi (this is an invalid region)
- * All regions not listed above, as well as regions 0 and 5
- * for SINE are "invalid". We only process applications
- * of transcendental functions whose arguments have model
- * values that reside in valid regions.
- */
- std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
- /** get monotonicity direction
- *
- * Returns whether the slope is positive (+1) or negative(-1)
- * in region of transcendental function with kind k.
- * Returns 0 if region is invalid.
- */
- int regionToMonotonicityDir(Kind k, int region);
- /** get concavity
- *
- * Returns whether we are concave (+1) or convex (-1)
- * in region of transcendental function with kind k,
- * where region is defined above.
- * Returns 0 if region is invalid.
- */
- int regionToConcavity(Kind k, int region);
- /** region to lower bound
- *
- * Returns the term corresponding to the lower
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no lower bound for the
- * region.
- */
- Node regionToLowerBound(Kind k, int region);
- /** region to upper bound
- *
- * Returns the term corresponding to the upper
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no upper bound for the
- * region.
- */
- Node regionToUpperBound(Kind k, int region);
- /** get derivative
- *
- * Returns d/dx n. Supports cases of n
- * for transcendental functions applied to x,
- * multiplication, addition, constants and variables.
- * Returns Node::null() if derivative is an
- * unhandled case.
- */
- Node getDerivative(Node n, Node x);
-
private:
//-------------------------------------------- lemma schemas
/** check split zero
@@ -873,122 +670,6 @@ class NonlinearExtension {
*/
std::vector<Node> checkTangentPlanes();
- /** check transcendental initial refine
- *
- * Returns a set of valid theory lemmas, based on
- * simple facts about transcendental functions.
- * This mostly follows the initial axioms described in
- * Section 4 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- *
- * Examples:
- *
- * sin( x ) = -sin( -x )
- * ( PI > x > 0 ) => 0 < sin( x ) < 1
- * exp( x )>0
- * x<0 => exp( x )<1
- */
- std::vector<Node> checkTranscendentalInitialRefine();
-
- /** check transcendental monotonic
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma scheme that ensures that applications
- * of transcendental functions respect monotonicity.
- *
- * Examples:
- *
- * x > y => exp( x ) > exp( y )
- * PI/2 > x > y > 0 => sin( x ) > sin( y )
- * PI > x > y > PI/2 => sin( x ) < sin( y )
- */
- std::vector<Node> checkTranscendentalMonotonic();
-
- /** check transcendental tangent planes
- *
- * Returns a set of valid theory lemmas, based on
- * computing an "incremental linearization" of
- * transcendental functions based on the model values
- * of transcendental functions and their arguments.
- * It is based on Figure 3 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- * This schema is not terminating in general.
- * It is not enabled by default, and can
- * be enabled by --nl-ext-tf-tplanes.
- *
- * Example:
- *
- * Assume we have a term sin(y) where M( y ) = 1 where M is the current model.
- * Note that:
- * sin(1) ~= .841471
- *
- * The Taylor series and remainder of sin(y) of degree 7 is
- * P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/20)*x^5
- * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
- *
- * This gives us lower and upper bounds :
- * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
- * ...where note P_u( 1 ) = 4243/5040 ~= .841865
- * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
- * ...where note P_l( 1 ) = 4241/5040 ~= .841468
- *
- * Assume that M( sin(y) ) > P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a tangent plane refinement.
- * The tangent plane at the point 1 in P_u is
- * given by the formula:
- * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
- * We add the lemma:
- * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
- * which is:
- * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
- *
- * Assume that M( sin(y) ) < P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a secant plane refinement for some constants ( l, u )
- * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
- * l = 0 and u = M( PI/2 ) = 150517/47912.
- * The secant planes at point 1 for P_l
- * are given by the formulas:
- * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
- * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
- * We add the lemmas:
- * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
- * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
- * which are:
- * ( 0 < y < 1 ) => (sin y) >= 4251/5040*y
- * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
- * where c1, c2 are rationals (for brevity, omitted here)
- * such that c1 ~= .277 and c2 ~= 2.032.
- *
- * The argument lemSE is the "side effect" of the lemmas in the return
- * value of this function (for details, see checkLastCall).
- */
- std::vector<Node> checkTranscendentalTangentPlanes(
- std::map<Node, NlLemmaSideEffect>& lemSE);
- /** check transcendental function refinement for tf
- *
- * This method is called by the above method for each "master"
- * transcendental function application that occurs in an assertion in the
- * current context. For example, an application like sin(t) is not a master
- * if we have introduced the constraints:
- * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
- * See d_trMaster/d_trSlaves for more detail.
- *
- * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
- * function application tf for Taylor degree d. It may add a secant or
- * tangent plane lemma to lems and its side effect (if one exists)
- * to lemSE.
- *
- * It returns false if the bounds are not precise enough to add a
- * secant or tangent plane lemma.
- */
- bool checkTfTangentPlanesFun(Node tf,
- unsigned d,
- std::vector<Node>& lems,
- std::map<Node, NlLemmaSideEffect>& lemSE);
//-------------------------------------------- end lemma schemas
}; /* class NonlinearExtension */
diff --git a/src/theory/arith/transcendental_solver.cpp b/src/theory/arith/transcendental_solver.cpp
new file mode 100644
index 000000000..665accc0a
--- /dev/null
+++ b/src/theory/arith/transcendental_solver.cpp
@@ -0,0 +1,1475 @@
+/********************* */
+/*! \file transcendental_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of solver for handling transcendental functions.
+ **/
+
+#include "theory/arith/transcendental_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+TranscendentalSolver::TranscendentalSolver(NlModel& m) : d_model(m)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ d_true = nm->mkConst(true);
+ d_false = nm->mkConst(false);
+ d_zero = nm->mkConst(Rational(0));
+ d_one = nm->mkConst(Rational(1));
+ d_neg_one = nm->mkConst(Rational(-1));
+ d_taylor_real_fv = nm->mkBoundVar("x", nm->realType());
+ d_taylor_real_fv_base = nm->mkBoundVar("a", nm->realType());
+ d_taylor_real_fv_base_rem = nm->mkBoundVar("b", nm->realType());
+ d_taylor_degree = options::nlExtTfTaylorDegree();
+}
+
+TranscendentalSolver::~TranscendentalSolver() {}
+
+void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts,
+ std::vector<Node>& lems,
+ std::vector<Node>& lemsPp)
+{
+ d_funcCongClass.clear();
+ d_funcMap.clear();
+ d_tf_region.clear();
+
+ NodeManager* nm = NodeManager::currentNM();
+
+ // register the extended function terms
+ std::vector<Node> trNeedsMaster;
+ bool needPi = false;
+ // for computing congruence
+ std::map<Kind, ArgTrie> argTrie;
+ for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
+ {
+ Node a = xts[i];
+ Kind ak = a.getKind();
+ bool consider = true;
+ // if it is an unpurified application of SINE, or it is a transcendental
+ // applied to a transcendental, purify.
+ if (isTranscendentalKind(ak))
+ {
+ // if we've already computed master for a
+ if (d_trMaster.find(a) != d_trMaster.end())
+ {
+ // a master has at least one slave
+ consider = (d_trSlaves.find(a) != d_trSlaves.end());
+ }
+ else
+ {
+ if (ak == SINE)
+ {
+ // always not a master
+ consider = false;
+ }
+ else
+ {
+ for (const Node& ac : a)
+ {
+ if (isTranscendentalKind(ac.getKind()))
+ {
+ consider = false;
+ break;
+ }
+ }
+ }
+ if (!consider)
+ {
+ // wait to assign a master below
+ trNeedsMaster.push_back(a);
+ }
+ else
+ {
+ d_trMaster[a] = a;
+ d_trSlaves[a].insert(a);
+ }
+ }
+ }
+ if (ak == EXPONENTIAL || ak == SINE)
+ {
+ needPi = needPi || (ak == SINE);
+ // if we didn't indicate that it should be purified above
+ if (consider)
+ {
+ std::vector<Node> repList;
+ for (const Node& ac : a)
+ {
+ Node r = d_model.computeConcreteModelValue(ac);
+ repList.push_back(r);
+ }
+ Node aa = argTrie[ak].add(a, repList);
+ if (aa != a)
+ {
+ // apply congruence to pairs of terms that are disequal and congruent
+ Assert(aa.getNumChildren() == a.getNumChildren());
+ Node mvaa = d_model.computeAbstractModelValue(a);
+ Node mvaaa = d_model.computeAbstractModelValue(aa);
+ if (mvaa != mvaaa)
+ {
+ std::vector<Node> exp;
+ for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
+ {
+ exp.push_back(a[j].eqNode(aa[j]));
+ }
+ Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
+ Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
+ lems.push_back(cong_lemma);
+ }
+ }
+ else
+ {
+ // new representative of congruence class
+ d_funcMap[ak].push_back(a);
+ }
+ // add to congruence class
+ d_funcCongClass[aa].push_back(a);
+ }
+ }
+ else if (ak == PI)
+ {
+ Assert(consider);
+ needPi = true;
+ d_funcMap[ak].push_back(a);
+ d_funcCongClass[a].push_back(a);
+ }
+ }
+ // initialize pi if necessary
+ if (needPi && d_pi.isNull())
+ {
+ mkPi();
+ getCurrentPiBounds(lems);
+ }
+
+ if (!lems.empty())
+ {
+ return;
+ }
+
+ // process SINE phase shifting
+ for (const Node& a : trNeedsMaster)
+ {
+ // should not have processed this already
+ Assert(d_trMaster.find(a) == d_trMaster.end());
+ Kind k = a.getKind();
+ Assert(k == SINE || k == EXPONENTIAL);
+ Node y =
+ nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
+ Node new_a = nm->mkNode(k, y);
+ d_trSlaves[new_a].insert(new_a);
+ d_trSlaves[new_a].insert(a);
+ d_trMaster[a] = new_a;
+ d_trMaster[new_a] = new_a;
+ Node lem;
+ if (k == SINE)
+ {
+ Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
+ << std::endl;
+ Assert(!d_pi.isNull());
+ Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
+ // TODO : do not introduce shift here, instead needs model-based
+ // refinement for constant shifts (cvc4-projects #1284)
+ lem = nm->mkNode(
+ AND,
+ mkValidPhase(y, d_pi),
+ nm->mkNode(
+ ITE,
+ mkValidPhase(a[0], d_pi),
+ a[0].eqNode(y),
+ a[0].eqNode(nm->mkNode(
+ PLUS,
+ y,
+ nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
+ new_a.eqNode(a));
+ }
+ else
+ {
+ // do both equalities to ensure that new_a becomes a preregistered term
+ lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
+ }
+ // note we must do preprocess on this lemma
+ Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
+ << std::endl;
+ lemsPp.push_back(lem);
+ }
+
+ if (Trace.isOn("nl-ext-mv"))
+ {
+ Trace("nl-ext-mv") << "Arguments of trancendental functions : "
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (k == SINE || k == EXPONENTIAL)
+ {
+ for (const Node& tf : tfl.second)
+ {
+ Node v = tf[0];
+ d_model.computeConcreteModelValue(v);
+ d_model.computeAbstractModelValue(v);
+ d_model.printModelValue("nl-ext-mv", v);
+ }
+ }
+ }
+ }
+}
+
+bool TranscendentalSolver::preprocessAssertionsCheckModel(
+ std::vector<Node>& assertions)
+{
+ std::vector<Node> pvars;
+ std::vector<Node> psubs;
+ for (const std::pair<const Node, Node>& tb : d_trMaster)
+ {
+ pvars.push_back(tb.first);
+ psubs.push_back(tb.second);
+ }
+
+ // initialize representation of assertions
+ std::vector<Node> passertions;
+ for (const Node& a : assertions)
+
+ {
+ Node pa = a;
+ if (!pvars.empty())
+ {
+ pa = arithSubstitute(pa, pvars, psubs);
+ pa = Rewriter::rewrite(pa);
+ }
+ if (!pa.isConst() || !pa.getConst<bool>())
+ {
+ Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
+ passertions.push_back(pa);
+ }
+ }
+ // get model bounds for all transcendental functions
+ Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
+ {
+ Kind k = tfs.first;
+ for (const Node& tf : tfs.second)
+ {
+ Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
+ bool success = true;
+ // tf is Figure 3 : tf( x )
+ Node bl;
+ Node bu;
+ if (k == PI)
+ {
+ bl = d_pi_bound[0];
+ bu = d_pi_bound[1];
+ }
+ else
+ {
+ std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
+ bl = bounds.first;
+ bu = bounds.second;
+ if (bl != bu)
+ {
+ d_model.setUsedApproximate();
+ }
+ }
+ if (!bl.isNull() && !bu.isNull())
+ {
+ // for each function in the congruence class
+ for (const Node& ctf : d_funcCongClass[tf])
+ {
+ // each term in congruence classes should be master terms
+ Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
+ // we set the bounds for each slave of tf
+ for (const Node& stf : d_trSlaves[ctf])
+ {
+ Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
+ << bu << "]" << std::endl;
+ success = d_model.addCheckModelBound(stf, bl, bu);
+ }
+ }
+ }
+ else
+ {
+ Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
+ }
+ if (!success)
+ {
+ // a bound was conflicting
+ Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
+ Trace("nl-ext-cm") << "-----" << std::endl;
+ return false;
+ }
+ }
+ }
+ // replace the assertions
+ assertions = passertions;
+ return true;
+}
+
+void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
+unsigned TranscendentalSolver::getTaylorDegree() const
+{
+ return d_taylor_degree;
+}
+
+void TranscendentalSolver::processSideEffect(const NlLemmaSideEffect& se)
+{
+ for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
+ {
+ Node tf = std::get<0>(sp);
+ unsigned d = std::get<1>(sp);
+ Node c = std::get<2>(sp);
+ d_secant_points[tf][d].push_back(c);
+ }
+}
+
+void TranscendentalSolver::mkPi()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ if (d_pi.isNull())
+ {
+ d_pi = nm->mkNullaryOperator(nm->realType(), PI);
+ d_pi_2 = Rewriter::rewrite(
+ nm->mkNode(MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
+ d_pi_neg_2 = Rewriter::rewrite(
+ nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
+ d_pi_neg =
+ Rewriter::rewrite(nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1))));
+ // initialize bounds
+ d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
+ d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
+ }
+}
+
+void TranscendentalSolver::getCurrentPiBounds(std::vector<Node>& lemmas)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Node pi_lem = nm->mkNode(AND,
+ nm->mkNode(GEQ, d_pi, d_pi_bound[0]),
+ nm->mkNode(LEQ, d_pi, d_pi_bound[1]));
+ lemmas.push_back(pi_lem);
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ std::vector<Node> lemmas;
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ if (k == SINE)
+ {
+ Node symn = nm->mkNode(SINE, nm->mkNode(MULT, d_neg_one, t[0]));
+ symn = Rewriter::rewrite(symn);
+ // Can assume it is its own master since phase is split over 0,
+ // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
+ d_trMaster[symn] = symn;
+ d_trSlaves[symn].insert(symn);
+ Assert(d_trSlaves.find(t) != d_trSlaves.end());
+ std::vector<Node> children;
+
+ lem = nm->mkNode(AND,
+ // bounds
+ nm->mkNode(AND,
+ nm->mkNode(LEQ, t, d_one),
+ nm->mkNode(GEQ, t, d_neg_one)),
+ // symmetry
+ nm->mkNode(PLUS, t, symn).eqNode(d_zero),
+ // sign
+ nm->mkNode(EQUAL,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(LT, t, d_zero)),
+ // zero val
+ nm->mkNode(EQUAL,
+ nm->mkNode(GT, t[0], d_zero),
+ nm->mkNode(GT, t, d_zero)));
+ lem = nm->mkNode(
+ AND,
+ lem,
+ // zero tangent
+ nm->mkNode(AND,
+ nm->mkNode(IMPLIES,
+ nm->mkNode(GT, t[0], d_zero),
+ nm->mkNode(LT, t, t[0])),
+ nm->mkNode(IMPLIES,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(GT, t, t[0]))),
+ // pi tangent
+ nm->mkNode(
+ AND,
+ nm->mkNode(IMPLIES,
+ nm->mkNode(LT, t[0], d_pi),
+ nm->mkNode(LT, t, nm->mkNode(MINUS, d_pi, t[0]))),
+ nm->mkNode(
+ IMPLIES,
+ nm->mkNode(GT, t[0], d_pi_neg),
+ nm->mkNode(GT, t, nm->mkNode(MINUS, d_pi_neg, t[0])))));
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
+ // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
+ lem = nm->mkNode(
+ AND,
+ nm->mkNode(GT, t, d_zero),
+ nm->mkNode(EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
+ nm->mkNode(EQUAL,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(LT, t, d_one)),
+ nm->mkNode(OR,
+ nm->mkNode(LEQ, t[0], d_zero),
+ nm->mkNode(GT, t, nm->mkNode(PLUS, t[0], d_one))));
+ }
+ if (!lem.isNull())
+ {
+ lemmas.push_back(lem);
+ }
+ }
+ }
+ }
+
+ return lemmas;
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalMonotonic()
+{
+ std::vector<Node> lemmas;
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ // sort arguments of all transcendentals
+ std::map<Kind, std::vector<Node> > sorted_tf_args;
+ std::map<Kind, std::map<Node, Node> > tf_arg_to_term;
+
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (k == EXPONENTIAL || k == SINE)
+ {
+ for (const Node& tf : tfl.second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
+ sorted_tf_args[k].push_back(a);
+ tf_arg_to_term[k][a] = tf;
+ }
+ }
+ }
+ }
+
+ SortNlModel smv;
+ smv.d_nlm = &d_model;
+ // sort by concrete values
+ smv.d_isConcrete = true;
+ smv.d_reverse_order = true;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (!sorted_tf_args[k].empty())
+ {
+ std::sort(sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv);
+ Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k
+ << " : " << std::endl;
+ for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
+ {
+ Node targ = sorted_tf_args[k][i];
+ Node mvatarg = d_model.computeAbstractModelValue(targ);
+ Trace("nl-ext-tf-mono")
+ << " " << targ << " -> " << mvatarg << std::endl;
+ Node t = tf_arg_to_term[k][targ];
+ Node mvat = d_model.computeAbstractModelValue(t);
+ Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
+ }
+ std::vector<Node> mpoints;
+ std::vector<Node> mpoints_vals;
+ if (k == SINE)
+ {
+ mpoints.push_back(d_pi);
+ mpoints.push_back(d_pi_2);
+ mpoints.push_back(d_zero);
+ mpoints.push_back(d_pi_neg_2);
+ mpoints.push_back(d_pi_neg);
+ }
+ else if (k == EXPONENTIAL)
+ {
+ mpoints.push_back(Node::null());
+ }
+ if (!mpoints.empty())
+ {
+ // get model values for points
+ for (unsigned i = 0; i < mpoints.size(); i++)
+ {
+ Node mpv;
+ if (!mpoints[i].isNull())
+ {
+ mpv = d_model.computeAbstractModelValue(mpoints[i]);
+ Assert(mpv.isConst());
+ }
+ mpoints_vals.push_back(mpv);
+ }
+
+ unsigned mdir_index = 0;
+ int monotonic_dir = -1;
+ Node mono_bounds[2];
+ Node targ, targval, t, tval;
+ for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
+ {
+ Node sarg = sorted_tf_args[k][i];
+ Node sargval = d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[k][sarg];
+ Node sval = d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // increment to the proper monotonicity region
+ bool increment = true;
+ while (increment && mdir_index < mpoints.size())
+ {
+ increment = false;
+ if (mpoints[mdir_index].isNull())
+ {
+ increment = true;
+ }
+ else
+ {
+ Node pval = mpoints_vals[mdir_index];
+ Assert(pval.isConst());
+ if (sargval.getConst<Rational>() < pval.getConst<Rational>())
+ {
+ increment = true;
+ Trace("nl-ext-tf-mono") << "...increment at " << sarg
+ << " since model value is less than "
+ << mpoints[mdir_index] << std::endl;
+ }
+ }
+ if (increment)
+ {
+ tval = Node::null();
+ mono_bounds[1] = mpoints[mdir_index];
+ mdir_index++;
+ monotonic_dir = regionToMonotonicityDir(k, mdir_index);
+ if (mdir_index < mpoints.size())
+ {
+ mono_bounds[0] = mpoints[mdir_index];
+ }
+ else
+ {
+ mono_bounds[0] = Node::null();
+ }
+ }
+ }
+ // store the concavity region
+ d_tf_region[s] = mdir_index;
+ Trace("nl-ext-concavity") << "Transcendental function " << s
+ << " is in region #" << mdir_index;
+ Trace("nl-ext-concavity")
+ << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem;
+ if (monotonic_dir == 1
+ && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(
+ IMPLIES, nm->mkNode(GEQ, targ, sarg), nm->mkNode(GEQ, t, s));
+ }
+ else if (monotonic_dir == -1
+ && sval.getConst<Rational>() < tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(
+ IMPLIES, nm->mkNode(LEQ, targ, sarg), nm->mkNode(LEQ, t, s));
+ }
+ if (!mono_lem.isNull())
+ {
+ if (!mono_bounds[0].isNull())
+ {
+ Assert(!mono_bounds[1].isNull());
+ mono_lem = nm->mkNode(
+ IMPLIES,
+ nm->mkNode(AND,
+ mkBounded(mono_bounds[0], targ, mono_bounds[1]),
+ mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
+ mono_lem);
+ }
+ Trace("nl-ext-tf-mono")
+ << "Monotonicity lemma : " << mono_lem << std::endl;
+ lemmas.push_back(mono_lem);
+ }
+ }
+ // store the previous values
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+ }
+ }
+ }
+ return lemmas;
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalTangentPlanes(
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ std::vector<Node> lemmas;
+ Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
+ << std::endl;
+ // this implements Figure 3 of "Satisfiability Modulo Transcendental Functions
+ // via Incremental Linearization" by Cimatti et al
+ for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
+ {
+ Kind k = tfs.first;
+ if (k == PI)
+ {
+ // We do not use Taylor approximation for PI currently.
+ // This is because the convergence is extremely slow, and hence an
+ // initial approximation is superior.
+ continue;
+ }
+ Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
+ << std::endl;
+ Trace("nl-ext-tftp-debug2") << std::endl;
+
+ // we substitute into the Taylor sum P_{n,f(0)}( x )
+
+ for (const Node& tf : tfs.second)
+ {
+ // tf is Figure 3 : tf( x )
+ Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
+ // go until max degree is reached, or we don't meet bound criteria
+ for (unsigned d = 1; d <= d_taylor_degree; d++)
+ {
+ Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
+ unsigned prev = lemmas.size();
+ if (checkTfTangentPlanesFun(tf, d, lemmas, lemSE))
+ {
+ Trace("nl-ext-tftp")
+ << "...fail, #lemmas = " << (lemmas.size() - prev) << std::endl;
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp") << "...success" << std::endl;
+ }
+ }
+ }
+ }
+
+ return lemmas;
+}
+
+bool TranscendentalSolver::checkTfTangentPlanesFun(
+ Node tf,
+ unsigned d,
+ std::vector<Node>& lemmas,
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Kind k = tf.getKind();
+ // this should only be run on master applications
+ Assert(d_trSlaves.find(tf) != d_trSlaves.end());
+
+ // Figure 3 : c
+ Node c = d_model.computeAbstractModelValue(tf[0]);
+ int csign = c.getConst<Rational>().sgn();
+ if (csign == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+ Assert(csign == 1 || csign == -1);
+
+ // Figure 3: P_l, P_u
+ // mapped to for signs of c
+ std::map<int, Node> poly_approx_bounds[2];
+ std::vector<Node> pbounds;
+ getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+ poly_approx_bounds[0][1] = pbounds[0];
+ poly_approx_bounds[0][-1] = pbounds[1];
+ poly_approx_bounds[1][1] = pbounds[2];
+ poly_approx_bounds[1][-1] = pbounds[3];
+
+ // Figure 3 : v
+ Node v = d_model.computeAbstractModelValue(tf);
+
+ // check value of tf
+ Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
+ << ", degree " << d << "..." << std::endl;
+ Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
+ Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
+
+ std::vector<Node> taylor_vars;
+ taylor_vars.push_back(d_taylor_real_fv);
+
+ // compute the concavity
+ int region = -1;
+ std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
+ d_tf_region.find(tf);
+ if (itr != d_tf_region.end())
+ {
+ region = itr->second;
+ Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
+ }
+ // Figure 3 : conc
+ int concavity = regionToConcavity(k, itr->second);
+ Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
+ if (concavity == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+ // bounds for which we are this concavity
+ // Figure 3: < l, u >
+ Node bounds[2];
+ if (k == SINE)
+ {
+ bounds[0] = regionToLowerBound(k, region);
+ Assert(!bounds[0].isNull());
+ bounds[1] = regionToUpperBound(k, region);
+ Assert(!bounds[1].isNull());
+ }
+
+ // Figure 3: P
+ Node poly_approx;
+
+ // compute whether this is a tangent refinement or a secant refinement
+ bool is_tangent = false;
+ bool is_secant = false;
+ std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
+ for (unsigned r = 0; r < 2; r++)
+ {
+ Node pab = poly_approx_bounds[r][csign];
+ Node v_pab = r == 0 ? mvb.first : mvb.second;
+ if (!v_pab.isNull())
+ {
+ Trace("nl-ext-tftp-debug2")
+ << "...model value of " << pab << " is " << v_pab << std::endl;
+
+ Assert(v_pab.isConst());
+ Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
+ Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
+ Node compr = Rewriter::rewrite(comp);
+ Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
+ if (compr == d_true)
+ {
+ // beyond the bounds
+ if (r == 0)
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == 1;
+ is_secant = concavity == -1;
+ }
+ else
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == -1;
+ is_secant = concavity == 1;
+ }
+ if (Trace.isOn("nl-ext-tftp"))
+ {
+ Trace("nl-ext-tftp") << "*** Outside boundary point (";
+ Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << ", will refine..." << std::endl;
+ Trace("nl-ext-tftp")
+ << " poly_approx = " << poly_approx << std::endl;
+ Trace("nl-ext-tftp")
+ << " is_tangent = " << is_tangent << std::endl;
+ Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
+ }
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp")
+ << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << std::endl;
+ }
+ }
+ }
+
+ // Figure 3: P( c )
+ Node poly_approx_c;
+ if (is_tangent || is_secant)
+ {
+ Assert(!poly_approx.isNull());
+ std::vector<Node> taylor_subs;
+ taylor_subs.push_back(c);
+ Assert(taylor_vars.size() == taylor_subs.size());
+ poly_approx_c = poly_approx.substitute(taylor_vars.begin(),
+ taylor_vars.end(),
+ taylor_subs.begin(),
+ taylor_subs.end());
+ Trace("nl-ext-tftp-debug2")
+ << "...poly approximation at c is " << poly_approx_c << std::endl;
+ }
+ else
+ {
+ // we may want to continue getting better bounds
+ return false;
+ }
+
+ if (is_tangent)
+ {
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ Node tplane = poly_approx_c;
+
+ Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
+ std::vector<Node> antec;
+ int mdir = regionToMonotonicityDir(k, region);
+ for (unsigned i = 0; i < 2; i++)
+ {
+ // Tangent plane is valid in the interval [c,u) if the slope of the
+ // function matches its concavity, and is valid in (l, c] otherwise.
+ Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
+ if (!use_bound.isNull())
+ {
+ Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
+ antec.push_back(ant);
+ }
+ }
+ if (!antec.empty())
+ {
+ Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
+ lem = nm->mkNode(IMPLIES, antec_n, lem);
+ }
+ Trace("nl-ext-tftp-debug2")
+ << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma")
+ << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ // Figure 3 : line 9
+ lemmas.push_back(lem);
+ }
+ else if (is_secant)
+ {
+ // bounds are the minimum and maximum previous secant points
+ // should not repeat secant points: secant lemmas should suffice to
+ // rule out previous assignment
+ Assert(std::find(
+ d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
+ == d_secant_points[tf][d].end());
+ // Insert into the (temporary) vector. We do not update this vector
+ // until we are sure this secant plane lemma has been processed. We do
+ // this by mapping the lemma to a side effect below.
+ std::vector<Node> spoints = d_secant_points[tf][d];
+ spoints.push_back(c);
+
+ // sort
+ SortNlModel smv;
+ smv.d_nlm = &d_model;
+ smv.d_isConcrete = true;
+ std::sort(spoints.begin(), spoints.end(), smv);
+ // get the resulting index of c
+ unsigned index =
+ std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
+ // bounds are the next closest upper/lower bound values
+ if (index > 0)
+ {
+ bounds[0] = spoints[index - 1];
+ }
+ else
+ {
+ // otherwise, we use the lower boundary point for this concavity
+ // region
+ if (k == SINE)
+ {
+ Assert(!bounds[0].isNull());
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // pick c-1
+ bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
+ }
+ }
+ if (index < spoints.size() - 1)
+ {
+ bounds[1] = spoints[index + 1];
+ }
+ else
+ {
+ // otherwise, we use the upper boundary point for this concavity
+ // region
+ if (k == SINE)
+ {
+ Assert(!bounds[1].isNull());
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // pick c+1
+ bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
+ }
+ }
+ Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
+ << " ... " << bounds[1] << std::endl;
+
+ // the secant plane may be conjunction of 1-2 guarded inequalities
+ std::vector<Node> lemmaConj;
+ for (unsigned s = 0; s < 2; s++)
+ {
+ // compute secant plane
+ Assert(!poly_approx.isNull());
+ Assert(!bounds[s].isNull());
+ // take the model value of l or u (since may contain PI)
+ Node b = d_model.computeAbstractModelValue(bounds[s]);
+ Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
+ << " is " << b << std::endl;
+ Assert(b.isConst());
+ if (c != b)
+ {
+ // Figure 3 : P(l), P(u), for s = 0,1
+ Node poly_approx_b;
+ std::vector<Node> taylor_subs;
+ taylor_subs.push_back(b);
+ Assert(taylor_vars.size() == taylor_subs.size());
+ poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
+ taylor_vars.end(),
+ taylor_subs.begin(),
+ taylor_subs.end());
+ // Figure 3: S_l( x ), S_u( x ) for s = 0,1
+ Node splane;
+ Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
+ Assert(rcoeff_n.isConst());
+ Rational rcoeff = rcoeff_n.getConst<Rational>();
+ Assert(rcoeff.sgn() != 0);
+ poly_approx_b = Rewriter::rewrite(poly_approx_b);
+ poly_approx_c = Rewriter::rewrite(poly_approx_c);
+ splane = nm->mkNode(
+ PLUS,
+ poly_approx_b,
+ nm->mkNode(MULT,
+ nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
+ nm->mkConst(Rational(1) / rcoeff),
+ nm->mkNode(MINUS, tf[0], b)));
+
+ Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
+ // With respect to Figure 3, this is slightly different.
+ // In particular, we chose b to be the model value of bounds[s],
+ // which is a constant although bounds[s] may not be (e.g. if it
+ // contains PI).
+ // To ensure that c...b does not cross an inflection point,
+ // we guard with the symbolic version of bounds[s].
+ // This leads to lemmas e.g. of this form:
+ // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
+ // b ) + P( b ) )
+ // where b = (PI/2)^M, the current value of PI/2 in the model.
+ // This is sound since we are guarded by the symbolic
+ // representation of PI/2.
+ Node antec_n =
+ nm->mkNode(AND,
+ nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
+ nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
+ lem = nm->mkNode(IMPLIES, antec_n, lem);
+ Trace("nl-ext-tftp-debug2")
+ << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma")
+ << "*** Secant plane lemma : " << lem << std::endl;
+ lemmaConj.push_back(lem);
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ }
+ }
+ // Figure 3 : line 22
+ Assert(!lemmaConj.empty());
+ Node lem =
+ lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
+ lemmas.push_back(lem);
+ // The side effect says that if lem is added, then we should add the
+ // secant point c for (tf,d).
+ lemSE[lem].d_secantPoint.push_back(std::make_tuple(tf, d, c));
+ }
+ return true;
+}
+
+int TranscendentalSolver::regionToMonotonicityDir(Kind k, int region)
+{
+  // Returns +1 (-1) if the function of kind k is increasing (decreasing) on
+  // the given concavity region (see d_tf_region), or 0 if region is invalid.
+  if (k == EXPONENTIAL)
+  {
+    // exp is strictly increasing on its single region #1, (-infty, infty)
+    if (region == 1)
+    {
+      return 1;
+    }
+  }
+  else if (k == SINE)
+  {
+    // sine is decreasing on regions #1 (pi/2, pi] and #4 (-pi, -pi/2],
+    // and increasing on regions #2 (0, pi/2] and #3 (-pi/2, 0]
+    if (region == 1 || region == 4)
+    {
+      return -1;
+    }
+    else if (region == 2 || region == 3)
+    {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+int TranscendentalSolver::regionToConcavity(Kind k, int region)
+{
+  // Returns +1 (-1) if the function of kind k is convex (concave) on the
+  // given concavity region (see d_tf_region), or 0 if region is invalid.
+  if (k == EXPONENTIAL)
+  {
+    // exp is convex everywhere (region #1)
+    if (region == 1)
+    {
+      return 1;
+    }
+  }
+  else if (k == SINE)
+  {
+    // sine is concave on (0, pi], i.e. regions #1 and #2, and convex on
+    // (-pi, 0], i.e. regions #3 and #4
+    if (region == 1 || region == 2)
+    {
+      return -1;
+    }
+    else if (region == 3 || region == 4)
+    {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+Node TranscendentalSolver::regionToLowerBound(Kind k, int region)
+{
+  // Returns the (symbolic) lower endpoint of the given concavity region for
+  // SINE (see d_tf_region), and the null node for invalid/unbounded regions.
+  // Note exp's single region (-infty, infty) has no lower bound.
+  if (k == SINE)
+  {
+    if (region == 1)
+    {
+      // region #1 is pi/2 < x <= pi
+      return d_pi_2;
+    }
+    else if (region == 2)
+    {
+      // region #2 is 0 < x <= pi/2
+      return d_zero;
+    }
+    else if (region == 3)
+    {
+      // region #3 is -pi/2 < x <= 0
+      return d_pi_neg_2;
+    }
+    else if (region == 4)
+    {
+      // region #4 is -pi < x <= -pi/2
+      return d_pi_neg;
+    }
+  }
+  return Node::null();
+}
+
+Node TranscendentalSolver::regionToUpperBound(Kind k, int region)
+{
+  // Returns the (symbolic) upper endpoint of the given concavity region for
+  // SINE (see d_tf_region), and the null node for invalid/unbounded regions.
+  // Note exp's single region (-infty, infty) has no upper bound.
+  if (k == SINE)
+  {
+    if (region == 1)
+    {
+      // region #1 is pi/2 < x <= pi
+      return d_pi;
+    }
+    else if (region == 2)
+    {
+      // region #2 is 0 < x <= pi/2
+      return d_pi_2;
+    }
+    else if (region == 3)
+    {
+      // region #3 is -pi/2 < x <= 0
+      return d_zero;
+    }
+    else if (region == 4)
+    {
+      // region #4 is -pi < x <= -pi/2
+      return d_pi_neg_2;
+    }
+  }
+  return Node::null();
+}
+
+Node TranscendentalSolver::getDerivative(Node n, Node x)
+{
+  NodeManager* nm = NodeManager::currentNM();
+  Assert(x.isVar());
+  // only handle the cases of the taylor expansion of d
+  if (n.getKind() == EXPONENTIAL)
+  {
+    if (n[0] == x)
+    {
+      // d/dx exp(x) = exp(x)
+      return n;
+    }
+  }
+  else if (n.getKind() == SINE)
+  {
+    if (n[0] == x)
+    {
+      // d/dx sin(x) = cos(x), expressed here as sin(pi/2 - x)
+      Node na = nm->mkNode(MINUS, d_pi_2, n[0]);
+      Node ret = nm->mkNode(SINE, na);
+      ret = Rewriter::rewrite(ret);
+      return ret;
+    }
+  }
+  else if (n.getKind() == PLUS)
+  {
+    // sum rule: d/dx (t_1 + ... + t_k) = d/dx t_1 + ... + d/dx t_k
+    std::vector<Node> dchildren;
+    for (unsigned i = 0; i < n.getNumChildren(); i++)
+    {
+      // PLUS is flattened in rewriter, recursion depth is bounded by 1
+      Node dc = getDerivative(n[i], x);
+      if (dc.isNull())
+      {
+        // fail overall if any summand is an unhandled case
+        return dc;
+      }
+      else
+      {
+        dchildren.push_back(dc);
+      }
+    }
+    return nm->mkNode(PLUS, dchildren);
+  }
+  else if (n.getKind() == MULT)
+  {
+    // constant factor rule: d/dx (c * t) = c * d/dx t
+    Assert(n[0].isConst());
+    Node dc = getDerivative(n[1], x);
+    if (!dc.isNull())
+    {
+      return nm->mkNode(MULT, n[0], dc);
+    }
+  }
+  else if (n.getKind() == NONLINEAR_MULT)
+  {
+    // power rule for monomials: d/dx x^k = k*x^{k-1}, implemented by
+    // replacing one occurrence of x with the constant k.
+    // NOTE(review): this is only correct if the remaining factors do not
+    // depend on x; this appears to hold for the Taylor-expansion terms
+    // handled by this method -- confirm for any new callers.
+    unsigned xcount = 0;
+    std::vector<Node> children;
+    unsigned xindex = 0;
+    for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
+    {
+      if (n[i] == x)
+      {
+        xcount++;
+        xindex = i;
+      }
+      children.push_back(n[i]);
+    }
+    if (xcount == 0)
+    {
+      return d_zero;
+    }
+    else
+    {
+      children[xindex] = nm->mkConst(Rational(xcount));
+    }
+    return nm->mkNode(MULT, children);
+  }
+  else if (n.isVar())
+  {
+    // d/dx x = 1; the derivative of any other variable is 0
+    return n == x ? d_one : d_zero;
+  }
+  else if (n.isConst())
+  {
+    return d_zero;
+  }
+  // unhandled case: signal failure with the null node
+  Trace("nl-ext-debug") << "No derivative computed for " << n;
+  Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
+  return Node::null();
+}
+
+std::pair<Node, Node> TranscendentalSolver::getTaylor(Node fa, unsigned n)
+{
+  NodeManager* nm = NodeManager::currentNM();
+  Assert(n > 0);
+  Node fac; // what term we cache for fa
+  if (fa[0] == d_zero)
+  {
+    // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
+    fac = fa;
+  }
+  else
+  {
+    // otherwise we use a standard factor a in (x-a)^n
+    fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
+  }
+  Node taylor_rem;
+  Node taylor_sum;
+  // check if we have already computed this Taylor series
+  std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
+  if (itt == d_taylor_sum[fac].end())
+  {
+    // i_exp_base is the base of the power terms, i.e. x when centered at 0
+    // and (x - a) otherwise
+    Node i_exp_base;
+    if (fa[0] == d_zero)
+    {
+      i_exp_base = d_taylor_real_fv;
+    }
+    else
+    {
+      i_exp_base = Rewriter::rewrite(
+          nm->mkNode(MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
+    }
+    // loop invariants at iteration i (with counter == i+1):
+    //   i_derv is f^i evaluated at the base point a
+    //   i_fact is i!
+    //   i_exp  is i_exp_base^i
+    Node i_derv = fac;
+    Node i_fact = d_one;
+    Node i_exp = d_one;
+    int i_derv_status = 0;
+    unsigned counter = 0;
+    std::vector<Node> sum;
+    do
+    {
+      counter++;
+      if (fa.getKind() == EXPONENTIAL)
+      {
+        // unchanged
+      }
+      else if (fa.getKind() == SINE)
+      {
+        // i_derv_status cycles through sine's derivatives:
+        // 0: sin, 1: cos (= sin(pi/2 + a)), 2: -sin, 3: -cos
+        if (i_derv_status % 2 == 1)
+        {
+          Node arg = nm->mkNode(PLUS, d_pi_2, d_taylor_real_fv_base);
+          i_derv = nm->mkNode(SINE, arg);
+        }
+        else
+        {
+          i_derv = fa;
+        }
+        if (i_derv_status >= 2)
+        {
+          i_derv = nm->mkNode(MINUS, d_zero, i_derv);
+        }
+        i_derv = Rewriter::rewrite(i_derv);
+        i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
+      }
+      if (counter == (n + 1))
+      {
+        // the Lagrange remainder evaluates the derivative at b instead of a
+        TNode x = d_taylor_real_fv_base;
+        i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
+      }
+      // curr is (f^i( a )/i!)*(x-a)^i, the i-th Taylor term
+      Node curr = nm->mkNode(MULT, nm->mkNode(DIVISION, i_derv, i_fact), i_exp);
+      if (counter == (n + 1))
+      {
+        taylor_rem = curr;
+      }
+      else
+      {
+        sum.push_back(curr);
+        i_fact = Rewriter::rewrite(
+            nm->mkNode(MULT, nm->mkConst(Rational(counter)), i_fact));
+        i_exp = Rewriter::rewrite(nm->mkNode(MULT, i_exp_base, i_exp));
+      }
+    } while (counter <= n);
+    taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(PLUS, sum);
+
+    // when centered at 0, normalize the cached term to use the variable a
+    if (fac[0] != d_taylor_real_fv_base)
+    {
+      TNode x = d_taylor_real_fv_base;
+      taylor_sum = taylor_sum.substitute(x, fac[0]);
+    }
+
+    // cache
+    d_taylor_sum[fac][n] = taylor_sum;
+    d_taylor_rem[fac][n] = taylor_rem;
+  }
+  else
+  {
+    taylor_sum = itt->second;
+    Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
+    taylor_rem = d_taylor_rem[fac][n];
+  }
+
+  // must substitute for the argument if we were using a different lookup
+  if (fa[0] != fac[0])
+  {
+    TNode x = d_taylor_real_fv_base;
+    taylor_sum = taylor_sum.substitute(x, fa[0]);
+  }
+  return std::pair<Node, Node>(taylor_sum, taylor_rem);
+}
+
+void TranscendentalSolver::getPolynomialApproximationBounds(
+    Kind k, unsigned d, std::vector<Node>& pbounds)
+{
+  // Computes (or retrieves from the d_poly_bounds cache) the four polynomial
+  // approximations, appended to pbounds in the order:
+  //   [0] lower bound for positive arguments
+  //   [1] lower bound for negative arguments
+  //   [2] upper bound for positive arguments
+  //   [3] upper bound for negative arguments
+  if (d_poly_bounds[k][d].empty())
+  {
+    NodeManager* nm = NodeManager::currentNM();
+    Node tft = nm->mkNode(k, d_zero);
+    // n is the Taylor degree we are currently considering
+    unsigned n = 2 * d;
+    // n must be even
+    std::pair<Node, Node> taylor = getTaylor(tft, n);
+    Trace("nl-ext-tftp-debug2")
+        << "Taylor for " << k << " is : " << taylor.first << std::endl;
+    Node taylor_sum = Rewriter::rewrite(taylor.first);
+    Trace("nl-ext-tftp-debug2")
+        << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
+        << std::endl;
+    Assert(taylor.second.getKind() == MULT);
+    Assert(taylor.second.getNumChildren() == 2);
+    Assert(taylor.second[0].getKind() == DIVISION);
+    Trace("nl-ext-tftp-debug2")
+        << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
+    // ru is x^{n+1}/(n+1)!
+    Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
+    ru = Rewriter::rewrite(ru);
+    Trace("nl-ext-tftp-debug2")
+        << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
+    if (k == EXPONENTIAL)
+    {
+      // for exp, the Taylor sum itself is a sound lower bound on both sides;
+      // the upper bounds use a multiplicative (positive args) resp. additive
+      // (negative args) correction by the remainder factor
+      pbounds.push_back(taylor_sum);
+      pbounds.push_back(taylor_sum);
+      pbounds.push_back(Rewriter::rewrite(
+          nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
+      pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
+    }
+    else
+    {
+      // for sine, sum -/+ remainder bounds the function on both sides
+      Assert(k == SINE);
+      Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
+      Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
+      pbounds.push_back(l);
+      pbounds.push_back(l);
+      pbounds.push_back(u);
+      pbounds.push_back(u);
+    }
+    Trace("nl-ext-tf-tplanes")
+        << "Polynomial approximation for " << k << " is: " << std::endl;
+    Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
+    Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
+    Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
+    Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
+    d_poly_bounds[k][d].insert(
+        d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
+  }
+  else
+  {
+    pbounds.insert(
+        pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
+  }
+}
+
+void TranscendentalSolver::getPolynomialApproximationBoundForArg(
+    Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
+{
+  getPolynomialApproximationBounds(k, d, pbounds);
+  Assert(c.isConst());
+  // The upper bound for exp on positive arguments is only sound when the
+  // remainder factor c^{n+1}/(n+1)! is below one; increase the degree until
+  // this holds for the given argument c.
+  if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
+  {
+    NodeManager* nm = NodeManager::currentNM();
+    Node tft = nm->mkNode(k, d_zero);
+    bool success = false;
+    unsigned ds = d;
+    TNode ttrf = d_taylor_real_fv;
+    TNode tc = c;
+    do
+    {
+      success = true;
+      unsigned n = 2 * ds;
+      std::pair<Node, Node> taylor = getTaylor(tft, n);
+      // check that 1-c^{n+1}/(n+1)! > 0
+      // NOTE(review): the comparison below is strict, so rus == 1 (where
+      // 1-rus is zero, not positive) is accepted; confirm whether this
+      // boundary case should also increase the degree (i.e. use >=).
+      Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
+      Node rus = ru.substitute(ttrf, tc);
+      rus = Rewriter::rewrite(rus);
+      Assert(rus.isConst());
+      if (rus.getConst<Rational>() > d_one.getConst<Rational>())
+      {
+        success = false;
+        ds = ds + 1;
+      }
+    } while (!success);
+    if (ds > d)
+    {
+      Trace("nl-ext-exp-taylor")
+          << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
+          << k << " " << c << ")" << std::endl;
+      // must use sound upper bound
+      std::vector<Node> pboundss;
+      getPolynomialApproximationBounds(k, ds, pboundss);
+      pbounds[2] = pboundss[2];
+    }
+  }
+}
+
+std::pair<Node, Node> TranscendentalSolver::getTfModelBounds(Node tf,
+                                                             unsigned d)
+{
+  // compute the model value of the argument
+  Node c = d_model.computeAbstractModelValue(tf[0]);
+  Assert(c.isConst());
+  int csign = c.getConst<Rational>().sgn();
+  Kind k = tf.getKind();
+  if (csign == 0)
+  {
+    // at zero, its trivial
+    if (k == SINE)
+    {
+      // sin(0) = 0
+      return std::pair<Node, Node>(d_zero, d_zero);
+    }
+    Assert(k == EXPONENTIAL);
+    // exp(0) = 1
+    return std::pair<Node, Node>(d_one, d_one);
+  }
+  bool isNeg = csign == -1;
+
+  std::vector<Node> pbounds;
+  getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+
+  std::vector<Node> bounds;
+  TNode tfv = d_taylor_real_fv;
+  TNode tfs = tf[0];
+  for (unsigned d2 = 0; d2 < 2; d2++)
+  {
+    // select the approximation by sign of the argument: pbounds is laid out
+    // as [lower-pos, lower-neg, upper-pos, upper-neg]; d2 == 0 picks the
+    // lower bound and d2 == 1 the upper bound
+    int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
+    Node pab = pbounds[index];
+    if (!pab.isNull())
+    {
+      // { x -> tf[0] }
+      pab = pab.substitute(tfv, tfs);
+      pab = Rewriter::rewrite(pab);
+      Node v_pab = d_model.computeAbstractModelValue(pab);
+      bounds.push_back(v_pab);
+    }
+    else
+    {
+      bounds.push_back(Node::null());
+    }
+  }
+  return std::pair<Node, Node>(bounds[0], bounds[1]);
+}
+
+Node TranscendentalSolver::mkValidPhase(Node a, Node pi)
+{
+  // constructs the conjunction (-1*pi <= a) AND (a <= pi), i.e. a is in the
+  // principal phase [-pi, pi]
+  return mkBounded(
+      NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
+}
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
diff --git a/src/theory/arith/transcendental_solver.h b/src/theory/arith/transcendental_solver.h
new file mode 100644
index 000000000..8e539bbf0
--- /dev/null
+++ b/src/theory/arith/transcendental_solver.h
@@ -0,0 +1,419 @@
+/********************* */
+/*! \file transcendental_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solving for handling transcendental functions.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H
+#define CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/nl_lemma_utils.h"
+#include "theory/arith/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+/** Transcendental solver class
+ *
+ * This class implements model-based refinement schemes
+ * for transcendental functions, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of methods that implement the lemma
+ * schemas below, which return a set of lemmas that should be sent on the
+ * output channel.
+ */
+class TranscendentalSolver
+{
+ public:
+  TranscendentalSolver(NlModel& m);
+  ~TranscendentalSolver();
+
+  /** init last call
+   */
+  void initLastCall(const std::vector<Node>& assertions,
+                    const std::vector<Node>& false_asserts,
+                    const std::vector<Node>& xts,
+                    std::vector<Node>& lems,
+                    std::vector<Node>& lemsPp);
+  /** increment taylor degree */
+  void incrementTaylorDegree();
+  /** get taylor degree */
+  unsigned getTaylorDegree() const;
+  /** preprocess assertions check model
+   *
+   * This modifies the given assertions in preparation for running a call
+   * to check model.
+   *
+   * This method returns false if a bound for a transcendental function
+   * was conflicting.
+   */
+  bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
+  /** Process side effect se */
+  void processSideEffect(const NlLemmaSideEffect& se);
+  //-------------------------------------------- lemma schemas
+  /** check transcendental initial refine
+   *
+   * Returns a set of valid theory lemmas, based on
+   * simple facts about transcendental functions.
+   * This mostly follows the initial axioms described in
+   * Section 4 of "Satisfiability
+   * Modulo Transcendental Functions via Incremental
+   * Linearization" by Cimatti et al., CADE 2017.
+   *
+   * Examples:
+   *
+   * sin( x ) = -sin( -x )
+   * ( PI > x > 0 ) => 0 < sin( x ) < 1
+   * exp( x )>0
+   * x<0 => exp( x )<1
+   */
+  std::vector<Node> checkTranscendentalInitialRefine();
+
+  /** check transcendental monotonic
+   *
+   * Returns a set of valid theory lemmas, based on a
+   * lemma scheme that ensures that applications
+   * of transcendental functions respect monotonicity.
+   *
+   * Examples:
+   *
+   * x > y => exp( x ) > exp( y )
+   * PI/2 > x > y > 0 => sin( x ) > sin( y )
+   * PI > x > y > PI/2 => sin( x ) < sin( y )
+   */
+  std::vector<Node> checkTranscendentalMonotonic();
+
+  /** check transcendental tangent planes
+   *
+   * Returns a set of valid theory lemmas, based on
+   * computing an "incremental linearization" of
+   * transcendental functions based on the model values
+   * of transcendental functions and their arguments.
+   * It is based on Figure 3 of "Satisfiability
+   * Modulo Transcendental Functions via Incremental
+   * Linearization" by Cimatti et al., CADE 2017.
+   * This schema is not terminating in general.
+   * It is not enabled by default, and can
+   * be enabled by --nl-ext-tf-tplanes.
+   *
+   * Example:
+   *
+   * Assume we have a term sin(y) where M( y ) = 1 where M is the current model.
+   * Note that:
+   * sin(1) ~= .841471
+   *
+   * The Taylor series and remainder of sin(y) of degree 7 is
+   * P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/20)*x^5
+   * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
+   *
+   * This gives us lower and upper bounds :
+   * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
+   * ...where note P_u( 1 ) = 4243/5040 ~= .841865
+   * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
+   * ...where note P_l( 1 ) = 4241/5040 ~= .841468
+   *
+   * Assume that M( sin(y) ) > P_u( 1 ).
+   * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+   * we add a tangent plane refinement.
+   * The tangent plane at the point 1 in P_u is
+   * given by the formula:
+   * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
+   * We add the lemma:
+   * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
+   * which is:
+   * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
+   *
+   * Assume that M( sin(y) ) < P_u( 1 ).
+   * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+   * we add a secant plane refinement for some constants ( l, u )
+   * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
+   * l = 0 and u = M( PI/2 ) = 150517/47912.
+   * The secant planes at point 1 for P_l
+   * are given by the formulas:
+   * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
+   * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
+   * We add the lemmas:
+   * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
+   * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
+   * which are:
+   * ( 0 < y < 1 ) => (sin y) >= 4251/5040*y
+   * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
+   * where c1, c2 are rationals (for brevity, omitted here)
+   * such that c1 ~= .277 and c2 ~= 2.032.
+   *
+   * The argument lemSE is the "side effect" of the lemmas in the return
+   * value of this function (for details, see checkLastCall).
+   */
+  std::vector<Node> checkTranscendentalTangentPlanes(
+      std::map<Node, NlLemmaSideEffect>& lemSE);
+  /** check transcendental function refinement for tf
+   *
+   * This method is called by the above method for each "master"
+   * transcendental function application that occurs in an assertion in the
+   * current context. For example, an application like sin(t) is not a master
+   * if we have introduced the constraints:
+   *   t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
+   * See d_trMaster/d_trSlaves for more detail.
+   *
+   * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
+   * function application tf for Taylor degree d. It may add a secant or
+   * tangent plane lemma to lems and its side effect (if one exists)
+   * to lemSE.
+   *
+   * It returns false if the bounds are not precise enough to add a
+   * secant or tangent plane lemma.
+   */
+  bool checkTfTangentPlanesFun(Node tf,
+                               unsigned d,
+                               std::vector<Node>& lems,
+                               std::map<Node, NlLemmaSideEffect>& lemSE);
+  //-------------------------------------------- end lemma schemas
+ private:
+  /** polynomial approximation bounds
+   *
+   * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
+   * d_taylor_real_fv. These are polynomial approximations of the Taylor series
+   * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
+   * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
+   * for positive/negative (+/-) values of the argument of <k>( 0 ).
+   *
+   * Notice that for certain bounds (e.g. upper bounds for exponential), the
+   * Taylor approximation for a fixed degree is only sound up to a given
+   * upper bound on the argument. To obtain sound lower/upper bounds for a
+   * given <k>( c ), use the function below.
+   */
+  void getPolynomialApproximationBounds(Kind k,
+                                        unsigned d,
+                                        std::vector<Node>& pbounds);
+  /** polynomial approximation bounds
+   *
+   * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
+   * that are sound (lower, upper) bounds for <k>( c ). Notice that these
+   * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
+   * c>0, we return the P_u+[x] from the function above for the minimum degree
+   * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
+   */
+  void getPolynomialApproximationBoundForArg(Kind k,
+                                             Node c,
+                                             unsigned d,
+                                             std::vector<Node>& pbounds);
+  /** get transcendental function model bounds
+   *
+   * This returns the current lower and upper bounds of transcendental
+   * function application tf based on Taylor of degree 2*d, which is dependent
+   * on the model value of its argument.
+   */
+  std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
+  /** get monotonicity direction
+   *
+   * Returns whether the slope is positive (+1) or negative (-1)
+   * in region of transcendental function with kind k.
+   * Returns 0 if region is invalid.
+   */
+  int regionToMonotonicityDir(Kind k, int region);
+  /** get concavity
+   *
+   * Returns whether we are concave (+1) or convex (-1)
+   * in region of transcendental function with kind k,
+   * where region is defined above.
+   * Returns 0 if region is invalid.
+   */
+  int regionToConcavity(Kind k, int region);
+  /** region to lower bound
+   *
+   * Returns the term corresponding to the lower
+   * bound of the region of transcendental function
+   * with kind k. Returns Node::null() if the region
+   * is invalid, or there is no lower bound for the
+   * region.
+   */
+  Node regionToLowerBound(Kind k, int region);
+  /** region to upper bound
+   *
+   * Returns the term corresponding to the upper
+   * bound of the region of transcendental function
+   * with kind k. Returns Node::null() if the region
+   * is invalid, or there is no upper bound for the
+   * region.
+   */
+  Node regionToUpperBound(Kind k, int region);
+  /** get derivative
+   *
+   * Returns d/dx n. Supports cases of n
+   * for transcendental functions applied to x,
+   * multiplication, addition, constants and variables.
+   * Returns Node::null() if derivative is an
+   * unhandled case.
+   */
+  Node getDerivative(Node n, Node x);
+
+  void mkPi();
+  void getCurrentPiBounds(std::vector<Node>& lemmas);
+  /** Make the node -pi <= a <= pi */
+  static Node mkValidPhase(Node a, Node pi);
+
+  /** Reference to the non-linear model object */
+  NlModel& d_model;
+  /** commonly used terms */
+  Node d_zero;
+  Node d_one;
+  Node d_neg_one;
+  Node d_true;
+  Node d_false;
+  /**
+   * Some transcendental functions f(t) are "purified", e.g. we add
+   * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
+   * purified we call "master terms".
+   *
+   * The maps below maintain a master/slave relationship over
+   * transcendental functions (SINE, EXPONENTIAL, PI), where above
+   * f(y) is the master of itself and of f(t).
+   *
+   * This is used for ensuring that the argument y of SINE we process is on the
+   * interval [-pi .. pi], and that exponentials are not applied to arguments
+   * that contain transcendental functions.
+   */
+  std::map<Node, Node> d_trMaster;
+  std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
+  /** The transcendental functions we have done initial refinements on */
+  std::map<Node, bool> d_tf_initial_refine;
+
+  /** concavity region for transcendental functions
+   *
+   * This stores an integer that identifies an interval in
+   * which the current model value for an argument of an
+   * application of a transcendental function resides.
+   *
+   * For exp( x ):
+   *   region #1 is -infty < x < infty
+   * For sin( x ):
+   *   region #0 is pi < x < infty (this is an invalid region)
+   *   region #1 is pi/2 < x <= pi
+   *   region #2 is 0 < x <= pi/2
+   *   region #3 is -pi/2 < x <= 0
+   *   region #4 is -pi < x <= -pi/2
+   *   region #5 is -infty < x <= -pi (this is an invalid region)
+   * All regions not listed above, as well as regions 0 and 5
+   * for SINE are "invalid". We only process applications
+   * of transcendental functions whose arguments have model
+   * values that reside in valid regions.
+   */
+  std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
+  /** cache of the above function */
+  std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
+
+  /**
+   * Maps representatives of a congruence class to the members of that class.
+   *
+   * In detail, a congruence class is a set of terms of the form
+   *   { f(t1), ..., f(tn) }
+   * such that t1 = ... = tn in the current context. We choose an arbitrary
+   * term among these to be the representative of this congruence class.
+   *
+   * Moreover, notice we compute congruence classes only over terms that
+   * are transcendental function applications that are "master terms",
+   * see d_trMaster/d_trSlaves.
+   */
+  std::map<Node, std::vector<Node>> d_funcCongClass;
+  /**
+   * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
+   * that are representatives of their congruence class.
+   */
+  std::map<Kind, std::vector<Node>> d_funcMap;
+
+  // tangent plane bounds
+  std::map<Node, std::map<Node, Node>> d_tangent_val_bound[4];
+
+  /** secant points (sorted list) for transcendental functions
+   *
+   * This is used for tangent plane refinements for
+   * transcendental functions. This is the set
+   * "get-previous-secant-points" in "Satisfiability
+   * Modulo Transcendental Functions via Incremental
+   * Linearization" by Cimatti et al., CADE 2017, for
+   * each transcendental function application. We store this set for each
+   * Taylor degree.
+   */
+  std::unordered_map<Node,
+                     std::map<unsigned, std::vector<Node>>,
+                     NodeHashFunction>
+      d_secant_points;
+
+  /** get Taylor series of degree n for function fa centered around point fa[0].
+   *
+   * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
+   * the first part of the pair is the Taylor series expansion :
+   *    P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
+   * and the second part of the pair is the Taylor series remainder :
+   *    R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
+   *
+   * The above values are cached for each (f,n) for a fixed variable "a".
+   * To compute the Taylor series for fa, we compute the Taylor series
+   *   for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
+   * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
+   *   if fa[0]=0.
+   * In the latter case, note we compute the exponential x^{n+1}
+   * instead of (x-a)^{n+1}, which can be done faster.
+   */
+  std::pair<Node, Node> getTaylor(Node fa, unsigned n);
+
+  /** internal variables used for constructing (cached) versions of the Taylor
+   * series above.
+   */
+  Node d_taylor_real_fv;           // x above
+  Node d_taylor_real_fv_base;      // a above
+  Node d_taylor_real_fv_base_rem;  // b above
+
+  /** cache of sum and remainder terms for getTaylor */
+  std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+      d_taylor_sum;
+  std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+      d_taylor_rem;
+  /** taylor degree
+   *
+   * Indicates that the degree of the polynomials in the Taylor approximation of
+   * all transcendental functions is 2*d_taylor_degree. This value is set
+   * initially to options::nlExtTfTaylorDegree() and may be incremented
+   * if the option options::nlExtTfIncPrecision() is enabled.
+   */
+  unsigned d_taylor_degree;
+  /** PI
+   *
+   * Note that PI is a (symbolic, non-constant) nullary operator. This is
+   * because its value cannot be computed exactly. We constrain PI to the
+   * concrete lower and upper bounds stored in d_pi_bound below.
+   */
+  Node d_pi;
+  /** PI/2 */
+  Node d_pi_2;
+  /** -PI/2 */
+  Node d_pi_neg_2;
+  /** -PI */
+  Node d_pi_neg;
+  /** the concrete lower and upper bounds for PI */
+  Node d_pi_bound[2];
+}; /* class TranscendentalSolver */
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H */
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback