author    Haniel Barbosa <hanielbbarbosa@gmail.com>  2020-09-01 19:08:23 -0300
committer GitHub <noreply@github.com>                2020-09-01 19:08:23 -0300
commit    8ad308b23c705e73507a42d2425289e999d47f86 (patch)
tree      29e3ac78844bc57171e0d122d758a8371a292a93 /src/theory
parent    62ec0666dd4d409ee85603ae94d7ab1a2b4c9dcc (diff)
Removes old proof code (#4964)
This deletes most of the old proof code, keeping only the minimal infrastructure needed for producing unsat cores: dependency tracking in preprocessing, the prop engine proof, and the unsat core computation code in the old proof manager. These should also go once the new proof infrastructure is fully integrated into master. The commit also cleans up interfaces that relied on constructs specific to the old proof code (such as LemmaProofRecipe). Where possible, or where it made sense on its own, standalone local proof production code was kept but deactivated (as in the equality engine and the arithmetic solver).
Diffstat (limited to 'src/theory')
-rw-r--r-- src/theory/arith/callbacks.cpp | 29
-rw-r--r-- src/theory/arith/constraint.cpp | 114
-rw-r--r-- src/theory/arith/constraint.h | 48
-rw-r--r-- src/theory/arith/linear_equality.cpp | 24
-rw-r--r-- src/theory/arith/nl/nl_model.cpp | 1
-rw-r--r-- src/theory/arith/theory_arith.cpp | 1
-rw-r--r-- src/theory/arith/theory_arith.h | 11
-rw-r--r-- src/theory/arith/theory_arith_private.cpp | 99
-rw-r--r-- src/theory/arrays/array_proof_reconstruction.cpp | 199
-rw-r--r-- src/theory/arrays/array_proof_reconstruction.h | 59
-rw-r--r-- src/theory/arrays/theory_arrays.cpp | 182
-rw-r--r-- src/theory/arrays/theory_arrays.h | 19
-rw-r--r-- src/theory/bv/bitblast/aig_bitblaster.h | 6
-rw-r--r-- src/theory/bv/bitblast/bitblaster.h | 21
-rw-r--r-- src/theory/bv/bitblast/eager_bitblaster.cpp | 19
-rw-r--r-- src/theory/bv/bitblast/eager_bitblaster.h | 2
-rw-r--r-- src/theory/bv/bitblast/lazy_bitblaster.cpp | 19
-rw-r--r-- src/theory/bv/bitblast/lazy_bitblaster.h | 1
-rw-r--r-- src/theory/bv/bv_eager_solver.cpp | 12
-rw-r--r-- src/theory/bv/bv_eager_solver.h | 3
-rw-r--r-- src/theory/bv/bv_subtheory.h | 9
-rw-r--r-- src/theory/bv/bv_subtheory_bitblast.cpp | 7
-rw-r--r-- src/theory/bv/bv_subtheory_bitblast.h | 5
-rw-r--r-- src/theory/bv/theory_bv.cpp | 57
-rw-r--r-- src/theory/bv/theory_bv.h | 17
-rw-r--r-- src/theory/combination_engine.cpp | 2
-rw-r--r-- src/theory/engine_output_channel.cpp | 153
-rw-r--r-- src/theory/engine_output_channel.h | 4
-rw-r--r-- src/theory/ext_theory.h | 1
-rw-r--r-- src/theory/output_channel.cpp | 5
-rw-r--r-- src/theory/output_channel.h | 16
-rw-r--r-- src/theory/quantifiers/fun_def_process.cpp | 36
-rw-r--r-- src/theory/quantifiers/instantiate.cpp | 32
-rw-r--r-- src/theory/quantifiers/instantiate.h | 15
-rw-r--r-- src/theory/quantifiers_engine.cpp | 19
-rw-r--r-- src/theory/quantifiers_engine.h | 4
-rw-r--r-- src/theory/sort_inference.cpp | 1
-rw-r--r-- src/theory/strings/theory_strings_preprocess.cpp | 6
-rw-r--r-- src/theory/theory.cpp | 14
-rw-r--r-- src/theory/theory.h | 17
-rw-r--r-- src/theory/theory_engine.cpp | 241
-rw-r--r-- src/theory/theory_engine.h | 26
-rw-r--r-- src/theory/theory_test_utils.h | 8
-rw-r--r-- src/theory/uf/eq_proof.cpp | 32
-rw-r--r-- src/theory/uf/eq_proof.h | 25
-rw-r--r-- src/theory/uf/equality_engine.cpp | 167
-rw-r--r-- src/theory/uf/equality_engine.h | 32
-rw-r--r-- src/theory/uf/equality_engine_types.h | 32
-rw-r--r-- src/theory/uf/theory_uf.cpp | 51
-rw-r--r-- src/theory/uf/theory_uf.h | 14
50 files changed, 426 insertions, 1491 deletions
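Note: the arithmetic hunks below replace the old global PROOF / PROOF_ON / NULLPROOF macros with arithmetic-local variants pulled in from "theory/arith/proof_macros.h". That header is not shown in this diff, so the following is only a minimal sketch of what such macros could look like, assuming they gate on a single arithmetic-proof option; the option name used here is a guess, not taken from this commit.

    /* Hypothetical sketch of theory/arith/proof_macros.h -- illustrative only. */
    #ifndef CVC4__THEORY__ARITH__PROOF_MACROS_H
    #define CVC4__THEORY__ARITH__PROOF_MACROS_H

    #include "options/smt_options.h"

    // True iff arithmetic proof bookkeeping is enabled (assumed option name).
    #define ARITH_PROOF_ON() CVC4::options::proofNew()

    // Execute the statement x only when arithmetic proofs are on.
    #define ARITH_PROOF(x)  \
      if (ARITH_PROOF_ON()) \
      {                     \
        x;                  \
      }

    // Evaluate to x when arithmetic proofs are on, and to NULL otherwise.
    #define ARITH_NULLPROOF(x) (ARITH_PROOF_ON() ? (x) : NULL)

    #endif /* CVC4__THEORY__ARITH__PROOF_MACROS_H */

Under this reading, a call such as ARITH_PROOF(d_farkas.push_back(fc)) in the hunks below compiles away unless the option is set, which is how the standalone Farkas-coefficient bookkeeping is kept in place but deactivated, as the commit message describes.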
diff --git a/src/theory/arith/callbacks.cpp b/src/theory/arith/callbacks.cpp
index 758a337ba..f5f8a1a10 100644
--- a/src/theory/arith/callbacks.cpp
+++ b/src/theory/arith/callbacks.cpp
@@ -16,6 +16,8 @@
**/
#include "theory/arith/callbacks.h"
+
+#include "theory/arith/proof_macros.h"
#include "theory/arith/theory_arith_private.h"
namespace CVC4 {
@@ -87,17 +89,17 @@ void FarkasConflictBuilder::reset(){
d_consequent = NullConstraint;
d_constraints.clear();
d_consequentSet = false;
- PROOF(d_farkas.clear());
+ ARITH_PROOF(d_farkas.clear());
Assert(!underConstruction());
}
/* Adds a constraint to the constraint under construction. */
void FarkasConflictBuilder::addConstraint(ConstraintCP c, const Rational& fc){
Assert(
- !PROOF_ON()
+ !ARITH_PROOF_ON()
|| (!underConstruction() && d_constraints.empty() && d_farkas.empty())
|| (underConstruction() && d_constraints.size() + 1 == d_farkas.size()));
- Assert(PROOF_ON() || d_farkas.empty());
+ Assert(ARITH_PROOF_ON() || d_farkas.empty());
Assert(c->isTrue());
if(d_consequent == NullConstraint){
@@ -105,17 +107,20 @@ void FarkasConflictBuilder::addConstraint(ConstraintCP c, const Rational& fc){
} else {
d_constraints.push_back(c);
}
- PROOF(d_farkas.push_back(fc););
- Assert(!PROOF_ON() || d_constraints.size() + 1 == d_farkas.size());
- Assert(PROOF_ON() || d_farkas.empty());
+ ARITH_PROOF(d_farkas.push_back(fc));
+ Assert(!ARITH_PROOF_ON() || d_constraints.size() + 1 == d_farkas.size());
+ Assert(ARITH_PROOF_ON() || d_farkas.empty());
}
void FarkasConflictBuilder::addConstraint(ConstraintCP c, const Rational& fc, const Rational& mult){
Assert(!mult.isZero());
- if(PROOF_ON() && !mult.isOne()){
+ if (ARITH_PROOF_ON() && !mult.isOne())
+ {
Rational prod = fc * mult;
addConstraint(c, prod);
- }else{
+ }
+ else
+ {
addConstraint(c, fc);
}
}
@@ -132,7 +137,7 @@ void FarkasConflictBuilder::makeLastConsequent(){
ConstraintCP last = d_constraints.back();
d_constraints.back() = d_consequent;
d_consequent = last;
- PROOF( std::swap( d_farkas.front(), d_farkas.back() ) );
+ ARITH_PROOF(std::swap(d_farkas.front(), d_farkas.back()));
d_consequentSet = true;
}
@@ -145,14 +150,14 @@ ConstraintCP FarkasConflictBuilder::commitConflict(){
Assert(underConstruction());
Assert(!d_constraints.empty());
Assert(
- !PROOF_ON()
+ !ARITH_PROOF_ON()
|| (!underConstruction() && d_constraints.empty() && d_farkas.empty())
|| (underConstruction() && d_constraints.size() + 1 == d_farkas.size()));
- Assert(PROOF_ON() || d_farkas.empty());
+ Assert(ARITH_PROOF_ON() || d_farkas.empty());
Assert(d_consequentSet);
ConstraintP not_c = d_consequent->getNegation();
- RationalVectorCP coeffs = NULLPROOF(&d_farkas);
+ RationalVectorCP coeffs = ARITH_NULLPROOF(&d_farkas);
not_c->impliedByFarkas(d_constraints, coeffs, true );
reset();
diff --git a/src/theory/arith/constraint.cpp b/src/theory/arith/constraint.cpp
index 6a04e70d1..081bc08a7 100644
--- a/src/theory/arith/constraint.cpp
+++ b/src/theory/arith/constraint.cpp
@@ -21,7 +21,6 @@
#include <unordered_set>
#include "base/output.h"
-#include "proof/proof.h"
#include "smt/smt_statistics_registry.h"
#include "theory/arith/arith_utilities.h"
#include "theory/arith/normal_form.h"
@@ -551,46 +550,49 @@ bool Constraint::hasTrichotomyProof() const {
void Constraint::printProofTree(std::ostream& out, size_t depth) const
{
-#if IS_PROOFS_BUILD
- const ConstraintRule& rule = getConstraintRule();
- out << std::string(2 * depth, ' ') << "* " << getVariable() << " [";
- if (hasLiteral())
+ if (ARITH_PROOF_ON())
{
- out << getLiteral();
- }
- else
- {
- out << "NOLIT";
- };
- out << "]" << ' ' << getType() << ' ' << getValue() << " (" << getProofType()
- << ")";
- if (getProofType() == FarkasAP)
- {
- out << " [";
- bool first = true;
- for (const auto& coeff : *rule.d_farkasCoefficients)
+ const ConstraintRule& rule = getConstraintRule();
+ out << std::string(2 * depth, ' ') << "* " << getVariable() << " [";
+ if (hasLiteral())
{
- if (not first)
+ out << getLiteral();
+ }
+ else
+ {
+ out << "NOLIT";
+ };
+ out << "]" << ' ' << getType() << ' ' << getValue() << " ("
+ << getProofType() << ")";
+ if (getProofType() == FarkasAP)
+ {
+ out << " [";
+ bool first = true;
+ for (const auto& coeff : *rule.d_farkasCoefficients)
{
- out << ", ";
+ if (not first)
+ {
+ out << ", ";
+ }
+ first = false;
+ out << coeff;
}
- first = false;
- out << coeff;
+ out << "]";
}
- out << "]";
- }
- out << endl;
+ out << endl;
- for (AntecedentId i = rule.d_antecedentEnd; i != AntecedentIdSentinel; --i) {
- ConstraintCP antecdent = d_database->getAntecedent(i);
- if (antecdent == NullConstraint) {
- break;
+ for (AntecedentId i = rule.d_antecedentEnd; i != AntecedentIdSentinel; --i)
+ {
+ ConstraintCP antecdent = d_database->getAntecedent(i);
+ if (antecdent == NullConstraint)
+ {
+ break;
+ }
+ antecdent->printProofTree(out, depth + 1);
}
- antecdent->printProofTree(out, depth + 1);
+ return;
}
-#else /* IS_PROOFS_BUILD */
out << "Cannot print proof. This is not a proof build." << endl;
-#endif /* IS_PROOFS_BUILD */
}
bool Constraint::sanityChecking(Node n) const {
@@ -648,8 +650,7 @@ ConstraintCP ConstraintDatabase::getAntecedent (AntecedentId p) const {
void ConstraintRule::print(std::ostream& out) const {
-
- RationalVectorCP coeffs = NULLPROOF(d_farkasCoefficients);
+ RationalVectorCP coeffs = ARITH_NULLPROOF(d_farkasCoefficients);
out << "{ConstraintRule, ";
out << d_constraint << std::endl;
out << "d_proofType= " << d_proofType << ", " << std::endl;
@@ -658,7 +659,7 @@ void ConstraintRule::print(std::ostream& out) const {
if (d_constraint != NullConstraint && d_antecedentEnd != AntecedentIdSentinel)
{
const ConstraintDatabase& database = d_constraint->getDatabase();
-
+
size_t coeffIterator = (coeffs != RationalVectorCPSentinel) ? coeffs->size()-1 : 0;
AntecedentId p = d_antecedentEnd;
// must have at least one antecedent
@@ -700,9 +701,11 @@ bool Constraint::wellFormedFarkasProof() const {
ConstraintCP antecedent = d_database->d_antecedents[p];
if(antecedent == NullConstraint) { return false; }
-#if IS_PROOFS_BUILD
- if(!PROOF_ON()){ return cr.d_farkasCoefficients == RationalVectorCPSentinel; }
- Assert(PROOF_ON());
+ if (!ARITH_PROOF_ON())
+ {
+ return cr.d_farkasCoefficients == RationalVectorCPSentinel;
+ }
+ Assert(ARITH_PROOF_ON());
if(cr.d_farkasCoefficients == RationalVectorCPSentinel){ return false; }
if(cr.d_farkasCoefficients->size() < 2){ return false; }
@@ -755,7 +758,7 @@ bool Constraint::wellFormedFarkasProof() const {
default:
return false;
}
-
+
if(coeffIterator == coeffBegin){ return false; }
--coeffIterator;
--p;
@@ -800,10 +803,6 @@ bool Constraint::wellFormedFarkasProof() const {
// 0 = lhs <= rhs < 0
return (lhs.isNull() || (Constant::isMember(lhs) && Constant(lhs).isZero()))
&& rhs.sgn() < 0;
-
-#else /* IS_PROOFS_BUILD */
- return true;
-#endif /* IS_PROOFS_BUILD */
}
ConstraintP Constraint::makeNegation(ArithVar v, ConstraintType t, const DeltaRational& r){
@@ -860,7 +859,6 @@ ConstraintDatabase::ConstraintDatabase(context::Context* satContext, context::Co
, d_one(1)
, d_negOne(-1)
{
-
}
SortedConstraintMap& ConstraintDatabase::getVariableSCM(ArithVar v) const{
@@ -1109,7 +1107,7 @@ ConstraintP ConstraintDatabase::addLiteral(TNode literal){
return isNot ? hit->getNegation(): hit;
}else{
Comparison negCmp = Comparison::parseNormalForm(negationNode);
-
+
ConstraintType negType = Constraint::constraintTypeOfComparison(negCmp);
DeltaRational negDR = negCmp.normalizedDeltaRational();
@@ -1213,7 +1211,8 @@ void Constraint::impliedByUnate(ConstraintCP imp, bool nowInConflict){
AntecedentId antecedentEnd = d_database->d_antecedents.size() - 1;
RationalVectorP coeffs;
- if(PROOF_ON()){
+ if (ARITH_PROOF_ON())
+ {
std::pair<int, int> sgns = unateFarkasSigns(getNegation(), imp);
Rational first(sgns.first);
@@ -1222,10 +1221,11 @@ void Constraint::impliedByUnate(ConstraintCP imp, bool nowInConflict){
coeffs = new RationalVector();
coeffs->push_back(first);
coeffs->push_back(second);
- } else {
+ }
+ else
+ {
coeffs = RationalVectorPSentinel;
}
-
// no need to delete coeffs the memory is owned by ConstraintRule
d_database->pushConstraintRule(ConstraintRule(this, FarkasAP, antecedentEnd, coeffs));
@@ -1233,7 +1233,7 @@ void Constraint::impliedByUnate(ConstraintCP imp, bool nowInConflict){
if(Debug.isOn("constraint::conflictCommit") && inConflict()){
Debug("constraint::conflictCommit") << "inConflict@impliedByUnate " << this << std::endl;
}
-
+
if(Debug.isOn("constraints::wffp") && !wellFormedFarkasProof()){
getConstraintRule().print(Debug("constraints::wffp"));
}
@@ -1343,7 +1343,7 @@ void Constraint::impliedByIntHole(const ConstraintCPVec& b, bool nowInConflict){
* coeffs != RationalVectorSentinal,
* coeffs->size() = a.size() + 1,
* for i in [0,a.size) : coeff[i] corresponds to a[i], and
- * coeff.back() corresponds to the current constraint.
+ * coeff.back() corresponds to the current constraint.
*/
void Constraint::impliedByFarkas(const ConstraintCPVec& a, RationalVectorCP coeffs, bool nowInConflict){
Debug("constraints::pf") << "impliedByFarkas(" << this;
@@ -1359,10 +1359,9 @@ void Constraint::impliedByFarkas(const ConstraintCPVec& a, RationalVectorCP coef
Assert(negationHasProof() == nowInConflict);
Assert(allHaveProof(a));
- Assert(PROOF_ON() == (coeffs != RationalVectorCPSentinel));
- // !PROOF_ON() => coeffs == RationalVectorCPSentinel
- // PROOF_ON() => coeffs->size() == a.size() + 1
- Assert(!PROOF_ON() || coeffs->size() == a.size() + 1);
+ Assert(ARITH_PROOF_ON() == (coeffs != RationalVectorCPSentinel));
+ Assert(!ARITH_PROOF_ON() || coeffs->size() == a.size() + 1);
+
Assert(a.size() >= 1);
d_database->d_antecedents.push_back(NullConstraint);
@@ -1374,10 +1373,13 @@ void Constraint::impliedByFarkas(const ConstraintCPVec& a, RationalVectorCP coef
AntecedentId antecedentEnd = d_database->d_antecedents.size() - 1;
RationalVectorCP coeffsCopy;
- if(PROOF_ON()){
+ if (ARITH_PROOF_ON())
+ {
Assert(coeffs != RationalVectorCPSentinel);
coeffsCopy = new RationalVector(*coeffs);
- } else {
+ }
+ else
+ {
coeffsCopy = RationalVectorCPSentinel;
}
d_database->pushConstraintRule(ConstraintRule(this, FarkasAP, antecedentEnd, coeffsCopy));
diff --git a/src/theory/arith/constraint.h b/src/theory/arith/constraint.h
index b32616a04..3caccdebd 100644
--- a/src/theory/arith/constraint.h
+++ b/src/theory/arith/constraint.h
@@ -75,9 +75,9 @@
#ifndef CVC4__THEORY__ARITH__CONSTRAINT_H
#define CVC4__THEORY__ARITH__CONSTRAINT_H
-#include <unordered_map>
#include <list>
#include <set>
+#include <unordered_map>
#include <vector>
#include "base/configuration_private.h"
@@ -85,12 +85,12 @@
#include "context/cdqueue.h"
#include "context/context.h"
#include "expr/node.h"
-#include "proof/proof.h"
#include "theory/arith/arithvar.h"
#include "theory/arith/callbacks.h"
#include "theory/arith/congruence_manager.h"
#include "theory/arith/constraint_forward.h"
#include "theory/arith/delta_rational.h"
+#include "theory/arith/proof_macros.h"
namespace CVC4 {
namespace theory {
@@ -252,11 +252,11 @@ struct PerVariableDatabase{
}
};
-
/**
* If proofs are on, there is a vector of rationals for farkas coefficients.
- * This is the owner of the memory for the vector, and calls delete upon cleanup.
- *
+ * This is the owner of the memory for the vector, and calls delete upon
+ * cleanup.
+ *
*/
struct ConstraintRule {
ConstraintP d_constraint;
@@ -302,17 +302,13 @@ struct ConstraintRule {
* We do however use all of the constraints by requiring non-zero
* coefficients.
*/
-#if IS_PROOFS_BUILD
RationalVectorCP d_farkasCoefficients;
-#endif /* IS_PROOFS_BUILD */
ConstraintRule()
: d_constraint(NullConstraint)
, d_proofType(NoAP)
, d_antecedentEnd(AntecedentIdSentinel)
{
-#if IS_PROOFS_BUILD
d_farkasCoefficients = RationalVectorCPSentinel;
-#endif /* IS_PROOFS_BUILD */
}
ConstraintRule(ConstraintP con, ArithProofType pt)
@@ -320,18 +316,14 @@ struct ConstraintRule {
, d_proofType(pt)
, d_antecedentEnd(AntecedentIdSentinel)
{
-#if IS_PROOFS_BUILD
d_farkasCoefficients = RationalVectorCPSentinel;
-#endif /* IS_PROOFS_BUILD */
}
ConstraintRule(ConstraintP con, ArithProofType pt, AntecedentId antecedentEnd)
: d_constraint(con)
, d_proofType(pt)
, d_antecedentEnd(antecedentEnd)
{
-#if IS_PROOFS_BUILD
d_farkasCoefficients = RationalVectorCPSentinel;
-#endif /* IS_PROOFS_BUILD */
}
ConstraintRule(ConstraintP con, ArithProofType pt, AntecedentId antecedentEnd, RationalVectorCP coeffs)
@@ -339,10 +331,8 @@ struct ConstraintRule {
, d_proofType(pt)
, d_antecedentEnd(antecedentEnd)
{
- Assert(PROOF_ON() || coeffs == RationalVectorCPSentinel);
-#if IS_PROOFS_BUILD
+ Assert(ARITH_PROOF_ON() || coeffs == RationalVectorCPSentinel);
d_farkasCoefficients = coeffs;
-#endif /* IS_PROOFS_BUILD */
}
void print(std::ostream& out) const;
@@ -750,7 +740,7 @@ class Constraint {
/**
* If the constraint
- * canBePropagated() and
+ * canBePropagated() and
* !assertedToTheTheory(),
* the constraint is added to the database's propagation queue.
*
@@ -789,9 +779,11 @@ class Constraint {
ConstraintP constraint = crp->d_constraint;
Assert(constraint->d_crid != ConstraintRuleIdSentinel);
constraint->d_crid = ConstraintRuleIdSentinel;
-
- PROOF(if (crp->d_farkasCoefficients != RationalVectorCPSentinel) {
- delete crp->d_farkasCoefficients;
+ ARITH_PROOF({
+ if (crp->d_farkasCoefficients != RationalVectorCPSentinel)
+ {
+ delete crp->d_farkasCoefficients;
+ }
});
}
};
@@ -876,10 +868,11 @@ class Constraint {
return getConstraintRule().d_antecedentEnd;
}
- inline RationalVectorCP getFarkasCoefficients() const {
- return NULLPROOF(getConstraintRule().d_farkasCoefficients);
+ inline RationalVectorCP getFarkasCoefficients() const
+ {
+ return ARITH_NULLPROOF(getConstraintRule().d_farkasCoefficients);
}
-
+
void debugPrint() const;
/**
@@ -1051,8 +1044,7 @@ private:
* The index in this list is the proper ordering of the proofs.
*/
ConstraintRuleList d_constraintProofs;
-
-
+
/**
* Contains the exact list of constraints that can be used for propagation.
*/
@@ -1100,9 +1092,9 @@ private:
const Rational d_one;
const Rational d_negOne;
-
+
friend class Constraint;
-
+
public:
ConstraintDatabase( context::Context* satContext,
@@ -1209,7 +1201,7 @@ public:
/** AntecendentID must be in range. */
ConstraintCP getAntecedent(AntecedentId p) const;
-
+
private:
/** returns true if cons is now in conflict. */
bool handleUnateProp(ConstraintP ant, ConstraintP cons);
diff --git a/src/theory/arith/linear_equality.cpp b/src/theory/arith/linear_equality.cpp
index 7eb2f3f9e..3c4f678a2 100644
--- a/src/theory/arith/linear_equality.cpp
+++ b/src/theory/arith/linear_equality.cpp
@@ -510,11 +510,11 @@ void LinearEqualityModule::propagateBasicFromRow(ConstraintP c){
RowIndex ridx = d_tableau.basicToRowIndex(basic);
ConstraintCPVec bounds;
- RationalVectorP coeffs = NULLPROOF(new RationalVector());
+ RationalVectorP coeffs = ARITH_NULLPROOF(new RationalVector());
propagateRow(bounds, ridx, upperBound, c, coeffs);
c->impliedByFarkas(bounds, coeffs, false);
c->tryToPropagate();
-
+
if(coeffs != RationalVectorPSentinel) { delete coeffs; }
}
@@ -524,9 +524,9 @@ void LinearEqualityModule::propagateBasicFromRow(ConstraintP c){
* The proof is in terms of the other constraints and the negation of c, ~c.
*
* A row has the form:
- * sum a_i * x_i = 0
+ * sum a_i * x_i = 0
* or
- * sx + sum r y + sum q z = 0
+ * sx + sum r y + sum q z = 0
* where r > 0 and q < 0.
*
* If rowUp, we are proving c
@@ -555,7 +555,7 @@ void LinearEqualityModule::propagateRow(ConstraintCPVec& into, RowIndex ridx, bo
Assert(farkas->empty());
farkas->push_back(Rational(0));
}
-
+
ArithVar v = c->getVariable();
Debug("arith::propagateRow") << "LinearEqualityModule::propagateRow("
<< ridx << ", " << rowUp << ", " << v << ") start" << endl;
@@ -563,7 +563,7 @@ void LinearEqualityModule::propagateRow(ConstraintCPVec& into, RowIndex ridx, bo
const Rational& multiple = rowUp ? d_one : d_negOne;
Debug("arith::propagateRow") << "multiple: " << multiple << endl;
-
+
Tableau::RowIterator iter = d_tableau.ridRowIterator(ridx);
for(; !iter.atEnd(); ++iter){
const Tableau::Entry& entry = *iter;
@@ -595,8 +595,8 @@ void LinearEqualityModule::propagateRow(ConstraintCPVec& into, RowIndex ridx, bo
if(farkas != RationalVectorPSentinel){
Assert(farkas->front().isZero());
Rational multAij = multiple * a_ij;
- Debug("arith::propagateRow") << "("<<multAij<<") ";
- farkas->front() = multAij;
+ Debug("arith::propagateRow") << "(" << multAij << ") ";
+ farkas->front() = multAij;
}
Debug("arith::propagateRow") << c << endl;
@@ -605,10 +605,10 @@ void LinearEqualityModule::propagateRow(ConstraintCPVec& into, RowIndex ridx, bo
ConstraintCP bound = selectUb
? d_variables.getUpperBoundConstraint(nonbasic)
: d_variables.getLowerBoundConstraint(nonbasic);
-
+
if(farkas != RationalVectorPSentinel){
Rational multAij = multiple * a_ij;
- Debug("arith::propagateRow") << "("<<multAij<<") ";
+ Debug("arith::propagateRow") << "(" << multAij << ") ";
farkas->push_back(multAij);
}
Assert(bound != NullConstraint);
@@ -678,7 +678,7 @@ ConstraintP LinearEqualityModule::weakestExplanation(bool aboveUpper, DeltaRatio
* If !aboveUpper, then the conflict is with the constraint c : x_b >= l_b.
*
* A row has the form:
- * -x_b sum a_i * x_i = 0
+ * -x_b sum a_i * x_i = 0
* or
* -x_b + sum r y + sum q z = 0,
* x_b = sum r y + sum q z
@@ -724,7 +724,7 @@ ConstraintCP LinearEqualityModule::minimallyWeakConflict(bool aboveUpper, ArithV
Assert(assignment < d_variables.getLowerBound(basicVar));
surplus = d_variables.getLowerBound(basicVar) - assignment;
}
-
+
bool anyWeakenings = false;
for(Tableau::RowIterator i = d_tableau.basicRowIterator(basicVar); !i.atEnd(); ++i){
const Tableau::Entry& entry = *i;
diff --git a/src/theory/arith/nl/nl_model.cpp b/src/theory/arith/nl/nl_model.cpp
index cc10d6659..cfa153a56 100644
--- a/src/theory/arith/nl/nl_model.cpp
+++ b/src/theory/arith/nl/nl_model.cpp
@@ -16,6 +16,7 @@
#include "expr/node_algorithm.h"
#include "options/arith_options.h"
+#include "options/smt_options.h"
#include "options/theory_options.h"
#include "theory/arith/arith_msum.h"
#include "theory/arith/arith_utilities.h"
diff --git a/src/theory/arith/theory_arith.cpp b/src/theory/arith/theory_arith.cpp
index ea751ca74..762634ce7 100644
--- a/src/theory/arith/theory_arith.cpp
+++ b/src/theory/arith/theory_arith.cpp
@@ -41,7 +41,6 @@ TheoryArith::TheoryArith(context::Context* c,
d_internal(
new TheoryArithPrivate(*this, c, u, out, valuation, logicInfo, pnm)),
d_ppRewriteTimer("theory::arith::ppRewriteTimer"),
- d_proofRecorder(nullptr),
d_astate(*d_internal, c, u, valuation)
{
smtStatisticsRegistry()->registerStat(&d_ppRewriteTimer);
diff --git a/src/theory/arith/theory_arith.h b/src/theory/arith/theory_arith.h
index bfe30db61..6adf8f66a 100644
--- a/src/theory/arith/theory_arith.h
+++ b/src/theory/arith/theory_arith.h
@@ -18,7 +18,6 @@
#pragma once
#include "expr/node.h"
-#include "proof/arith_proof_recorder.h"
#include "theory/arith/arith_state.h"
#include "theory/arith/theory_arith_private_forward.h"
#include "theory/theory.h"
@@ -41,11 +40,6 @@ class TheoryArith : public Theory {
TimerStat d_ppRewriteTimer;
- /**
- * @brief Where to store Farkas proofs of lemmas
- */
- proof::ArithProofRecorder * d_proofRecorder;
-
public:
TheoryArith(context::Context* c,
context::UserContext* u,
@@ -110,11 +104,6 @@ class TheoryArith : public Theory {
std::pair<bool, Node> entailmentCheck(TNode lit) override;
- void setProofRecorder(proof::ArithProofRecorder* proofRecorder)
- {
- d_proofRecorder = proofRecorder;
- }
-
private:
/** The state object wrapping TheoryArithPrivate */
ArithState d_astate;
diff --git a/src/theory/arith/theory_arith_private.cpp b/src/theory/arith/theory_arith_private.cpp
index 7f521e2f9..8a780116c 100644
--- a/src/theory/arith/theory_arith_private.cpp
+++ b/src/theory/arith/theory_arith_private.cpp
@@ -624,7 +624,7 @@ bool TheoryArithPrivate::AssertLower(ConstraintP constraint){
ConstraintP ubc = d_partialModel.getUpperBoundConstraint(x_i);
ConstraintP negation = constraint->getNegation();
negation->impliedByUnate(ubc, true);
-
+
raiseConflict(constraint);
++(d_statistics.d_statAssertLowerConflicts);
@@ -757,7 +757,7 @@ bool TheoryArithPrivate::AssertUpper(ConstraintP constraint){
if(d_partialModel.greaterThanUpperBound(x_i, c_i) ){ // \upperbound(x_i) <= c_i
return false; //sat
}
-
+
// cmpToLb = \lowerbound(x_i).cmp(c_i)
int cmpToLB = d_partialModel.cmpToLowerBound(x_i, c_i);
if( cmpToLB < 0 ){ // \upperbound(x_i) < \lowerbound(x_i)
@@ -802,7 +802,7 @@ bool TheoryArithPrivate::AssertUpper(ConstraintP constraint){
++(d_statistics.d_statDisequalityConflicts);
raiseConflict(eq);
return true;
- }
+ }
}
}else if(cmpToLB > 0){
// l <= x <= u and l < u
@@ -1291,8 +1291,10 @@ void TheoryArithPrivate::setupVariableList(const VarList& vl){
}else{
if (d_nonlinearExtension == nullptr)
{
- if( vlNode.getKind()==kind::EXPONENTIAL || vlNode.getKind()==kind::SINE ||
- vlNode.getKind()==kind::COSINE || vlNode.getKind()==kind::TANGENT ){
+ if (vlNode.getKind() == kind::EXPONENTIAL
+ || vlNode.getKind() == kind::SINE || vlNode.getKind() == kind::COSINE
+ || vlNode.getKind() == kind::TANGENT)
+ {
d_nlIncomplete = true;
}
}
@@ -1737,7 +1739,6 @@ ConstraintP TheoryArithPrivate::constraintFromFactQueue(){
} else {
Debug("arith::constraint") << "already has proof: " << constraint->externalExplainByAssertions() << endl;
}
-
if(Debug.isOn("arith::negatedassumption") && inConflict){
ConstraintP negation = constraint->getNegation();
@@ -1905,7 +1906,7 @@ void TheoryArithPrivate::outputConflicts(){
Debug("arith::conflict") << "outputting conflicts" << std::endl;
Assert(anyConflict());
static unsigned int conflicts = 0;
-
+
if(!conflictQueueEmpty()){
Assert(!d_conflicts.empty());
for(size_t i = 0, i_end = d_conflicts.size(); i < i_end; ++i){
@@ -1923,35 +1924,6 @@ void TheoryArithPrivate::outputConflicts(){
++conflicts;
Debug("arith::conflict") << "d_conflicts[" << i << "] " << conflict
<< " has proof: " << hasProof << endl;
- PROOF(if (d_containing.d_proofRecorder && confConstraint->hasFarkasProof()
- && pf.d_farkasCoefficients->size()
- == conflict.getNumChildren()) {
- // The Farkas coefficients and the children of `conflict` seem to be in
- // opposite orders... There is some relevant documentation in the
- // comment for the d_farkasCoefficients field in "constraint.h"
- //
- // Anyways, we reverse the children in `conflict` here.
- NodeBuilder<> conflictInFarkasCoefficientOrder(kind::AND);
- for (size_t j = 0, nchildren = conflict.getNumChildren(); j < nchildren;
- ++j)
- {
- conflictInFarkasCoefficientOrder
- << conflict[conflict.getNumChildren() - j - 1];
- }
-
- if (Debug.isOn("arith::pf::tree")) {
- confConstraint->printProofTree(Debug("arith::pf::tree"));
- confConstraint->getNegation()->printProofTree(Debug("arith::pf::tree"));
- }
-
- Assert(conflict.getNumChildren() == pf.d_farkasCoefficients->size());
- if (confConstraint->hasSimpleFarkasProof()
- && confConstraint->getNegation()->isPossiblyTightenedAssumption())
- {
- d_containing.d_proofRecorder->saveFarkasCoefficients(
- conflictInFarkasCoefficientOrder, pf.d_farkasCoefficients);
- }
- })
if(Debug.isOn("arith::normalize::external")){
conflict = flattenAndSort(conflict);
Debug("arith::conflict") << "(normalized to) " << conflict << endl;
@@ -2190,7 +2162,6 @@ std::pair<ConstraintP, ArithVar> TheoryArithPrivate::replayGetConstraint(const D
return make_pair(imp, added);
}
}
-
ConstraintP newc = d_constraintDatabase.getConstraint(v, t, dr);
d_replayConstraints.push_back(newc);
@@ -2337,7 +2308,7 @@ void TheoryArithPrivate::tryBranchCut(ApproximateSimplex* approx, int nid, Branc
// ConstraintCPVec& back = conflicts.back();
// back.push_back(conflicting);
// back.push_back(negConflicting);
-
+
// // remove the floor/ceiling contraint implied by bcneg
// Constraint::assertionFringe(back);
}
@@ -2375,14 +2346,15 @@ void TheoryArithPrivate::replayAssert(ConstraintP c) {
}else{
Debug("approx::replayAssert") << "replayAssert " << c << " has explanation" << endl;
}
- Debug("approx::replayAssert") << "replayAssertion " << c << endl;
+ Debug("approx::replayAssert") << "replayAssertion " << c << endl;
if(inConflict){
raiseConflict(c);
}else{
assertionCases(c);
}
}else{
- Debug("approx::replayAssert") << "replayAssert " << c << " already asserted" << endl;
+ Debug("approx::replayAssert")
+ << "replayAssert " << c << " already asserted" << endl;
}
}
@@ -2551,7 +2523,7 @@ std::vector<ConstraintCPVec> TheoryArithPrivate::replayLogRec(ApproximateSimplex
SimplexDecisionProcedure& simplex = selectSimplex(true);
simplex.findModel(false);
- // can change d_qflraStatus
+ // can change d_qflraStatus
d_linEq.stopTrackingBoundCounts();
d_partialModel.startQueueingBoundCounts();
@@ -3101,13 +3073,13 @@ bool TheoryArithPrivate::solveRealRelaxation(Theory::Effort effortLevel){
<< " " << useApprox
<< " " << safeToCallApprox()
<< endl;
-
+
bool noPivotLimitPass1 = noPivotLimit && !useApprox;
d_qflraStatus = simplex.findModel(noPivotLimitPass1);
Debug("TheoryArithPrivate::solveRealRelaxation")
<< "solveRealRelaxation()" << " pass1 " << d_qflraStatus << endl;
-
+
if(d_qflraStatus == Result::SAT_UNKNOWN && useApprox && safeToCallApprox()){
// pass2: fancy-final
static const int32_t relaxationLimit = 10000;
@@ -3275,7 +3247,6 @@ bool TheoryArithPrivate::solveRealRelaxation(Theory::Effort effortLevel){
// if(!useFancyFinal){
// d_qflraStatus = simplex.findModel(noPivotLimit);
// }else{
-
// if(d_qflraStatus == Result::SAT_UNKNOWN){
// //Message() << "got sat unknown" << endl;
@@ -3711,7 +3682,7 @@ Node TheoryArithPrivate::branchIntegerVariable(ArithVar x) const {
Integer ceil_d = d.ceiling();
Rational f = r - floor_d;
// Multiply by -1 to get abs value.
- Rational c = (r - ceil_d) * (-1);
+ Rational c = (r - ceil_d) * (-1);
Integer nearest = (c > f) ? floor_d : ceil_d;
// Prioritize trying a simple rounding of the real solution first,
@@ -4651,7 +4622,6 @@ bool TheoryArithPrivate::tryToPropagate(RowIndex ridx, bool rowUp, ArithVar v, b
ConstraintP implied = d_constraintDatabase.getBestImpliedBound(v, t, bound);
if(implied != NullConstraint){
-
return rowImplicationCanBeApplied(ridx, rowUp, implied);
}
}
@@ -4699,9 +4669,8 @@ bool TheoryArithPrivate::rowImplicationCanBeApplied(RowIndex ridx, bool rowUp, C
if( !assertedToTheTheory && canBePropagated && !hasProof ){
ConstraintCPVec explain;
-
- PROOF(d_farkasBuffer.clear());
- RationalVectorP coeffs = NULLPROOF(&d_farkasBuffer);
+ ARITH_PROOF(d_farkasBuffer.clear());
+ RationalVectorP coeffs = ARITH_NULLPROOF(&d_farkasBuffer);
// After invoking `propegateRow`:
// * coeffs[0] is for implied
@@ -4716,38 +4685,6 @@ bool TheoryArithPrivate::rowImplicationCanBeApplied(RowIndex ridx, bool rowUp, C
}
Node implication = implied->externalImplication(explain);
Node clause = flattenImplication(implication);
- PROOF(if (d_containing.d_proofRecorder
- && coeffs != RationalVectorCPSentinel
- && coeffs->size() == clause.getNumChildren()) {
- Debug("arith::prop") << "implied : " << implied << std::endl;
- Debug("arith::prop") << "implication: " << implication << std::endl;
- Debug("arith::prop") << "coeff len: " << coeffs->size() << std::endl;
- Debug("arith::prop") << "exp : " << explain << std::endl;
- Debug("arith::prop") << "clause : " << clause << std::endl;
- Debug("arith::prop")
- << "clause len: " << clause.getNumChildren() << std::endl;
- Debug("arith::prop") << "exp len: " << explain.size() << std::endl;
- // Using the information from the above comment we assemble a conflict
- // AND in coefficient order
- NodeBuilder<> conflictInFarkasCoefficientOrder(kind::AND);
- conflictInFarkasCoefficientOrder << implication[1].negate();
- for (const Node& antecedent : implication[0])
- {
- Debug("arith::prop") << " ante: " << antecedent << std::endl;
- conflictInFarkasCoefficientOrder << antecedent;
- }
-
- Assert(coeffs != RationalVectorPSentinel);
- Assert(conflictInFarkasCoefficientOrder.getNumChildren()
- == coeffs->size());
- if (std::all_of(explain.begin(), explain.end(), [](ConstraintCP c) {
- return c->isAssumption() || c->hasIntTightenProof();
- }))
- {
- d_containing.d_proofRecorder->saveFarkasCoefficients(
- conflictInFarkasCoefficientOrder, coeffs);
- }
- })
outputLemma(clause);
}else{
Assert(!implied->negationHasProof());
diff --git a/src/theory/arrays/array_proof_reconstruction.cpp b/src/theory/arrays/array_proof_reconstruction.cpp
deleted file mode 100644
index abc4857e8..000000000
--- a/src/theory/arrays/array_proof_reconstruction.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/********************* */
-/*! \file array_proof_reconstruction.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Guy Katz, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** [[ Add lengthier description here ]]
-
- ** \todo document this file
-
-**/
-
-#include "theory/arrays/array_proof_reconstruction.h"
-
-#include <memory>
-
-namespace CVC4 {
-namespace theory {
-namespace arrays {
-
-ArrayProofReconstruction::ArrayProofReconstruction(const eq::EqualityEngine* equalityEngine)
- : d_equalityEngine(equalityEngine) {
-}
-
-void ArrayProofReconstruction::setRowMergeTag(unsigned tag) {
- d_reasonRow = tag;
-}
-
-void ArrayProofReconstruction::setRow1MergeTag(unsigned tag) {
- d_reasonRow1 = tag;
-}
-
-void ArrayProofReconstruction::setExtMergeTag(unsigned tag) {
- d_reasonExt = tag;
-}
-
-void ArrayProofReconstruction::notify(
- unsigned reasonType, Node reason, Node a, Node b,
- std::vector<TNode>& equalities, eq::EqProof* proof) const {
- Debug("pf::array") << "ArrayProofReconstruction::notify( "
- << reason << ", " << a << ", " << b << std::endl;
-
-
- if (reasonType == d_reasonExt) {
- if (proof) {
- // Todo: here we assume that a=b is an assertion. We should probably call
- // explain() recursively, to explain this.
- std::shared_ptr<eq::EqProof> childProof = std::make_shared<eq::EqProof>();
- childProof->d_node = reason;
- proof->d_children.push_back(childProof);
- }
- }
-
- else if (reasonType == d_reasonRow) {
- // ROW rules mean that (i==k) OR ((a[i]:=t)[k] == a[k])
- // The equality here will be either (i == k) because ((a[i]:=t)[k] != a[k]),
- // or ((a[i]:=t)[k] == a[k]) because (i != k).
-
- if (proof) {
- if (a.getKind() == kind::SELECT) {
- // This is the case of ((a[i]:=t)[k] == a[k]) because (i != k).
-
- // The edge is ((a[i]:=t)[k], a[k]), or (a[k], (a[i]:=t)[k]). This flag should be
- // false in the first case and true in the second case.
- bool currentNodeIsUnchangedArray;
-
- Assert(a.getNumChildren() == 2);
- Assert(b.getNumChildren() == 2);
-
- if (a[0].getKind() == kind::VARIABLE || a[0].getKind() == kind::SKOLEM) {
- currentNodeIsUnchangedArray = true;
- } else if (b[0].getKind() == kind::VARIABLE || b[0].getKind() == kind::SKOLEM) {
- currentNodeIsUnchangedArray = false;
- } else {
- Assert(a[0].getKind() == kind::STORE);
- Assert(b[0].getKind() == kind::STORE);
-
- if (a[0][0] == b[0]) {
- currentNodeIsUnchangedArray = false;
- } else if (b[0][0] == a[0]) {
- currentNodeIsUnchangedArray = true;
- } else {
- Unreachable();
- }
- }
-
- Node indexOne = currentNodeIsUnchangedArray ? a[1] : a[0][1];
- Node indexTwo = currentNodeIsUnchangedArray ? b[0][1] : b[1];
-
- // Some assertions to ensure that the theory of arrays behaves as expected
- Assert(a[1] == b[1]);
- if (currentNodeIsUnchangedArray) {
- Assert(a[0] == b[0][0]);
- } else {
- Assert(a[0][0] == b[0]);
- }
-
- Debug("pf::ee") << "Getting explanation for ROW guard: "
- << indexOne << " != " << indexTwo << std::endl;
-
- std::shared_ptr<eq::EqProof> childProof =
- std::make_shared<eq::EqProof>();
- d_equalityEngine->explainEquality(indexOne, indexTwo, false, equalities,
- childProof.get());
-
- // It could be that the guard condition is a constant disequality. In
- // this case, we need to change it to a different format.
- bool haveNegChild = false;
- for (unsigned i = 0; i < childProof->d_children.size(); ++i) {
- if (childProof->d_children[i]->d_node.getKind() == kind::NOT)
- haveNegChild = true;
- }
-
- if ((childProof->d_children.size() != 0) &&
- (childProof->d_id == theory::eq::MERGED_THROUGH_CONSTANTS || !haveNegChild)) {
- // The proof has two children, explaining why each index is a (different) constant.
- Assert(childProof->d_children.size() == 2);
-
- Node constantOne, constantTwo;
- // Each subproof explains why one of the indices is constant.
-
- if (childProof->d_children[0]->d_id == theory::eq::MERGED_THROUGH_REFLEXIVITY) {
- constantOne = childProof->d_children[0]->d_node;
- } else {
- Assert(childProof->d_children[0]->d_node.getKind() == kind::EQUAL);
- if ((childProof->d_children[0]->d_node[0] == indexOne) ||
- (childProof->d_children[0]->d_node[0] == indexTwo)) {
- constantOne = childProof->d_children[0]->d_node[1];
- } else {
- constantOne = childProof->d_children[0]->d_node[0];
- }
- }
-
- if (childProof->d_children[1]->d_id == theory::eq::MERGED_THROUGH_REFLEXIVITY) {
- constantTwo = childProof->d_children[1]->d_node;
- } else {
- Assert(childProof->d_children[1]->d_node.getKind() == kind::EQUAL);
- if ((childProof->d_children[1]->d_node[0] == indexOne) ||
- (childProof->d_children[1]->d_node[0] == indexTwo)) {
- constantTwo = childProof->d_children[1]->d_node[1];
- } else {
- constantTwo = childProof->d_children[1]->d_node[0];
- }
- }
-
- std::shared_ptr<eq::EqProof> constantDisequalityProof =
- std::make_shared<eq::EqProof>();
- constantDisequalityProof->d_id = theory::eq::MERGED_THROUGH_CONSTANTS;
- constantDisequalityProof->d_node =
- NodeManager::currentNM()->mkNode(kind::EQUAL, constantOne, constantTwo).negate();
-
- // Middle is where we need to insert the new disequality
- std::vector<std::shared_ptr<eq::EqProof>>::iterator middle =
- childProof->d_children.begin();
- ++middle;
-
- childProof->d_children.insert(middle, constantDisequalityProof);
-
- childProof->d_id = theory::eq::MERGED_THROUGH_TRANS;
- childProof->d_node =
- NodeManager::currentNM()->mkNode(kind::EQUAL, indexOne, indexTwo).negate();
- }
-
- proof->d_children.push_back(childProof);
- } else {
- // This is the case of (i == k) because ((a[i]:=t)[k] != a[k]),
-
- Node indexOne = a;
- Node indexTwo = b;
-
- Debug("pf::ee") << "The two indices are: " << indexOne << ", " << indexTwo << std::endl
- << "The reason for the edge is: " << reason << std::endl;
-
- Assert(reason.getNumChildren() == 2);
- Debug("pf::ee") << "Getting explanation for ROW guard: " << reason[1] << std::endl;
-
- std::shared_ptr<eq::EqProof> childProof =
- std::make_shared<eq::EqProof>();
- d_equalityEngine->explainEquality(reason[1][0], reason[1][1], false,
- equalities, childProof.get());
- proof->d_children.push_back(childProof);
- }
- }
-
- }
-
- else if (reasonType == d_reasonRow1) {
- // No special handling required at this time
- }
-}
-
-}/* CVC4::theory::arrays namespace */
-}/* CVC4::theory namespace */
-}/* CVC4 namespace */
diff --git a/src/theory/arrays/array_proof_reconstruction.h b/src/theory/arrays/array_proof_reconstruction.h
deleted file mode 100644
index a73b5dd08..000000000
--- a/src/theory/arrays/array_proof_reconstruction.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/********************* */
-/*! \file array_proof_reconstruction.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Paul Meng, Mathias Preiner, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Array-specific proof construction logic to be used during the
- ** equality engine's path reconstruction
- **/
-
-#include "cvc4_private.h"
-
-#ifndef CVC4__THEORY__ARRAYS__ARRAY_PROOF_RECONSTRUCTION_H
-#define CVC4__THEORY__ARRAYS__ARRAY_PROOF_RECONSTRUCTION_H
-
-#include "theory/uf/equality_engine.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arrays {
-
-/**
- * A callback class to be invoked whenever the equality engine traverses
- * an "array-owned" edge during path reconstruction.
- */
-
-class ArrayProofReconstruction : public eq::PathReconstructionNotify {
-public:
- ArrayProofReconstruction(const eq::EqualityEngine* equalityEngine);
-
- void notify(unsigned reasonType, Node reason, Node a, Node b,
- std::vector<TNode>& equalities,
- eq::EqProof* proof) const override;
-
- void setRowMergeTag(unsigned tag);
- void setRow1MergeTag(unsigned tag);
- void setExtMergeTag(unsigned tag);
-
-private:
- /** Merge tag for ROW applications */
- unsigned d_reasonRow;
- /** Merge tag for ROW1 applications */
- unsigned d_reasonRow1;
- /** Merge tag for EXT applications */
- unsigned d_reasonExt;
-
- const eq::EqualityEngine* d_equalityEngine;
-}; /* class ArrayProofReconstruction */
-
-}/* CVC4::theory::arrays namespace */
-}/* CVC4::theory namespace */
-}/* CVC4 namespace */
-
-#endif /* CVC4__THEORY__ARRAYS__ARRAY_PROOF_RECONSTRUCTION_H */
diff --git a/src/theory/arrays/theory_arrays.cpp b/src/theory/arrays/theory_arrays.cpp
index 3adcd4f49..603dc9639 100644
--- a/src/theory/arrays/theory_arrays.cpp
+++ b/src/theory/arrays/theory_arrays.cpp
@@ -23,9 +23,6 @@
#include "expr/node_algorithm.h"
#include "options/arrays_options.h"
#include "options/smt_options.h"
-#include "proof/array_proof.h"
-#include "proof/proof_manager.h"
-#include "proof/theory_proof.h"
#include "smt/command.h"
#include "smt/logic_exception.h"
#include "smt/smt_statistics_registry.h"
@@ -111,7 +108,6 @@ TheoryArrays::TheoryArrays(context::Context* c,
d_readTableContext(new context::Context()),
d_arrayMerges(c),
d_inCheckModel(false),
- d_proofReconstruction(nullptr),
d_dstrat(new TheoryArraysDecisionStrategy(this)),
d_dstratInit(false)
{
@@ -183,22 +179,6 @@ void TheoryArrays::finishInit()
{
d_equalityEngine->addFunctionKind(kind::ARR_TABLE_FUN);
}
-
- d_proofReconstruction.reset(new ArrayProofReconstruction(d_equalityEngine));
- d_reasonRow = d_equalityEngine->getFreshMergeReasonType();
- d_reasonRow1 = d_equalityEngine->getFreshMergeReasonType();
- d_reasonExt = d_equalityEngine->getFreshMergeReasonType();
-
- d_proofReconstruction->setRowMergeTag(d_reasonRow);
- d_proofReconstruction->setRow1MergeTag(d_reasonRow1);
- d_proofReconstruction->setExtMergeTag(d_reasonExt);
-
- d_equalityEngine->addPathReconstructionTrigger(d_reasonRow,
- d_proofReconstruction.get());
- d_equalityEngine->addPathReconstructionTrigger(d_reasonRow1,
- d_proofReconstruction.get());
- d_equalityEngine->addPathReconstructionTrigger(d_reasonExt,
- d_proofReconstruction.get());
}
/////////////////////////////////////////////////////////////////////////////
@@ -440,37 +420,6 @@ bool TheoryArrays::propagateLit(TNode literal)
}/* TheoryArrays::propagate(TNode) */
-void TheoryArrays::explain(TNode literal, std::vector<TNode>& assumptions,
- eq::EqProof* proof) {
- // Do the work
- bool polarity = literal.getKind() != kind::NOT;
- TNode atom = polarity ? literal : literal[0];
- //eq::EqProof * eqp = new eq::EqProof;
- // eq::EqProof * eqp = NULL;
- if (atom.getKind() == kind::EQUAL) {
- d_equalityEngine->explainEquality(
- atom[0], atom[1], polarity, assumptions, proof);
- } else {
- d_equalityEngine->explainPredicate(atom, polarity, assumptions, proof);
- }
- if (Debug.isOn("pf::array"))
- {
- if (proof)
- {
- Debug("pf::array") << " Proof is : " << std::endl;
- proof->debug_print("pf::array");
- }
-
- Debug("pf::array") << "Array: explain( " << literal << " ):" << std::endl
- << "\t";
- for (unsigned i = 0; i < assumptions.size(); ++i)
- {
- Debug("pf::array") << assumptions[i] << " ";
- }
- Debug("pf::array") << std::endl;
- }
-}
-
TNode TheoryArrays::weakEquivGetRep(TNode node) {
TNode pointer;
while (true) {
@@ -795,7 +744,8 @@ void TheoryArrays::preRegisterTermInternal(TNode node)
}
// Apply RIntro1 Rule
- d_equalityEngine->assertEquality(ni.eqNode(v), true, d_true, d_reasonRow1);
+ d_equalityEngine->assertEquality(
+ ni.eqNode(v), true, d_true, theory::eq::MERGED_THROUGH_ROW1);
d_infoMap.addStore(node, node);
d_infoMap.addInStore(a, node);
@@ -864,19 +814,32 @@ void TheoryArrays::preRegisterTerm(TNode node)
}
}
-TrustNode TheoryArrays::explain(TNode literal)
+void TheoryArrays::explain(TNode literal, Node& explanation)
{
- Node explanation = explain(literal, NULL);
- return TrustNode::mkTrustPropExp(literal, explanation, nullptr);
-}
-
-Node TheoryArrays::explain(TNode literal, eq::EqProof* proof) {
++d_numExplain;
Debug("arrays") << spaces(getSatContext()->getLevel())
<< "TheoryArrays::explain(" << literal << ")" << std::endl;
std::vector<TNode> assumptions;
- explain(literal, assumptions, proof);
- return mkAnd(assumptions);
+ // Do the work
+ bool polarity = literal.getKind() != kind::NOT;
+ TNode atom = polarity ? literal : literal[0];
+ if (atom.getKind() == kind::EQUAL)
+ {
+ d_equalityEngine->explainEquality(
+ atom[0], atom[1], polarity, assumptions, nullptr);
+ }
+ else
+ {
+ d_equalityEngine->explainPredicate(atom, polarity, assumptions, nullptr);
+ }
+ explanation = mkAnd(assumptions);
+}
+
+TrustNode TheoryArrays::explain(TNode literal)
+{
+ Node explanation;
+ explain(literal, explanation);
+ return TrustNode::mkTrustPropExp(literal, explanation, nullptr);
}
/////////////////////////////////////////////////////////////////////////////
@@ -1329,49 +1292,20 @@ void TheoryArrays::check(Effort e) {
TNode k;
// k is the skolem for this disequality.
- if (!d_proofsEnabled) {
- Debug("pf::array") << "Check: kind::NOT: array theory making a skolem" << std::endl;
-
- // If not in replay mode, generate a fresh skolem variable
- k = getSkolem(fact,
- "array_ext_index",
- indexType,
- "an extensional lemma index variable from the theory of arrays",
- false);
-
- // Register this skolem for the proof replay phase
- PROOF(ProofManager::getSkolemizationManager()->registerSkolem(fact, k));
- } else {
- if (!ProofManager::getSkolemizationManager()->hasSkolem(fact)) {
- // In the solution pass we didn't need this skolem. Therefore, we don't need it
- // in this reply pass, either.
- break;
- }
-
- // Reuse the same skolem as in the solution pass
- k = ProofManager::getSkolemizationManager()->getSkolem(fact);
- Debug("pf::array") << "Skolem = " << k << std::endl;
- }
-
+ Debug("pf::array")
+ << "Check: kind::NOT: array theory making a skolem"
+ << std::endl;
+ k = getSkolem(
+ fact,
+ "array_ext_index",
+ indexType,
+ "an extensional lemma index variable from the theory of arrays",
+ false);
Node ak = nm->mkNode(kind::SELECT, fact[0][0], k);
Node bk = nm->mkNode(kind::SELECT, fact[0][1], k);
Node eq = ak.eqNode(bk);
Node lemma = fact[0].orNode(eq.notNode());
- // In solve mode we don't care if ak and bk are registered. If they aren't, they'll be registered
- // when we output the lemma. However, in replay need the lemma to be propagated, and so we
- // preregister manually.
- if (d_proofsEnabled) {
- if (!d_equalityEngine->hasTerm(ak))
- {
- preRegisterTermInternal(ak);
- }
- if (!d_equalityEngine->hasTerm(bk))
- {
- preRegisterTermInternal(bk);
- }
- }
-
if (options::arraysPropagate() > 0 && d_equalityEngine->hasTerm(ak)
&& d_equalityEngine->hasTerm(bk))
{
@@ -1381,17 +1315,16 @@ void TheoryArrays::check(Effort e) {
<< "\teq = " << eq << std::endl
<< "\treason = " << fact << std::endl;
- d_equalityEngine->assertEquality(eq, false, fact, d_reasonExt);
+ d_equalityEngine->assertEquality(
+ eq, false, fact, theory::eq::MERGED_THROUGH_EXT);
++d_numProp;
}
- if (!d_proofsEnabled) {
- // If this is the solution pass, generate the lemma. Otherwise, don't generate it -
- // as this is the lemma that we're reproving...
- Trace("arrays-lem")<<"Arrays::addExtLemma " << lemma <<"\n";
- d_out->lemma(lemma);
- ++d_numExt;
- }
+ // If this is the solution pass, generate the lemma. Otherwise,
+ // don't generate it - as this is the lemma that we're reproving...
+ Trace("arrays-lem") << "Arrays::addExtLemma " << lemma << "\n";
+ d_out->lemma(lemma);
+ ++d_numExt;
} else {
Debug("pf::array") << "Check: kind::NOT: array theory NOT making a skolem" << std::endl;
d_modelConstraints.push_back(fact);
@@ -1480,7 +1413,7 @@ void TheoryArrays::check(Effort e) {
lemma = mkAnd(conjunctions, true);
// LSH FIXME: which kind of arrays lemma is this
Trace("arrays-lem") << "Arrays::addExtLemma " << lemma <<"\n";
- d_out->lemma(lemma, RULE_INVALID, LemmaProperty::SEND_ATOMS);
+ d_out->lemma(lemma, LemmaProperty::SEND_ATOMS);
d_readTableContext->pop();
Trace("arrays") << spaces(getSatContext()->getLevel()) << "Arrays::check(): done" << endl;
return;
@@ -1908,7 +1841,8 @@ void TheoryArrays::propagate(RowLemmaType lem)
if (!bjExists) {
preRegisterTermInternal(bj);
}
- d_equalityEngine->assertEquality(aj_eq_bj, true, reason, d_reasonRow);
+ d_equalityEngine->assertEquality(
+ aj_eq_bj, true, reason, theory::eq::MERGED_THROUGH_ROW);
++d_numProp;
return;
}
@@ -1919,7 +1853,8 @@ void TheoryArrays::propagate(RowLemmaType lem)
(aj.isConst() && bj.isConst()) ? d_true : aj.eqNode(bj).notNode();
Node i_eq_j = i.eqNode(j);
d_permRef.push_back(reason);
- d_equalityEngine->assertEquality(i_eq_j, true, reason, d_reasonRow);
+ d_equalityEngine->assertEquality(
+ i_eq_j, true, reason, theory::eq::MERGED_THROUGH_ROW);
++d_numProp;
return;
}
@@ -1971,19 +1906,18 @@ void TheoryArrays::queueRowLemma(RowLemmaType lem)
&& !d_equalityEngine->areDisequal(i, j, false))
{
Node i_eq_j;
- if (!d_proofsEnabled) {
- i_eq_j = d_valuation.ensureLiteral(i.eqNode(j)); // TODO: think about this
- } else {
- i_eq_j = i.eqNode(j);
- }
-
+ i_eq_j = d_valuation.ensureLiteral(i.eqNode(j)); // TODO: think about this
+#if 0
+ i_eq_j = i.eqNode(j);
+#endif
getOutputChannel().requirePhase(i_eq_j, true);
d_decisionRequests.push(i_eq_j);
}
// TODO: maybe add triggers here
- if ((options::arraysEagerLemmas() || bothExist) && !d_proofsEnabled) {
+ if (options::arraysEagerLemmas() || bothExist)
+ {
// Make sure that any terms introduced by rewriting are appropriately stored in the equality database
Node aj2 = Rewriter::rewrite(aj);
if (aj != aj2) {
@@ -2162,23 +2096,11 @@ bool TheoryArrays::dischargeLemmas()
void TheoryArrays::conflict(TNode a, TNode b) {
Debug("pf::array") << "TheoryArrays::Conflict called" << std::endl;
- std::shared_ptr<eq::EqProof> proof = d_proofsEnabled ?
- std::make_shared<eq::EqProof>() : nullptr;
- d_conflictNode = explain(a.eqNode(b), proof.get());
+ explain(a.eqNode(b), d_conflictNode);
if (!d_inCheckModel) {
- std::unique_ptr<ProofArray> proof_array;
-
- if (d_proofsEnabled) {
- proof->debug_print("pf::array");
- proof_array.reset(new ProofArray(proof,
- /*row=*/d_reasonRow,
- /*row1=*/d_reasonRow1,
- /*ext=*/d_reasonExt));
- }
-
- d_out->conflict(d_conflictNode, std::move(proof_array));
+ d_out->conflict(d_conflictNode);
}
d_conflict = true;
diff --git a/src/theory/arrays/theory_arrays.h b/src/theory/arrays/theory_arrays.h
index 9044b9950..8fdbde0ab 100644
--- a/src/theory/arrays/theory_arrays.h
+++ b/src/theory/arrays/theory_arrays.h
@@ -26,7 +26,6 @@
#include "context/cdhashset.h"
#include "context/cdqueue.h"
#include "theory/arrays/array_info.h"
-#include "theory/arrays/array_proof_reconstruction.h"
#include "theory/arrays/theory_arrays_rewriter.h"
#include "theory/theory.h"
#include "theory/uf/equality_engine.h"
@@ -129,15 +128,6 @@ class TheoryArrays : public Theory {
/** conflicts in setModelVal */
IntStat d_numSetModelValConflicts;
- // Merge reason types
-
- /** Merge tag for ROW applications */
- unsigned d_reasonRow;
- /** Merge tag for ROW1 applications */
- unsigned d_reasonRow1;
- /** Merge tag for EXT applications */
- unsigned d_reasonExt;
-
public:
TheoryArrays(context::Context* c,
context::UserContext* u,
@@ -215,9 +205,8 @@ class TheoryArrays : public Theory {
/** Should be called to propagate the literal. */
bool propagateLit(TNode literal);
- /** Explain why this literal is true by adding assumptions */
- void explain(TNode literal, std::vector<TNode>& assumptions,
- eq::EqProof* proof);
+ /** Explain why this literal is true by building an explanation */
+ void explain(TNode literal, Node& exp);
/** For debugging only- checks invariants about when things are preregistered*/
context::CDHashSet<Node, NodeHashFunction > d_isPreRegistered;
@@ -227,7 +216,6 @@ class TheoryArrays : public Theory {
public:
void preRegisterTerm(TNode n) override;
- Node explain(TNode n, eq::EqProof* proof);
TrustNode explain(TNode n) override;
/////////////////////////////////////////////////////////////////////////////
@@ -446,9 +434,6 @@ class TheoryArrays : public Theory {
bool d_inCheckModel;
int d_topLevel;
- /** An equality-engine callback for proof reconstruction */
- std::unique_ptr<ArrayProofReconstruction> d_proofReconstruction;
-
/**
* The decision strategy for the theory of arrays, which calls the
* getNextDecisionEngineRequest function below.
diff --git a/src/theory/bv/bitblast/aig_bitblaster.h b/src/theory/bv/bitblast/aig_bitblaster.h
index 1e1b5bab4..fef45cdf5 100644
--- a/src/theory/bv/bitblast/aig_bitblaster.h
+++ b/src/theory/bv/bitblast/aig_bitblaster.h
@@ -89,12 +89,6 @@ class AigBitblaster : public TBitblaster<Abc_Obj_t*>
prop::SatSolver* getSatSolver() override { return d_satSolver.get(); }
- void setProofLog(proof::BitVectorProof* bvp) override
- {
- // Proofs are currently not supported with ABC
- Unimplemented();
- }
-
class Statistics
{
public:
diff --git a/src/theory/bv/bitblast/bitblaster.h b/src/theory/bv/bitblast/bitblaster.h
index defc66b74..74e3c3f56 100644
--- a/src/theory/bv/bitblast/bitblaster.h
+++ b/src/theory/bv/bitblast/bitblaster.h
@@ -24,8 +24,8 @@
#include <vector>
#include "expr/node.h"
-#include "proof/bitvector_proof.h"
#include "prop/bv_sat_solver_notify.h"
+#include "prop/sat_solver.h"
#include "prop/sat_solver_types.h"
#include "smt/smt_engine_scope.h"
#include "theory/bv/bitblast/bitblast_strategies_template.h"
@@ -64,7 +64,6 @@ class TBitblaster
// sat solver used for bitblasting and associated CnfStream
std::unique_ptr<context::Context> d_nullContext;
std::unique_ptr<prop::CnfStream> d_cnfStream;
- proof::BitVectorProof* d_bvp;
void initAtomBBStrategies();
void initTermBBStrategies();
@@ -91,7 +90,6 @@ class TBitblaster
bool hasBBTerm(TNode node) const;
void getBBTerm(TNode node, Bits& bits) const;
virtual void storeBBTerm(TNode term, const Bits& bits);
- virtual void setProofLog(proof::BitVectorProof* bvp);
/**
* Return a constant representing the value of a in the model.
@@ -186,8 +184,7 @@ TBitblaster<T>::TBitblaster()
: d_termCache(),
d_modelCache(),
d_nullContext(new context::Context()),
- d_cnfStream(),
- d_bvp(nullptr)
+ d_cnfStream()
{
initAtomBBStrategies();
initTermBBStrategies();
@@ -218,20 +215,6 @@ void TBitblaster<T>::invalidateModelCache()
}
template <class T>
-void TBitblaster<T>::setProofLog(proof::BitVectorProof* bvp)
-{
- if (THEORY_PROOF_ON())
- {
- d_bvp = bvp;
- prop::SatSolver* satSolver = getSatSolver();
- bvp->attachToSatSolver(*satSolver);
- prop::SatVariable t = satSolver->trueVar();
- prop::SatVariable f = satSolver->falseVar();
- bvp->initCnfProof(d_cnfStream.get(), d_nullContext.get(), t, f);
- }
-}
-
-template <class T>
Node TBitblaster<T>::getTermModel(TNode node, bool fullModel)
{
if (d_modelCache.find(node) != d_modelCache.end()) return d_modelCache[node];
diff --git a/src/theory/bv/bitblast/eager_bitblaster.cpp b/src/theory/bv/bitblast/eager_bitblaster.cpp
index 4acd1d2f8..627a17bc5 100644
--- a/src/theory/bv/bitblast/eager_bitblaster.cpp
+++ b/src/theory/bv/bitblast/eager_bitblaster.cpp
@@ -72,7 +72,7 @@ EagerBitblaster::EagerBitblaster(TheoryBV* theory_bv, context::Context* c)
d_bitblastingRegistrar.get(),
d_nullContext.get(),
rm,
- options::proof(),
+ false,
"EagerBitblaster"));
}
@@ -87,8 +87,7 @@ void EagerBitblaster::bbFormula(TNode node)
}
else
{
- d_cnfStream->convertAndAssert(
- node, false, false, RULE_INVALID, TNode::null());
+ d_cnfStream->convertAndAssert(node, false, false);
}
}
@@ -116,10 +115,7 @@ void EagerBitblaster::bbAtom(TNode node)
? d_atomBBStrategies[normalized.getKind()](normalized, this)
: normalized;
- if (!options::proof())
- {
- atom_bb = Rewriter::rewrite(atom_bb);
- }
+ atom_bb = Rewriter::rewrite(atom_bb);
// asserting that the atom is true iff the definition holds
Node atom_definition =
@@ -127,21 +123,14 @@ void EagerBitblaster::bbAtom(TNode node)
AlwaysAssert(options::bitblastMode() == options::BitblastMode::EAGER);
storeBBAtom(node, atom_bb);
- d_cnfStream->convertAndAssert(
- atom_definition, false, false, RULE_INVALID, TNode::null());
+ d_cnfStream->convertAndAssert(atom_definition, false, false);
}
void EagerBitblaster::storeBBAtom(TNode atom, Node atom_bb) {
- if (d_bvp) {
- d_bvp->registerAtomBB(atom.toExpr(), atom_bb.toExpr());
- }
d_bbAtoms.insert(atom);
}
void EagerBitblaster::storeBBTerm(TNode node, const Bits& bits) {
- if (d_bvp) {
- d_bvp->registerTermBB(node.toExpr());
- }
d_termCache.insert(std::make_pair(node, bits));
}
diff --git a/src/theory/bv/bitblast/eager_bitblaster.h b/src/theory/bv/bitblast/eager_bitblaster.h
index a8b7ccbe5..da9488d43 100644
--- a/src/theory/bv/bitblast/eager_bitblaster.h
+++ b/src/theory/bv/bitblast/eager_bitblaster.h
@@ -23,8 +23,6 @@
#include "theory/bv/bitblast/bitblaster.h"
-#include "proof/bitvector_proof.h"
-#include "proof/resolution_bitvector_proof.h"
#include "prop/cnf_stream.h"
#include "prop/sat_solver.h"
diff --git a/src/theory/bv/bitblast/lazy_bitblaster.cpp b/src/theory/bv/bitblast/lazy_bitblaster.cpp
index 83e286f10..3109d6ed7 100644
--- a/src/theory/bv/bitblast/lazy_bitblaster.cpp
+++ b/src/theory/bv/bitblast/lazy_bitblaster.cpp
@@ -19,7 +19,6 @@
#include "theory/bv/bitblast/lazy_bitblaster.h"
#include "options/bv_options.h"
-#include "proof/proof_manager.h"
#include "prop/cnf_stream.h"
#include "prop/sat_solver.h"
#include "prop/sat_solver_factory.h"
@@ -84,7 +83,7 @@ TLazyBitblaster::TLazyBitblaster(context::Context* c,
d_nullRegistrar.get(),
d_nullContext.get(),
rm,
- options::proof(),
+ false,
"LazyBitblaster"));
d_satSolverNotify.reset(
@@ -161,8 +160,7 @@ void TLazyBitblaster::bbAtom(TNode node)
Assert(!atom_bb.isNull());
Node atom_definition = nm->mkNode(kind::EQUAL, node, atom_bb);
storeBBAtom(node, atom_bb);
- d_cnfStream->convertAndAssert(
- atom_definition, false, false, RULE_INVALID, TNode::null());
+ d_cnfStream->convertAndAssert(atom_definition, false, false);
return;
}
@@ -173,28 +171,19 @@ void TLazyBitblaster::bbAtom(TNode node)
? d_atomBBStrategies[normalized.getKind()](normalized, this)
: normalized;
- if (!options::proof())
- {
- atom_bb = Rewriter::rewrite(atom_bb);
- }
+ atom_bb = Rewriter::rewrite(atom_bb);
// asserting that the atom is true iff the definition holds
Node atom_definition = nm->mkNode(kind::EQUAL, node, atom_bb);
storeBBAtom(node, atom_bb);
- d_cnfStream->convertAndAssert(
- atom_definition, false, false, RULE_INVALID, TNode::null());
+ d_cnfStream->convertAndAssert(atom_definition, false, false);
}
void TLazyBitblaster::storeBBAtom(TNode atom, Node atom_bb) {
- // No need to store the definition for the lazy bit-blaster (unless proofs are enabled).
- if( d_bvp != NULL ){
- d_bvp->registerAtomBB(atom.toExpr(), atom_bb.toExpr());
- }
d_bbAtoms.insert(atom);
}
void TLazyBitblaster::storeBBTerm(TNode node, const Bits& bits) {
- if( d_bvp ){ d_bvp->registerTermBB(node.toExpr()); }
d_termCache.insert(std::make_pair(node, bits));
}
diff --git a/src/theory/bv/bitblast/lazy_bitblaster.h b/src/theory/bv/bitblast/lazy_bitblaster.h
index a355d42c4..bc930aec4 100644
--- a/src/theory/bv/bitblast/lazy_bitblaster.h
+++ b/src/theory/bv/bitblast/lazy_bitblaster.h
@@ -19,7 +19,6 @@
#ifndef CVC4__THEORY__BV__BITBLAST__LAZY_BITBLASTER_H
#define CVC4__THEORY__BV__BITBLAST__LAZY_BITBLASTER_H
-#include "proof/resolution_bitvector_proof.h"
#include "theory/bv/bitblast/bitblaster.h"
#include "context/cdhashmap.h"
diff --git a/src/theory/bv/bv_eager_solver.cpp b/src/theory/bv/bv_eager_solver.cpp
index 36aa72da3..d1490374d 100644
--- a/src/theory/bv/bv_eager_solver.cpp
+++ b/src/theory/bv/bv_eager_solver.cpp
@@ -33,8 +33,7 @@ EagerBitblastSolver::EagerBitblastSolver(context::Context* c, TheoryBV* bv)
d_bitblaster(),
d_aigBitblaster(),
d_useAig(options::bitvectorAig()),
- d_bv(bv),
- d_bvp(nullptr)
+ d_bv(bv)
{
}
@@ -55,10 +54,6 @@ void EagerBitblastSolver::initialize() {
#endif
} else {
d_bitblaster.reset(new EagerBitblaster(d_bv, d_context));
- THEORY_PROOF(if (d_bvp) {
- d_bitblaster->setProofLog(d_bvp);
- d_bvp->setBitblaster(d_bitblaster.get());
- });
}
}
@@ -127,11 +122,6 @@ bool EagerBitblastSolver::collectModelInfo(TheoryModel* m, bool fullModel)
return d_bitblaster->collectModelInfo(m, fullModel);
}
-void EagerBitblastSolver::setProofLog(proof::BitVectorProof* bvp)
-{
- d_bvp = bvp;
-}
-
} // namespace bv
} // namespace theory
} // namespace CVC4
diff --git a/src/theory/bv/bv_eager_solver.h b/src/theory/bv/bv_eager_solver.h
index 6182832e9..e0b55c23b 100644
--- a/src/theory/bv/bv_eager_solver.h
+++ b/src/theory/bv/bv_eager_solver.h
@@ -23,7 +23,6 @@
#include <vector>
#include "expr/node.h"
-#include "proof/resolution_bitvector_proof.h"
#include "theory/bv/theory_bv.h"
#include "theory/theory_model.h"
@@ -48,7 +47,6 @@ class EagerBitblastSolver {
bool isInitialized();
void initialize();
bool collectModelInfo(theory::TheoryModel* m, bool fullModel);
- void setProofLog(proof::BitVectorProof* bvp);
private:
context::CDHashSet<Node, NodeHashFunction> d_assertionSet;
@@ -61,7 +59,6 @@ class EagerBitblastSolver {
bool d_useAig;
TheoryBV* d_bv;
- proof::BitVectorProof* d_bvp;
}; // class EagerBitblastSolver
} // namespace bv
diff --git a/src/theory/bv/bv_subtheory.h b/src/theory/bv/bv_subtheory.h
index 725b61f95..f4b88b719 100644
--- a/src/theory/bv/bv_subtheory.h
+++ b/src/theory/bv/bv_subtheory.h
@@ -25,10 +25,6 @@
namespace CVC4 {
-namespace proof {
-class BitVectorProof;
-}
-
namespace theory {
class TheoryModel;
@@ -72,7 +68,6 @@ class SubtheorySolver {
SubtheorySolver(context::Context* c, TheoryBV* bv)
: d_context(c),
d_bv(bv),
- d_bvp(nullptr),
d_assertionQueue(c),
d_assertionIndex(c, 0) {}
virtual ~SubtheorySolver() {}
@@ -93,7 +88,7 @@ class SubtheorySolver {
return res;
}
virtual void assertFact(TNode fact) { d_assertionQueue.push_back(fact); }
- virtual void setProofLog(proof::BitVectorProof* bvp) {}
+
AssertionQueue::const_iterator assertionsBegin() {
return d_assertionQueue.begin();
}
@@ -107,8 +102,6 @@ class SubtheorySolver {
/** The bit-vector theory */
TheoryBV* d_bv;
- /** proof log */
- proof::ResolutionBitVectorProof* d_bvp;
AssertionQueue d_assertionQueue;
context::CDO<uint32_t> d_assertionIndex;
}; /* class SubtheorySolver */
diff --git a/src/theory/bv/bv_subtheory_bitblast.cpp b/src/theory/bv/bv_subtheory_bitblast.cpp
index 8f87bc4b8..28c70a5b8 100644
--- a/src/theory/bv/bv_subtheory_bitblast.cpp
+++ b/src/theory/bv/bv_subtheory_bitblast.cpp
@@ -18,7 +18,6 @@
#include "decision/decision_attributes.h"
#include "options/bv_options.h"
#include "options/decision_options.h"
-#include "proof/proof_manager.h"
#include "smt/smt_statistics_registry.h"
#include "theory/bv/abstraction.h"
#include "theory/bv/bitblast/lazy_bitblaster.h"
@@ -276,12 +275,6 @@ void BitblastSolver::setConflict(TNode conflict) {
d_bv->setConflict(final_conflict);
}
-void BitblastSolver::setProofLog(proof::BitVectorProof* bvp)
-{
- d_bitblaster->setProofLog( bvp );
- bvp->setBitblaster(d_bitblaster.get());
-}
-
}/* namespace CVC4::theory::bv */
}/* namespace CVC4::theory */
}/* namespace CVC4 */
diff --git a/src/theory/bv/bv_subtheory_bitblast.h b/src/theory/bv/bv_subtheory_bitblast.h
index a2b099609..60ef08d93 100644
--- a/src/theory/bv/bv_subtheory_bitblast.h
+++ b/src/theory/bv/bv_subtheory_bitblast.h
@@ -24,10 +24,6 @@
namespace CVC4 {
-namespace proof {
-class ResolutionBitVectorProof;
-}
-
namespace theory {
namespace bv {
@@ -79,7 +75,6 @@ public:
void bitblastQueue();
void setAbstraction(AbstractionModule* module);
uint64_t computeAtomWeight(TNode atom);
- void setProofLog(proof::BitVectorProof* bvp) override;
};
} /* namespace CVC4::theory::bv */
diff --git a/src/theory/bv/theory_bv.cpp b/src/theory/bv/theory_bv.cpp
index 1696d6185..d6492f177 100644
--- a/src/theory/bv/theory_bv.cpp
+++ b/src/theory/bv/theory_bv.cpp
@@ -18,8 +18,6 @@
#include "expr/node_algorithm.h"
#include "options/bv_options.h"
#include "options/smt_options.h"
-#include "proof/proof_manager.h"
-#include "proof/theory_proof.h"
#include "smt/smt_statistics_registry.h"
#include "theory/bv/abstraction.h"
#include "theory/bv/bv_eager_solver.h"
@@ -83,19 +81,19 @@ TheoryBV::TheoryBV(context::Context* c,
return;
}
- if (options::bitvectorEqualitySolver() && !options::proof())
+ if (options::bitvectorEqualitySolver())
{
d_subtheories.emplace_back(new CoreSolver(c, this, d_extTheory.get()));
d_subtheoryMap[SUB_CORE] = d_subtheories.back().get();
}
- if (options::bitvectorInequalitySolver() && !options::proof())
+ if (options::bitvectorInequalitySolver())
{
d_subtheories.emplace_back(new InequalitySolver(c, u, this));
d_subtheoryMap[SUB_INEQUALITY] = d_subtheories.back().get();
}
- if (options::bitvectorAlgebraicSolver() && !options::proof())
+ if (options::bitvectorAlgebraicSolver())
{
d_subtheories.emplace_back(new AlgebraicSolver(c, this));
d_subtheoryMap[SUB_ALGEBRAIC] = d_subtheories.back().get();
@@ -230,8 +228,11 @@ TrustNode TheoryBV::expandDefinition(Node node)
TNode num = node[0], den = node[1];
Node den_eq_0 = nm->mkNode(kind::EQUAL, den, utils::mkZero(width));
- Node divTotalNumDen = nm->mkNode(node.getKind() == kind::BITVECTOR_UDIV ? kind::BITVECTOR_UDIV_TOTAL :
- kind::BITVECTOR_UREM_TOTAL, num, den);
+ Node divTotalNumDen = nm->mkNode(node.getKind() == kind::BITVECTOR_UDIV
+ ? kind::BITVECTOR_UDIV_TOTAL
+ : kind::BITVECTOR_UREM_TOTAL,
+ num,
+ den);
Node divByZero = getBVDivByZero(node.getKind(), width);
Node divByZeroNum = nm->mkNode(kind::APPLY_UF, divByZero, num);
ret = nm->mkNode(kind::ITE, den_eq_0, divByZeroNum, divTotalNumDen);
@@ -327,7 +328,7 @@ void TheoryBV::check(Effort e)
if (done() && e<Theory::EFFORT_FULL) {
return;
}
-
+
//last call : do reductions on extended bitvector functions
if (e == Theory::EFFORT_LAST_CALL) {
std::vector<Node> nred = d_extTheory->getActive();
@@ -410,7 +411,7 @@ void TheoryBV::check(Effort e)
break;
}
}
-
+
//check extended functions
if (Theory::fullEffort(e)) {
//do inferences (adds external lemmas) TODO: this can be improved to add internal inferences
@@ -431,7 +432,9 @@ void TheoryBV::check(Effort e)
if( doExtfReductions( nred ) ){
return;
}
- }else{
+ }
+ else
+ {
d_needsLastCallCheck = true;
}
}
@@ -726,11 +729,13 @@ TrustNode TheoryBV::ppRewrite(TNode t)
} else if (RewriteRule<UltPlusOne>::applies(t)) {
Node result = RewriteRule<UltPlusOne>::run<false>(t);
res = Rewriter::rewrite(result);
- } else if( res.getKind() == kind::EQUAL &&
- ((res[0].getKind() == kind::BITVECTOR_PLUS &&
- RewriteRule<ConcatToMult>::applies(res[1])) ||
- (res[1].getKind() == kind::BITVECTOR_PLUS &&
- RewriteRule<ConcatToMult>::applies(res[0])))) {
+ }
+ else if (res.getKind() == kind::EQUAL
+ && ((res[0].getKind() == kind::BITVECTOR_PLUS
+ && RewriteRule<ConcatToMult>::applies(res[1]))
+ || (res[1].getKind() == kind::BITVECTOR_PLUS
+ && RewriteRule<ConcatToMult>::applies(res[0]))))
+ {
Node mult = RewriteRule<ConcatToMult>::applies(res[0])?
RewriteRule<ConcatToMult>::run<false>(res[0]) :
RewriteRule<ConcatToMult>::run<true>(res[1]);
@@ -743,9 +748,13 @@ TrustNode TheoryBV::ppRewrite(TNode t)
} else {
res = t;
}
- } else if (RewriteRule<SignExtendEqConst>::applies(t)) {
+ }
+ else if (RewriteRule<SignExtendEqConst>::applies(t))
+ {
res = RewriteRule<SignExtendEqConst>::run<false>(t);
- } else if (RewriteRule<ZeroExtendEqConst>::applies(t)) {
+ }
+ else if (RewriteRule<ZeroExtendEqConst>::applies(t))
+ {
res = RewriteRule<ZeroExtendEqConst>::run<false>(t);
}
else if (RewriteRule<NormalizeEqPlusNeg>::applies(t))
@@ -960,20 +969,6 @@ bool TheoryBV::applyAbstraction(const std::vector<Node>& assertions, std::vector
return changed;
}
-void TheoryBV::setProofLog(proof::BitVectorProof* bvp)
-{
- if (options::bitblastMode() == options::BitblastMode::EAGER)
- {
- d_eagerSolver->setProofLog(bvp);
- }
- else
- {
- for( unsigned i=0; i< d_subtheories.size(); i++ ){
- d_subtheories[i]->setProofLog( bvp );
- }
- }
-}
-
void TheoryBV::setConflict(Node conflict)
{
if (options::bvAbstraction())
diff --git a/src/theory/bv/theory_bv.h b/src/theory/bv/theory_bv.h
index c6e9282f4..2f63f1a52 100644
--- a/src/theory/bv/theory_bv.h
+++ b/src/theory/bv/theory_bv.h
@@ -32,12 +32,7 @@
#include "util/hash.h"
#include "util/statistics_registry.h"
-// Forward declarations, needed because the BV theory and the BV Proof classes
-// are cyclically dependent
namespace CVC4 {
-namespace proof {
-class BitVectorProof;
-}
namespace theory {
@@ -123,8 +118,6 @@ class TheoryBV : public Theory {
bool applyAbstraction(const std::vector<Node>& assertions,
std::vector<Node>& new_assertions);
- void setProofLog(proof::BitVectorProof* bvp);
-
private:
class Statistics
{
@@ -197,7 +190,7 @@ class TheoryBV : public Theory {
std::unique_ptr<EagerBitblastSolver> d_eagerSolver;
std::unique_ptr<AbstractionModule> d_abstractionModule;
bool d_calledPreregister;
-
+
//for extended functions
bool d_needsLastCallCheck;
context::CDHashSet<Node, NodeHashFunction> d_extf_range_infer;
@@ -225,7 +218,7 @@ class TheoryBV : public Theory {
* (ite ((_ extract 1 0) x) 1 0)
*/
bool doExtfReductions( std::vector< Node >& terms );
-
+
bool wasPropagatedBySubtheory(TNode literal) const {
return d_propagatedBy.find(literal) != d_propagatedBy.end();
}
@@ -266,7 +259,11 @@ class TheoryBV : public Theory {
void sendConflict();
- void lemma(TNode node) { d_out->lemma(node, RULE_CONFLICT); d_lemmasAdded = true; }
+ void lemma(TNode node)
+ {
+ d_out->lemma(node);
+ d_lemmasAdded = true;
+ }
void checkForLemma(TNode node);
diff --git a/src/theory/combination_engine.cpp b/src/theory/combination_engine.cpp
index e1317cf29..f1e977fe3 100644
--- a/src/theory/combination_engine.cpp
+++ b/src/theory/combination_engine.cpp
@@ -113,7 +113,7 @@ eq::EqualityEngineNotify* CombinationEngine::getModelEqualityEngineNotify()
void CombinationEngine::sendLemma(TrustNode trn, TheoryId atomsTo)
{
- d_te.lemma(trn.getNode(), RULE_INVALID, false, LemmaProperty::NONE, atomsTo);
+ d_te.lemma(trn.getNode(), false, LemmaProperty::NONE, atomsTo);
}
void CombinationEngine::resetRound()
diff --git a/src/theory/engine_output_channel.cpp b/src/theory/engine_output_channel.cpp
index a271d6d9c..b6d9a19db 100644
--- a/src/theory/engine_output_channel.cpp
+++ b/src/theory/engine_output_channel.cpp
@@ -14,10 +14,6 @@
#include "theory/engine_output_channel.h"
-#include "proof/cnf_proof.h"
-#include "proof/lemma_proof.h"
-#include "proof/proof_manager.h"
-#include "proof/theory_proof.h"
#include "prop/prop_engine.h"
#include "smt/smt_statistics_registry.h"
#include "theory/theory_engine.h"
@@ -71,9 +67,7 @@ void EngineOutputChannel::safePoint(ResourceManager::Resource r)
}
}
-theory::LemmaStatus EngineOutputChannel::lemma(TNode lemma,
- ProofRule rule,
- LemmaProperty p)
+theory::LemmaStatus EngineOutputChannel::lemma(TNode lemma, LemmaProperty p)
{
Debug("theory::lemma") << "EngineOutputChannel<" << d_theory << ">::lemma("
<< lemma << ")"
@@ -81,151 +75,15 @@ theory::LemmaStatus EngineOutputChannel::lemma(TNode lemma,
++d_statistics.lemmas;
d_engine->d_outputChannelUsed = true;
- PROOF({
- bool preprocess = isLemmaPropertyPreprocess(p);
- registerLemmaRecipe(lemma, lemma, preprocess, d_theory);
- });
-
TrustNode tlem = TrustNode::mkTrustLemma(lemma);
theory::LemmaStatus result = d_engine->lemma(
tlem.getNode(),
- rule,
false,
p,
isLemmaPropertySendAtoms(p) ? d_theory : theory::THEORY_LAST);
return result;
}
-void EngineOutputChannel::registerLemmaRecipe(Node lemma,
- Node originalLemma,
- bool preprocess,
- theory::TheoryId theoryId)
-{
- // During CNF conversion, conjunctions will be broken down into
- // multiple lemmas. In order for the recipes to match, we have to do
- // the same here.
- NodeManager* nm = NodeManager::currentNM();
-
- if (preprocess) lemma = d_engine->preprocess(lemma);
-
- bool negated = (lemma.getKind() == NOT);
- Node nnLemma = negated ? lemma[0] : lemma;
-
- switch (nnLemma.getKind())
- {
- case AND:
- if (!negated)
- {
- for (unsigned i = 0; i < nnLemma.getNumChildren(); ++i)
- registerLemmaRecipe(nnLemma[i], originalLemma, false, theoryId);
- }
- else
- {
- NodeBuilder<> builder(OR);
- for (unsigned i = 0; i < nnLemma.getNumChildren(); ++i)
- builder << nnLemma[i].negate();
-
- Node disjunction =
- (builder.getNumChildren() == 1) ? builder[0] : builder;
- registerLemmaRecipe(disjunction, originalLemma, false, theoryId);
- }
- break;
-
- case EQUAL:
- if (nnLemma[0].getType().isBoolean())
- {
- if (!negated)
- {
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0], nnLemma[1].negate()),
- originalLemma,
- false,
- theoryId);
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0].negate(), nnLemma[1]),
- originalLemma,
- false,
- theoryId);
- }
- else
- {
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0], nnLemma[1]),
- originalLemma,
- false,
- theoryId);
- registerLemmaRecipe(
- nm->mkNode(OR, nnLemma[0].negate(), nnLemma[1].negate()),
- originalLemma,
- false,
- theoryId);
- }
- }
- break;
-
- case ITE:
- if (!negated)
- {
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0].negate(), nnLemma[1]),
- originalLemma,
- false,
- theoryId);
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0], nnLemma[2]),
- originalLemma,
- false,
- theoryId);
- }
- else
- {
- registerLemmaRecipe(
- nm->mkNode(OR, nnLemma[0].negate(), nnLemma[1].negate()),
- originalLemma,
- false,
- theoryId);
- registerLemmaRecipe(nm->mkNode(OR, nnLemma[0], nnLemma[2].negate()),
- originalLemma,
- false,
- theoryId);
- }
- break;
-
- default: break;
- }
-
- // Theory lemmas have one step that proves the empty clause
- LemmaProofRecipe proofRecipe;
- Node emptyNode;
- LemmaProofRecipe::ProofStep proofStep(theoryId, emptyNode);
-
- // Remember the original lemma, so we can report this later when asked to
- proofRecipe.setOriginalLemma(originalLemma);
-
- // Record the assertions and rewrites
- Node rewritten;
- if (lemma.getKind() == OR)
- {
- for (unsigned i = 0; i < lemma.getNumChildren(); ++i)
- {
- rewritten = theory::Rewriter::rewrite(lemma[i]);
- if (rewritten != lemma[i])
- {
- proofRecipe.addRewriteRule(lemma[i].negate(), rewritten.negate());
- }
- proofStep.addAssertion(lemma[i]);
- proofRecipe.addBaseAssertion(rewritten);
- }
- }
- else
- {
- rewritten = theory::Rewriter::rewrite(lemma);
- if (rewritten != lemma)
- {
- proofRecipe.addRewriteRule(lemma.negate(), rewritten.negate());
- }
- proofStep.addAssertion(lemma);
- proofRecipe.addBaseAssertion(rewritten);
- }
- proofRecipe.addStep(proofStep);
- ProofManager::getCnfProof()->setProofRecipe(&proofRecipe);
-}
-
theory::LemmaStatus EngineOutputChannel::splitLemma(TNode lemma, bool removable)
{
Debug("theory::lemma") << "EngineOutputChannel<" << d_theory << ">::lemma("
@@ -238,7 +96,7 @@ theory::LemmaStatus EngineOutputChannel::splitLemma(TNode lemma, bool removable)
TrustNode tlem = TrustNode::mkTrustLemma(lemma);
LemmaProperty p = removable ? LemmaProperty::REMOVABLE : LemmaProperty::NONE;
theory::LemmaStatus result =
- d_engine->lemma(tlem.getNode(), RULE_SPLIT, false, p, d_theory);
+ d_engine->lemma(tlem.getNode(), false, p, d_theory);
return result;
}
@@ -251,13 +109,11 @@ bool EngineOutputChannel::propagate(TNode literal)
return d_engine->propagate(literal, d_theory);
}
-void EngineOutputChannel::conflict(TNode conflictNode,
- std::unique_ptr<Proof> proof)
+void EngineOutputChannel::conflict(TNode conflictNode)
{
Trace("theory::conflict")
<< "EngineOutputChannel<" << d_theory << ">::conflict(" << conflictNode
<< ")" << std::endl;
- Assert(!proof); // Theory shouldn't be producing proofs yet
++d_statistics.conflicts;
d_engine->d_outputChannelUsed = true;
TrustNode tConf = TrustNode::mkTrustConflict(conflictNode);
@@ -274,7 +130,7 @@ void EngineOutputChannel::demandRestart()
Trace("theory::restart") << "EngineOutputChannel<" << d_theory
<< ">::restart(" << restartVar << ")" << std::endl;
++d_statistics.restartDemands;
- lemma(restartVar, RULE_INVALID, LemmaProperty::REMOVABLE);
+ lemma(restartVar, LemmaProperty::REMOVABLE);
}
void EngineOutputChannel::requirePhase(TNode n, bool phase)
@@ -329,7 +185,6 @@ LemmaStatus EngineOutputChannel::trustedLemma(TrustNode plem, LemmaProperty p)
// now, call the normal interface for lemma
return d_engine->lemma(
plem.getNode(),
- RULE_INVALID,
false,
p,
isLemmaPropertySendAtoms(p) ? d_theory : theory::THEORY_LAST);
diff --git a/src/theory/engine_output_channel.h b/src/theory/engine_output_channel.h
index 3e959898f..99f812ed4 100644
--- a/src/theory/engine_output_channel.h
+++ b/src/theory/engine_output_channel.h
@@ -45,12 +45,10 @@ class EngineOutputChannel : public theory::OutputChannel
void safePoint(ResourceManager::Resource r) override;
- void conflict(TNode conflictNode,
- std::unique_ptr<Proof> pf = nullptr) override;
+ void conflict(TNode conflictNode) override;
bool propagate(TNode literal) override;
theory::LemmaStatus lemma(TNode lemma,
- ProofRule rule,
LemmaProperty p = LemmaProperty::NONE) override;
theory::LemmaStatus splitLemma(TNode lemma, bool removable = false) override;
diff --git a/src/theory/ext_theory.h b/src/theory/ext_theory.h
index 420932bfe..2721bc89e 100644
--- a/src/theory/ext_theory.h
+++ b/src/theory/ext_theory.h
@@ -36,6 +36,7 @@
#include <map>
#include <set>
+#include "context/cdhashmap.h"
#include "context/cdhashset.h"
#include "context/context.h"
#include "expr/node.h"
diff --git a/src/theory/output_channel.cpp b/src/theory/output_channel.cpp
index c918438ee..ad60dbe0e 100644
--- a/src/theory/output_channel.cpp
+++ b/src/theory/output_channel.cpp
@@ -93,11 +93,6 @@ TNode LemmaStatus::getRewrittenLemma() const { return d_rewrittenLemma; }
unsigned LemmaStatus::getLevel() const { return d_level; }
-LemmaStatus OutputChannel::lemma(TNode n, LemmaProperty p)
-{
- return lemma(n, RULE_INVALID, p);
-}
-
LemmaStatus OutputChannel::split(TNode n)
{
return splitLemma(n.orNode(n.notNode()));
diff --git a/src/theory/output_channel.h b/src/theory/output_channel.h
index 0fd610c58..23d7d8784 100644
--- a/src/theory/output_channel.h
+++ b/src/theory/output_channel.h
@@ -22,11 +22,9 @@
#include <memory>
#include "expr/proof_node.h"
-#include "proof/proof_manager.h"
#include "smt/logic_exception.h"
#include "theory/interrupted.h"
#include "theory/trust_node.h"
-#include "util/proof.h"
#include "util/resource_manager.h"
namespace CVC4 {
@@ -135,10 +133,8 @@ class OutputChannel {
* assigned false), or else a literal by itself (in the case of a
* unit conflict) which is assigned TRUE (and T-conflicting) in the
* current assignment.
- * @param pf - a proof of the conflict. This is only non-null if proofs
- * are enabled.
*/
- virtual void conflict(TNode n, std::unique_ptr<Proof> pf = nullptr) = 0;
+ virtual void conflict(TNode n) = 0;
/**
* Propagate a theory literal.
@@ -153,19 +149,11 @@ class OutputChannel {
* been detected. (This requests a split.)
*
* @param n - a theory lemma valid at decision level 0
- * @param rule - the proof rule for this lemma
* @param p The properties of the lemma
* @return the "status" of the lemma, including user level at which
* the lemma resides; the lemma will be removed when this user level pops
*/
- virtual LemmaStatus lemma(TNode n,
- ProofRule rule,
- LemmaProperty p = LemmaProperty::NONE) = 0;
-
- /**
- * Variant of the lemma function that does not require providing a proof rule.
- */
- virtual LemmaStatus lemma(TNode n, LemmaProperty p = LemmaProperty::NONE);
+ virtual LemmaStatus lemma(TNode n, LemmaProperty p = LemmaProperty::NONE) = 0;
/**
* Request a split on a new theory atom. This is equivalent to
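For concrete channels the interface change reduces to two narrower signatures: conflicts no longer carry an optional Proof and lemmas no longer carry a ProofRule (the TestOutputChannel further down in this patch is updated the same way). A small usage sketch, assuming the CVC4 headers included above; reportSketch, conflictNode and lemmaNode are illustrative names only:

    // How a theory-side caller reports results through the trimmed-down
    // OutputChannel after this commit.
    void reportSketch(theory::OutputChannel& out,
                      TNode conflictNode,
                      TNode lemmaNode)
    {
      // No std::unique_ptr<Proof> argument anymore.
      out.conflict(conflictNode);
      // No ProofRule argument anymore; only the LemmaProperty remains.
      out.lemma(lemmaNode, theory::LemmaProperty::REMOVABLE);
    }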
diff --git a/src/theory/quantifiers/fun_def_process.cpp b/src/theory/quantifiers/fun_def_process.cpp
index 2c5eab94c..eb87db4de 100644
--- a/src/theory/quantifiers/fun_def_process.cpp
+++ b/src/theory/quantifiers/fun_def_process.cpp
@@ -14,13 +14,15 @@
** This class implements pre-process steps for admissible recursive function definitions (Reynolds et al IJCAR2016)
**/
+#include "theory/quantifiers/fun_def_process.h"
+
#include <vector>
-#include "theory/quantifiers/fun_def_process.h"
+#include "options/smt_options.h"
+#include "proof/proof_manager.h"
#include "theory/quantifiers/quantifiers_attributes.h"
#include "theory/quantifiers/term_database.h"
#include "theory/quantifiers/term_util.h"
-#include "proof/proof_manager.h"
using namespace CVC4;
using namespace std;
@@ -86,7 +88,10 @@ void FunDefFmf::simplify( std::vector< Node >& assertions ) {
Trace("fmf-fun-def") << " to " << std::endl;
Node new_q = NodeManager::currentNM()->mkNode( FORALL, bvl, bd );
new_q = Rewriter::rewrite( new_q );
- PROOF( ProofManager::currentPM()->addDependence(new_q, assertions[i]); );
+ if (options::unsatCores())
+ {
+ ProofManager::currentPM()->addDependence(new_q, assertions[i]);
+ }
assertions[i] = new_q;
Trace("fmf-fun-def") << " " << assertions[i] << std::endl;
fd_assertions.push_back( i );
@@ -120,7 +125,10 @@ void FunDefFmf::simplify( std::vector< Node >& assertions ) {
<< std::endl;
Trace("fmf-fun-def-rewrite") << " to " << std::endl;
Trace("fmf-fun-def-rewrite") << " " << n << std::endl;
- PROOF(ProofManager::currentPM()->addDependence(n, assertions[i]););
+ if (options::unsatCores())
+ {
+ ProofManager::currentPM()->addDependence(n, assertions[i]);
+ }
assertions[i] = n;
}
}
@@ -175,8 +183,12 @@ Node FunDefFmf::simplifyFormula( Node n, bool pol, bool hasPol, std::vector< Nod
c = simplifyFormula( n[i], newPol, newHasPol, cconstraints, hd, false, visited, visited_cons );
if( branch_pos ){
// if at a branching position, the other constraints don't matter if this is satisfied
- Node bcons = cconstraints.empty() ? NodeManager::currentNM()->mkConst( true ) :
- ( cconstraints.size()==1 ? cconstraints[0] : NodeManager::currentNM()->mkNode( AND, cconstraints ) );
+ Node bcons = cconstraints.empty()
+ ? NodeManager::currentNM()->mkConst(true)
+ : (cconstraints.size() == 1
+ ? cconstraints[0]
+ : NodeManager::currentNM()->mkNode(
+ AND, cconstraints));
branch_constraints.push_back( bcons );
Trace("fmf-fun-def-debug2") << "Branching constraint at arg " << i << " is " << bcons << std::endl;
}
@@ -201,10 +213,14 @@ Node FunDefFmf::simplifyFormula( Node n, bool pol, bool hasPol, std::vector< Nod
// in the default case, we care about all conditions
branch_cond = constraints.size()==1 ? constraints[0] : NodeManager::currentNM()->mkNode( AND, constraints );
for( unsigned i=0; i<n.getNumChildren(); i++ ){
- // if this child holds with forcing polarity (true child of OR or false child of AND),
- // then we only care about its associated recursive conditions
- branch_cond = NodeManager::currentNM()->mkNode( kind::ITE,
- ( n.getKind()==OR ? n[i] : n[i].negate() ), branch_constraints[i], branch_cond );
+ // if this child holds with forcing polarity (true child of OR or
+ // false child of AND), then we only care about its associated
+ // recursive conditions
+ branch_cond = NodeManager::currentNM()->mkNode(
+ kind::ITE,
+ (n.getKind() == OR ? n[i] : n[i].negate()),
+ branch_constraints[i],
+ branch_cond);
}
}
Trace("fmf-fun-def-debug2") << "Made branching condition " << branch_cond << std::endl;
diff --git a/src/theory/quantifiers/instantiate.cpp b/src/theory/quantifiers/instantiate.cpp
index 77b61e9dd..04b6e0dda 100644
--- a/src/theory/quantifiers/instantiate.cpp
+++ b/src/theory/quantifiers/instantiate.cpp
@@ -16,6 +16,8 @@
#include "expr/node_algorithm.h"
#include "options/quantifiers_options.h"
+#include "options/smt_options.h"
+#include "proof/proof_manager.h"
#include "smt/smt_statistics_registry.h"
#include "theory/quantifiers/cegqi/inst_strategy_cegqi.h"
#include "theory/quantifiers/first_order_model.h"
@@ -577,18 +579,21 @@ void Instantiate::getInstantiatedQuantifiedFormulas(std::vector<Node>& qs)
bool Instantiate::getUnsatCoreLemmas(std::vector<Node>& active_lemmas)
{
// only if unsat core available
- if (options::proof())
+ if (options::unsatCores())
{
if (!ProofManager::currentPM()->unsatCoreAvailable())
{
return false;
}
}
+ else
+ {
+ return false;
+ }
Trace("inst-unsat-core") << "Get instantiations in unsat core..."
<< std::endl;
- ProofManager::currentPM()->getLemmasInUnsatCore(theory::THEORY_QUANTIFIERS,
- active_lemmas);
+ ProofManager::currentPM()->getLemmasInUnsatCore(active_lemmas);
if (Trace.isOn("inst-unsat-core"))
{
Trace("inst-unsat-core") << "Quantifiers lemmas in unsat core: "
@@ -602,27 +607,6 @@ bool Instantiate::getUnsatCoreLemmas(std::vector<Node>& active_lemmas)
return true;
}
-bool Instantiate::getUnsatCoreLemmas(std::vector<Node>& active_lemmas,
- std::map<Node, Node>& weak_imp)
-{
- if (getUnsatCoreLemmas(active_lemmas))
- {
- for (unsigned i = 0, size = active_lemmas.size(); i < size; ++i)
- {
- Node n = ProofManager::currentPM()->getWeakestImplicantInUnsatCore(
- active_lemmas[i]);
- if (n != active_lemmas[i])
- {
- Trace("inst-unsat-core") << " weaken : " << active_lemmas[i] << " -> "
- << n << std::endl;
- }
- weak_imp[active_lemmas[i]] = n;
- }
- return true;
- }
- return false;
-}
-
void Instantiate::getInstantiationTermVectors(
Node q, std::vector<std::vector<Node> >& tvecs)
{
diff --git a/src/theory/quantifiers/instantiate.h b/src/theory/quantifiers/instantiate.h
index cb40bbbbc..8b1e0f7fc 100644
--- a/src/theory/quantifiers/instantiate.h
+++ b/src/theory/quantifiers/instantiate.h
@@ -268,21 +268,6 @@ class Instantiate : public QuantifiersUtil
* This method returns false if the unsat core is not available.
*/
bool getUnsatCoreLemmas(std::vector<Node>& active_lemmas);
- /** get unsat core lemmas
- *
- * If this method returns true, then it appends to active_lemmas all lemmas
- * that are in the unsat core that originated from the theory of quantifiers.
- * This method returns false if the unsat core is not available.
- *
- * It also computes a weak implicant for each of these lemmas. For each lemma
- * L in active_lemmas, this is a formula L' such that:
- * L => L'
- * and replacing L by L' in the unsat core results in a set that is still
- * unsatisfiable. The map weak_imp stores this formula for each formula in
- * active_lemmas.
- */
- bool getUnsatCoreLemmas(std::vector<Node>& active_lemmas,
- std::map<Node, Node>& weak_imp);
/** get explanation for instantiation lemmas
*
*
diff --git a/src/theory/quantifiers_engine.cpp b/src/theory/quantifiers_engine.cpp
index cb7a4d055..2e69080e7 100644
--- a/src/theory/quantifiers_engine.cpp
+++ b/src/theory/quantifiers_engine.cpp
@@ -217,7 +217,7 @@ QuantifiersEngine::QuantifiersEngine(context::Context* c,
if (options::sygus() || options::sygusInst())
{
d_sygus_tdb.reset(new quantifiers::TermDbSygus(c, this));
- }
+ }
d_util.push_back(d_instantiate.get());
@@ -580,7 +580,7 @@ void QuantifiersEngine::check( Theory::Effort e ){
if( d_hasAddedLemma ){
return;
}
-
+
double clSet = 0;
if( Trace.isOn("quant-engine") ){
clSet = double(clock())/double(CLOCKS_PER_SEC);
@@ -805,7 +805,7 @@ void QuantifiersEngine::check( Theory::Effort e ){
Trace("quant-engine") << ", added lemma = " << d_hasAddedLemma;
Trace("quant-engine") << std::endl;
}
-
+
Trace("quant-engine-debug2") << "Finished quantifiers engine check." << std::endl;
}else{
Trace("quant-engine-debug2") << "Quantifiers Engine does not need check." << std::endl;
@@ -1035,7 +1035,7 @@ bool QuantifiersEngine::removeLemma( Node lem ) {
void QuantifiersEngine::addRequirePhase( Node lit, bool req ){
d_phase_req_waiting[lit] = req;
}
-
+
void QuantifiersEngine::markRelevant( Node q ) {
d_model->markRelevant( q );
}
@@ -1048,9 +1048,10 @@ bool QuantifiersEngine::theoryEngineNeedsCheck() const
return d_te->needCheck();
}
-void QuantifiersEngine::setConflict() {
- d_conflict = true;
- d_conflict_c = true;
+void QuantifiersEngine::setConflict()
+{
+ d_conflict = true;
+ d_conflict_c = true;
}
bool QuantifiersEngine::getInstWhenNeedsCheck( Theory::Effort e ) {
@@ -1123,10 +1124,6 @@ bool QuantifiersEngine::getUnsatCoreLemmas( std::vector< Node >& active_lemmas )
return d_instantiate->getUnsatCoreLemmas(active_lemmas);
}
-bool QuantifiersEngine::getUnsatCoreLemmas( std::vector< Node >& active_lemmas, std::map< Node, Node >& weak_imp ) {
- return d_instantiate->getUnsatCoreLemmas(active_lemmas, weak_imp);
-}
-
void QuantifiersEngine::getInstantiationTermVectors( Node q, std::vector< std::vector< Node > >& tvecs ) {
d_instantiate->getInstantiationTermVectors(q, tvecs);
}
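With the weak-implicant variant removed, Instantiate::getUnsatCoreLemmas (and its QuantifiersEngine wrapper) is the single entry point, and a false return now covers both "unsat cores disabled" and "core not yet available". A sketch of a caller under that contract; dumpCoreInstLemmas and qe are illustrative names:

    // Print the quantifiers lemmas that made it into the unsat core, if any.
    void dumpCoreInstLemmas(QuantifiersEngine* qe)
    {
      std::vector<Node> activeLemmas;
      if (qe->getUnsatCoreLemmas(activeLemmas))
      {
        for (const Node& lem : activeLemmas)
        {
          Trace("inst-unsat-core") << "lemma in core: " << lem << "\n";
        }
      }
    }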
diff --git a/src/theory/quantifiers_engine.h b/src/theory/quantifiers_engine.h
index eca108587..6afc4f925 100644
--- a/src/theory/quantifiers_engine.h
+++ b/src/theory/quantifiers_engine.h
@@ -265,8 +265,6 @@ public:
Node getInstantiatedConjunction(Node q);
/** get unsat core lemmas */
bool getUnsatCoreLemmas(std::vector<Node>& active_lemmas);
- bool getUnsatCoreLemmas(std::vector<Node>& active_lemmas,
- std::map<Node, Node>& weak_imp);
/** get explanation for instantiation lemmas */
void getExplanationForInstLemmas(const std::vector<Node>& lems,
std::map<Node, Node>& quant,
@@ -317,7 +315,7 @@ public:
~Statistics();
};/* class QuantifiersEngine::Statistics */
Statistics d_statistics;
-
+
private:
/** reference to theory engine object */
TheoryEngine* d_te;
diff --git a/src/theory/sort_inference.cpp b/src/theory/sort_inference.cpp
index 144f5b54d..feb266a20 100644
--- a/src/theory/sort_inference.cpp
+++ b/src/theory/sort_inference.cpp
@@ -24,7 +24,6 @@
#include "options/quantifiers_options.h"
#include "options/smt_options.h"
#include "options/uf_options.h"
-#include "proof/proof_manager.h"
#include "theory/rewriter.h"
#include "theory/quantifiers/quant_util.h"
diff --git a/src/theory/strings/theory_strings_preprocess.cpp b/src/theory/strings/theory_strings_preprocess.cpp
index a752958b2..084e2ac91 100644
--- a/src/theory/strings/theory_strings_preprocess.cpp
+++ b/src/theory/strings/theory_strings_preprocess.cpp
@@ -19,6 +19,7 @@
#include <stdint.h>
#include "expr/kind.h"
+#include "options/smt_options.h"
#include "options/strings_options.h"
#include "proof/proof_manager.h"
#include "smt/logic_exception.h"
@@ -972,7 +973,10 @@ void StringsPreprocess::processAssertions( std::vector< Node > &vec_node ){
: NodeManager::currentNM()->mkNode(kind::AND, asserts);
if( res!=vec_node[i] ){
res = Rewriter::rewrite( res );
- PROOF( ProofManager::currentPM()->addDependence( res, vec_node[i] ); );
+ if (options::unsatCores())
+ {
+ ProofManager::currentPM()->addDependence(res, vec_node[i]);
+ }
vec_node[i] = res;
}
}
diff --git a/src/theory/theory.cpp b/src/theory/theory.cpp
index d69c6edc5..3c603051c 100644
--- a/src/theory/theory.cpp
+++ b/src/theory/theory.cpp
@@ -23,6 +23,7 @@
#include "base/check.h"
#include "expr/node_algorithm.h"
+#include "options/smt_options.h"
#include "options/theory_options.h"
#include "smt/smt_statistics_registry.h"
#include "theory/ext_theory.h"
@@ -81,8 +82,7 @@ Theory::Theory(TheoryId id,
d_equalityEngine(nullptr),
d_allocEqualityEngine(nullptr),
d_theoryState(nullptr),
- d_inferManager(nullptr),
- d_proofsEnabled(false)
+ d_inferManager(nullptr)
{
smtStatisticsRegistry()->registerStat(&d_checkTime);
smtStatisticsRegistry()->registerStat(&d_computeCareGraphTime);
@@ -296,12 +296,12 @@ void Theory::computeCareGraph() {
switch (d_valuation.getEqualityStatus(a, b)) {
case EQUALITY_TRUE_AND_PROPAGATED:
case EQUALITY_FALSE_AND_PROPAGATED:
- // If we know about it, we should have propagated it, so we can skip
- break;
+ // If we know about it, we should have propagated it, so we can skip
+ break;
default:
- // Let's split on it
- addCarePair(a, b);
- break;
+ // Let's split on it
+ addCarePair(a, b);
+ break;
}
}
}
diff --git a/src/theory/theory.h b/src/theory/theory.h
index 039fdebf1..176d4b672 100644
--- a/src/theory/theory.h
+++ b/src/theory/theory.h
@@ -246,11 +246,6 @@ class Theory {
* the equality engine are used properly.
*/
TheoryInferenceManager* d_inferManager;
- /**
- * Whether proofs are enabled
- *
- */
- bool d_proofsEnabled;
/**
* Returns the next assertion in the assertFact() queue.
@@ -581,6 +576,7 @@ class Theory {
Unimplemented() << "Theory " << identify()
<< " propagated a node but doesn't implement the "
"Theory::explain() interface!";
+ return TrustNode::null();
}
//--------------------------------- check
@@ -824,15 +820,13 @@ class Theory {
*
* @return true iff facts have been asserted to this theory.
*/
- bool hasFacts() {
- return !d_facts.empty();
- }
+ bool hasFacts() { return !d_facts.empty(); }
/** Return total number of facts asserted to this theory */
size_t numAssertions() {
return d_facts.size();
}
-
+
typedef context::CDList<TNode>::const_iterator shared_terms_iterator;
/**
@@ -917,7 +911,7 @@ class Theory {
/* is extended function reduced */
virtual bool isExtfReduced( int effort, Node n, Node on, std::vector< Node >& exp ) { return n.isConst(); }
-
+
/**
* Get reduction for node
* If return value is not 0, then n is reduced.
@@ -927,9 +921,6 @@ class Theory {
* and return value should be <0.
*/
virtual int getReduction( int effort, Node n, Node& nr ) { return 0; }
-
- /** Turn on proof-production mode. */
- void produceProofs() { d_proofsEnabled = true; }
};/* class Theory */
std::ostream& operator<<(std::ostream& os, theory::Theory::Effort level);
diff --git a/src/theory/theory_engine.cpp b/src/theory/theory_engine.cpp
index c61879b6d..6837d4be5 100644
--- a/src/theory/theory_engine.cpp
+++ b/src/theory/theory_engine.cpp
@@ -28,14 +28,9 @@
#include "expr/node_visitor.h"
#include "options/bv_options.h"
#include "options/options.h"
-#include "options/proof_options.h"
#include "options/quantifiers_options.h"
#include "options/theory_options.h"
#include "preprocessing/assertion_pipeline.h"
-#include "proof/cnf_proof.h"
-#include "proof/lemma_proof.h"
-#include "proof/proof_manager.h"
-#include "proof/theory_proof.h"
#include "smt/logic_exception.h"
#include "smt/term_formula_removal.h"
#include "theory/arith/arith_ite_utils.h"
@@ -256,10 +251,6 @@ TheoryEngine::TheoryEngine(context::Context* context,
d_true = NodeManager::currentNM()->mkConst<bool>(true);
d_false = NodeManager::currentNM()->mkConst<bool>(false);
-#ifdef CVC4_PROOF
- ProofManager::currentPM()->initTheoryProofEngine();
-#endif
-
smtStatisticsRegistry()->registerStat(&d_arithSubstitutionsAdded);
}
@@ -1150,23 +1141,6 @@ bool TheoryEngine::propagate(TNode literal, theory::TheoryId theory) {
assertToTheory(literal, literal, /* to */ THEORY_BUILTIN, /* from */ theory);
}
} else {
- // We could be propagating a unit-clause lemma. In this case, we need to provide a
- // recipe.
- // TODO: Consider putting this someplace else? This is the only refence to the proof
- // manager in this class.
-
- PROOF({
- LemmaProofRecipe proofRecipe;
- proofRecipe.addBaseAssertion(literal);
-
- Node emptyNode;
- LemmaProofRecipe::ProofStep proofStep(theory, emptyNode);
- proofStep.addAssertion(literal);
- proofRecipe.addStep(proofStep);
-
- ProofManager::getCnfProof()->setProofRecipe(&proofRecipe);
- });
-
// Just send off to the SAT solver
Assert(d_propEngine->isSatLiteral(literal));
assertToTheory(literal, literal, /* to */ THEORY_SAT_SOLVER, /* from */ theory);
@@ -1309,65 +1283,31 @@ static Node mkExplanation(const std::vector<NodeTheoryPair>& explanation) {
return conjunction;
}
-Node TheoryEngine::getExplanationAndRecipe(TNode node, LemmaProofRecipe* proofRecipe) {
- Debug("theory::explain") << "TheoryEngine::getExplanation(" << node << "): current propagation index = " << d_propagationMapTimestamp << endl;
+Node TheoryEngine::getExplanation(TNode node)
+{
+ Debug("theory::explain") << "TheoryEngine::getExplanation(" << node
+ << "): current propagation index = "
+ << d_propagationMapTimestamp << endl;
bool polarity = node.getKind() != kind::NOT;
TNode atom = polarity ? node : node[0];
// If we're not in shared mode, explanations are simple
- if (!d_logicInfo.isSharingEnabled()) {
- Debug("theory::explain") << "TheoryEngine::getExplanation: sharing is NOT enabled. "
- << " Responsible theory is: "
- << theoryOf(atom)->getId() << std::endl;
+ if (!d_logicInfo.isSharingEnabled())
+ {
+ Debug("theory::explain")
+ << "TheoryEngine::getExplanation: sharing is NOT enabled. "
+ << " Responsible theory is: " << theoryOf(atom)->getId() << std::endl;
TrustNode texplanation = theoryOf(atom)->explain(node);
Node explanation = texplanation.getNode();
- Debug("theory::explain") << "TheoryEngine::getExplanation(" << node << ") => " << explanation << endl;
- PROOF({
- if(proofRecipe) {
- Node emptyNode;
- LemmaProofRecipe::ProofStep proofStep(theoryOf(atom)->getId(), emptyNode);
- proofStep.addAssertion(node);
- proofRecipe->addBaseAssertion(node);
-
- if (explanation.getKind() == kind::AND) {
- // If the explanation is a conjunction, the recipe for the corresponding lemma is
- // the negation of its conjuncts.
- Node flat = flattenAnd(explanation);
- for (unsigned i = 0; i < flat.getNumChildren(); ++i) {
- if (flat[i].isConst() && flat[i].getConst<bool>()) {
- ++ i;
- continue;
- }
- if (flat[i].getKind() == kind::NOT &&
- flat[i][0].isConst() && !flat[i][0].getConst<bool>()) {
- ++ i;
- continue;
- }
- Debug("theory::explain") << "TheoryEngine::getExplanationAndRecipe: adding recipe assertion: "
- << flat[i].negate() << std::endl;
- proofStep.addAssertion(flat[i].negate());
- proofRecipe->addBaseAssertion(flat[i].negate());
- }
- } else {
- // The recipe for proving it is by negating it. "True" is not an acceptable reason.
- if (!((explanation.isConst() && explanation.getConst<bool>()) ||
- (explanation.getKind() == kind::NOT &&
- explanation[0].isConst() && !explanation[0].getConst<bool>()))) {
- proofStep.addAssertion(explanation.negate());
- proofRecipe->addBaseAssertion(explanation.negate());
- }
- }
-
- proofRecipe->addStep(proofStep);
- }
- });
-
+ Debug("theory::explain") << "TheoryEngine::getExplanation(" << node
+ << ") => " << explanation << endl;
return explanation;
}
- Debug("theory::explain") << "TheoryEngine::getExplanation: sharing IS enabled" << std::endl;
+ Debug("theory::explain") << "TheoryEngine::getExplanation: sharing IS enabled"
+ << std::endl;
// Initial thing to explain
NodeTheoryPair toExplain(node, THEORY_SAT_SOLVER, d_propagationMapTimestamp);
@@ -1378,33 +1318,20 @@ Node TheoryEngine::getExplanationAndRecipe(TNode node, LemmaProofRecipe* proofRe
<< "TheoryEngine::getExplanation: explainer for node "
<< nodeExplainerPair.d_node
<< " is theory: " << nodeExplainerPair.d_theory << std::endl;
- TheoryId explainer = nodeExplainerPair.d_theory;
// Create the workplace for explanations
std::vector<NodeTheoryPair> explanationVector;
explanationVector.push_back(d_propagationMap[toExplain]);
// Process the explanation
- if (proofRecipe) {
- Node emptyNode;
- LemmaProofRecipe::ProofStep proofStep(explainer, emptyNode);
- proofStep.addAssertion(node);
- proofRecipe->addStep(proofStep);
- proofRecipe->addBaseAssertion(node);
- }
-
- getExplanation(explanationVector, proofRecipe);
+ getExplanation(explanationVector);
Node explanation = mkExplanation(explanationVector);
- Debug("theory::explain") << "TheoryEngine::getExplanation(" << node << ") => " << explanation << endl;
+ Debug("theory::explain") << "TheoryEngine::getExplanation(" << node << ") => "
+ << explanation << endl;
return explanation;
}
-Node TheoryEngine::getExplanation(TNode node) {
- LemmaProofRecipe *dontCareRecipe = NULL;
- return getExplanationAndRecipe(node, dontCareRecipe);
-}
-
struct AtomsCollect {
std::vector<TNode> d_atoms;
@@ -1504,7 +1431,6 @@ void TheoryEngine::ensureLemmaAtoms(const std::vector<TNode>& atoms, theory::The
}
theory::LemmaStatus TheoryEngine::lemma(TNode node,
- ProofRule rule,
bool negated,
theory::LemmaProperty p,
theory::TheoryId atomsTo)
@@ -1567,8 +1493,7 @@ theory::LemmaStatus TheoryEngine::lemma(TNode node,
// assert lemmas to prop engine
for (size_t i = 0, lsize = lemmas.size(); i < lsize; ++i)
{
- d_propEngine->assertLemma(
- lemmas[i], i == 0 && negated, removable, rule, node);
+ d_propEngine->assertLemma(lemmas[i], i == 0 && negated, removable);
}
// WARNING: Below this point don't assume lemmas[0] to be not negated.
@@ -1611,23 +1536,6 @@ void TheoryEngine::conflict(TNode conflict, TheoryId theoryId) {
<< CheckSatCommand(conflict.toExpr());
}
- LemmaProofRecipe* proofRecipe = NULL;
- PROOF({
- proofRecipe = new LemmaProofRecipe;
- Node emptyNode;
- LemmaProofRecipe::ProofStep proofStep(theoryId, emptyNode);
-
- if (conflict.getKind() == kind::AND) {
- for (unsigned i = 0; i < conflict.getNumChildren(); ++i) {
- proofStep.addAssertion(conflict[i].negate());
- }
- } else {
- proofStep.addAssertion(conflict.negate());
- }
-
- proofRecipe->addStep(proofStep);
- });
-
// In the multiple-theories case, we need to reconstruct the conflict
if (d_logicInfo.isSharingEnabled()) {
// Create the workplace for explanations
@@ -1635,67 +1543,29 @@ void TheoryEngine::conflict(TNode conflict, TheoryId theoryId) {
explanationVector.push_back(NodeTheoryPair(conflict, theoryId, d_propagationMapTimestamp));
// Process the explanation
- getExplanation(explanationVector, proofRecipe);
- PROOF(ProofManager::getCnfProof()->setProofRecipe(proofRecipe));
+ getExplanation(explanationVector);
Node fullConflict = mkExplanation(explanationVector);
Debug("theory::conflict") << "TheoryEngine::conflict(" << conflict << ", " << theoryId << "): full = " << fullConflict << endl;
Assert(properConflict(fullConflict));
lemma(fullConflict,
- RULE_CONFLICT,
true,
LemmaProperty::REMOVABLE,
THEORY_LAST);
} else {
// When only one theory, the conflict should need no processing
Assert(properConflict(conflict));
- PROOF({
- if (conflict.getKind() == kind::AND) {
- // If the conflict is a conjunction, the corresponding lemma is derived by negating
- // its conjuncts.
- for (unsigned i = 0; i < conflict.getNumChildren(); ++i) {
- if (conflict[i].isConst() && conflict[i].getConst<bool>()) {
- ++ i;
- continue;
- }
- if (conflict[i].getKind() == kind::NOT &&
- conflict[i][0].isConst() && !conflict[i][0].getConst<bool>()) {
- ++ i;
- continue;
- }
- proofRecipe->getStep(0)->addAssertion(conflict[i].negate());
- proofRecipe->addBaseAssertion(conflict[i].negate());
- }
- } else {
- proofRecipe->getStep(0)->addAssertion(conflict.negate());
- proofRecipe->addBaseAssertion(conflict.negate());
- }
-
- ProofManager::getCnfProof()->setProofRecipe(proofRecipe);
- });
-
- lemma(conflict, RULE_CONFLICT, true, LemmaProperty::REMOVABLE, THEORY_LAST);
+ lemma(conflict, true, LemmaProperty::REMOVABLE, THEORY_LAST);
}
-
- PROOF({
- delete proofRecipe;
- proofRecipe = NULL;
- });
}
-void TheoryEngine::getExplanation(std::vector<NodeTheoryPair>& explanationVector, LemmaProofRecipe* proofRecipe) {
+void TheoryEngine::getExplanation(
+ std::vector<NodeTheoryPair>& explanationVector)
+{
Assert(explanationVector.size() > 0);
unsigned i = 0; // Index of the current literal we are processing
unsigned j = 0; // Index of the last literal we are keeping
- std::unique_ptr<std::set<Node>> inputAssertions = nullptr;
- PROOF({
- if (proofRecipe)
- {
- inputAssertions.reset(
- new std::set<Node>(proofRecipe->getStep(0)->getAssertions()));
- }
- });
// cache of nodes we have already explained by some theory
std::unordered_map<Node, size_t, NodeHashFunction> cache;
@@ -1768,22 +1638,6 @@ void TheoryEngine::getExplanation(std::vector<NodeTheoryPair>& explanationVector
explanationVector.push_back((*find).second);
++i;
- PROOF({
- if (toExplain.d_node != (*find).second.d_node)
- {
- Debug("pf::explain")
- << "TheoryEngine::getExplanation: Rewrite alert! toAssert = "
- << toExplain.d_node << ", toExplain = " << (*find).second.d_node
- << std::endl;
-
- if (proofRecipe)
- {
- proofRecipe->addRewriteRule(toExplain.d_node,
- (*find).second.d_node);
- }
- }
- })
-
continue;
}
}
@@ -1814,59 +1668,10 @@ void TheoryEngine::getExplanation(std::vector<NodeTheoryPair>& explanationVector
explanationVector.push_back(newExplain);
++ i;
-
- PROOF({
- if (proofRecipe && inputAssertions)
- {
- // If we're expanding the target node of the explanation (this is the
- // first expansion...), we don't want to add it as a separate proof
- // step. It is already part of the assertions.
- if (!ContainsKey(*inputAssertions, toExplain.d_node))
- {
- LemmaProofRecipe::ProofStep proofStep(toExplain.d_theory,
- toExplain.d_node);
- if (explanation.getKind() == kind::AND)
- {
- Node flat = flattenAnd(explanation);
- for (unsigned k = 0; k < flat.getNumChildren(); ++k)
- {
- // If a true constant or a negation of a false constant we can
- // ignore it
- if (!((flat[k].isConst() && flat[k].getConst<bool>())
- || (flat[k].getKind() == kind::NOT && flat[k][0].isConst()
- && !flat[k][0].getConst<bool>())))
- {
- proofStep.addAssertion(flat[k].negate());
- }
- }
- }
- else
- {
- if (!((explanation.isConst() && explanation.getConst<bool>())
- || (explanation.getKind() == kind::NOT
- && explanation[0].isConst()
- && !explanation[0].getConst<bool>())))
- {
- proofStep.addAssertion(explanation.negate());
- }
- }
- proofRecipe->addStep(proofStep);
- }
- }
- });
}
// Keep only the relevant literals
explanationVector.resize(j);
-
- PROOF({
- if (proofRecipe) {
- // The remaining literals are the base of the proof
- for (unsigned k = 0; k < explanationVector.size(); ++k) {
- proofRecipe->addBaseAssertion(explanationVector[k].d_node.negate());
- }
- }
- });
}
void TheoryEngine::setUserAttribute(const std::string& attr,
diff --git a/src/theory/theory_engine.h b/src/theory/theory_engine.h
index b1543ad0b..167bd6d75 100644
--- a/src/theory/theory_engine.h
+++ b/src/theory/theory_engine.h
@@ -53,7 +53,6 @@
namespace CVC4 {
class ResourceManager;
-class LemmaProofRecipe;
/**
* A pair of a theory and a node. This is used to mark the flow of
@@ -292,7 +291,6 @@ class TheoryEngine {
* @param p the properties of the lemma.
*/
theory::LemmaStatus lemma(TNode node,
- ProofRule rule,
bool negated,
theory::LemmaProperty p,
theory::TheoryId atomsTo);
@@ -442,14 +440,13 @@ class TheoryEngine {
bool markPropagation(TNode assertion, TNode originalAssertions, theory::TheoryId toTheoryId, theory::TheoryId fromTheoryId);
/**
- * Computes the explanation by travarsing the propagation graph and
+ * Computes the explanation by traversing the propagation graph and
* asking relevant theories to explain the propagations. Initially
* the explanation vector should contain only the element (node, theory)
* where the node is the one to be explained, and the theory is the
- * theory that sent the literal. The lemmaProofRecipe will contain a list
- * of the explanation steps required to produce the original node.
+ * theory that sent the literal.
*/
- void getExplanation(std::vector<NodeTheoryPair>& explanationVector, LemmaProofRecipe* lemmaProofRecipe);
+ void getExplanation(std::vector<NodeTheoryPair>& explanationVector);
public:
/**
@@ -570,12 +567,6 @@ class TheoryEngine {
Node getExplanation(TNode node);
/**
- * Returns an explanation of the node propagated to the SAT solver and the theory
- * that propagated it.
- */
- Node getExplanationAndRecipe(TNode node, LemmaProofRecipe* proofRecipe);
-
- /**
* Get the pointer to the model object used by this theory engine.
*/
theory::TheoryModel* getModel();
@@ -687,14 +678,15 @@ class TheoryEngine {
/**
* Get instantiation methods
* first inputs forall x.q[x] and returns ( q[a], ..., q[z] )
- * second inputs forall x.q[x] and returns ( a, ..., z )
- * third and fourth return mappings e.g. forall x.q1[x] -> ( q1[a]...q1[z] ) , ... , forall x.qn[x] -> ( qn[a]...qn[z] )
+ * second inputs forall x.q[x] and returns ( a, ..., z )
+ * third and fourth return mappings e.g. forall x.q1[x] -> ( q1[a]...q1[z] )
+ * , ... , forall x.qn[x] -> ( qn[a]...qn[z] )
*/
void getInstantiations( Node q, std::vector< Node >& insts );
void getInstantiationTermVectors( Node q, std::vector< std::vector< Node > >& tvecs );
void getInstantiations( std::map< Node, std::vector< Node > >& insts );
void getInstantiationTermVectors( std::map< Node, std::vector< std::vector< Node > > >& insts );
-
+
/**
* Get instantiated conjunction, returns q[t1] ^ ... ^ q[tn] where t1...tn are current set of instantiations for q.
* Can be used for quantifier elimination when satisfiable and q[t1] ^ ... ^ q[tn] |= q
@@ -726,7 +718,7 @@ private:
public:
/** Set user attribute.
- *
+ *
* This function is called when an attribute is set by a user. In SMT-LIBv2
* this is done via the syntax (! n :attr)
*/
@@ -736,7 +728,7 @@ private:
const std::string& str_value);
/** Handle user attribute.
- *
+ *
* Associates theory t with the attribute attr. Theory t will be
* notified whenever an attribute of name attr is set.
*/
diff --git a/src/theory/theory_test_utils.h b/src/theory/theory_test_utils.h
index 2593b11a6..965e99338 100644
--- a/src/theory/theory_test_utils.h
+++ b/src/theory/theory_test_utils.h
@@ -27,7 +27,6 @@
#include "expr/node.h"
#include "theory/interrupted.h"
#include "theory/output_channel.h"
-#include "util/proof.h"
#include "util/resource_manager.h"
#include "util/unsafe_interrupt_exception.h"
@@ -69,17 +68,14 @@ public:
~TestOutputChannel() override {}
void safePoint(ResourceManager::Resource r) override {}
- void conflict(TNode n, std::unique_ptr<Proof> pf) override
- {
- push(CONFLICT, n);
- }
+ void conflict(TNode n) override { push(CONFLICT, n); }
bool propagate(TNode n) override {
push(PROPAGATE, n);
return true;
}
- LemmaStatus lemma(TNode n, ProofRule rule, LemmaProperty p) override
+ LemmaStatus lemma(TNode n, LemmaProperty p) override
{
push(LEMMA, n);
return LemmaStatus(Node::null(), 0);
diff --git a/src/theory/uf/eq_proof.cpp b/src/theory/uf/eq_proof.cpp
index 513cf2f39..d7b615ffa 100644
--- a/src/theory/uf/eq_proof.cpp
+++ b/src/theory/uf/eq_proof.cpp
@@ -21,33 +21,20 @@ namespace CVC4 {
namespace theory {
namespace eq {
-void EqProof::debug_print(const char* c,
- unsigned tb,
- PrettyPrinter* prettyPrinter) const
+void EqProof::debug_print(const char* c, unsigned tb) const
{
std::stringstream ss;
- debug_print(ss, tb, prettyPrinter);
+ debug_print(ss, tb);
Debug(c) << ss.str();
}
-void EqProof::debug_print(std::ostream& os,
- unsigned tb,
- PrettyPrinter* prettyPrinter) const
+void EqProof::debug_print(std::ostream& os, unsigned tb) const
{
for (unsigned i = 0; i < tb; i++)
{
os << " ";
}
-
- if (prettyPrinter)
- {
- os << prettyPrinter->printTag(d_id);
- }
- else
- {
- os << static_cast<MergeReasonType>(d_id);
- }
- os << "(";
+ os << d_id << "(";
if (d_children.empty() && d_node.isNull())
{
os << ")";
@@ -66,7 +53,7 @@ void EqProof::debug_print(std::ostream& os,
for (unsigned i = 0; i < size; ++i)
{
os << std::endl;
- d_children[i]->debug_print(os, tb + 1, prettyPrinter);
+ d_children[i]->debug_print(os, tb + 1);
if (i < size - 1)
{
for (unsigned j = 0; j < tb + 1; ++j)
@@ -850,8 +837,7 @@ Node EqProof::addToProof(
<< ", returning " << it->second << "\n";
return it->second;
}
- Trace("eqproof-conv") << "EqProof::addToProof: adding step for "
- << static_cast<MergeReasonType>(d_id)
+ Trace("eqproof-conv") << "EqProof::addToProof: adding step for " << d_id
<< " with conclusion " << d_node << "\n";
// Assumption
if (d_id == MERGED_THROUGH_EQUALITY)
@@ -976,12 +962,10 @@ Node EqProof::addToProof(
{
Assert(!d_node.isNull() && d_node.getKind() == kind::EQUAL
&& d_node[1].isConst())
- << ". Conclusion " << d_node << " from "
- << static_cast<MergeReasonType>(d_id)
+ << ". Conclusion " << d_node << " from " << d_id
<< " was expected to be (= (f t1 ... tn) c)\n";
Assert(!assumptions.count(d_node))
- << "Conclusion " << d_node << " from "
- << static_cast<MergeReasonType>(d_id) << " is an assumption\n";
+ << "Conclusion " << d_node << " from " << d_id << " is an assumption\n";
// The step has the form
// [(= t1 c1)] ... [(= tn cn)]
// ------------------------
diff --git a/src/theory/uf/eq_proof.h b/src/theory/uf/eq_proof.h
index 492252baa..72368c8c9 100644
--- a/src/theory/uf/eq_proof.h
+++ b/src/theory/uf/eq_proof.h
@@ -35,36 +35,21 @@ namespace eq {
class EqProof
{
public:
- /** A custom pretty printer used for custom rules being those in
- * MergeReasonType. */
- class PrettyPrinter
- {
- public:
- virtual ~PrettyPrinter() {}
- virtual std::string printTag(unsigned tag) = 0;
- };
-
EqProof() : d_id(MERGED_THROUGH_REFLEXIVITY) {}
/** The proof rule for concluding d_node */
- unsigned d_id;
+ MergeReasonType d_id;
/** The conclusion of this EqProof */
Node d_node;
/** The proofs of the premises for deriving d_node with d_id */
std::vector<std::shared_ptr<EqProof>> d_children;
/**
- * Debug print this proof on debug trace c with tabulation tb and pretty
- * printer prettyPrinter.
+ * Debug print this proof on debug trace c with tabulation tb.
*/
- void debug_print(const char* c,
- unsigned tb = 0,
- PrettyPrinter* prettyPrinter = nullptr) const;
+ void debug_print(const char* c, unsigned tb = 0) const;
/**
- * Debug print this proof on output stream os with tabulation tb and pretty
- * printer prettyPrinter.
+ * Debug print this proof on output stream os with tabulation tb.
*/
- void debug_print(std::ostream& os,
- unsigned tb = 0,
- PrettyPrinter* prettyPrinter = nullptr) const;
+ void debug_print(std::ostream& os, unsigned tb = 0) const;
/** Add to proof
*
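Since d_id is now typed as MergeReasonType and the PrettyPrinter hook is gone, proof tags are assigned and printed directly through the enum's stream operator. A fragmentary sketch, assuming the CVC4::theory::eq namespace and two TNodes t1 and t2 already at hand:

    // Build a one-step EqProof and dump it on the "pf::ee" trace channel.
    std::shared_ptr<EqProof> eqp = std::make_shared<EqProof>();
    eqp->d_id = MERGED_THROUGH_TRANS;  // typed enum value, no cast needed
    eqp->d_node = t1.eqNode(t2);       // conclusion of the step
    eqp->debug_print("pf::ee", 1);     // d_id streams via operator<<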
diff --git a/src/theory/uf/equality_engine.cpp b/src/theory/uf/equality_engine.cpp
index dd142edf4..c97c99776 100644
--- a/src/theory/uf/equality_engine.cpp
+++ b/src/theory/uf/equality_engine.cpp
@@ -103,8 +103,6 @@ void EqualityEngine::init() {
d_trueId = getNodeId(d_true);
d_falseId = getNodeId(d_false);
-
- d_freshMergeReasonType = eq::NUMBER_OF_MERGE_REASONS;
}
EqualityEngine::~EqualityEngine() {
@@ -1100,7 +1098,7 @@ void EqualityEngine::explainEquality(TNode t1, TNode t2, bool polarity,
getExplanation(t1Id, t2Id, equalities, cache, eqp);
} else {
if (eqp) {
- eqp->d_id = eq::MERGED_THROUGH_TRANS;
+ eqp->d_id = MERGED_THROUGH_TRANS;
eqp->d_node = d_nodes[t1Id].eqNode(d_nodes[t2Id]).notNode();
}
@@ -1137,12 +1135,13 @@ void EqualityEngine::explainEquality(TNode t1, TNode t2, bool polarity,
Debug("pf::ee") << "Child proof is:" << std::endl;
eqpc->debug_print("pf::ee", 1);
}
- if (eqpc->d_id == eq::MERGED_THROUGH_TRANS) {
+ if (eqpc->d_id == MERGED_THROUGH_TRANS)
+ {
std::vector<std::shared_ptr<EqProof>> orderedChildren;
bool nullCongruenceFound = false;
for (const auto& child : eqpc->d_children)
{
- if (child->d_id == eq::MERGED_THROUGH_CONGRUENCE
+ if (child->d_id == MERGED_THROUGH_CONGRUENCE
&& child->d_node.isNull())
{
nullCongruenceFound = true;
@@ -1382,35 +1381,24 @@ void EqualityEngine::getExplanation(
#endif
// If the nodes are the same, we're done
- if (t1Id == t2Id){
- if( eqp ) {
- if (options::proofNew())
- {
- // ignore equalities between function symbols, i.e. internal nullary
- // non-constant nodes.
- //
- // Note that this is robust for HOL because in that case function
- // symbols are not internal nodes
- if (d_isInternal[t1Id] && d_nodes[t1Id].getNumChildren() == 0
- && !d_isConstant[t1Id])
- {
- eqp->d_node = Node::null();
- }
- else
- {
- Assert(d_nodes[t1Id].getKind() != kind::BUILTIN);
- eqp->d_node = d_nodes[t1Id].eqNode(d_nodes[t1Id]);
- }
- }
- else if ((d_nodes[t1Id].getKind() == kind::BUILTIN)
- && (d_nodes[t1Id].getConst<Kind>() == kind::SELECT))
+ if (t1Id == t2Id)
+ {
+ if (eqp)
+ {
+ // ignore equalities between function symbols, i.e. internal nullary
+ // non-constant nodes.
+ //
+ // Note that this is robust for HOL because in that case function
+ // symbols are not internal nodes
+ if (d_isInternal[t1Id] && d_nodes[t1Id].getNumChildren() == 0
+ && !d_isConstant[t1Id])
{
- std::vector<Node> no_children;
- eqp->d_node = NodeManager::currentNM()->mkNode(kind::PARTIAL_SELECT_0, no_children);
+ eqp->d_node = Node::null();
}
else
{
- eqp->d_node = ProofManager::currentPM()->mkOp(d_nodes[t1Id]);
+ Assert(d_nodes[t1Id].getKind() != kind::BUILTIN);
+ eqp->d_node = d_nodes[t1Id].eqNode(d_nodes[t1Id]);
}
}
return;
@@ -1466,7 +1454,8 @@ void EqualityEngine::getExplanation(
// The current node
currentNode = bfsQueue[currentIndex].d_nodeId;
EqualityNodeId edgeNode = d_equalityEdges[currentEdge].getNodeId();
- unsigned reasonType = d_equalityEdges[currentEdge].getReasonType();
+ MergeReasonType reasonType = static_cast<MergeReasonType>(
+ d_equalityEdges[currentEdge].getReasonType());
Node reason = d_equalityEdges[currentEdge].getReason();
Debug("equality")
@@ -1482,8 +1471,9 @@ void EqualityEngine::getExplanation(
<< edge.getNodeId() << "} " << d_nodes[edge.getNodeId()] << ")"
<< std::endl;
Debug("equality")
- << d_name << " reason type = "
- << static_cast<MergeReasonType>(reasonType) << std::endl;
+ << d_name
+ << " reason type = " << reasonType
+ << "\n";
std::shared_ptr<EqProof> eqpc;
// Make child proof if a proof is being constructed
@@ -1518,63 +1508,21 @@ void EqualityEngine::getExplanation(
{
eqpc->d_children.push_back(eqpc1);
eqpc->d_children.push_back(eqpc2);
- if (options::proofNew())
+ // build conclusion if ids correspond to non-internal nodes or
+ // if non-internal nodes can be retrieved from them (in the
+ // case of n-ary applications), otherwise leave conclusion as
+ // null. This is only done for congruence kinds, since
+ // congruence is not used otherwise.
+ Kind k = d_nodes[currentNode].getKind();
+ if (d_congruenceKinds[k])
{
- // build conclusion if ids correspond to non-internal nodes or
- // if non-internal nodes can be retrieved from them (in the
- // case of n-ary applications), otherwise leave conclusion as
- // null. This is only done for congruence kinds, since
- // congruence is not used otherwise.
- Kind k = d_nodes[currentNode].getKind();
- if (d_congruenceKinds[k])
- {
- buildEqConclusion(currentNode, edgeNode, eqpc.get());
- }
- else
- {
- Assert(k == kind::EQUAL)
- << "not an internal node " << d_nodes[currentNode]
- << " with non-congruence with " << k << "\n";
- }
- }
- else if (d_nodes[currentNode].getKind() == kind::EQUAL)
- {
- //leave node null for now
- eqpc->d_node = Node::null();
+ buildEqConclusion(currentNode, edgeNode, eqpc.get());
}
else
{
- if (d_nodes[f1.d_a].getKind() == kind::APPLY_UF
- || d_nodes[f1.d_a].getKind() == kind::SELECT
- || d_nodes[f1.d_a].getKind() == kind::STORE)
- {
- eqpc->d_node = d_nodes[f1.d_a];
- }
- else
- {
- if (d_nodes[f1.d_a].getKind() == kind::BUILTIN
- && d_nodes[f1.d_a].getConst<Kind>() == kind::SELECT)
- {
- eqpc->d_node = NodeManager::currentNM()->mkNode(
- kind::PARTIAL_SELECT_1, d_nodes[f1.d_b]);
- // The first child is a PARTIAL_SELECT_0.
- // Give it a child so that we know what kind of (read) it is, when we dump to LFSC.
- Assert(eqpc->d_children[0]->d_node.getKind()
- == kind::PARTIAL_SELECT_0);
- Assert(eqpc->d_children[0]->d_children.size() == 0);
-
- eqpc->d_children[0]->d_node =
- NodeManager::currentNM()->mkNode(
- kind::PARTIAL_SELECT_0, d_nodes[f1.d_b]);
- }
- else
- {
- eqpc->d_node = NodeManager::currentNM()->mkNode(
- kind::PARTIAL_APPLY_UF,
- ProofManager::currentPM()->mkOp(d_nodes[f1.d_a]),
- d_nodes[f1.d_b]);
- }
- }
+ Assert(k == kind::EQUAL)
+ << "not an internal node " << d_nodes[currentNode]
+ << " with non-congruence with " << k << "\n";
}
}
Debug("equality") << pop;
@@ -1608,7 +1556,7 @@ void EqualityEngine::getExplanation(
// Get the node we interpreted
TNode interpreted;
- if (eqpc && options::proofNew())
+ if (eqpc)
{
// build the conclusion f(c1, ..., cn) = c
if (d_nodes[currentNode].isConst())
@@ -1661,24 +1609,19 @@ void EqualityEngine::getExplanation(
Debug("equality") << d_name << "::eq::getExplanation(): adding: "
<< reason << std::endl;
Debug("equality")
- << d_name << "::eq::getExplanation(): reason type = "
- << static_cast<MergeReasonType>(reasonType) << std::endl;
+ << d_name
+ << "::eq::getExplanation(): reason type = " << reasonType
+ << "\n";
Node a = d_nodes[currentNode];
Node b = d_nodes[d_equalityEdges[currentEdge].getNodeId()];
if (eqpc) {
- //apply proof reconstruction processing (when eqpc is non-null)
- if (d_pathReconstructionTriggers.find(reasonType) != d_pathReconstructionTriggers.end()) {
- d_pathReconstructionTriggers.find(reasonType)
- ->second->notify(reasonType, reason, a, b, equalities,
- eqpc.get());
- }
if (reasonType == MERGED_THROUGH_EQUALITY) {
// in the new proof infrastructure we can assume that "theory
// assumptions", which are a consequence of theory reasoning
// on other assumptions, are externally justified. In this
// case we can use (= a b) directly as the conclusion here.
- eqpc->d_node = !options::proofNew() ? reason : b.eqNode(a);
+ eqpc->d_node = b.eqNode(a);
} else {
// The LFSC translator prefers (not (= a b)) over (= (= a b) false)
@@ -1722,20 +1665,12 @@ void EqualityEngine::getExplanation(
} else {
eqp->d_id = MERGED_THROUGH_TRANS;
eqp->d_children.insert( eqp->d_children.end(), eqp_trans.begin(), eqp_trans.end() );
- if (options::proofNew())
- {
- // build conclusion in case of equality between non-internal
- // nodes or of n-ary congruence kinds, otherwise leave as
- // null. The latter is necessary for the overall handling of
- // congruence proofs involving n-ary kinds, see
- // EqProof::reduceNestedCongruence for more details.
- buildEqConclusion(t1Id, t2Id, eqp);
- }
- else
- {
- eqp->d_node = NodeManager::currentNM()->mkNode(
- kind::EQUAL, d_nodes[t1Id], d_nodes[t2Id]);
- }
+ // build conclusion in case of equality between non-internal
+ // nodes or of n-ary congruence kinds, otherwise leave as
+ // null. The latter is necessary for the overall handling of
+ // congruence proofs involving n-ary kinds, see
+ // EqProof::reduceNestedCongruence for more details.
+ buildEqConclusion(t1Id, t2Id, eqp);
}
if (Debug.isOn("pf::ee"))
{
@@ -2218,18 +2153,6 @@ size_t EqualityEngine::getSize(TNode t) {
return getEqualityNode(getEqualityNode(t).getFind()).getSize();
}
-
-void EqualityEngine::addPathReconstructionTrigger(unsigned trigger, const PathReconstructionNotify* notify) {
- // Currently we can only inform one callback per trigger
- Assert(d_pathReconstructionTriggers.find(trigger)
- == d_pathReconstructionTriggers.end());
- d_pathReconstructionTriggers[trigger] = notify;
-}
-
-unsigned EqualityEngine::getFreshMergeReasonType() {
- return d_freshMergeReasonType++;
-}
-
std::string EqualityEngine::identify() const { return d_name; }
void EqualityEngine::addTriggerTerm(TNode t, TheoryId tag)
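
A minimal sketch of client usage, assuming an EqualityEngine ee that already knows the terms a and b; the wrapper explainAndPrint is hypothetical, and only explainEquality and the reduced debug_print signature are taken from this diff.

#include <memory>
#include <vector>
#include "theory/uf/equality_engine.h"

using namespace CVC4;
using namespace CVC4::theory::eq;

// Illustrative helper: explain why a and b are equal in the given engine and
// dump the resulting EqProof on the "pf::ee" trace with the trimmed-down
// debug_print interface (no PrettyPrinter argument anymore).
void explainAndPrint(EqualityEngine& ee, TNode a, TNode b)
{
  std::vector<TNode> assumptions;
  std::shared_ptr<EqProof> proof = std::make_shared<EqProof>();
  ee.explainEquality(a, b, /* polarity = */ true, assumptions, proof.get());
  proof->debug_print("pf::ee", 1);
}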
diff --git a/src/theory/uf/equality_engine.h b/src/theory/uf/equality_engine.h
index 19a10eba8..f8444965f 100644
--- a/src/theory/uf/equality_engine.h
+++ b/src/theory/uf/equality_engine.h
@@ -43,26 +43,10 @@ namespace CVC4 {
namespace theory {
namespace eq {
-
-class EqProof;
class EqClassesIterator;
class EqClassIterator;
/**
- * An interface for equality engine notifications during equality path reconstruction.
- * Can be used to add theory-specific logic for, e.g., proof construction.
- */
-class PathReconstructionNotify {
-public:
-
- virtual ~PathReconstructionNotify() {}
-
- virtual void notify(unsigned reasonType, Node reason, Node a, Node b,
- std::vector<TNode>& equalities,
- EqProof* proof) const = 0;
-};
-
-/**
* Class for keeping an incremental congruence closure over a set of terms. It provides
* notifications via an EqualityEngineNotify object.
*/
@@ -152,9 +136,6 @@ private:
/** The map of kinds with operators to be considered external (for higher-order) */
KindMap d_congruenceKindsExtOperators;
- /** Objects that need to be notified during equality path reconstruction */
- std::map<unsigned, const PathReconstructionNotify*> d_pathReconstructionTriggers;
-
/** Map from nodes to their ids */
std::unordered_map<TNode, EqualityNodeId, TNodeHashFunction> d_nodeIds;
@@ -196,9 +177,6 @@ private:
/** Memory for the use-list nodes */
std::vector<UseListNode> d_useListNodes;
- /** A fresh merge reason type to return upon request */
- unsigned d_freshMergeReasonType;
-
/**
* We keep a list of asserted equalities. Not among original terms, but
* among the class representatives.
@@ -861,16 +839,6 @@ private:
*/
bool consistent() const { return !d_done; }
- /**
- * Marks an object for merge type based notification during equality path reconstruction.
- */
- void addPathReconstructionTrigger(unsigned trigger, const PathReconstructionNotify* notify);
-
- /**
- * Returns a fresh merge reason type tag for the client to use.
- */
- unsigned getFreshMergeReasonType();
-
/** Identify this equality engine (for debugging, etc..) */
std::string identify() const;
};
diff --git a/src/theory/uf/equality_engine_types.h b/src/theory/uf/equality_engine_types.h
index 14cd80436..cceffa51d 100644
--- a/src/theory/uf/equality_engine_types.h
+++ b/src/theory/uf/equality_engine_types.h
@@ -63,20 +63,26 @@ static const EqualityEdgeId null_edge = (EqualityEdgeId)(-1);
* or a merge of an equality to false due to both sides being
* (different) constants.
*/
-enum MergeReasonType {
- /** Terms were merged due to application of congruence closure */
+enum MergeReasonType
+{
+ /** Terms were merged due to congruence */
MERGED_THROUGH_CONGRUENCE,
- /** Terms were merged due to application of pure equality */
+ /** Terms were merged due to an assumption */
MERGED_THROUGH_EQUALITY,
- /** Equality was merged to true, due to both sides of equality being in the same class */
+ /** Terms were merged due to reflexivity */
MERGED_THROUGH_REFLEXIVITY,
- /** Equality was merged to false, due to both sides of equality being a constant */
+ /** Terms were merged due to theory reasoning */
MERGED_THROUGH_CONSTANTS,
- /** (for proofs only) Equality was merged due to transitivity */
+ /** Terms were merged due to transitivity */
MERGED_THROUGH_TRANS,
-
- /** Reason types beyond this constant are theory specific reasons */
- NUMBER_OF_MERGE_REASONS
+ // TEMPORARY RULES WHILE WE DON'T MIGRATE TO PROOF_NEW
+
+ /** Terms were merged due to arrays read-over-write */
+ MERGED_THROUGH_ROW,
+ /** Terms were merged due to arrays read-over-write (1) */
+ MERGED_THROUGH_ROW1,
+ /** Terms were merged due to extensionality */
+ MERGED_THROUGH_EXT,
};
inline std::ostream& operator << (std::ostream& out, MergeReasonType reason) {
@@ -90,13 +96,13 @@ inline std::ostream& operator << (std::ostream& out, MergeReasonType reason) {
case MERGED_THROUGH_REFLEXIVITY:
out << "reflexivity";
break;
- case MERGED_THROUGH_CONSTANTS:
- out << "constants disequal";
- break;
+ case MERGED_THROUGH_CONSTANTS: out << "theory constants"; break;
case MERGED_THROUGH_TRANS:
out << "transitivity";
break;
-
+ case MERGED_THROUGH_ROW: out << "read-over-write"; break;
+ case MERGED_THROUGH_ROW1: out << "read-over-write (1)"; break;
+ case MERGED_THROUGH_EXT: out << "extensionality"; break;
default:
out << "[theory]";
break;
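
A minimal sketch, assuming the operator<< defined above is in scope; reasonToString is a hypothetical helper, and the printed text for MERGED_THROUGH_ROW follows the case added in this hunk.

#include <sstream>
#include <string>
#include "theory/uf/equality_engine_types.h"

// Hypothetical helper: render a merge reason via the stream operator above,
// e.g. MERGED_THROUGH_ROW prints as "read-over-write".
std::string reasonToString(CVC4::theory::eq::MergeReasonType reason)
{
  std::ostringstream ss;
  ss << reason;
  return ss.str();
}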
diff --git a/src/theory/uf/theory_uf.cpp b/src/theory/uf/theory_uf.cpp
index f94cc36af..3d90637e2 100644
--- a/src/theory/uf/theory_uf.cpp
+++ b/src/theory/uf/theory_uf.cpp
@@ -25,9 +25,6 @@
#include "options/smt_options.h"
#include "options/theory_options.h"
#include "options/uf_options.h"
-#include "proof/proof_manager.h"
-#include "proof/theory_proof.h"
-#include "proof/uf_proof.h"
#include "theory/theory_model.h"
#include "theory/type_enumerator.h"
#include "theory/uf/cardinality_extension.h"
@@ -288,39 +285,30 @@ bool TheoryUF::propagateLit(TNode literal)
return ok;
}/* TheoryUF::propagate(TNode) */
-void TheoryUF::explain(TNode literal, std::vector<TNode>& assumptions, eq::EqProof* pf) {
+void TheoryUF::explain(TNode literal, Node& exp)
+{
+ Debug("uf") << "TheoryUF::explain(" << literal << ")" << std::endl;
+ std::vector<TNode> assumptions;
// Do the work
bool polarity = literal.getKind() != kind::NOT;
TNode atom = polarity ? literal : literal[0];
- if (atom.getKind() == kind::EQUAL) {
+ if (atom.getKind() == kind::EQUAL)
+ {
d_equalityEngine->explainEquality(
- atom[0], atom[1], polarity, assumptions, pf);
- } else {
- d_equalityEngine->explainPredicate(atom, polarity, assumptions, pf);
- }
- if( pf ){
- Debug("pf::uf") << std::endl;
- pf->debug_print("pf::uf");
+ atom[0], atom[1], polarity, assumptions, nullptr);
}
-
- Debug("pf::uf") << "UF: explain( " << literal << " ):" << std::endl << "\t";
- for (unsigned i = 0; i < assumptions.size(); ++i) {
- Debug("pf::uf") << assumptions[i] << " ";
+ else
+ {
+ d_equalityEngine->explainPredicate(atom, polarity, assumptions, nullptr);
}
- Debug("pf::uf") << std::endl;
+ exp = mkAnd(assumptions);
}
TrustNode TheoryUF::explain(TNode literal)
{
- Node exp = explain(literal, NULL);
- return TrustNode::mkTrustPropExp(literal, exp, nullptr);
-}
-
-Node TheoryUF::explain(TNode literal, eq::EqProof* pf) {
- Debug("uf") << "TheoryUF::explain(" << literal << ")" << std::endl;
- std::vector<TNode> assumptions;
- explain(literal, assumptions, pf);
- return mkAnd(assumptions);
+ Node explanation;
+ explain(literal, explanation);
+ return TrustNode::mkTrustPropExp(literal, explanation, nullptr);
}
bool TheoryUF::collectModelValues(TheoryModel* m, const std::set<Node>& termSet)
@@ -662,12 +650,11 @@ void TheoryUF::computeCareGraph() {
<< std::endl;
}/* TheoryUF::computeCareGraph() */
-void TheoryUF::conflict(TNode a, TNode b) {
- std::shared_ptr<eq::EqProof> pf =
- d_proofsEnabled ? std::make_shared<eq::EqProof>() : nullptr;
- Node conf = explain(a.eqNode(b), pf.get());
- std::unique_ptr<ProofUF> puf(d_proofsEnabled ? new ProofUF(pf) : nullptr);
- d_out->conflict(conf, std::move(puf));
+void TheoryUF::conflict(TNode a, TNode b)
+{
+ Node conf;
+ explain(a.eqNode(b), conf);
+ d_out->conflict(conf);
d_state.notifyInConflict();
}
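
The new explain(TNode, Node&) above ends with a call to a mkAnd helper that lies outside this hunk. A minimal sketch of what such a conjunction builder conventionally does follows; mkAndSketch is an assumed name, not the implementation used by TheoryUF.

#include <vector>
#include "base/check.h"
#include "expr/node.h"
#include "expr/node_manager.h"

using namespace CVC4;

// Sketch of a conjunction builder in the spirit of the mkAnd call above: a
// single assumption is returned as-is, several are wrapped in an AND node.
Node mkAndSketch(const std::vector<TNode>& conjuncts)
{
  Assert(!conjuncts.empty());
  if (conjuncts.size() == 1)
  {
    return conjuncts[0];
  }
  return NodeManager::currentNM()->mkNode(kind::AND, conjuncts);
}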
diff --git a/src/theory/uf/theory_uf.h b/src/theory/uf/theory_uf.h
index 2bfd7e16c..41f2ba9d5 100644
--- a/src/theory/uf/theory_uf.h
+++ b/src/theory/uf/theory_uf.h
@@ -112,17 +112,6 @@ private:
*/
bool propagateLit(TNode literal);
- /**
- * Explain why this literal is true by adding assumptions
- * with proof (if "pf" is non-NULL).
- */
- void explain(TNode literal, std::vector<TNode>& assumptions, eq::EqProof* pf);
-
- /**
- * Explain a literal, with proof (if "pf" is non-NULL).
- */
- Node explain(TNode literal, eq::EqProof* pf);
-
/** All the function terms that the theory has seen */
context::CDList<TNode> d_functionsTerms;
@@ -202,6 +191,9 @@ private:
CardinalityExtension* getCardinalityExtension() const { return d_thss.get(); }
private:
+ /** Explain why this literal is true by building an explanation */
+ void explain(TNode literal, Node& exp);
+
bool areCareDisequal(TNode x, TNode y);
void addCarePairs(const TNodeTrie* t1,
const TNodeTrie* t2,