From c4822869beac8d4a0eac4b234e0662d3db49f995 Mon Sep 17 00:00:00 2001 From: Andrew Reynolds Date: Fri, 19 Feb 2021 09:26:36 -0600 Subject: Refactoring theory inference process (#5920) This PR refactors TheoryInference so that it is not responsible for calling back into InferenceManager, instead it sets data so that InferenceManagerBuffered has enough information to do this itself. It also makes the decision of whether to cache lemmas in theory inference manager a global choice per-theory instead of per-lemma. --- src/theory/theory_inference_manager.cpp | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) (limited to 'src/theory/theory_inference_manager.cpp') diff --git a/src/theory/theory_inference_manager.cpp b/src/theory/theory_inference_manager.cpp index a1c1545bf..53a812653 100644 --- a/src/theory/theory_inference_manager.cpp +++ b/src/theory/theory_inference_manager.cpp @@ -26,12 +26,14 @@ namespace theory { TheoryInferenceManager::TheoryInferenceManager(Theory& t, TheoryState& state, ProofNodeManager* pnm, - const std::string& name) + const std::string& name, + bool cacheLemmas) : d_theory(t), d_theoryState(state), d_out(t.getOutputChannel()), d_ee(nullptr), d_pnm(pnm), + d_cacheLemmas(cacheLemmas), d_keep(t.getSatContext()), d_lemmasSent(t.getUserContext()), d_numConflicts(0), @@ -226,21 +228,19 @@ TrustNode TheoryInferenceManager::explainConflictEqConstantMerge(TNode a, << " mkTrustedConflictEqConstantMerge"; } -bool TheoryInferenceManager::lemma(TNode lem, - InferenceId id, - LemmaProperty p, - bool doCache) +bool TheoryInferenceManager::lemma(TNode lem, InferenceId id, LemmaProperty p) { TrustNode tlem = TrustNode::mkTrustLemma(lem, nullptr); - return trustedLemma(tlem, id, p, doCache); + return trustedLemma(tlem, id, p); } bool TheoryInferenceManager::trustedLemma(const TrustNode& tlem, InferenceId id, - LemmaProperty p, - bool doCache) + LemmaProperty p) { - if (doCache) + // if the policy says to cache lemmas, check the 
cache and return false if + // we are a duplicate + if (d_cacheLemmas) { if (!cacheLemma(tlem.getNode(), p)) { @@ -259,13 +259,12 @@ bool TheoryInferenceManager::lemmaExp(Node conc, const std::vector<Node>& exp, const std::vector<Node>& noExplain, const std::vector<Node>& args, - LemmaProperty p, - bool doCache) + LemmaProperty p) { // make the trust node TrustNode trn = mkLemmaExp(conc, pfr, exp, noExplain, args); // send it on the output channel - return trustedLemma(trn, id, p, doCache); + return trustedLemma(trn, id, p); } TrustNode TheoryInferenceManager::mkLemmaExp(Node conc, @@ -290,13 +289,12 @@ bool TheoryInferenceManager::lemmaExp(Node conc, const std::vector<Node>& exp, const std::vector<Node>& noExplain, ProofGenerator* pg, - LemmaProperty p, - bool doCache) + LemmaProperty p) { // make the trust node TrustNode trn = mkLemmaExp(conc, exp, noExplain, pg); // send it on the output channel - return trustedLemma(trn, id, p, doCache); + return trustedLemma(trn, id, p); } TrustNode TheoryInferenceManager::mkLemmaExp(Node conc, @@ -358,7 +356,6 @@ bool TheoryInferenceManager::assertInternalFact(TNode atom, const std::vector<Node>& exp, ProofGenerator* pg) { - Assert(pg != nullptr); d_factIdStats << id; return processInternalFact(atom, pol, PfRule::ASSUME, exp, {}, pg); } -- cgit v1.2.3