summaryrefslogtreecommitdiff
path: root/src/theory/theory_inference_manager.cpp
diff options
context:
space:
mode:
authorAndrew Reynolds <andrew.j.reynolds@gmail.com>2021-02-19 09:26:36 -0600
committerGitHub <noreply@github.com>2021-02-19 09:26:36 -0600
commitc4822869beac8d4a0eac4b234e0662d3db49f995 (patch)
treed34b86c54b0ac8de6df4734e1e76afa5f43d5efb /src/theory/theory_inference_manager.cpp
parent00479d03cdeac3e864a1930dddb16c71c5bf2ce9 (diff)
Refactoring theory inference process (#5920)
This PR refactors TheoryInference so that it is not responsible for calling back into InferenceManager, instead it sets data so that InferenceManagerBuffered has enough information to do this itself. It also makes the decision of whether to cache lemmas in theory inference manager a global choice per-theory instead of per-lemma.
Diffstat (limited to 'src/theory/theory_inference_manager.cpp')
-rw-r--r--src/theory/theory_inference_manager.cpp29
1 file changed, 13 insertions, 16 deletions
diff --git a/src/theory/theory_inference_manager.cpp b/src/theory/theory_inference_manager.cpp
index a1c1545bf..53a812653 100644
--- a/src/theory/theory_inference_manager.cpp
+++ b/src/theory/theory_inference_manager.cpp
@@ -26,12 +26,14 @@ namespace theory {
TheoryInferenceManager::TheoryInferenceManager(Theory& t,
TheoryState& state,
ProofNodeManager* pnm,
- const std::string& name)
+ const std::string& name,
+ bool cacheLemmas)
: d_theory(t),
d_theoryState(state),
d_out(t.getOutputChannel()),
d_ee(nullptr),
d_pnm(pnm),
+ d_cacheLemmas(cacheLemmas),
d_keep(t.getSatContext()),
d_lemmasSent(t.getUserContext()),
d_numConflicts(0),
@@ -226,21 +228,19 @@ TrustNode TheoryInferenceManager::explainConflictEqConstantMerge(TNode a,
<< " mkTrustedConflictEqConstantMerge";
}
-bool TheoryInferenceManager::lemma(TNode lem,
- InferenceId id,
- LemmaProperty p,
- bool doCache)
+bool TheoryInferenceManager::lemma(TNode lem, InferenceId id, LemmaProperty p)
{
TrustNode tlem = TrustNode::mkTrustLemma(lem, nullptr);
- return trustedLemma(tlem, id, p, doCache);
+ return trustedLemma(tlem, id, p);
}
bool TheoryInferenceManager::trustedLemma(const TrustNode& tlem,
InferenceId id,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
- if (doCache)
+ // if the policy says to cache lemmas, check the cache and return false if
+ // we are a duplicate
+ if (d_cacheLemmas)
{
if (!cacheLemma(tlem.getNode(), p))
{
@@ -259,13 +259,12 @@ bool TheoryInferenceManager::lemmaExp(Node conc,
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
const std::vector<Node>& args,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
// make the trust node
TrustNode trn = mkLemmaExp(conc, pfr, exp, noExplain, args);
// send it on the output channel
- return trustedLemma(trn, id, p, doCache);
+ return trustedLemma(trn, id, p);
}
TrustNode TheoryInferenceManager::mkLemmaExp(Node conc,
@@ -290,13 +289,12 @@ bool TheoryInferenceManager::lemmaExp(Node conc,
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
ProofGenerator* pg,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
// make the trust node
TrustNode trn = mkLemmaExp(conc, exp, noExplain, pg);
// send it on the output channel
- return trustedLemma(trn, id, p, doCache);
+ return trustedLemma(trn, id, p);
}
TrustNode TheoryInferenceManager::mkLemmaExp(Node conc,
@@ -358,7 +356,6 @@ bool TheoryInferenceManager::assertInternalFact(TNode atom,
const std::vector<Node>& exp,
ProofGenerator* pg)
{
- Assert(pg != nullptr);
d_factIdStats << id;
return processInternalFact(atom, pol, PfRule::ASSUME, exp, {}, pg);
}
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback