summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMathias Preiner <mathias.preiner@gmail.com>2021-09-30 10:20:04 -0700
committerGitHub <noreply@github.com>2021-09-30 17:20:04 +0000
commit5a824c9e67789a529e34b7e2a7229986412bf979 (patch)
tree94391068e0b6f074937015aae86d67f258f5938a
parent720e3afc907f429cf8105ee49b3628ddaacbf7a3 (diff)
Properly cache assertions in static learning preprocessing pass. (#7242)
The cache is user-context dependent and guarantees that ppStaticLearn is only called once per assertion in a given context - if assertions are popped from the context and added again, ppStaticLearn is called again on that assertion. Further, the preprocessing pass now also handles the splitting of top-level AND terms. Before that, each theory was responsible for splitting and caching. Refactoring the corresponding ppStaticLearn theory methods will be part of future PRs. Marking do-not-merge until cluster runs are finished.
-rw-r--r--src/preprocessing/passes/static_learning.cpp52
-rw-r--r--src/preprocessing/passes/static_learning.h8
2 files changed, 55 insertions, 5 deletions
diff --git a/src/preprocessing/passes/static_learning.cpp b/src/preprocessing/passes/static_learning.cpp
index 24d25e354..e55a8c001 100644
--- a/src/preprocessing/passes/static_learning.cpp
+++ b/src/preprocessing/passes/static_learning.cpp
@@ -28,19 +28,37 @@ namespace preprocessing {
namespace passes {
StaticLearning::StaticLearning(PreprocessingPassContext* preprocContext)
- : PreprocessingPass(preprocContext, "static-learning"){};
+ : PreprocessingPass(preprocContext, "static-learning"),
+ d_cache(userContext()){};
PreprocessingPassResult StaticLearning::applyInternal(
AssertionPipeline* assertionsToPreprocess)
{
d_preprocContext->spendResource(Resource::PreprocessStep);
- for (unsigned i = 0; i < assertionsToPreprocess->size(); ++i)
+ std::vector<TNode> toProcess;
+
+ for (size_t i = 0, size = assertionsToPreprocess->size(); i < size; ++i)
{
+ const Node& n = (*assertionsToPreprocess)[i];
+
+ /* Already processed in this context. */
+ if (d_cache.find(n) != d_cache.end())
+ {
+ continue;
+ }
+
NodeBuilder learned(kind::AND);
- learned << (*assertionsToPreprocess)[i];
- d_preprocContext->getTheoryEngine()->ppStaticLearn(
- (*assertionsToPreprocess)[i], learned);
+ learned << n;
+
+ /* Process all assertions in nested AND terms. */
+ std::vector<TNode> assertions;
+ flattenAnd(n, assertions);
+ for (TNode a : assertions)
+ {
+ d_preprocContext->getTheoryEngine()->ppStaticLearn(a, learned);
+ }
+
if (learned.getNumChildren() == 1)
{
learned.clear();
@@ -53,6 +71,30 @@ PreprocessingPassResult StaticLearning::applyInternal(
return PreprocessingPassResult::NO_CONFLICT;
}
+void StaticLearning::flattenAnd(TNode node, std::vector<TNode>& children)
+{
+ std::vector<TNode> visit = {node};
+ do
+ {
+ TNode cur = visit.back();
+ visit.pop_back();
+
+ if (d_cache.find(cur) != d_cache.end())
+ {
+ continue;
+ }
+ d_cache.insert(cur);
+
+ if (cur.getKind() == kind::AND)
+ {
+ visit.insert(visit.end(), cur.begin(), cur.end());
+ }
+ else
+ {
+ children.push_back(cur);
+ }
+ } while (!visit.empty());
+}
} // namespace passes
} // namespace preprocessing
diff --git a/src/preprocessing/passes/static_learning.h b/src/preprocessing/passes/static_learning.h
index ac9ab6251..b44c2c86f 100644
--- a/src/preprocessing/passes/static_learning.h
+++ b/src/preprocessing/passes/static_learning.h
@@ -18,6 +18,7 @@
#ifndef CVC5__PREPROCESSING__PASSES__STATIC_LEARNING_H
#define CVC5__PREPROCESSING__PASSES__STATIC_LEARNING_H
+#include "context/cdhashset.h"
#include "preprocessing/preprocessing_pass.h"
namespace cvc5 {
@@ -32,6 +33,13 @@ class StaticLearning : public PreprocessingPass
protected:
PreprocessingPassResult applyInternal(
AssertionPipeline* assertionsToPreprocess) override;
+
+ private:
+ /** Collect children of flattened AND term. */
+ void flattenAnd(TNode node, std::vector<TNode>& children);
+
+ /** CD-cache for visiting nodes used by `flattenAnd`. */
+ context::CDHashSet<Node> d_cache;
};
} // namespace passes
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback