author    ajreynol <andrew.j.reynolds@gmail.com>  2018-05-29 09:50:22 -0500
committer ajreynol <andrew.j.reynolds@gmail.com>  2018-05-29 09:50:22 -0500
commit    61a051bb70d3a5a5e43e1402ab96a5a8b2cf0e5d (patch)
tree      e97f4f736118e31f8455ef71e60947f8235ff54d /src/theory/quantifiers
parent    6059866b361d0852d0b70d484b0cb397f3cc5bf4 (diff)
Initialize with extRewBv branch.
Diffstat (limited to 'src/theory/quantifiers')
-rw-r--r--  src/theory/quantifiers/extended_rewrite.cpp  1527
-rw-r--r--  src/theory/quantifiers/extended_rewrite.h      79
2 files changed, 1604 insertions, 2 deletions
diff --git a/src/theory/quantifiers/extended_rewrite.cpp b/src/theory/quantifiers/extended_rewrite.cpp
index 98a493423..1a3574990 100644
--- a/src/theory/quantifiers/extended_rewrite.cpp
+++ b/src/theory/quantifiers/extended_rewrite.cpp
@@ -36,6 +36,7 @@ typedef expr::Attribute<ExtRewriteAttributeId, Node> ExtRewriteAttribute;
ExtendedRewriter::ExtendedRewriter(bool aggr) : d_aggr(aggr)
{
}
+
void ExtendedRewriter::setCache(Node n, Node ret)
{
ExtRewriteAttribute era;
@@ -93,6 +94,7 @@ Node ExtendedRewriter::extendedRewrite(Node n)
if (!pre_new_ret.isNull())
{
ret = extendedRewrite(pre_new_ret);
+
Trace("q-ext-rewrite-debug") << "...ext-pre-rewrite : " << n << " -> "
<< pre_new_ret << std::endl;
setCache(n, ret);
@@ -201,6 +203,10 @@ Node ExtendedRewriter::extendedRewrite(Node n)
{
new_ret = extendedRewriteArith(atom, pol);
}
+ else if (tid == THEORY_BV)
+ {
+ new_ret = extendedRewriteBv(atom, pol);
+ }
// add back negation if not processed
if (!pol && !new_ret.isNull())
{
@@ -1058,9 +1064,30 @@ Node ExtendedRewriter::partialSubstitute(Node n,
Node ExtendedRewriter::solveEquality(Node n)
{
- // TODO (#1706) : implement
Assert(n.getKind() == EQUAL);
+ if (n[0].getType().isBitVector())
+ {
+ for (unsigned i = 0; i < 2; i++)
+ {
+ if (isConstBv(n[1 - i], false))
+ {
+ // (bvadd x (bvneg y)) = 0 ---> x = y
+ if (n[i].getKind() == BITVECTOR_PLUS && n[i].getNumChildren() == 2)
+ {
+ for (unsigned j = 0; j < 2; j++)
+ {
+ if (n[i][j].isVar() && n[i][1 - j].getKind() == BITVECTOR_NEG
+ && n[i][1 - j][0].isVar())
+ {
+ return n[i][j].eqNode(n[i][1 - j][0]);
+ }
+ }
+ }
+ }
+ }
+ }
+
return Node::null();
}
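
The identity behind this rule, x + (-y) = 0 iff x = y over fixed-width bit-vectors, can be checked exhaustively at a small width. A minimal standalone sketch, independent of the CVC4 code above, modeling 8-bit bit-vectors with uint8_t (the width is an arbitrary choice for illustration):

#include <cassert>
#include <cstdint>

int main()
{
  for (unsigned x = 0; x < 256; x++)
  {
    for (unsigned y = 0; y < 256; y++)
    {
      // bvadd( x, bvneg( y ) ), truncated to 8 bits
      uint8_t sum = static_cast<uint8_t>(x + static_cast<uint8_t>(0u - y));
      assert((sum == 0) == (x == y));
    }
  }
  return 0;
}
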
@@ -1097,7 +1124,7 @@ bool ExtendedRewriter::inferSubstitution(Node n,
r2 = n[1 - i];
if (v[i] != n[i])
{
- Assert( TermUtil::isNegate( n[i].getKind() ) );
+ Assert(TermUtil::isNegate(n[i].getKind()));
r2 = TermUtil::mkNegate(n[i].getKind(), r2);
}
// TODO (#1706) : union find
@@ -1148,6 +1175,1502 @@ Node ExtendedRewriter::extendedRewriteArith(Node ret, bool& pol)
return new_ret;
}
+Node ExtendedRewriter::extendedRewriteBv(Node ret, bool& pol)
+{
+ if (Trace.isOn("q-ext-rewrite-bv"))
+ {
+ Trace("q-ext-rewrite-bv") << "Extended rewrite bv : ";
+ if (!pol)
+ {
+ Trace("q-ext-rewrite-bv") << "(not) ";
+ }
+ Trace("q-ext-rewrite-bv") << ret << std::endl;
+ }
+ Kind k = ret.getKind();
+ NodeManager* nm = NodeManager::currentNM();
+ Node new_ret;
+ if (k == EQUAL)
+ {
+ // splicing
+ if (ret[0].getKind() == BITVECTOR_CONCAT
+ || ret[1].getKind() == BITVECTOR_CONCAT)
+ {
+ // get spliced forms
+ std::vector<Node> v1;
+ std::vector<Node> v2;
+ spliceBv(ret[0], ret[1], v1, v2);
+ Assert(v1.size() > 1);
+ Assert(v1.size() == v2.size());
+ unsigned nconst_count = 0;
+ int nconst_index = -1;
+ for (unsigned i = 0, size = v1.size(); i < size; i++)
+ {
+ Node eeq = v1[i].eqNode(v2[i]);
+ eeq = Rewriter::rewrite(eeq);
+ if (eeq.isConst())
+ {
+ if (!eeq.getConst<bool>())
+ {
+ new_ret = eeq;
+ debugExtendedRewrite(ret, new_ret, "CONCAT eq false");
+ return new_ret;
+ }
+ }
+ else
+ {
+ nconst_count++;
+ nconst_index = i;
+ }
+ }
+ if (nconst_count == 1)
+ {
+ new_ret = v1[nconst_index].eqNode(v2[nconst_index]);
+ debugExtendedRewrite(ret, new_ret, "CONCAT eq true");
+ return new_ret;
+ }
+ }
+
+ Assert(ret[0].getType().isBitVector());
+ bool hasZero = false;
+ bool hasConst = false;
+ unsigned cindex = 0;
+ for (unsigned i = 0; i < 2; i++)
+ {
+ if (ret[i].isConst())
+ {
+ hasConst = true;
+ cindex = i;
+ if (isConstBv(ret[i], false))
+ {
+ hasZero = true;
+ break;
+ }
+ }
+ }
+ if (hasConst)
+ {
+ // these can be flipped
+ Kind nck = ret[1 - cindex].getKind();
+ if (nck == BITVECTOR_NOT || nck == BITVECTOR_NEG)
+ {
+ new_ret = ret[1 - cindex][0].eqNode(nm->mkNode(nck, ret[cindex]));
+ debugExtendedRewrite(ret, new_ret, "EQUAL const");
+ return new_ret;
+ }
+ }
+ new_ret = ret;
+ Node bv_zero = mkConstBv(ret[0], false);
+ for (unsigned i = 0; i < 2; i++)
+ {
+ if (ret[1 - i] != bv_zero)
+ {
+ Node slv_ret = TermUtil::mkNegate(BITVECTOR_NEG, ret[1 - i]);
+ if (ret[i] != bv_zero)
+ {
+ slv_ret = nm->mkNode(BITVECTOR_PLUS, ret[i], slv_ret);
+ }
+ // FIXME : avoid this call?
+ slv_ret = extendedRewrite(slv_ret);
+ slv_ret = slv_ret.eqNode(bv_zero);
+ slv_ret = Rewriter::rewrite(slv_ret);
+ if (slv_ret.isConst())
+ {
+ new_ret = slv_ret;
+ break;
+ }
+ if (d_aggr)
+ {
+ if ((!hasZero && i == 0) || slv_ret < new_ret)
+ {
+ new_ret = slv_ret;
+ }
+ }
+ }
+ }
+ if (new_ret == ret)
+ {
+ new_ret = Node::null();
+ }
+ debugExtendedRewrite(ret, new_ret, "BV-eq-solve");
+ }
+ else if (k == BITVECTOR_AND || k == BITVECTOR_OR)
+ {
+ new_ret = rewriteBvBool(ret);
+ }
+ else if (k == BITVECTOR_XOR)
+ {
+ new_ret = extendedRewriteEqChain(
+ BITVECTOR_XOR, BITVECTOR_AND, BITVECTOR_OR, BITVECTOR_NOT, ret, true);
+ debugExtendedRewrite(ret, new_ret, "XOR chain simplify");
+ }
+ else if (k == BITVECTOR_ULT || k == BITVECTOR_SLT)
+ {
+ Node zero = mkConstBv(ret[0], false);
+ if (ret[0] == TermUtil::mkNegate(BITVECTOR_NEG, ret[1]))
+ {
+ // t <_u -t ---> 0 <_s t
+ // t <_s -t ---> 0 <_s -t
+ new_ret =
+ nm->mkNode(BITVECTOR_SLT, zero, ret[k == BITVECTOR_ULT ? 0 : 1]);
+ debugExtendedRewrite(ret, new_ret, "Ineq-self-neg");
+ return new_ret;
+ }
+ else if (ret[0] == TermUtil::mkNegate(BITVECTOR_NOT, ret[1]))
+ {
+ // t <_u ~t ---> ~( t <_s 0 )
+ // t <_s ~t ---> ( t <_s 0 )
+ new_ret = nm->mkNode(BITVECTOR_SLT, ret[0], zero);
+ if (k == BITVECTOR_ULT)
+ {
+ new_ret = TermUtil::mkNegate(NOT, new_ret);
+ }
+ debugExtendedRewrite(ret, new_ret, "Ineq-self-not");
+ return new_ret;
+ }
+ if (k == BITVECTOR_ULT)
+ {
+ if (bitVectorArithComp(ret[0], ret[1]))
+ {
+ new_ret = nm->mkConst(false);
+ debugExtendedRewrite(ret, new_ret, "ULT-arith");
+ return new_ret;
+ }
+ if (d_aggr)
+ {
+ if (ret[0] == zero || isConstBv(ret[1], true))
+ {
+ new_ret = ret[0].eqNode(ret[1]);
+ new_ret = new_ret.negate();
+ debugExtendedRewrite(ret, new_ret, "ULT-neq");
+ }
+ }
+ }
+ else // k == BITVECTOR_SLT
+ {
+ if (ret[0] == ret[1])
+ {
+ new_ret = nm->mkConst(false);
+ debugExtendedRewrite(ret, new_ret, "SLT-id");
+ }
+ }
+ }
+ else if (k == BITVECTOR_LSHR || k == BITVECTOR_SHL)
+ {
+ new_ret = rewriteBvShift(ret);
+ }
+ else if (k == BITVECTOR_PLUS || k == BITVECTOR_MULT)
+ {
+ new_ret = rewriteBvArith(ret);
+ }
+ else if (k == BITVECTOR_NEG)
+ {
+ Kind ck = ret[0].getKind();
+ // miniscope
+ if (ck == BITVECTOR_SHL)
+ {
+ new_ret = nm->mkNode(BITVECTOR_SHL,
+ TermUtil::mkNegate(BITVECTOR_NEG, ret[0][0]),
+ ret[0][1]);
+ debugExtendedRewrite(ret, new_ret, "NEG-SHL-miniscope");
+ }
+ else if (ck == BITVECTOR_NOT)
+ {
+ // this should be handled by NOT-plus-miniscope below
+ Assert(ret[0][0].getKind() != BITVECTOR_PLUS
+ || !hasConstBvChild(ret[0][0]));
+ }
+ else if (ck == BITVECTOR_AND || ck == BITVECTOR_OR)
+ {
+ if (ret[0].getNumChildren() == 2)
+ {
+ std::vector<Node> children;
+ for (const Node& cn : ret[0])
+ {
+ children.push_back(cn);
+ }
+ Node cplus = nm->mkNode(BITVECTOR_PLUS, children);
+ cplus = Rewriter::rewrite(cplus);
+ if (isConstBv(cplus, false))
+ {
+ // if x+y=0, then
+ // -( x and y ) ---> ( x or y )
+ // -( x or y ) ---> ( x and y )
+ new_ret = nm->mkNode(
+ ck == BITVECTOR_AND ? BITVECTOR_OR : BITVECTOR_AND, children);
+ debugExtendedRewrite(ret, new_ret, "NEG-AND/OR-zero-miniscope");
+ }
+ }
+ }
+ else
+ {
+ new_ret = normalizeBvMonomial(ret);
+ debugExtendedRewrite(ret, new_ret, "NEG-mnormalize");
+ }
+ }
+ else if (k == BITVECTOR_NOT)
+ {
+ // ~( x+y ) ----> -(x+y)-1
+ Kind ck = ret[0].getKind();
+ if (ck == BITVECTOR_PLUS && hasConstBvChild(ret[0]))
+ {
+ Node max_bv = mkConstBv(ret[0], true);
+ Node c = TermUtil::mkNegate(BITVECTOR_NEG, ret[0]);
+ new_ret = nm->mkNode(BITVECTOR_PLUS, c, max_bv);
+ debugExtendedRewrite(ret, new_ret, "NOT-plus-miniscope");
+ }
+
+ // NNF
+ Kind nnfk = UNDEFINED_KIND;
+ bool neg_ch = true;
+ bool neg_ch_1 = false;
+ if (ck == BITVECTOR_CONCAT)
+ {
+ nnfk = ck;
+ }
+ else if (ck == BITVECTOR_AND)
+ {
+ nnfk = BITVECTOR_OR;
+ }
+ else if (ck == BITVECTOR_OR)
+ {
+ nnfk = BITVECTOR_AND;
+ }
+ else if (ck == BITVECTOR_XOR)
+ {
+ neg_ch_1 = true;
+ nnfk = ck;
+ }
+ if (nnfk != UNDEFINED_KIND)
+ {
+ std::vector<Node> nnfc;
+ if (ret[0].getMetaKind() == kind::metakind::PARAMETERIZED)
+ {
+ nnfc.push_back(ret[0].getOperator());
+ }
+ for (unsigned i = 0, size = ret[0].getNumChildren(); i < size; i++)
+ {
+ Node c = ret[0][i];
+ c = (i == 0 ? neg_ch_1 : false) != neg_ch
+ ? TermUtil::mkNegate(BITVECTOR_NOT, c)
+ : c;
+ nnfc.push_back(c);
+ }
+ new_ret = nm->mkNode(nnfk, nnfc);
+ debugExtendedRewrite(ret, new_ret, "NNF bv");
+ }
+ }
+ else if (k == BITVECTOR_CONCAT)
+ {
+ new_ret = normalizeBvMonomial(ret);
+ debugExtendedRewrite(ret, new_ret, "CONCAT-mnormalize");
+ }
+ else if (k == BITVECTOR_EXTRACT)
+ {
+ // miniscoping
+ if (ret[0].getKind() == ITE)
+ {
+ std::vector<Node> new_children;
+ for (unsigned i = 0; i < 3; i++)
+ {
+ Node c = i == 0 ? ret[0][i] : nm->mkNode(ret.getOperator(), ret[0][i]);
+ new_children.push_back(c);
+ }
+ new_ret = nm->mkNode(ITE, new_children);
+ debugExtendedRewrite(ret, new_ret, "EXTRACT-miniscope");
+ }
+ else if (ret[0].getKind() == BITVECTOR_NEG)
+ {
+ if (bv::utils::getExtractHigh(ret) == 0
+ && bv::utils::getExtractLow(ret) == 0)
+ {
+ new_ret = nm->mkNode(ret.getOperator(), ret[0][0]);
+ debugExtendedRewrite(ret, new_ret, "EXTRACT-NEG-0bit");
+ }
+ }
+ }
+
+ if (!new_ret.isNull())
+ {
+ return new_ret;
+ }
+
+ // all kinds k that are bitwise go here
+ // k is bitwise if <k>( x, y )[n:m] = <k>( x[n:m], y[n:m] )
+ if (k == BITVECTOR_AND || k == BITVECTOR_OR || k == BITVECTOR_XOR)
+ {
+ // concat child pulling
+ unsigned nchild = ret.getNumChildren();
+ std::vector<Node> children;
+ children.resize(nchild);
+ for (unsigned i = 0; i < nchild; i++)
+ {
+ if (ret[i].getKind() == BITVECTOR_CONCAT)
+ {
+ Trace("q-ext-rewrite-debug")
+ << "Try concat pull " << i << " of " << ret << std::endl;
+ std::vector<Node> new_children;
+ int copy_count = 0;
+ unsigned last = bv::utils::getSize(ret[i]) - 1;
+ for (unsigned j = 0, sizei = ret[i].getNumChildren(); j < sizei; j++)
+ {
+ unsigned csize = bv::utils::getSize(ret[i][j]);
+ Assert((last + 1) >= csize);
+ Node eop = nm->mkConst<BitVectorExtract>(
+ BitVectorExtract(last, (last + 1) - csize));
+ for (unsigned l = 0; l < nchild; l++)
+ {
+ children[l] = l == i ? ret[i][j] : nm->mkNode(eop, ret[l]);
+ }
+ Node cc = nm->mkNode(k, children);
+ cc = extendedRewrite(cc);
+ // check if cc copies children of ret
+ if (!cc.isConst() && cc != ret[i][j])
+ {
+ Trace("q-ext-rewrite-debug") << "...copy : " << cc << std::endl;
+ copy_count++;
+ if (copy_count > 1)
+ {
+ // do not permit more than one copy
+ break;
+ }
+ }
+ new_children.push_back(cc);
+ // decrement last if not the last child
+ if ((j + 1) < sizei)
+ {
+ Assert(last >= csize);
+ last = last - csize;
+ }
+ }
+ if (copy_count <= 1)
+ {
+ new_ret = nm->mkNode(BITVECTOR_CONCAT, new_children);
+ debugExtendedRewrite(ret, new_ret, "CONCAT pull");
+ return new_ret;
+ }
+ Trace("q-ext-rewrite-debug")
+ << "Failed concat pull " << i << " of " << ret << std::endl;
+ }
+ }
+ }
+
+ return new_ret;
+}
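
The "bitwise" property assumed in the last block above, <k>( x, y )[n:m] = <k>( x[n:m], y[n:m] ) for AND, OR and XOR, can be checked exhaustively at a small width. A minimal standalone sketch, independent of the CVC4 code, modeling 8-bit bit-vectors with uint8_t (the helper name "extract" is only illustrative):

#include <cassert>
#include <cstdint>

// bits n..m (inclusive) of v, shifted down to position 0
static uint8_t extract(uint8_t v, unsigned n, unsigned m)
{
  return static_cast<uint8_t>((v >> m) & ((1u << (n - m + 1)) - 1));
}

int main()
{
  for (unsigned x = 0; x < 256; x++)
    for (unsigned y = 0; y < 256; y++)
      for (unsigned m = 0; m < 8; m++)
        for (unsigned n = m; n < 8; n++)
        {
          uint8_t a = static_cast<uint8_t>(x);
          uint8_t b = static_cast<uint8_t>(y);
          assert(extract(a & b, n, m) == (extract(a, n, m) & extract(b, n, m)));
          assert(extract(a | b, n, m) == (extract(a, n, m) | extract(b, n, m)));
          assert(extract(a ^ b, n, m) == (extract(a, n, m) ^ extract(b, n, m)));
        }
  return 0;
}
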
+
+Node ExtendedRewriter::rewriteBvArith(Node ret)
+{
+ Trace("q-ext-rewrite-debug") << "Rewrite bv arith " << ret << std::endl;
+ Kind k = ret.getKind();
+ Assert(k == BITVECTOR_PLUS || k == BITVECTOR_MULT);
+ NodeManager* nm = NodeManager::currentNM();
+ Node new_ret;
+ unsigned size = bv::utils::getSize(ret);
+ Node bv_one = bv::utils::mkOne(size);
+ Node bv_neg_one = bv::utils::mkOnes(size);
+ if (d_aggr)
+ {
+ if (k == BITVECTOR_PLUS)
+ {
+ std::vector<Node> nconst;
+ Node cc = getConstBvChild(ret, nconst);
+ if (!cc.isNull() && (cc == bv_one || cc == bv_neg_one))
+ {
+ Node rem = nconst.size() == 1 ? nconst[0] : nm->mkNode(k, nconst);
+ if (cc == bv_one)
+ {
+ // x+1 ---> -( ~x )
+ new_ret =
+ nm->mkNode(BITVECTOR_NEG, TermUtil::mkNegate(BITVECTOR_NOT, rem));
+ }
+ else if (cc == bv_neg_one)
+ {
+ // x-1 ---> ~( -x )
+ new_ret =
+ nm->mkNode(BITVECTOR_NOT, TermUtil::mkNegate(BITVECTOR_NEG, rem));
+ }
+ }
+ if (!new_ret.isNull())
+ {
+ debugExtendedRewrite(ret, new_ret, "arith-plus-elim");
+ return new_ret;
+ }
+ }
+ }
+
+ std::vector<Node> rchildren;
+ bool rchildrenChanged = false;
+ for (const Node& rc : ret)
+ {
+ bool isNeg = rc.getKind() == BITVECTOR_NEG;
+ Node rcc = isNeg ? rc[0] : rc;
+ if (rcc.getKind() == BITVECTOR_NOT)
+ {
+ // ~x ----> -(x+1)
+ rcc = nm->mkNode(BITVECTOR_PLUS, rcc[0], bv_one);
+ isNeg = !isNeg;
+ rchildrenChanged = true;
+ }
+ if (isNeg)
+ {
+ // negate it
+ rcc = TermUtil::mkNegate(BITVECTOR_NEG, rcc);
+ }
+ rchildren.push_back(rcc);
+ }
+ if (rchildrenChanged)
+ {
+ new_ret = nm->mkNode(k, rchildren);
+ debugExtendedRewrite(ret, new_ret, "arith-not-child");
+ return new_ret;
+ }
+
+ // general monomial normalization
+ new_ret = normalizeBvMonomial(ret);
+ if (!new_ret.isNull())
+ {
+ debugExtendedRewrite(ret, new_ret, "arith-mnormalize");
+ return new_ret;
+ }
+
+ if (k == BITVECTOR_PLUS)
+ {
+ unsigned nchild = ret.getNumChildren();
+ for (unsigned i = 0; i < nchild; i++)
+ {
+ for (unsigned j = (i + 1); j < nchild; j++)
+ {
+ if (bitVectorDisjoint(ret[i], ret[j]))
+ {
+ // if we prove disjointness, PLUS turns into OR
+ // ( ( x ^ y ) ---> 0 ) => ( x+y ----> x V y )
+ std::vector<Node> children;
+ children.push_back(nm->mkNode(BITVECTOR_OR, ret[i], ret[j]));
+ for (unsigned l = 0; l < nchild; l++)
+ {
+ if (l != i && l != j)
+ {
+ children.push_back(ret[l]);
+ }
+ }
+ new_ret = children.size() == 1 ? children[0]
+ : nm->mkNode(BITVECTOR_PLUS, children);
+ debugExtendedRewrite(ret, new_ret, "PLUS-disjoint");
+ return new_ret;
+ }
+ }
+ }
+
+ // cancelling of AND/OR children, handles these cases:
+ // s - ( s & t ) ----> s & ~t
+ // s - ( s | t ) ----> -( ~s & t )
+ // ( s & t ) - s ----> -( s & ~t )
+ // ( s | t ) - s ----> ~s & t
+
+ // TODO :
+ // s + ( ~s | t ) = ~( -( s & t ) )
+ // s + ( t | -s ) = ( s & ~-t )
+
+ std::map<Node, bool> retc;
+ for (const Node& rc : ret)
+ {
+ bool pol = rc.getKind() != BITVECTOR_NEG;
+ Node rca = rc.getKind() == BITVECTOR_NEG ? rc[0] : rc;
+ Assert(rca.getKind() != BITVECTOR_NOT);
+ Assert(retc.find(rca) == retc.end());
+ retc[rca] = pol;
+ }
+ for (std::pair<const Node, bool>& rcp : retc)
+ {
+ // does AND/OR occur as child of PLUS?
+ Node rc = rcp.first;
+ Kind rck = rc.getKind();
+ if (rck == BITVECTOR_AND || rck == BITVECTOR_OR)
+ {
+ bool rcpol = rcp.second;
+ // is there a child that cancels?
+ unsigned nchild = rc.getNumChildren();
+ for (unsigned j = 0; j < nchild; j++)
+ {
+ Node rcc = rc[j];
+ // check if it occurs under ret
+ std::map<Node, bool>::iterator itr = retc.find(rcc);
+ if (itr != retc.end())
+ {
+ // with opposite polarity?
+ if (rcpol != itr->second)
+ {
+ // collect remainder
+ std::vector<Node> new_children;
+ for (unsigned k = 0; k < nchild; k++)
+ {
+ if (k != j)
+ {
+ new_children.push_back(rc[k]);
+ }
+ }
+ Node nc[2];
+ nc[0] = rc[j];
+ nc[1] = new_children.size() == 1 ? new_children[0]
+ : nm->mkNode(rck, new_children);
+ // determine the index to negate
+ unsigned nindex = rck == BITVECTOR_AND ? 1 : 0;
+ nc[nindex] = TermUtil::mkNegate(BITVECTOR_NOT, nc[nindex]);
+ // combine the children
+ Node new_c = nm->mkNode(BITVECTOR_AND, nc[0], nc[1]);
+ new_c = rcpol == (rck == BITVECTOR_AND)
+ ? TermUtil::mkNegate(BITVECTOR_NEG, new_c)
+ : new_c;
+ retc.erase(rc);
+ retc.erase(rcc);
+ std::vector<Node> sum;
+ for (std::pair<const Node, bool>& rcnp : retc)
+ {
+ Node rcn = rcnp.first;
+ Node rcnn =
+ rcnp.second ? rcn : TermUtil::mkNegate(BITVECTOR_NEG, rcn);
+ sum.push_back(rcnn);
+ }
+ sum.push_back(new_c);
+ new_ret =
+ sum.size() == 1 ? sum[0] : nm->mkNode(BITVECTOR_PLUS, sum);
+ debugExtendedRewrite(ret, new_ret, "PLUS-AND/OR cancel");
+ return new_ret;
+ }
+ }
+ }
+ }
+ }
+ }
+ return Node::null();
+}
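
The arithmetic identities used above (x+1 ---> -( ~x ), x-1 ---> ~( -x ), and the AND/OR cancellations such as s - ( s & t ) ---> s & ~t and s - ( s | t ) ---> -( ~s & t )) hold at any fixed width. A minimal standalone check at width 8, independent of the CVC4 code, modeling bit-vectors with uint8_t:

#include <cassert>
#include <cstdint>

int main()
{
  for (unsigned i = 0; i < 256; i++)
  {
    uint8_t x = static_cast<uint8_t>(i);
    // x+1 = -( ~x )  and  x-1 = ~( -x )
    assert(static_cast<uint8_t>(x + 1) == static_cast<uint8_t>(-static_cast<uint8_t>(~x)));
    assert(static_cast<uint8_t>(x - 1) == static_cast<uint8_t>(~static_cast<uint8_t>(-x)));
    for (unsigned j = 0; j < 256; j++)
    {
      uint8_t s = x;
      uint8_t t = static_cast<uint8_t>(j);
      // s - ( s & t ) = s & ~t  and  s - ( s | t ) = -( ~s & t )
      assert(static_cast<uint8_t>(s - (s & t)) == static_cast<uint8_t>(s & ~t));
      assert(static_cast<uint8_t>(s - (s | t))
             == static_cast<uint8_t>(-(static_cast<uint8_t>(~s) & t)));
    }
  }
  return 0;
}
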
+
+Node ExtendedRewriter::rewriteBvShift(Node ret)
+{
+ Kind k = ret.getKind();
+ Assert(k == BITVECTOR_LSHR || k == BITVECTOR_SHL);
+ NodeManager* nm = NodeManager::currentNM();
+ Node new_ret;
+
+ std::vector<Node> cchildren;
+ Node base = decomposeRightAssocChain(k, ret, cchildren);
+
+ if (k == BITVECTOR_LSHR)
+ {
+ std::vector<Node> bchildren;
+ Kind bk = base.getKind();
+ if (bk == BITVECTOR_OR || bk == BITVECTOR_XOR)
+ {
+ for (const Node& cr : base)
+ {
+ bchildren.push_back(cr);
+ }
+ }
+ else
+ {
+ bchildren.push_back(base);
+ }
+ std::vector<Node> bctemp;
+ bctemp.insert(bctemp.end(), bchildren.begin(), bchildren.end());
+ bchildren.clear();
+ bool childChanged = false;
+ for (const Node& bc : bctemp)
+ {
+ // take into account NOT
+ Node bca = bc.getKind() == BITVECTOR_NOT ? bc[0] : bc;
+ bool shifted = false;
+ for (const Node& cc : cchildren)
+ {
+ if (bitVectorArithComp(cc, bca))
+ {
+ shifted = true;
+ break;
+ }
+ }
+ // check whether all of its 1-bits are shifted away
+ if (shifted)
+ {
+ // rewrite rule #20
+ // if we shifted it away, then it might as well have been all 0's.
+ Node const_bv = mkConstBv(bc, bc.getKind() == BITVECTOR_NOT);
+ bchildren.push_back(const_bv);
+ childChanged = true;
+ }
+ else
+ {
+ bchildren.push_back(bc);
+ }
+ }
+ if (childChanged)
+ {
+ base = bchildren.size() == 1 ? bchildren[0] : nm->mkNode(bk, bchildren);
+ }
+ }
+ new_ret = mkRightAssocChain(k, base, cchildren);
+ if (new_ret != ret)
+ {
+ debugExtendedRewrite(ret, new_ret, "shift-sort-arith");
+ return new_ret;
+ }
+
+ if (k == BITVECTOR_SHL)
+ {
+ new_ret = normalizeBvMonomial(ret);
+ if (!new_ret.isNull())
+ {
+ debugExtendedRewrite(ret, new_ret, "SHL-mnormalize");
+ return new_ret;
+ }
+ }
+
+ return Node::null();
+}
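
The "rewrite rule #20" reasoning above says that an OR-disjunct whose value is at most the shift amount contributes nothing to a logical right shift. A minimal standalone check at width 8, independent of the CVC4 code; the helper lshr8 and the single-shift setting are illustrative simplifications of the shift chain handled above:

#include <cassert>
#include <cstdint>

// 8-bit bvlshr semantics: shifting by 8 or more yields 0
static uint8_t lshr8(uint8_t v, unsigned s)
{
  return s >= 8 ? 0 : static_cast<uint8_t>(v >> s);
}

int main()
{
  for (unsigned x = 0; x < 256; x++)
    for (unsigned c = 0; c < 256; c++)
      for (unsigned s = c; s < 256; s++)
      {
        // when s >= c, the disjunct c is shifted away entirely
        assert(lshr8(static_cast<uint8_t>(x | c), s) == lshr8(static_cast<uint8_t>(x), s));
      }
  return 0;
}
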
+
+Node ExtendedRewriter::rewriteBvBool(Node ret)
+{
+ Kind k = ret.getKind();
+ Assert(k == BITVECTOR_AND || k == BITVECTOR_OR);
+ NodeManager* nm = NodeManager::currentNM();
+ Node new_ret;
+
+ bool isAnd = (k == BITVECTOR_AND);
+ std::unordered_set<unsigned> drops;
+ std::vector<Node> children;
+ unsigned size = ret.getNumChildren();
+ for (unsigned i = 0; i < size; i++)
+ {
+ Node cmpi = isAnd ? ret[i] : TermUtil::mkNegate(BITVECTOR_NOT, ret[i]);
+ bool success = true;
+ for (unsigned j = 0; j < size; j++)
+ {
+ if (i != j && drops.find(j) == drops.end())
+ {
+ if (bitVectorSubsume(ret[isAnd ? i : j], ret[isAnd ? j : i]) != 0)
+ {
+ drops.insert(i);
+ success = false;
+ }
+ Node cmpj = isAnd ? ret[j] : TermUtil::mkNegate(BITVECTOR_NOT, ret[j]);
+ if (i < j && bitVectorDisjoint(cmpi, cmpj))
+ {
+ new_ret = mkConstBv(ret, !isAnd);
+ debugExtendedRewrite(ret, new_ret, "AND/OR-disjoint");
+ return new_ret;
+ }
+ }
+ }
+ if (success)
+ {
+ children.push_back(ret[i]);
+ }
+ }
+ Assert(!children.empty());
+ if (children.size() < size)
+ {
+ new_ret = children.size() == 1 ? children[0] : nm->mkNode(k, children);
+ debugExtendedRewrite(ret, new_ret, "AND/OR subsume");
+ return new_ret;
+ }
+
+ // Boolean constraint propagation
+ if (d_aggr)
+ {
+ // specify legal BCP kinds
+ std::map<Kind, bool> bcp_kinds;
+ bcp_kinds[BITVECTOR_AND] = true;
+ bcp_kinds[BITVECTOR_OR] = true;
+ bcp_kinds[BITVECTOR_NOT] = true;
+ bcp_kinds[BITVECTOR_XOR] = true;
+ new_ret = extendedRewriteBcp(
+ BITVECTOR_AND, BITVECTOR_OR, BITVECTOR_NOT, bcp_kinds, ret);
+ if (!new_ret.isNull())
+ {
+ debugExtendedRewrite(ret, new_ret, "BV bcp");
+ return new_ret;
+ }
+ new_ret = extendedRewriteEqRes(BITVECTOR_AND,
+ BITVECTOR_OR,
+ BITVECTOR_XOR,
+ BITVECTOR_NOT,
+ bcp_kinds,
+ ret,
+ true);
+ if (!new_ret.isNull())
+ {
+ debugExtendedRewrite(ret, new_ret, "BV eq res");
+ return new_ret;
+ }
+ }
+
+ return Node::null();
+}
+
+int ExtendedRewriter::bitVectorSubsume(Node a, Node b, bool strict, bool tryNot)
+{
+ Assert(a.getType() == b.getType());
+ int max_ret = strict ? 2 : 1;
+ int curr_ret = 0;
+ Trace("q-ext-rewrite-debug2")
+ << "Subsume " << a << " " << b << "?" << std::endl;
+ // simple cases : a=b, a=max, b=0
+ if (a == b || isConstBv(a, true) || isConstBv(b, false))
+ {
+ curr_ret = 1;
+ }
+ else if (a.isConst() && b.isConst())
+ {
+ // TODO
+ }
+
+ if (curr_ret == max_ret)
+ {
+ return max_ret;
+ }
+
+ Kind ak = a.getKind();
+ Kind bk = b.getKind();
+
+ //------------------recurse on CONCAT
+ if (ak == BITVECTOR_CONCAT || bk == BITVECTOR_CONCAT)
+ {
+ unsigned counter = bv::utils::getSize(a) - 1;
+ bool reca = ak == BITVECTOR_CONCAT;
+ Node cterm = reca ? a : b;
+ Node oterm = reca ? b : a;
+ int ccurr = 1;
+ // each piece must subsume
+ for (const Node& ctermc : cterm)
+ {
+ unsigned csize = bv::utils::getSize(ctermc);
+ Assert((counter + 1) >= csize);
+ Node extract =
+ bv::utils::mkExtract(oterm, counter, (counter + 1) - csize);
+ extract = Rewriter::rewrite(extract);
+
+ // check subsume
+ int cret = bitVectorSubsume(
+ reca ? ctermc : extract, reca ? extract : ctermc, strict);
+ if (cret == 0)
+ {
+ ccurr = 0;
+ break;
+ }
+ else if (cret > ccurr)
+ {
+ ccurr = cret;
+ }
+ if (counter >= csize)
+ {
+ counter = counter - csize;
+ }
+ }
+ // if all pieces subsume, then we take the max and update the return value
+ if (ccurr == max_ret)
+ {
+ return max_ret;
+ }
+ else if (ccurr > curr_ret)
+ {
+ curr_ret = ccurr;
+ }
+ }
+
+ //---------------recurse BITVECTOR_NOT
+ if (ak == BITVECTOR_NOT && bk == BITVECTOR_NOT)
+ {
+ return bitVectorSubsume(b[0], a[0], strict);
+ }
+
+ //--------------recurse extract
+ if (ak == BITVECTOR_EXTRACT && bk == BITVECTOR_EXTRACT
+ && a.getOperator() == b.getOperator())
+ {
+ return bitVectorSubsume(a[0], b[0], strict);
+ }
+
+ //---------------recurse BITVECTOR_AND/OR
+ bool reca = false;
+ Node rec;
+ if (ak == BITVECTOR_OR)
+ {
+ rec = a;
+ reca = true;
+ }
+ else if (bk == BITVECTOR_AND)
+ {
+ rec = b;
+ }
+ if (!rec.isNull())
+ {
+ for (const Node& rc : rec)
+ {
+ int cret = bitVectorSubsume(reca ? rc : a, reca ? b : rc, strict);
+ if (cret == max_ret)
+ {
+ return max_ret;
+ }
+ else if (cret > curr_ret)
+ {
+ curr_ret = cret;
+ }
+ }
+ rec = Node::null();
+ }
+
+ //---------------recurse ITE
+ if (ak == ITE)
+ {
+ rec = a;
+ reca = true;
+ }
+ else if (bk == ITE)
+ {
+ rec = b;
+ reca = false;
+ }
+ if (!rec.isNull())
+ {
+ int r1 = bitVectorSubsume(reca ? a[1] : a, reca ? b : b[1], strict);
+ if (r1 > curr_ret)
+ {
+ int r2 = bitVectorSubsume(reca ? a[2] : a, reca ? b : b[2], strict);
+ if (r2 > curr_ret)
+ {
+ curr_ret = r2 > r1 ? r1 : r2;
+ if (curr_ret == max_ret)
+ {
+ return max_ret;
+ }
+ }
+ }
+ rec = Node::null();
+ }
+
+ //-----------------cases for b
+ if (bk == BITVECTOR_SHL)
+ {
+ if (b[0].getKind() == BITVECTOR_LSHR)
+ {
+ if (b[0][0] == a && b[0][1] == b[1])
+ {
+ // x subsumes bvshl( bvlshr( x, y ), y )
+ curr_ret = 1;
+ }
+ }
+ Node anot = TermUtil::mkNegate(BITVECTOR_NOT, a);
+ // x subsumes bvshl( y, z ) if z>=(~x).
+ if (bitVectorArithComp(b[1], anot))
+ {
+ curr_ret = 1;
+ }
+ }
+ else if (bk == BITVECTOR_LSHR)
+ {
+ if (b[0].getKind() == BITVECTOR_SHL)
+ {
+ if (b[0][0] == a && b[0][1] == b[1])
+ {
+ // x subsumes bvlshr( bvshl( x, y ), y )
+ curr_ret = 1;
+ }
+ }
+ }
+ else if (bk == BITVECTOR_NEG)
+ {
+ if (b[0].getKind() == BITVECTOR_SHL)
+ {
+ Node anot = TermUtil::mkNegate(BITVECTOR_NOT, a);
+ // x subsumes -bvshl( y, z ) if z>=(~x).
+ if (bitVectorArithComp(b[0][1], anot))
+ {
+ curr_ret = 1;
+ }
+ }
+ }
+
+ if (curr_ret == max_ret)
+ {
+ return max_ret;
+ }
+
+ if (tryNot)
+ {
+ if (ak == BITVECTOR_NOT || bk == BITVECTOR_NOT)
+ {
+ int ret = bitVectorSubsume(TermUtil::mkNegate(BITVECTOR_NOT, b),
+ TermUtil::mkNegate(BITVECTOR_NOT, a),
+ strict,
+ false);
+ if (ret > curr_ret)
+ {
+ curr_ret = ret;
+ }
+ }
+ }
+
+ return curr_ret;
+}
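
The shift-based subsumption facts noted above ("x subsumes bvshl( bvlshr( x, y ), y )", "x subsumes bvlshr( bvshl( x, y ), y )", and "x subsumes bvshl( y, z ) if z>=(~x)") can be checked exhaustively at a small width, reading "a subsumes b" as (bvand (bvnot a) b) = 0 per the header documentation. A minimal standalone sketch at width 8, independent of the CVC4 code (shl8/lshr8 are illustrative helpers):

#include <cassert>
#include <cstdint>

static uint8_t shl8(uint8_t v, unsigned s) { return s >= 8 ? 0 : static_cast<uint8_t>(v << s); }
static uint8_t lshr8(uint8_t v, unsigned s) { return s >= 8 ? 0 : static_cast<uint8_t>(v >> s); }

int main()
{
  for (unsigned xi = 0; xi < 256; xi++)
  {
    uint8_t x = static_cast<uint8_t>(xi);
    uint8_t notx = static_cast<uint8_t>(~x);
    for (unsigned y = 0; y < 256; y++)
    {
      uint8_t yb = static_cast<uint8_t>(y);
      // x subsumes bvshl( bvlshr( x, y ), y )
      assert((notx & shl8(lshr8(x, y), y)) == 0);
      // x subsumes bvlshr( bvshl( x, y ), y )
      assert((notx & lshr8(shl8(x, y), y)) == 0);
      // x subsumes bvshl( y, z ) whenever z >= ~x
      for (unsigned z = notx; z < 256; z++)
      {
        assert((notx & shl8(yb, z)) == 0);
      }
    }
  }
  return 0;
}
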
+
+int ExtendedRewriter::bitVectorArithComp(Node a, Node b, bool strict)
+{
+ Assert(a.getType() == b.getType());
+ int max_ret = strict ? 2 : 1;
+ Trace("q-ext-rewrite-debug2")
+ << "Arith comp " << a << " " << b << "?" << std::endl;
+ int curr_ret = bitVectorSubsume(a, b, strict);
+ if (curr_ret == max_ret)
+ {
+ return max_ret;
+ }
+ if (a.isConst() && b.isConst())
+ {
+ // TODO
+ }
+
+ // flip
+ if (a.getKind() == BITVECTOR_NEG && b.getKind() == BITVECTOR_NEG)
+ {
+ return bitVectorArithComp(b[0], a[0], strict);
+ }
+
+ // shifting right always shrinks
+ if (b.getKind() == BITVECTOR_LSHR)
+ {
+ if (bitVectorArithComp(a, b[0], strict) == max_ret)
+ {
+ curr_ret = 1;
+ // if strict, also must be greater than zero shift
+ if (strict)
+ {
+ Node zero = mkConstBv(b[1], false);
+ if (bitVectorArithComp(b[1], zero, strict))
+ {
+ return max_ret;
+ }
+ }
+ }
+ }
+ else if (b.getKind() == BITVECTOR_NOT)
+ {
+ if (a.getKind() == BITVECTOR_NOT)
+ {
+ // flipped?
+ }
+ }
+
+ return curr_ret;
+}
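
The "shifting right always shrinks" fact used above is simply that a logical right shift never increases an unsigned value. A minimal standalone check at width 8, independent of the CVC4 code (lshr8 is an illustrative helper):

#include <cassert>
#include <cstdint>

static uint8_t lshr8(uint8_t v, unsigned s) { return s >= 8 ? 0 : static_cast<uint8_t>(v >> s); }

int main()
{
  for (unsigned x = 0; x < 256; x++)
    for (unsigned s = 0; s < 256; s++)
      assert(lshr8(static_cast<uint8_t>(x), s) <= x);
  return 0;
}
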
+
+bool ExtendedRewriter::bitVectorDisjoint(Node a, Node b)
+{
+ Assert(a.getType() == b.getType());
+ if (a.isConst() && b.isConst())
+ {
+ // TODO
+ }
+ // must be dually subsuming
+ for (unsigned r = 0; r < 2; r++)
+ {
+ Node x = r == 0 ? a : b;
+ Node y = r == 0 ? b : a;
+ x = TermUtil::mkNegate(BITVECTOR_NOT, x);
+ if (bitVectorSubsume(x, y) == 0)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
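
Disjointness is what justifies the PLUS-disjoint rewrite in rewriteBvArith above: when (bvand x y) = 0, bvadd and bvor agree, since no carries can occur. A minimal standalone check at width 8, independent of the CVC4 code:

#include <cassert>
#include <cstdint>

int main()
{
  for (unsigned x = 0; x < 256; x++)
    for (unsigned y = 0; y < 256; y++)
      if ((x & y) == 0)
      {
        assert(static_cast<uint8_t>(x + y) == static_cast<uint8_t>(x | y));
      }
  return 0;
}
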
+
+Node ExtendedRewriter::decomposeRightAssocChain(Kind k,
+ Node n,
+ std::vector<Node>& children)
+{
+ Node curr = n;
+ while (curr.getKind() == k)
+ {
+ children.push_back(curr[1]);
+ curr = curr[0];
+ }
+ return curr;
+}
+
+Node ExtendedRewriter::mkRightAssocChain(Kind k,
+ Node base,
+ std::vector<Node>& children)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Node curr = base;
+ if (!children.empty())
+ {
+ // always sort
+ std::sort(children.begin(), children.end());
+ for (const Node& c : children)
+ {
+ curr = nm->mkNode(k, curr, c);
+ }
+ }
+ return curr;
+}
+
+Node ExtendedRewriter::mkConstBv(Node n, bool isNot)
+{
+ unsigned size = bv::utils::getSize(n);
+ return isNot ? bv::utils::mkOnes(size) : bv::utils::mkZero(size);
+}
+
+bool ExtendedRewriter::isConstBv(Node n, bool isNot)
+{
+ if (n.isConst())
+ {
+ Node const_n = mkConstBv(n, isNot);
+ return n == const_n;
+ }
+ return false;
+}
+
+Node ExtendedRewriter::getConstBvChild(Node n, std::vector<Node>& nconst)
+{
+ Node ret;
+ for (const Node& cn : n)
+ {
+ if (cn.isConst())
+ {
+ Assert(ret.isNull());
+ ret = cn;
+ }
+ else
+ {
+ nconst.push_back(cn);
+ }
+ }
+ return ret;
+}
+
+bool ExtendedRewriter::hasConstBvChild(Node n)
+{
+ for (const Node& cn : n)
+ {
+ if (cn.isConst())
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+Node ExtendedRewriter::normalizeBvMonomial(Node n)
+{
+ // general monomial normalization
+ std::map<Node, Node> msum;
+ getBvMonomialSum(n, msum);
+
+ if (Trace.isOn("q-ext-rewrite-bvarith"))
+ {
+ Trace("q-ext-rewrite-bvarith")
+ << "Monomial representation of " << n << " : " << std::endl;
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Assert(!m.second.isNull());
+ Assert(m.second.getType() == m.first.getType());
+ Node c = Rewriter::rewrite(m.second);
+ Trace("q-ext-rewrite-bvarith") << " " << m.first << " * " << c;
+ Trace("q-ext-rewrite-bvarith") << std::endl;
+ }
+ }
+
+ // monomial normalization techniques
+
+ NodeManager* nm = NodeManager::currentNM();
+
+ // group terms that have the same factors
+ for (unsigned r = 0; r < 2; r++)
+ {
+ // the kind we are factoring
+ Kind fk = r == 0 ? BITVECTOR_SHL : BITVECTOR_MULT;
+ std::vector<Node> fcts;
+ std::map<Node, std::unordered_set<Node, NodeHashFunction> > fct_ms;
+ std::map<Node, std::vector<Node> > ms_to_fct;
+ std::map<Node, Node> ms_to_base;
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ bool success = false;
+ if (r == 0)
+ {
+ // BITVECTOR_SHL is decomposed as right-associative chain
+ if (v.getKind() == fk)
+ {
+ ms_to_base[v] = decomposeRightAssocChain(fk, v, ms_to_fct[v]);
+ success = true;
+ }
+ }
+ else if (r == 1)
+ {
+ success = true;
+ // BITVECTOR_MULT is decomposed as children or self
+ if (v.getKind() == fk)
+ {
+ for (const Node& vc : v)
+ {
+ ms_to_fct[v].push_back(vc);
+ }
+ }
+ else
+ {
+ ms_to_fct[v].push_back(v);
+ }
+ }
+ if (success)
+ {
+ std::vector<Node>& mts = ms_to_fct[v];
+ for (const Node& sl : mts)
+ {
+ fct_ms[sl].insert(v);
+ if (std::find(fcts.begin(), fcts.end(), sl) == fcts.end())
+ {
+ fcts.push_back(sl);
+ }
+ }
+ }
+ }
+ if (!fcts.empty())
+ {
+ unsigned size = bv::utils::getSize(n);
+ Node bvone = bv::utils::mkOne(size);
+ std::sort(fcts.begin(), fcts.end());
+ for (const Node& sl : fcts)
+ {
+ std::unordered_set<Node, NodeHashFunction>& sms = fct_ms[sl];
+ if (sms.size() > 1)
+ {
+ Trace("q-ext-rewrite-bvarith-debug")
+ << "Factoring " << sl << std::endl;
+ // reconstruct the monomial
+ std::map<Node, Node> msum_new;
+ std::vector<Node> group_children;
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ if (sms.find(v) == sms.end())
+ {
+ msum_new[v] = m.second;
+ }
+ else
+ {
+ // remove a factor (make a copy of vector here)
+ std::vector<Node> mts = ms_to_fct[v];
+ std::vector<Node>::iterator it =
+ std::find(mts.begin(), mts.end(), sl);
+ Assert(it != mts.end());
+ mts.erase(it);
+ Node gc;
+ if (r == 0)
+ {
+ gc = mkRightAssocChain(fk, ms_to_base[v], mts);
+ }
+ else
+ {
+ gc = mts.empty()
+ ? bvone
+ : (mts.size() == 1 ? mts[0]
+ : nm->mkNode(BITVECTOR_MULT, mts));
+ }
+ gc = nm->mkNode(BITVECTOR_MULT, gc, m.second);
+ Trace("q-ext-rewrite-bvarith-debug")
+ << "...group child : " << gc << std::endl;
+ group_children.push_back(gc);
+ }
+ }
+ Assert(group_children.size() > 1);
+ Node sgc = nm->mkNode(BITVECTOR_PLUS, group_children);
+ // FIXME : avoid this call?
+ sgc = extendedRewrite(sgc);
+ sgc = nm->mkNode(fk, sgc, sl);
+ msum_new[sgc] = bvone;
+ Node new_ret = mkNodeFromBvMonomial(n, msum_new);
+ new_ret = Rewriter::rewrite(new_ret);
+ if (new_ret != n)
+ {
+ Trace("q-ext-rewrite-bvarith")
+ << "Factored " << sl << " " << fk << " : " << n << " -> "
+ << new_ret << std::endl;
+ return new_ret;
+ }
+ }
+ }
+ }
+ }
+
+ Node new_ret = mkNodeFromBvMonomial(n, msum);
+ Trace("q-ext-rewrite-bvarith")
+ << "...got (pre-rewrite) : " << new_ret << std::endl;
+ new_ret = Rewriter::rewrite(new_ret);
+ if (new_ret != n)
+ {
+ return new_ret;
+ }
+ Trace("q-ext-rewrite-bvarith")
+ << "Monomial " << n << " did not normalize." << std::endl;
+ return Node::null();
+}
+
+void ExtendedRewriter::getBvMonomialSum(Node n, std::map<Node, Node>& msum)
+{
+ Trace("q-ext-rewrite-debug2") << "get bv monomial sum " << n << std::endl;
+ NodeManager* nm = NodeManager::currentNM();
+ unsigned size = bv::utils::getSize(n);
+ Node bv_one = bv::utils::mkOne(size);
+ std::map<Node, Node> n_msum;
+ if (n.isConst())
+ {
+ n_msum[bv_one] = n;
+ }
+ else
+ {
+ Kind k = n.getKind();
+ if (k == BITVECTOR_PLUS)
+ {
+ for (const Node& cn : n)
+ {
+ getBvMonomialSum(cn, msum);
+ }
+ }
+ else if (k == BITVECTOR_NEG)
+ {
+ getBvMonomialSum(n[0], n_msum);
+ for (std::map<Node, Node>::iterator it = n_msum.begin();
+ it != n_msum.end();
+ ++it)
+ {
+ Node coeff = it->second;
+ n_msum[it->first] = TermUtil::mkNegate(BITVECTOR_NEG, coeff);
+ }
+ }
+ else if (k == BITVECTOR_NOT)
+ {
+ Node rec =
+ nm->mkNode(BITVECTOR_NEG, nm->mkNode(BITVECTOR_PLUS, n[0], bv_one));
+ getBvMonomialSum(rec, msum);
+ }
+ else if (k == BITVECTOR_CONCAT)
+ {
+ unsigned nchildren = n.getNumChildren();
+ // if the last child is zero
+ if (isConstBv(n[nchildren - 1], false))
+ {
+ Node rec;
+ if (nchildren == 2 && n[0].getKind() == BITVECTOR_EXTRACT
+ && bv::utils::getExtractLow(n[0]) == 0)
+ {
+ rec = n[0][0];
+ }
+ else
+ {
+ std::vector<Node> rchildren;
+ for (unsigned j = 0; j < nchildren - 1; j++)
+ {
+ rchildren.push_back(n[j]);
+ }
+ rec = rchildren.size() == 1 ? rchildren[0]
+ : nm->mkNode(BITVECTOR_CONCAT, rchildren);
+ }
+ unsigned size_rec = bv::utils::getSize(rec);
+ // must ensure the same type
+ if (size_rec > size)
+ {
+ rec = bv::utils::mkExtract(rec, size - 1, 0);
+ rec = Rewriter::rewrite(rec);
+ }
+ else if (size_rec < size)
+ {
+ unsigned diff = size - size_rec;
+ Node bzero = bv::utils::mkZero(diff);
+ rec = nm->mkNode(BITVECTOR_CONCAT, bzero, rec);
+ rec = Rewriter::rewrite(rec);
+ }
+ Assert(rec.getType() == n.getType());
+
+ unsigned sizez = bv::utils::getSize(n[nchildren - 1]);
+ Integer powsizez = Integer(1).multiplyByPow2(sizez);
+ Node ccoeff = bv::utils::mkConst(size, powsizez);
+
+ getBvMonomialSum(rec, n_msum);
+ for (std::map<Node, Node>::iterator it = n_msum.begin();
+ it != n_msum.end();
+ ++it)
+ {
+ Node coeff = it->second;
+ Assert(coeff.getType() == ccoeff.getType());
+ n_msum[it->first] = nm->mkNode(BITVECTOR_MULT, coeff, ccoeff);
+ }
+ }
+ else
+ {
+ n_msum[n] = bv_one;
+ }
+ }
+ else if (k == BITVECTOR_MULT)
+ {
+ std::vector<Node> shls;
+ std::vector<Node> children;
+ Node coeff = bv_one;
+ for (const Node& cn : n)
+ {
+ Node cnb = cn;
+ if (cnb.getKind() == BITVECTOR_SHL)
+ {
+ cnb = decomposeRightAssocChain(BITVECTOR_SHL, cnb, shls);
+ }
+ std::map<Node, Node> cn_msum;
+ getBvMonomialSum(cnb, cn_msum);
+ if (cn_msum.size() == 1)
+ {
+ for (std::pair<const Node, Node>& mc : cn_msum)
+ {
+ Trace("q-ext-rewrite-debug2") << ".....factor : ";
+ if (!mc.first.isConst())
+ {
+ Trace("q-ext-rewrite-debug2") << mc.first << " * ";
+ children.push_back(mc.first);
+ }
+ Trace("q-ext-rewrite-debug2") << mc.second << std::endl;
+ if (mc.second != bv_one)
+ {
+ coeff = nm->mkNode(BITVECTOR_MULT, coeff, mc.second);
+ }
+ }
+ }
+ else
+ {
+ // don't distribute
+ children.push_back(cnb);
+ Trace("q-ext-rewrite-debug2") << ".....factor : " << cnb << std::endl;
+ }
+ }
+ Node v = bv_one;
+ if (children.size() == 1)
+ {
+ v = children[0];
+ }
+ else if (children.size() > 1)
+ {
+ v = nm->mkNode(BITVECTOR_MULT, children);
+ }
+ if (!shls.empty())
+ {
+ v = mkRightAssocChain(BITVECTOR_SHL, v, shls);
+ }
+ Trace("q-ext-rewrite-debug2")
+ << "...got " << v << " * " << coeff << std::endl;
+ n_msum[v] = coeff;
+ }
+ else if (k == BITVECTOR_SHL)
+ {
+ std::vector<Node> shls;
+ Node nn = decomposeRightAssocChain(BITVECTOR_SHL, n, shls);
+ std::map<Node, Node> nn_msum;
+ getBvMonomialSum(nn, nn_msum);
+ if (nn_msum.size() == 1)
+ {
+ for (std::pair<const Node, Node>& nnc : nn_msum)
+ {
+ Node v = mkRightAssocChain(BITVECTOR_SHL, nnc.first, shls);
+ n_msum[v] = nnc.second;
+ }
+ }
+ else
+ {
+ // do not distribute
+ n_msum[n] = bv_one;
+ }
+ }
+ else
+ {
+ n_msum[n] = bv_one;
+ }
+ }
+ // add the monomial sum for this node if we generated one
+ if (!n_msum.empty())
+ {
+ for (std::pair<const Node, Node>& m : n_msum)
+ {
+ Node v = m.first;
+ Node coeff = m.second;
+ std::map<Node, Node>::iterator itm = msum.find(v);
+ if (itm == msum.end())
+ {
+ msum[v] = coeff;
+ }
+ else
+ {
+ msum[v] = nm->mkNode(BITVECTOR_PLUS, coeff, itm->second);
+ }
+ }
+ }
+}
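
Two facts the monomial decomposition above relies on are that bvnot can be treated arithmetically as ~x = -(x + 1), and that appending k zero bits (the CONCAT case) acts as multiplication by 2^k. A minimal standalone check at width 8, independent of the CVC4 code:

#include <cassert>
#include <cstdint>

int main()
{
  for (unsigned i = 0; i < 256; i++)
  {
    uint8_t x = static_cast<uint8_t>(i);
    // ~x = -(x + 1)
    assert(static_cast<uint8_t>(~x) == static_cast<uint8_t>(-(x + 1)));
    for (unsigned k = 0; k < 8; k++)
    {
      // concat( x, 0^k ), truncated to width 8, equals x * 2^k
      assert(static_cast<uint8_t>(x << k) == static_cast<uint8_t>(x * (1u << k)));
    }
  }
  return 0;
}
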
+
+Node ExtendedRewriter::mkNodeFromBvMonomial(Node n, std::map<Node, Node>& msum)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ std::vector<Node> sum;
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ Node c = Rewriter::rewrite(m.second);
+ Node mm = nm->mkNode(BITVECTOR_MULT, c, v);
+ mm = Rewriter::rewrite(mm);
+ sum.push_back(mm);
+ }
+ if (sum.empty())
+ {
+ return mkConstBv(n, false);
+ }
+ else if (sum.size() == 1)
+ {
+ return sum[0];
+ }
+ return nm->mkNode(BITVECTOR_PLUS, sum);
+}
+
+void ExtendedRewriter::spliceBv(Node a,
+ Node b,
+ std::vector<Node>& av,
+ std::vector<Node>& bv)
+{
+ Assert(a.getType() == b.getType());
+ if (a.getKind() == BITVECTOR_CONCAT || b.getKind() == BITVECTOR_CONCAT)
+ {
+ unsigned counter = bv::utils::getSize(a) - 1;
+ bool reca = a.getKind() == BITVECTOR_CONCAT;
+ Node cterm = reca ? a : b;
+ Node oterm = reca ? b : a;
+ for (const Node& ctermc : cterm)
+ {
+ // extract relevant portion of other child
+ unsigned csize = bv::utils::getSize(ctermc);
+ Assert((counter + 1) >= csize);
+ Node extract =
+ bv::utils::mkExtract(oterm, counter, (counter + 1) - csize);
+ extract = Rewriter::rewrite(extract);
+ // recurse
+ Node c1 = reca ? ctermc : extract;
+ Node c2 = reca ? extract : ctermc;
+ spliceBv(c1, c2, av, bv);
+ // decrement counter
+ if (counter >= csize)
+ {
+ counter = counter - csize;
+ }
+ }
+ }
+ else
+ {
+ av.push_back(a);
+ bv.push_back(b);
+ }
+}
+
void ExtendedRewriter::debugExtendedRewrite(Node n,
Node ret,
const char* c) const
diff --git a/src/theory/quantifiers/extended_rewrite.h b/src/theory/quantifiers/extended_rewrite.h
index 937f522b2..c5fe2bc71 100644
--- a/src/theory/quantifiers/extended_rewrite.h
+++ b/src/theory/quantifiers/extended_rewrite.h
@@ -49,6 +49,7 @@ class ExtendedRewriter
public:
ExtendedRewriter(bool aggr = true);
~ExtendedRewriter() {}
+
/** return the extended rewritten form of n */
Node extendedRewrite(Node n);
@@ -212,7 +213,85 @@ class ExtendedRewriter
//--------------------------------------theory-specific top-level calls
/** extended rewrite arith */
Node extendedRewriteArith(Node ret, bool& pol);
+ /** extended rewrite bv */
+ Node extendedRewriteBv(Node ret, bool& pol);
//--------------------------------------end theory-specific top-level calls
+
+ //--------------------------------------bit-vectors
+ /** bitvector subsume
+ *
+ * If this function returns 1, then a's 1 bits are a superset of b's 1 bits,
+ * in other words, (bvand (bvnot a) b) = 0 holds.
+ *
+ * If this function returns 2, then additionally at least one bit of a
+ * is 1 that is 0 in b, that is, (bvand a (bvnot b)) != 0 holds.
+ *
+ * Otherwise, this function returns 0.
+ *
+ * If strict is false, then this function will only return 0 or 1.
+ *
+ * If tryNot is true, we will try to show the subsumption by calling
+ * bitVectorSubsume( ~b, ~a ).
+ */
+ int bitVectorSubsume(Node a, Node b, bool strict = false, bool tryNot = true);
+ /** bitvector arithmetic compare
+ *
+ * If this function returns 1, then bvuge( a, b ) holds.
+ *
+ * If this function returns 2, then bvugt( a, b ) holds.
+ *
+ * Otherwise this function returns 0.
+ *
+ * If strict is false, then this function will only return 0 or 1.
+ */
+ int bitVectorArithComp(Node a, Node b, bool strict = false);
+ /** bitvector disjoint
+ *
+ * Returns true if there are no bits where a and b are both 1.
+ * That is, if this function returns true, then
+ * (bvand a b) = 0.
+ * Note that this function is equivalent to
+ * bitVectorSubsume( ~a, b ) && bitVectorSubsume( ~b, a ).
+ */
+ bool bitVectorDisjoint(Node a, Node b);
+
+ /** make a constant of the same type as n: 0 if !isNot, all 1s if isNot */
+ Node mkConstBv(Node n, bool isNot);
+ /** is constant bit-vector
+ *
+ * Returns true if n is 0 and isNot = false,
+ * returns true if n is the maximum value (all 1s) and isNot = true,
+ * and returns false otherwise.
+ */
+ bool isConstBv(Node n, bool isNot);
+ /** get the constant child of n, if any; the remaining children are added to nconst */
+ Node getConstBvChild(Node n, std::vector<Node>& nconst);
+ /** does n have a constant child? */
+ bool hasConstBvChild(Node n);
+ /** rewrite bit-vector arithmetic (BITVECTOR_PLUS / BITVECTOR_MULT) */
+ Node rewriteBvArith(Node ret);
+ /** rewrite bit-vector shifts (BITVECTOR_LSHR / BITVECTOR_SHL) */
+ Node rewriteBvShift(Node ret);
+ /** rewrite bitwise bit-vector operators (BITVECTOR_AND / BITVECTOR_OR) */
+ Node rewriteBvBool(Node ret);
+ /** normalize the bit-vector monomial sum of n */
+ Node normalizeBvMonomial(Node n);
+ /** get monomial sum of n, mapping monomials to their coefficients in msum */
+ void getBvMonomialSum(Node n, std::map<Node, Node>& msum);
+ /** make a node of the same type as n from the monomial sum msum */
+ Node mkNodeFromBvMonomial(Node n, std::map<Node, Node>& msum);
+ /** splice
+ *
+ * Adds k (non-concat) terms to n1v and n2v such that:
+ * n1 is equivalent to n1v[0] ++ ... ++ n1v[k-1] and
+ * n2 is equivalent to n2v[0] ++ ... ++ n2v[k-1],
+ * and n1v[i] and n2v[i] have equal width for i=0...k-1.
+ */
+ void spliceBv(Node n1,
+ Node n2,
+ std::vector<Node>& n1v,
+ std::vector<Node>& n2v);
+ //--------------------------------------end bit-vectors
};
} /* CVC4::theory::quantifiers namespace */