Diffstat (limited to 'upb/refcounted.c')
-rw-r--r--  upb/refcounted.c  399
1 file changed, 217 insertions, 182 deletions
diff --git a/upb/refcounted.c b/upb/refcounted.c
index fa775ab..85b378c 100644
--- a/upb/refcounted.c
+++ b/upb/refcounted.c
@@ -30,17 +30,17 @@ const void *UPB_UNTRACKED_REF = &untracked_val;
/* arch-specific atomic primitives *******************************************/
-#ifdef UPB_THREAD_UNSAFE //////////////////////////////////////////////////////
+#ifdef UPB_THREAD_UNSAFE /*---------------------------------------------------*/
static void atomic_inc(uint32_t *a) { (*a)++; }
static bool atomic_dec(uint32_t *a) { return --(*a) == 0; }
-#elif defined(__GNUC__) || defined(__clang__) //////////////////////////////////
+#elif defined(__GNUC__) || defined(__clang__) /*------------------------------*/
static void atomic_inc(uint32_t *a) { __sync_fetch_and_add(a, 1); }
static bool atomic_dec(uint32_t *a) { return __sync_sub_and_fetch(a, 1) == 0; }
-#elif defined(WIN32) ///////////////////////////////////////////////////////////
+#elif defined(WIN32) /*-------------------------------------------------------*/
#include <Windows.h>
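The body of the WIN32 branch is elided by this hunk. As a rough sketch only (not part of the patch), the same two primitives can be expressed with the Windows Interlocked intrinsics, assuming upb_atomic_t wraps a LONG:

#include <Windows.h>
#include <stdbool.h>

/* Hypothetical sketch, not upb's actual Windows code. */
typedef struct { volatile LONG val; } upb_atomic_t;
static void atomic_inc(upb_atomic_t *a) { InterlockedIncrement(&a->val); }
static bool atomic_dec(upb_atomic_t *a) { return InterlockedDecrement(&a->val) == 0; }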
@@ -54,13 +54,13 @@ static bool atomic_dec(upb_atomic_t *a) {
Implement them or compile with UPB_THREAD_UNSAFE.
#endif
-// All static objects point to this refcount.
-// It is special-cased in ref/unref below.
+/* All static objects point to this refcount.
+ * It is special-cased in ref/unref below. */
uint32_t static_refcount = -1;
-// We can avoid atomic ops for statically-declared objects.
-// This is a minor optimization but nice since we can avoid degrading under
-// contention in this case.
+/* We can avoid atomic ops for statically-declared objects.
+ * This is a minor optimization but nice since we can avoid degrading under
+ * contention in this case. */
static void refgroup(uint32_t *group) {
if (group != &static_refcount)
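Only the first line of refgroup() appears above. A hypothetical reconstruction of the pair of helpers the comment describes (skip the atomic op when the group is the shared static refcount) might look like:

/* Hypothetical reconstruction for illustration; the real bodies are outside
 * this hunk. */
static void refgroup(uint32_t *group) {
  if (group != &static_refcount)
    atomic_inc(group);
}

static bool unrefgroup(uint32_t *group) {
  if (group == &static_refcount) {
    return false;  /* statically-declared objects are never collected */
  } else {
    return atomic_dec(group);
  }
}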
@@ -87,21 +87,21 @@ static void upb_unlock() {}
#else
-// User must define functions that lock/unlock a global mutex and link this
-// file against them.
+/* User must define functions that lock/unlock a global mutex and link this
+ * file against them. */
void upb_lock();
void upb_unlock();
#endif
-// UPB_DEBUG_REFS mode counts on being able to malloc() memory in some
-// code-paths that can normally never fail, like upb_refcounted_ref(). Since
-// we have no way to propagage out-of-memory errors back to the user, and since
-// these errors can only occur in UPB_DEBUG_REFS mode, we immediately fail.
+/* UPB_DEBUG_REFS mode counts on being able to malloc() memory in some
+ * code-paths that can normally never fail, like upb_refcounted_ref(). Since
+ * we have no way to propagate out-of-memory errors back to the user, and since
+ * these errors can only occur in UPB_DEBUG_REFS mode, we immediately fail. */
#define CHECK_OOM(predicate) if (!(predicate)) { assert(predicate); exit(1); }
typedef struct {
- int count; // How many refs there are (duplicates only allowed for ref2).
+ int count; /* How many refs there are (duplicates only allowed for ref2). */
bool is_ref2;
} trackedref;
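A plausible sketch (the real function sits just outside this hunk) of the allocator for these tracking entries, illustrating how CHECK_OOM is meant to be used:

/* Illustrative only; mirrors the struct above. */
static trackedref *trackedref_new(bool is_ref2) {
  trackedref *ret = malloc(sizeof(*ret));
  CHECK_OOM(ret);
  ret->count = 1;
  ret->is_ref2 = is_ref2;
  return ret;
}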
@@ -114,18 +114,19 @@ static trackedref *trackedref_new(bool is_ref2) {
}
static void track(const upb_refcounted *r, const void *owner, bool ref2) {
+ upb_value v;
+
assert(owner);
if (owner == UPB_UNTRACKED_REF) return;
upb_lock();
- upb_value v;
if (upb_inttable_lookupptr(r->refs, owner, &v)) {
trackedref *ref = upb_value_getptr(v);
- // Since we allow multiple ref2's for the same to/from pair without
- // allocating separate memory for each one, we lose the fine-grained
- // tracking behavior we get with regular refs. Since ref2s only happen
- // inside upb, we'll accept this limitation until/unless there is a really
- // difficult upb-internal bug that can't be figured out without it.
+ /* Since we allow multiple ref2's for the same to/from pair without
+ * allocating separate memory for each one, we lose the fine-grained
+ * tracking behavior we get with regular refs. Since ref2s only happen
+ * inside upb, we'll accept this limitation until/unless there is a really
+ * difficult upb-internal bug that can't be figured out without it. */
assert(ref2);
assert(ref->is_ref2);
ref->count++;
@@ -134,8 +135,8 @@ static void track(const upb_refcounted *r, const void *owner, bool ref2) {
bool ok = upb_inttable_insertptr(r->refs, owner, upb_value_ptr(ref));
CHECK_OOM(ok);
if (ref2) {
- // We know this cast is safe when it is a ref2, because it's coming from
- // another refcounted object.
+ /* We know this cast is safe when it is a ref2, because it's coming from
+ * another refcounted object. */
const upb_refcounted *from = owner;
assert(!upb_inttable_lookupptr(from->ref2s, r, NULL));
ok = upb_inttable_insertptr(from->ref2s, r, upb_value_ptr(NULL));
@@ -146,22 +147,25 @@ static void track(const upb_refcounted *r, const void *owner, bool ref2) {
}
static void untrack(const upb_refcounted *r, const void *owner, bool ref2) {
+ upb_value v;
+ bool found;
+ trackedref *ref;
+
assert(owner);
if (owner == UPB_UNTRACKED_REF) return;
upb_lock();
- upb_value v;
- bool found = upb_inttable_lookupptr(r->refs, owner, &v);
- // This assert will fail if an owner attempts to release a ref it didn't have.
+ found = upb_inttable_lookupptr(r->refs, owner, &v);
+ /* This assert will fail if an owner attempts to release a ref it didn't have. */
UPB_ASSERT_VAR(found, found);
- trackedref *ref = upb_value_getptr(v);
+ ref = upb_value_getptr(v);
assert(ref->is_ref2 == ref2);
if (--ref->count == 0) {
free(ref);
upb_inttable_removeptr(r->refs, owner, NULL);
if (ref2) {
- // We know this cast is safe when it is a ref2, because it's coming from
- // another refcounted object.
+ /* We know this cast is safe when it is a ref2, because it's coming from
+ * another refcounted object. */
const upb_refcounted *from = owner;
bool removed = upb_inttable_removeptr(from->ref2s, r, NULL);
assert(removed);
@@ -171,32 +175,41 @@ static void untrack(const upb_refcounted *r, const void *owner, bool ref2) {
}
static void checkref(const upb_refcounted *r, const void *owner, bool ref2) {
- upb_lock();
upb_value v;
- bool found = upb_inttable_lookupptr(r->refs, owner, &v);
+ bool found;
+ trackedref *ref;
+
+ upb_lock();
+ found = upb_inttable_lookupptr(r->refs, owner, &v);
UPB_ASSERT_VAR(found, found);
- trackedref *ref = upb_value_getptr(v);
+ ref = upb_value_getptr(v);
assert(ref->is_ref2 == ref2);
upb_unlock();
}
-// Populates the given UPB_CTYPE_INT32 inttable with counts of ref2's that
-// originate from the given owner.
+/* Populates the given UPB_CTYPE_INT32 inttable with counts of ref2's that
+ * originate from the given owner. */
static void getref2s(const upb_refcounted *owner, upb_inttable *tab) {
- upb_lock();
upb_inttable_iter i;
+
+ upb_lock();
upb_inttable_begin(&i, owner->ref2s);
for(; !upb_inttable_done(&i); upb_inttable_next(&i)) {
+ upb_value v;
+ upb_value count;
+ trackedref *ref;
+ bool ok;
+ bool found;
+
upb_refcounted *to = (upb_refcounted*)upb_inttable_iter_key(&i);
- // To get the count we need to look in the target's table.
- upb_value v;
- bool found = upb_inttable_lookupptr(to->refs, owner, &v);
+ /* To get the count we need to look in the target's table. */
+ found = upb_inttable_lookupptr(to->refs, owner, &v);
assert(found);
- trackedref *ref = upb_value_getptr(v);
- upb_value count = upb_value_int32(ref->count);
+ ref = upb_value_getptr(v);
+ count = upb_value_int32(ref->count);
- bool ok = upb_inttable_insertptr(tab, to, count);
+ ok = upb_inttable_insertptr(tab, to, count);
CHECK_OOM(ok);
}
upb_unlock();
@@ -210,15 +223,18 @@ typedef struct {
static void visit_check(const upb_refcounted *obj, const upb_refcounted *subobj,
void *closure) {
check_state *s = closure;
- assert(obj == s->obj);
- assert(subobj);
upb_inttable *ref2 = &s->ref2;
upb_value v;
- bool removed = upb_inttable_removeptr(ref2, subobj, &v);
- // The following assertion will fail if the visit() function visits a subobj
- // that it did not have a ref2 on, or visits the same subobj too many times.
+ bool removed;
+ int32_t newcount;
+
+ assert(obj == s->obj);
+ assert(subobj);
+ removed = upb_inttable_removeptr(ref2, subobj, &v);
+ /* The following assertion will fail if the visit() function visits a subobj
+ * that it did not have a ref2 on, or visits the same subobj too many times. */
assert(removed);
- int32_t newcount = upb_value_getint32(v) - 1;
+ newcount = upb_value_getint32(v) - 1;
if (newcount > 0) {
upb_inttable_insert(ref2, (uintptr_t)subobj, upb_value_int32(newcount));
}
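visit_check() reads two fields from its closure; the check_state struct it implies (the real definition is just outside this hunk) is presumably along these lines:

/* Inferred for illustration from the fields used above (s->obj, s->ref2). */
typedef struct {
  const upb_refcounted *obj;  /* object whose children are being verified */
  upb_inttable ref2;          /* expected ref2 targets -> remaining counts */
} check_state;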
@@ -226,19 +242,21 @@ static void visit_check(const upb_refcounted *obj, const upb_refcounted *subobj,
static void visit(const upb_refcounted *r, upb_refcounted_visit *v,
void *closure) {
- // In DEBUG_REFS mode we know what existing ref2 refs there are, so we know
- // exactly the set of nodes that visit() should visit. So we verify visit()'s
- // correctness here.
+ bool ok;
+
+ /* In DEBUG_REFS mode we know what existing ref2 refs there are, so we know
+ * exactly the set of nodes that visit() should visit. So we verify visit()'s
+ * correctness here. */
check_state state;
state.obj = r;
- bool ok = upb_inttable_init(&state.ref2, UPB_CTYPE_INT32);
+ ok = upb_inttable_init(&state.ref2, UPB_CTYPE_INT32);
CHECK_OOM(ok);
getref2s(r, &state.ref2);
- // This should visit any children in the ref2 table.
+ /* This should visit any children in the ref2 table. */
if (r->vtbl->visit) r->vtbl->visit(r, visit_check, &state);
- // This assertion will fail if the visit() function missed any children.
+ /* This assertion will fail if the visit() function missed any children. */
assert(upb_inttable_count(&state.ref2) == 0);
upb_inttable_uninit(&state.ref2);
if (r->vtbl->visit) r->vtbl->visit(r, v, closure);
@@ -302,27 +320,27 @@ static void visit(const upb_refcounted *r, upb_refcounted_visit *v,
if (r->vtbl->visit) r->vtbl->visit(r, v, closure);
}
-#endif // UPB_DEBUG_REFS
+#endif /* UPB_DEBUG_REFS */
/* freeze() *******************************************************************/
-// The freeze() operation is by far the most complicated part of this scheme.
-// We compute strongly-connected components and then mutate the graph such that
-// we preserve the invariants documented at the top of this file. And we must
-// handle out-of-memory errors gracefully (without leaving the graph
-// inconsistent), which adds to the fun.
+/* The freeze() operation is by far the most complicated part of this scheme.
+ * We compute strongly-connected components and then mutate the graph such that
+ * we preserve the invariants documented at the top of this file. And we must
+ * handle out-of-memory errors gracefully (without leaving the graph
+ * inconsistent), which adds to the fun. */
-// The state used by the freeze operation (shared across many functions).
+/* The state used by the freeze operation (shared across many functions). */
typedef struct {
int depth;
int maxdepth;
uint64_t index;
- // Maps upb_refcounted* -> attributes (color, etc). attr layout varies by
- // color.
+ /* Maps upb_refcounted* -> attributes (color, etc). attr layout varies by
+ * color. */
upb_inttable objattr;
- upb_inttable stack; // stack of upb_refcounted* for Tarjan's algorithm.
- upb_inttable groups; // array of uint32_t*, malloc'd refcounts for new groups
+ upb_inttable stack; /* stack of upb_refcounted* for Tarjan's algorithm. */
+ upb_inttable groups; /* array of uint32_t*, malloc'd refcounts for new groups */
upb_status *status;
jmp_buf err;
} tarjan;
@@ -331,15 +349,15 @@ static void release_ref2(const upb_refcounted *obj,
const upb_refcounted *subobj,
void *closure);
-// Node attributes /////////////////////////////////////////////////////////////
+/* Node attributes -----------------------------------------------------------*/
-// After our analysis phase all nodes will be either GRAY or WHITE.
+/* After our analysis phase all nodes will be either GRAY or WHITE. */
typedef enum {
- BLACK = 0, // Object has not been seen.
- GRAY, // Object has been found via a refgroup but may not be reachable.
- GREEN, // Object is reachable and is currently on the Tarjan stack.
- WHITE, // Object is reachable and has been assigned a group (SCC).
+ BLACK = 0, /* Object has not been seen. */
+ GRAY, /* Object has been found via a refgroup but may not be reachable. */
+ GREEN, /* Object is reachable and is currently on the Tarjan stack. */
+ WHITE /* Object is reachable and has been assigned a group (SCC). */
} color_t;
UPB_NORETURN static void err(tarjan *t) { longjmp(t->err, 1); }
@@ -367,7 +385,7 @@ static void setattr(tarjan *t, const upb_refcounted *r, uint64_t attr) {
}
static color_t color(tarjan *t, const upb_refcounted *r) {
- return trygetattr(t, r) & 0x3; // Color is always stored in the low 2 bits.
+ return trygetattr(t, r) & 0x3; /* Color is always stored in the low 2 bits. */
}
static void set_gray(tarjan *t, const upb_refcounted *r) {
@@ -375,11 +393,11 @@ static void set_gray(tarjan *t, const upb_refcounted *r) {
setattr(t, r, GRAY);
}
-// Pushes an obj onto the Tarjan stack and sets it to GREEN.
+/* Pushes an obj onto the Tarjan stack and sets it to GREEN. */
static void push(tarjan *t, const upb_refcounted *r) {
assert(color(t, r) == BLACK || color(t, r) == GRAY);
- // This defines the attr layout for the GREEN state. "index" and "lowlink"
- // get 31 bits, which is plenty (limit of 2B objects frozen at a time).
+ /* This defines the attr layout for the GREEN state. "index" and "lowlink"
+ * get 31 bits, which is plenty (limit of 2B objects frozen at a time). */
setattr(t, r, GREEN | (t->index << 2) | (t->index << 33));
if (++t->index == 0x80000000) {
upb_status_seterrmsg(t->status, "too many objects to freeze");
@@ -388,13 +406,13 @@ static void push(tarjan *t, const upb_refcounted *r) {
upb_inttable_push(&t->stack, upb_value_ptr((void*)r));
}
-// Pops an obj from the Tarjan stack and sets it to WHITE, with a ptr to its
-// SCC group.
+/* Pops an obj from the Tarjan stack and sets it to WHITE, with a ptr to its
+ * SCC group. */
static upb_refcounted *pop(tarjan *t) {
upb_refcounted *r = upb_value_getptr(upb_inttable_pop(&t->stack));
assert(color(t, r) == GREEN);
- // This defines the attr layout for nodes in the WHITE state.
- // Top of group stack is [group, NULL]; we point at group.
+ /* This defines the attr layout for nodes in the WHITE state.
+ * Top of group stack is [group, NULL]; we point at group. */
setattr(t, r, WHITE | (upb_inttable_count(&t->groups) - 2) << 8);
return r;
}
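push() and pop() define the attr layouts for GREEN and WHITE nodes, but the accessors that read those bits back are outside this diff. Hypothetical accessors consistent with the GREEN layout above:

/* Illustration only: consistent with "GREEN | (index << 2) | (index << 33)",
 * i.e. color in the low 2 bits, "index" in bits 2-32, "lowlink" in bits
 * 33-63. */
static uint64_t idx(tarjan *t, const upb_refcounted *r) {
  assert(color(t, r) == GREEN);
  return (getattr(t, r) >> 2) & 0x7FFFFFFF;
}

static uint64_t lowlink(tarjan *t, const upb_refcounted *r) {
  assert(color(t, r) == GREEN);
  return getattr(t, r) >> 33;
}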
@@ -402,7 +420,7 @@ static upb_refcounted *pop(tarjan *t) {
static void tarjan_newgroup(tarjan *t) {
uint32_t *group = malloc(sizeof(*group));
if (!group) oom(t);
- // Push group and empty group leader (we'll fill in leader later).
+ /* Push group and empty group leader (we'll fill in leader later). */
if (!upb_inttable_push(&t->groups, upb_value_ptr(group)) ||
!upb_inttable_push(&t->groups, upb_value_ptr(NULL))) {
free(group);
@@ -430,21 +448,27 @@ static void set_lowlink(tarjan *t, const upb_refcounted *r, uint32_t lowlink) {
}
static uint32_t *group(tarjan *t, upb_refcounted *r) {
- assert(color(t, r) == WHITE);
- uint64_t groupnum = getattr(t, r) >> 8;
+ uint64_t groupnum;
upb_value v;
- bool found = upb_inttable_lookup(&t->groups, groupnum, &v);
+ bool found;
+
+ assert(color(t, r) == WHITE);
+ groupnum = getattr(t, r) >> 8;
+ found = upb_inttable_lookup(&t->groups, groupnum, &v);
UPB_ASSERT_VAR(found, found);
return upb_value_getptr(v);
}
-// If the group leader for this object's group has not previously been set,
-// the given object is assigned to be its leader.
+/* If the group leader for this object's group has not previously been set,
+ * the given object is assigned to be its leader. */
static upb_refcounted *groupleader(tarjan *t, upb_refcounted *r) {
- assert(color(t, r) == WHITE);
- uint64_t leader_slot = (getattr(t, r) >> 8) + 1;
+ uint64_t leader_slot;
upb_value v;
- bool found = upb_inttable_lookup(&t->groups, leader_slot, &v);
+ bool found;
+
+ assert(color(t, r) == WHITE);
+ leader_slot = (getattr(t, r) >> 8) + 1;
+ found = upb_inttable_lookup(&t->groups, leader_slot, &v);
UPB_ASSERT_VAR(found, found);
if (upb_value_getptr(v)) {
return upb_value_getptr(v);
@@ -456,10 +480,10 @@ static upb_refcounted *groupleader(tarjan *t, upb_refcounted *r) {
}
-// Tarjan's algorithm //////////////////////////////////////////////////////////
+/* Tarjan's algorithm --------------------------------------------------------*/
-// See:
-// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+/* See:
+ * http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm */
static void do_tarjan(const upb_refcounted *obj, tarjan *t);
static void tarjan_visit(const upb_refcounted *obj,
@@ -470,14 +494,14 @@ static void tarjan_visit(const upb_refcounted *obj,
upb_status_seterrf(t->status, "graph too deep to freeze (%d)", t->maxdepth);
err(t);
} else if (subobj->is_frozen || color(t, subobj) == WHITE) {
- // Do nothing: we don't want to visit or color already-frozen nodes,
- // and WHITE nodes have already been assigned a SCC.
+ /* Do nothing: we don't want to visit or color already-frozen nodes,
+ * and WHITE nodes have already been assigned a SCC. */
} else if (color(t, subobj) < GREEN) {
- // Subdef has not yet been visited; recurse on it.
+ /* Subdef has not yet been visited; recurse on it. */
do_tarjan(subobj, t);
set_lowlink(t, obj, UPB_MIN(lowlink(t, obj), lowlink(t, subobj)));
} else if (color(t, subobj) == GREEN) {
- // Subdef is in the stack and hence in the current SCC.
+ /* Subdef is in the stack and hence in the current SCC. */
set_lowlink(t, obj, UPB_MIN(lowlink(t, obj), idx(t, subobj)));
}
--t->depth;
@@ -485,7 +509,7 @@ static void tarjan_visit(const upb_refcounted *obj,
static void do_tarjan(const upb_refcounted *obj, tarjan *t) {
if (color(t, obj) == BLACK) {
- // We haven't seen this object's group; mark the whole group GRAY.
+ /* We haven't seen this object's group; mark the whole group GRAY. */
const upb_refcounted *o = obj;
do { set_gray(t, o); } while ((o = o->next) != obj);
}
@@ -500,15 +524,15 @@ static void do_tarjan(const upb_refcounted *obj, tarjan *t) {
}
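Only the opening lines of do_tarjan() appear in this diff. For readers following the algorithm, a sketch of a standard Tarjan body consistent with the helpers shown (push/pop, tarjan_visit, tarjan_newgroup):

/* Hypothetical sketch, not the patched code: the elided middle of do_tarjan().
 * Push the node, recurse over its children, and if it turns out to be the
 * root of its SCC, pop the whole component into a freshly allocated group. */
push(t, obj);
visit(obj, tarjan_visit, t);
if (lowlink(t, obj) == idx(t, obj)) {
  tarjan_newgroup(t);
  while (pop(t) != obj)
    ;  /* every popped member is assigned to the new group */
}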
-// freeze() ////////////////////////////////////////////////////////////////////
+/* freeze() ------------------------------------------------------------------*/
static void crossref(const upb_refcounted *r, const upb_refcounted *subobj,
void *_t) {
tarjan *t = _t;
assert(color(t, r) > BLACK);
if (color(t, subobj) > BLACK && r->group != subobj->group) {
- // Previously this ref was not reflected in subobj->group because they
- // were in the same group; now that they are split a ref must be taken.
+ /* Previously this ref was not reflected in subobj->group because they
+ * were in the same group; now that they are split a ref must be taken. */
refgroup(subobj->group);
}
}
@@ -516,10 +540,12 @@ static void crossref(const upb_refcounted *r, const upb_refcounted *subobj,
static bool freeze(upb_refcounted *const*roots, int n, upb_status *s,
int maxdepth) {
volatile bool ret = false;
+ int i;
+ upb_inttable_iter iter;
- // We run in two passes so that we can allocate all memory before performing
- // any mutation of the input -- this allows us to leave the input unchanged
- // in the case of memory allocation failure.
+ /* We run in two passes so that we can allocate all memory before performing
+ * any mutation of the input -- this allows us to leave the input unchanged
+ * in the case of memory allocation failure. */
tarjan t;
t.index = 0;
t.depth = 0;
@@ -531,64 +557,65 @@ static bool freeze(upb_refcounted *const*roots, int n, upb_status *s,
if (setjmp(t.err) != 0) goto err4;
- for (int i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
if (color(&t, roots[i]) < GREEN) {
do_tarjan(roots[i], &t);
}
}
- // If we've made it this far, no further errors are possible so it's safe to
- // mutate the objects without risk of leaving them in an inconsistent state.
+ /* If we've made it this far, no further errors are possible so it's safe to
+ * mutate the objects without risk of leaving them in an inconsistent state. */
ret = true;
- // The transformation that follows requires care. The preconditions are:
- // - all objects in attr map are WHITE or GRAY, and are in mutable groups
- // (groups of all mutable objs)
- // - no ref2(to, from) refs have incremented count(to) if both "to" and
- // "from" are in our attr map (this follows from invariants (2) and (3))
-
- // Pass 1: we remove WHITE objects from their mutable groups, and add them to
- // new groups according to the SCC's we computed. These new groups will
- // consist of only frozen objects. None will be immediately collectible,
- // because WHITE objects are by definition reachable from one of "roots",
- // which the caller must own refs on.
- upb_inttable_iter i;
- upb_inttable_begin(&i, &t.objattr);
- for(; !upb_inttable_done(&i); upb_inttable_next(&i)) {
- upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&i);
- // Since removal from a singly-linked list requires access to the object's
- // predecessor, we consider obj->next instead of obj for moving. With the
- // while() loop we guarantee that we will visit every node's predecessor.
- // Proof:
- // 1. every node's predecessor is in our attr map.
- // 2. though the loop body may change a node's predecessor, it will only
- // change it to be the node we are currently operating on, so with a
- // while() loop we guarantee ourselves the chance to remove each node.
+ /* The transformation that follows requires care. The preconditions are:
+ * - all objects in attr map are WHITE or GRAY, and are in mutable groups
+ * (groups of all mutable objs)
+ * - no ref2(to, from) refs have incremented count(to) if both "to" and
+ * "from" are in our attr map (this follows from invariants (2) and (3)) */
+
+ /* Pass 1: we remove WHITE objects from their mutable groups, and add them to
+ * new groups according to the SCC's we computed. These new groups will
+ * consist of only frozen objects. None will be immediately collectible,
+ * because WHITE objects are by definition reachable from one of "roots",
+ * which the caller must own refs on. */
+ upb_inttable_begin(&iter, &t.objattr);
+ for(; !upb_inttable_done(&iter); upb_inttable_next(&iter)) {
+ upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&iter);
+ /* Since removal from a singly-linked list requires access to the object's
+ * predecessor, we consider obj->next instead of obj for moving. With the
+ * while() loop we guarantee that we will visit every node's predecessor.
+ * Proof:
+ * 1. every node's predecessor is in our attr map.
+ * 2. though the loop body may change a node's predecessor, it will only
+ * change it to be the node we are currently operating on, so with a
+ * while() loop we guarantee ourselves the chance to remove each node. */
while (color(&t, obj->next) == WHITE &&
group(&t, obj->next) != obj->next->group) {
- // Remove from old group.
+ upb_refcounted *leader;
+
+ /* Remove from old group. */
upb_refcounted *move = obj->next;
if (obj == move) {
- // Removing the last object from a group.
+ /* Removing the last object from a group. */
assert(*obj->group == obj->individual_count);
free(obj->group);
} else {
obj->next = move->next;
- // This may decrease to zero; we'll collect GRAY objects (if any) that
- // remain in the group in the third pass.
+ /* This may decrease to zero; we'll collect GRAY objects (if any) that
+ * remain in the group in the third pass. */
assert(*move->group >= move->individual_count);
*move->group -= move->individual_count;
}
- // Add to new group.
- upb_refcounted *leader = groupleader(&t, move);
+ /* Add to new group. */
+ leader = groupleader(&t, move);
if (move == leader) {
- // First object added to new group is its leader.
+ /* First object added to new group is its leader. */
move->group = group(&t, move);
move->next = move;
*move->group = move->individual_count;
} else {
- // Group already has at least one object in it.
+ /* Group already has at least one object in it. */
assert(leader->group == group(&t, move));
move->group = group(&t, move);
move->next = leader->next;
@@ -600,40 +627,42 @@ static bool freeze(upb_refcounted *const*roots, int n, upb_status *s,
}
}
- // Pass 2: GRAY and WHITE objects "obj" with ref2(to, obj) references must
- // increment count(to) if group(obj) != group(to) (which could now be the
- // case if "to" was just frozen).
- upb_inttable_begin(&i, &t.objattr);
- for(; !upb_inttable_done(&i); upb_inttable_next(&i)) {
- upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&i);
+ /* Pass 2: GRAY and WHITE objects "obj" with ref2(to, obj) references must
+ * increment count(to) if group(obj) != group(to) (which could now be the
+ * case if "to" was just frozen). */
+ upb_inttable_begin(&iter, &t.objattr);
+ for(; !upb_inttable_done(&iter); upb_inttable_next(&iter)) {
+ upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&iter);
visit(obj, crossref, &t);
}
- // Pass 3: GRAY objects are collected if their group's refcount dropped to
- // zero when we removed its white nodes. This can happen if they had only
- // been kept alive by virtue of sharing a group with an object that was just
- // frozen.
- //
- // It is important that we do this last, since the GRAY object's free()
- // function could call unref2() on just-frozen objects, which will decrement
- // refs that were added in pass 2.
- upb_inttable_begin(&i, &t.objattr);
- for(; !upb_inttable_done(&i); upb_inttable_next(&i)) {
- upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&i);
+ /* Pass 3: GRAY objects are collected if their group's refcount dropped to
+ * zero when we removed its white nodes. This can happen if they had only
+ * been kept alive by virtue of sharing a group with an object that was just
+ * frozen.
+ *
+ * It is important that we do this last, since the GRAY object's free()
+ * function could call unref2() on just-frozen objects, which will decrement
+ * refs that were added in pass 2. */
+ upb_inttable_begin(&iter, &t.objattr);
+ for(; !upb_inttable_done(&iter); upb_inttable_next(&iter)) {
+ upb_refcounted *obj = (upb_refcounted*)upb_inttable_iter_key(&iter);
if (obj->group == NULL || *obj->group == 0) {
if (obj->group) {
- // We eagerly free() the group's count (since we can't easily determine
- // the group's remaining size it's the easiest way to ensure it gets
- // done).
+ upb_refcounted *o;
+
+ /* We eagerly free() the group's count (since we can't easily determine
+ * the group's remaining size it's the easiest way to ensure it gets
+ * done). */
free(obj->group);
- // Visit to release ref2's (done in a separate pass since release_ref2
- // depends on o->group being unmodified so it can test merged()).
- upb_refcounted *o = obj;
+ /* Visit to release ref2's (done in a separate pass since release_ref2
+ * depends on o->group being unmodified so it can test merged()). */
+ o = obj;
do { visit(o, release_ref2, NULL); } while ((o = o->next) != obj);
- // Mark "group" fields as NULL so we know to free the objects later in
- // this loop, but also don't try to delete the group twice.
+ /* Mark "group" fields as NULL so we know to free the objects later in
+ * this loop, but also don't try to delete the group twice. */
o = obj;
do { o->group = NULL; } while ((o = o->next) != obj);
}
@@ -643,9 +672,9 @@ static bool freeze(upb_refcounted *const*roots, int n, upb_status *s,
err4:
if (!ret) {
- upb_inttable_begin(&i, &t.groups);
- for(; !upb_inttable_done(&i); upb_inttable_next(&i))
- free(upb_value_getptr(upb_inttable_iter_value(&i)));
+ upb_inttable_begin(&iter, &t.groups);
+ for(; !upb_inttable_done(&iter); upb_inttable_next(&iter))
+ free(upb_value_getptr(upb_inttable_iter_value(&iter)));
}
upb_inttable_uninit(&t.groups);
err3:
@@ -664,21 +693,24 @@ static bool merged(const upb_refcounted *r, const upb_refcounted *r2) {
}
static void merge(upb_refcounted *r, upb_refcounted *from) {
+ upb_refcounted *base;
+ upb_refcounted *tmp;
+
if (merged(r, from)) return;
*r->group += *from->group;
free(from->group);
- upb_refcounted *base = from;
-
- // Set all refcount pointers in the "from" chain to the merged refcount.
- //
- // TODO(haberman): this linear algorithm can result in an overall O(n^2) bound
- // if the user continuously extends a group by one object. Prevent this by
- // using one of the techniques in this paper:
- // ftp://www.ncedc.org/outgoing/geomorph/dino/orals/p245-tarjan.pdf
+ base = from;
+
+ /* Set all refcount pointers in the "from" chain to the merged refcount.
+ *
+ * TODO(haberman): this linear algorithm can result in an overall O(n^2) bound
+ * if the user continuously extends a group by one object. Prevent this by
+ * using one of the techniques in this paper:
+ * ftp://www.ncedc.org/outgoing/geomorph/dino/orals/p245-tarjan.pdf */
do { from->group = r->group; } while ((from = from->next) != base);
- // Merge the two circularly linked lists by swapping their next pointers.
- upb_refcounted *tmp = r->next;
+ /* Merge the two circularly linked lists by swapping their next pointers. */
+ tmp = r->next;
r->next = base->next;
base->next = tmp;
}
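The last three lines of merge() splice two circular singly-linked lists by swapping one "next" pointer from each ring. A toy standalone illustration of that splice:

/* Toy illustration only (not upb code): after ring_merge(), every node of
 * ring "b" is reachable from ring "a" and vice versa. */
struct node { struct node *next; };

static void ring_merge(struct node *a, struct node *b) {
  struct node *tmp = a->next;
  a->next = b->next;
  b->next = tmp;
}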
@@ -698,11 +730,13 @@ static void release_ref2(const upb_refcounted *obj,
static void unref(const upb_refcounted *r) {
if (unrefgroup(r->group)) {
+ const upb_refcounted *o;
+
free(r->group);
- // In two passes, since release_ref2 needs a guarantee that any subobjs
- // are alive.
- const upb_refcounted *o = r;
+ /* In two passes, since release_ref2 needs a guarantee that any subobjs
+ * are alive. */
+ o = r;
do { visit(o, release_ref2, NULL); } while((o = o->next) != r);
o = r;
@@ -727,9 +761,9 @@ bool upb_refcounted_init(upb_refcounted *r,
const struct upb_refcounted_vtbl *vtbl,
const void *owner) {
#ifndef NDEBUG
- // Endianness check. This is unrelated to upb_refcounted, it's just a
- // convenient place to put the check that we can be assured will run for
- // basically every program using upb.
+ /* Endianness check. This is unrelated to upb_refcounted, it's just a
+ * convenient place to put the check that we can be assured will run for
+ * basically every program using upb. */
const int x = 1;
#ifdef UPB_BIG_ENDIAN
assert(*(char*)&x != 1);
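The endianness check works by reading the first byte of an int holding the value 1. A standalone version of the same trick, for reference:

/* Illustration only: prints the host byte order using the same cast as the
 * assert above (1 in the first byte means little-endian, 0 means big). */
#include <stdio.h>

int main(void) {
  const int x = 1;
  printf("%s-endian\n", *(const char *)&x == 1 ? "little" : "big");
  return 0;
}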
@@ -772,7 +806,7 @@ void upb_refcounted_unref(const upb_refcounted *r, const void *owner) {
}
void upb_refcounted_ref2(const upb_refcounted *r, upb_refcounted *from) {
- assert(!from->is_frozen); // Non-const pointer implies this.
+ assert(!from->is_frozen); /* Non-const pointer implies this. */
track(r, from, true);
if (r->is_frozen) {
refgroup(r->group);
@@ -782,7 +816,7 @@ void upb_refcounted_ref2(const upb_refcounted *r, upb_refcounted *from) {
}
void upb_refcounted_unref2(const upb_refcounted *r, upb_refcounted *from) {
- assert(!from->is_frozen); // Non-const pointer implies this.
+ assert(!from->is_frozen); /* Non-const pointer implies this. */
untrack(r, from, true);
if (r->is_frozen) {
unref(r);
@@ -806,7 +840,8 @@ void upb_refcounted_checkref(const upb_refcounted *r, const void *owner) {
bool upb_refcounted_freeze(upb_refcounted *const*roots, int n, upb_status *s,
int maxdepth) {
- for (int i = 0; i < n; i++) {
+ int i;
+ for (i = 0; i < n; i++) {
assert(!roots[i]->is_frozen);
}
return freeze(roots, n, s, maxdepth);
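A hypothetical call site for the public wrapper (illustration only; the helper name and maxdepth value are made up):

#include "upb/refcounted.h"

/* Hypothetical helper: freeze two mutable objects at once.  On failure the
 * inputs are left unchanged and the error is reported through *s. */
static bool freeze_pair(upb_refcounted *a, upb_refcounted *b, upb_status *s) {
  upb_refcounted *roots[2];
  roots[0] = a;
  roots[1] = b;
  return upb_refcounted_freeze(roots, 2, s, 64 /* maxdepth, arbitrary */);
}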