summaryrefslogtreecommitdiff
path: root/src/upb_table.c
diff options
context:
space:
mode:
author Joshua Haberman <joshua@reverberate.org> 2011-04-01 15:40:06 -0700
committer Joshua Haberman <joshua@reverberate.org> 2011-04-01 15:40:06 -0700
commit 9eb4d695c49a85f7f72ad68c3c31affd61fef984 (patch)
tree 79b7fde57e6f31a19405688a5f9e29e3f9cf7ab2 /src/upb_table.c
parent 19517cc6f39871abf4a0705b49cfed9049ca6033 (diff)
First rough version of the JIT.
It can successfully parse SpeedMessage1. Preliminary results: 750MB/s on Core2 2.4GHz. This number is 2.5x proto2. This isn't apples-to-apples, because proto2 is parsing to a struct and we are just doing stream parsing, but for apps that are currently using proto2, this is the improvement they would see if they could move to stream-based processing. Unfortunately perf-regression-test.py is broken, and I'm not 100% sure why. It would be nice to fix it first (to ensure that there are no performance regressions for the table-based decoder) but I'm really impatient to get the JIT checked in.
Diffstat (limited to 'src/upb_table.c')
-rw-r--r--src/upb_table.c6
1 file changed, 4 insertions, 2 deletions
diff --git a/src/upb_table.c b/src/upb_table.c
index b9b9824..a754097 100644
--- a/src/upb_table.c
+++ b/src/upb_table.c
@@ -102,6 +102,7 @@ static void intinsert(upb_inttable *t, upb_inttable_key_t key, void *val) {
upb_inttable_value *table_val;
if (_upb_inttable_isarrkey(t, key)) {
table_val = UPB_INDEX(t->array, key, upb_table_valuesize(&t->t));
+ t->array_count++;
//printf("Inserting key %d to Array part! %p\n", key, table_val);
} else {
t->t.count++;
@@ -152,8 +153,8 @@ static void intinsert(upb_inttable *t, upb_inttable_key_t key, void *val) {
static void upb_inttable_insertall(upb_inttable *dst, upb_inttable *src) {
for(upb_inttable_iter i = upb_inttable_begin(src); !upb_inttable_done(i);
i = upb_inttable_next(src, i)) {
- //printf("load check: %d %d\n", upb_inttable_count(dst), upb_inttable_hashtablesize(dst));
- assert((double)(upb_inttable_count(dst)) /
+ //printf("load check: %d %d\n", upb_table_count(&dst->t), upb_inttable_hashtablesize(dst));
+ assert((double)(upb_table_count(&dst->t)) /
upb_inttable_hashtablesize(dst) <= MAX_LOAD);
intinsert(dst, upb_inttable_iter_key(i), upb_inttable_iter_value(i));
}
@@ -209,6 +210,7 @@ void upb_inttable_compact(upb_inttable *t) {
}
upb_inttable new_table;
int hash_size = (upb_inttable_count(t) - array_count + 1) / MAX_LOAD;
+ //printf("array_count: %d, array_size: %d, hash_size: %d, table size: %d\n", array_count, array_size, hash_size, upb_inttable_count(t));
upb_inttable_sizedinit(&new_table, array_size, hash_size,
upb_table_valuesize(&t->t));
//printf("For %d things, using array size=%d, hash_size = %d\n", upb_inttable_count(t), array_size, hash_size);
generated by cgit on debian on lair
contact matthew@masot.net with questions or feedback