3 sec laptop speed improvement in hash_map

Nathan Braswell
2017-01-22 16:36:04 -05:00
parent ebb34d5ba3
commit beb50b8e25
4 changed files with 17 additions and 88 deletions

View File

@@ -1,6 +1,7 @@
 import io:*
 import mem:*
 import map:*
+import hash_map:*
 import stack:*
 import string:*
 import util:*

View File

@@ -53,13 +53,13 @@ obj hash_map<T,U> (Object, Serializable) {
 if (!data[(key_hash%data.size) cast int].contains_key(key)) {
     size++
     if (size > data.size) {
-        var new_data = vector::vector<map::map<T,U>>()
+        var new_data.construct(size*2): vector::vector<map::map<T,U>>
         for (var i = 0; i < size*2; i++;)
             new_data.addEnd(map::map<T,U>())
         for_each(fun(key: T, value: U) {
             new_data[(util::hash(key)%new_data.size) cast int].set(key, value)
         })
-        data = new_data
+        data.swap(new_data)
     }
 }
 data[(key_hash%data.size) cast int].set(key, value)
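The rewritten grow path does two things: it constructs the replacement bucket vector with room for size*2 entries up front, so the addEnd loop is not repeatedly reallocating, and it swaps the new table into place instead of copy-assigning it, so the old table is dropped without a second element-by-element copy. A minimal C++ sketch of the same pre-size-rehash-and-swap pattern (TinyHashMap, rehash_grow, and the list-of-pairs bucket layout are illustrative stand-ins, not this codebase's types, whose buckets are map objects):

    #include <cstddef>
    #include <functional>
    #include <list>
    #include <utility>
    #include <vector>

    // Sketch of the grow path only: build a pre-sized bucket array, re-insert
    // every entry, then swap it into place so the table is never deep-copied.
    template <typename K, typename V>
    struct TinyHashMap {
        using Bucket = std::list<std::pair<K, V>>;
        std::vector<Bucket> buckets = std::vector<Bucket>(8);
        std::size_t count = 0;

        void rehash_grow() {
            std::vector<Bucket> bigger(buckets.size() * 2);  // pre-sized, like construct(size*2)
            for (auto& bucket : buckets)
                for (auto& kv : bucket)
                    bigger[std::hash<K>{}(kv.first) % bigger.size()].push_back(std::move(kv));
            buckets.swap(bigger);                            // O(1) handoff, like data.swap(new_data)
        }
    };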
@@ -67,6 +67,9 @@ obj hash_map<T,U> (Object, Serializable) {
 fun get(key: ref T): ref U {
     return data[(util::hash(key)%data.size) cast int].get(key)
 }
+fun get_ptr_or_null(key: ref T): *U {
+    return data[(util::hash(key)%data.size) cast int].get_ptr_or_null(key)
+}
 fun contains_key(key: ref T): bool {
     return data[(util::hash(key)%data.size) cast int].contains_key(key)
 }
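The new get_ptr_or_null gives callers a single-lookup way to test for a key and read its value: it returns a pointer into the table, or null when the key is absent, instead of a contains_key check followed by a separate get (two hashes of the same key). A hedged C++ analogy of the same helper over std::unordered_map (the free function mirrors the name in the diff; it is not a standard API):

    #include <string>
    #include <unordered_map>

    // One hash lookup that yields a pointer to the stored value, or nullptr
    // when the key is missing, instead of contains() followed by a second find.
    template <typename K, typename V>
    V* get_ptr_or_null(std::unordered_map<K, V>& m, const K& key) {
        auto it = m.find(key);
        return it == m.end() ? nullptr : &it->second;
    }

    // Usage sketch:
    //   std::unordered_map<std::string, int> counts{{"a", 1}};
    //   if (int* p = get_ptr_or_null(counts, std::string("a"))) ++*p;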

View File

@@ -69,7 +69,6 @@ obj parser (Object) {
 // if the zero state contains any reductions for state 0 and eof, then
 // it must be reducing to the goal state
-/*println("checking the bidness")*/
 if (inputStr == "" && gram->parse_table.get(0, eof_symbol()).contains(action(action_type::reduce(), 0))) {
     println("Accept on no input for ")
     println(name)
@@ -80,8 +79,6 @@ obj parser (Object) {
 lex.set_input(inputStr)
 var current_symbol.construct(): symbol
 for (current_symbol = lex.next(); current_symbol != eof_symbol() && current_symbol != invalid_symbol(); current_symbol = lex.next();) {
-    /*println("current_symbol is ")*/
-    /*println(current_symbol.to_string())*/
     if (current_symbol != eof_symbol() && current_symbol != invalid_symbol())
         current_symbol.source = name
     input.addEnd(current_symbol)
@@ -100,16 +97,9 @@ obj parser (Object) {
 var null_symbol_tree = null<tree<symbol>>()
 gram->parse_table.get(0, input[0]).for_each(fun(act: action) {
-    /*println("for each action")
-    act.print()
-    */
     if (act.act == action_type::push())
         to_shift.push(make_pair(v0, act.state_or_rule))
-    /*else if (act.act == reduce && fully_reduces_to_null(gram.rules[act.state_or_rule])) {*/
     else if (act.act == action_type::reduce() && act.rule_position == 0) {
-        /*print("act == reduce() && == 0 Adding reduction from state: ")
-        println(v0->data)
-        */
         to_reduce.push(reduction(v0, gram->rules[act.state_or_rule].lhs, 0, null_symbol_tree, null_symbol_tree))
     }
 })
@@ -128,19 +118,12 @@ obj parser (Object) {
         return null<tree<symbol>>()
     }
     SPPFStepNodes.clear()
-    /*
-    print("to_reduce size: ")
-    println(to_reduce.size())
-    print("to_shift size: ")
-    println(to_shift.size())
-    */
     while (to_reduce.size())
         reducer(i)
     shifter(i)
 }
 var acc_state = gss.frontier_get_acc_state(input.size-1)
 if (acc_state) {
-    /*println("ACCEPTED!")*/
     return gss.get_edge(acc_state, v0)
 }
@@ -157,37 +140,13 @@ obj parser (Object) {
 }
 fun reducer(i: int) {
     var curr_reduction = to_reduce.pop()
-    /*print("reducing from state: ")
-    println(curr_reduction.from->data)
-    print("curr_reduction.length (not length-1) is: ")
-    println(curr_reduction.length)
-    */
     gss.get_reachable_paths(curr_reduction.from, max(0, curr_reduction.length-1)).
         for_each(fun(path: ref vector<*tree<int>>) {
-            /*println("in get_reachable_paths for_each loop")*/
             var path_edges = range(path.size-1).map(fun(indx: int): *tree<symbol> { return gss.get_edge(path[indx], path[indx+1]);}).reverse()
-            /*print("path ")
-            path.for_each(fun(part: *tree<int>) {
-                print(part->data)
-                print(" ")
-            })
-            println()
-            println("got path edges")
-            println("there are this many:")
-            println(path_edges.size)
-            */
             if (curr_reduction.length != 0) {
                 path_edges.addEnd(curr_reduction.label)
-                /*println("also adding the one from the reduction")
-                println(curr_reduction.label->data.to_string())
-                */
             }
             var curr_reached = path.last()
-            /*print("checking shift for state ")
-            print(curr_reached->data)
-            print(" and ")
-            println(curr_reduction.sym.to_string())
-            */
             // if this is the Goal = a type reduction, then skip the actual reduction part.
             // the shift lookup will fail, and likely other things, and this is our accept
             // criteria anyway
@@ -197,7 +156,6 @@ obj parser (Object) {
                 return;
             }
             var shift_to = gram->parse_table.get_shift(curr_reached->data, curr_reduction.sym).state_or_rule
-            /*println("got shift to")*/
             var new_label = null<tree<symbol>>()
             if (curr_reduction.length == 0) {
                 new_label = curr_reduction.nullable_parts
@@ -222,16 +180,12 @@ obj parser (Object) {
             // do non-null reductions
             if (curr_reduction.length) {
                 gram->parse_table.get(shift_to, input[i]).for_each(fun(act: action) {
-                    /*if (act.act == reduce && !fully_reduces_to_null(reduce_rule)) {*/
                     if (act.act == action_type::reduce() && act.rule_position != 0) {
                         var reduce_rule = gram->rules[act.state_or_rule]
                         to_reduce.push(reduction(curr_reached, reduce_rule.lhs,
                                                  act.rule_position,
                                                  get_nullable_parts(reduce_rule),
                                                  new_label))
-                        /*print("(non null) Adding reduction from state: ")
-                        println(curr_reached->data)
-                        */
                     }
                 })
             }
@@ -251,16 +205,10 @@ obj parser (Object) {
                         to_reduce.push(reduction(shift_to_node, action_rule.lhs, 0,
                                                  get_nullable_parts(action_rule),
                                                  null<tree<symbol>>() ))
-                        /*print("null reduces Adding reduction from state: ")
-                        println(shift_to_node->data)
-                        */
                     } else if (curr_reduction.length != 0) {
                         to_reduce.push(reduction(curr_reached, action_rule.lhs, act.rule_position,
                                                  get_nullable_parts(action_rule),
                                                  new_label ))
-                        /*print("null does not reduce Adding reduction from state: ")
-                        println(curr_reached->data)
-                        */
                     }
                 }
             })
@@ -270,73 +218,40 @@ obj parser (Object) {
         })
 }
 fun shifter(i: int) {
-    /*println("shifting")*/
     if (i >= input.size-1)
         return; // darn ambiguity
-    /*print("shifting on ")
-    println(input[i].to_string())
-    */
     var next_shifts = stack< pair<*tree<int>, int> >()
     var new_label = new<tree<symbol>>()->construct(input[i])
     while (!to_shift.empty()) {
-        /*println("to_shift not empty")*/
         var shift = to_shift.pop()
-        /*println("post pop")*/
         var shift_to_node = gss.in_frontier(i+1, shift.second)
-        /*println("post in_frontier")*/
         if (shift_to_node) {
-            /*print("already in frontier ")
-            println(i+1)
-            */
             gss.add_edge(shift_to_node, shift.first, new_label)
             gram->parse_table.get_reduces(shift.second, input[i+1]).for_each(fun(action: action) {
                 var reduce_rule = gram->rules[action.state_or_rule]
-                /*if (!fully_reduces_to_null(reduce_rule)) {*/
                 if (action.rule_position != 0) {
                     to_reduce.push(reduction(shift.first, reduce_rule.lhs, action.rule_position,
                                              get_nullable_parts(reduce_rule),
                                              new_label ))
-                    /*print("if shift to node Adding reduction from state: ")
-                    println(shift.first->data)
-                    */
                 }
             })
         } else {
-            /*print("adding to frontier ")
-            println(i+1)
-            */
             shift_to_node = gss.new_node(shift.second)
             gss.add_to_frontier(i+1, shift_to_node)
-            /*println("post add to frontier")*/
             gss.add_edge(shift_to_node, shift.first, new_label)
-            /*println("post add edger")*/
             gram->parse_table.get(shift.second, input[i+1]).for_each(fun(action: action) {
-                /*println("looking at an action")*/
                 if (action.act == action_type::push()) {
-                    /*println("is push")*/
                     next_shifts.push(make_pair(shift_to_node, action.state_or_rule))
                 } else {
-                    /*println("is reduce")*/
                     var action_rule = gram->rules[action.state_or_rule]
-                    /*if (!fully_reduces_to_null(action_rule)) {*/
                     if (action.rule_position != 0) {
-                        /*println("does not reduce to null")*/
                         to_reduce.push(reduction(shift.first, action_rule.lhs, action.rule_position,
                                                  get_nullable_parts(action_rule),
                                                  new_label ))
-                        /*print("not shift to, reduce, != 0 Adding reduction from state: ")
-                        println(shift.first->data)
-                        print("added ruduction rule+position: ")
-                        println(action.rule_position)
-                        */
                     } else {
-                        /*println("does reduce to null")*/
                         to_reduce.push(reduction(shift_to_node, action_rule.lhs, 0,
                                                  get_nullable_parts(action_rule),
                                                  null<tree<symbol>>() ))
-                        /*print("not shift to, reduce, == 0 Adding reduction from state: ")
-                        println(shift_to_node->data)
-                        */
                     }
                 }
             })
@@ -413,7 +328,6 @@ obj parser (Object) {
 obj gss (Object) {
     var data: vector<vector<*tree<int>>>
-    /*var edges: map< pair<*tree<int>, *tree<int>>, *tree<symbol> >*/
     var edges: hash_map< pair<*tree<int>, *tree<int>>, *tree<symbol> >
     fun construct(): *gss {
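For context on why hash_map changes show up as parse-time wins: the GSS keeps its edge labels in a hash_map keyed by a pair of node pointers, and reducer/shifter call gss.get_edge and gss.add_edge on every path they explore, so hash_map set/get sit on the parser's hot loop. A rough C++ analogy of keying a hash table by an edge's (from, to) pointer pair (the hash mix and type names here are illustrative, not the codebase's util::hash or tree types):

    #include <cstdint>
    #include <unordered_map>
    #include <utility>

    struct Node;        // stand-in for tree<int> (a GSS node)
    struct SymbolTree;  // stand-in for tree<symbol> (an edge label)

    // Illustrative hash for an edge keyed by its (from, to) node pointers.
    struct EdgeKeyHash {
        std::size_t operator()(const std::pair<Node*, Node*>& e) const {
            auto a = reinterpret_cast<std::uintptr_t>(e.first);
            auto b = reinterpret_cast<std::uintptr_t>(e.second);
            return static_cast<std::size_t>(a ^ (b * 0x9e3779b97f4a7c15ULL));  // simple mix
        }
    };

    using EdgeMap = std::unordered_map<std::pair<Node*, Node*>, SymbolTree*, EdgeKeyHash>;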

View File

@@ -43,6 +43,17 @@ obj vector<T> (Object, Serializable) {
             memmove((data) cast *void, (old->data) cast *void, size * #sizeof<T>)
         }
     }
+    fun swap(other: ref vector<T>) {
+        var temp_data = data
+        var temp_size = size
+        var temp_available = available
+        data = other.data
+        size = other.size
+        available = other.available
+        other.data = temp_data
+        other.size = temp_size
+        other.available = temp_available
+    }
     fun serialize(): vector<char> {
         var toRet = serialize(size)
         for (var i = 0; i < size; i++;)
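The new vector::swap exchanges only the three bookkeeping members (data pointer, size, available capacity), so it runs in constant time regardless of how many elements either vector holds; that is what lets hash_map hand over its freshly built bucket table without copying any buckets. A C++ sketch of the same member-wise exchange (RawVec is an illustrative stand-in, not this library's vector):

    #include <cstddef>
    #include <utility>

    // Same idea as vector<T>::swap above: three scalar swaps, O(1),
    // no per-element copies and no allocation.
    template <typename T>
    struct RawVec {
        T* data = nullptr;
        std::size_t size = 0;
        std::size_t available = 0;

        void swap(RawVec& other) {
            std::swap(data, other.data);
            std::swap(size, other.size);
            std::swap(available, other.available);
        }
    };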