import io:*
import grammer:*
import parser:*
import lexer:*
import string:*
import util:*
import symbol:*
import tree:*
import serialize:*
import regex:* /* assumed needed for the regex::regex type referenced in doFirstSet below */
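
// Driver for the parser generator: load the Kraken grammar (reusing a cached,
// serialized copy when it is still valid), dump its parse table, lex a sample
// of to_parse.krak, parse the file, and write the syntax tree out as DOT.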
fun main():int {
    var a.construct(): grammer
    var file_name = string("../krakenGrammer.kgm")
    /*var file_name = string("../simplifiedKrakenGrammer.kgm")*/
    /*var file_name = string("grammer.kgm")*/
    var compiled_name = file_name + string(".comp_new")
    var file_contents = read_file(file_name)
    var loaded_and_valid = false
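
    // The cache file is written as serialize(file_contents) + serialize(a), so
    // read the embedded grammar source first and reuse the cached automaton
    // only if it still matches the current grammar file exactly.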
    if (file_exists(compiled_name)) {
        println("cached file exists")
        var pos = 0
        var binary = read_file_binary(compiled_name)
        println("read file!")
        var cached_contents = string()
        println("made tmp string!")
        unpack(cached_contents, pos) = unserialize<string>(binary, pos)
        println("unserialized the string!")
        if (cached_contents == file_contents) {
            println("loaded_and_valid, using cached version!")
            loaded_and_valid = true
            unpack(a, pos) = unserialize<grammer>(binary, pos)
            println("finished unserializing!!")
        } else {
            println("file contents do not match:")
            println("CACHED:")
            println(cached_contents)
            println("REAL:")
            println(file_contents)
            println("END")
        }
    } else {
        println("cached file does not exist")
    }
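
    // Cache miss or stale cache: parse the grammar, compute its FIRST sets and
    // state automaton, then write the cache back out behind the grammar source.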
    if (!loaded_and_valid) {
        println("Not loaded_and_valid, re-generating and writing out")
        a = load_grammer(file_contents)
        println("grammer loaded, calculate_first_set")
        a.calculate_first_set()
        println("first set calculated, calculate_state_automaton")
        a.calculate_state_automaton()
        println("calculated, writing out")
        write_file_binary(compiled_name, serialize(file_contents) + serialize(a))
        println("done writing")
    }
    /*println(a.to_string())*/
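
    // Debug helper (currently unused): print the FIRST set of every terminal
    // and non-terminal in the grammar.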
    var doFirstSet = fun() {
        println("///////////////////START FIRST SET/////////////")
        println("//TERMINALS//")
        a.terminals.for_each( fun(terminal: util::pair<symbol::symbol, regex::regex>) {
            var set_str = string::string("{ ")
            a.first_set_map[terminal.first].for_each( fun(sym: symbol::symbol) {
                set_str += sym.to_string() + ", "
            })
            set_str += "}"
            print(terminal.first.to_string() + " first: " + set_str + "\n")
        })
        println("//NON TERMINALS//")
        a.non_terminals.for_each( fun(non_terminal: symbol::symbol) {
            var set_str = string::string("{ ")
            a.first_set_map[non_terminal].for_each( fun(sym: symbol::symbol) {
                set_str += sym.to_string() + ", "
            })
            set_str += "}"
            print(non_terminal.to_string() + " first: " + set_str + "\n")
            println()
        })
        println("///////////////////END FIRST SET/////////////")
    }
    /*doFirstSet()*/
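
    // Print the grammar and the generated parse table for inspection.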
    println(a.to_string())
    a.parse_table.print_string()
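
    // Tokenize the input file with a lexer built from the grammar's terminal regexes.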
    var lex = lexer(a.terminals)
    lex.set_input(read_file(string("to_parse.krak")))
    /*lex.set_input(string("ccdahas spacedhas*/
    /*returndaaaaaaaaaaaaaa"))*/
    //lex.set_input(string("hibyed"))
    println("woo lexing:")
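
    // Peek at the first eight tokens to sanity-check the lexer.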
    range(8).for_each(fun(i: int) { println(lex.next().to_string()); } )
    /*range(80).for_each(fun(i: int) { println(lex.next().to_string()); } )*/
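
    // Construct the parser from the grammar and parse the actual input file.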
    var parse.construct(a): parser
    /*var result = parse.parse_input(string("a"), string("fun name"))*/
    /*var result = parse.parse_input(read_file(string("test_adt.krak")), string("fun name"))*/
    var result = parse.parse_input(read_file(string("to_parse.krak")), string("fun name"))
    /*var result = parse.parse_input(string("inport a;"), string("fun name"))*/
    /*var result = parse.parse_input(string("fun main():int { return 0; }"), string("fun name"))*/
    /*var result = parse.parse_input(string("ad"), string("fun name"))*/
    /*var result = parse.parse_input(string("hibyed"), string("fun name"))*/
    /*var result = parse.parse_input(string("hmmhmmend"), string("fun name"))*/
    /*var result = parse.parse_input(string("hid"), string("fun name"))*/
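
    // Print the resulting syntax tree and also write it to syntax_tree.dot
    // for rendering with Graphviz.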
println("the tree")
|
|
|
|
|
println(syntax_tree_to_dot(result))
|
2015-08-11 01:07:16 -04:00
|
|
|
write_file(string("syntax_tree.dot"), syntax_tree_to_dot(result))
|
2015-08-06 02:42:40 -04:00
|
|
|
/*var parse.construct(): parser*/
|
2015-07-04 03:21:36 -04:00
|
|
|
    return 0
}