diff --git a/demo/demo.rpy b/demo/demo.rpy
index 69be8ba..f41016c 100644
--- a/demo/demo.rpy
+++ b/demo/demo.rpy
@@ -1,3 +1,12 @@
-"Bob sat on the bench."
-"Bob" "Good morning!"
-eat
\ No newline at end of file
+show black amogus # this is a comment
+# this is a full line comment
+what the heck
+"this is a string with a # comment"
+"this is a string over
+multiple lines"
+"this is \"escaped\""
+'this is a single quote string'
+'this also has escaped \'quotes\''
+this is cool # comment
+
+huh
\ No newline at end of file
diff --git a/demo/src/main.rs b/demo/src/main.rs
index 4f9e293..4e2d100 100644
--- a/demo/src/main.rs
+++ b/demo/src/main.rs
@@ -1,6 +1,5 @@
+use renrs;
+
 fn main() {
-    let commands = renrs::parse_file("demo.rpy");
-    for command in commands {
-        println!("{:?}", command);
-    }
+    renrs::parse("demo.rpy");
 }
diff --git a/src/lib.rs b/src/lib.rs
index 775617b..d6fb698 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,126 +7,40 @@ use pest_derive::Parser;
 #[grammar = "rpy.pest"]
 struct RpyParser;
 
-// Raw script tokens
-#[derive(Debug)]
-pub enum Token {
-    Keyword(String),
-    Str(String),
-    Array(Vec<Token>),
-}
-
-impl Token {
-    fn print(&self) -> &str {
-        match &self {
-            Keyword(keyword) => keyword,
-            Str(_) => "String",
-            Array(_) => "Array",
-        }
-    }
-}
-
-use Token::*;
-
-// Parsed script commands
-#[derive(Debug)]
-pub enum Command {
-    Say {
-        name: Option<String>,
-        text: String,
-    }
-}
-
-use Command::*;
-
-// Tokenize raw script string
-fn tokenize(script: &str) -> Vec<Vec<Token>> {
-    let file = RpyParser::parse(Rule::file, script)
-        .expect("unsuccessful parse")
-        .next().unwrap();
-    let mut lines = Vec::new();
+pub fn parse(file_path: &str) {
+    let unparsed_file = fs::read_to_string(file_path).expect("cannot find file");
+    let file = RpyParser::parse(Rule::file, &unparsed_file)
+        .expect("unsuccessful parse") // unwrap the parse result
+        .next().unwrap(); // get and unwrap the `file` rule; never fails
     for line in file.into_inner() {
-        let mut tokens = Vec::new();
         match line.as_rule() {
             Rule::line => {
+                println!("Line:");
                 for token in line.into_inner() {
-                    tokens.push(parse_pair(token));
+                    match token.as_rule() {
+                        Rule::token => {
+                            let token = token.into_inner().next().unwrap();
+                            match token.as_rule() {
+                                Rule::string => {
+                                    let string_data = token.into_inner().next().unwrap();
+                                    let str = string_data.as_str();
+                                    println!("string: {}", match string_data.as_rule() {
+                                        Rule::single_quote_string_data => str.replace("\\'", "'"),
+                                        Rule::double_quote_string_data => str.replace("\\\"", "\""),
+                                        _ => unreachable!(),
+                                    });
+                                },
+                                Rule::keyword => println!("keyword: {}", token.as_str()),
+                                _ => unreachable!(),
+                            };
+                        },
+                        _ => unreachable!(),
+                    }
                 }
+                println!()
             },
             Rule::EOI => (),
             _ => unreachable!(),
         }
-        // TODO: For some reason a blank final line is always parsed
-        if tokens.len() > 0 {
-            lines.push(tokens);
-        }
     }
-    lines
 }
-
-// Parse raw pest data into Token
-fn parse_pair(pair: pest::iterators::Pair<Rule>) -> Token {
-    let token = pair.as_rule();
-    match token {
-        Rule::token => {},
-        _ => panic!("Not a token!"),
-    };
-    let contents = pair.into_inner().next().unwrap();
-    let contents_rule = contents.as_rule();
-    match contents_rule {
-        Rule::string => {
-            let data = contents.into_inner().next().unwrap();
-            Token::Str(match data.as_rule() {
-                Rule::single_quote_string_data => data.as_str().replace("\\'", "'"),
-                Rule::double_quote_string_data => data.as_str().replace("\\\"", "\""),
-                _ => unreachable!(),
-            })
-        },
-        Rule::array => {
-            let mut array = Vec::new();
-            for token in contents.into_inner() {
-                array.push(parse_pair(token));
-            }
-            Token::Array(array)
-        }
-        Rule::keyword => Token::Keyword(contents.as_str().to_owned()),
-        _ => unreachable!(),
-    }
-}
-
-// Tokenize file
-fn tokenize_file(file_path: &str) -> Vec<Vec<Token>> {
-    let unparsed_file = fs::read_to_string(file_path).expect("cannot find file");
-    tokenize(&unparsed_file)
-}
-
-fn describe_line(line: &Vec<Token>) -> String {
-    let mut description = "[".to_owned();
-    let mut iter = line.iter();
-    description.push_str(&format!("{}", iter.next().unwrap().print()));
-    for token in iter {
-        description.push_str(&format!(", {}", token.print()));
-    }
-    description.push_str("]");
-    description
-}
-
-// Parse file into commands
-pub fn parse_file(file_path: &str) -> Vec<Command> {
-    let token_lines = tokenize_file(file_path);
-    let mut commands = Vec::new();
-    for line in token_lines {
-        println!("{:?}", line);
-        commands.push(match line.as_slice() {
-            [Str(text)] => Say {
-                name: None,
-                text: text.to_owned(),
-            },
-            [Str(name), Str(text)] => Say {
-                name: Some(name.to_owned()),
-                text: text.to_owned(),
-            },
-            _ => panic!("Unknown command {}", describe_line(&line)),
-        });
-    }
-    commands
-}
diff --git a/src/rpy.pest b/src/rpy.pest
index 5600fa6..6ffbdbf 100644
--- a/src/rpy.pest
+++ b/src/rpy.pest
@@ -8,13 +8,11 @@ char = { !NEWLINE ~ ANY }
 // http://pest.rs/book/grammars/syntax.html#atomic
 inner = @{ char* }
 
-token = { string | array | keyword }
+token = { string | keyword }
 
-// KEYWORDS
 // has to be atomic for no implicit separator (spaces)
 keyword = @{ (!(WHITESPACE | NEWLINE) ~ ANY)+ }
 
-// STRING
 single_quote_string_data = @{ (
     "\\'" // Escaped single quotes
     | (!"'" ~ ANY)
@@ -28,12 +26,6 @@ string = ${
     | ("\"" ~ double_quote_string_data ~ "\"")
 }
 
-// ARRAY
-array = {
-    "[" ~ "]"
-    | "[" ~ NEWLINE* ~ token ~ ("," ~ NEWLINE* ~ token)* ~ NEWLINE* ~ "]"
-}
-
 // comments are a # followed by
 // any number of non-newline characters
 COMMENT = _{ "#" ~ char* }
@@ -41,4 +33,4 @@ COMMENT = _{ "#" ~ char* }
 // lines are comprised of a statement
 line = { token+ }
 
-file = { SOI ~ (line ~ (NEWLINE+ ~ line)*)? ~ NEWLINE* ~ EOI }
\ No newline at end of file
+file = { SOI ~ line ~ (NEWLINE+ ~ line)* ~ NEWLINE* ~ EOI }
\ No newline at end of file
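
Aside (not part of the patch): the string rules above capture escaped quotes with the backslash still in place, and the new `parse` function in src/lib.rs then strips them with plain `str::replace` calls. A minimal standalone sketch of that behavior, checked against two of the strings in demo.rpy (the literals below are hypothetical test inputs, not library API):

    // Mirrors the un-escaping done in `parse` (src/lib.rs above).
    fn main() {
        // What single_quote_string_data captures for 'this also has escaped \'quotes\''
        let single = r"this also has escaped \'quotes\'";
        assert_eq!(single.replace("\\'", "'"), "this also has escaped 'quotes'");

        // What double_quote_string_data captures for "this is \"escaped\""
        let double = r#"this is \"escaped\""#;
        assert_eq!(double.replace("\\\"", "\""), r#"this is "escaped""#);

        println!("escape handling matches parse()");
    }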