generated from ElnuDev/rust-project

Compare commits: 359c46d776...24e33aa550 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 24e33aa550 | |
| | 46acbce60e | |

7 changed files with 62 additions and 33 deletions
.vscode/settings.json (vendored): 8 changes

```diff
@@ -1,5 +1,11 @@
 {
     "rust-analyzer.linkedProjects": [
         "./demo/Cargo.toml"
-    ]
+    ],
+    "files.exclude": {
+        "**/*.rpyc": true,
+        "**/*.rpa": true,
+        "**/*.rpymc": true,
+        "**/cache/": true
+    }
 }
```
demo/demo.csv (deleted): 5 deletions

```diff
@@ -1,5 +0,0 @@
-65279,1179403647,1463895090
-3.1415927,2.7182817,1.618034
--40,-273.15
-13,42
-65537
```
demo/demo.rpy (new file): 9 additions

```diff
@@ -0,0 +1,9 @@
+show black amogus # this is a comment
+# this is a full line comment
+what the heck
+"this is a string with a # comment"
+"this is a string over
+multiple lines"
+this is cool # comment
+
+huh
```
demo/src/main.rs: 2 changes

```diff
@@ -1,5 +1,5 @@
 use renrs;
 
 fn main() {
-    renrs::parse("demo.csv");
+    renrs::parse("demo.rpy");
 }
```
src/csv.pest (deleted): 9 deletions

```diff
@@ -1,9 +0,0 @@
-// + indicates one or more times
-field = { (ASCII_DIGIT | "." | "-")+ }
-// ~ indicates directly followed by
-// * indicates zero or more times (optional)
-record = { field ~ ("," ~ field)* }
-// SOI - start of input
-// END - end of input
-// There may be trailing newlines at the end
-file = { SOI ~ (record ~ ("\r\n" | "\n"))* ~ "\n"* ~ EOI }
```
src/lib.rs: 34 changes

```diff
@@ -4,32 +4,32 @@ use pest::Parser;
 use pest_derive::Parser;
 
 #[derive(Parser)]
-#[grammar = "csv.pest"]
-struct CSVParser;
+#[grammar = "rpy.pest"]
+struct RpyParser;
 
 pub fn parse(file_path: &str) {
     let unparsed_file = fs::read_to_string(file_path).expect("cannot find file");
-    let file = CSVParser::parse(Rule::file, &unparsed_file)
+    let file = RpyParser::parse(Rule::file, &unparsed_file)
         .expect("unsuccessful parse") // unwrap the parse result
         .next().unwrap(); // get and unwrap the `file` rule; never fails
-
-    let mut field_sum = 0.0;
-    let mut record_count: u64 = 0;
-
-    for record in file.into_inner() {
-        match record.as_rule() {
-            Rule::record => {
-                record_count += 1;
-
-                for field in record.into_inner() {
-                    field_sum += field.as_str().parse::<f64>().unwrap();
+    for line in file.into_inner() {
+        match line.as_rule() {
+            Rule::line => {
+                println!("Line:");
+                for token in line.into_inner() {
+                    match token.as_rule() {
+                        Rule::token => {
+                            println!("{}", token.as_str());
+                        },
+                        _ => {
+                            println!("{}", token.as_str());
+                        }
+                    }
                 }
+                println!()
             },
             Rule::EOI => (),
             _ => unreachable!(),
         }
     }
-
-    println!("Sum of fields: {field_sum}");
-    println!("Number of records: {record_count}");
 }
```
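Side note, not part of either commit: the new loop prints every `token` verbatim and never looks inside it. Below is a minimal sketch of how it could descend one level further to tell strings from bare keywords, using only the rules defined in src/rpy.pest (next file); the `describe_line` helper name is hypothetical, and the snippet assumes it sits inside src/lib.rs next to `parse`.

```rust
use pest::iterators::Pair;

// Hypothetical helper (not in this commit): walk one `line` pair and report
// whether each token is a string or a bare keyword, using the Rule enum that
// pest_derive generates from rpy.pest.
fn describe_line(line: Pair<Rule>) {
    for token in line.into_inner() {
        // `token = { string | keyword }`, so each token wraps exactly one pair
        let value = token.into_inner().next().expect("token wraps one rule");
        match value.as_rule() {
            // `string` is compound-atomic (`$`), so its `string_data` child is
            // still exposed; printing it drops the surrounding quotes
            Rule::string => {
                let data = value.into_inner().next().unwrap();
                println!("string: {}", data.as_str());
            }
            // `keyword` is atomic (`@`) and has no children; print it as-is
            Rule::keyword => println!("keyword: {}", value.as_str()),
            _ => unreachable!(),
        }
    }
}
```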
src/rpy.pest (new file): 28 additions

```diff
@@ -0,0 +1,28 @@
+// underscores mark are silent rules, are ignored
+WHITESPACE = _{ " " }
+
+// characters are anything but newlines
+char = { !NEWLINE ~ ANY }
+
+// token definition
+// http://pest.rs/book/grammars/syntax.html#atomic
+inner = @{ char* }
+
+token = { string | keyword }
+
+// has to be atomic for no implicit separate (spaces)
+keyword = @{ (!(WHITESPACE | NEWLINE) ~ ANY)+ }
+
+// strings cannot contain quotes
+// TODO: escaped quotes
+string_data = @{ (!"\"" ~ ANY)* }
+string = ${ "\"" ~ string_data ~ "\"" }
+
+// comments are a # followed by
+// any number of non-newline characters
+COMMENT = _{ "#" ~ char* }
+
+// lines are comprised of a statement
+line = { token+ }
+
+file = { SOI ~ line ~ (NEWLINE+ ~ line)* ~ NEWLINE* ~ EOI }
```