author    | JSDurand <mmemmew@gmail.com> | 2023-08-10 11:15:53 +0800
committer | JSDurand <mmemmew@gmail.com> | 2023-08-10 11:15:53 +0800
commit    | 55f7068e91182dfe458fad41c3251388ea461569 (patch)
tree      | 115d02d0d9d97f68e4fe24334228dbda82558491 /examples
parent    | b2ca9a285f09d90259aaa7c4d09bb7335211020b (diff)
My plan is to build a little interpreter for an esoteric programming
language as the most basic application of this package, before
embarking on some more ambitious applications.
Diffstat (limited to 'examples')
-rw-r--r-- | examples/bfinterp.rs | 84
1 file changed, 84 insertions, 0 deletions
diff --git a/examples/bfinterp.rs b/examples/bfinterp.rs
new file mode 100644
index 0000000..d5a0c2a
--- /dev/null
+++ b/examples/bfinterp.rs
@@ -0,0 +1,84 @@
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let input_str = if let Some(string) = std::env::args().skip(1).next() {
+        string
+    } else {
+        return Err("An input program is required.".into());
+    };
+
+    let input = tokenize(&input_str)?;
+
+    let grammar_string = "brainfuck = *unit
+
+unit = \">\" / \"<\" / \"+\" / \"-\" / \",\" / \";\" /
+    \"[\" brainfuck \"]\"";
+
+    let grammar: grammar::Grammar = grammar_string.parse().map_err(|err| format!("{err}"))?;
+
+    println!("grammar = {grammar}");
+
+    let atom: chain::atom::DefaultAtom =
+        chain::atom::DefaultAtom::from_grammar(grammar).map_err(|err| format!("{err}"))?;
+
+    // atom.print_viz("nfa.gv")?;
+
+    use chain::Chain;
+
+    let mut chain: chain::default::DefaultChain =
+        chain::default::DefaultChain::unit(atom).map_err(|err| format!("{err}"))?;
+
+    // let input = [0, 1, 6, 2, 6, 3, 4, 6, 5, 2, 7, 4, 7, 3, 2, 7, 2];
+
+    for (index, token) in input.iter().copied().enumerate() {
+        chain.chain(token, index, false)?;
+        // if (5..=12).contains(&index) {
+        //     chain.print_current(&format!("chain {index}.gv"))?;
+        // }
+    }
+
+    let _extracted = chain.end_of_input(input.len(), input[input.len() - 1])?;
+
+    // use graph::Graph;
+
+    // extracted.print_viz("bf.gv")?;
+
+    Ok(())
+}
+
+fn tokenize(input: &str) -> Result<Vec<usize>, String> {
+    let mut result: Vec<usize> = Vec::with_capacity(input.len());
+
+    for (index, c) in input.chars().enumerate() {
+        match c {
+            '>' => {
+                result.push(0);
+            }
+            '<' => {
+                result.push(1);
+            }
+            '+' => {
+                result.push(2);
+            }
+            '-' => {
+                result.push(3);
+            }
+            ',' => {
+                result.push(4);
+            }
+            ';' => {
+                result.push(5);
+            }
+            '[' => {
+                result.push(6);
+            }
+            ']' => {
+                result.push(7);
+            }
+            ' ' | '\t' | '\n' | '\r' => {}
+            _ => {
+                return Err(format!("Unknown character {c} at {index}"));
+            }
+        }
+    }
+
+    Ok(result)
+}
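
As a quick sanity check of the tokenizer, a minimal sketch of a test that could be appended to examples/bfinterp.rs is shown below; the module and test names are illustrative, and the expected indices follow directly from the match arms in tokenize.

#[cfg(test)]
mod tests {
    use super::tokenize;

    #[test]
    fn tokenize_maps_commands_and_skips_whitespace() {
        // '[' -> 6, '+' -> 2, '>' -> 0, '-' -> 3, ']' -> 7; spaces are ignored.
        assert_eq!(tokenize("[+ > -]").unwrap(), vec![6, 2, 0, 3, 7]);
        // Any character outside the eight commands and whitespace is rejected,
        // with the error reporting the offending position.
        assert!(tokenize("[+x]").is_err());
    }
}

Assuming the file is built as a Cargo example of this crate, it would typically be run as cargo run --example bfinterp -- "[->+<]", with the program text supplied as the single command-line argument that main reads.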