From debcf6ad6cc5cdb362ad1655008ab0dd89d4d14a Mon Sep 17 00:00:00 2001 From: MCorange Date: Sat, 21 Dec 2024 05:10:03 +0200 Subject: [PATCH] MCLang, now with testing! --- Cargo.lock | 7 + Cargo.toml | 1 + src/bin/test/logger.rs | 63 +++ src/bin/test/main.rs | 208 +++++++++- src/main.rs | 6 +- src/tokeniser/mod.rs | 75 ++-- tests/parser/enumerations.exp | 10 + tests/parser/{enums.mcl => enumerations.mcl} | 0 tests/parser/expressions.exp | 10 + tests/parser/{expr.mcl => expressions.mcl} | 0 tests/parser/functions.exp | 10 + tests/parser/{fn.mcl => functions.mcl} | 0 tests/parser/if-statements.exp | 10 + tests/parser/{if.mcl => if-statements.mcl} | 0 tests/parser/loops.exp | 10 + tests/parser/structs.exp | 10 + tests/tokeniser/comments.exp | 1 + tests/tokeniser/comments.mcl | 7 + tests/tokeniser/delimiters.exp | 62 +++ tests/tokeniser/delimiters.mcl | 3 + tests/tokeniser/keywords.exp | 212 ++++++++++ tests/tokeniser/keywords.mcl | 21 + tests/tokeniser/literals.exp | 96 +++++ tests/tokeniser/literals.mcl | 8 + tests/tokeniser/punctuation.exp | 382 +++++++++++++++++++ tests/tokeniser/punctuation.mcl | 39 ++ 26 files changed, 1210 insertions(+), 41 deletions(-) create mode 100644 src/bin/test/logger.rs create mode 100644 tests/parser/enumerations.exp rename tests/parser/{enums.mcl => enumerations.mcl} (100%) create mode 100644 tests/parser/expressions.exp rename tests/parser/{expr.mcl => expressions.mcl} (100%) create mode 100644 tests/parser/functions.exp rename tests/parser/{fn.mcl => functions.mcl} (100%) create mode 100644 tests/parser/if-statements.exp rename tests/parser/{if.mcl => if-statements.mcl} (100%) create mode 100644 tests/parser/loops.exp create mode 100644 tests/parser/structs.exp create mode 100644 tests/tokeniser/comments.exp create mode 100644 tests/tokeniser/comments.mcl create mode 100644 tests/tokeniser/delimiters.exp create mode 100644 tests/tokeniser/delimiters.mcl create mode 100644 tests/tokeniser/keywords.exp create mode 100644 tests/tokeniser/keywords.mcl create mode 100644 tests/tokeniser/literals.exp create mode 100644 tests/tokeniser/literals.mcl create mode 100644 tests/tokeniser/punctuation.exp create mode 100644 tests/tokeniser/punctuation.mcl diff --git a/Cargo.lock b/Cargo.lock index 5487249..9b644b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -63,6 +63,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "camino" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" + [[package]] name = "clap" version = "4.5.23" @@ -132,6 +138,7 @@ name = "mclangc" version = "0.1.0" dependencies = [ "anyhow", + "camino", "clap", "lazy_static", "parse_int", diff --git a/Cargo.toml b/Cargo.toml index 30d4b11..aefdba0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0.94" +camino = "1.1.9" clap = { version = "4.5.23", features = ["derive"] } lazy_static = "1.5.0" parse_int = "0.6.0" diff --git a/src/bin/test/logger.rs b/src/bin/test/logger.rs new file mode 100644 index 0000000..660740e --- /dev/null +++ b/src/bin/test/logger.rs @@ -0,0 +1,63 @@ +#[repr(u8)] +#[derive(Debug, Default)] +pub enum Level { + Off = 0, + Error, + Warn, + #[default] + Info, + Help, + Debug +} + +const C_RESET: &'static str = "\x1B[0m"; +const C_ERROR: &'static str = "\x1B[1;31m"; +const C_WARN: &'static str = 
"\x1B[1;33m"; +const C_INFO: &'static str = "\x1B[1;32m"; +const C_DEBUG: &'static str = "\x1B[1;35m"; +const C_HELP: &'static str = "\x1B[1;36m"; + +pub fn _log(level: Level, str: &str) { + match level { + Level::Off => return, + Level::Error => println!("{C_ERROR}error{C_RESET}: {str}"), + Level::Warn => println!("{C_WARN}warn{C_RESET}: {str}"), + Level::Info => println!("{C_INFO}info{C_RESET}: {str}"), + Level::Help => println!("{C_HELP}help{C_RESET}: {str}"), + Level::Debug => println!("{C_DEBUG}debug{C_RESET}: {str}"), + } +} + +#[macro_use] +pub mod log { + #[macro_export] + macro_rules! error { + ($($arg:tt)*) => { + crate::logger::_log(crate::logger::Level::Error, &format!($($arg)*)) + }; + } + #[macro_export] + macro_rules! warn { + ($($arg:tt)*) => { + crate::logger::_log(crate::logger::Level::Warn, &format!($($arg)*)) + }; + } + #[macro_export] + macro_rules! info { + ($($arg:tt)*) => { + crate::logger::_log(crate::logger::Level::Info, &format!($($arg)*)) + }; + } + #[macro_export] + macro_rules! help { + ($($arg:tt)*) => { + crate::logger::_log(crate::logger::Level::Help, &format!($($arg)*)) + }; + } + #[macro_export] + macro_rules! debug { + ($($arg:tt)*) => { + crate::logger::_log(crate::logger::Level::Debug, &format!($($arg)*)) + }; + } +} diff --git a/src/bin/test/main.rs b/src/bin/test/main.rs index f024d80..230423d 100644 --- a/src/bin/test/main.rs +++ b/src/bin/test/main.rs @@ -1,8 +1,19 @@ +use std::{collections::HashMap, ffi::OsStr, io::Write, os::unix::ffi::OsStrExt, path::{Path, PathBuf}, process::ExitCode}; +use anyhow::bail; +use camino::Utf8PathBuf; +use clap::Parser; +use mclangc; + +#[macro_use] +mod logger; /// Testing program for mclangc, taken inspiration from porth, which was made by tsoding :3 #[derive(Debug, clap::Parser)] #[command(version, about, long_about = None)] struct CliArgs { + /// Path to the test folder + #[arg(long, short, default_value="./tests")] + path: Utf8PathBuf, #[clap(subcommand)] cmd: CliCmd } @@ -15,9 +26,196 @@ pub enum CliCmd { Compile } - - -fn main() -> anyhow::Result<()> { - - Ok(()) +struct CollectedFiles { + tokeniser: HashMap, + parser: HashMap, +} + +enum ExpTyp { + Text((PathBuf, String)), + Path(PathBuf), +} + +impl ExpTyp { + pub fn path(&self) -> &Path { + match self { + Self::Text((p, _)) => p, + Self::Path(p) => p, + } + } +} + +fn collect_files_for_single_type(path: &Path) -> anyhow::Result> { + let mut files = HashMap::new(); + for file in path.read_dir()? 
{ + let file = file?; + if file.file_type()?.is_file() { + if file.path().extension() != Some(OsStr::from_bytes(b"mcl")) { + continue; + } + let src = std::fs::read_to_string(file.path())?; + let exp_p = file.path().with_extension("exp"); + let name = file.path().with_extension("").file_name().unwrap().to_string_lossy().to_string(); + if exp_p.exists() { + let exp = std::fs::read_to_string(&exp_p)?; + files.insert(name, (src, ExpTyp::Text((exp_p, exp)))); + } else { + files.insert(name, (src, ExpTyp::Path(exp_p))); + } + } + } + Ok(files) +} + +fn collect_all_files(path: &Path) -> anyhow::Result<CollectedFiles> { + let path = path.to_path_buf(); + let mut tkn = path.clone(); + tkn.push("tokeniser"); + let mut parser = path.clone(); + parser.push("parser"); + Ok(CollectedFiles { + tokeniser: collect_files_for_single_type(&tkn)?, + parser: collect_files_for_single_type(&parser)?, + }) +} + +fn test_tokeniser(cf: &CollectedFiles, compile: bool) -> anyhow::Result<usize> { + let mut err_count = 0; + for (name, (src, expected)) in &cf.tokeniser { + let tokens = match mclangc::tokeniser::tokenise(src, &format!("tokeniser/{name}.mcl")) { + Ok(v) => v, + Err(e) => { + crate::error!("Test tokeniser/{name} had an error: {e}"); + err_count += 1; + continue; + } + }; + if compile { + let path = expected.path(); + if path.exists() { + crate::info!("Test tokeniser/{name} already has a *.exp file, overwriting"); + } else { + crate::info!("Test tokeniser/{name} doesn't have a *.exp file, creating it"); + } + let mut fp = std::fs::File::options() + .write(true) + .truncate(true) + .create(true) + .open(path)?; + write!(fp, "{tokens:#?}")?; + } else { + let ExpTyp::Text((_, exp)) = expected else { + crate::warn!("Test tokeniser/{name} doesn't have a *.exp file, please create it by running 'test compile'"); + continue; + }; + if format!("{tokens:#?}") == *exp { + crate::info!("Test tokeniser/{name}: OK"); + } else { + crate::error!("Test tokeniser/{name}: FAIL"); + crate::debug!("Expected: {exp}"); + crate::debug!("Got: {tokens:#?}"); + err_count += 1; + } + } + } + + + Ok(err_count) +} + +fn test_parser(cf: &CollectedFiles, compile: bool) -> anyhow::Result<usize> { + let mut err_count = 0; + for (name, (src, expected)) in &cf.parser { + let tokens = match mclangc::tokeniser::tokenise(src, &format!("parser/{name}.mcl")) { + Ok(v) => v, + Err(e) => { + crate::error!("Test parser/{name} had an error: {e}"); + err_count += 1; + continue; + } + }; + let ast = match mclangc::parser::parse_program(tokens) { + Ok(v) => v, + Err(e) => { + crate::error!("Test parser/{name} had an error: {e}"); + err_count += 1; + continue; + } + }; + if compile { + let path = expected.path(); + if path.exists() { + crate::info!("Test parser/{name} already has a *.exp file, overwriting"); + } else { + crate::info!("Test parser/{name} doesn't have a *.exp file, creating it"); + } + let mut fp = std::fs::File::options() + .write(true) + .truncate(true) + .create(true) + .open(path)?; + write!(fp, "{ast:#?}")?; + } else { + let ExpTyp::Text((_, exp)) = expected else { + crate::warn!("Test parser/{name} doesn't have a *.exp file, please create it by running 'test compile'"); + continue; + }; + if format!("{ast:#?}") == *exp { + crate::info!("Test parser/{name}: OK"); + } else { + crate::error!("Test parser/{name}: FAIL"); + crate::debug!("Expected: {exp}"); + crate::debug!("Got: {ast:#?}"); + err_count += 1; + } + } + } + + Ok(err_count) +} + +fn test(cf: &CollectedFiles, compile: bool) -> anyhow::Result<usize> { + let mut err_count = test_tokeniser(cf, compile)?; + err_count += test_parser(cf, 
compile)?; + + Ok(err_count) +} + +fn main() -> ExitCode { + let cli = CliArgs::parse(); + let cf = match collect_all_files(cli.path.as_std_path()) { + Ok(v) => v, + Err(e) => { + crate::error!("Failed to read directory '{}': {e}. Do you have permission to read it?", cli.path); + return ExitCode::FAILURE; + } + }; + let ec = match cli.cmd { + CliCmd::Run => { + match test(&cf, false) { + Ok(v) => v, + Err(e) => { + crate::error!("Had an error: {e}"); + return ExitCode::FAILURE; + } + } + } + CliCmd::Compile => { + match test(&cf, true) { + Ok(v) => v, + Err(e) => { + crate::error!("Had an error: {e}"); + return ExitCode::FAILURE; + } + } + } + }; + + if ec > 0 { + crate::error!("Testing FAILED, had {ec} errors"); + return ExitCode::FAILURE; + } else { + crate::info!("Testing SUCCEEDED, had 0 errors"); + } + ExitCode::SUCCESS } diff --git a/src/main.rs b/src/main.rs index 79ff7cd..077467a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -4,8 +4,8 @@ fn main() -> anyhow::Result<()> { let data = std::fs::read_to_string("test.mcl").unwrap(); - let tokens = mclangc::tokeniser::tokenise(&data)?; - let prog = parser::parse_program(tokens)?; - validator::validate_code(&prog); + let tokens = mclangc::tokeniser::tokenise(&data, "test.mcl")?; + let prog = mclangc::parser::parse_program(tokens)?; + mclangc::validator::validate_code(&prog); Ok(()) } diff --git a/src/tokeniser/mod.rs b/src/tokeniser/mod.rs index 45d0b0d..577c6c3 100644 --- a/src/tokeniser/mod.rs +++ b/src/tokeniser/mod.rs @@ -33,8 +33,8 @@ impl Token { } -pub fn tokenise(s: &str) -> anyhow::Result<Vec<Token>> { - let mut loc = Loc::default(); +pub fn tokenise(s: &str, file_p: &str) -> anyhow::Result<Vec<Token>> { + let mut loc = Loc::new(file_p, 1, 1); let mut tokens = Vec::new(); let chars: Vec<_> = s.chars().collect(); let mut chars = chars.iter().peekable(); @@ -70,44 +70,52 @@ pub fn tokenise(s: &str) -> anyhow::Result<Vec<Token>> { // tokens.push(Token::new(TokenType::Comment(Comment::Line(buf.clone())), &loc)); } '\n' => loc.inc_line(), - '"' | '\'' | - 'c' if *c != 'c' || chars.peek() == Some(&&'"') => { - let str_typ = *c; - let mut sc = *c; - if *c == 'c' { - sc = '"'; - chars.peek(); - } + '"' => { let mut last = '\0'; let mut buf = String::new(); while let Some(c) = chars.next_if(|v| **v != '\n') { loc.inc_col(); - if *c == sc && last != '\\' { + if *c == '"' && last != '\\' { break; } buf.push(*c); last = *c; } - - match str_typ { - '"' => { - tokens.push(Token::new(TokenType::string(&buf, false), &loc)); + tokens.push(Token::new(TokenType::string(&buf, false), &loc)); + } + '\'' => { + let mut last = '\0'; + let mut buf = String::new(); + while let Some(c) = chars.next_if(|v| **v != '\n') { + loc.inc_col(); + if *c == '\'' && last != '\\' { + break; } - 'c' => { - tokens.push(Token::new(TokenType::string(&buf, true), &loc)); - } - '\'' => { - let buf = buf - .replace("\\n", "\n") - .replace("\\r", "\r"); - if buf.len() > 1 { - lerror!(&loc, "Chars can only have 1 byte"); - bail!("") - } - tokens.push(Token::new(TokenType::char(buf.chars().nth(0).unwrap()), &loc)); - } - _ => unreachable!() + buf.push(*c); + last = *c; } + let buf = buf + .replace("\\n", "\n") + .replace("\\r", "\r"); + if buf.len() > 1 { + lerror!(&loc, "Chars can only have 1 byte"); + bail!("") + } + tokens.push(Token::new(TokenType::char(buf.chars().nth(0).unwrap()), &loc)); + } + 'c' if chars.peek() == Some(&&'"') => { + chars.next(); + let mut last = '\0'; + let mut buf = String::new(); + while let Some(c) = chars.next_if(|v| **v != '\n') { + loc.inc_col(); + if *c == '"' && last != '\\' 
{ + break; + } + buf.push(*c); + last = *c; + } + tokens.push(Token::new(TokenType::string(&buf, true), &loc)); } 'a'..='z' | 'A'..='Z' | '_' => { let mut buf = String::new(); @@ -139,12 +147,13 @@ 'o' => radix = 8, _ => (), } + }, None => { tokens.push(Token::new(TokenType::number(parse(&buf).unwrap(), radix, signed), &loc)); } } - while let Some(c) = chars.next_if(|v| matches!(**v, '0'..='9' | '.' | 'a'..='f' | 'A'..='F')) { + while let Some(c) = chars.next_if(|v| matches!(**v, '0'..='9' | '.' | 'a'..='f' | 'A'..='F' | 'x' | 'o')) { loc.inc_col(); buf.push(*c); } @@ -178,7 +187,7 @@ } tokens.push(Token::new(TokenType::number(parse(&buf).unwrap(), radix, signed), &loc)); } - 16 => { + 16 => { if buf.strip_prefix("0x").expect("Unreachable") .chars().filter(|v| !matches!(v, '0'..='9' | 'a'..='f' | 'A'..='F')).collect::<Vec<_>>().len() > 0 { lerror!(&loc, "Invalid character in hex number"); @@ -268,8 +277,8 @@ lazy_static::lazy_static!( ("|", TokenType::Punct(Punctuation::Or)), (">", TokenType::Punct(Punctuation::Gt)), ("<", TokenType::Punct(Punctuation::Lt)), - (">=", TokenType::Punct(Punctuation::Ge)), - ("<=", TokenType::Punct(Punctuation::Le)), + (">=", TokenType::Punct(Punctuation::Ge)), + ("<=", TokenType::Punct(Punctuation::Le)), ("^", TokenType::Punct(Punctuation::Xor)), ("+=", TokenType::Punct(Punctuation::AddEq)), ("-=", TokenType::Punct(Punctuation::SubEq)), diff --git a/tests/parser/enumerations.exp b/tests/parser/enumerations.exp new file mode 100644 index 0000000..fe2419b --- /dev/null +++ b/tests/parser/enumerations.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/parser/enums.mcl b/tests/parser/enumerations.mcl similarity index 100% rename from tests/parser/enums.mcl rename to tests/parser/enumerations.mcl diff --git a/tests/parser/expressions.exp b/tests/parser/expressions.exp new file mode 100644 index 0000000..fe2419b --- /dev/null +++ b/tests/parser/expressions.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/parser/expr.mcl b/tests/parser/expressions.mcl similarity index 100% rename from tests/parser/expr.mcl rename to tests/parser/expressions.mcl diff --git a/tests/parser/functions.exp b/tests/parser/functions.exp new file mode 100644 index 0000000..fe2419b --- /dev/null +++ b/tests/parser/functions.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/parser/fn.mcl b/tests/parser/functions.mcl similarity index 100% rename from tests/parser/fn.mcl rename to tests/parser/functions.mcl diff --git a/tests/parser/if-statements.exp b/tests/parser/if-statements.exp new file mode 100644 index 0000000..fe2419b --- /dev/null +++ b/tests/parser/if-statements.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/parser/if.mcl b/tests/parser/if-statements.mcl similarity index 100% rename from tests/parser/if.mcl rename to tests/parser/if-statements.mcl diff --git a/tests/parser/loops.exp b/tests/parser/loops.exp new file mode 100644 index 0000000..fe2419b --- 
/dev/null +++ b/tests/parser/loops.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/parser/structs.exp b/tests/parser/structs.exp new file mode 100644 index 0000000..fe2419b --- /dev/null +++ b/tests/parser/structs.exp @@ -0,0 +1,10 @@ +Program { + ast: Block( + [], + ), + structs: {}, + enums: {}, + types: {}, + functions: {}, + member_functions: {}, +} \ No newline at end of file diff --git a/tests/tokeniser/comments.exp b/tests/tokeniser/comments.exp new file mode 100644 index 0000000..0637a08 --- /dev/null +++ b/tests/tokeniser/comments.exp @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/tests/tokeniser/comments.mcl b/tests/tokeniser/comments.mcl new file mode 100644 index 0000000..3a4d842 --- /dev/null +++ b/tests/tokeniser/comments.mcl @@ -0,0 +1,7 @@ + +// Hello, this is a single line comment + +/* + And this is a multiline comment, which is + useful for longer documentation +*/ diff --git a/tests/tokeniser/delimiters.exp b/tests/tokeniser/delimiters.exp new file mode 100644 index 0000000..1310a98 --- /dev/null +++ b/tests/tokeniser/delimiters.exp @@ -0,0 +1,62 @@ +[ + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 3, + col: 4, + }, + tt: Delim( + ParenR, + ), + }, + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 3, + col: 2, + }, + tt: Delim( + ParenL, + ), + }, + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 2, + col: 4, + }, + tt: Delim( + CurlyR, + ), + }, + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 2, + col: 2, + }, + tt: Delim( + CurlyL, + ), + }, + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 1, + col: 4, + }, + tt: Delim( + SquareR, + ), + }, + Token { + loc: Loc { + file: "tokeniser/delimiters.mcl", + line: 1, + col: 2, + }, + tt: Delim( + SquareL, + ), + }, +] \ No newline at end of file diff --git a/tests/tokeniser/delimiters.mcl b/tests/tokeniser/delimiters.mcl new file mode 100644 index 0000000..a593f9c --- /dev/null +++ b/tests/tokeniser/delimiters.mcl @@ -0,0 +1,3 @@ +[ ] +{ } +( ) diff --git a/tests/tokeniser/keywords.exp b/tests/tokeniser/keywords.exp new file mode 100644 index 0000000..ec967f2 --- /dev/null +++ b/tests/tokeniser/keywords.exp @@ -0,0 +1,212 @@ +[ + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 21, + col: 5, + }, + tt: Keyword( + Loop, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 20, + col: 3, + }, + tt: Keyword( + As, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 19, + col: 7, + }, + tt: Keyword( + Return, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 18, + col: 7, + }, + tt: Keyword( + Extern, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 17, + col: 8, + }, + tt: Keyword( + Include, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 16, + col: 6, + }, + tt: Keyword( + False, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 15, + col: 5, + }, + tt: Keyword( + True, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 14, + col: 7, + }, + tt: Keyword( + Static, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 13, + col: 4, + }, + tt: Keyword( + Mut, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 12, + col: 6, + }, + tt: Keyword( + Const, + ), + }, + 
Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 11, + col: 4, + }, + tt: Keyword( + Let, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 10, + col: 9, + }, + tt: Keyword( + Continue, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 9, + col: 6, + }, + tt: Keyword( + Break, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 8, + col: 4, + }, + tt: Keyword( + For, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 7, + col: 6, + }, + tt: Keyword( + While, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 6, + col: 5, + }, + tt: Keyword( + Type, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 5, + col: 5, + }, + tt: Keyword( + Enum, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 4, + col: 7, + }, + tt: Keyword( + Struct, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 3, + col: 5, + }, + tt: Keyword( + Else, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 2, + col: 3, + }, + tt: Keyword( + If, + ), + }, + Token { + loc: Loc { + file: "tokeniser/keywords.mcl", + line: 1, + col: 3, + }, + tt: Keyword( + Fn, + ), + }, +] \ No newline at end of file diff --git a/tests/tokeniser/keywords.mcl b/tests/tokeniser/keywords.mcl new file mode 100644 index 0000000..82f54b7 --- /dev/null +++ b/tests/tokeniser/keywords.mcl @@ -0,0 +1,21 @@ +fn +if +else +struct +enum +type +while +for +break +continue +let +const +mut +static +true +false +include +extern +return +as +loop diff --git a/tests/tokeniser/literals.exp b/tests/tokeniser/literals.exp new file mode 100644 index 0000000..a514b6e --- /dev/null +++ b/tests/tokeniser/literals.exp @@ -0,0 +1,96 @@ +[ + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 7, + col: 11, + }, + tt: Number( + Number { + val: 173, + base: 2, + signed: false, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 6, + col: 8, + }, + tt: Number( + Number { + val: 13633, + base: 8, + signed: false, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 5, + col: 9, + }, + tt: Number( + Number { + val: 16759299, + base: 16, + signed: false, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 4, + col: 3, + }, + tt: Number( + Number { + val: 21, + base: 10, + signed: false, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 3, + col: 22, + }, + tt: String( + TString { + val: "this is a c string!", + cstr: true, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 2, + col: 27, + }, + tt: String( + TString { + val: "this is a normal string!", + cstr: false, + }, + ), + }, + Token { + loc: Loc { + file: "tokeniser/literals.mcl", + line: 1, + col: 4, + }, + tt: Char( + Char( + 'c', + ), + ), + }, +] \ No newline at end of file diff --git a/tests/tokeniser/literals.mcl b/tests/tokeniser/literals.mcl new file mode 100644 index 0000000..611ae66 --- /dev/null +++ b/tests/tokeniser/literals.mcl @@ -0,0 +1,8 @@ +'c' +"this is a normal string!" +c"this is a c string!" 
+21 +0xFfbA03 +0o32501 +0b10101101 + diff --git a/tests/tokeniser/punctuation.exp b/tests/tokeniser/punctuation.exp new file mode 100644 index 0000000..1bce0b7 --- /dev/null +++ b/tests/tokeniser/punctuation.exp @@ -0,0 +1,382 @@ +[ + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 38, + col: 2, + }, + tt: Punct( + Pathaccess, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 37, + col: 2, + }, + tt: Punct( + Fieldaccess, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 36, + col: 2, + }, + tt: Punct( + Neq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 35, + col: 2, + }, + tt: Punct( + EqEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 34, + col: 2, + }, + tt: Punct( + Eq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 33, + col: 2, + }, + tt: Punct( + XorEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 32, + col: 2, + }, + tt: Punct( + OrEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 31, + col: 2, + }, + tt: Punct( + AndEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 30, + col: 2, + }, + tt: Punct( + ShrEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 29, + col: 2, + }, + tt: Punct( + ShlEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 28, + col: 2, + }, + tt: Punct( + ModEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 27, + col: 2, + }, + tt: Punct( + MulEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 26, + col: 2, + }, + tt: Punct( + DivEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 25, + col: 2, + }, + tt: Punct( + SubEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 24, + col: 2, + }, + tt: Punct( + AddEq, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 23, + col: 2, + }, + tt: Punct( + Xor, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 22, + col: 2, + }, + tt: Punct( + Le, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 21, + col: 2, + }, + tt: Punct( + Ge, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 20, + col: 2, + }, + tt: Punct( + Lt, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 19, + col: 2, + }, + tt: Punct( + Gt, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 18, + col: 2, + }, + tt: Punct( + Or, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 17, + col: 2, + }, + tt: Punct( + OrOr, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 16, + col: 2, + }, + tt: Punct( + AndAnd, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 15, + col: 2, + }, + tt: Punct( + Shr, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 14, + col: 2, + }, + tt: Punct( + Shl, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 13, + col: 2, + }, + tt: Punct( + Mod, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 12, + col: 2, + }, + tt: Punct( + Div, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 11, + col: 2, + }, + tt: Punct( + Not, + ), + }, + Token { + 
loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 10, + col: 2, + }, + tt: Punct( + Star, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 9, + col: 2, + }, + tt: Punct( + Ampersand, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 8, + col: 2, + }, + tt: Punct( + Comma, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 7, + col: 2, + }, + tt: Punct( + Minus, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 6, + col: 2, + }, + tt: Punct( + Plus, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 5, + col: 2, + }, + tt: Punct( + FatArrow, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 4, + col: 2, + }, + tt: Punct( + Arrow, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 3, + col: 2, + }, + tt: Punct( + Pathaccess, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 2, + col: 2, + }, + tt: Punct( + Colon, + ), + }, + Token { + loc: Loc { + file: "tokeniser/punctuation.mcl", + line: 1, + col: 2, + }, + tt: Punct( + Semi, + ), + }, +] \ No newline at end of file diff --git a/tests/tokeniser/punctuation.mcl b/tests/tokeniser/punctuation.mcl new file mode 100644 index 0000000..8ab5df8 --- /dev/null +++ b/tests/tokeniser/punctuation.mcl @@ -0,0 +1,39 @@ +; +: +:: +-> +=> ++ +- +, +& +* +! +/ +% +<< +>> +&& +|| +| +> +< +>= +<= +^ ++= +-= +/= +*= +%= +<<= +>>= +&= +|= +^= += +== +!= +. +:: +