feat(epic): implement 1-1-lexer-and-tokenizer

Story: _bmad-output/implementation-artifacts/1-1-lexer-and-tokenizer.md
This commit is contained in:
2026-03-16 22:46:15 -04:00
parent bfe0e01254
commit baee33b2f1
7 changed files with 1006 additions and 1 deletions

733
src/lexer.rs Normal file
View File

@@ -0,0 +1,733 @@
use crate::token::{Operator, Span, Token, TokenKind};
/// A line-oriented lexer for CalcPad expressions.
///
/// The lexer scans a single line of input and produces a vector of [`Token`]s,
/// each annotated with its byte [`Span`] within the input.
pub struct Lexer<'a> {
    /// The input string being scanned.
    input: &'a str,
    /// The input as a byte slice for fast access.
    bytes: &'a [u8],
    /// Current byte offset into the input.
    pos: usize,
    /// A pending token to emit on the next call to `scan_token`.
    /// Set by `scan_number` when a unit suffix (e.g. the "kg" in "5kg")
    /// directly follows a number, so the Unit token comes out right after
    /// its Number.
    pending: Option<Token>,
}
impl<'a> Lexer<'a> {
/// Create a new lexer for the given input line.
pub fn new(input: &'a str) -> Self {
Self {
input,
bytes: input.as_bytes(),
pos: 0,
pending: None,
}
}
/// Tokenize the entire input line into a token stream.
///
/// If the input contains no calculable tokens, the entire line is returned
/// as a single [`TokenKind::Text`] token.
pub fn tokenize(&mut self) -> Vec<Token> {
    // A blank (or whitespace-only) line yields no tokens at all.
    if self.input.trim().is_empty() {
        return Vec::new();
    }
    // A line whose first non-whitespace characters are `//` is one Comment token.
    let trimmed = self.input.trim_start();
    if trimmed.starts_with("//") {
        let offset = self.input.len() - trimmed.len();
        let body = self.input[offset + 2..].to_string();
        return vec![Token::new(
            TokenKind::Comment(body),
            Span::new(offset, self.input.len()),
        )];
    }
    let mut out: Vec<Token> = Vec::new();
    loop {
        // A queued token (e.g. a Unit held back by scan_number) goes out first.
        if let Some(queued) = self.pending.take() {
            out.push(queued);
            continue;
        }
        if self.pos >= self.bytes.len() {
            break;
        }
        self.skip_whitespace();
        if self.pos >= self.bytes.len() {
            break;
        }
        if let Some(tok) = self.scan_token() {
            out.push(tok);
        }
    }
    // A line with nothing calculable — only words, or nothing recognized at
    // all — is treated as plain text and returned as a single Text token.
    let calculable = out.iter().any(|t| {
        matches!(
            t.kind,
            TokenKind::Number(_)
                | TokenKind::Operator(_)
                | TokenKind::Assign
                | TokenKind::Percent(_)
                | TokenKind::CurrencySymbol(_)
                | TokenKind::Unit(_)
                | TokenKind::LParen
                | TokenKind::RParen
        )
    });
    if out.is_empty() || !calculable {
        return vec![Token::new(
            TokenKind::Text(self.input.to_string()),
            Span::new(0, self.input.len()),
        )];
    }
    out
}
/// Skip ASCII whitespace characters.
fn skip_whitespace(&mut self) {
    while self.peek().is_some_and(|b| b.is_ascii_whitespace()) {
        self.pos += 1;
    }
}
/// Peek at the current byte without advancing.
fn peek(&self) -> Option<u8> {
    if self.pos < self.bytes.len() {
        Some(self.bytes[self.pos])
    } else {
        None
    }
}
/// Peek at the byte at offset `n` from current position.
fn peek_ahead(&self, n: usize) -> Option<u8> {
    let idx = self.pos + n;
    self.bytes.get(idx).copied()
}
/// Advance by one byte and return it; `None` at end of input.
fn advance(&mut self) -> Option<u8> {
    let b = self.peek()?;
    self.pos += 1;
    Some(b)
}
/// Check if the remaining input starting at `pos` matches the given string
/// (case-insensitive) followed by a word boundary.
///
/// The comparison works on raw bytes rather than `&str` slices: slicing
/// `&remaining[..word.len()]` panics when that offset falls inside a
/// multi-byte UTF-8 character (e.g. matching "plus" against "plué"), while
/// byte slices have no char-boundary requirement.
fn matches_word(&self, word: &str) -> bool {
    let remaining = &self.bytes[self.pos..];
    let w = word.as_bytes();
    if remaining.len() < w.len() {
        return false;
    }
    // Case-insensitive prefix comparison on bytes — safe at any offset.
    if !remaining[..w.len()].eq_ignore_ascii_case(w) {
        return false;
    }
    // Word boundary: end of input, or a byte that cannot continue an
    // identifier (alphanumeric or underscore).
    match remaining.get(w.len()) {
        None => true,
        Some(&next) => !next.is_ascii_alphanumeric() && next != b'_',
    }
}
/// Scan a single token from the current position.
///
/// Returns `None` when the current character is not recognized; in that
/// case the character is skipped and the caller simply tries again.
fn scan_token(&mut self) -> Option<Token> {
    let b = self.peek()?;
    // Comment: // ...
    if b == b'/' && self.peek_ahead(1) == Some(b'/') {
        return Some(self.scan_comment());
    }
    // Currency symbols (multi-byte UTF-8 or ASCII $)
    if let Some(tok) = self.try_scan_currency() {
        return Some(tok);
    }
    // Numbers: a digit, or a '.' immediately followed by a digit.
    if b.is_ascii_digit() || (b == b'.' && self.peek_ahead(1).is_some_and(|c| c.is_ascii_digit())) {
        return Some(self.scan_number());
    }
    // Single-character operators and punctuation.
    match b {
        b'+' => return Some(self.single_char_token(TokenKind::Operator(Operator::Plus))),
        b'-' => return Some(self.single_char_token(TokenKind::Operator(Operator::Minus))),
        b'*' => return Some(self.single_char_token(TokenKind::Operator(Operator::Star))),
        b'/' => return Some(self.single_char_token(TokenKind::Operator(Operator::Slash))),
        b'^' => return Some(self.single_char_token(TokenKind::Operator(Operator::Caret))),
        b'(' => return Some(self.single_char_token(TokenKind::LParen)),
        b')' => return Some(self.single_char_token(TokenKind::RParen)),
        b'=' => return Some(self.single_char_token(TokenKind::Assign)),
        _ => {}
    }
    // Alphabetic — could be natural language operator, keyword, unit, or identifier
    if b.is_ascii_alphabetic() || b == b'_' {
        return Some(self.scan_word());
    }
    // Percent sign alone (rare — usually after a number, but handle it)
    if b == b'%' {
        return Some(self.single_char_token(TokenKind::Operator(Operator::Percent)));
    }
    // Unknown character — skip the *entire* UTF-8 scalar, not a single byte.
    // Advancing one byte through a multi-byte character (e.g. 'é') would
    // leave `pos` off a char boundary, and the next `&self.input[self.pos..]`
    // slice (in try_scan_currency / matches_word) would panic.
    match self.input[self.pos..].chars().next() {
        Some(c) => self.pos += c.len_utf8(),
        None => self.pos = self.bytes.len(),
    }
    None
}
/// Consume a single byte and produce a token of the given kind.
fn single_char_token(&mut self, kind: TokenKind) -> Token {
    let start = self.pos;
    self.pos += 1; // callers guarantee a byte is present here
    Token::new(kind, Span::new(start, start + 1))
}
/// Scan a `//` comment to end of line.
fn scan_comment(&mut self) -> Token {
    let start = self.pos;
    // The comment body is everything after the two slashes.
    let body = self.input[start + 2..].to_string();
    // A comment always runs to the end of the line.
    self.pos = self.bytes.len();
    Token::new(TokenKind::Comment(body), Span::new(start, self.pos))
}
/// Try to scan a currency symbol. Returns `None` if current position is not a currency symbol.
///
/// Every candidate string below must be non-empty: an empty entry would
/// match any input via `starts_with("")` without advancing `pos`, and
/// `tokenize` would loop forever emitting zero-width tokens.
fn try_scan_currency(&mut self) -> Option<Token> {
    let start = self.pos;
    let remaining = &self.input[self.pos..];
    // Multi-char: R$ — checked before plain '$' so it is not split in two.
    if remaining.starts_with("R$") {
        self.pos += 2;
        return Some(Token::new(
            TokenKind::CurrencySymbol("R$".to_string()),
            Span::new(start, self.pos),
        ));
    }
    // Single ASCII: $
    if remaining.starts_with('$') {
        self.pos += 1;
        return Some(Token::new(
            TokenKind::CurrencySymbol("$".to_string()),
            Span::new(start, self.pos),
        ));
    }
    // Multi-byte UTF-8 currency symbols: €, £, ¥
    for sym in &["€", "£", "¥"] {
        if remaining.starts_with(sym) {
            self.pos += sym.len();
            return Some(Token::new(
                TokenKind::CurrencySymbol(sym.to_string()),
                Span::new(start, self.pos),
            ));
        }
    }
    None
}
/// Scan a numeric literal, including decimals, scientific notation, SI suffixes,
/// and percent suffix.
///
/// When a unit suffix (e.g. the "kg" in "5kg") directly follows the digits,
/// this returns the Number token and queues the Unit token in `self.pending`
/// so the caller emits it immediately afterwards.
fn scan_number(&mut self) -> Token {
    let start = self.pos;
    // Consume digits
    self.consume_digits();
    // Decimal point — only consumed when a digit follows, so a bare
    // trailing '.' is left for the caller.
    if self.peek() == Some(b'.') && self.peek_ahead(1).is_some_and(|c| c.is_ascii_digit()) {
        self.advance(); // consume '.'
        self.consume_digits();
    }
    // Scientific notation: e/E followed by optional +/- and digits
    if let Some(e) = self.peek() {
        if e == b'e' || e == b'E' {
            let next = self.peek_ahead(1);
            // Treat the 'e' as an exponent marker only when digits actually
            // follow (optionally after a sign); otherwise "2e" stays 2 + "e".
            let has_digits = match next {
                Some(b'+') | Some(b'-') => self.peek_ahead(2).is_some_and(|c| c.is_ascii_digit()),
                Some(c) => c.is_ascii_digit(),
                None => false,
            };
            if has_digits {
                self.advance(); // consume e/E
                if let Some(b'+') | Some(b'-') = self.peek() {
                    self.advance(); // consume sign
                }
                self.consume_digits();
            }
        }
    }
    let number_end = self.pos;
    // NOTE(review): a parse failure silently yields 0.0 — presumably
    // unreachable for the digit patterns consumed above; confirm.
    let raw_number: f64 = self.input[start..number_end].parse().unwrap_or(0.0);
    // Check for percent suffix
    if self.peek() == Some(b'%') {
        self.advance(); // consume %
        return Token::new(
            TokenKind::Percent(raw_number),
            Span::new(start, self.pos),
        );
    }
    // Check for SI scale suffix (k, M, B, T) — but only if NOT followed by more letters
    // (to avoid matching "kg" as SI suffix "k" + "g")
    if let Some(suffix) = self.peek() {
        let scale = match suffix {
            b'k' if !self.is_unit_suffix_start() => Some(1_000.0),
            b'M' if !self.peek_ahead(1).is_some_and(|c| c.is_ascii_alphabetic()) => Some(1_000_000.0),
            b'B' if !self.peek_ahead(1).is_some_and(|c| c.is_ascii_alphabetic()) => Some(1_000_000_000.0),
            b'T' if !self.peek_ahead(1).is_some_and(|c| c.is_ascii_alphabetic()) => Some(1_000_000_000_000.0),
            _ => None,
        };
        if let Some(multiplier) = scale {
            self.advance(); // consume suffix
            // The scale is applied here, so downstream code sees plain numbers.
            return Token::new(
                TokenKind::Number(raw_number * multiplier),
                Span::new(start, self.pos),
            );
        }
    }
    // Check for unit suffix directly after number (no space)
    if let Some(b) = self.peek() {
        if b.is_ascii_alphabetic() {
            let unit_start = self.pos;
            self.consume_alpha();
            let unit_str = self.input[unit_start..self.pos].to_string();
            // Store the Unit token as pending — it will be emitted on the next scan_token call.
            self.pending = Some(Token::new(
                TokenKind::Unit(unit_str),
                Span::new(unit_start, self.pos),
            ));
            // The Number's span deliberately excludes the unit text.
            return Token::new(
                TokenKind::Number(raw_number),
                Span::new(start, number_end),
            );
        }
    }
    Token::new(
        TokenKind::Number(raw_number),
        Span::new(start, number_end),
    )
}
/// True when the byte *after* the current one is alphabetic — i.e. the
/// current letter begins a multi-letter word such as "kg" rather than
/// standing alone as an SI suffix.
fn is_unit_suffix_start(&self) -> bool {
    matches!(self.peek_ahead(1), Some(c) if c.is_ascii_alphabetic())
}
/// Consume a run of ASCII digits.
fn consume_digits(&mut self) {
    while self.peek().is_some_and(|b| b.is_ascii_digit()) {
        self.pos += 1;
    }
}
/// Consume a run of ASCII alphabetic characters.
fn consume_alpha(&mut self) {
    while self.peek().is_some_and(|b| b.is_ascii_alphabetic()) {
        self.pos += 1;
    }
}
/// Scan an alphabetic word: could be a natural language operator, keyword, unit, or identifier.
fn scan_word(&mut self) -> Token {
    let start = self.pos;
    // Two-word operator: "divided by" lexes as a single division operator.
    if self.matches_word("divided") {
        self.pos += "divided".len();
        self.skip_whitespace();
        if self.matches_word("by") {
            self.pos += "by".len();
            return Token::new(
                TokenKind::Operator(Operator::Slash),
                Span::new(start, self.pos),
            );
        }
        // A lone "divided" is just an identifier; rewind past any
        // whitespace we consumed while looking for "by".
        self.pos = start + "divided".len();
        return Token::new(
            TokenKind::Identifier("divided".to_string()),
            Span::new(start, self.pos),
        );
    }
    // Single-word natural language operators.
    for (word, op) in [
        ("plus", Operator::Plus),
        ("minus", Operator::Minus),
        ("times", Operator::Star),
    ] {
        if self.matches_word(word) {
            self.pos += word.len();
            return Token::new(TokenKind::Operator(op), Span::new(start, self.pos));
        }
    }
    // Recognized keywords (conversion words, currency names, etc.).
    // Matching is case-insensitive; the token stores the lowercase form.
    for kw in ["in", "to", "as", "of", "discount", "off", "euro", "usd", "gbp"] {
        if self.matches_word(kw) {
            self.pos += kw.len();
            return Token::new(TokenKind::Keyword(kw.to_string()), Span::new(start, self.pos));
        }
    }
    // Anything else is a generic identifier: [A-Za-z0-9_]+ as written.
    while self
        .peek()
        .is_some_and(|b| b.is_ascii_alphanumeric() || b == b'_')
    {
        self.pos += 1;
    }
    Token::new(
        TokenKind::Identifier(self.input[start..self.pos].to_string()),
        Span::new(start, self.pos),
    )
}
}
/// Convenience function to tokenize an input line.
pub fn tokenize(input: &str) -> Vec<Token> {
    let mut lexer = Lexer::new(input);
    lexer.tokenize()
}
#[cfg(test)]
mod tests {
    use super::*;

    // ===== Task 2: Core infrastructure tests =====

    #[test]
    fn empty_input() {
        let tokens = tokenize("");
        assert!(tokens.is_empty());
    }

    #[test]
    fn whitespace_only() {
        let tokens = tokenize(" ");
        assert!(tokens.is_empty());
    }

    // ===== Task 3: Number tests =====

    #[test]
    fn integer() {
        let tokens = tokenize("42");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(42.0));
        assert_eq!(tokens[0].span, Span::new(0, 2));
    }

    #[test]
    fn decimal() {
        let tokens = tokenize("3.14");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(3.14));
        assert_eq!(tokens[0].span, Span::new(0, 4));
    }

    #[test]
    fn scientific_notation() {
        let tokens = tokenize("6.022e23");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(6.022e23));
    }

    #[test]
    fn scientific_notation_with_sign() {
        let tokens = tokenize("1.5e-3");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(1.5e-3));
    }

    #[test]
    fn si_suffix_k() {
        let tokens = tokenize("5k");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(5000.0));
    }

    #[test]
    fn si_suffix_m() {
        let tokens = tokenize("2.5M");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(2_500_000.0));
    }

    #[test]
    fn si_suffix_b() {
        let tokens = tokenize("1B");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(1_000_000_000.0));
    }

    // ===== Task 4: Operator tests =====

    #[test]
    fn symbolic_operators() {
        let input = "+ - * / ^ %";
        let tokens = tokenize(input);
        assert_eq!(tokens.len(), 6);
        assert_eq!(tokens[0].kind, TokenKind::Operator(Operator::Plus));
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Minus));
        assert_eq!(tokens[2].kind, TokenKind::Operator(Operator::Star));
        assert_eq!(tokens[3].kind, TokenKind::Operator(Operator::Slash));
        assert_eq!(tokens[4].kind, TokenKind::Operator(Operator::Caret));
        assert_eq!(tokens[5].kind, TokenKind::Operator(Operator::Percent));
    }

    #[test]
    fn natural_language_plus() {
        let tokens = tokenize("5 plus 3");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[0].kind, TokenKind::Number(5.0));
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Plus));
        assert_eq!(tokens[2].kind, TokenKind::Number(3.0));
    }

    #[test]
    fn natural_language_minus() {
        let tokens = tokenize("10 minus 4");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Minus));
    }

    #[test]
    fn natural_language_times() {
        let tokens = tokenize("6 times 7");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Star));
    }

    #[test]
    fn natural_language_divided_by() {
        let tokens = tokenize("20 divided by 4");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[0].kind, TokenKind::Number(20.0));
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Slash));
        assert_eq!(tokens[2].kind, TokenKind::Number(4.0));
    }

    #[test]
    fn parentheses() {
        let tokens = tokenize("(1 + 2)");
        assert_eq!(tokens.len(), 5);
        assert_eq!(tokens[0].kind, TokenKind::LParen);
        assert_eq!(tokens[4].kind, TokenKind::RParen);
    }

    // ===== Task 5: Identifiers, assignments, currency, units =====

    #[test]
    fn variable_assignment() {
        let tokens = tokenize("x = 10");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[0].kind, TokenKind::Identifier("x".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Assign);
        assert_eq!(tokens[2].kind, TokenKind::Number(10.0));
    }

    #[test]
    fn currency_dollar() {
        let tokens = tokenize("$20");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("$".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(20.0));
    }

    #[test]
    fn currency_euro() {
        let tokens = tokenize("€15");
        assert_eq!(tokens.len(), 2);
        // The expected symbol is the euro sign itself, matching the input.
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("€".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(15.0));
    }

    #[test]
    fn currency_pound() {
        let tokens = tokenize("£10");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("£".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(10.0));
    }

    #[test]
    fn currency_yen() {
        let tokens = tokenize("¥500");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("¥".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(500.0));
    }

    #[test]
    fn currency_real() {
        let tokens = tokenize("R$100");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("R$".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(100.0));
    }

    #[test]
    fn unit_suffix_kg() {
        let tokens = tokenize("5kg");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::Number(5.0));
        assert_eq!(tokens[1].kind, TokenKind::Unit("kg".to_string()));
    }

    #[test]
    fn unit_suffix_g() {
        let tokens = tokenize("200g");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::Number(200.0));
        assert_eq!(tokens[1].kind, TokenKind::Unit("g".to_string()));
    }

    #[test]
    fn unit_suffix_m() {
        let tokens = tokenize("3.5m");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::Number(3.5));
        assert_eq!(tokens[1].kind, TokenKind::Unit("m".to_string()));
    }

    #[test]
    fn percent_value() {
        let tokens = tokenize("5%");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Percent(5.0));
    }

    // ===== Task 6: Comments and text =====

    #[test]
    fn comment_line() {
        let tokens = tokenize("// this is a note");
        assert_eq!(tokens.len(), 1);
        assert_eq!(
            tokens[0].kind,
            TokenKind::Comment(" this is a note".to_string())
        );
    }

    #[test]
    fn plain_text() {
        let tokens = tokenize("hello world this is text");
        assert_eq!(tokens.len(), 1);
        assert_eq!(
            tokens[0].kind,
            TokenKind::Text("hello world this is text".to_string())
        );
    }

    // ===== Task 7: Integration tests =====

    #[test]
    fn negative_number() {
        let tokens = tokenize("-7");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0].kind, TokenKind::Operator(Operator::Minus));
        assert_eq!(tokens[1].kind, TokenKind::Number(7.0));
    }

    #[test]
    fn mixed_expression_with_spans() {
        let tokens = tokenize("$20 in euro - 5% discount");
        // Expect: CurrencySymbol($), Number(20), Keyword(in), Keyword(euro), Operator(-), Percent(5), Keyword(discount)
        assert_eq!(tokens.len(), 7);
        assert_eq!(tokens[0].kind, TokenKind::CurrencySymbol("$".to_string()));
        assert_eq!(tokens[1].kind, TokenKind::Number(20.0));
        assert_eq!(tokens[2].kind, TokenKind::Keyword("in".to_string()));
        assert_eq!(tokens[3].kind, TokenKind::Keyword("euro".to_string()));
        assert_eq!(tokens[4].kind, TokenKind::Operator(Operator::Minus));
        assert_eq!(tokens[5].kind, TokenKind::Percent(5.0));
        assert_eq!(tokens[6].kind, TokenKind::Keyword("discount".to_string()));
        // Verify spans
        assert_eq!(tokens[0].span, Span::new(0, 1)); // $
        assert_eq!(tokens[1].span, Span::new(1, 3)); // 20
        assert_eq!(tokens[4].span, Span::new(12, 13)); // -
    }

    #[test]
    fn spans_are_correct() {
        let tokens = tokenize("42 + 3.14");
        assert_eq!(tokens[0].span, Span::new(0, 2)); // "42"
        assert_eq!(tokens[1].span, Span::new(3, 4)); // "+"
        assert_eq!(tokens[2].span, Span::new(5, 9)); // "3.14"
    }

    #[test]
    fn multiple_spaces() {
        let tokens = tokenize("1 + 2");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[0].kind, TokenKind::Number(1.0));
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Plus));
        assert_eq!(tokens[2].kind, TokenKind::Number(2.0));
    }

    #[test]
    fn trailing_whitespace() {
        let tokens = tokenize("42 ");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0].kind, TokenKind::Number(42.0));
    }

    #[test]
    fn inline_comment() {
        let tokens = tokenize("42 + 8 // sum");
        assert_eq!(tokens.len(), 4);
        assert_eq!(tokens[0].kind, TokenKind::Number(42.0));
        assert_eq!(tokens[1].kind, TokenKind::Operator(Operator::Plus));
        assert_eq!(tokens[2].kind, TokenKind::Number(8.0));
        assert_eq!(tokens[3].kind, TokenKind::Comment(" sum".to_string()));
    }

    #[test]
    fn keyword_in() {
        let tokens = tokenize("100 in usd");
        assert_eq!(tokens.len(), 3);
        assert_eq!(tokens[0].kind, TokenKind::Number(100.0));
        assert_eq!(tokens[1].kind, TokenKind::Keyword("in".to_string()));
        assert_eq!(tokens[2].kind, TokenKind::Keyword("usd".to_string()));
    }
}

5
src/lib.rs Normal file
View File

@@ -0,0 +1,5 @@
/// Token types produced by the lexer: `Token`, `TokenKind`, `Operator`, `Span`.
pub mod token;
/// The line-oriented lexer and its `tokenize` entry point.
pub mod lexer;
// Re-export the public API at the crate root for convenient access.
pub use lexer::tokenize;
pub use token::{Operator, Span, Token, TokenKind};

129
src/token.rs Normal file
View File

@@ -0,0 +1,129 @@
/// Byte offset span within the input string.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Span {
    /// Inclusive start byte offset.
    pub start: usize,
    /// Exclusive end byte offset.
    pub end: usize,
}

impl Span {
    /// Create a new span from start (inclusive) to end (exclusive).
    pub fn new(start: usize, end: usize) -> Self {
        Span { start, end }
    }

    /// Length of the span in bytes.
    pub fn len(&self) -> usize {
        self.end - self.start
    }

    /// Whether this span is empty (zero-width).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Arithmetic operator kind.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Operator {
    /// Addition: `+` (or the word "plus").
    Plus,
    /// Subtraction: `-` (or the word "minus").
    Minus,
    /// Multiplication: `*` (or the word "times").
    Star,
    /// Division: `/` (or the phrase "divided by").
    Slash,
    /// Exponentiation: `^`.
    Caret,
    /// The bare `%` operator (distinct from a `5%` percent literal).
    Percent,
}
/// The kind of token produced by the lexer.
#[derive(Debug, Clone, PartialEq)]
pub enum TokenKind {
    /// A numeric literal (integer, decimal, scientific, or SI-scaled).
    /// SI suffixes are already applied by the lexer: `5k` is stored as `5000.0`.
    Number(f64),
    /// An arithmetic operator.
    Operator(Operator),
    /// An identifier (variable name or function name).
    Identifier(String),
    /// The `=` assignment operator.
    Assign,
    /// A currency symbol ($, €, £, ¥, R$, etc.), stored as written.
    CurrencySymbol(String),
    /// A unit suffix (kg, g, m, lb, etc.).
    Unit(String),
    /// A `//` comment — text after `//` to end of line.
    Comment(String),
    /// Plain text line with no calculable expression.
    Text(String),
    /// A keyword (in, to, as, of, discount, off, etc.), stored lowercase.
    Keyword(String),
    /// A percentage value like `5%` — stores the number before `%` (5.0, not 0.05).
    Percent(f64),
    /// Left parenthesis `(`.
    LParen,
    /// Right parenthesis `)`.
    RParen,
}
/// A single token with its kind and byte span in the input.
#[derive(Debug, Clone, PartialEq)]
pub struct Token {
    /// What kind of token this is and its associated value.
    pub kind: TokenKind,
    /// Byte span of this token in the original input (start inclusive,
    /// end exclusive).
    pub span: Span,
}

impl Token {
    /// Create a new token from a kind and its span.
    pub fn new(kind: TokenKind, span: Span) -> Self {
        Self { kind, span }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `Span::new` stores both offsets and reports a non-zero length.
    #[test]
    fn span_new_and_len() {
        let span = Span::new(0, 5);
        assert_eq!((span.start, span.end), (0, 5));
        assert_eq!(span.len(), 5);
        assert!(!span.is_empty());
    }

    /// A zero-width span is empty with length 0.
    #[test]
    fn span_empty() {
        let span = Span::new(3, 3);
        assert_eq!(span.len(), 0);
        assert!(span.is_empty());
    }

    /// Tokens carry their kind and span through unchanged.
    #[test]
    fn token_construction() {
        let tok = Token::new(TokenKind::Number(42.0), Span::new(0, 2));
        assert_eq!(tok.kind, TokenKind::Number(42.0));
        assert_eq!(tok.span, Span::new(0, 2));
    }

    /// Operator tokens compare equal by their operator variant.
    #[test]
    fn token_operator() {
        let tok = Token::new(TokenKind::Operator(Operator::Plus), Span::new(0, 1));
        assert_eq!(tok.kind, TokenKind::Operator(Operator::Plus));
    }

    /// Unit-like kinds are equal to themselves and distinct from each other.
    #[test]
    fn token_kinds_equality() {
        assert_eq!(TokenKind::Assign, TokenKind::Assign);
        assert_eq!(TokenKind::LParen, TokenKind::LParen);
        assert_eq!(TokenKind::RParen, TokenKind::RParen);
        assert_ne!(TokenKind::LParen, TokenKind::RParen);
    }
}