From d20f368eceaa2a2fe12e84c11a17ce356fc71376 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Tue, 9 Sep 2025 15:44:39 +0900 Subject: [PATCH 1/7] feat: add `Context` for handling free variables * Introduce `Context` to define free variables for parsing and display * Add `parse_with_context` to resolve free variables based on a `Context` * Add `Term::with_context` for displaying terms using a given `Context` --- src/lib.rs | 2 +- src/parser.rs | 72 ++++++++++++---- src/term.rs | 200 ++++++++++++++++++++++++++++++++++++++++++--- tests/parser.rs | 18 ++-- tests/reduction.rs | 9 +- 5 files changed, 265 insertions(+), 36 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 803ab09..2d245b0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,7 +9,7 @@ pub mod combinators; pub mod parser; pub mod reduction; -pub use self::parser::parse; +pub use self::parser::{parse, parse_with_context}; pub use self::reduction::beta; pub use self::reduction::Order::*; pub use self::term::Notation::*; diff --git a/src/parser.rs b/src/parser.rs index 2f916a0..2afd37b 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -4,6 +4,7 @@ use self::CToken::*; use self::Expression::*; use self::ParseError::*; use self::Token::*; +use crate::term::Context; pub use crate::term::Notation::*; use crate::term::Term::*; use crate::term::{abs, app, Notation, Term}; @@ -16,6 +17,8 @@ use std::fmt; pub enum ParseError { /// lexical error; contains the invalid character and its index InvalidCharacter((usize, char)), + /// lexical error; an undefined free variable was found (classic notation only) + UndefinedFreeVariable, /// syntax error; the expression is invalid InvalidExpression, /// syntax error; the expression is empty @@ -28,6 +31,9 @@ impl fmt::Display for ParseError { ParseError::InvalidCharacter((idx, char)) => { write!(f, "lexical error; invalid character '{}' at {}", char, idx) } + ParseError::UndefinedFreeVariable => { + write!(f, "lexical error; an undefined free variable was used") + } 
ParseError::InvalidExpression => write!(f, "syntax error; the expression is invalid"), ParseError::EmptyExpression => write!(f, "syntax error; the expression is empty"), } } @@ -142,15 +148,17 @@ pub fn tokenize_cla(input: &str) -> Result<Vec<CToken>, ParseError> { } #[doc(hidden)] -pub fn convert_classic_tokens(tokens: &[CToken]) -> Vec<Token> { - _convert_classic_tokens(tokens, &mut VecDeque::with_capacity(tokens.len()), &mut 0) +pub fn convert_classic_tokens(ctx: &Context, tokens: &[CToken]) -> Result<Vec<Token>, ParseError> { + let mut stack = VecDeque::with_capacity(tokens.len()); + stack.extend(ctx.iter().rev()); + _convert_classic_tokens(tokens, &mut stack, &mut 0) } fn _convert_classic_tokens<'t>( tokens: &'t [CToken], stack: &mut VecDeque<&'t str>, pos: &mut usize, -) -> Vec<Token> { +) -> Result<Vec<Token>, ParseError> { let mut output = Vec::with_capacity(tokens.len() - *pos); let mut inner_stack_count = 0; @@ -164,28 +172,26 @@ fn _convert_classic_tokens<'t>( CLparen => { output.push(Lparen); *pos += 1; - output.append(&mut _convert_classic_tokens(tokens, stack, pos)); + output.append(&mut _convert_classic_tokens(tokens, stack, pos)?); } CRparen => { output.push(Rparen); stack.truncate(stack.len() - inner_stack_count); - return output; + return Ok(output); } CName(ref name) => { if let Some(index) = stack.iter().rev().position(|t| t == name) { output.push(Number(index + 1)) } else { - // a new free variable - stack.push_front(name); - // index of the last element + 1 - output.push(Number(stack.len())) + // a free variable not defined in the `Context` + return Err(UndefinedFreeVariable); } } } *pos += 1; } - output + Ok(output) } #[derive(Debug, PartialEq)] @@ -253,10 +259,42 @@ fn _get_ast(tokens: &[Token], pos: &mut usize) -> Result<Expression, ParseError> /// /// Returns a `ParseError` when a lexing or syntax error is encountered. 
pub fn parse(input: &str, notation: Notation) -> Result { + parse_with_context(&Context::empty(), input, notation) +} + +/// Attempts to parse the input `&str` using a provided context of free variables. +/// +/// This function is identical to `parse()`, but it allows defining a set of named +/// free variables that are considered valid during parsing in `Classic` notation. +/// +/// # Examples +/// ``` +/// use lambda_calculus::{*, term::Context}; +/// +/// let ctx = Context::new(&["x", "y"]); +/// +/// // `z` is not in the context, so it will be an error. +/// assert!(parse_with_context(&ctx, "z", Classic).is_err()); +/// +/// // `y` is in the context, so it's parsed as the outermost free variable (Var(2)). +/// assert_eq!(parse_with_context(&ctx, "y", Classic), Ok(Var(2))); +/// +/// // In `λa.y`, `y` is still the outermost free variable, but its index is now 3. +/// assert_eq!(parse_with_context(&ctx, "λa.y", Classic), Ok(abs(Var(3)))); +/// ``` +/// +/// # Errors +/// +/// Returns a `ParseError` when a lexing or syntax error is encountered. +pub fn parse_with_context( + ctx: &Context, + input: &str, + notation: Notation, +) -> Result { let tokens = if notation == DeBruijn { tokenize_dbr(input)? } else { - convert_classic_tokens(&tokenize_cla(input)?) + convert_classic_tokens(ctx, &tokenize_cla(input)?)? 
}; let ast = get_ast(&tokens)?; @@ -362,15 +400,19 @@ mod tests { assert!(tokens_dbr.is_ok()); assert_eq!( - convert_classic_tokens(&tokens_cla.unwrap()), + convert_classic_tokens(&Context::empty(), &tokens_cla.unwrap()).unwrap(), tokens_dbr.unwrap() ); } #[test] fn tokenization_success_classic_with_free_variables() { + let ctx = Context::new(&["a", "b"]); let blc_dbr = "12"; - let blc_cla = parse(blc_dbr, DeBruijn).unwrap().to_string(); + let blc_cla = parse(blc_dbr, DeBruijn) + .unwrap() + .with_context(&ctx) + .to_string(); let tokens_cla = tokenize_cla(&blc_cla); let tokens_dbr = tokenize_dbr(blc_dbr); @@ -379,8 +421,8 @@ mod tests { assert!(tokens_dbr.is_ok()); assert_eq!( - convert_classic_tokens(&tokens_cla.unwrap()), - tokens_dbr.unwrap() + tokens_cla.and_then(|tokens| convert_classic_tokens(&ctx, &tokens)), + tokens_dbr ); } diff --git a/src/term.rs b/src/term.rs index 34467e7..8858919 100644 --- a/src/term.rs +++ b/src/term.rs @@ -38,6 +38,100 @@ pub enum Notation { DeBruijn, } +/// A context holding a list of names for classic notation printing. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Context(Vec); + +impl Context { + /// Creates a new `Context` from a slice of string-like items. + /// + /// This is the primary, most flexible constructor. It accepts anything + /// that can be borrowed as a string slice, like `&[&str]` or `&[String]`. + /// + /// # Examples + /// + /// ``` + /// use lambda_calculus::term::Context; + /// + /// // Create from an array of &str + /// let context1 = Context::new(&["a", "b", "c"]); + /// + /// // Create from a Vec + /// let names = vec!["a".to_string(), "b".to_string(), "c".to_string()]; + /// let context2 = Context::new(&names); + /// + /// assert_eq!(context1, context2); + /// ``` + pub fn new>(namings: &[S]) -> Self { + let owned = namings.iter().map(|s| s.as_ref().to_string()).collect(); + Context(owned) + } + + /// Creates an empty context. 
+ pub fn empty() -> Self { + vec![].into() + } + + /// Returns an iterator over the names in the context, yielding `&str`. + pub fn iter(&self) -> impl DoubleEndedIterator { + self.0.iter().map(|s| s.as_str()) + } + + /// Returns the number of names in the context. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the context contains no names. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns `true` if the context contains a name equivalent to the given value. + /// + /// This method is generic over `AsRef`, so it can be called with + /// a string slice (`&str`), a `String`, or other string-like types. + pub fn contains>(&self, name: S) -> bool { + self.iter().any(|item| item == name.as_ref()) + } + + /// Resolves a 1-based index to a free variable name from the context. + /// + /// The index is 1-based, where `1` refers to the first name defined in the context. + /// Returns `None` if the index is 0 or out of bounds. + /// + /// # Examples + /// + /// ``` + /// # use lambda_calculus::term::Context; + /// let ctx = Context::new(&["a", "b", "c"]); + /// + /// assert_eq!(ctx.resolve_free_var(1), Some("a")); + /// assert_eq!(ctx.resolve_free_var(3), Some("c")); + /// assert_eq!(ctx.resolve_free_var(0), None); + /// assert_eq!(ctx.resolve_free_var(4), None); + /// ``` + pub fn resolve_free_var(&self, idx: usize) -> Option<&str> { + if idx == 0 { + None + } else { + self.0.get(idx - 1).map(|s| s.as_str()) + } + } +} + +impl> From<&[S]> for Context { + fn from(namings: &[S]) -> Self { + Self::new(namings) + } +} + +impl From> for Context { + fn from(namings: Vec) -> Self { + Context(namings) + } +} + /// A lambda term that is either a variable with a De Bruijn index, an abstraction over a term or /// an applicaction of one term to another. #[derive(PartialEq, Clone, Hash, Eq)] @@ -470,6 +564,41 @@ impl Term { } } } + + /// Calculates the maximum index of any free variable in the term. 
+ /// + /// The result corresponds to the number of names `Context` must supply to bind them all. + pub fn max_free_index(&self) -> usize { + self.max_free_index_helper(0) + } + + fn max_free_index_helper(&self, depth: usize) -> usize { + match self { + Var(x) => x.saturating_sub(depth), + Abs(p) => p.max_free_index_helper(depth + 1), + App(p_boxed) => { + let (ref f, ref a) = **p_boxed; + f.max_free_index_helper(depth) + .max(a.max_free_index_helper(depth)) + } + } + } + + /// Returns a helper struct that allows displaying the term with a given context. + /// + /// # Example + /// ``` + /// use lambda_calculus::{*, term::Context}; + /// + /// let term = abs(Var(2)); // λa.b + /// let ctx = Context::new(&["x"]); // Predefine "x" as a free variable + /// + /// // The context defines `Var(2)` as "x" instead of the default "b" + /// assert_eq!(term.with_context(&ctx).to_string(), "λa.x"); + /// ``` + pub fn with_context<'a>(&'a self, ctx: &'a Context) -> impl fmt::Display + 'a { + DisplayWithContext { term: self, ctx } + } } /// Wraps a `Term` in an `Abs`traction. Consumes its argument. @@ -499,10 +628,52 @@ pub fn app(lhs: Term, rhs: Term) -> Term { impl fmt::Display for Term { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", show_precedence_cla(self, 0, self.max_depth(), 0)) + let max_depth = self.max_depth(); + let max_free_index = self.max_free_index(); + let ctx = auto_generate_context(max_depth, max_free_index); + let binder_names = generate_binder_names(&ctx, self.max_depth()); + write!( + f, + "{}", + show_precedence_cla(&ctx, &binder_names, self, 0, 0) + ) } } +/// A helper function to generate a default context for displaying a term. +fn auto_generate_context(max_depth: u32, max_free_index: usize) -> Context { + let free_variables = (0..max_free_index) + .map(|i| base26_encode(max_depth + i as u32)) + .collect::>(); + free_variables.into() +} + +/// A helper struct for displaying a `Term` with an external `Context`. 
+struct DisplayWithContext<'a> { + term: &'a Term, + ctx: &'a Context, +} + +impl<'a> fmt::Display for DisplayWithContext<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let binder_names = generate_binder_names(self.ctx, self.term.max_depth()); + write!( + f, + "{}", + show_precedence_cla(self.ctx, &binder_names, self.term, 0, 0) + ) + } +} + +/// Generates a list of fresh names for binders, avoiding clashes with the given context. +fn generate_binder_names(ctx: &Context, number: u32) -> Vec { + (0..) + .map(|i| base26_encode(i as u32)) + .filter(|name| !ctx.contains(name)) + .take(number as usize) + .collect() +} + fn base26_encode(mut n: u32) -> String { let mut buf = Vec::::new(); n += 1; @@ -518,29 +689,36 @@ fn base26_encode(mut n: u32) -> String { } fn show_precedence_cla( + ctx: &Context, + binder_names: &Vec, term: &Term, context_precedence: usize, - max_depth: u32, depth: u32, ) -> String { match term { Var(0) => "undefined".to_owned(), Var(i) => { let i = *i as u32; - let ix = if i <= depth { - depth - i + if i <= depth { + binder_names + .get((depth - i) as usize) + .expect("[BUG] binder_names are insufficient") + .to_owned() } else { - max_depth + i - depth - 1 - }; - base26_encode(ix) + let idx = (i - depth) as usize; + ctx.resolve_free_var(idx) + .map_or(format!("", idx), |s| s.to_owned()) + } } Abs(ref t) => { let ret = { format!( "{}{}.{}", LAMBDA, - base26_encode(depth), - show_precedence_cla(t, 0, max_depth, depth + 1) + binder_names + .get(depth as usize) + .expect("[BUG] binder_names are insufficient"), + show_precedence_cla(ctx, binder_names, t, 0, depth + 1) ) }; parenthesize_if(&ret, context_precedence > 1).into() @@ -549,8 +727,8 @@ fn show_precedence_cla( let (ref t1, ref t2) = **boxed; let ret = format!( "{} {}", - show_precedence_cla(t1, 2, max_depth, depth), - show_precedence_cla(t2, 3, max_depth, depth) + show_precedence_cla(ctx, binder_names, t1, 2, depth), + show_precedence_cla(ctx, binder_names, t2, 3, depth) ); 
parenthesize_if(&ret, context_precedence == 3).into() } diff --git a/tests/parser.rs b/tests/parser.rs index dca14e9..e8e5ac1 100644 --- a/tests/parser.rs +++ b/tests/parser.rs @@ -1,32 +1,38 @@ use lambda_calculus::{ parse, - parser::ParseError, - term::Notation::{Classic, DeBruijn}, + parser::{parse_with_context, ParseError}, + term::{ + Context, + Notation::{Classic, DeBruijn}, + }, }; #[test] fn parse_debruijn_and_classic() -> Result<(), ParseError> { - for (dbr, cla) in [ - ("12", "a b"), - ("λλ21", "λs. λz. s z"), + for (ctx, dbr, cla) in [ + (Context::new(&["a", "b"]), "12", "a b"), + (Context::empty(), "λλ21", "λs. λz. s z"), ( + Context::new(&["w", "y", "z"]), "λ2134(λ3215(λ4321)3215)2134", "λx. w x y z (λy. w x y z (λz. w x y z) w x y z) w x y z", ), ( + Context::new(&["a", "b", "f", "z", "w", "y"]), // See: http://alexandria.tue.nl/repository/freearticles/597619.pdf "λ2(λ421(5(λ4127)λ8))67", // the free variable list is ..ywzfba "λx. a (λt. b x t (f (λu. a u t z) λs. w)) w y", ), ( + Context::new(&["s", "z"]), // apply `plus zero one` to `s` and `z` "(λλλλ42(321))(λλ1)(λλ21)12", "(λm.λn.λs.λz. m s (n s z)) (λs.λz. z) (λs.λz. s z) s z", ), ] { let term_dbr = parse(dbr, DeBruijn)?; - let term_cla = parse(cla, Classic)?; + let term_cla = parse_with_context(&ctx, cla, Classic)?; assert_eq!(term_dbr, term_cla); } Ok(()) diff --git a/tests/reduction.rs b/tests/reduction.rs index c32ee57..2eeda7b 100644 --- a/tests/reduction.rs +++ b/tests/reduction.rs @@ -1,7 +1,8 @@ extern crate lambda_calculus as lambda; use lambda::combinators::{I, O}; -use lambda::parser::ParseError; +use lambda::parser::{parse_with_context, ParseError}; +use lambda::term::Context; use lambda::*; use std::thread; @@ -51,7 +52,9 @@ fn reduction_cbv() { #[test] fn reduction_zero_plus_one() -> Result<(), ParseError> { - let mut expr = parse( + let ctx = Context::new(&["s", "z"]); + let mut expr = parse_with_context( + &ctx, "(λm.λn.λs.λz. m s (n s z)) (λs.λz. z) (λs.λz. 
s z) s z", Classic, )?; @@ -59,7 +62,7 @@ fn reduction_zero_plus_one() -> Result<(), ParseError> { assert_eq!(expr, parse("(λλ(λλ1)2((λλ21)21))12", DeBruijn)?); expr.reduce(CBV, 6); assert_eq!(expr, parse("12", DeBruijn)?); - assert_eq!(expr.to_string(), "a b"); + assert_eq!(expr.with_context(&ctx).to_string(), "s z"); Ok(()) } From 5187de60df845639da683e1c413b1e0b2e9cd489 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Wed, 10 Sep 2025 06:46:05 +0900 Subject: [PATCH 2/7] test: add unit tests for Context and related features --- src/parser.rs | 20 ++++++++++++ src/term.rs | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/src/parser.rs b/src/parser.rs index 2afd37b..d5c7fb3 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -426,6 +426,26 @@ mod tests { ); } + #[test] + fn parse_classic_with_undefined_variable_error() { + let ctx_with_y = Context::new(&["y"]); + + // "x" is not defined in the empty context + assert_eq!( + parse_with_context(&Context::empty(), "x", Classic), + Err(UndefinedFreeVariable) + ); + + // "x" is not defined in this context either + assert_eq!( + parse_with_context(&ctx_with_y, "λz.x", Classic), + Err(UndefinedFreeVariable) + ); + + // "y" is defined, so this should be OK + assert!(parse_with_context(&ctx_with_y, "y", Classic).is_ok()); + } + #[test] fn alternative_lambda_parsing() { assert_eq!(parse(r"\\\2(321)", DeBruijn), parse("λλλ2(321)", DeBruijn)) diff --git a/src/term.rs b/src/term.rs index 8858919..f94d150 100644 --- a/src/term.rs +++ b/src/term.rs @@ -829,6 +829,40 @@ mod tests { ); } + #[test] + fn context_methods() { + let ctx = Context::new(&["a", "b", "c"]); + let empty_ctx = Context::empty(); + + // len & is_empty + assert_eq!(ctx.len(), 3); + assert!(!ctx.is_empty()); + assert_eq!(empty_ctx.len(), 0); + assert!(empty_ctx.is_empty()); + + // contains + assert!(ctx.contains("b")); + assert!(ctx.contains(&"c".to_string())); + assert!(!ctx.contains("d")); + + // iter + let 
names: Vec<&str> = ctx.iter().collect(); + assert_eq!(names, vec!["a", "b", "c"]); + } + + #[test] + fn context_resolve_free_var() { + let ctx = Context::new(&["a", "b", "c"]); + + // 1-based index, forward lookup + assert_eq!(ctx.resolve_free_var(1), Some("a")); + assert_eq!(ctx.resolve_free_var(3), Some("c")); + + // Invalid cases + assert_eq!(ctx.resolve_free_var(0), None); // 0 is invalid + assert_eq!(ctx.resolve_free_var(4), None); // Out of bounds + } + #[test] fn abs_macro() { assert_eq!(abs!(4, Var(1)), abs(abs(abs(abs(Var(1)))))); @@ -905,6 +939,61 @@ mod tests { assert_eq!(&format!("{:?}", pred), "λλλ3(λλ1(24))(λ2)(λ1)"); } + #[test] + fn term_display_with_context() { + let ctx = Context::new(&["x", "y"]); + + // Term with only free variables: Var(1) -> x, Var(2) -> y + let term1 = app(Var(1), Var(2)); + assert_eq!(term1.with_context(&ctx).to_string(), "x y"); + + // Term with bound and free variables + // λa. a y (y is Var(2) from context) + let term2 = abs(app(Var(1), Var(3))); + assert_eq!(term2.with_context(&ctx).to_string(), "λa.a y"); + + let term3 = abs(Var(2)); + assert_eq!(term3.with_context(&ctx).to_string(), "λa.x"); + } + + #[test] + fn term_display_with_clashing_context() { + let ctx = Context::new(&["a", "c"]); + + let term1 = app(Var(1), Var(2)); + assert_eq!(term1.with_context(&ctx).to_string(), "a c"); + + let term2 = abs(app(Var(1), Var(3))); + assert_eq!(term2.with_context(&ctx).to_string(), "λb.b c"); + + let term3 = abs(Var(2)); + assert_eq!(term3.with_context(&ctx).to_string(), "λb.a"); + } + + #[test] + fn term_display_without_context() { + let term1 = app(Var(1), Var(2)); + assert_eq!(term1.to_string(), "a b"); + assert_eq!( + term1.with_context(&Context::empty()).to_string(), + " " + ); + + let term2 = abs(app(Var(1), Var(3))); + assert_eq!(term2.to_string(), "λa.a c"); + assert_eq!( + term2.with_context(&Context::empty()).to_string(), + "λa.a " + ); + + let term3 = abs(Var(2)); + assert_eq!(term3.to_string(), "λa.b"); + 
assert_eq!( + term3.with_context(&Context::empty()).to_string(), + "λa." + ); + } + #[test] fn is_supercombinator() { assert!(abs(Var(1)).is_supercombinator()); From afcc282d7f3a090fc13f96c3f769994f7f682189 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Tue, 9 Sep 2025 21:53:48 +0900 Subject: [PATCH 3/7] refactor: Remove unnecessary use of VecDeque revert 190c29f182c0d6e66a05a8c00bf23f46fd27bc66 --- src/parser.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/parser.rs b/src/parser.rs index d5c7fb3..b8ee978 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -8,7 +8,6 @@ use crate::term::Context; pub use crate::term::Notation::*; use crate::term::Term::*; use crate::term::{abs, app, Notation, Term}; -use std::collections::VecDeque; use std::error::Error; use std::fmt; @@ -149,14 +148,14 @@ pub fn tokenize_cla(input: &str) -> Result, ParseError> { #[doc(hidden)] pub fn convert_classic_tokens(ctx: &Context, tokens: &[CToken]) -> Result, ParseError> { - let mut stack = VecDeque::with_capacity(tokens.len()); + let mut stack = Vec::with_capacity(tokens.len()); stack.extend(ctx.iter().rev()); _convert_classic_tokens(tokens, &mut stack, &mut 0) } fn _convert_classic_tokens<'t>( tokens: &'t [CToken], - stack: &mut VecDeque<&'t str>, + stack: &mut Vec<&'t str>, pos: &mut usize, ) -> Result, ParseError> { let mut output = Vec::with_capacity(tokens.len() - *pos); @@ -166,7 +165,7 @@ fn _convert_classic_tokens<'t>( match *token { CLambda(ref name) => { output.push(Lambda); - stack.push_back(name); + stack.push(name); inner_stack_count += 1; } CLparen => { From fb412acac355d2d7c9466b73aebd2b6ee2a3e845 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Fri, 12 Sep 2025 06:09:33 +0900 Subject: [PATCH 4/7] refactor: use slice for function parameter Accepting a slice is more flexible and idiomatic than a vector reference (@ljedrz) Co-authored-by: ljedrz <3750347+ljedrz@users.noreply.github.com> --- src/term.rs | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/src/term.rs b/src/term.rs index f94d150..6ffa1ea 100644 --- a/src/term.rs +++ b/src/term.rs @@ -690,7 +690,7 @@ fn base26_encode(mut n: u32) -> String { fn show_precedence_cla( ctx: &Context, - binder_names: &Vec<String>, + binder_names: &[String], term: &Term, context_precedence: usize, depth: u32, From b76efc2692c35fcd3238533b5ae24d94dcac229b Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Fri, 12 Sep 2025 06:56:37 +0900 Subject: [PATCH 5/7] fix: remove redundant tests causing clippy warnings --- src/term.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/term.rs b/src/term.rs index 6ffa1ea..982634b 100644 --- a/src/term.rs +++ b/src/term.rs @@ -842,7 +842,6 @@ mod tests { // contains assert!(ctx.contains("b")); - assert!(ctx.contains(&"c".to_string())); assert!(!ctx.contains("d")); // iter From b8a8145a830b40f529b351e2c6265fa4bed3d607 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Fri, 12 Sep 2025 06:17:43 +0900 Subject: [PATCH 6/7] fix: address clippy::doc_lazy_continuation warning in doc comments --- src/parser.rs | 2 +- src/reduction.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/parser.rs b/src/parser.rs index b8ee978..4f6bf55 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -236,7 +236,7 @@ fn _get_ast(tokens: &[Token], pos: &mut usize) -> Result<Expression, ParseError> /// Attempts to parse the input `&str` as a lambda `Term` encoded in the given `Notation`. 
/// /// - lambdas can be represented either with the greek letter (λ) or a backslash (\\ - -/// less aesthetic, but only one byte in size) +/// less aesthetic, but only one byte in size) /// - the identifiers in `Classic` notation are `String`s of alphabetic Unicode characters /// - `Classic` notation ignores whitespaces where unambiguous /// - the indices in the `DeBruijn` notation start with 1 and are hexadecimal digits diff --git a/src/reduction.rs b/src/reduction.rs index 9d8db14..65c8935 100644 --- a/src/reduction.rs +++ b/src/reduction.rs @@ -10,7 +10,7 @@ use std::{cmp, fmt, mem}; /// /// - the `NOR`, `HNO`, `APP` and `HAP` orders reduce expressions to their normal form /// - the `APP` order will fail to fully reduce expressions containing terms without a normal form, -/// e.g. the `Y` combinator (they will expand forever) +/// e.g. the `Y` combinator (they will expand forever) /// - the `CBN` order reduces to weak head normal form /// - the `CBV` order reduces to weak normal form /// - the `HSP` order reduces to head normal form From 1f59337fcfcfdac1cfeb8d5a28260b05b8f75f25 Mon Sep 17 00:00:00 2001 From: Masahiro Honma Date: Fri, 12 Sep 2025 06:34:27 +0900 Subject: [PATCH 7/7] fix: make lifetime syntax consistent per Clippy --- src/term.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/term.rs b/src/term.rs index 982634b..ca5e951 100644 --- a/src/term.rs +++ b/src/term.rs @@ -763,7 +763,7 @@ fn show_precedence_dbr(term: &Term, context_precedence: usize) -> String { } } -fn parenthesize_if(input: &str, condition: bool) -> Cow<str> { +fn parenthesize_if(input: &str, condition: bool) -> Cow<'_, str> { if condition { format!("({})", input).into() } else {