From ed06731bb450412c3811d3c932af682c806b95a5 Mon Sep 17 00:00:00 2001 From: konard Date: Tue, 13 Jan 2026 13:03:13 +0100 Subject: [PATCH 1/5] Initial commit with task details Adding CLAUDE.md with task information for AI processing. This file will be removed when the task is complete. Issue: https://github.com/link-foundation/links-notation/issues/197 --- CLAUDE.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..433d44c --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,5 @@ +Issue to solve: https://github.com/link-foundation/links-notation/issues/197 +Your prepared branch: issue-197-0f9688b95d4f +Your prepared working directory: /tmp/gh-issue-solver-1768305791816 + +Proceed. From b6e27eb326b24a50491877831c5225746f3e09d6 Mon Sep 17 00:00:00 2001 From: konard Date: Tue, 13 Jan 2026 13:09:08 +0100 Subject: [PATCH 2/5] Add streaming parser API for memory-efficient processing of large messages This commit implements a streaming parser for both Rust and JavaScript implementations of links-notation, addressing issue #197. The streaming parser enables incremental parsing of Links Notation data, which is essential for handling large messages without loading everything into memory. **Features:** Rust Implementation (src/stream_parser.rs): - StreamParser struct with callback-based API - on_link() callback for processing parsed links as they arrive - on_error() callback with detailed error location information - write() method for feeding data chunks incrementally - finish() method to complete parsing and return all links - ErrorLocation type with line, column, and offset information - Comprehensive test suite with 6 tests covering various scenarios JavaScript Implementation (src/StreamParser.js): - EventEmitter-based StreamParser class - 'link' event emitted for each parsed link - 'error' event with line/column location information - write() method for incremental data feeding - end() method to finalize parsing - position() method for tracking parse progress - Configurable maxInputSize and maxDepth options - Comprehensive test suite with 17 tests **Examples:** - examples/rust_streaming_parser.rs - 7 examples demonstrating various use cases - examples/js_streaming_parser.js - 6 examples showing streaming capabilities **Documentation:** - Updated rust/README.md with streaming parser section and API reference - Updated js/README.md with streaming parser section and API reference - Added inline documentation and usage examples in code **Use Cases:** 1. Memory efficiency - Process large messages without loading everything 2. Latency reduction - Start processing before full message arrives 3. Network integration - Natural fit for TCP/HTTP streaming 4. Real-time processing - Handle data as it becomes available **Testing:** - All 39 existing Rust tests pass - All 205 existing JavaScript tests pass (188 + 17 new streaming tests) - No regressions introduced Co-Authored-By: Claude Sonnet 4.5 --- examples/js_streaming_parser.js | 160 ++++++++++++ examples/rust_streaming_parser.rs | 188 ++++++++++++++ js/README.md | 48 ++++ js/src/StreamParser.js | 319 ++++++++++++++++++++++++ js/src/index.js | 1 + js/tests/StreamParser.test.js | 255 +++++++++++++++++++ rust/Cargo.toml | 4 + rust/README.md | 45 ++++ rust/src/lib.rs | 2 + rust/src/stream_parser.rs | 402 ++++++++++++++++++++++++++++++ 10 files changed, 1424 insertions(+) create mode 100644 examples/js_streaming_parser.js create mode 100644 examples/rust_streaming_parser.rs create mode 100644 js/src/StreamParser.js create mode 100644 js/tests/StreamParser.test.js create mode 100644 rust/src/stream_parser.rs diff --git a/examples/js_streaming_parser.js b/examples/js_streaming_parser.js new file mode 100644 index 0000000..c8faa49 --- /dev/null +++ b/examples/js_streaming_parser.js @@ -0,0 +1,160 @@ +#!/usr/bin/env node + +/** + * Example: Using StreamParser for incremental parsing in JavaScript + * + * This example demonstrates how to use the StreamParser to process + * Links Notation data incrementally, which is useful for: + * - Large files that don't fit in memory + * - Network streaming (e.g., TCP/HTTP streaming) + * - Real-time processing of incoming data + */ + +import { StreamParser } from '../js/src/StreamParser.js'; + +console.log('=== JavaScript StreamParser Example ===\n'); + +// Example 1: Basic usage with event listeners +console.log('Example 1: Basic usage with event listeners'); +console.log('-------------------------------------------'); + +const parser1 = new StreamParser(); +let linkCount = 0; + +parser1.on('link', (link) => { + linkCount++; + console.log(`Link #${linkCount}:`, link.toString()); +}); + +parser1.on('error', (error) => { + console.error(`Error at line ${error.line}, col ${error.column}: ${error.message}`); +}); + +// Feed data incrementally (simulating network chunks) +parser1.write('papa (lovesMama: loves mama)\n'); +parser1.write('son lovesMama\n'); +parser1.write('daughter lovesMama\n'); + +const links1 = parser1.end(); +console.log(`\nTotal links parsed: ${links1.length}\n`); + +// Example 2: Processing data in very small chunks +console.log('Example 2: Processing data in small chunks'); +console.log('-------------------------------------------'); + +const parser2 = new StreamParser(); + +parser2.on('link', (link) => { + console.log('Parsed:', link.toString()); +}); + +// Simulate character-by-character streaming +const message = '(message: hello world)\n(status: ok)\n'; +for (let i = 0; i < message.length; i++) { + parser2.write(message[i]); +} + +const links2 = parser2.end(); +console.log(`Total links: ${links2.length}\n`); + +// Example 3: Handling multiline indented syntax +console.log('Example 3: Multiline indented syntax'); +console.log('-------------------------------------'); + +const parser3 = new StreamParser(); + +parser3.on('link', (link) => { + console.log('Parsed link:', link.toString()); +}); + +parser3.write('relationship:\n'); +parser3.write(' papa\n'); +parser3.write(' loves\n'); +parser3.write(' mama\n'); + +parser3.end(); +console.log(); + +// Example 4: Error handling +console.log('Example 4: Error handling with location info'); +console.log('---------------------------------------------'); + +const parser4 = new StreamParser(); + +parser4.on('error', (error) => { + console.log('✓ Error caught successfully:'); + console.log(` Message: ${error.message}`); + console.log(` Location: line ${error.line}, column ${error.column}`); +}); + +parser4.write('valid link here\n'); +parser4.write('(unclosed parenthesis\n'); + +try { + parser4.end(); +} catch (error) { + console.log(' (Error was also thrown as expected)\n'); +} + +// Example 5: Real-world use case - Simulating TCP stream +console.log('Example 5: Simulating TCP stream processing'); +console.log('--------------------------------------------'); + +const parser5 = new StreamParser(); +const receivedLinks = []; + +parser5.on('link', (link) => { + receivedLinks.push(link); + console.log(`Received link: ${link.toString()}`); +}); + +// Simulate receiving network packets with partial data +const packets = [ + '(user: alice', + ') (action: ', + 'login)\n(user', + ': bob) (act', + 'ion: logout)\n', +]; + +console.log('Processing packets...'); +for (const packet of packets) { + parser5.write(packet); +} + +parser5.end(); +console.log(`\nProcessed ${receivedLinks.length} links from stream\n`); + +// Example 6: Memory-efficient processing of large data +console.log('Example 6: Memory-efficient processing'); +console.log('---------------------------------------'); + +const parser6 = new StreamParser(); +let processedCount = 0; + +parser6.on('link', (link) => { + // Process each link immediately without accumulating in memory + processedCount++; + + // Simulate processing (e.g., database insert, validation, etc.) + if (processedCount % 1000 === 0) { + console.log(`Processed ${processedCount} links...`); + } +}); + +// Simulate processing a large file in chunks +const largeData = Array(5000) + .fill(0) + .map((_, i) => `(item: ${i})\n`) + .join(''); + +// Process in 1KB chunks +const chunkSize = 1024; +for (let i = 0; i < largeData.length; i += chunkSize) { + parser6.write(largeData.substring(i, i + chunkSize)); +} + +parser6.end(); +console.log(`Final count: ${processedCount} links processed\n`); + +console.log('=== All examples completed successfully! ==='); diff --git a/examples/rust_streaming_parser.rs b/examples/rust_streaming_parser.rs new file mode 100644 index 0000000..30b2ad5 --- /dev/null +++ b/examples/rust_streaming_parser.rs @@ -0,0 +1,188 @@ +/// Example: Using StreamParser for incremental parsing in Rust +/// +/// This example demonstrates how to use the StreamParser to process +/// Links Notation data incrementally, which is useful for: +/// - Large files that don't fit in memory +/// - Network streaming (e.g., TCP/HTTP streaming) +/// - Real-time processing of incoming data +/// +/// To run this example: +/// ``` +/// cargo run --example rust_streaming_parser +/// ``` + +use links_notation::StreamParser; +use std::sync::{Arc, Mutex}; + +fn main() { + println!("=== Rust StreamParser Example ===\n"); + + // Example 1: Basic usage with callbacks + println!("Example 1: Basic usage with callbacks"); + println!("--------------------------------------"); + + let mut parser1 = StreamParser::new(); + let link_count = Arc::new(Mutex::new(0)); + let count_clone = Arc::clone(&link_count); + + parser1.on_link(move |link| { + let mut count = count_clone.lock().unwrap(); + *count += 1; + println!("Link #{}: {:?}", *count, link); + }); + + let error_received = Arc::new(Mutex::new(false)); + let error_clone = Arc::clone(&error_received); + + parser1.on_error(move |error| { + *error_clone.lock().unwrap() = true; + eprintln!("Error: {}", error); + }); + + // Feed data incrementally + parser1.write("papa (lovesMama: loves mama)\n").unwrap(); + parser1.write("son lovesMama\n").unwrap(); + parser1.write("daughter lovesMama\n").unwrap(); + + let links1 = parser1.finish().unwrap(); + println!("\nTotal links parsed: {}\n", links1.len()); + + // Example 2: Processing data in small chunks + println!("Example 2: Processing data in small chunks"); + println!("-------------------------------------------"); + + let mut parser2 = StreamParser::new(); + + parser2.on_link(|link| { + println!("Parsed: {:?}", link); + }); + + // Simulate character-by-character streaming + let message = "(message: hello world)\n(status: ok)\n"; + for ch in message.chars() { + parser2.write(&ch.to_string()).unwrap(); + } + + let links2 = parser2.finish().unwrap(); + println!("Total links: {}\n", links2.len()); + + // Example 3: Multiline indented syntax + println!("Example 3: Multiline indented syntax"); + println!("-------------------------------------"); + + let mut parser3 = StreamParser::new(); + + parser3.on_link(|link| { + println!("Parsed link: {:?}", link); + }); + + parser3.write("relationship:\n").unwrap(); + parser3.write(" papa\n").unwrap(); + parser3.write(" loves\n").unwrap(); + parser3.write(" mama\n").unwrap(); + + parser3.finish().unwrap(); + println!(); + + // Example 4: Error handling with location info + println!("Example 4: Error handling with location info"); + println!("---------------------------------------------"); + + let mut parser4 = StreamParser::new(); + + parser4.on_error(|error| { + println!("✓ Error caught successfully:"); + println!(" Message: {}", error.message); + if let Some(ref loc) = error.location { + println!(" Location: line {}, column {}", loc.line, loc.column); + } + }); + + parser4.write("valid link here\n").unwrap(); + parser4.write("(unclosed parenthesis\n").unwrap(); + + match parser4.finish() { + Ok(_) => println!("Unexpectedly succeeded"), + Err(e) => println!(" (Error was also returned as expected: {})\n", e), + } + + // Example 5: Simulating TCP stream processing + println!("Example 5: Simulating TCP stream processing"); + println!("--------------------------------------------"); + + let mut parser5 = StreamParser::new(); + let received_links = Arc::new(Mutex::new(Vec::new())); + let links_clone = Arc::clone(&received_links); + + parser5.on_link(move |link| { + let mut links = links_clone.lock().unwrap(); + links.push(format!("{:?}", link)); + println!("Received link: {:?}", link); + }); + + // Simulate receiving network packets with partial data + let packets = vec![ + "(user: alice", + ") (action: ", + "login)\n(user", + ": bob) (act", + "ion: logout)\n", + ]; + + println!("Processing packets..."); + for packet in packets { + parser5.write(packet).unwrap(); + } + + parser5.finish().unwrap(); + let final_count = received_links.lock().unwrap().len(); + println!("\nProcessed {} links from stream\n", final_count); + + // Example 6: Memory-efficient processing of large data + println!("Example 6: Memory-efficient processing"); + println!("---------------------------------------"); + + let mut parser6 = StreamParser::new(); + let processed_count = Arc::new(Mutex::new(0)); + let count_clone = Arc::clone(&processed_count); + + parser6.on_link(move |_link| { + let mut count = count_clone.lock().unwrap(); + *count += 1; + + // Simulate processing (e.g., database insert, validation, etc.) + if *count % 1000 == 0 { + println!("Processed {} links...", *count); + } + }); + + // Simulate processing a large file in chunks + let large_data: String = (0..5000) + .map(|i| format!("(item: {})\n", i)) + .collect::>() + .join(""); + + // Process in 1KB chunks + let chunk_size = 1024; + let bytes = large_data.as_bytes(); + for chunk in bytes.chunks(chunk_size) { + if let Ok(chunk_str) = std::str::from_utf8(chunk) { + parser6.write(chunk_str).unwrap(); + } + } + + parser6.finish().unwrap(); + let final_count = *processed_count.lock().unwrap(); + println!("Final count: {} links processed\n", final_count); + + // Example 7: Position tracking + println!("Example 7: Position tracking"); + println!("-----------------------------"); + + let parser7 = StreamParser::new(); + let pos = parser7.position(); + println!("Initial position: line {}, column {}", pos.line, pos.column); + println!(); + + println!("=== All examples completed successfully! ==="); +} diff --git a/js/README.md b/js/README.md index 8b4d1c3..0e43570 100644 --- a/js/README.md +++ b/js/README.md @@ -131,6 +131,41 @@ const group = new LinksGroup(parsed); console.log(group.format()); ``` +### Streaming Parser (for Large Messages) + +The `StreamParser` allows you to parse Links Notation incrementally, processing data as it arrives without loading the entire message into memory. This is ideal for: + +- Large files that don't fit in memory +- Network streaming (TCP/HTTP) +- Real-time processing of incoming data + +```javascript +import { StreamParser } from 'links-notation'; + +const parser = new StreamParser(); + +// Listen for parsed links +parser.on('link', (link) => { + console.log('Parsed link:', link.toString()); +}); + +// Listen for errors with location information +parser.on('error', (error) => { + console.error(`Error at line ${error.line}, col ${error.column}: ${error.message}`); +}); + +// Feed data incrementally (e.g., from network chunks) +parser.write('papa (lovesMama: loves mama)\n'); +parser.write('son lovesMama\n'); +parser.write('daughter lovesMama\n'); + +// Finish parsing and get all links +const links = parser.end(); +console.log('Total links:', links.length); +``` + +See the [streaming parser example](../examples/js_streaming_parser.js) for more use cases including TCP stream simulation and memory-efficient processing of large datasets. + ## Syntax Examples ### Doublets (2-tuple) @@ -195,6 +230,19 @@ Container for grouping related links. - `constructor(links)` - Create a new group - `format()` - Format the group as a string +#### `StreamParser` + +EventEmitter-based streaming parser for incremental parsing. + +- `constructor(options)` - Create a new streaming parser + - `options.maxInputSize` - Maximum input size in bytes (default: 10MB) + - `options.maxDepth` - Maximum nesting depth (default: 1000) +- `write(chunk)` - Feed a chunk of data to the parser +- `end()` - Finish parsing and return all parsed links +- `position()` - Get current parsing position (line, column, offset) +- Event: `'link'` - Emitted when a link is parsed (callback receives Link object) +- Event: `'error'` - Emitted on parse error (callback receives Error with line/column info) + ## Project Structure - `src/grammar.pegjs` - Peggy.js grammar definition diff --git a/js/src/StreamParser.js b/js/src/StreamParser.js new file mode 100644 index 0000000..4b431ee --- /dev/null +++ b/js/src/StreamParser.js @@ -0,0 +1,319 @@ +import { EventEmitter } from 'events'; +import { Link } from './Link.js'; +import * as parserModule from './parser-generated.js'; + +/** + * Streaming parser for Links Notation + * + * This class allows you to parse Links Notation incrementally, + * processing data as it arrives without loading the entire message + * into memory. + * + * @extends EventEmitter + * + * @example + * const parser = new StreamParser(); + * + * parser.on('link', (link) => { + * console.log('Parsed link:', link); + * }); + * + * parser.on('error', (error) => { + * console.error(`Error at line ${error.line}, col ${error.column}: ${error.message}`); + * }); + * + * // Feed data incrementally + * parser.write('papa (lovesMama: '); + * parser.write('loves mama)\n'); + * parser.write('son lovesMama\n'); + * + * // Finish parsing + * const links = parser.end(); + */ +export class StreamParser extends EventEmitter { + /** + * Create a new StreamParser + * @param {Object} options - Parser options + * @param {number} options.maxInputSize - Maximum input size in bytes (default: 10MB) + * @param {number} options.maxDepth - Maximum nesting depth (default: 1000) + */ + constructor(options = {}) { + super(); + this.buffer = ''; + this.maxInputSize = options.maxInputSize || 10 * 1024 * 1024; // 10MB default + this.maxDepth = options.maxDepth || 1000; + this.lineOffset = 1; + this.charOffset = 0; + this.pendingLinks = []; + this.totalBytesWritten = 0; + } + + /** + * Write a chunk of data to the parser + * + * This method attempts to parse complete links from the buffer. + * Links are parsed incrementally line-by-line when possible. + * + * @param {string} chunk - The data chunk to write + * @throws {Error} If input size exceeds maximum allowed size + */ + write(chunk) { + if (typeof chunk !== 'string') { + const error = this._createError('Input must be a string'); + this.emit('error', error); + throw error; + } + + this.totalBytesWritten += chunk.length; + if (this.totalBytesWritten > this.maxInputSize) { + const error = this._createError( + `Input size exceeds maximum allowed size of ${this.maxInputSize} bytes` + ); + this.emit('error', error); + throw error; + } + + this.buffer += chunk; + this._tryParseIncremental(); + } + + /** + * Try to parse complete links from the buffer incrementally + * @private + */ + _tryParseIncremental() { + // Try to parse line by line for simple cases + // We look for complete lines (ending with \n) + let newlinePos; + while ((newlinePos = this.buffer.indexOf('\n')) !== -1) { + const lineWithNewline = this.buffer.substring(0, newlinePos + 1); + + // Check if this line looks complete (not part of a multi-line structure) + // We do a simple heuristic: count open/close parens + const openParens = (lineWithNewline.match(/\(/g) || []).length; + const closeParens = (lineWithNewline.match(/\)/g) || []).length; + + // If parens are balanced and we have a complete line, try to parse it + if (openParens === closeParens) { + try { + const rawResult = parserModule.parse(lineWithNewline); + const links = this._transformResult(rawResult); + + // Successfully parsed the line + for (const link of links) { + this.pendingLinks.push(link); + this.emit('link', link); + } + + // Remove the parsed line from buffer + this.buffer = this.buffer.substring(newlinePos + 1); + this.lineOffset += 1; + this.charOffset = 0; + continue; + } catch (error) { + // If parsing fails, it might be part of a larger structure + // Break and wait for more data + break; + } + } + + // If we can't parse this line yet, break and wait for more data + break; + } + } + + /** + * Finish parsing and return all parsed links + * + * This method should be called after all data has been written. + * It attempts to parse any remaining data in the buffer. + * + * @returns {Link[]} Array of all parsed links + * @throws {Error} If there is unparsed data in the buffer or if the final parse fails + */ + end() { + // If there's any remaining data in the buffer, try to parse it + if (this.buffer.length > 0) { + const remaining = this.buffer.trim(); + if (remaining.length > 0) { + try { + const rawResult = parserModule.parse(remaining); + const links = this._transformResult(rawResult); + + for (const link of links) { + this.pendingLinks.push(link); + this.emit('link', link); + } + + this.buffer = ''; + } catch (error) { + const parseError = this._createError( + `Failed to parse remaining data: ${error.message}`, + error.location + ); + this.emit('error', parseError); + throw parseError; + } + } + } + + return this.pendingLinks; + } + + /** + * Transform parsed result into Link objects + * @private + * @param {*} rawResult - Raw result from parser + * @returns {Link[]} Array of Link objects + */ + _transformResult(rawResult) { + const links = []; + const items = Array.isArray(rawResult) ? rawResult : [rawResult]; + + for (const item of items) { + if (item !== null && item !== undefined) { + this._collectLinks(item, [], links); + } + } + return links; + } + + /** + * Collect links from parsed items + * @private + */ + _collectLinks(item, parentPath, result) { + if (item === null || item === undefined) return; + + // For items with children (indented structure) + if (item.children && item.children.length > 0) { + // Special case: If this is an ID with empty values but has children, + // the children should become the values of the link (indented ID syntax) + if (item.id && (!item.values || item.values.length === 0)) { + const childValues = item.children.map((child) => { + if (child.values && child.values.length === 1) { + return this._transformLink(child.values[0]); + } + return this._transformLink(child); + }); + const linkWithChildren = { + id: item.id, + values: childValues, + }; + const currentLink = this._transformLink(linkWithChildren); + + if (parentPath.length === 0) { + result.push(currentLink); + } else { + result.push(this._combinePathElements(parentPath, currentLink)); + } + } else { + // Regular indented structure + const currentLink = this._transformLink(item); + + if (parentPath.length === 0) { + result.push(currentLink); + } else { + result.push(this._combinePathElements(parentPath, currentLink)); + } + + const newPath = [...parentPath, currentLink]; + + for (const child of item.children) { + this._collectLinks(child, newPath, result); + } + } + } else { + // Leaf item or item with inline values + const currentLink = this._transformLink(item); + + if (parentPath.length === 0) { + result.push(currentLink); + } else { + result.push(this._combinePathElements(parentPath, currentLink)); + } + } + } + + /** + * Combine path elements + * @private + */ + _combinePathElements(pathElements, current) { + if (pathElements.length === 0) return current; + if (pathElements.length === 1) { + const combined = new Link(null, [pathElements[0], current]); + combined._isFromPathCombination = true; + return combined; + } + + const parentPath = pathElements.slice(0, -1); + const lastElement = pathElements[pathElements.length - 1]; + + let parent = this._combinePathElements(parentPath, lastElement); + + const combined = new Link(null, [parent, current]); + combined._isFromPathCombination = true; + return combined; + } + + /** + * Transform a parsed item into a Link object + * @private + */ + _transformLink(item) { + if (item === null || item === undefined) return null; + + if (item instanceof Link) { + return item; + } + + if (item.id !== undefined && !item.values && !item.children) { + return new Link(item.id); + } + + if (item.values && Array.isArray(item.values)) { + const link = new Link(item.id || null, []); + link.values = item.values.map((v) => this._transformLink(v)); + return link; + } + + return new Link(item.id || null, []); + } + + /** + * Create an error object with location information + * @private + * @param {string} message - Error message + * @param {Object} location - Location information from parser + * @returns {Error} Error object with line and column information + */ + _createError(message, location = null) { + const error = new Error(message); + + if (location) { + error.line = location.start?.line || this.lineOffset; + error.column = location.start?.column || this.charOffset; + error.offset = location.start?.offset || this.buffer.length; + error.location = location; + } else { + error.line = this.lineOffset; + error.column = this.charOffset; + error.offset = this.buffer.length; + } + + return error; + } + + /** + * Get the current parsing position + * @returns {Object} Object with line, column, and offset information + */ + position() { + return { + line: this.lineOffset, + column: this.charOffset, + offset: this.buffer.length, + }; + } +} diff --git a/js/src/index.js b/js/src/index.js index 54e133b..8d1ccd4 100644 --- a/js/src/index.js +++ b/js/src/index.js @@ -1,5 +1,6 @@ export { Link, formatLinks } from './Link.js'; export { LinksGroup } from './LinksGroup.js'; export { Parser } from './Parser.js'; +export { StreamParser } from './StreamParser.js'; export { FormatConfig } from './FormatConfig.js'; export { FormatOptions } from './FormatOptions.js'; diff --git a/js/tests/StreamParser.test.js b/js/tests/StreamParser.test.js new file mode 100644 index 0000000..c1151b8 --- /dev/null +++ b/js/tests/StreamParser.test.js @@ -0,0 +1,255 @@ +import { describe, test, expect } from 'bun:test'; +import { StreamParser } from '../src/StreamParser.js'; +import { Link } from '../src/Link.js'; + +describe('StreamParser', () => { + test('should parse simple single-line links incrementally', () => { + const parser = new StreamParser(); + const links = []; + + parser.on('link', (link) => { + links.push(link); + }); + + parser.write('papa loves mama\n'); + parser.write('son loves papa\n'); + + const result = parser.end(); + + expect(result.length).toBe(2); + expect(links.length).toBe(2); + }); + + test('should parse data in small chunks', () => { + const parser = new StreamParser(); + const links = []; + + parser.on('link', (link) => { + links.push(link); + }); + + // Feed data in very small chunks + parser.write('papa '); + parser.write('(loves'); + parser.write('Mama: '); + parser.write('loves '); + parser.write('mama)\n'); + + const result = parser.end(); + + expect(result.length).toBeGreaterThanOrEqual(1); + expect(links.length).toBeGreaterThanOrEqual(1); + }); + + test('should emit error event on parse failure', () => { + const parser = new StreamParser(); + const errors = []; + + parser.on('error', (error) => { + errors.push(error); + }); + + parser.write('papa (loves mama\n'); // Missing closing paren + + try { + parser.end(); + } catch (error) { + // Expected to throw + } + + // Should have received an error + expect(errors.length).toBeGreaterThan(0); + }); + + test('should parse multiline indented syntax', () => { + const parser = new StreamParser(); + + parser.write('3:\n'); + parser.write(' papa\n'); + parser.write(' loves\n'); + parser.write(' mama\n'); + + const result = parser.end(); + + expect(result.length).toBeGreaterThanOrEqual(1); + }); + + test('should handle empty input', () => { + const parser = new StreamParser(); + + const result = parser.end(); + + expect(result.length).toBe(0); + }); + + test('should handle whitespace-only input', () => { + const parser = new StreamParser(); + + parser.write(' \n \n \n'); + + const result = parser.end(); + + expect(result.length).toBe(0); + }); + + test('should parse links with IDs', () => { + const parser = new StreamParser(); + const links = []; + + parser.on('link', (link) => { + links.push(link); + }); + + parser.write('(lovesMama: loves mama)\n'); + + const result = parser.end(); + + expect(result.length).toBe(1); + expect(links.length).toBe(1); + }); + + test('should provide error location information', () => { + const parser = new StreamParser(); + let errorWithLocation = null; + + parser.on('error', (error) => { + errorWithLocation = error; + }); + + parser.write('invalid ( syntax here\n'); + + try { + parser.end(); + } catch (error) { + // Expected to throw + } + + if (errorWithLocation) { + expect(errorWithLocation.line).toBeDefined(); + expect(errorWithLocation.column).toBeDefined(); + } + }); + + test('should work without event listeners', () => { + const parser = new StreamParser(); + + parser.write('papa loves mama\n'); + parser.write('son loves papa\n'); + + const result = parser.end(); + + expect(result.length).toBe(2); + }); + + test('should track parsing position', () => { + const parser = new StreamParser(); + + const pos1 = parser.position(); + expect(pos1.line).toBe(1); + expect(pos1.column).toBe(0); + + parser.write('papa loves mama\n'); + + const pos2 = parser.position(); + expect(pos2.line).toBe(2); + }); + + test('should enforce max input size', () => { + const parser = new StreamParser({ maxInputSize: 100 }); + let errorThrown = false; + + parser.on('error', () => { + errorThrown = true; + }); + + try { + parser.write('a'.repeat(101)); + } catch (error) { + expect(error.message).toContain('exceeds maximum allowed size'); + errorThrown = true; + } + + expect(errorThrown).toBe(true); + }); + + test('should reject non-string input', () => { + const parser = new StreamParser(); + let errorThrown = false; + + parser.on('error', (error) => { + expect(error.message).toContain('Input must be a string'); + errorThrown = true; + }); + + try { + parser.write(123); + } catch (error) { + errorThrown = true; + } + + expect(errorThrown).toBe(true); + }); + + test('should parse quoted strings', () => { + const parser = new StreamParser(); + + parser.write('(id: "value with spaces")\n'); + + const result = parser.end(); + + expect(result.length).toBe(1); + }); + + test('should handle nested links', () => { + const parser = new StreamParser(); + + parser.write('(outer: (inner: value))\n'); + + const result = parser.end(); + + expect(result.length).toBe(1); + }); + + test('should parse multiple links in one write', () => { + const parser = new StreamParser(); + const links = []; + + parser.on('link', (link) => { + links.push(link); + }); + + parser.write('papa loves mama\nson loves papa\ndaughter loves mama\n'); + + const result = parser.end(); + + expect(result.length).toBe(3); + expect(links.length).toBe(3); + }); + + test('should handle links with multiple values', () => { + const parser = new StreamParser(); + + parser.write('(id: value1 value2 value3)\n'); + + const result = parser.end(); + + expect(result.length).toBe(1); + const link = result[0]; + expect(link.values.length).toBe(3); + }); + + test('should emit link event for each parsed link', () => { + const parser = new StreamParser(); + let linkCount = 0; + + parser.on('link', () => { + linkCount++; + }); + + parser.write('papa loves mama\n'); + parser.write('son loves papa\n'); + parser.end(); + + expect(linkCount).toBe(2); + }); +}); diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 85b2f92..6d4da48 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -14,3 +14,7 @@ path = "src/lib.rs" [dependencies] nom = "8.0" + +[[example]] +name = "rust_streaming_parser" +path = "../examples/rust_streaming_parser.rs" diff --git a/rust/README.md b/rust/README.md index 2ec7727..f5e3b71 100644 --- a/rust/README.md +++ b/rust/README.md @@ -145,6 +145,51 @@ let quoted = r#"("quoted id": "value with spaces")"#; let parsed = parse_lino(quoted)?; ``` +### Streaming Parser (for Large Messages) + +The `StreamParser` allows you to parse Links Notation incrementally, processing data as it arrives without loading the entire message into memory. This is ideal for: + +- Large files that don't fit in memory +- Network streaming (TCP/HTTP) +- Real-time processing of incoming data + +```rust +use links_notation::StreamParser; +use std::sync::{Arc, Mutex}; + +let mut parser = StreamParser::new(); + +// Set up link callback +let count = Arc::new(Mutex::new(0)); +let count_clone = Arc::clone(&count); +parser.on_link(move |link| { + let mut c = count_clone.lock().unwrap(); + *c += 1; + println!("Parsed link #{}: {:?}", *c, link); +}); + +// Set up error callback with location info +parser.on_error(|error| { + if let Some(ref loc) = error.location { + eprintln!("Error at line {}, col {}: {}", + loc.line, loc.column, error.message); + } else { + eprintln!("Error: {}", error.message); + } +}); + +// Feed data incrementally +parser.write("papa (lovesMama: loves mama)\n")?; +parser.write("son lovesMama\n")?; +parser.write("daughter lovesMama\n")?; + +// Finish parsing and get all links +let links = parser.finish()?; +println!("Total links: {}", links.len()); +``` + +See the [streaming parser example](../examples/rust_streaming_parser.rs) for more use cases including TCP stream simulation and memory-efficient processing of large datasets. + ## Syntax Examples ### Doublets (2-tuple) diff --git a/rust/src/lib.rs b/rust/src/lib.rs index c3be7c4..6e31007 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -1,6 +1,8 @@ pub mod format_config; pub mod parser; +pub mod stream_parser; +pub use stream_parser::{ErrorLocation, StreamParseError, StreamParser}; use format_config::FormatConfig; use std::error::Error as StdError; use std::fmt; diff --git a/rust/src/stream_parser.rs b/rust/src/stream_parser.rs new file mode 100644 index 0000000..3f28d11 --- /dev/null +++ b/rust/src/stream_parser.rs @@ -0,0 +1,402 @@ +use crate::parser::{parse_document, Link}; +use crate::{LiNo, ParseError}; +use std::fmt; + +/// Location information for parse errors +#[derive(Debug, Clone, PartialEq)] +pub struct ErrorLocation { + pub line: usize, + pub column: usize, + pub offset: usize, +} + +impl fmt::Display for ErrorLocation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "line {}, column {} (offset {})", + self.line, self.column, self.offset + ) + } +} + +/// Error type for streaming parser with location information +#[derive(Debug)] +pub struct StreamParseError { + pub message: String, + pub location: Option, +} + +impl fmt::Display for StreamParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref loc) = self.location { + write!(f, "Parse error at {}: {}", loc, self.message) + } else { + write!(f, "Parse error: {}", self.message) + } + } +} + +impl std::error::Error for StreamParseError {} + +impl From for StreamParseError { + fn from(err: ParseError) -> Self { + StreamParseError { + message: err.to_string(), + location: None, + } + } +} + +/// Callback type for link events +pub type LinkCallback = Box) + Send>; + +/// Callback type for error events +pub type ErrorCallback = Box; + +/// Streaming parser that processes Links Notation incrementally +/// +/// This parser allows you to feed data in chunks and receive callbacks +/// as links are parsed, enabling memory-efficient processing of large +/// messages. +/// +/// # Example +/// +/// ```rust +/// use links_notation::StreamParser; +/// +/// let mut parser = StreamParser::new(); +/// +/// // Set up link callback +/// parser.on_link(|link| { +/// println!("Parsed link: {:?}", link); +/// }); +/// +/// // Set up error callback +/// parser.on_error(|error| { +/// eprintln!("Parse error: {}", error); +/// }); +/// +/// // Feed data incrementally +/// parser.write("papa (lovesMama: ").unwrap(); +/// parser.write("loves mama)\n").unwrap(); +/// parser.write("son lovesMama\n").unwrap(); +/// +/// // Finish parsing +/// let links = parser.finish().unwrap(); +/// ``` +pub struct StreamParser { + buffer: String, + link_callback: Option, + error_callback: Option, + line_offset: usize, + char_offset: usize, + pending_links: Vec>, +} + +impl Default for StreamParser { + fn default() -> Self { + Self::new() + } +} + +impl StreamParser { + /// Create a new streaming parser + pub fn new() -> Self { + StreamParser { + buffer: String::new(), + link_callback: None, + error_callback: None, + line_offset: 1, + char_offset: 0, + pending_links: Vec::new(), + } + } + + /// Set callback for parsed links + /// + /// The callback will be invoked each time a complete link is parsed. + pub fn on_link(&mut self, callback: F) + where + F: FnMut(&LiNo) + Send + 'static, + { + self.link_callback = Some(Box::new(callback)); + } + + /// Set callback for parse errors + /// + /// The callback will be invoked when a parse error occurs, + /// with location information when available. + pub fn on_error(&mut self, callback: F) + where + F: FnMut(&StreamParseError) + Send + 'static, + { + self.error_callback = Some(Box::new(callback)); + } + + /// Write a chunk of data to the parser + /// + /// This method attempts to parse complete links from the buffer. + /// Links are parsed incrementally line-by-line when possible. + /// + /// # Errors + /// + /// Returns an error if parsing fails. The error will include + /// location information when available. + pub fn write(&mut self, chunk: &str) -> Result<(), StreamParseError> { + self.buffer.push_str(chunk); + self.try_parse_incremental()?; + Ok(()) + } + + /// Try to parse complete links from the buffer incrementally + fn try_parse_incremental(&mut self) -> Result<(), StreamParseError> { + // Try to parse line by line for simple cases + // We look for complete lines (ending with \n) + while let Some(newline_pos) = self.buffer.find('\n') { + let line_with_newline = &self.buffer[..=newline_pos]; + + // Check if this line looks complete (not part of a multi-line structure) + // We do a simple heuristic: count open/close parens + let open_parens = line_with_newline.matches('(').count(); + let close_parens = line_with_newline.matches(')').count(); + + // If parens are balanced and we have a complete line, try to parse it + if open_parens == close_parens { + let line_to_parse = line_with_newline.to_string(); + + // Try to parse this line + match parse_document(&line_to_parse) { + Ok((remaining, links)) => { + if remaining.is_empty() { + // Successfully parsed the line + for internal_link in links { + let lino = Self::convert_link_to_lino(&internal_link); + self.pending_links.push(lino.clone()); + + // Call the callback if set + if let Some(ref mut callback) = self.link_callback { + callback(&lino); + } + } + + // Remove the parsed line from buffer + self.buffer.drain(..=newline_pos); + self.line_offset += 1; + self.char_offset = 0; + continue; + } + } + Err(_) => { + // If parsing fails, it might be part of a larger structure + // Break and wait for more data + break; + } + } + } + + // If we can't parse this line yet, break and wait for more data + break; + } + + Ok(()) + } + + /// Finish parsing and return all parsed links + /// + /// This method should be called after all data has been written. + /// It attempts to parse any remaining data in the buffer. + /// + /// # Errors + /// + /// Returns an error if there is unparsed data in the buffer + /// or if the final parse fails. + pub fn finish(mut self) -> Result>, StreamParseError> { + // If there's any remaining data in the buffer, try to parse it + if !self.buffer.is_empty() { + let remaining = self.buffer.trim(); + if !remaining.is_empty() { + match parse_document(remaining) { + Ok((leftover, links)) => { + if !leftover.is_empty() { + let error = StreamParseError { + message: format!("Unexpected content: {}", leftover), + location: Some(ErrorLocation { + line: self.line_offset, + column: self.char_offset, + offset: self.buffer.len() - leftover.len(), + }), + }; + if let Some(ref mut callback) = self.error_callback { + callback(&error); + } + return Err(error); + } + + for internal_link in links { + let lino = Self::convert_link_to_lino(&internal_link); + self.pending_links.push(lino.clone()); + + if let Some(ref mut callback) = self.link_callback { + callback(&lino); + } + } + } + Err(e) => { + let error = StreamParseError { + message: format!("Failed to parse remaining data: {}", e), + location: Some(ErrorLocation { + line: self.line_offset, + column: self.char_offset, + offset: 0, + }), + }; + if let Some(ref mut callback) = self.error_callback { + callback(&error); + } + return Err(error); + } + } + } + } + + Ok(self.pending_links) + } + + /// Convert internal Link representation to public LiNo type + fn convert_link_to_lino(link: &Link) -> LiNo { + if link.values.is_empty() && link.children.is_empty() { + // Simple reference + if let Some(ref id) = link.id { + LiNo::Ref(id.clone()) + } else { + LiNo::Link { + id: None, + values: vec![], + } + } + } else { + // Link with values + let mut all_values = Vec::new(); + + // Add regular values + for value in &link.values { + all_values.push(Self::convert_link_to_lino(value)); + } + + // Add children as values (for indented syntax) + for child in &link.children { + all_values.push(Self::convert_link_to_lino(child)); + } + + LiNo::Link { + id: link.id.clone(), + values: all_values, + } + } + } + + /// Get the current parsing position for error reporting + pub fn position(&self) -> ErrorLocation { + ErrorLocation { + line: self.line_offset, + column: self.char_offset, + offset: self.buffer.len(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::{Arc, Mutex}; + + #[test] + fn test_stream_parser_simple() { + let mut parser = StreamParser::new(); + let links_received = Arc::new(Mutex::new(Vec::new())); + let links_clone = Arc::clone(&links_received); + + parser.on_link(move |link| { + links_clone.lock().unwrap().push(format!("{:?}", link)); + }); + + parser.write("papa loves mama\n").unwrap(); + parser.write("son loves papa\n").unwrap(); + + let result = parser.finish().unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(links_received.lock().unwrap().len(), 2); + } + + #[test] + fn test_stream_parser_incremental() { + let mut parser = StreamParser::new(); + let count = Arc::new(Mutex::new(0)); + let count_clone = Arc::clone(&count); + + parser.on_link(move |_link| { + *count_clone.lock().unwrap() += 1; + }); + + // Feed data in small chunks + parser.write("papa ").unwrap(); + parser.write("(loves").unwrap(); + parser.write("Mama: ").unwrap(); + parser.write("loves ").unwrap(); + parser.write("mama)\n").unwrap(); + + let result = parser.finish().unwrap(); + assert!(result.len() >= 1); + assert!(*count.lock().unwrap() >= 1); + } + + #[test] + fn test_stream_parser_error_callback() { + let mut parser = StreamParser::new(); + let error_received = Arc::new(Mutex::new(false)); + let error_clone = Arc::clone(&error_received); + + parser.on_error(move |_error| { + *error_clone.lock().unwrap() = true; + }); + + parser.write("papa (loves mama\n").unwrap(); // Missing closing paren + + let result = parser.finish(); + // Should have error due to unbalanced parentheses + assert!(result.is_err() || *error_received.lock().unwrap()); + } + + #[test] + fn test_stream_parser_multiline() { + let mut parser = StreamParser::new(); + + parser.write("3:\n").unwrap(); + parser.write(" papa\n").unwrap(); + parser.write(" loves\n").unwrap(); + parser.write(" mama\n").unwrap(); + + let result = parser.finish().unwrap(); + assert!(result.len() >= 1); + } + + #[test] + fn test_stream_parser_without_callbacks() { + let mut parser = StreamParser::new(); + + parser.write("papa loves mama\n").unwrap(); + parser.write("son loves papa\n").unwrap(); + + let result = parser.finish().unwrap(); + assert_eq!(result.len(), 2); + } + + #[test] + fn test_stream_parser_position_tracking() { + let parser = StreamParser::new(); + let pos = parser.position(); + assert_eq!(pos.line, 1); + assert_eq!(pos.column, 0); + } +} From 5783eae31abde251ddb67a17934c47973a63fb8b Mon Sep 17 00:00:00 2001 From: konard Date: Tue, 13 Jan 2026 13:10:48 +0100 Subject: [PATCH 3/5] Fix code formatting issues (rustfmt and prettier) - Apply rustfmt to Rust files - Apply prettier to README files - No functional changes Co-Authored-By: Claude Sonnet 4.5 --- README.md | 20 ++++++++++---------- examples/rust_streaming_parser.rs | 1 - js/README.md | 4 +++- rust/README.md | 4 ++-- rust/src/lib.rs | 2 +- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index e04cfb2..3930e4e 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # [links-notation](https://github.com/link-foundation/links-notation) (languages: en • [ru](README.ru.md)) -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/js/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=js) | [![npm Version and Downloads count](https://img.shields.io/npm/v/links-notation?label=npm&style=flat)](https://www.npmjs.com/package/links-notation) | **[JavaScript](js/README.md)** | -|:-|-:|:-| -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/rust/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=rust) | [![Crates.io Version and Downloads count](https://img.shields.io/crates/v/links-notation?label=crates.io&style=flat)](https://crates.io/crates/links-notation) | **[Rust](rust/README.md)** | -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/csharp/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=csharp) | [![NuGet Version and Downloads count](https://img.shields.io/nuget/v/Link.Foundation.Links.Notation?label=nuget&style=flat)](https://www.nuget.org/packages/Link.Foundation.Links.Notation) | **[C#](csharp/README.md)** | -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/python/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=python) | [![PyPI Version and Downloads count](https://img.shields.io/pypi/v/links-notation?label=pypi&style=flat)](https://pypi.org/project/links-notation/) | **[Python](python/README.md)** | -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/go/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=go) | [![Go Reference](https://pkg.go.dev/badge/github.com/link-foundation/links-notation/go.svg)](https://pkg.go.dev/github.com/link-foundation/links-notation/go) | **[Go](go/README.md)** | -| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/java/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=java) | [![Maven Central Version](https://img.shields.io/maven-central/v/io.github.link-foundation/links-notation?label=maven&style=flat)](https://central.sonatype.com/artifact/io.github.link-foundation/links-notation) | **[Java](java/README.md)** | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/js/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=js) | [![npm Version and Downloads count](https://img.shields.io/npm/v/links-notation?label=npm&style=flat)](https://www.npmjs.com/package/links-notation) | **[JavaScript](js/README.md)** | +| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------- | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/rust/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=rust) | [![Crates.io Version and Downloads count](https://img.shields.io/crates/v/links-notation?label=crates.io&style=flat)](https://crates.io/crates/links-notation) | **[Rust](rust/README.md)** | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/csharp/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=csharp) | [![NuGet Version and Downloads count](https://img.shields.io/nuget/v/Link.Foundation.Links.Notation?label=nuget&style=flat)](https://www.nuget.org/packages/Link.Foundation.Links.Notation) | **[C#](csharp/README.md)** | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/python/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=python) | [![PyPI Version and Downloads count](https://img.shields.io/pypi/v/links-notation?label=pypi&style=flat)](https://pypi.org/project/links-notation/) | **[Python](python/README.md)** | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/go/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=go) | [![Go Reference](https://pkg.go.dev/badge/github.com/link-foundation/links-notation/go.svg)](https://pkg.go.dev/github.com/link-foundation/links-notation/go) | **[Go](go/README.md)** | +| [![Actions Status](https://github.com/link-foundation/links-notation/workflows/java/badge.svg)](https://github.com/link-foundation/links-notation/actions?workflow=java) | [![Maven Central Version](https://img.shields.io/maven-central/v/io.github.link-foundation/links-notation?label=maven&style=flat)](https://central.sonatype.com/artifact/io.github.link-foundation/links-notation) | **[Java](java/README.md)** | [![Gitpod](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/link-foundation/links-notation) [![Open in GitHub Codespaces](https://img.shields.io/badge/GitHub%20Codespaces-Open-181717?logo=github)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=link-foundation/links-notation) @@ -38,7 +38,7 @@ var links = parser.Parse("papa (lovesMama: loves mama)"); ### JavaScript ```javascript -import { Parser } from 'links-notation'; +import { Parser } from "links-notation"; const parser = new Parser(); const links = parser.parse("papa (lovesMama: loves mama)"); ``` @@ -123,7 +123,7 @@ This is equivalent to: (3: papa loves mama) ``` -So that means that *this* text is also links notation. So most of the +So that means that _this_ text is also links notation. So most of the text in the world already may be parsed as links notation. That makes links notation the most easy an natural/intuitive/native one. @@ -134,7 +134,7 @@ structured data as links between ~~entities~~ references to links. It's designed to be: - **Natural**: Most text can already be parsed as links notation -- **Flexible**: Supports any number of references in each link +- **Flexible**: Supports any number of references in each link - **Universal**: Can represent doublets, triplets, and N-tuples - **Hierarchical**: Supports nested structures with indentation diff --git a/examples/rust_streaming_parser.rs b/examples/rust_streaming_parser.rs index 30b2ad5..e1744d1 100644 --- a/examples/rust_streaming_parser.rs +++ b/examples/rust_streaming_parser.rs @@ -10,7 +10,6 @@ /// ``` /// cargo run --example rust_streaming_parser /// ``` - use links_notation::StreamParser; use std::sync::{Arc, Mutex}; diff --git a/js/README.md b/js/README.md index 0e43570..2969ab5 100644 --- a/js/README.md +++ b/js/README.md @@ -151,7 +151,9 @@ parser.on('link', (link) => { // Listen for errors with location information parser.on('error', (error) => { - console.error(`Error at line ${error.line}, col ${error.column}: ${error.message}`); + console.error( + `Error at line ${error.line}, col ${error.column}: ${error.message}` + ); }); // Feed data incrementally (e.g., from network chunks) diff --git a/rust/README.md b/rust/README.md index f5e3b71..348ebbe 100644 --- a/rust/README.md +++ b/rust/README.md @@ -65,11 +65,11 @@ fn main() { son lovesMama daughter lovesMama all (love mama)"#; - + match parse_lino(input) { Ok(parsed) => { println!("Parsed: {}", parsed); - + // Access the structure if let LiNo::Link { values, .. } = parsed { for link in values { diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 6e31007..9626449 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -2,10 +2,10 @@ pub mod format_config; pub mod parser; pub mod stream_parser; -pub use stream_parser::{ErrorLocation, StreamParseError, StreamParser}; use format_config::FormatConfig; use std::error::Error as StdError; use std::fmt; +pub use stream_parser::{ErrorLocation, StreamParseError, StreamParser}; /// Error type for Lino parsing #[derive(Debug)] From a3abb94062280cbdb8969ed510ce59110fdac346 Mon Sep 17 00:00:00 2001 From: konard Date: Tue, 13 Jan 2026 13:12:29 +0100 Subject: [PATCH 4/5] Fix clippy warnings in streaming parser tests Replace len() >= 1 with !is_empty() for better idiomatic Rust code. Co-Authored-By: Claude Sonnet 4.5 --- rust/src/stream_parser.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/src/stream_parser.rs b/rust/src/stream_parser.rs index 3f28d11..fac90be 100644 --- a/rust/src/stream_parser.rs +++ b/rust/src/stream_parser.rs @@ -347,7 +347,7 @@ mod tests { parser.write("mama)\n").unwrap(); let result = parser.finish().unwrap(); - assert!(result.len() >= 1); + assert!(!result.is_empty()); assert!(*count.lock().unwrap() >= 1); } @@ -378,7 +378,7 @@ mod tests { parser.write(" mama\n").unwrap(); let result = parser.finish().unwrap(); - assert!(result.len() >= 1); + assert!(!result.is_empty()); } #[test] From 5604d65617ab46d781ab957273120bec11678d6c Mon Sep 17 00:00:00 2001 From: konard Date: Tue, 13 Jan 2026 13:15:09 +0100 Subject: [PATCH 5/5] Revert "Initial commit with task details" This reverts commit ed06731bb450412c3811d3c932af682c806b95a5. --- CLAUDE.md | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md deleted file mode 100644 index 433d44c..0000000 --- a/CLAUDE.md +++ /dev/null @@ -1,5 +0,0 @@ -Issue to solve: https://github.com/link-foundation/links-notation/issues/197 -Your prepared branch: issue-197-0f9688b95d4f -Your prepared working directory: /tmp/gh-issue-solver-1768305791816 - -Proceed.