A fast, robust lexer that performs lexical analysis, efficiently converting raw text or source code into a clean, digestible stream of tokens ready for parsing.
-
npm install @je-es/lexer
import * as Lexer from "@je-es/lexer";
-
// [1] create lexer rules
const lexer_rules : Lexer.Rules = {
    // ═══ Whitespace ═══
    ws      : /\s+/,

    // ═══ Literals ═══
    bin     : /0b[01]+/,
    oct     : /0o[0-7]+/,
    ...

    // ═══ Keywords ═══
    try     : 'try',
    catch   : 'catch',
    ...

    // ═══ Types ═══
    f_type  : ['f16', 'f32', 'f64', 'f80', 'f128'],
    ...

    // ═══ Operators ═══
    '=='    : '==',
    '!='    : '!=',
    ...

    // ═══ Identifier ═══
    ident   : /[a-zA-Z_][a-zA-Z0-9_]*/,
};
// [2] tokenize input using your rules
const tokens = Lexer.tokenize('<input>', lexer_rules);
-
-
1. @je-es/lexer
2. @je-es/ast
3. @je-es/lsp
-

