A robust tool for syntactic analysis.
It consumes tokens generated by a lexer and constructs an AST for interpretation or compilation.
-
npm install @je-es/parser
import * as Parser from "@je-es/parser";
-
// [1] create parser rules const parser_rules : Parser.Rules = [ Parser.createRule('Root', Parser.oneOrMore(Parser.rule('Stmt')), { build: (data: Parser.Result) => { const arr = data.getRepeatResult()!; const stmts = arr.map((x) => x.getCustomData()! as AST.StmtNode); return Parser.Result.createAsCustom('passed', 'root', stmts, data.span); } } ), Parser.createRule('Ident', Parser.token('ident'), { build: (data: Parser.Result) => { const identResult = data.getTokenData()!; return Parser.Result.createAsCustom('passed', 'ident', AST.IdentNode.create( identResult.span, identResult.value!, false), data.span ); }, errors: [ Parser.error(0, "Expected identifier") ] } ), // Include required rules ...Type, ...Expr, ...Stmt, ];
// [2] create parser settings const parser_settings : Parser.ParserSettings = { startRule : 'Root', errorRecovery : { mode: 'resilient', maxErrors: 99 }, ignored : ['ws', 'comment'], debug : 'off', maxDepth : 9999, maxCacheSize : 1024, // 1GB };
// [3] parse tokens using your rules const parser_result = Parser.parse([<tokens>], parser_rules, parser_settings); // Hint: to get tokens use `@je-es/lexer`
-
-
1. @je-es/lexer
2. @je-es/ast
3. @je-es/lsp
-

