File size: 5,691 Bytes
6efa67a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 |
import { chevrotain } from '../../../lib.js';
import { MacroLexer } from './MacroLexer.js';
const { CstParser } = chevrotain;
/** @typedef {import('chevrotain').TokenType} TokenType */
/** @typedef {import('chevrotain').CstNode} CstNode */
/** @typedef {import('chevrotain').ILexingError} ILexingError */
/** @typedef {import('chevrotain').IRecognitionException} IRecognitionException */
/**
 * The singleton instance of the MacroParser.
 *
 * Exported under the class name. The binding is populated at the bottom of
 * this file, after the class declaration has been evaluated (class
 * declarations are not hoisted), via the live `export { instance as ... }`
 * binding below.
 *
 * @type {MacroParser}
 */
let instance;
export { instance as MacroParser };
/**
 * CST parser for the macro language, built on chevrotain.
 *
 * The grammar is defined in the constructor via chevrotain's DSL
 * ($.RULE / $.OR / $.MANY / ...); chevrotain records these calls during
 * `performSelfAnalysis()`, so the rule bodies below ARE the grammar —
 * their structure and ordering is significant.
 */
class MacroParser extends CstParser {
    /** @type {MacroParser} */ static #instance;
    // Lazily created singleton; this getter is the only place the private constructor is invoked.
    /** @type {MacroParser} */ static get instance() { return MacroParser.#instance ?? (MacroParser.#instance = new MacroParser()); }
    /** @private */
    constructor() {
        super(MacroLexer.def, {
            traceInitPerf: false,
            nodeLocationTracking: 'full', // record start/end offsets and lines for every CST node
            recoveryEnabled: true, // keep parsing after recognition errors so a partial CST is still produced
        });
        const Tokens = MacroLexer.tokens;
        const $ = this;
        // Top-level document rule that can handle both plaintext and macros
        $.document = $.RULE('document', () => {
            $.MANY(() => {
                $.OR([
                    { ALT: () => $.CONSUME(Tokens.Plaintext, { LABEL: 'plaintext' }) },
                    { ALT: () => $.CONSUME(Tokens.PlaintextOpenBrace, { LABEL: 'plaintext' }) },
                    { ALT: () => $.SUBRULE($.macro) },
                    // A stray Macro.Start that does not begin a valid macro is demoted to plaintext.
                    { ALT: () => $.CONSUME(Tokens.Macro.Start, { LABEL: 'plaintext' }) },
                ]);
            });
        });
        // Basic Macro Structure: start token, identifier (or the double-slash
        // comment marker), optional arguments, end token.
        $.macro = $.RULE('macro', () => {
            $.CONSUME(Tokens.Macro.Start);
            $.OR([
                { ALT: () => $.CONSUME(Tokens.Macro.DoubleSlash, { LABEL: 'Macro.identifier' }) },
                { ALT: () => $.CONSUME(Tokens.Macro.Identifier, { LABEL: 'Macro.identifier' }) },
            ]);
            $.OPTION(() => $.SUBRULE($.arguments));
            $.CONSUME(Tokens.Macro.End);
        });
        // Arguments Parsing
        $.arguments = $.RULE('arguments', () => {
            $.OR([
                {
                    // Branch 1: a "::"-separated argument list ("::arg::arg...").
                    // Arguments in this form must not contain double colons themselves.
                    ALT: () => {
                        $.CONSUME(Tokens.Args.DoubleColon, { LABEL: 'separator' });
                        $.AT_LEAST_ONE_SEP({
                            SEP: Tokens.Args.DoubleColon,
                            DEF: () => $.SUBRULE($.argument, { LABEL: 'argument' }),
                        });
                    },
                },
                {
                    // Branch 2: a single argument, optionally introduced by one ":";
                    // double colons are permitted inside its body.
                    ALT: () => {
                        $.OPTION(() => {
                            $.CONSUME(Tokens.Args.Colon, { LABEL: 'separator' });
                        });
                        $.SUBRULE($.argumentAllowingColons, { LABEL: 'argument' });
                    },
                    // So, this is a bit hacky. But implemented below, the argument capture does explicitly exclude double colons
                    // from being captured as the first token. The potential ambiguity chevrotain claims here is not possible.
                    // It says stuff like <Args.DoubleColon, Identifier/Macro/Unknown> is possible in both branches, but it is not.
                    IGNORE_AMBIGUITIES: true,
                },
            ]);
        });
        // List the argument tokens here, as we need two rules, one to be able to parse with double colons and one without
        const validArgumentTokens = [
            { ALT: () => $.SUBRULE($.macro) }, // Nested Macros
            { ALT: () => $.CONSUME(Tokens.Identifier) },
            { ALT: () => $.CONSUME(Tokens.Unknown) },
            { ALT: () => $.CONSUME(Tokens.Args.Colon) },
            { ALT: () => $.CONSUME(Tokens.Args.Equals) },
            { ALT: () => $.CONSUME(Tokens.Args.Quote) },
        ];
        // Argument body WITHOUT double colons — used between "::" separators.
        // MANY (zero or more tokens) deliberately allows empty arguments like "::a::::b".
        $.argument = $.RULE('argument', () => {
            $.MANY(() => {
                $.OR([...validArgumentTokens]);
            });
        });
        // Argument body that MAY contain double colons — requires at least one token,
        // which is what rules out the ambiguity with branch 1 of `arguments` above.
        $.argumentAllowingColons = $.RULE('argumentAllowingColons', () => {
            $.AT_LEAST_ONE(() => {
                $.OR([
                    ...validArgumentTokens,
                    { ALT: () => $.CONSUME(Tokens.Args.DoubleColon) },
                ]);
            });
        });
        // Required by chevrotain: finalize and validate the recorded grammar.
        // Must run exactly once, after all rules are defined.
        this.performSelfAnalysis();
    }
    /**
     * Parses a document into a CST.
     *
     * @param {string} input
     * @returns {{ cst: CstNode|null, errors: ({ message: string }|ILexingError|IRecognitionException)[] , lexingErrors: ILexingError[], parserErrors: IRecognitionException[] }}
     */
    parseDocument(input) {
        // Falsy input (empty string, null, undefined) short-circuits with a synthetic error.
        if (!input) {
            return { cst: null, errors: [{ message: 'Input is empty' }], lexingErrors: [], parserErrors: [] };
        }
        const lexingResult = MacroLexer.tokenize(input);
        // "input" is a setter on the chevrotain parser which also resets its state
        // (including `this.errors`) before the parse below.
        this.input = lexingResult.tokens;
        const cst = this.document();
        // Combined view of both error sources; the separate lists are also returned as-is.
        const errors = [
            ...lexingResult.errors,
            ...this.errors,
        ];
        return { cst, errors, lexingErrors: lexingResult.errors, parserErrors: this.errors };
    }
    /**
     * Parses the input against the `macro` rule directly (not `document`).
     * NOTE(review): appears to be a test-only helper — confirm before using elsewhere.
     *
     * @param {string} input
     * @returns {{ cst: CstNode, errors: object[] }}
     */
    test(input) {
        const lexingResult = MacroLexer.tokenize(input);
        // "input" is a setter which will reset the parser's state.
        this.input = lexingResult.tokens;
        const cst = this.macro();
        // For testing purposes we need to actually persist the error messages in the object,
        // otherwise the test cases cannot read those, as they don't have access to the exception object type.
        const errors = this.errors.map(x => ({ message: x.message, ...x, stack: x.stack }));
        return { cst, errors: errors };
    }
}
// Populate the exported live binding now that the class declaration above has been evaluated.
instance = MacroParser.instance;
|