Add nearley 2.9 and moo 0.3 (#16937)

* Add definitions for nearley 2.9

* Add definitions for moo 0.3
This commit is contained in:
Nikita Litvin 2017-06-05 02:11:13 +05:00 committed by Mohamed Hegazy
parent d18c243d4e
commit 3ad0e1d227
8 changed files with 306 additions and 0 deletions

109
types/moo/index.d.ts vendored Normal file
View File

@@ -0,0 +1,109 @@
// Type definitions for moo 0.3
// Project: https://github.com/tjvr/moo#readme
// Definitions by: Nikita Litvin <https://github.com/deltaidea>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
export as namespace moo;
/**
 * Reserved token for indicating a parse fail.
 */
export const error: { error: true };
/**
 * Compiles a single set of rules into a (stateless) lexer.
 */
export function compile(rules: Rules): Lexer;
/**
 * Compiles a keyed collection of rule sets into a stateful lexer.
 * `start` names the initial state — presumably defaults to the first
 * key when omitted; TODO confirm against the moo README.
 */
export function states(states: {[x: string]: Rules}, start?: string): Lexer;
/**
 * A map from token type name to its matcher. A matcher is a literal string,
 * a RegExp, an array of literal strings, or an options object combining the
 * pattern with lexer-state directives (push/pop/next) and flags.
 */
export interface Rules {
    [x: string]: RegExp | string | string[] | {
        /**
         * The pattern(s) this token type matches.
         */
        match: RegExp | string | string[],
        /**
         * Moo tracks detailed information about the input for you.
         * It will track line numbers, as long as you apply the `lineBreaks: true`
         * option to any tokens which might contain newlines. Moo will try to warn you if you forget to do this.
         */
        lineBreaks?: boolean,
        /**
         * Moves the lexer to a new state, and pushes the old state onto the stack.
         */
        push?: string,
        /**
         * Returns to a previous state, by removing one or more states from the stack.
         */
        pop?: number,
        /**
         * Moves to a new state, but does not affect the stack.
         */
        next?: string,
        /**
         * You can have a token type that both matches tokens and contains error values.
         */
        error?: true
    };
}
/**
 * A lexer produced by moo.compile() or moo.states().
 */
export interface Lexer {
    /**
     * Returns a string with a pretty error message.
     */
    formatError(token: Token, message?: string): string;
    /**
     * Can be used by parsers like nearley to check whether a given token type can be parsed by this lexer.
     */
    has(tokenType: string): boolean;
    /**
     * When you reach the end of Moo's internal buffer, next() will return undefined.
     * You can always reset() it and feed it more data when that happens.
     */
    next(): Token | undefined;
    /**
     * Empty the internal buffer of the lexer, and set the line, column, and offset counts back to their initial value.
     *
     * Both arguments are optional: a bare reset() just clears the buffer and
     * counters (as described for next() above), and omitting `state` restarts
     * from the lexer's initial state.
     */
    reset(chunk?: string, state?: LexerState): void;
    /**
     * Returns current state, which you can later pass it as the second argument
     * to reset() to explicitly control the internal state of the lexer.
     */
    save(): LexerState;
}
/**
 * A single token produced by Lexer#next().
 */
export interface Token {
    /**
     * Returns value of the token, or its type if value isn't available.
     */
    toString(): string;
    /**
     * The name of the group, as passed to compile.
     */
    type?: string;
    /**
     * The contents of the capturing group (or the whole match, if the token RegExp doesn't define a capture).
     */
    value: string;
    /**
     * The number of bytes from the start of the buffer where the match starts.
     */
    offset: number;
    /**
     * The total length of the match (value may be shorter if you have capturing groups).
     */
    size: number;
    /**
     * The number of line breaks found in the match. (Always zero if this rule has lineBreaks: false.)
     *
     * This is a count, per its own description ("the number of", "always
     * zero"), so its type is number rather than boolean.
     */
    lineBreaks: number;
    /**
     * The line number of the beginning of the match, starting from 1.
     */
    line: number;
    /**
     * The column where the match begins, starting from 1.
     */
    col: number;
}
/**
 * Snapshot of a lexer's position and state, as returned by Lexer#save()
 * and accepted as the second argument to Lexer#reset().
 */
export interface LexerState {
    // Line number at the save point — presumably 1-based, matching
    // Token.line; confirm against the moo README.
    line: number;
    // Column at the save point — presumably 1-based, matching Token.col.
    col: number;
    // Name of the current state (a key of the object passed to moo.states).
    state: string;
}

32
types/moo/moo-tests.ts Normal file
View File

@@ -0,0 +1,32 @@
// Type-level smoke tests for the moo type definitions, adapted from the
// examples in the moo README. This file is compiled, never executed.
import * as moo from 'moo';
// compile(): a single, stateless rule set mixing string, RegExp and
// string-array matchers.
let lexer = moo.compile({
    lparen: '(',
    word: /[a-z]+/,
    rparen: ')',
    keyword: ['while', 'if', 'else', 'moo', 'cows']
});
// states(): a stateful lexer; push/pop move between the 'main' and 'lit'
// rule sets (a template-literal-style mini language).
lexer = moo.states({
    main: {
        strstart: {match: '`', push: 'lit'},
        ident: /\w+/,
        lbrace: {match: '{', push: 'main'},
        rbrace: {match: '}', pop: 1},
        colon: ':',
        space: {match: /\s+/, lineBreaks: true},
    },
    lit: {
        interp: {match: '${', push: 'main'},
        escape: /\\./,
        strend: {match: '`', pop: 1},
        const: {match: /(?:[^$`]|\$(?!\{))+/, lineBreaks: true},
    },
});
// Feed data, snapshot the state, then resume later from that snapshot.
lexer.reset('some line\n');
let info = lexer.save();
lexer.next();
lexer.next();
lexer.reset('a different line\n', info);
lexer.next();

23
types/moo/tsconfig.json Normal file
View File

@@ -0,0 +1,23 @@
{
"compilerOptions": {
"module": "commonjs",
"lib": [
"es6",
"dom"
],
"noImplicitAny": true,
"noImplicitThis": true,
"strictNullChecks": true,
"baseUrl": "../",
"typeRoots": [
"../"
],
"types": [],
"noEmit": true,
"forceConsistentCasingInFileNames": true
},
"files": [
"index.d.ts",
"moo-tests.ts"
]
}

1
types/moo/tslint.json Normal file
View File

@@ -0,0 +1 @@
{ "extends": "dtslint/dt.json" }

99
types/nearley/index.d.ts vendored Normal file
View File

@@ -0,0 +1,99 @@
// Type definitions for nearley 2.9
// Project: https://github.com/Hardmath123/nearley#readme
// Definitions by: Nikita Litvin <https://github.com/deltaidea>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
export as namespace nearley;
/**
 * An Earley parser over a compiled grammar.
 */
export class Parser {
    // Construct either from raw compiled rules plus a start symbol, or
    // from a pre-built Grammar.
    constructor(rules: Rule[], start: string, options?: ParserOptions);
    constructor(grammar: Grammar, options?: ParserOptions);
    /**
     * The Parser object can be fed data in parts with .feed(data).
     * You can then find an array of parsings with the .results property.
     * If results is empty, then there are no parsings.
     * If results contains multiple values, then that combination is ambiguous.
     *
     * @throws If there are no possible parsings, nearley will throw an error
     * whose offset property is the index of the offending token.
     */
    feed(chunk: string): void;
    // Signals end of input and returns the final parsings. Loosely typed
    // (any[]) because the element shape is whatever the grammar's
    // postprocessors produce.
    finish(): any[];
    // Rewinds the parser to a previously saved column object.
    // NOTE(review): presumably requires options.keepHistory — confirm
    // against the nearley documentation.
    restore(column: {[x: string]: any, lexerState: LexerState}): void;
    // Snapshot of the current parse position; pair with restore().
    // NOTE(review): typed here as LexerState; verify against nearley's
    // actual Parser#save() return shape.
    save(): LexerState;
    grammar: Grammar;
    options: ParserOptions;
    // The lexer in use (built-in, or the one supplied via options.lexer).
    lexer: Lexer;
    lexerState?: LexerState;
    // Index of the current position in the input.
    current: number;
    /**
     * An array of possible parsings. Each element is the thing returned by your grammar.
     *
     * Note that this is undefined before the first feed() call.
     * It isn't typed as `any[] | undefined` to spare you the null checks when it's definitely an array.
     */
    results: any[];
    /**
     * Reserved token for indicating a parse fail.
     */
    static fail: {};
}
/**
 * Options accepted by the Parser constructors.
 */
export interface ParserOptions {
    // Keep the full history of parse columns (needed by Parser#restore).
    // Presumably defaults to false to save memory — TODO confirm.
    keepHistory?: boolean;
    // A tokenizer (e.g. a moo lexer) implementing the Lexer interface below;
    // when omitted the parser consumes the input string character by character.
    lexer?: Lexer;
}
/**
 * A single production of a compiled grammar. The loose `any` types mirror
 * nearley's untyped internals.
 */
export class Rule {
    constructor(name: any, symbols: any, postprocess: any);
    // Pretty-prints the rule — presumably with a cursor marker at the given
    // position when `withCursorAt` is supplied; confirm against nearley source.
    toString(withCursorAt: any): any;
    // Highest rule id allocated so far (rule ids are assigned globally).
    static highestId: number;
}
/**
 * A set of rules plus a start symbol, ready to hand to the Parser.
 */
export class Grammar {
    constructor(rules: Rule[], start: string);
    // All productions of the grammar.
    rules: Rule[];
    // Rules indexed by name.
    byName: {[x: string]: Rule};
}
// Merged namespace: static-style helpers on Grammar.
export namespace Grammar {
    /**
     * Builds a Grammar from the plain rules/start pair exported by a
     * nearleyc-compiled grammar module.
     */
    function fromCompiled(rules: Rule[], start: string): Grammar;
}
/**
 * The tokenizer contract nearley expects from ParserOptions.lexer
 * (moo lexers satisfy it).
 */
export interface Lexer {
    /**
     * Sets the internal buffer to chunk, and restore line/col/state info taken from save().
     */
    reset(chunk: string, state?: LexerState): void;
    /**
     * Returns e.g. {type, value, line, col}. Only the value attribute is required.
     */
    next(): Token | undefined;
    /**
     * Returns an object describing the current line/col etc. This allows us
     * to preserve this information between feed() calls, and also to support Parser#rewind().
     * The exact structure is lexer-specific; nearley doesn't care what's in it.
     */
    save(): LexerState;
    /**
     * Returns a string with an error message describing the line/col of the offending token.
     * You might like to include a preview of the line in question.
     */
    formatError(token: Token): string;
    /**
     * Returns true if the lexer can emit tokens with that name.
     * Used to resolve %-specifiers in compiled nearley grammars.
     */
    has(tokenType: string): boolean;
}
/**
 * A token as produced by Lexer#next(). Only `value` is required; any other
 * attributes (type, line, col, …) are lexer-specific.
 */
export interface Token {
    [x: string]: any;
    value: string;
}
/**
 * Opaque lexer snapshot passed between Lexer#save() and Lexer#reset().
 * The structure is lexer-specific; nearley doesn't care what's in it.
 */
export interface LexerState {
    [x: string]: any;
}

View File

@@ -0,0 +1,18 @@
// Type-level smoke tests for the nearley type definitions.
// This file is compiled, never executed.
import { Parser, Grammar, Rule, Lexer } from 'nearley';
// Stand-ins for the values a nearleyc-compiled grammar module would provide.
declare const parserRules: Rule[];
declare const parserStart: string;
declare const lexer: Lexer;
declare const grammar: Grammar;
// Exercise both constructor overloads.
let parser = new Parser(parserRules, parserStart, { lexer, keepHistory: false });
parser = new Parser(grammar);
try {
    parser.feed("<123>");
    if (parser.results) {
        console.log(parser.results[0]);
    }
} catch (error) {
    // feed() throws when the input has no possible parsings.
    console.log(error);
}

View File

@@ -0,0 +1,23 @@
{
"compilerOptions": {
"module": "commonjs",
"lib": [
"es6",
"dom"
],
"noImplicitAny": true,
"noImplicitThis": true,
"strictNullChecks": true,
"baseUrl": "../",
"typeRoots": [
"../"
],
"types": [],
"noEmit": true,
"forceConsistentCasingInFileNames": true
},
"files": [
"index.d.ts",
"nearley-tests.ts"
]
}

View File

@@ -0,0 +1 @@
{ "extends": "dtslint/dt.json" }