How to use the chevrotain.Lexer.NA pattern in chevrotain

To help you get started, we’ve selected a few chevrotain examples based on popular ways it is used in public projects.

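Lexer.NA is a constant, never-matching pattern exported by chevrotain. Assigning it to a token type tells the lexer to ignore that type during matching, which makes it the usual way to define pure token categories: abstract token types that concrete tokens reference through the categories option and that the parser (or tokenMatcher) can match against. A minimal sketch, with illustrative token names that do not come from the examples below:

const { createToken, Lexer, tokenMatcher } = require("chevrotain")

// An abstract category: it is never matched directly by the lexer.
const BooleanLiteral = createToken({ name: "BooleanLiteral", pattern: Lexer.NA })

// Concrete tokens that belong to the category.
const True = createToken({ name: "True", pattern: /true/, categories: BooleanLiteral })
const False = createToken({ name: "False", pattern: /false/, categories: BooleanLiteral })

// The NA token may be included in the lexer's token list; it is simply ignored during matching.
const lexer = new Lexer([BooleanLiteral, True, False])

const { tokens } = lexer.tokenize("true")
console.log(tokens[0].tokenType.name)                // "True"
console.log(tokenMatcher(tokens[0], BooleanLiteral)) // true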

github SAP / chevrotain / examples / parser / content_assist / content_assist_complex.js (View on GitHub)
*
 * Examples:
 * "Public static " --> ["function"]
 * "Public sta" --> ["static"]
 * "call f" --> ["foo"] // assuming foo is in the symbol table.
 */
const _ = require("lodash")
const {
  createToken,
  Lexer,
  CstParser,
  tokenMatcher,
  EMPTY_ALT
} = require("chevrotain")

const Keyword = createToken({ name: "Keyword", pattern: Lexer.NA })
const Private = createToken({
  name: "Private",
  pattern: /private/,
  categories: Keyword
})
const Public = createToken({
  name: "Public",
  pattern: /public/,
  categories: Keyword
})
const Static = createToken({
  name: "Static",
  pattern: /static/,
  categories: Keyword
})
const Declare = createToken({
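
In this content-assist example the Keyword category itself is never lexed; the lexer only ever emits Private, Public or Static tokens, each of which also matches Keyword. A hedged sketch of checking that membership on lexed output (keywordLexer is illustrative; the real example builds its lexer from a larger token list not shown in the excerpt):

const keywordLexer = new Lexer([Private, Public, Static])
const { tokens } = keywordLexer.tokenize("public")

console.log(tokens[0].tokenType.name)         // "Public" - the concrete token type
console.log(tokenMatcher(tokens[0], Keyword)) // true     - it also matches the Keyword category
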
github SAP / chevrotain / examples / parser / dynamic_tokens / dynamic_delimiters.js (View on GitHub)
module.exports = function(text, dynamicDelimiterRegExp) {
  // make this parameter optional
  if (dynamicDelimiterRegExp === undefined) {
    dynamicDelimiterRegExp = Lexer.NA
  }

  // dynamically create Token classes which extend the BaseXXXDelimiters
  const dynamicDelimiter = createToken({
    name: "dynamicDelimiter",
    pattern: dynamicDelimiterRegExp,
    categories: BaseDelimiter
  })

  // dynamically create a Lexer which can lex our entire language, including the dynamic delimiters.
  const dynamicDelimiterLexer = new Lexer(allTokens.concat([dynamicDelimiter]))

  // lex
  const lexResult = dynamicDelimiterLexer.tokenize(text)

  // setting the input will reset the parser's state
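
Here Lexer.NA serves as a safe default: when no delimiter RegExp is supplied, the dynamically created dynamicDelimiter token simply never matches anything. A possible call site, assuming the function above is the module's export (the require path and input strings are illustrative only):

const parseWithDynamicDelimiter = require("./dynamic_delimiters") // hypothetical path

// With an explicit delimiter RegExp, ";" is lexed as a dynamicDelimiter token.
parseWithDynamicDelimiter("1 ; 2 ; 3", /;/)

// Without one, the pattern defaults to Lexer.NA, so no dynamicDelimiter tokens are produced.
parseWithDynamicDelimiter("1 2 3")
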
github SAP / chevrotain / examples / grammars / less / less2.js (View on GitHub)
name: "PropertyVariable",
    pattern: /\$[\w-]+/
})

const NestedPropertyVariable = createToken({
    name: "NestedPropertyVariable",
    pattern: /\$@[\w-]+/
})

const ImportSym = createToken({
    name: "ImportSym",
    pattern: /@import/
})

// This group has to be defined BEFORE Ident because their prefix is also a valid Ident
const Uri = createToken({ name: "Uri", pattern: Lexer.NA })
const UriString = createToken({
    name: "UriString",
    pattern: MAKE_PATTERN(
        "url\\((:?{{spaces}})?({{string1}}|{{string2}})(:?{{spaces}})?\\)"
    ),
    categories: Uri
})
const UriUrl = createToken({
    name: "UriUrl",
    pattern: MAKE_PATTERN("url\\((:?{{spaces}})?{{url}}(:?{{spaces}})?\\)"),
    categories: Uri
})

// must be after VariableCall
const VariableName = createToken({
    name: "VariableName",
github felipemanga / FemtoIDE / node_modules / java-parser / src / tokens.js (View on GitHub)
return newTokenType;
}

function createKeywordLikeToken(options) {
  // A keyword 'like' token uses the "longer_alt" config option
  // to resolve ambiguities, see: http://sap.github.io/chevrotain/docs/features/token_alternative_matches.html
  options.longer_alt = Identifier;
  return createToken(options);
}

// Token Categories
// A Token Category used to mark all restricted keywords.
// This could be used in a syntax highlighting implementation.
const RestrictedKeyword = createToken({
  name: "RestrictedKeyword",
  pattern: Lexer.NA
});

// A Token Category used to mark all keywords.
// This could be used in a syntax highlighting implementation.
const Keyword = createToken({
  name: "Keyword",
  pattern: Lexer.NA
});

const AssignmentOperator = createToken({
  name: "AssignmentOperator",
  pattern: Lexer.NA
});

const BinaryOperator = createToken({
  name: "BinaryOperator",
github jhipster / prettier-java / packages / java-parser / src / tokens.js (View on GitHub)
// A Token Category used to mark all keywords.
// This could be used in a syntax highlighting implementation.
const Keyword = createToken({
  name: "Keyword",
  pattern: Lexer.NA
});

const AssignmentOperator = createToken({
  name: "AssignmentOperator",
  pattern: Lexer.NA
});

const BinaryOperator = createToken({
  name: "BinaryOperator",
  pattern: Lexer.NA
});

const UnaryPrefixOperator = createToken({
  name: "UnaryPrefixOperator",
  pattern: Lexer.NA
});
const UnaryPrefixOperatorNotPlusMinus = createToken({
  name: "UnaryPrefixOperatorNotPlusMinus",
  pattern: Lexer.NA
});

const UnarySuffixOperator = createToken({
  name: "UnarySuffixOperator",
  pattern: Lexer.NA
});
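
A concrete token may belong to several of these NA categories at once by passing an array, which lets the grammar treat the same lexeme as, for example, both a keyword and a binary operator. A hedged sketch (the actual tokens.js defines its keywords through helper functions, so the real Instanceof definition may differ):

const Instanceof = createToken({
  name: "Instanceof",
  pattern: /instanceof/,
  // One concrete token, two abstract (Lexer.NA) categories.
  categories: [Keyword, BinaryOperator]
});
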
github SAP / chevrotain / examples / grammars / calculator / calculator_embedded_actions.js (View on GitHub)
* https://github.com/SAP/chevrotain/blob/master/examples/grammars/calculator/calculator_pure_grammar.js
 */
const {
  createToken,
  Lexer,
  EmbeddedActionsParser,
  tokenMatcher
} = require("chevrotain")

// ----------------- lexer -----------------
// Using the NA pattern marks this Token class as 'irrelevant' to the Lexer.
// AdditionOperator defines a Token category; the parser can match against such categories
// as a convenience to reduce verbosity.
const AdditionOperator = createToken({
  name: "AdditionOperator",
  pattern: Lexer.NA
})
const Plus = createToken({
  name: "Plus",
  pattern: /\+/,
  categories: AdditionOperator
})
const Minus = createToken({
  name: "Minus",
  pattern: /-/,
  categories: AdditionOperator
})

const MultiplicationOperator = createToken({
  name: "MultiplicationOperator",
  pattern: Lexer.NA
})
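
The embedded-actions calculator consumes the abstract AdditionOperator inside its rules and then uses tokenMatcher to find out which concrete operator was actually lexed. A condensed sketch of that pattern (it assumes it runs inside the parser's constructor with `const $ = this`, and that multiplicationExpression is defined elsewhere in the original file):

$.RULE("additionExpression", () => {
  let result = $.SUBRULE($.multiplicationExpression)
  $.MANY(() => {
    // Consuming the category accepts either Plus or Minus.
    const op = $.CONSUME(AdditionOperator)
    const rhsValue = $.SUBRULE2($.multiplicationExpression)
    if (tokenMatcher(op, Plus)) {
      result += rhsValue
    } else {
      // op is a Minus token
      result -= rhsValue
    }
  })
  return result
})
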
github SAP / chevrotain / examples / grammars / calculator / calculator_pure_grammar.js (View on GitHub)
*
 * This is accomplished by using the automatic CST (Concrete Syntax Tree) output capabilities
 * of chevrotain.
 *
 * See further details here:
 * https://github.com/SAP/chevrotain/blob/master/docs/concrete_syntax_tree.md
 */
const { createToken, tokenMatcher, Lexer, CstParser } = require("chevrotain")

// ----------------- lexer -----------------
// Using the NA pattern marks this Token class as 'irrelevant' to the Lexer.
// AdditionOperator defines a Token hierarchy, but only the leaves of this hierarchy define
// actual Tokens that can appear in the text.
const AdditionOperator = createToken({
  name: "AdditionOperator",
  pattern: Lexer.NA
})
const Plus = createToken({
  name: "Plus",
  pattern: /\+/,
  categories: AdditionOperator
})
const Minus = createToken({
  name: "Minus",
  pattern: /-/,
  categories: AdditionOperator
})

const MultiplicationOperator = createToken({
  name: "MultiplicationOperator",
  pattern: Lexer.NA
})
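
In this pure-grammar variant the rules only build a CST; consuming the AdditionOperator category stores the matched operator token under that category's name, so a CST visitor can read ctx.AdditionOperator and use tokenMatcher to tell Plus from Minus. A condensed sketch along those lines (the visitor class itself is defined further down the original file):

// Inside a CST visitor method for the addition rule (condensed):
// ctx.AdditionOperator holds the operator tokens that were consumed, if any.
const operators = ctx.AdditionOperator || []
operators.forEach(op => {
  if (tokenMatcher(op, Plus)) {
    // handle addition
  } else {
    // handle subtraction (Minus)
  }
})
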
github kevinastone / atom-grammar-test / lib / grammar.js (View on GitHub)
static PATTERN = /<-/;
  static LABEL = '<-';
}
class Carat extends Position {
  static PATTERN = /\^/;
  static LABEL = '^';
}
class Period extends Token {
  static PATTERN = /[.]/;
  static LABEL = '.';
}
class Identifier extends Token {
  static PATTERN = /[a-zA-Z_][a-zA-Z0-9_-]*/;
}
class Modifier extends Token {
  static PATTERN = Lexer.NA;
}
class Only extends Modifier {
  static PATTERN = /only:|=/;
  static LABEL = '=';
}
class Not extends Modifier {
  static PATTERN = /not:|!/;
  static LABEL = '!';
}
class OpenParens extends Token {
  static PATTERN = /\(/;
  static LABEL = '(';
}
class CloseParens extends Token {
  static PATTERN = /\)/;
  static LABEL = ')';
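
This example uses chevrotain's older class-based Token API (classes with static PATTERN fields); Lexer.NA plays the same role there, marking Modifier as an abstract parent that Only and Not extend. With the current createToken API the same structure would look roughly like this (a hedged translation, not part of the original file):

const Modifier = createToken({ name: "Modifier", pattern: Lexer.NA })
const Only = createToken({ name: "Only", pattern: /only:|=/, label: "=", categories: Modifier })
const Not = createToken({ name: "Not", pattern: /not:|!/, label: "!", categories: Modifier })
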
github RokuRoad / bright / src / Tokens.ts (View on GitHub)
const name = words.map(word => word.toUpperCase()).join('_')
  const re = new RegExp(`\\b${words.join('[ \\t]*')}\\b`, 'iy')

  const pattern = (text: string, startOffset: number) => {
    re.lastIndex = startOffset
    return re.exec(text)
  }

  const hint = term.substr(0, 1)
  const startHint = hint.toUpperCase() === hint.toLowerCase() ? [hint.toUpperCase()] : [hint.toUpperCase(), hint.toLowerCase()]

  return createToken({ name, pattern, longer_alt: IDENTIFIER, start_chars_hint: startHint, line_breaks: false, ...opts })
}

export const BASE_TYPE = createToken({ name: 'BASE_TYPE', pattern: Lexer.NA })
export const LITERAL = createToken({ name: 'LITERAL', pattern: Lexer.NA })
export const RELATIONAL_OPERATOR = createToken({ name: 'RELATIONAL_OPERATOR', pattern: Lexer.NA })
export const EQUALITY_OPERATOR = createToken({ name: 'EQUALITY_OPERATOR', pattern: Lexer.NA })
export const PRINT = createToken({ name: 'PRINT', pattern: Lexer.NA })

export const PUNCTUATION = createToken({ name: 'PUNCTUATION', pattern: Lexer.NA })

export const LOGIC_OPERATOR = createToken({ name: 'LOGIC_OPERATOR', pattern: Lexer.NA, categories: PUNCTUATION })
export const SHIFT_OPERATOR = createToken({ name: 'SHIFT_OPERATOR', pattern: Lexer.NA, categories: PUNCTUATION })
export const MULTI_OPERATOR = createToken({ name: 'MULTI_OPERATOR', pattern: Lexer.NA, categories: PUNCTUATION })

export const TERMINATOR = createToken({ name: 'TERMINATOR', pattern: Lexer.NA })

export const UNARY = createToken({ name: 'UNARY', pattern: Lexer.NA, categories: PUNCTUATION })
export const POSTFIX = createToken({ name: 'POSTFIX', pattern: Lexer.NA, categories: PUNCTUATION })
export const ADDICTIVE_OPERATOR = createToken({
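
Note that an NA category can itself belong to another category: LOGIC_OPERATOR, SHIFT_OPERATOR and the other operator groups here are abstract, yet each is also a member of PUNCTUATION. Category matching follows that chain, so a concrete token placed under LOGIC_OPERATOR should also match PUNCTUATION. A small sketch under those assumptions (the AND token below is illustrative, not the file's actual definition):

export const AND = createToken({ name: 'AND', pattern: /\band\b/i, categories: LOGIC_OPERATOR })

// For a token instance `tok` lexed as AND:
//   tokenMatcher(tok, LOGIC_OPERATOR) -> true
//   tokenMatcher(tok, PUNCTUATION)    -> true as well, through the nested category
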
github SAP / chevrotain / examples / grammars / calculator / calculator_pure_grammar.js (View on GitHub)
pattern: Lexer.NA
})
const Plus = createToken({
  name: "Plus",
  pattern: /\+/,
  categories: AdditionOperator
})
const Minus = createToken({
  name: "Minus",
  pattern: /-/,
  categories: AdditionOperator
})

const MultiplicationOperator = createToken({
  name: "MultiplicationOperator",
  pattern: Lexer.NA
})
const Multi = createToken({
  name: "Multi",
  pattern: /\*/,
  categories: MultiplicationOperator
})
const Div = createToken({
  name: "Div",
  pattern: /\//,
  categories: MultiplicationOperator
})

const LParen = createToken({ name: "LParen", pattern: /\(/ })
const RParen = createToken({ name: "RParen", pattern: /\)/ })
const NumberLiteral = createToken({
  name: "NumberLiteral",