How to use the chevrotain.Lexer.SKIPPED function in chevrotain

To help you get started, we’ve selected a few chevrotain examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github SAP / chevrotain / examples / webpack / src / our_grammar.js View on Github external
const { Lexer, CstParser, createToken } = require("chevrotain")

// ----------------- lexer -----------------
// Token types for a tiny integer-array grammar, e.g. "[1, 2, 3]".
const LSquare = createToken({ name: "LSquare", pattern: /\[/ })
const RSquare = createToken({ name: "RSquare", pattern: /]/ })
const Comma = createToken({ name: "Comma", pattern: /,/ })
const Integer = createToken({ name: "Integer", pattern: /\d+/ })

// Whitespace is matched but placed in the SKIPPED group, so it is
// dropped from the token stream handed to the parser.
const WhiteSpace = createToken({ name: "WhiteSpace", pattern: /\s+/, group: Lexer.SKIPPED })

// Array order defines matching priority: earlier token types win.
const allTokens = [WhiteSpace, LSquare, RSquare, Comma, Integer]
const ArrayLexer = new Lexer(allTokens)

// ----------------- parser -----------------
class ArrayParser extends CstParser {
    constructor(input) {
        super(allTokens)
        const $ = this

        $.RULE("array", () => {
            $.CONSUME(LSquare)
            $.OPTION(() => {
                $.CONSUME(Integer)
                $.MANY(() => {
github jhipster / prettier-java / packages / java-parser / src / tokens.js View on Github external
pattern: Lexer.NA
});

// https://docs.oracle.com/javase/specs/jls/se11/html/jls-3.html#jls-3.11
// https://docs.oracle.com/javase/specs/jls/se11/html/jls-3.html#jls-3.11
// Category TokenType: Lexer.NA means it is never matched directly by the
// lexer; concrete separator tokens can declare it as their category.
const Separators = createToken({
  name: "Separators",
  pattern: Lexer.NA
});

// https://docs.oracle.com/javase/specs/jls/se11/html/jls-3.html#jls-3.6
// Note [\\x09\\x20\\x0C] is equivalent to [\\t\\x20\\f] and that \\x20 represents
// space character
// Whitespace is matched but discarded (SKIPPED group) so the parser never
// sees it. NOTE(review): `createToken` here appears to be a project-local
// wrapper whose return value is intentionally discarded — presumably it
// registers the token as a side effect; verify against its definition.
createToken({
  name: "WhiteSpace",
  pattern: MAKE_PATTERN("[\\x09\\x20\\x0C]|{{LineTerminator}}"),
  group: Lexer.SKIPPED
});
// Comments go into a custom "comments" group rather than SKIPPED, keeping
// them available to consumers (e.g. a formatter) outside the main stream.
createToken({
  name: "LineComment",
  pattern: /\/\/[^\n\r]*/,
  group: "comments"
});
// Traditional /* ... */ comment; the (?!\/) lookahead stops '*' matches
// that would close the comment early.
createToken({
  name: "TraditionalComment",
  pattern: /\/\*([^*]|\*(?!\/))*\*\//,
  group: "comments"
});
// Binary literal with optional '_' digit separators and optional long suffix.
createToken({ name: "BinaryLiteral", pattern: /0[bB][01]([01_]*[01])?[lL]?/ });
createToken({
  name: "FloatLiteral",
  pattern: MAKE_PATTERN(
    "{{Digits}}\\.({{Digits}})?({{ExponentPart}})?({{FloatTypeSuffix}})?|" +
github SAP / chevrotain / examples / parser / backtracking / backtracking.js View on Github external
// by factoring out the common prefix, but for the sake of the example let us assume backtracking is required...

const { createToken, Lexer, CstParser } = require("chevrotain")

// Token definitions for the backtracking example grammar.
const Number = createToken({ name: "Number", pattern: /\d+/ })
const Element = createToken({ name: "Element", pattern: /element/ })
const Default = createToken({ name: "Default", pattern: /default/ })
const Dot = createToken({ name: "Dot", pattern: /\./ })
const Colon = createToken({ name: "Colon", pattern: /:/ })
const Equals = createToken({ name: "Equals", pattern: /=/ })
const SemiColon = createToken({ name: "SemiColon", pattern: /;/ })
const Ident = createToken({ name: "Ident", pattern: /[a-z]+/ })

// Whitespace is lexed but thrown away via the SKIPPED group.
const WhiteSpace = createToken({ name: "WhiteSpace", pattern: /\s+/, group: Lexer.SKIPPED })

// Matching priority follows array order; the keyword-like tokens
// (Element/Default) come before the generic Ident pattern.
const allTokens = [
  WhiteSpace,
  Number,
  Element,
  Default,
  Dot,
  Colon,
  Equals,
  SemiColon,
  Ident
]

const backtrackingLexer = new Lexer(allTokens)
github SAP / chevrotain / examples / implementation_languages / ecma6 / ecma6_json.js View on Github external
// JSON string literal: double-quoted, with escape sequences and \uXXXX.
const StringLiteral = createToken({
  name: "StringLiteral",
  pattern: /"(?:[^\\"]|\\(?:[bfnrtv"\\/]|u[0-9a-fA-F]{4}))*"/
})

// JSON number: optional minus, no leading zeros, optional fraction/exponent.
const NumberLiteral = createToken({
  name: "NumberLiteral",
  pattern: /-?(0|[1-9]\d*)(\.\d+)?([eE][+-]?\d+)?/
})

// Whitespace is matched but skipped — it never reaches the parser.
const WhiteSpace = createToken({ name: "WhiteSpace", pattern: /[ \t\n\r]+/, group: Lexer.SKIPPED })

// Array order defines matching priority.
const allTokens = [
  WhiteSpace,
  NumberLiteral,
  StringLiteral,
  LCurly,
  RCurly,
  LSquare,
  RSquare,
  Comma,
  Colon,
  True,
  False,
  Null
]
github SAP / chevrotain / examples / lexer / multi_mode_lexer / multi_mode_lexer.js View on Github external
// Mode-exit tokens: `pop_mode: true` returns the lexer to the previous
// mode on the mode stack when the token is matched.
const ExitLetter = createToken({ name: "ExitLetter", pattern: /EXIT_LETTERS/, pop_mode: true })

const ExitSigns = createToken({ name: "ExitSigns", pattern: /EXIT_SIGNS/, pop_mode: true })

// Tabs and spaces are matched but discarded via the SKIPPED group.
const Whitespace = createToken({
  name: "Whitespace",
  pattern: /(\t| )/,
  group: Lexer.SKIPPED
})

// Each key defines a Lexer mode's name.
// And each value is an array of Tokens which are valid in this Lexer mode.
const multiModeLexerDefinition = {
  modes: {
    numbers_mode: [
      One,
      Two,
      Three,
      ExitNumbers, // encountering an ExitNumbers Token will cause the lexer to revert to the previous mode
      EnterLetters, // switch to "letter_mode" after encountering "ENTER_Letter" while in "numbers_mode"
      Whitespace
    ],
    letter_mode: [
      Alpha,
github SAP / chevrotain / examples / parser / dynamic_tokens / dynamic_delimiters.js View on Github external
// ----------------- lexer -----------------
const LSquare = createToken({ name: "LSquare", pattern: /\[/ })
const RSquare = createToken({ name: "RSquare", pattern: /]/ })

// base delimiter TokenTypes
// BaseDelimiter is a pure category (Lexer.NA): never matched itself;
// concrete delimiters such as Comma belong to it via `categories`.
const BaseDelimiter = createToken({ name: "BaseDelimiter", pattern: Lexer.NA })
const Comma = createToken({
  name: "Comma",
  pattern: /,/,
  categories: BaseDelimiter
})

const NumberLiteral = createToken({ name: "NumberLiteral", pattern: /\d+/ })

// Whitespace is matched but dropped from the token stream.
const WhiteSpace = createToken({ name: "WhiteSpace", pattern: /\s+/, group: Lexer.SKIPPED })

// Array order defines matching priority.
const allTokens = [
  WhiteSpace,
  LSquare,
  RSquare,
  BaseDelimiter,
  Comma,
  NumberLiteral
]

// ----------------- parser -----------------
// TODO: change to ES6 classes
function DynamicDelimiterParser() {
  // invoke super constructor
  EmbeddedActionsParser.call(this, allTokens, {
github SAP / chevrotain / examples / lexer / keywords_vs_identifiers / keywords_vs_identifiers.js View on Github external
})

// Keyword TokenTypes. `longer_alt: Identifier` makes the lexer also attempt
// the longer Identifier pattern whenever a keyword matches, so e.g.
// "forEach" lexes as a single Identifier rather than For plus a remainder.
const For = createToken({
  name: "For",
  pattern: /for/,
  longer_alt: Identifier
})
const Do = createToken({
  name: "Do",
  pattern: /do/,
  longer_alt: Identifier
})
// Whitespace is matched but skipped (never appears in the token output).
const Whitespace = createToken({
  name: "Whitespace",
  pattern: /\s+/,
  group: Lexer.SKIPPED
})

// FIX: declared with `const` — the original assignment had no declaration,
// which creates an implicit global in sloppy mode and throws a
// ReferenceError in strict mode / ES modules.
const keywordsVsIdentifiersLexer = new Lexer([
  Whitespace, // Whitespace is very common in most languages so placing it first generally speeds up the lexing.

  While, // the actual keywords (While/For/Do) must appear BEFORE the Identifier Token as they are all a valid prefix of its PATTERN.
  For, // However the edge case of an Identifier with a prefix which is a valid keyword must still be handled, for example:
  Do, // 'do' vs 'done' or 'for' vs 'forEach'. This is solved by defining 'Keyword.LONGER_ALT = Identifier',
  // thus each time a Keyword is detected the Lexer will also try to match a LONGER Identifier.

  Identifier // As mentioned above, the Identifier Token must appear after ALL the Keyword Tokens
])

module.exports = {
  Identifier: Identifier,
  While: While,
github SAP / chevrotain / examples / parser / parametrized_rules / parametrized.js View on Github external
// Greeting vocabulary for the parametrized-rules example.
const Hello = createToken({ name: "Hello", pattern: /hello/ })
const World = createToken({ name: "World", pattern: /world/ })

// Negative adjectives.
const Cruel = createToken({ name: "Cruel", pattern: /cruel/ })
const Bad = createToken({ name: "Bad", pattern: /bad/ })
const Evil = createToken({ name: "Evil", pattern: /evil/ })

// Positive adjectives.
const Good = createToken({ name: "Good", pattern: /good/ })
const Wonderful = createToken({ name: "Wonderful", pattern: /wonderful/ })
const Amazing = createToken({ name: "Amazing", pattern: /amazing/ })

// Whitespace is lexed but skipped — it never reaches the parser.
const WhiteSpace = createToken({ name: "WhiteSpace", pattern: /\s+/, group: Lexer.SKIPPED })

// Array order defines matching priority.
const allTokens = [
  WhiteSpace,
  Hello, World,
  Cruel, Bad, Evil,
  Good, Wonderful, Amazing
]

const HelloLexer = new Lexer(allTokens)
github project-flogo / flogo-web / libs / parser / src / parser / tokens.ts View on Github external
// Closing '"' of an inline object expression; `pop_mode` returns the lexer
// to its previous mode.
export const InlineObjectExprClose = createToken({
  name: 'InlineObjectExprClose',
  pattern: /"/,
  pop_mode: true,
  label: 'Close inline object expression',
});

// JSON-style number literal.
export const NumberLiteral = createToken({
  name: 'NumberLiteral',
  pattern: /-?(0|[1-9]\d*)(\.\d+)?([eE][+-]?\d+)?/,
  label: 'NumberLiteral',
});

// Whitespace is matched but skipped; `line_breaks` is set because the
// pattern can consume newlines.
export const WhiteSpace = createToken({
  name: 'WhiteSpace',
  pattern: /\s+/,
  group: Lexer.SKIPPED,
  line_breaks: true,
  label: 'Whitespace',
});

// The '$' that introduces a resolver lookup.
export const Lookup = createToken({
  name: 'Lookup',
  pattern: /\$/,
  label: '$',
});

// Resolver identifier: a letter or underscore, then letters, digits,
// underscores or dots (Unicode-aware via the UnicodeCategory classes).
const RESOLVER_PATTERN = new RegExp(
  `[_${UnicodeCategory.Letter}][_\.${UnicodeCategory.Letter}${UnicodeCategory.DecimalDigit}]*`
);

function matchResolverIdentifier(text: string, startOffset?: number, tokens?: IToken[]) {
  if (tokens.length < 3) {
    return null;
github jhipster / jhipster-core / lib / dsl / self_checks / parsing_system_checker.js View on Github external
  return _.reject(redundant, tokenType => tokenType.GROUP === Lexer.SKIPPED);
}