How to use the prismjs.tokenize function in prismjs

To help you get started, we’ve selected a few prismjs examples, based on popular ways it is used in public projects.

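Before looking at the project examples, it helps to know what Prism.tokenize returns: an array that mixes plain strings (text with no highlighting) and Token objects carrying type, content, and length. The short sketch below is not taken from the projects listed here; it assumes the default prismjs build, which ships with the javascript grammar.

const Prism = require("prismjs");

// Tokenize a small snippet with the built-in JavaScript grammar.
const code = "const answer = 42;";
const tokens = Prism.tokenize(code, Prism.languages.javascript);

tokens.forEach(token => {
  if (typeof token === "string") {
    // Plain text between highlighted tokens, e.g. whitespace
    console.log("text:", JSON.stringify(token));
  } else {
    // Prism.Token instances expose type, content and length
    console.log(token.type + ":", token.content);
  }
});

Every example below follows this same pattern: pick a grammar from Prism.languages, call Prism.tokenize, then walk the resulting array of strings and tokens.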

github springload / draftail / examples / components / PrismDecorator.js View on Github external
    const language = block
      .getData()
      .get("language", this.options.defaultLanguage);

    // Allow for no syntax highlighting
    if (language == null) {
      return;
    }

    const blockKey = block.getKey();
    const blockText = block.getText();

    let tokens;

    try {
      tokens = Prism.tokenize(blockText, Prism.languages[language]);
    } catch (e) {
      // eslint-disable-next-line no-console
      console.error(e);
      return;
    }

    this.highlighted[blockKey] = {};

    let tokenCount = 0;
    tokens.reduce((startOffset, token) => {
      const endOffset = startOffset + token.length;

      if (typeof token !== "string") {
        tokenCount += 1;
        this.highlighted[blockKey][tokenCount] = token;
        callback(startOffset, endOffset);
github Pustelto / Bracketeer / src / parseBrackets.js View on Github external
      beforeRangeEnd = getRangePosition(s, "start");
      afterRangeStart = getRangePosition(s, "end");
    }

    // Get vscode range for text before selection and for text after selection...
    const beforeRange = new vscode.Range(docStart, beforeRangeEnd);
    const afterRange = editor.document.validateRange(
      new vscode.Range(afterRangeStart, docEnd)
    );

    // ... and use them to get the corresponding texts
    const beforeText = editor.document.getText(beforeRange);
    const afterText = editor.document.getText(afterRange);

    const tokenizedBeforeText = Prism.tokenize(beforeText, prismLang);
    const tokenizedAfterText = Prism.tokenize(afterText, prismLang);

    const [openPos, closePos, bracketType] = getBracketPairPositionsAndType(
      tokenizedBeforeText,
      tokenizedAfterText,
      languageDef
    );

    if (openPos === undefined || closePos === undefined) return;

    return {
      startPos: getPositionFromOffset(beforeRangeEnd, openPos, true),
      endPos: getPositionFromOffset(afterRangeStart, closePos, false),
      tokenType: bracketType,
      originalSelection: s
    };
  });
github jakoblo / ufo / src / js / view-folder / view-folder-editor / slate-extensions / slate-markdown / slate-markdown-plugin.js View on Github external
function prismDecorator(text, block) {
  let characters = text.characters.asMutable();
  const string = text.text;

  // Prism will split the string into nested tokens
  // These tokens contain the information to style the characters
  const tokens = Prism.tokenize(string, grammar);

  // Current Token position in the string
  const offset = 0;

  characters = mergePrismTokensInCharacters(characters, tokens, offset);

  return characters.asImmutable();
}
github pomber / git-history / src / git-providers / tokenizer.js View on Github external
export default function tokenize(code, language = "javascript") {
  const prismTokens = Prism.tokenize(code, Prism.languages[language]);
  const nestedTokens = tokenizeStrings(prismTokens);
  const tokens = flattenTokens(nestedTokens);

  let currentLine = [];
  const lines = [currentLine];
  tokens.forEach(token => {
    const contentLines = token.content.split(newlineRe);

    const firstContent = contentLines.shift();
    if (firstContent !== "") {
      currentLine.push({ type: token.type, content: firstContent });
    }
    contentLines.forEach(content => {
      currentLine = [];
      lines.push(currentLine);
      if (content !== "") {
        currentLine.push({ type: token.type, content });
      }
    });
  });

  return lines;
}
github ianstormtaylor / slate / examples / code-highlighting / index.js View on Github external
decorateNode = (node, editor, next) => {
    const others = next() || []
    if (node.type !== 'code') return others

    const language = node.data.get('language')
    const texts = Array.from(node.texts())
    const string = texts.map(([n]) => n.text).join('\n')
    const grammar = Prism.languages[language]
    const tokens = Prism.tokenize(string, grammar)
    const decorations = []
    let startEntry = texts.shift()
    let endEntry = startEntry
    let startOffset = 0
    let endOffset = 0
    let start = 0

    for (const token of tokens) {
      startEntry = endEntry
      startOffset = endOffset

      const [startText, startPath] = startEntry
      const content = getContent(token)
      const newlines = content.split('\n').length - 1
      const length = content.length - newlines
      const end = start + length
github 30-seconds / 30-seconds-of-code / scripts / analyze.js View on Github external
data: snippetsArchiveData.data.map(snippet => {
    let tokens = prism.tokenize(
      snippet.attributes.codeBlocks[0],
      prism.languages.javascript,
      'javascript'
    );
    return {
      id: snippet.id,
      type: 'snippetAnalysis',
      attributes: {
        codeLength: snippet.attributes.codeBlocks[0].trim().length,
        tokenCount: tokens.length,
        functionCount: tokens.filter(t => t.type === 'function').length,
        operatorCount: tokens.filter(t => t.type === 'operator').length,
        keywordCount: tokens.filter(t => t.type === 'keyword').length,
        distinctFunctionCount: [
          ...new Set(tokens.filter(t => t.type === 'function').map(t => t.content))
        ].length
github twosigma / znai / td-documentation-reactjs / src / doc-elements / code-snippets / codeParser.js View on Github external
function parseCode(lang, code) {
    const prismLang = Prism.languages[adjustLang(lang)]

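    // Fall back to Prism's generic C-like grammar when the language is not registered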
    const tokens = Prism.tokenize(code, prismLang ? prismLang : Prism.languages.clike)
    return tokens.map(t => normalizeToken(t))
}
github withspectrum / slate-markdown / src / decorator.js View on Github external
function markdownDecorator(text: any, block: any) {
  const characters = text.characters.asMutable();
  const language = 'markdown';
  const string = text.text;
  const grammar = Prism.languages[language];
  const tokens = Prism.tokenize(string, grammar);
  addMarks(characters, tokens);
  return characters.asImmutable();
}
github kucherenko / jscpd / src / tokenizer / prism.ts View on Github external
export function tokenize(code: string, language: string): IToken[] {
  let length = 0;
  let line = 1;
  let column = 1;

  initLanguages([language]);

  let tokens: IToken[] = [];

  PrismTokenize(code, languages[getLanguagePrismName(language)]).forEach(
    t => (tokens = tokens.concat(createTokens(t, language)))
  );

  function sanitizeLangName(name: string): string {
    return name && name.replace ? name.replace('language-', '') : 'unknown';
  }

  function createTokenFromString(token: string, lang: string): IToken[] {
    return [
      {
        format: lang,
        type: 'default',
        value: token,
        length: token.length
      } as IToken
    ];