How to use the bashlex.tokenizer function in bashlex

To help you get started, we’ve selected a few bashlex examples based on popular ways it is used in public projects.

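If you just need the call pattern, here is a minimal sketch, assuming only that bashlex is installed: tokenizer.tokenizer takes the input string and a fresh state.parserstate(), and iterating the result yields token objects (the same pattern the test helper further down uses). The sample command string is arbitrary.

# Minimal usage sketch of bashlex.tokenizer
from bashlex import tokenizer, state

def tokenize(s):
    # iterate the tokenizer to collect all tokens for the string
    return list(tokenizer.tokenizer(s, state.parserstate()))

for tok in tokenize('echo hello | wc -l'):
    # every token carries its type in tok.ttype (a tokenizer.tokentype member)
    print(tok.ttype, tok)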

From idank/bashlex: tests/test-tokenizer.py
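        # ('tokenize', 't', 'tt' and 'hasdollarset' are test helpers defined
        # elsewhere in this module; 'tt' refers to tokenizer.tokentype)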
        s = '$(case a in (b) c ;; (d) e ;; esac)'
        self.assertTokens(s, [
                          t(tt.WORD, '$(case a in (b) c ;; (d) e ;; esac)',
                            [0, len(s)], hasdollarset)])

        s = '$(do )'
        self.assertTokens(s, [
                          t(tt.WORD, '$(do )', [0, len(s)], hasdollarset)])

        s = '$((a))'
        self.assertTokens(s, [
                          t(tt.WORD, '$((a))', [0, len(s)], hasdollarset)])

        s = '$('
        self.assertRaises(tokenizer.MatchedPairError,
                          tokenize, s)

        s = '$(;'
        self.assertRaises(tokenizer.MatchedPairError,
                          tokenize, s)

        s = '$(<'
        self.assertRaises(tokenizer.MatchedPairError,
                          tokenize, s)

        s = '$(<<'
        self.assertRaises(tokenizer.MatchedPairError,
                          tokenize, s)

        s = '$(a\\b)'
        self.assertTokens(s, [
                          t(tt.WORD, '$(a\\b)', [0, len(s)], hasdollarset)])
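The unterminated inputs above ('$(', '$(;', '$(<', '$(<<') all fail with tokenizer.MatchedPairError. A minimal sketch of triggering the same error outside the test harness, assuming bashlex is installed:

# Sketch: an unterminated $( raises MatchedPairError when tokenized directly
from bashlex import tokenizer, state

try:
    list(tokenizer.tokenizer('$(', state.parserstate()))
except tokenizer.MatchedPairError as exc:
    print('unmatched $(:', exc)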
From idank/bashlex: tests/test-tokenizer.py
tokenize = lambda s: list(tokenizer.tokenizer(s, state.parserstate()))
From idank/bashlex: bashlex/parser.py
import os, copy

from bashlex import yacc, tokenizer, state, ast, subst, flags, errors, heredoc

def _partsspan(parts):
    return parts[0].pos[0], parts[-1].pos[1]

tokens = [e.name for e in tokenizer.tokentype]
precedence = (
    ('left', 'AMPERSAND', 'SEMICOLON', 'NEWLINE', 'EOF'),
    ('left', 'AND_AND', 'OR_OR'),
    ('right', 'BAR', 'BAR_AND')
)

def p_inputunit(p):
    '''inputunit : simple_list simple_list_terminator
                 | NEWLINE
                 | error NEWLINE
                 | EOF'''
    # XXX
    if p.lexer._parserstate & flags.parser.CMDSUBST:
        p.lexer._parserstate.add(flags.parser.EOFTOKEN)

    if isinstance(p[1], ast.node):
From idank/bashlex: bashlex/subst.py
def _parsedolparen(parserobj, base, sindex):
    copiedps = copy.copy(parserobj.parserstate)
    copiedps.add(flags.parser.CMDSUBST)
    copiedps.add(flags.parser.EOFTOKEN)
    string = base[sindex:]

    tokenizerargs = {'eoftoken' : tokenizer.token(tokenizer.tokentype.RIGHT_PAREN, ')'),
                     'parserstate' : copiedps,
                     'lastreadtoken' : parserobj.tok._last_read_token,
                     'tokenbeforethat' : parserobj.tok._token_before_that,
                     'twotokensago' : parserobj.tok._two_tokens_ago}

    node, endp = _recursiveparse(parserobj, base, sindex, tokenizerargs)

    if string[endp] != ')':
        while endp > 0 and string[endp-1] == '\n':
            endp -= 1

    return node, sindex + endp
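_parsedolparen handles the body of a $( ... ) command substitution: the copied parser state with CMDSUBST and EOFTOKEN set lets the nested parse stop at the matching ')'. You would normally reach it through the project's top-level entry point; a minimal sketch, assuming the public bashlex.parse API and the dump() helper on ast nodes:

# Sketch: a $(...) substitution parsed via the public API, which recurses
# into subst._parsedolparen for the body of the substitution
import bashlex

for tree in bashlex.parse('echo $(ls -l)'):
    print(tree.dump())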
From idank/bashlex: bashlex/parser.py
def _makeparts(p):
    parts = []
    for i in range(1, len(p)):
        if isinstance(p[i], ast.node):
            parts.append(p[i])
        elif isinstance(p[i], list):
            parts.extend(p[i])
        elif isinstance(p.slice[i], tokenizer.token):
            if p.slice[i].ttype == tokenizer.tokentype.WORD:
                parserobj = p.context
                parts.append(_expandword(parserobj, p.slice[i]))
            else:
                parts.append(ast.node(kind='reservedword', word=p[i],
                                      pos=p.lexspan(i)))
        else:
            pass

    return parts
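_makeparts decides which grammar symbols survive into the tree: WORD tokens are expanded into word nodes via _expandword, while other tokens become reservedword nodes carrying their lexical span. A quick sketch of observing that in a parsed tree, again assuming the public bashlex.parse API and node dump() helper:

# Sketch: 'if'/'then'/'fi' come out as reservedword nodes, while 'true' and
# 'echo ok' become word nodes
import bashlex

tree = bashlex.parse('if true; then echo ok; fi')[0]
print(tree.dump())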