How to use the bashlex.tokenizer.tokentype.ASSIGNMENT_WORD token type in bashlex

To help you get started, we’ve selected a few bashlex examples based on popular ways it is used in public projects.

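ASSIGNMENT_WORD is the token type the tokenizer assigns to words such as a=b when they appear where an assignment is accepted; the parser then turns that token into an AST node of kind 'assignment' (see the parser.py excerpt below). As a minimal sketch of how this surfaces through the public API, assuming the documented bashlex.parse() entry point and the kind/word attributes on its AST nodes:

import bashlex

# 'a=b' sits in command position, so it is tokenized as ASSIGNMENT_WORD
# and shows up as an 'assignment' node in the parsed tree.
command = bashlex.parse('a=b echo hello')[0]

for part in command.parts:
    print(part.kind, part.word)

# expected output (roughly):
#   assignment a=b
#   word echo
#   word hello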

github idank / bashlex / tests / test-tokenizer.py
def test_assignment(self):
        s = 'a=b'
        self.assertTokens(s, [
                          t(tt.ASSIGNMENT_WORD, 'a=b', [0, 3],
                            flags=set([flags.word.NOSPLIT, flags.word.ASSIGNMENT]))])

        s = 'a+=b'
        self.assertTokens(s, [
                          t(tt.ASSIGNMENT_WORD, 'a+=b', [0, 4],
                            flags=set([flags.word.NOSPLIT, flags.word.ASSIGNMENT]))])
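
The [0, 3] and [0, 4] arguments in these assertions are the token's start and end offsets in the input string, and the second case shows that append assignments (a+=b) are tokenized the same way. The offsets carry through to the parsed tree as each node's pos attribute; a small sketch, assuming the kind/word/pos attribute names used by bashlex's AST nodes:

import bashlex

node = bashlex.parse('a+=b true')[0].parts[0]
print(node.kind, node.word, node.pos)   # e.g. assignment a+=b (0, 4)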
github idank / bashlex / bashlex / parser.py
def p_simple_command_element(p):
    '''simple_command_element : WORD
                              | ASSIGNMENT_WORD
                              | redirection'''
    if isinstance(p[1], ast.node):
        p[0] = [p[1]]
        return

    parserobj = p.context
    p[0] = [_expandword(parserobj, p.slice[1])]

    # change the word node to an assignment if necessary
    if p.slice[1].ttype == tokenizer.tokentype.ASSIGNMENT_WORD:
        p[0][0].kind = 'assignment'
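
The kind = 'assignment' rewrite at the end of this grammar rule is what distinguishes assignment words from ordinary words in the final tree. A quick way to see the effect is to dump a parsed command; the dump() output sketched in the comment below is approximate:

import bashlex

tree = bashlex.parse('FOO=bar make install')[0]
print(tree.dump())
# The first part of the command is printed as an assignment node
# ('FOO=bar'), while 'make' and 'install' remain plain word nodes.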
github idank / bashlex / bashlex / tokenizer.py
        tokenword.flags.add(wordflags.NOGLOB)

        # bashlex/parse.y L4865
        if self._command_token_position(self._last_read_token):
            pass

        if tokenword.value[0] == '{' and tokenword.value[-1] == '}' and c in '<>':
            if shutils.legal_identifier(tokenword.value[1:]):
                # XXX is this needed?
                tokenword.value = tokenword.value[1:]
                tokenword.ttype = tokentype.REDIR_WORD

            return tokenword

        if len(tokenword.flags & set([wordflags.ASSIGNMENT, wordflags.NOSPLIT])) == 2:
            tokenword.ttype = tokentype.ASSIGNMENT_WORD

        if self._last_read_token.ttype == tokentype.FUNCTION:
            self._parserstate.add(parserflags.ALLOWOPNBRC)
            self._function_dstart = self._line_number
        elif self._last_read_token.ttype in (tokentype.CASE, tokentype.SELECT, tokentype.FOR):
            pass # bashlex/parse.y L4907

        return tokenword
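
The check on both wordflags.ASSIGNMENT and wordflags.NOSPLIT is what restricts ASSIGNMENT_WORD to command position: a word that merely looks like an assignment but appears as an argument never gets NOSPLIT, so it is emitted as a plain WORD. A sketch of the observable difference through bashlex.parse(), with the 'assignment'/'word' kinds coming from the parser rule shown above (behaviour worth verifying against your bashlex version):

import bashlex

prefix = bashlex.parse('a=b echo hi')[0]
argument = bashlex.parse('echo a=b')[0]

print([p.kind for p in prefix.parts])    # ['assignment', 'word', 'word']
print([p.kind for p in argument.parts])  # ['word', 'word'] -- here a=b is just an argument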
github idank / bashlex / bashlex / tokenizer.py
def _command_token_position(self, token):
        return (token.ttype == tokentype.ASSIGNMENT_WORD or
                self._parserstate & parserflags.REDIRLIST or
                (token.ttype not in (tokentype.SEMI_SEMI, tokentype.SEMI_AND, tokentype.SEMI_SEMI_AND) and self._reserved_word_acceptable(token)))
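
Note the first branch: a token that is itself an ASSIGNMENT_WORD still counts as a command token position, which is why a chain of prefix assignments keeps being recognized. A short illustration via the public parse API (the expected kinds are an assumption based on the grammar rule above):

import bashlex

command = bashlex.parse('a=b b=c echo hi')[0]
print([p.kind for p in command.parts])
# ['assignment', 'assignment', 'word', 'word'] -- each a=... before the
# command name is accepted because the preceding ASSIGNMENT_WORD keeps
# the tokenizer in command position.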