How to use the bashlex.tokenizer.tokentype.WORD token type in bashlex

To help you get started, we’ve selected a few bashlex examples based on popular ways the library is used in public projects.
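bashlex is a Python port of the bash parser, and tokentype.WORD is the token type its tokenizer assigns to ordinary words before they are turned into AST nodes. You rarely construct these tokens yourself: the public bashlex.parse entry point drives the tokenizer for you and surfaces WORD tokens as nodes of kind 'word'. A minimal sketch using only the documented API (the input string is just an illustration):

import bashlex
from bashlex import ast

# every WORD token ends up as a 'word' node in the parse tree;
# the visitor walks into substitutions, so nested words print too
class wordprinter(ast.nodevisitor):
    def visitword(self, n, word):
        print(n.pos, word)

for tree in bashlex.parse('a $(b) "c"'):
    wordprinter().visit(tree)

The examples below, taken from the project's own tests and source, show where tokentype.WORD appears internally.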


github idank / bashlex / tests / test-tokenizer.py
        s = '$(a $(b))'
        self.assertTokens(s, [
                          t(tt.WORD, '$(a $(b))', [0, 9], hasdollarset)])

        s = '$(a ${b})'
        self.assertTokens(s, [
                          t(tt.WORD, '$(a ${b})', [0, 9], hasdollarset)])

        s = '$(a $[b])'
        self.assertTokens(s, [
                          t(tt.WORD, '$(a $[b])', [0, 9], hasdollarset)])

        s = '"$(a)"'
        self.assertTokens(s, [
                          t(tt.WORD, '"$(a)"', [0, 6], set([flags.word.HASDOLLAR,
                                                            flags.word.QUOTED]))])

        s = 'a $(! b)'
        self.assertTokens(s, [
                          t(tt.WORD, 'a', [0, 1]),
                          t(tt.WORD, '$(! b)', [2, 8], hasdollarset)])

        s = '$(!|!||)'
        self.assertTokens(s, [
                          t(tt.WORD, '$(!|!||)', [0, 8], hasdollarset)])

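These assertions pin down both the token text and its [start, end] span, and show that nested substitutions never split the word. The same invariant is visible through the public API, as in this small sketch:

import bashlex

# the whole '$(a $(b))' is a single word spanning offsets 0 to 9
word = bashlex.parse('$(a $(b))')[0].parts[0]
print(word.word, word.pos)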
github idank / bashlex / tests / test-tokenizer.py
def test_simple(self):
        s = 'a b'
        self.assertTokens(s, [
                          t(tt.WORD, 'a', [0, 1]),
                          t(tt.WORD, 'b', [2, 3])])
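For comparison, the public API yields the same two words; per the assertion above they should come out at spans [0, 1] and [2, 3]:

import bashlex

trees = bashlex.parse('a b')
for part in trees[0].parts:
    print(part.kind, part.word, part.pos)
# word a (0, 1)
# word b (2, 3)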
github idank / bashlex / bashlex / parser.py
def _makeparts(p):
    # collect a grammar production's children into a list of ast nodes
    parts = []
    for i in range(1, len(p)):
        if isinstance(p[i], ast.node):
            parts.append(p[i])
        elif isinstance(p[i], list):
            parts.extend(p[i])
        elif isinstance(p.slice[i], tokenizer.token):
            if p.slice[i].ttype == tokenizer.tokentype.WORD:
                # WORD tokens may carry expansions ($x, $(y), ...);
                # turn them into proper word nodes
                parserobj = p.context
                parts.append(_expandword(parserobj, p.slice[i]))
            else:
                # any other token in a production is a reserved word
                parts.append(ast.node(kind='reservedword', word=p[i],
                                      pos=p.lexspan(i)))
        else:
            pass

    return parts
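_makeparts is where WORD tokens and everything else diverge: WORD tokens go through _expandword and become 'word' nodes (with any substitutions attached as children), while all other tokens become 'reservedword' nodes. Both kinds are easy to see with the documented dump() helper:

import bashlex

# 'while', 'do' and 'done' become reservedword nodes,
# 'true' and 'break' become word nodes
print(bashlex.parse('while true; do break; done')[0].dump())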
github idank / bashlex / bashlex / tokenizer.py
def _specialcasetokens(self, tokstr):
        # 'in' is a reserved word only when it follows the word after
        # for/case/select, e.g. 'for x in ...'
        if (self._last_read_token.ttype == tokentype.WORD and
            self._token_before_that.ttype in (tokentype.FOR,
                                              tokentype.CASE,
                                              tokentype.SELECT) and
            tokstr == 'in'):
                if self._token_before_that.ttype == tokentype.CASE:
                    self._parserstate.add(parserflags.CASEPAT)
                    self._esacs_needed_count += 1
                return tokentype.IN

        if (self._last_read_token.ttype == tokentype.WORD and
            self._token_before_that.ttype in (tokentype.FOR, tokentype.SELECT) and
            tokstr == 'do'):
            return tokentype.DO

        if self._esacs_needed_count:
            self._esacs_needed_count -= 1
            if tokstr == 'esac':
                self._parserstate.discard(parserflags.CASEPAT)
                return tokentype.ESAC

        if self._parserstate & parserflags.ALLOWOPNBRC:
            self._parserstate.discard(parserflags.ALLOWOPNBRC)
            if tokstr == '{':
                self._open_brace_count += 1
                # bash/parse.y L2887
                return tokentype.LEFT_CURLY
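The effect of this special-casing shows up in the parse tree: after 'for f', the following 'in' is promoted from a plain WORD to the reserved word IN. A sketch that makes the promotion visible:

import bashlex
from bashlex import ast

class reservedprinter(ast.nodevisitor):
    def visitreservedword(self, n, word):
        print('reservedword', word, n.pos)

# 'in' and 'do' are reserved here purely because of their position
for tree in bashlex.parse('for f in a b; do echo $f; done'):
    reservedprinter().visit(tree)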
github idank / bashlex / bashlex / tokenizer.py
                    ps.discard(parserflags.CASEPAT)
                    ps.discard(parserflags.CASESTMT)
                elif ttype == tokentype.CASE:
                    ps.add(parserflags.CASESTMT)
                elif ttype == tokentype.COND_END:
                    ps.discard(parserflags.CONDCMD)
                    ps.discard(parserflags.CONDEXPR)
                elif ttype == tokentype.COND_START:
                    ps.add(parserflags.CONDCMD)
                elif ttype == tokentype.LEFT_CURLY:
                    self._open_brace_count += 1
                elif ttype == tokentype.RIGHT_CURLY and self._open_brace_count:
                    self._open_brace_count -= 1
                return self._createtoken(ttype, tokenword)

        tokenword = self._createtoken(tokentype.WORD, tokenword, utils.typedset(wordflags))
        if d['dollar_present']:
            tokenword.flags.add(wordflags.HASDOLLAR)
        if d['quoted']:
            tokenword.flags.add(wordflags.QUOTED)
        if d['compound_assignment'] and tokenword[-1] == ')':
            tokenword.flags.add(wordflags.COMPASSIGN)
        if self._is_assignment(tokenword.value, bool(self._parserstate & parserflags.COMPASSIGN)):
            tokenword.flags.add(wordflags.ASSIGNMENT)
            if self._assignment_acceptable(self._last_read_token):
                tokenword.flags.add(wordflags.NOSPLIT)
                if self._parserstate & parserflags.COMPASSIGN:
                    tokenword.flags.add(wordflags.NOGLOB)

        # bash/parse.y L4865
        if self._command_token_position(self._last_read_token):
            pass
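The word flags set here (HASDOLLAR, QUOTED, ...) never appear on AST nodes directly, but they steer expansion: a quoted word containing a substitution, like the '"$(a)"' case in the tests above, stays a single word node whose substitution is recorded in its parts. A sketch of what that looks like through bashlex.parse (the inline comments show the expected output for this input):

import bashlex

tree = bashlex.parse('"$(a)"')[0]
word = tree.parts[0]
print(word.kind, word.word)           # word "$(a)"
print([p.kind for p in word.parts])   # ['commandsubstitution']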