How to use the sqlparse.tokens.Punctuation token type in sqlparse

To help you get started, we've selected a few examples of sqlparse.tokens.Punctuation in use, drawn from popular public projects.

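sqlparse.tokens.Punctuation is a token type (a ttype), not a callable: the tokenizer tags commas, parentheses, and semicolons with it, and code typically compares a token's ttype against it. A minimal, standalone example:

import sqlparse
from sqlparse import tokens as T

# Flatten a parsed statement and pick out the punctuation leaf tokens.
for tok in sqlparse.parse("SELECT a, b FROM t;")[0].flatten():
    if tok.ttype is T.Punctuation:
        print(repr(tok.value))   # prints ',' then ';'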

github microsoft / pgtoolsservice / pgsqltoolsservice / language / sqlautocomplete / function_metadata.py
from sqlparse.tokens import Comment, Keyword, Punctuation, Whitespace

def parse_typed_field_list(tokens):
    """Parses an argument/column list, yielding TypedFieldMetadata objects.

    Field/column lists are used in function signatures and table
    definitions. This function parses a flattened list of sqlparse tokens
    and yields one metadata argument per argument / column.
    """

    # postgres function argument list syntax:
    #   " ( [ [ argmode ] [ argname ] argtype
    #               [ { DEFAULT | = } default_expr ] [, ...] ] )"

    mode_names = set(('IN', 'OUT', 'INOUT', 'VARIADIC'))
    parse_state = 'type'
    parens = 0
    field = TypedFieldMetadata()

    for tok in tokens:
        if tok.ttype in Whitespace or tok.ttype in Comment:
            continue
        elif tok.ttype in Punctuation:
            if parens == 0 and tok.value == ',':
                # End of the current field specification
                if field.type:
                    yield field
                # Initialize metadata holder for the next field
                field, parse_state = TypedFieldMetadata(), 'type'
            elif parens == 0 and tok.value == '=':
                parse_state = 'default'
            else:
                field[parse_state].append(tok)
                if tok.value == '(':
                    parens += 1
                elif tok.value == ')':
                    parens -= 1
        elif parens == 0:
            if tok.ttype in Keyword:
                ...  # remaining keyword/name handling omitted in this excerpt
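
A runnable sketch of the same top-level-comma technique using only sqlparse itself (the SQL string and variable names are illustrative, not part of pgtoolsservice):

import sqlparse
from sqlparse import tokens as T

# Split "(a integer, b numeric(10, 2), c text)" on commas at paren depth 1.
stmt = sqlparse.parse("(a integer, b numeric(10, 2), c text)")[0]
depth, current, fields = 0, [], []
for tok in stmt.flatten():
    if tok.ttype in T.Punctuation and tok.value == '(':
        depth += 1
        if depth == 1:
            continue            # skip the outer opening paren
    elif tok.ttype in T.Punctuation and tok.value == ')':
        depth -= 1
        if depth == 0:
            continue            # skip the outer closing paren
    if tok.ttype in T.Punctuation and tok.value == ',' and depth == 1:
        fields.append(''.join(current).strip())
        current = []
    else:
        current.append(tok.value)
fields.append(''.join(current).strip())
print(fields)                   # ['a integer', 'b numeric(10, 2)', 'c text']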
github freewizard / SublimeFormatSQL / sqlparse / engine / grouping.py
from sqlparse import tokens as T

def _group_left_right(tlist, ttype, value, cls,
                      check_right=lambda t: True,
                      check_left=lambda t: True,
                      include_semicolon=False):
    # Recurse into sub-lists first, skipping groups that already are cls.
    [_group_left_right(sgroup, ttype, value, cls, check_right, check_left,
                       include_semicolon) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, cls)]
    idx = 0
    token = tlist.token_next_match(idx, ttype, value)
    while token:
        right = tlist.token_next(tlist.token_index(token))
        left = tlist.token_prev(tlist.token_index(token))
        if right is None or not check_right(right):
            token = tlist.token_next_match(tlist.token_index(token) + 1,
                                           ttype, value)
        elif left is None or not check_left(left):
            token = tlist.token_next_match(tlist.token_index(token) + 1,
                                           ttype, value)
        else:
            if include_semicolon:
                sright = tlist.token_next_match(tlist.token_index(right),
                                                T.Punctuation, ';')
                if sright is not None:
                    # only overwrite "right" if a semicolon is actually
                    # present.
                    right = sright
            tokens = tlist.tokens_between(left, right)[1:]
            if not isinstance(left, cls):
                new = cls([left])
                new_idx = tlist.token_index(left)
                tlist.tokens.remove(left)
                tlist.tokens.insert(new_idx, new)
                left = new
            left.tokens.extend(tokens)
            for t in tokens:
                tlist.tokens.remove(t)
            token = tlist.token_next_match(tlist.token_index(left) + 1,
                                           ttype, value)
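
token_next_match and token_index belong to the legacy sqlparse 0.1.x API bundled with this plugin; current releases dropped them. A standalone sketch of the same lookup, finding the ';' with the stable flatten()/match() API:

import sqlparse
from sqlparse import tokens as T

stmt = sqlparse.parse("SELECT 1;")[0]
# First ';' punctuation token in the flattened token stream, or None.
semicolon = next((tok for tok in stmt.flatten()
                  if tok.match(T.Punctuation, ';')), None)
print(repr(semicolon))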
github future-architect / Sublime-uroboroSQL-formatter / sqlparse / engine / grouping.py
def group_brackets(tlist):
    """Group parentheses () or square brackets []

        This is just like _group_matching, but complicated by the fact that
        round brackets can contain square bracket groups and vice versa
    """

    if isinstance(tlist, (sql.Parenthesis, sql.SquareBrackets)):
        idx = 1
    else:
        idx = 0

    # Find the first opening bracket
    token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])

    while token:
        start_val = token.value  # either '(' or '['
        if start_val == '(':
            end_val = ')'
            group_class = sql.Parenthesis
        else:
            end_val = ']'
            group_class = sql.SquareBrackets

        tidx = tlist.token_index(token)

        # Find the corresponding closing bracket
        end = _find_matching(tidx, tlist, T.Punctuation, start_val,
                             T.Punctuation, end_val)
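
The visible result of this grouping with a current sqlparse release (the SQL string and helper name are illustrative):

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT (1 + 2), arr[0] FROM t")[0]

def show_bracket_groups(token, depth=0):
    # Print every Parenthesis / SquareBrackets group in the parse tree.
    if isinstance(token, (sql.Parenthesis, sql.SquareBrackets)):
        print('  ' * depth + type(token).__name__ + ': ' + token.value)
    if isinstance(token, sql.TokenList):
        for child in token.tokens:
            show_bracket_groups(child, depth + 1)

show_bracket_groups(stmt)   # finds the Parenthesis and the SquareBrackets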
github andialbrecht / sqlparse / sqlparse / engine / statement_splitter.py
    def _change_splitlevel(self, ttype, value):
        """Get the new split level (increase, decrease or remain equal)"""

        # parenthesis increase/decrease a level
        if ttype is T.Punctuation and value == '(':
            return 1
        elif ttype is T.Punctuation and value == ')':
            return -1
        elif ttype not in T.Keyword:  # if normal token return
            return 0

        # Everything after here is ttype = T.Keyword
        # Note: once one of the branches below matches, we return immediately.
        unified = value.upper()

        # Three keywords begin with CREATE, but only one of them is DDL.
        # A DDL CREATE can additionally contain more words, e.g. "OR REPLACE".
        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
            self._is_create = True
            return 0
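
This helper feeds sqlparse's statement splitter: a ';' only ends a statement when the split level is back at zero, which is what sqlparse.split() exposes publicly:

import sqlparse

# Statements end at a ';' seen while the split level is zero.
print(sqlparse.split("SELECT 1; SELECT (2); SELECT 3"))
# ['SELECT 1;', 'SELECT (2);', 'SELECT 3']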
github dbcli / pgcli / pgcli / packages / parseutils / tables.py
def extract_from_part(parsed, stop_at_punctuation=True):
    tbl_prefix_seen = False
    for item in parsed.tokens:
        if tbl_prefix_seen:
            if is_subselect(item):
                for x in extract_from_part(item, stop_at_punctuation):
                    yield x
            elif stop_at_punctuation and item.ttype is Punctuation:
                # PEP 479: raising StopIteration inside a generator turns into
                # a RuntimeError on Python 3.7+, so end the generator instead.
                return
            # An incomplete nested select won't be recognized correctly as a
            # sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
            # the second FROM to trigger this elif condition, ending extraction
            # early. So we need to ignore the keyword FROM.
            # Also 'SELECT * FROM abc JOIN def' will trigger this elif
            # condition. So we need to ignore the keyword JOIN and its variants
            # INNER JOIN, FULL OUTER JOIN, etc.
            elif item.ttype is Keyword and (
                    not item.value.upper() == 'FROM') and (
                    not item.value.upper().endswith('JOIN')):
                tbl_prefix_seen = False
            else:
                yield item
        elif item.ttype is Keyword or item.ttype is Keyword.DML:
            item_val = item.value.upper()
            if item_val in ('COPY', 'FROM', 'INTO', 'UPDATE', 'TABLE') \
                    or item_val.endswith('JOIN'):
                tbl_prefix_seen = True
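
A hedged usage sketch, assuming extract_from_part and its companion helper is_subselect from the same pgcli module are in scope:

import sqlparse

parsed = sqlparse.parse("SELECT * FROM users u JOIN orders o ON u.id = o.uid")[0]
# Yields the tokens between FROM/JOIN and the next clause boundary.
for item in extract_from_part(parsed):
    print(repr(item.value))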
github snarfed / mockfacebook / fql.py
    group = self.statement

    for tok in group.tokens:
      if isinstance(tok, sql.Function):
        assert isinstance(tok.tokens[0], sql.Identifier)
        name = tok.tokens[0].tokens[0]
        if name.value not in Fql.FUNCTIONS:
          raise InvalidFunctionError(name.value)

        # check number of params
        #
        # i wish i could use tok.get_parameters() here, but it doesn't work
        # with string parameters for some reason. :/
        assert isinstance(tok.tokens[1], sql.Parenthesis)
        params = [t for t in tok.tokens[1].flatten()
                  if t.ttype not in (tokens.Punctuation, tokens.Whitespace)]
        actual_num = len(params)
        expected_num = Fql.FUNCTIONS[name.value]
        if actual_num != expected_num:
          raise ParamMismatchError(name.value, expected_num, actual_num)

        # handle each function
        replacement = None
        if name.value == 'me':
          replacement = str(self.me)
        elif name.value == 'now':
          replacement = str(int(time.time()))
        elif name.value == 'strlen':
          # pass through to sqlite's length() function
          name.value = 'length'
        elif name.value == 'substr':
          # the index param is 0-based in FQL but 1-based in sqlite
          ...  # remainder of the substr handling omitted in this excerpt
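
The punctuation/whitespace filtering used for the parameter count is easy to reproduce standalone; a sketch with an ordinary SQL function (the query and helper name are illustrative, not part of mockfacebook):

import sqlparse
from sqlparse import sql, tokens

stmt = sqlparse.parse("SELECT substr(name, 1, 5) FROM t")[0]

def first_function(token):
    # Depth-first search for the first sql.Function group in the tree.
    if isinstance(token, sql.Function):
        return token
    if isinstance(token, sql.TokenList):
        for child in token.tokens:
            found = first_function(child)
            if found is not None:
                return found
    return None

func = first_function(stmt)
parens = next(t for t in func.tokens if isinstance(t, sql.Parenthesis))
params = [t for t in parens.flatten()
          if t.ttype not in (tokens.Punctuation, tokens.Whitespace)]
print(len(params), [p.value for p in params])   # 3 ['name', '1', '5']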
github apache / bloodhound / bloodhound_multiproduct / multiproduct / dbcursor.py
            while token:
                if isinstance(token, Types.Parenthesis):
                    ptoken = self._token_first(token)
                    if not ptoken.match(Tokens.Punctuation, '('):
                        raise Exception("Invalid INSERT statement")
                    last_token = ptoken
                    while ptoken:
                        if not ptoken.match(Tokens.Punctuation, separators) and \
                           not ptoken.match(Tokens.Keyword, separators) and \
                           not ptoken.is_whitespace():
                            ptoken = self._expression_token_unwind_hack(token, ptoken, self._token_prev(token, ptoken))
                            self._eval_expression_value(token, ptoken)
                        last_token = ptoken
                        ptoken = self._token_next(token, ptoken)
                    if not last_token or \
                       not last_token.match(Tokens.Punctuation, ')'):
                        raise Exception("Invalid INSERT statement, unable to find column value parenthesis end")
                    insert_extra_column_value(tablename, token, last_token)
                elif not token.match(Tokens.Punctuation, separators) and\
                     not token.match(Tokens.Keyword, separators) and\
                     not token.is_whitespace():
                    raise Exception("Invalid INSERT statement, unable to parse VALUES section")
                token = self._token_next(parent, token)
        elif token.match(Tokens.DML, 'SELECT'):
            self._select(parent, token, insert_table=tablename)
        else:
            raise Exception("Invalid INSERT statement")
        return
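
Token.match(ttype, values), which the cursor code leans on throughout, accepts a single value or a sequence of values; a standalone sketch:

import sqlparse
from sqlparse import tokens as T

stmt = sqlparse.parse("INSERT INTO t (a, b) VALUES (1, 2)")[0]
# match() checks both the ttype and the value(s).
commas = [tok for tok in stmt.flatten() if tok.match(T.Punctuation, (',',))]
print(len(commas))   # 2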
github cmu-db / mongodb-d4 / libs / sqlparse / filters.py
    def _process_parenthesis(self, tlist):
        first = tlist.token_next(0)
        indented = False
        if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
            self.indent += 1
            tlist.tokens.insert(0, self.nl())
            indented = True
        num_offset = self._get_offset(
            tlist.token_next_match(0, T.Punctuation, '('))
        self.offset += num_offset
        self._process_default(tlist, stmts=not indented)
        if indented:
            self.indent -= 1
        self.offset -= num_offset
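
This filter runs as part of the reindent pipeline; the public entry point that exercises it is sqlparse.format():

import sqlparse

print(sqlparse.format(
    "select a, b from t where (a = 1 and b = 2)",
    reindent=True,
    keyword_case='upper',
))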
github freewizard / SublimeFormatSQL / sqlparse / engine / grouping.py
def group_typecasts(tlist):
    _group_left_right(tlist, T.Punctuation, '::', sql.Identifier)
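
After this grouping pass, a :: cast and its operands form a single Identifier group, which you can see with a current sqlparse:

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT x::integer FROM t")[0]
ident = next(tok for tok in stmt.tokens if isinstance(tok, sql.Identifier))
print(ident.value)   # x::integer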
github cmu-db / mongodb-d4 / libs / sqlparse / sql.py
    def get_real_name(self):
        """Returns the real name (object name) of this identifier."""
        # a.b
        dot = self.token_next_match(0, T.Punctuation, '.')
        if dot is None:
            return self.token_next_by_type(0, T.Name).value
        else:
            next_ = self.token_next_by_type(self.token_index(dot),
                                            (T.Name, T.Wildcard))
            if next_ is None:  # invalid identifier, e.g. "a."
                return None
            return next_.value
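
A quick check of get_real_name() through the public API (the schema-qualified name is illustrative):

import sqlparse
from sqlparse import sql

stmt = sqlparse.parse("SELECT * FROM myschema.users AS u")[0]
ident = next(tok for tok in stmt.tokens if isinstance(tok, sql.Identifier))
print(ident.get_real_name())   # users
print(ident.get_alias())       # u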