How to use pyparsing - 10 common examples

To help you get started, we’ve selected a few pyparsing examples, based on popular ways it is used in public projects.

From scVENUS/PeekabooAV: peekaboo/ruleset/expressions.py (on GitHub)

        result.setParseAction(EvalResult)
        rvallist.setParseAction(EvalList)
        rvalset.setParseAction(EvalSet)

        identity_test = Keyword('is') + ~Keyword('not') | Combine(
            Keyword('is') + Keyword('not'), adjacent=False, joinString=' ')
        membership_test = Keyword('in') | Combine(
            Keyword('not') + Keyword('in'), adjacent=False, joinString=' ')
        comparison_op = oneOf('< <= > >= != == isdisjoint')
        comparison = identity_test | membership_test | comparison_op

        self.parser = infixNotation(operand, [
            (Literal('**'), 2, opAssoc.LEFT, EvalPower),
            (oneOf('+ - ~'), 1, opAssoc.RIGHT, EvalModifier),
            (oneOf('* / // %'), 2, opAssoc.LEFT, EvalArith),
            (oneOf('+ -'), 2, opAssoc.LEFT, EvalArith),
            (oneOf('<< >>'), 2, opAssoc.LEFT, EvalArith),
            (Literal('&'), 2, opAssoc.LEFT, EvalArith),
            (Literal('^'), 2, opAssoc.LEFT, EvalArith),
            (Literal('|'), 2, opAssoc.LEFT, EvalArith),
            (comparison, 2, opAssoc.LEFT, EvalLogic),
            (Keyword('not'), 1, opAssoc.RIGHT, EvalModifier),
            (Keyword('and'), 2, opAssoc.LEFT, EvalLogic),
            (Keyword('or'), 2, opAssoc.LEFT, EvalLogic),
            (Keyword('->'), 2, opAssoc.LEFT, EvalArith),
        ])
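
This excerpt wires pyparsing's infixNotation helper to PeekabooAV's own evaluator classes (EvalArith, EvalLogic and friends). As a rough, self-contained sketch of the same pattern, the toy parser below uses infixNotation with plain precedence levels and no parse actions, so the grouping is visible directly in the results; it is not PeekabooAV's actual rule grammar.

from pyparsing import infixNotation, oneOf, opAssoc
from pyparsing import pyparsing_common as ppc

# two precedence levels: '*'/'/' bind tighter than '+'/'-'
expr = infixNotation(ppc.number, [
    (oneOf('* /'), 2, opAssoc.LEFT),
    (oneOf('+ -'), 2, opAssoc.LEFT),
])

print(expr.parseString('1 + 2 * 3'))   # [[1, '+', [2, '*', 3]]]
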
From google/coursebuilder-core: coursebuilder/modules/assessment_tags/gift.py (on GitHub)

        left_curly +
        multi_choice_answers.setParseAction(set_multi_choice_answers) +
        right_curly
    ).setParseAction(set_multi_choice_question)

    multi_choice_question.ignore(comment)

    # True-false questions.
    # Sample:
    # // question: 0 name: TrueStatement using {T} style
    # ::TrueStatement about Grant::Grant was buried in a tomb in NY.{T}
    #
    # // question: 0 name: FalseStatement using {FALSE} style
    # ::FalseStatement about sun::The sun rises in the West.{FALSE}

    true_false_feedback = Combine(
        pound +
        SkipTo(right_curly).setParseAction(strip_spaces))

    true_false_answer = (
        left_curly +
        boolean('answer') +
        Optional(true_false_feedback, default='')('feedback') +
        right_curly)

    true_false_question = (
        Optional(title, default='') +
        task +
        true_false_answer
    ).setParseAction(set_true_false_question)

    true_false_question.ignore(comment)
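
The true/false grammar above combines Combine + SkipTo to capture feedback text and Optional(..., default='') so the feedback results name is always present. Below is a cut-down, hypothetical version of just that idea (left_curly, pound and boolean are redefined locally and strip_spaces is left out); it is not the full GIFT parser.

from pyparsing import Combine, Optional, SkipTo, Suppress, oneOf

left_curly = Suppress('{')
right_curly = Suppress('}')
pound = Suppress('#')
boolean = oneOf('T F TRUE FALSE')

# '#...' feedback is optional; the default keeps 'feedback' present either way
feedback = Combine(pound + SkipTo(right_curly))
answer = (left_curly + boolean('answer')
          + Optional(feedback, default='')('feedback') + right_curly)

print(answer.parseString('{T#Well done}').asDict())
# {'answer': 'T', 'feedback': 'Well done'}
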
From bdcht/amoco: amoco/cas/parser.py (on GitHub)

# published under GPLv2 license

from amoco.logger import Log
logger = Log(__name__)
logger.debug('loading module')

from .expressions import bot,top,reg,ext

# expression parser:
#-------------------

import pyparsing as pp

#terminals:
p_bottop  = pp.oneOf('⊥ T')
p_symbol  = pp.Word(pp.alphas)
p_extern  = pp.Suppress('@')+p_symbol
p_cst     = pp.Suppress('0x')+pp.Combine(pp.Optional('-')+pp.Regex('[0-9a-f]+'))
p_int     = pp.Word(pp.nums).setParseAction(lambda r:int(r[0]))
p_slc     = '['+p_int.setResultsName('start')+':'+p_int.setResultsName('stop')+']'
p_op1     = pp.oneOf('~ -')
p_op2     = pp.oneOf('+ - / // * & | ^ << >> < > == <= >= != ? :')
p_term    = p_bottop|p_symbol|p_extern|p_cst

#nested expressions:
p_expr    = pp.Forward()

p_csl     = pp.Suppress('|')+p_slc+pp.Suppress('->')
p_comp    = pp.Group(pp.Suppress('{')+pp.ZeroOrMore(p_expr)+pp.Suppress('| }'))
p_mem     = 'M'+p_int+pp.Optional(p_symbol)

operators = [(p_op1,1,pp.opAssoc.RIGHT),
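
One detail worth calling out here is pp.Forward(): p_expr is declared before it is defined so that nested constructs can refer to it recursively. A minimal, stand-alone illustration of that pattern (with made-up terminals, not amoco's) looks like this:

import pyparsing as pp

expr = pp.Forward()                       # declare first ...
number = pp.Word(pp.nums).setParseAction(lambda r: int(r[0]))
nested = pp.Group(pp.Suppress('{') + pp.ZeroOrMore(expr) + pp.Suppress('}'))
expr <<= number | nested                  # ... define later, recursively

print(expr.parseString('{1 {2 3} 4}'))    # [[1, [2, 3], 4]]
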
From scanny/python-pptx: tests/unitutil/cxml.py (on GitHub)

def grammar():
    # terminals ----------------------------------
    colon = Literal(":")
    equal = Suppress("=")
    slash = Suppress("/")
    open_paren = Suppress("(")
    close_paren = Suppress(")")
    open_brace = Suppress("{")
    close_brace = Suppress("}")

    # np:tagName ---------------------------------
    nspfx = Word(alphas)
    local_name = Word(alphanums)
    tagname = Combine(nspfx + colon + local_name)

    # np:attr_name=attr_val ----------------------
    attr_name = Word(alphas + ":")
    attr_val = Word(alphanums + " %-./:_")
    attr_def = Group(attr_name + equal + attr_val)
    attr_list = open_brace + delimitedList(attr_def) + close_brace

    text = dblQuotedString.setParseAction(removeQuotes)
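
The attribute-list portion of this grammar is a common pyparsing idiom: Group keeps each name=value pair together, delimitedList handles the commas, and Suppress drops the punctuation. A stripped-down sketch of just that piece (with simpler character sets than the cxml grammar uses) is:

from pyparsing import Group, Suppress, Word, alphanums, alphas, delimitedList

equal = Suppress('=')
attr_def = Group(Word(alphas) + equal + Word(alphanums))            # one name=value pair
attr_list = Suppress('{') + delimitedList(attr_def) + Suppress('}')

print(attr_list.parseString('{w=100, h=200}'))   # [['w', '100'], ['h', '200']]
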
From seporaitis/mysqlparse: tests/grammar/test_data_type.py (on GitHub)

    def test_varbinary(self):
        with self.assertRaises(pyparsing.ParseException):
            data_type_syntax.parseString("VARBINARY").data_type

        self.assertEquals(data_type_syntax.parseString("VARBINARY(8)").length[0], '8')
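
The test relies on pyparsing raising ParseException when the input does not match. The sketch below shows the same behaviour outside a test class, using a stand-in VARBINARY grammar rather than mysqlparse's actual data_type_syntax:

import pyparsing as pp

varbinary = (pp.CaselessKeyword('VARBINARY')
             + pp.Suppress('(') + pp.Word(pp.nums)('length') + pp.Suppress(')'))

try:
    varbinary.parseString('VARBINARY')              # missing (length) -> ParseException
except pp.ParseException as err:
    print('rejected:', err)

print(varbinary.parseString('VARBINARY(8)').length)   # prints: 8
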
From PolySat/libproc: xdrgen/xdr/parser.py (on GitHub)

          g(resolved_type_specifier) + identifier | \
          g(resolved_type_specifier) + lit('*') + identifier

      fielddocumentation = \
          s("{") + (P.Optional(g(kw("key") + identifier + s(";"))) & \
             P.Optional(g(kw("name") + P.QuotedString('"') + s(";"))) & \
             P.Optional(g(kw("unit") + P.QuotedString('"') + s(";"))) & \
             P.Optional(g(kw("offset") + decimal_constant + s(";"))) & \
             P.Optional(g(kw("divisor") + decimal_constant + s(";"))) & \
             P.Optional(g(kw("description") + P.QuotedString('"') + s(";"))) \
          ) + s("}")


      const_expr_value = constant | scopedidentifier
      constant_expr = (const_expr_value + (kw('+') | kw('-')) + const_expr_value) | const_expr_value
      enum_body = s("{") + g(P.delimitedList(g(enumValueIdentifier + s('=') + constant_expr))) + s("}")
      
      struct_body << s("{") + P.OneOrMore(g(declaration + g(P.Optional(fielddocumentation)) + s(";"))) + s("}")

      constant_def = kw("const") - scopedUpperIdentifier - s("=") - constant - s(";")
      namespace_def = kw("namespace") - identifier - s(";")
      namespace_def.setParseAction(self.namespaceParse)

      struct_def = kw("struct") - newscopedidentifier - g(struct_body) + P.Optional(s('=') - type_name) - s(";")
      struct_def.setParseAction(self.newStruct)

      command_options = \
             P.Optional(g(kw("summary") + P.QuotedString('"') + s(";"))) & \
             P.Optional(g(kw("param") + type_name + s(";")))  & \
             P.Optional(g(kw("types") + s("=") + g(type_name + P.ZeroOrMore(s(',') + type_name)) + s(";"))) & \
             P.Optional(g(kw("response") + g(type_name + P.ZeroOrMore(s(',') + type_name)) + s(";")))
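
The & operator used for fielddocumentation and command_options is pyparsing's Each: the clauses may appear in any order, each at most once. Here is a small, self-contained sketch of that operator with invented 'key' and 'unit' clauses (not libproc's grammar):

import pyparsing as pp

key = pp.Group(pp.Keyword('key') + pp.Word(pp.alphas) + pp.Suppress(';'))
unit = pp.Group(pp.Keyword('unit') + pp.QuotedString('"') + pp.Suppress(';'))

# '&' (Each) accepts the optional clauses in any order
doc = pp.Suppress('{') + (pp.Optional(key) & pp.Optional(unit)) + pp.Suppress('}')

print(doc.parseString('{ unit "m/s"; key speed; }'))
# [['unit', 'm/s'], ['key', 'speed']]
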
From pyparsing/pyparsing: examples/lucene_grammar.py (on GitHub)

# at http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/docs/queryparsersyntax.html
#

import pyparsing as pp
from pyparsing import pyparsing_common as ppc

pp.ParserElement.enablePackrat()

COLON, LBRACK, RBRACK, LBRACE, RBRACE, TILDE, CARAT = map(pp.Literal, ":[]{}~^")
LPAR, RPAR = map(pp.Suppress, "()")
and_, or_, not_, to_ = map(pp.CaselessKeyword, "AND OR NOT TO".split())
keyword = and_ | or_ | not_ | to_

expression = pp.Forward()

valid_word = pp.Regex(
    r'([a-zA-Z0-9*_+.-]|\\\\|\\([+\-!(){}\[\]^"~*?:]|\|\||&&))+'
).setName("word")
valid_word.setParseAction(
    lambda t: t[0].replace("\\\\", chr(127)).replace("\\", "").replace(chr(127), "\\")
)

string = pp.QuotedString('"')

required_modifier = pp.Literal("+")("required")
prohibit_modifier = pp.Literal("-")("prohibit")
integer = ppc.integer()
proximity_modifier = pp.Group(TILDE + integer("proximity"))
number = ppc.fnumber()
fuzzy_modifier = TILDE + pp.Optional(number, default=0.5)("fuzzy")

term = pp.Forward().setName("field")
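
Two details from this example generalise well: ParserElement.enablePackrat() memoizes parsing for recursive grammars like this one, and Optional(..., default=...) supplies a value when a modifier is written without one (as in fuzzy terms such as roam~). The toy query term below sketches both and is far simpler than the real Lucene grammar:

import pyparsing as pp
from pyparsing import pyparsing_common as ppc

pp.ParserElement.enablePackrat()          # optional speed-up for recursive grammars

TILDE = pp.Literal('~')
word = pp.Word(pp.alphanums)
fuzzy = TILDE + pp.Optional(ppc.fnumber, default=0.5)('fuzzy')
term = word('term') + pp.Optional(fuzzy)

print(term.parseString('roam~').fuzzy)      # 0.5  (default supplied)
print(term.parseString('roam~0.8').fuzzy)   # 0.8
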
From robocomp/robocomp: tools/robocompdsl/parseSMDSL.py (on GitHub)

    def fromString(inputText, verbose=False):
        if verbose: print(('Verbose:', verbose))

        (TRANSITIONS, INITIAL_STATE, END_STATE, STATES, PARALLEL) = list(map(CaselessKeyword, """
        		transitions initial_state end_state states parallel""".split()))

        semicolon = Suppress(Word(";"))
        op = Suppress(Word("{"))
        cl = Suppress(Word("}"))
        to = Suppress(CaselessLiteral("=>"))

        identifier = Word(alphas + "_", alphanums + "_")

        list_identifiers = delimitedList(identifier)

        # parse States
        stateslist = Group(Suppress(STATES) + list_identifiers + semicolon).setResultsName('states')

        # parse Transitions
        transition = identifier.setResultsName('src') + to + list_identifiers.setResultsName('dests') + semicolon
        transitions_list = Group(OneOrMore(Group(transition))).setResultsName("transitions")
        transitions = Suppress(TRANSITIONS) + op + transitions_list + cl + semicolon

        # parse initialstate and finalstate
        initialstate = Suppress(INITIAL_STATE) + identifier.setResultsName('initialstate') + semicolon
        finalstate = Suppress(END_STATE) + identifier.setResultsName('finalstate') + semicolon

        # parse machine
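
The state-machine grammar above builds its results with setResultsName (or the equivalent ('name') call), Group and OneOrMore. The condensed sketch below keeps only the transition rule, with the destination list wrapped in an extra Group so that every destination stays under 'dests'; it is an illustration, not robocompdsl's parser:

from pyparsing import (CaselessLiteral, Group, OneOrMore, Suppress, Word,
                       alphanums, alphas, delimitedList)

identifier = Word(alphas + '_', alphanums + '_')
to = Suppress(CaselessLiteral('=>'))
semicolon = Suppress(';')

# one transition: src => dest1, dest2, ... ;
transition = Group(identifier('src') + to
                   + Group(delimitedList(identifier))('dests') + semicolon)
transitions = Group(OneOrMore(transition))('transitions')

result = transitions.parseString('idle => running; running => idle, error;')
for t in result.transitions:
    print(t.src, '->', list(t.dests))
# idle -> ['running']
# running -> ['idle', 'error']
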
From kjellmf/dot2tex: dot2tex/dotparsing.py (on GitHub)

    def define_dot_parser(self):
        """Define dot grammar

        Based on the grammar http://www.graphviz.org/doc/info/lang.html
        """
        # punctuation
        colon = Literal(":")
        lbrace = Suppress("{")
        rbrace = Suppress("}")
        lbrack = Suppress("[")
        rbrack = Suppress("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Suppress("=")
        comma = Literal(",")
        dot = Literal(".")
        slash = Literal("/")
        bslash = Literal("\\")
        star = Literal("*")
        semi = Suppress(";")
        at = Literal("@")
        minus = Literal("-")
        pluss = Suppress("+")

        # keywords
        strict_ = CaselessLiteral("strict")
        graph_ = CaselessLiteral("graph")
        digraph_ = CaselessLiteral("digraph")
        subgraph_ = CaselessLiteral("subgraph")
        node_ = CaselessLiteral("node")
        edge_ = CaselessLiteral("edge")
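
A pattern that runs through this grammar is the split between Suppress-ed punctuation, which is dropped from the parse results, and Literal / CaselessLiteral tokens, which are kept (and matched case-insensitively for DOT keywords). The fragment below applies that split to a made-up, DOT-flavoured attribute statement; it is only a sketch, not dot2tex's parser:

from pyparsing import CaselessLiteral, Group, Suppress, Word, alphanums, alphas, delimitedList

lbrack, rbrack, equals = map(Suppress, '[]=')    # structural punctuation: dropped
graph_ = CaselessLiteral('graph')                # keyword: kept, any case accepted
ident = Word(alphas + '_', alphanums + '_')

attr = Group(ident + equals + ident)
attr_stmt = graph_ + lbrack + delimitedList(attr) + rbrack

print(attr_stmt.parseString('GRAPH [rankdir=LR, splines=true]'))
# ['graph', ['rankdir', 'LR'], ['splines', 'true']]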