How to use the ply.lex function in ply

To help you get started, we've selected a few ply examples that show popular ways ply.lex is used in public projects.

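All of these snippets share the same shape: declare a tokens list, define t_ rules, then call lex.lex() to build the lexer. Here is a minimal, self-contained sketch of that pattern (the token names and input string are made up for illustration):

import ply.lex as lex

tokens = ['NUMBER', 'PLUS']        # every token a rule can return must be listed

t_PLUS = r'\+'                     # simple tokens can be plain regex strings
t_ignore = ' \t'                   # characters to skip silently

def t_NUMBER(t):                   # rules with actions are functions; the regex is the docstring
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()                  # build the lexer from this module's definitions
lexer.input("1 + 2")
for tok in lexer:                  # a PLY lexer is iterable
    print(tok.type, tok.value)
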

From python-cmd2/cmd2, tests/test_plyparsing.py (view on GitHub):
def build_lexer(self, **kwargs):
    """Build the PLY lexer from the rules defined on this instance."""
    self.lexer = lex.lex(module=self, **kwargs)
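Passing module=self makes PLY harvest the token definitions from the instance rather than from the enclosing module, so the rules can live on a class. A sketch of the surrounding pattern, with a hypothetical class and token set (note that rule functions become methods and take self):

import ply.lex as lex

class MyLexer:
    tokens = ['WORD']              # hypothetical token set

    t_ignore = ' \t'

    def t_WORD(self, t):
        r'\w+'
        return t

    def t_error(self, t):
        t.lexer.skip(1)

    def build_lexer(self, **kwargs):
        self.lexer = lex.lex(module=self, **kwargs)
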
From dabeaz/ply, test/lex_token4.py (view on GitHub):
# From PLY's own test suite: "-" is deliberately an invalid token name,
# used to check that lex.lex() reports bad names.
import ply.lex as lex

tokens = [
    "PLUS",
    "MINUS",
    "-",        # invalid on purpose: token names must be valid identifiers
    "NUMBER",
]

t_PLUS = r'\+'
t_MINUS = r'-'
t_NUMBER = r'\d+'

def t_error(t):
    pass

lex.lex()
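Token names must be valid Python identifiers, because PLY pairs each one with a t_NAME rule; with the "-" entry, lex.lex() logs the bad name and refuses to build the lexer. A working list would simply drop that entry:

tokens = ['PLUS', 'MINUS', 'NUMBER']   # every entry a valid identifier
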
From RyanFleck/Projects, py/rcf024_ply.py (view on GitHub):
def t_NUMBER(t):                     # excerpt starts mid-rule; the standard calc-style rule head is restored here
    r'\d+'
    t.value = int(t.value)
    return t

# Ignored characters
t_ignore = " \t"

def t_newline(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

# Build the lexer
lex.lex()

# Precedence rules for the arithmetic operators
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
    ('right', 'UMINUS'),
)

# Dictionary of names (for storing variables)
names = {}

def p_statement_assign(p):
    'statement : NAME EQUALS expression'
    names[p[1]] = p[3]

def p_statement_expr(p):             # excerpt ends mid-rule; the standard calc rule continues:
    'statement : expression'
    print(p[1])
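The p_ functions above belong to the parsing half of PLY, which is built with ply.yacc rather than ply.lex. A minimal sketch of how a calculator like this is typically driven, assuming the token and grammar rules live in the current module:

import ply.yacc as yacc

parser = yacc.yacc()              # collects the p_* rules from this module

while True:
    try:
        s = input('calc > ')      # hypothetical prompt
    except EOFError:
        break
    if s:
        parser.parse(s)           # tokenizes with the lexer built by lex.lex() above
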
From calmjs/calmjs.parse, src/calmjs/parse/lexers/es5.py (view on GitHub):
def _create_semi_token(self, orig_token):
    """Create a synthetic ';' token, copying position info when available."""
    token = ply.lex.LexToken()
    token.type = 'SEMI'
    token.value = ';'
    if orig_token is not None:
        token.lineno = orig_token.lineno
        token.lexpos = orig_token.lexpos
    else:
        token.lineno = 0
        token.lexpos = 0
    return token
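Building a ply.lex.LexToken by hand like this is how calmjs.parse injects the semicolons implied by JavaScript's automatic semicolon insertion rules; PLY itself only requires that a token carry type, value, lineno, and lexpos attributes.
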
From salspaugh/splparser, splparser/lexers/mvexpandlexer.py (view on GitHub):
def tokenize(data, debug=False, debuglog=None):
    lexer = ply.lex.lex(debug=debug, debuglog=debuglog)  # build from this module's rules
    lexer.input(data)
    lexer.begin('ipunchecked')   # start in the module's 'ipunchecked' lexer state
    tokens = []
    while True:
        tok = lexer.token()
        if not tok:
            break
        tokens.append(tok)
    return tokens
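A quick usage sketch (the query string is a made-up example of the SPL this lexer targets):

toks = tokenize("mvexpand foo")
for tok in toks:
    print(tok.type, tok.value, tok.lineno, tok.lexpos)
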
From tylergreen/mython, sandbox/mylisp/lisp_lexer.py (view on GitHub):
def t_newline(t):                    # excerpt starts mid-rule; rule head restored
    r'\n+'
    t.lexer.lineno += len(t.value)   # t.lexer.lineno is what current PLY tracks

# Read in a symbol.  This rule must be practically last since there are so few
# rules concerning what constitutes a symbol.
# Important for read macros -- must specify what isn't a symbol!
def t_SYMBOL(t):
    r'[^0-9()\'\`\,\@\.][^()\ \t\n]*'
    return t

# Characters that should be ignored.
t_ignore = ' \t'

# Handle errors.
def t_error(t):
    raise SyntaxError("syntax error on line %d near '%s'" %
                      (t.lineno, t.value))

# Build the lexer.
lexer = lex.lex()
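Once built, the lexer is driven the usual PLY way; a quick sketch with a made-up s-expression:

lexer.input("(foo bar)")         # hypothetical input
tok = lexer.token()              # fetch tokens one at a time
while tok:
    print(tok.type, tok.value)
    tok = lexer.token()
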
From kelp404/Victory, application/static/jc/slimit/lexer.py (view on GitHub):
def build(self, **kwargs):
    """Build the lexer."""
    self.lexer = ply.lex.lex(object=self, **kwargs)
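object=self is an older spelling of module=self: ply.lex.lex() accepts either parameter and reads the rules from the given instance, which lets each lexer object carry its own state.
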
From nucleic/enaml, enaml/styling/selectorparser.py (view on GitHub):
def __init__(self):
    self.lexer = lex.lex(outputdir=_lex_dir, lextab=_lex_mod, optimize=1)
    self.token_stream = None
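With optimize=1, PLY skips most of its run-time validation and caches the generated tables in a module named by lextab, written to outputdir, so later runs can load the lexer without re-analysing the rules.
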
From DISTORTEC/distortos, scripts/pydts.py (view on GitHub):
@ply.lex.TOKEN(quotedString)
def t_STRING_LITERAL(t):
	t.value = t.value[1:-1]   # strip the surrounding quotes
	return t
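The TOKEN decorator attaches a regex defined elsewhere (here quotedString, defined earlier in pydts.py) instead of using the function's docstring; this is the idiom for patterns that are long, shared, or built programmatically. Something like quotedString = r'"(?:[^"\\]|\\.)*"' would be typical, though that definition is a guess, not the one from the original file.
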
From rurseekatze/node-tileserver, mapcss_parser/lex.py (view on GitHub):
# This is incomplete, but should work for now.
def t_supportsselparen_SUP_STATEMENT(t):
	r'[^(){}\n]+(\([^(){}]*\)\n)*[^(){}\n]*'
	return t

# Error handling rule
def t_ANY_error(t):
	raise error.MapCSSError("Illegal character '%s' at line %i position %i" %
		(t.value[0], t.lexer.lineno, find_column(t.lexer.lexdata, t)))

# Define a rule so we can track line numbers
def t_ANY_newline(t):
	r'\r?\n'
	t.lexer.lineno += 1

# re.DOTALL makes '.' match newlines in every token pattern (requires "import re" at the top)
lexer = lex.lex(reflags=re.DOTALL)

if __name__ == '__main__':
	lex.runmain()
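lex.runmain() is PLY's built-in debugging driver: it tokenizes a file named on the command line (or standard input) and prints every token, which makes it easy to sanity-check the rules above in isolation.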