Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, line):
    """Store the raw input line on the instance for later parsing."""
    self.line = line
def token(c):
    """Match the literal text *c* but drop it from the parse results."""
    lit = Literal(c)
    return lit.suppress()
# Bare names: alphanumerics plus '-', '_' and URL-ish characters ('://', '.').
name = Word(pyp.alphanums + '-_://.')
# A single- or double-quoted string; QuotedString strips the quotes.
quote = QuotedString('"') | QuotedString("'")
# A run of dashes (Word('----') means "one or more '-' characters",
# not the literal four-dash string), optionally followed by more dashes.
header = Word('----')("name") + ZeroOrMore('-')
# key=value pair; the '=' itself is suppressed by token().
named_option = name("key") + token('=') + (name | quote)("value")
# A bare value with no key.
unnamed_option = (name | quote)("value")
option_token = Group(named_option | unnamed_option)
# Parenthesized (possibly nested) list of options.
option = pyp.nestedExpr(content=option_token)
# '@name(options)' style tag; the '@' is suppressed.
tag = token('@') + name('name') + Optional(option('options'))
# Markdown-ish markers: one or more '#', or '+', or '|'.
MD_tags = OneOrMore('#') | Literal('+') | Literal('|')
# Rebind: the combined marker text under 'name', plus optional options.
MD_tags = Combine(MD_tags)('name') + Optional(option('options'))
# '.classname' selector; the dot is suppressed.
cls = token('.') + name('name')
def build_foreign_key_parser_old():
    """Build the legacy foreign-key grammar.

    Shape: ``(new_attrs) -> [options] ref_table (ref_attrs)`` where every
    bracketed part is optional. Superseded by expression-based syntax
    (see issue #436); this will be deprecated in a future release.
    """
    lparen = pp.Literal('(').suppress()
    rparen = pp.Literal(')').suppress()
    # Attribute names: lowercase first char, then lowercase/digits/underscore.
    attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]'))
    new_attrs = pp.Optional(
        lparen + pp.delimitedList(attribute_name) + rparen
    ).setResultsName('new_attrs')
    arrow = pp.Literal('->').suppress()
    lbracket = pp.Literal('[').suppress()
    rbracket = pp.Literal(']').suppress()
    # Options are plain alphabetic words inside square brackets.
    option = pp.Word(pp.srange('[a-zA-Z]'))
    options = pp.Optional(
        lbracket + pp.delimitedList(option) + rbracket
    ).setResultsName('options')
    # Referenced table: letter first, then alphanumerics, '.' and '_'.
    ref_table = pp.Word(pp.alphas, pp.alphanums + '._').setResultsName('ref_table')
    ref_attrs = pp.Optional(
        lparen + pp.delimitedList(attribute_name) + rparen
    ).setResultsName('ref_attrs')
    return new_attrs + arrow + options + ref_table + ref_attrs
from monasca_analytics.banana.cli import const
# Punctuation / keyword tokens; .suppress() keeps them out of parse results.
EQUALS = p.Literal("=").suppress()
CONNECT = p.Literal("->").suppress()
DISCONNECT = p.Literal("!->").suppress()
LPAREN = p.Literal("(").suppress()
RPAREN = p.Literal(")").suppress()
LOAD = p.CaselessKeyword("load").suppress()
SAVE = p.CaselessKeyword("save").suppress()
REMOVE = p.CaselessKeyword("rm").suppress()
PRINT = p.CaselessKeyword("print").suppress()
LIST = p.CaselessKeyword("list").suppress()
HELP = p.CaselessKeyword("help").suppress()
DOT = p.Literal(".").suppress()
# Identifiers: leading letter/underscore, then alphanumerics/underscore.
VARNAME = p.Word(p.alphas + "_", p.alphanums + "_")
PARAMETER = p.Word(p.alphanums + "_-")
MODULE_NAME = p.Word(p.alphanums + "_-")
VALUE = p.Word(p.alphanums + "_-.")
# BUG FIX: raw string avoids the invalid "\." escape sequence (a
# SyntaxWarning on modern Python). The character set is unchanged:
# alphanumerics plus "_", "-", "/", "\" and ".".
PATH = p.Word(p.alphanums + r"_-/\.")
# Command grammars; parentheses around arguments are optional.
cmd_create = (VARNAME + EQUALS + MODULE_NAME)
cmd_connect = (VARNAME + CONNECT + VARNAME)
cmd_disconnect = (VARNAME + DISCONNECT + VARNAME)
# e.g. `var.param.subparam = value`
cmd_modify = (VARNAME + p.OneOrMore(DOT + PARAMETER) + EQUALS + VALUE)
cmd_load = (LOAD + p.Optional(LPAREN) + PATH + p.Optional(RPAREN))
cmd_save = (SAVE + p.Optional(LPAREN) + p.Optional(RPAREN))
cmd_save_as = (SAVE + p.Optional(LPAREN) + PATH + p.Optional(RPAREN))
cmd_remove = (REMOVE + p.Optional(LPAREN) + VARNAME + p.Optional(RPAREN))
cmd_print = (PRINT + p.Optional(LPAREN) + p.Optional(VARNAME) +
             p.Optional(RPAREN))
cmd_list = (LIST + p.Optional(LPAREN) + p.Optional(VARNAME) +
            p.Optional(RPAREN))
# NOTE(review): this fragment begins mid-expression -- the element being
# closed on the next line (presumably `calculated`) is defined above,
# outside this view.
).setParseAction(lambda toks: {
'source_format': 'calculated',
'source_tags': None,
# The last token is compiled as a Python expression for later eval().
'function': compile(toks[-1].strip(), '', 'eval'),
'type': 'calculated',
'decorators': toks.decorators.asDict()
}).setResultsName('calculated_def')
# One optional "kind" section plus any registered field-extension sections.
rule_sections = [Optional(creator | derived | calculated), ]
rule_sections.extend([Optional(p.parser.parse_element(indent_stack))
for p in parsers
if issubclass(p.parser, FieldBaseExtensionParser)])
# `json_id[, alias1, alias2]:` -- the field name plus optional aliases;
# the parse action repackages them as a dict.
json_id = (IDENT +
Optional(Suppress(',') +
delimitedList(Word(alphanums + '_'))) +
Suppress(':')
).setResultsName('field')\
.setParseAction(lambda toks: {'json_id': toks[0],
'aliases': toks[1:]})
# A rule: optional decorators, the field id, then an indented block in
# which the sections may appear in any order (Each).
rule = Group(Optional(rule_decorators) +
json_id +
indentedBlock(Each(rule_sections), indent_stack)
)
return OneOrMore(COMMENT.suppress() | rule)
@property
def value(self):
    """The value (right-hand side) of the named argument."""
    return self._value
@property
def is_string(self):
    """True when the right-hand side was given as a quoted string."""
    return self._quote is not None
# Construct a grammar using PyParsing
# A Fortran variable name starts with a letter and continues with
# letters, numbers and _. Can you start a name with _?
VAR_NAME = pparse.Word(pparse.alphas, pparse.alphanums+"_")
# A name or one of the Fortran logical literals.
NAME = VAR_NAME | pparse.Literal(".false.") | pparse.Literal(".true.")
# Reference to a component of a derived type, e.g. `var%comp` (kept as
# one combined token).
DERIVED_TYPE_COMPONENT = pparse.Combine(VAR_NAME + "%" + VAR_NAME)
# An unsigned integer
UNSIGNED = pparse.Word(pparse.nums)
# In Fortran, a numerical constant can have its kind appended after an
# underscore. The kind can be a 'name' or just digits.
KIND = pparse.Word("_", exact=1) + (VAR_NAME | UNSIGNED)
# First arg to Word gives allowed initial chars, 2nd arg gives allowed
# body characters
SIGNED = pparse.Word("+-"+pparse.nums, pparse.nums)
# Integer literal with optional kind suffix, e.g. `42_i8`.
INTEGER = pparse.Combine(SIGNED + pparse.Optional(KIND))
from pyparsing import alphas, alphanums, OneOrMore, Group, Literal, delimitedList
from pyparsing import Suppress as S
from pyparsing import Optional as O
from pyparsing import Word as W
from pyparsing import CaselessLiteral as CL
from subprocess import check_output
'''
Parser of CCF for message and grouped avp
'''
digits = '0123456789'
# Multiplicity prefix, e.g. `1*`, `*4`, `2*3` (optional min, '*', optional max).
mul = Group(O(W(digits)) + '*' + O(W(digits)))
# AVP references in the three CCF bracket styles: fixed, mandatory, optional.
fixed_avp = '<' + W(alphanums + '_-') + '>'
mandatory_avp = '{' + W(alphanums + '_-') + '}'
optional_avp = '[' + W(alphanums + '_-') + ']'
# One AVP reference with an optional multiplicity prefix.
avp = Group(O(mul) + (fixed_avp | mandatory_avp | optional_avp))
avps = Group(OneOrMore(avp))
# Comma-separated command flags, e.g. `REQ, PXY`.
flags = Group(delimitedList(Literal('REQ')|Literal('PXY')|Literal('ERR'), delim=','))
# Message name, optionally wrapped in angle brackets (brackets suppressed).
msg_decl = O(S('<')) + W(alphas + '_-') + O(S('>'))
# `<Diameter-Header: code, flags [, appid]>`; fixed words are suppressed,
# leaving the code, flags and optional application id in the results.
msg_hdr = S('<') + S(CL('Diameter')) + S(O('-')) + S(CL('Header')) + S(':') + W(alphanums) + O(S(',')) + flags + O(S(',') + W(alphanums)) + S('>')
# The `::=` separator, matched as three separate suppressed characters.
equals = S(':') + S(':') + S('=')
msg_ccf = msg_decl + equals + msg_hdr + avps
avp_decl = O(S('<')) + W(alphas + '_-') + O(S('>'))
# `<AVP-Header: code [, vendor-id]>`.
avp_hdr = S('<') + S(CL('AVP')) + S(O('-')) + S(CL('Header')) + S(':') + W(alphanums) + S(O(',')) + O(W(alphanums)) + S('>')
FADE = pp.Literal('FADE')
WIPE = pp.Literal('WIPE')
# BUG FIX: the alternatives must be passed as one list. The original
# oneOf('LATER', 'SAME SCENE', 'BACK TO SCENE') bound 'SAME SCENE' to the
# positional `caseless` flag and 'BACK TO SCENE' to `useRegex`, so only
# 'LATER' was ever matched (and case-insensitively at that).
MISC_TRANS = pp.oneOf(['LATER', 'SAME SCENE', 'BACK TO SCENE'])
FLASHBACK = pp.oneOf(['FLASHBACK', 'FLASHFORWARD'])
# Full transition line, e.g. "CUT TO:"; the trailing ':' is suppressed and
# the pieces are re-joined with single spaces.
TRANSITIONS = pp.Combine(pp.Optional(CAPS) + pp.Or([CUT, DISSOLVE, FADE, WIPE, MISC_TRANS, FLASHBACK]) + pp.Optional(pp.Word(ALPHANUMS)) + pp.Optional(pp.Literal(':').suppress()), joinString=' ', adjacent=False).setResultsName('transition')
# Sound and Visual WORDS
# consider not using, because could be character's name or place?
# SVW = pp.oneOf('FLASH', "ROAR", 'CRACK', 'KNOCK', 'SMACK', 'THUMP', 'ROMP', 'SCREECH', 'PLOP', 'SPLASH', 'BEEP', 'BANG', 'SQUISH', 'FIZZ', 'OINK', 'TICK', 'TOCK', 'ZAP', 'VROOM', 'PING', 'HONK', 'FLUTTER', 'AWOOGA', 'OOM-PAH', 'CLANK', 'BAM', 'BOP')
#misc
# "mid" prefix followed by any word, e.g. "mid morning" ("mid" suppressed).
mid_x = pp.Literal('mid').suppress() + pp.Word(pp.alphanums)
# BUG FIX: pp.Or takes a single iterable of alternatives; in the original
# the second Literal was bound to Or's `savelist` parameter, so the
# lowercase variant was silently dropped.
continuous_action = pp.Or([pp.Literal('CONTINUOUS ACTION'), pp.Literal('continuous action')])
enumerated_time_word = pp.oneOf(['sunrise', 'sunset', 'present', 'later', 'before', 'breakfast', 'lunch', 'dinner', 'past', 'spring', 'summer', 'fall', 'winter', 'easter', 'christmas', 'passover', 'eve', 'dusk', 'ramadan', 'birthday', 'purim', 'holi', 'equinox', 'kwanzaa', 'recent', 'annual', 'sundown', 'sun-down', 'sun-up', 'tonight', 'dawn']) + ~(~WH + pp.Word(pp.alphanums))
stop_words = ~pp.oneOf(['is', 'home', 'this', 'that', 'there', 'are', 'were', 'be', 'for', 'with', 'was', 'won\'t', 'aren\'t', 'ain\'t', 'isn\'t', 'not', 'on', 'above', 'into', 'around', 'over', 'in', 'number', 'another', 'third', 'fourth', 'anything', 'hear', 'wife', 'run', 'me', 'case', 'everyone', 'friends'])
def num_spaces(tokens):
    """Parse action: return the character count of the first matched token."""
    matched = tokens[0]
    return len(matched)
# Indentation matchers: the num_spaces parse action replaces the matched
# whitespace with its character count, exposed under the 'indent' name.
spaces = pp.OneOrMore(pp.White(ws=' ', min=1)).addParseAction(num_spaces).setResultsName('indent')
min_2_spaces = pp.OneOrMore(pp.White(ws=' ', min=2)).addParseAction(num_spaces).setResultsName('indent')
# Runs of tabs/CR/LF (max=0 and exact=0 mean "no upper bound").
w = pp.OneOrMore(pp.White(ws='\t\r\n', min=1, max=0, exact=0))
wall = w + spaces
# NOTE(review): Word(..., max=1) matches a single character only -- confirm
# a one-character match is really what "one word title" intends.
one_word_title = pp.Word(ALPHANUMS, max=1) & pp.FollowedBy(pp.Word(lower))
# NOTE(review): this fragment starts mid-expression -- the opening of the
# boolean-operator alternation (presumably `boolean = (...`) is above,
# outside this view.
pp.CaselessKeyword('xor') | pp.CaselessKeyword('or'))('bool')
# Parentheses
lpar = pp.Literal('(')('open_subgroup')
rpar = pp.Literal(')')('close_subgroup')
# Backend query: P{PuppetDB specific query}
query_start = pp.Combine(pp.oneOf(backend_keys, caseless=True)('backend') + pp.Literal('{'))
query_end = pp.Literal('}')
# Allow the backend specific query to use the end_query token as well, as long as it's in a quoted string
# and fail if there is a query_start token before the first query_end is reached
query = pp.SkipTo(query_end, ignore=pp.quotedString, failOn=query_start)('query')
backend_query = pp.Combine(query_start + query + query_end)
# Alias
alias = pp.Combine(pp.CaselessKeyword('A') + ':' + pp.Word(pp.alphanums + '-_.+')('alias'))
# Final grammar, see the docstring for its BNF based on the tokens defined above
# Group are used to have an easy dictionary access to the parsed results
full_grammar = pp.Forward()
# An item is a backend query, an alias, or a parenthesized subgroup.
item = backend_query | alias | lpar + full_grammar + rpar
# Recursive definition via the Forward: item (boolean item)*
full_grammar << pp.Group(item) + pp.ZeroOrMore(pp.Group(boolean + item)) # pylint: disable=expression-not-assigned
return full_grammar
def removeTags(taggedInformation):
    """Strip a leading ``<tag>`` wrapper and return the enclosed text.

    *taggedInformation* is expected to be bytes-like: it is decoded as
    ASCII with errors ignored before parsing.
    NOTE(review): the trailing ``pyp.Suppress('')`` looks like a truncated
    closing-tag pattern -- confirm against the original source.
    """
    decoded = taggedInformation.decode('ascii', 'ignore')
    grammar = pyp.Suppress('<' + pyp.Word(pyp.alphanums) + '>') + pyp.Word(pyp.alphanums + pyp.alphas8bit + ' ._-') + pyp.Suppress('')
    parsed = grammar.parseString(str(decoded))
    return parsed[0]
def execute(self, player, args):
# ... but the tradeoff is we have to do the validity checking down here.
# Resolve the object argument: anything reachable by `player`, or a uid.
obj_grammar = parser.ReachableOrUid(player)
# Attribute names follow identifier rules: letter/underscore first.
attr_grammar = pyp.Word(pyp.alphas + "_", pyp.alphanums + "_")
try:
obj = obj_grammar.parseString(args["obj"], parseAll=True)[0]
except pyp.ParseException:
# Re-raise parse failures as a user-facing error naming the bad input.
name = args["obj"].strip()
raise utils.UserError("I don't know what object you mean by '{}'"
.format(name))
try:
attr = attr_grammar.parseString(args["attr"], parseAll=True)[0]
except pyp.ParseException:
name = args["attr"].strip()
raise utils.UserError("'{}' is not a valid attribute name."
.format(name))
# NOTE(review): the method body continues past the end of this view.
value_string = args["value"].strip()