@event('update:app')
@authorization_required
@source
def update_app(self, data, key):
    '''
    Update fields of the app structure.
    '''
    APP_LOGGER.error(str(data))
    yield Service("flow-app").enqueue("update", msgpack.packb(data))
    self.emit(key, {"app": data})

def serialize(self, obj):
    """
    Implements :func:`autobahn.wamp.interfaces.IObjectSerializer.serialize`
    """
    data = msgpack.packb(obj, use_bin_type=self.ENABLE_V5)
    if self._batched:
        # In batched mode, prefix each serialized object with its length
        # as an unsigned 32-bit big-endian integer.
        return struct.pack("!L", len(data)) + data
    else:
        return data

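For reference, a minimal sketch of how such length-prefixed batches could be read back (unpack_batched is a hypothetical helper, not part of the autobahn API; it assumes the "!L" framing used above):

import struct
import msgpack

def unpack_batched(payload):
    """Split a batched payload into objects, assuming 4-byte big-endian length prefixes."""
    objects = []
    offset = 0
    while offset < len(payload):
        (length,) = struct.unpack("!L", payload[offset:offset + 4])
        offset += 4
        objects.append(msgpack.unpackb(payload[offset:offset + length], raw=False))
        offset += length
    return objects
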
def pack_compress_gen_cb(data):
    # Pack with msgpack, then compress the payload with zstandard
    # (content size and checksum are embedded in the frame header).
    # noinspection PyArgumentList
    compressor = zstd.ZstdCompressor(write_content_size=True, write_checksum=True, level=14)
    yield compressor.compress(msgpack.packb(data, use_bin_type=True))
    # Alternative codec, kept for reference:
    # yield lz4.block.compress(msgpack.packb(data, use_bin_type=True))

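A possible counterpart for reading such a payload back, assuming the zstandard package is imported as zstd as above (unpack_decompress is an illustrative name, not taken from the source):

import msgpack
import zstandard as zstd

def unpack_decompress(blob):
    """Reverse of pack_compress_gen_cb: zstd-decompress, then msgpack-decode."""
    raw = zstd.ZstdDecompressor().decompress(blob)  # works because the frame carries its content size
    return msgpack.unpackb(raw, raw=False)
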
def _try_write_meta_key(self, session):
    couple = storage.couples[self.couple]
    settings = couple.groupset_settings
    settings['frozen'] = self.frozen
    metakey = couple.compose_group_meta(couple, settings)
    s = session.clone()
    s.add_groups([g.group_id for g in couple.groups])
    _, failed_groups = helpers.write_retry(
        s,
        keys.SYMMETRIC_GROUPS_KEY,
        msgpack.packb(metakey),
        retries=1,  # retries will be performed by the jobs processor itself
    )
    if failed_groups:
        logger.error(
            'Job {job_id}, task {task_id}: failed to write metakey to groups {groups}'.format(
                job_id=self.parent_job.id,
                task_id=self.id,
                groups=failed_groups,
            )
        )
    else:
        logger.debug(
            'Job {job_id}, task {task_id}: metakey is successfully written '
            'to couple {couple}'.format(
                job_id=self.parent_job.id,
                task_id=self.id,
                couple=couple,
            )
        )

operations = list()
for tag, value in request_ticket_attributes:
    if tag == 'confirm':
        operations.append(('assign', value))
    elif tag == 'repeal':
        operations.append(('reject', value))
    else:
        new_request_ticket[tag] = value
new_request_ticket['operations'] = operations
if new_request_ticket['operations']:
    new_request_tickets.append(new_request_ticket)
new_request_tickets_num = len(new_request_tickets)
new_request_tickets = serialize(new_request_tickets)
run_sql("""insert into aidPERSONIDDATA
           (personid, tag, datablob, opt1)
           values (%s, %s, %s, %s)""",
        (pid, 'request_tickets', new_request_tickets, new_request_tickets_num))
run_sql("""delete from aidPERSONIDDATA
           where tag like %s""",
        ('rt_%',))

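The serialize helper used above is not shown in this excerpt; given the msgpack-centric context of these examples, it is assumed to behave roughly like the following sketch:

import msgpack

def serialize(obj):
    # Hypothetical stand-in for the project's serializer: pack the ticket list
    # into a msgpack byte string suitable for the datablob column.
    return msgpack.packb(obj, use_bin_type=True)
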
def is_anomalously_anomalous(metric_name, ensemble, datapoint):
    """
    This method runs a meta-analysis on the metric to determine whether the
    metric has a past history of triggering. TODO: weight intervals based on datapoint
    """
    # We want the datapoint to avoid triggering twice on the same data
    new_trigger = [time(), datapoint]

    # Get the old history
    raw_trigger_history = redis_conn.get('trigger_history.' + metric_name)
    if not raw_trigger_history:
        redis_conn.set('trigger_history.' + metric_name, packb([(time(), datapoint)]))
        return True

    trigger_history = unpackb(raw_trigger_history)

    # Are we (probably) triggering on the same data?
    if (new_trigger[1] == trigger_history[-1][1] and
            new_trigger[0] - trigger_history[-1][0] <= 300):
        return False

    # Update the history
    trigger_history.append(new_trigger)
    redis_conn.set('trigger_history.' + metric_name, packb(trigger_history))

    # Should we surface the anomaly?
    trigger_times = [x[0] for x in trigger_history]
    intervals = [
        trigger_times[i + 1] - trigger_times[i]
        for i, v in enumerate(trigger_times)
        if (i + 1) < len(trigger_times)
    ]

    series = pandas.Series(intervals)
    mean = series.mean()
    stdDev = series.std()

    # Surface the anomaly only if the latest trigger interval deviates
    # strongly from the metric's usual trigger cadence.
    return abs(intervals[-1] - mean) > 3 * stdDev

def get_msgpack(data):
    """Return the msgpack-encoded bytes of the given data."""
    return msgpack.packb(data, use_bin_type=True)

def pack(self, message):
    """Pack a message into a binary packed message with datetime handling."""
    return msgpack.packb(message, use_bin_type=True, default=self.encode_datetime)

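The encode_datetime hook referenced above is not included in this excerpt; a plausible sketch of such a default callback and its unpacking counterpart (the marker key and helper names are illustrative, not taken from the source):

import datetime
import msgpack

def encode_datetime(obj):
    # Called by msgpack for types it cannot pack natively.
    if isinstance(obj, datetime.datetime):
        return {"__datetime__": True, "value": obj.isoformat()}
    raise TypeError("Cannot serialize %r" % (obj,))

def decode_datetime(obj):
    # object_hook counterpart used when unpacking.
    if obj.get("__datetime__"):
        return datetime.datetime.fromisoformat(obj["value"])
    return obj

packed = msgpack.packb({"ts": datetime.datetime.utcnow()}, use_bin_type=True, default=encode_datetime)
restored = msgpack.unpackb(packed, raw=False, object_hook=decode_datetime)
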
def send_stream(self, name, data):
    self.stream_out.send_multipart([
        name + self.node_id,
        msgpack.packb(data, default=encode_ndarray, use_bin_type=True),
    ])

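encode_ndarray is likewise not shown here; a minimal sketch of a default hook that makes NumPy arrays msgpack-serializable, with its object_hook counterpart (field names are illustrative):

import msgpack
import numpy as np

def encode_ndarray(obj):
    # Represent an ndarray as a plain dict of dtype, shape and raw bytes.
    if isinstance(obj, np.ndarray):
        return {"__ndarray__": True,
                "dtype": str(obj.dtype),
                "shape": obj.shape,
                "data": obj.tobytes()}
    raise TypeError("Cannot serialize %r" % (obj,))

def decode_ndarray(obj):
    # object_hook counterpart: rebuild the array from dtype, shape and bytes.
    if obj.get("__ndarray__"):
        return np.frombuffer(obj["data"], dtype=obj["dtype"]).reshape(obj["shape"])
    return obj
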