# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def submit(self, func, *args, **kwargs):
    """Submit a function for serialized execution on sqs.

    Increments the operation sequence counter, publishes the call
    (op name plus JSON-serialized args/kwargs) to the map queue, and
    returns an SQSFuture keyed by the sequence id so the result can be
    matched up later.
    """
    self.op_sequence += 1
    seq = self.op_sequence
    # Message attributes carry the routing/deserialization metadata;
    # the body carries only the call arguments.
    attributes = {
        'sequence_id': {
            'StringValue': str(seq),
            'DataType': 'Number'},
        'op': {
            'StringValue': named(func),
            'DataType': 'String',
        },
        'ser': {
            'StringValue': 'json',
            'DataType': 'String'},
    }
    self.sqs.send_message(
        QueueUrl=self.map_queue,
        MessageBody=utils.dumps({'args': args, 'kwargs': kwargs}),
        MessageAttributes=attributes,
    )
    future = SQSFuture(seq)
    self.futures[seq] = future
    return future
# NOTE(review): interior fragment of a larger function -- `commits`, `r`,
# `repo_metrics`, `stats`, `since`, `now`, `metrics`, and `hook_context`
# are all bound above the visible chunk. Indentation restored by review;
# confirm nesting against the original file.
# Each commit represents a separate pr
for c in commits:
    process_commit(c, r, repo_metrics, stats, since, now)
# Clear dimensions before emitting repo-level hook metrics.
repo_metrics.dims = None
if stats['missing']:
    # Emit count and accumulated latency of commits still pending a hook.
    repo_metrics.put_metric(
        'RepoHookPending', stats['missing'], 'Count',
        Hook=hook_context)
    repo_metrics.put_metric(
        'RepoHookLatency', stats['missing_time'], 'Seconds',
        Hook=hook_context)
if not metrics:
    # No metrics sink configured: print buffered metrics as JSON and stop.
    print(dumps(repo_metrics.buf, indent=2))
    return
else:
    repo_metrics.BUF_SIZE = 20
    repo_metrics.flush()
# NOTE(review): interior fragment of an action's process() -- `self`,
# `event`, `resources`, `params`, and VERSION come from above the visible
# chunk. Indentation restored by review.
# Per-invocation client config; read timeout defaults to 90 seconds,
# region defaults to the session's region when unset.
config = Config(read_timeout=self.data.get(
    'timeout', 90), region_name=self.data.get('region', None))
client = utils.local_session(
    self.manager.session_factory).client('lambda', config=config)
payload = {
    'version': VERSION,
    'event': event,
    'action': self.data,
    'policy': self.manager.data}
results = []
# Invoke the function once per chunk of resources (default 250 per batch),
# reusing the same payload dict with the resource set swapped in.
for resource_set in utils.chunks(resources, self.data.get('batch_size', 250)):
    payload['resources'] = resource_set
    params['Payload'] = utils.dumps(payload)
    result = client.invoke(**params)
    # invoke() returns Payload as a streaming body; materialize it and
    # decode to text so results are plain serializable dicts.
    result['Payload'] = result['Payload'].read()
    if isinstance(result['Payload'], bytes):
        result['Payload'] = result['Payload'].decode('utf-8')
    results.append(result)
return results
def format_json(self, resources, fh):
    """Write *resources* as indented JSON via the project ``dumps`` helper.

    NOTE(review): this passes ``fh`` positionally, so it relies on the
    project-level ``dumps(data, fh=None, indent=...)`` helper (see the
    ``dumps(..., fh=fh)`` usages elsewhere in this file) rather than
    ``json.dumps`` -- confirm against the module's imports.
    """
    return dumps(resources, fh, indent=2)
# NOTE(review): interior fragment -- the first statements below are the
# tail of a guard branch (presumably `if not resources:`) whose condition
# lies above the visible chunk; `policy`, `resources`, and `event` are
# bound there. Indentation restored by review.
    policy.log.info(
        "policy: %s resources: %s no resources found" % (
            policy.name, policy.resource_type))
    return
# Time the full action pass for the ActionTime metric emitted below.
at = time.time()
for action in policy.resource_manager.actions:
    policy.log.info(
        "policy: %s invoking action: %s resources: %d",
        policy.name, action.name, len(resources))
    # EventActions also receive the triggering event.
    if isinstance(action, EventAction):
        results = action.process(resources, event)
    else:
        results = action.process(resources)
    # Persist each action's output alongside the policy execution record.
    policy._write_file(
        "action-%s" % action.name, utils.dumps(results))
policy.ctx.metrics.put_metric(
    "ActionTime", time.time() - at, "Seconds", Scope="Policy")
return resources
def process(self, resources, event=None):
    """Run the base Time filter, then persist bookkeeping records.

    After delegating to the parent implementation, dump any accumulated
    parse errors and opted-out resources as JSON files into the
    execution log directory (when one is configured), logging their
    counts, and reset both accumulators.
    """
    resources = super(Time, self).process(resources)
    if self.parse_errors and self.manager and self.manager.ctx.log_dir:
        self.log.warning("parse errors %d", len(self.parse_errors))
        error_path = join(self.manager.ctx.log_dir, 'parse_errors.json')
        with open(error_path, 'w') as out:
            dumps(self.parse_errors, fh=out)
        self.parse_errors = []
    if self.opted_out and self.manager and self.manager.ctx.log_dir:
        self.log.debug("disabled count %d", len(self.opted_out))
        opt_out_path = join(self.manager.ctx.log_dir, 'opted_out.json')
        with open(opt_out_path, 'w') as out:
            dumps(self.opted_out, fh=out)
        self.opted_out = []
    return resources
def _build_body(self, resource):
    """Create a JSON body and dump it to encoded bytes.

    Returns None when no body expression is configured; otherwise
    evaluates the configured jmespath expression against *resource*
    and serializes the result as UTF-8 encoded JSON bytes.
    """
    if not self.body:
        return None
    selected = jmespath.search(self.body, resource)
    return utils.dumps(selected).encode('utf-8')
# NOTE(review): interior fragment -- `client`, `instances`, and `self`
# are bound above the visible chunk. Indentation restored by review.
failures = {}
# Play nice around aws having insufficient capacity...
# Group by instance type then availability zone so a capacity failure in
# one pool doesn't abort the others.
for itype, t_instances in utils.group_by(
        instances, 'InstanceType').items():
    for izone, z_instances in utils.group_by(
            t_instances, 'Placement.AvailabilityZone').items():
        for batch in utils.chunks(z_instances, self.batch_size):
            fails = self.process_instance_set(client, batch, itype, izone)
            if fails:
                # NOTE(review): records the entire batch as failed, not
                # just the instances in `fails` -- presumably intentional
                # best-effort reporting; confirm.
                failures["%s %s" % (itype, izone)] = [i['InstanceId'] for i in batch]
if failures:
    fail_count = sum(map(len, failures.values()))
    msg = "Could not start %d of %d instances %s" % (
        fail_count, len(instances), utils.dumps(failures))
    self.log.warning(msg)
    raise RuntimeError(msg)
def add(self, keys):
    """Accumulate *keys*: advance the running count and, when an output
    file handle is configured, append the keys as JSON followed by a
    comma-and-newline separator."""
    self.count += len(keys)
    if self.fh is not None:
        self.fh.write(dumps(keys))
        self.fh.write(",\n")
def format_json(self, resources, fh):
    """Write *resources* as indented JSON via the project ``dumps`` helper.

    NOTE(review): this passes ``fh`` positionally, so it relies on the
    project-level ``dumps(data, fh=None, indent=...)`` helper (see the
    ``dumps(..., fh=fh)`` usages elsewhere in this file) rather than
    ``json.dumps`` -- confirm against the module's imports.
    """
    return dumps(resources, fh, indent=2)