def run(self, config, comment, new_params, branch_name, dry_run=True):
    c, result = None, False

    # get the title of the current process
    title = ''
    if setproctitle_enabled:
        title = setproctitle.getproctitle()

    # check that the user Run function is defined
    if self.user_run is None:
        log('COLOR.RED', 'Error: user Run function is not defined; use the @Experiment.set_run decorator to set it')
        return c, result

    # check that the user Score function is defined
    if self.user_score is None:
        log('COLOR.RED',
            'Error: user Score function is not defined; use the @Experiment.set_score decorator to set it')
        return c, result

    # check whether the commit will be removed after the run, and warn if so
    timer = None
    dry_run_dur = self.testarium.config.get('dry_run.max_duration', 300)
    if dry_run:
        ...  # truncated in the original example

# a separate example begins here, mid-except; the exception type below is an assumption,
# since the original listing omits the header of this clause
except VykedServiceException as e:
    Stats.tcp_stats['total_responses'] += 1
    error = str(e)
    status = 'handled_error'
    _logger.exception('Handled exception %s for method %s ', e.__class__.__name__, func.__name__)
except Exception as e:
    Stats.tcp_stats['total_errors'] += 1
    error = str(e)
    status = 'unhandled_error'
    success = False
    failed = True
    _logger.exception('Unhandled exception %s for method %s ', e.__class__.__name__, func.__name__)
else:
    Stats.tcp_stats['total_responses'] += 1
    end_time = int(time.time() * 1000)
    hostname = socket.gethostname()
    # derive the service name from the process title set at startup
    service_name = '_'.join(setproctitle.getproctitle().split('_')[1:-1])
    logd = {
        'endpoint': func.__name__,
        'time_taken': end_time - start_time,
        'hostname': hostname, 'service_name': service_name
    }
    _logger.debug('Time taken for %s is %d milliseconds', func.__name__, end_time - start_time)
    # call to update the aggregator, designed to replace the stats module
    Aggregator.update_stats(endpoint=func.__name__, status=status, success=success,
                            server_type='tcp', time_taken=end_time - start_time)
    if not old_api:
        return self._make_response_packet(request_id=rid, from_id=from_id, entity=entity, result=result,
                                          error=error, failed=failed)
    else:
        ...  # truncated in the original example
# a second, near-identical example from the same codebase (note the extra stats logging
# and the slightly different service-name slice)
except Exception as e:
    Stats.tcp_stats['total_errors'] += 1
    error = str(e)
    status = 'unhandled_error'
    success = False
    failed = True
    _logger.exception('Unhandled exception %s for method %s ', e.__class__.__name__, func.__name__)
else:
    Stats.tcp_stats['total_responses'] += 1
    end_time = int(time.time() * 1000)
    hostname = socket.gethostname()
    service_name = '_'.join(setproctitle.getproctitle().split('_')[:-1])
    logd = {
        'endpoint': func.__name__,
        'time_taken': end_time - start_time,
        'hostname': hostname, 'service_name': service_name
    }
    logging.getLogger('stats').debug(logd)
    _logger.debug('Time taken for %s is %d milliseconds', func.__name__, end_time - start_time)
    # call to update the aggregator, designed to replace the stats module
    Aggregator.update_stats(endpoint=func.__name__, status=status, success=success,
                            server_type='tcp', time_taken=end_time - start_time)
    if not old_api:
        return self._make_response_packet(request_id=rid, from_id=from_id, entity=entity, result=result,
                                          error=error, failed=failed)
def setp(check_id, entity, msg):
    global orig_process_title
    if orig_process_title is None:
        try:
            # extract the base title from the current process title
            orig_process_title = setproctitle.getproctitle().split(' ')[2].split(':')[0].split('.')[0]
        except Exception:
            orig_process_title = 'p34XX'
    setproctitle.setproctitle('zmon-worker.{} check {} on {} {} {}'.format(orig_process_title, check_id, entity, msg,
                                                                           datetime.now().strftime('%H:%M:%S.%f')))
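All of these examples reduce to the same two library calls; here is a minimal, self-contained round-trip (the title string is just an illustration):

import setproctitle

# remember the original title, then rename the process
original = setproctitle.getproctitle()
setproctitle.setproctitle('myapp: worker 1')

# 'ps' and 'top' will now show the new title
print(setproctitle.getproctitle())

# restore the original title when done
setproctitle.setproctitle(original)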
# a separate example begins here, mid-except, inside an HTTP handler's stats wrapper;
# the exception type below is an assumption, since the original listing omits the header
except VykedServiceException as e:
    Stats.http_stats['total_responses'] += 1
    status = 'handled_exception'
    _logger.error('Handled exception %s for method %s ', e.__class__.__name__, func.__name__)
    raise e
except Exception as e:
    Stats.http_stats['total_errors'] += 1
    status = 'unhandled_exception'
    success = False
    _logger.exception('Unhandled exception %s for method %s ', e.__class__.__name__, func.__name__)
    raise e
else:
    t2 = time.time()
    hostname = socket.gethostname()
    service_name = '_'.join(setproctitle.getproctitle().split('_')[:-1])
    status = result.status
    logd = {
        'status': result.status,
        'time_taken': int((t2 - t1) * 1000),
        'type': 'http',
        'hostname': hostname, 'service_name': service_name
    }
    logging.getLogger('stats').debug(logd)
    Stats.http_stats['total_responses'] += 1
    return result
finally:
    t2 = time.time()
    Aggregator.update_stats(endpoint=func.__name__, status=status, success=success,
                            server_type='http', time_taken=int((t2 - t1) * 1000))
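Both stats fragments are the tails of a timing decorator; below is a minimal, hypothetical skeleton of the surrounding shape (the Stats counters and the Aggregator call are left as a comment and a debug log, since their APIs are only partially visible here):

import logging
import time
from functools import wraps

_logger = logging.getLogger(__name__)

def timed_stats(func):
    # hypothetical decorator skeleton inferred from the fragments above;
    # the real code also updates Stats counters and calls Aggregator.update_stats
    @wraps(func)
    def wrapper(*args, **kwargs):
        status, success = 'success', True
        t1 = time.time()
        try:
            return func(*args, **kwargs)
        except Exception:
            status, success = 'unhandled_error', False
            raise
        finally:
            time_taken = int((time.time() - t1) * 1000)
            _logger.debug('%s -> %s (success=%s) in %d ms',
                          func.__name__, status, success, time_taken)
    return wrapper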
import caffe
import surgery, score

import numpy as np
import os
import sys

# name the process after the working directory; skip silently if unavailable
try:
    import setproctitle
    setproctitle.setproctitle(os.path.basename(os.getcwd()))
except Exception:
    pass

weights = '../ilsvrc-nets/vgg16-fcn.caffemodel'

# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()

solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)

# surgeries: set the 'up*' (deconvolution) layers to bilinear upsampling weights
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
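The snippet imports score but stops right after the surgeries; the FCN solve scripts typically continue with a training loop like the sketch below (the validation-list path and step counts are illustrative):

# training loop; the validation-list path and step counts are illustrative
val = np.loadtxt('../data/segvalid11.txt', dtype=str)
for _ in range(25):
    solver.step(4000)
    score.seg_tests(solver, False, val, layer='score')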
def set_process_title(config):
    # assumes 'from setproctitle import setproctitle' and a project-local get_process_name();
    # 'config' is accepted for interface compatibility but unused here
    setproctitle(get_process_name())
def main_loop():
    # assumes 'from setproctitle import setproctitle', 'import os, socketserver',
    # 'from xmlrpc.server import SimpleXMLRPCDispatcher', and the project-local
    # XMLRPCHandler and Multiplex classes
    setproctitle('webshelld')
    dispatcher = SimpleXMLRPCDispatcher()
    SOCKFILE = '/var/run/webshell.sock'
    if os.path.exists(SOCKFILE):
        os.unlink(SOCKFILE)
    server = socketserver.UnixStreamServer(SOCKFILE, XMLRPCHandler)
    os.chmod(SOCKFILE, 0o700)
    dispatcher.register_instance(
        Multiplex("/usr/local/bin/bash", "xterm-color"))
    server.dispatcher = dispatcher
    server.serve_forever()
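Talking to this daemon from Python needs a Unix-socket transport, which xmlrpc.client lacks out of the box; here is a hypothetical sketch, assuming XMLRPCHandler speaks ordinary HTTP-framed XML-RPC:

import http.client
import socket
import xmlrpc.client

class UnixStreamTransport(xmlrpc.client.Transport):
    # hypothetical client-side transport: xmlrpc.client cannot reach Unix sockets natively
    def __init__(self, path):
        super().__init__()
        self._path = path

    def make_connection(self, host):
        connection = http.client.HTTPConnection('localhost')
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self._path)
        connection.sock = sock  # pre-connected socket; HTTPConnection will reuse it
        return connection

proxy = xmlrpc.client.ServerProxy('http://localhost/',
                                  transport=UnixStreamTransport('/var/run/webshell.sock'))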
def _run_plugins_parallel_helper(timeout):
    # nested helper: 'self', 'plugins' and 'output_queue' come from the enclosing scope;
    # assumes 'import multiprocessing as mp' and 'import time'
    setproctitle.setproctitle('Plugin-Runner')
    processes = []
    queue_lock = mp.Lock()
    for Plugin in plugins:
        process = mp.Process(target=self.run_plugin, name=Plugin.get_name(), args=(Plugin, output_queue, queue_lock))
        process.start()
        processes.append(process)
    start_time = time.time()
    should_end = False
    while len(processes) > 0:
        time_spent = time.time() - start_time
        should_end = time_spent > timeout
        try:
            ...  # truncated in the original example
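The loop above is cut off at the try:. A sketch of one way the missing body could reap children, reusing the fragment's names (process, processes, should_end):

# one way to finish the loop body, reusing the fragment's names
for process in list(processes):
    process.join(timeout=0.1)      # reap plugins that have finished
    if not process.is_alive():
        processes.remove(process)
    elif should_end:
        process.terminate()        # kill plugins that outlived the overall timeout
        processes.remove(process)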
def main():
    import argparse
    import logging
    import setproctitle
    from kolejka.client.client import config_parser as client_parser

    setproctitle.setproctitle('kolejka-client')
    parser = argparse.ArgumentParser(description='KOLEJKA client')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='show more info')
    parser.add_argument('-d', '--debug', action='store_true', default=False, help='show debug info')
    parser.add_argument('--config-file', help='config file')
    parser.add_argument('--config', help='config')
    client_parser(parser)
    args = parser.parse_args()
    level = logging.WARNING
    if args.verbose:
        level = logging.INFO
    if args.debug:
        level = logging.DEBUG
    logging.basicConfig(level=level)
    args.execute(args)
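Such a CLI module would normally end with the standard entry-point guard (not shown in the original excerpt):

if __name__ == '__main__':
    main()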