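# Assumed context from earlier in this file: `db`, `db_wrapper`, `job_config`,
# `args`, `log`, `run_pipeline`, `run_name`, and `TestFailure` are defined
# above; `now` is presumably a timer (e.g. `from timeit import default_timer as now`).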
# Start the Scanner job
log.info('Starting Scanner job')
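# Packet sizes tune the granularity of Scanner's I/O and work units for this
# run; both values come from the job configuration under test.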
run_opts = {
    'io_packet_size': job_config.io_packet_size,
    'work_packet_size': job_config.work_packet_size,
}
ppw = job_config.pipelines_per_worker
# -1 is a sentinel meaning "leave pipeline_instances_per_node at its default"
if ppw != -1:
    run_opts['pipeline_instances_per_node'] = ppw
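# detach=True launches the job asynchronously; instead of blocking here, the
# harness tracks progress by monitoring the cluster below.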
run_pipeline(db, detach=True, run_opts=run_opts, **args)
# Wait until the job succeeds or crashes
start = now()
log.info('Monitoring cluster')
result, metrics = db_wrapper.cluster.monitor(db)
end = now() - start  # elapsed wall-clock time of the monitored run
# If the job crashed:
if not result:
    # Restart the cluster, since it may have been left in a bad state
    db_wrapper.cluster.start()
    raise TestFailure("Out of memory")
# Write out the profile if the run succeeded
outputs = run_pipeline(db, no_execute=True, **args)
try:
    outputs[0]._column._table.profiler().write_trace(
        '/app/data/traces/{}.trace'.format(
            run_name(db_wrapper.cluster.config(), job_config)))
except Exception:
    log.error('Failed to write trace')