Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Parse the elegant run log for error text.
# NOTE(review): this excerpt lost its indentation and is truncated mid-loop
# (the if/elif chain continues past the visible lines); code is kept
# byte-identical, comments only.
def parse_elegant_log(run_dir):
# Returns ('', 0) when no log file has been written yet.
path = run_dir.join(ELEGANT_LOG_FILE)
if not path.exists():
return '', 0
res = ''
# Name of the most recently "Starting"-ed element -- presumably used to
# attribute subsequent errors to an element; TODO confirm in full source.
last_element = None
text = pkio.read_text(str(path))
# When set, the line following an error header is appended to ``res``.
want_next_line = False
# Used to collapse immediately repeated log lines.
prev_line = ''
prev_err = ''
for line in text.split('\n'):
# Skip exact repeats of the previous line.
if line == prev_line:
continue
# Track the currently executing element, e.g. "Starting Q1 at s=...".
match = re.search('^Starting (\S+) at s\=', line)
if match:
name = match.group(1)
# Ignore generated names of the form "M<digits>#..." -- presumably
# auto-created markers; TODO confirm.
if not re.search('^M\d+\#', name):
last_element = name
if want_next_line:
res += line + '\n'
want_next_line = False
# Lines on the ignore list are dropped silently.
elif _is_ignore_error_text(line):
pass
# Extract "*** WARNING ***" / "*** ERROR ***" messages from the synergia run
# log, accumulating continuation lines into one message per blank-line break.
# NOTE(review): this excerpt lost its indentation and is truncated after the
# "extra = ..." line; code is kept byte-identical, comments only.
def parse_synergia_log(run_dir):
# No run log yet: nothing to parse.
if not run_dir.join(template_common.RUN_LOG).exists():
return None
text = pkio.read_text(run_dir.join(template_common.RUN_LOG))
errors = []
# Message currently being accumulated across consecutive lines.
current = ''
for line in text.split("\n"):
# A blank line terminates the message being accumulated.
if not line:
if current:
errors.append(current)
current = ''
continue
# Matches both the correct spelling and synergia's "WARRNING" typo.
m = re.match('\*\*\* (WARR?NING|ERROR) \*\*\*(.*)', line)
if m:
# Only start a new message if we are not already inside one.
if not current:
error_type = m.group(1)
# Normalize the misspelled variant before display.
if error_type == 'WARRNING':
error_type = 'WARNING'
current = '{}: '.format(error_type)
extra = m.group(2)
# Build a parameters-file body from the models in ``data``, using the FLASH
# units file to map model names to section headings.
# NOTE(review): this excerpt lost its indentation and is truncated mid-loop
# (the body of "if f not in schema:" is not visible); code is kept
# byte-identical, comments only.
def _generate_parameters_file(data):
res = ''
# Maps colon-joined unit path (with trailing "Main" components dropped)
# to the original units-file line.
names = {}
for line in pkio.read_text(_FLASH_UNITS_PATH[data.models.simulation.flashType]).split('\n'):
name = ''
#TODO(pjm): share with setup_params parser
for part in line.split('/'):
# Path components ending in "Main" are omitted from the key.
if not re.search('Main$', part):
name += (':' if len(name) else '') + part
names[name] = line
# Iterate models in sorted order for deterministic output.
for m in sorted(data.models):
if m in names:
if m not in _SCHEMA.model:
# old model which was removed from schema
continue
schema = _SCHEMA.model[m]
heading = '# {}\n'.format(names[m])
# Heading is presumably emitted lazily, only once a field is
# actually written -- TODO confirm against the full function body.
has_heading = False
for f in sorted(data.models[m]):
if f not in schema:
def run_dir_status(self, run_dir):
    """Report the state of whatever job lives in ``run_dir``.

    Job state is stored either on disk or in ``self.report_jobs``,
    never in both places at once.

    Returns:
        Tuple of (jhash or None, status of that job)
    """
    in_path = run_dir.join('in.json')
    status_path = run_dir.join('status')
    on_disk = in_path.exists() and status_path.exists()
    if on_disk:
        # Disk and memory are mutually exclusive stores of job state.
        assert run_dir not in self.report_jobs
        jhash = pkjson.load_any(pkio.read_text(in_path)).reportParametersHash
        status = pkio.read_text(status_path)
        if status == 'pending':
            # We never persist "pending", so a disk copy is necessarily
            # stale; downgrade it to an error.
            pkdlog(
                'found "pending" status, treating as "error" ({})',
                status_path,
            )
            status = runner_client.JobStatus.ERROR
        return jhash, runner_client.JobStatus(status)
    if run_dir in self.report_jobs:
        info = self.report_jobs[run_dir]
        return info.jhash, info.status
    return None, runner_client.JobStatus.MISSING
# NOTE(review): flat excerpt of a report-runner script body; the original
# indentation (and therefore the exact nesting of the lines after the except
# clause) was lost. Code is kept byte-identical, comments only.
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
distribution = data['models']['bunch']['distribution']
# MPI is only used for 'lattice' and 'file' distributions.
run_with_mpi = distribution == 'lattice' or distribution == 'file'
try:
with pkio.save_chdir(cfg_dir):
if run_with_mpi:
mpi.run_script(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
else:
#TODO(pjm): MPI doesn't work with rsbeams distributions yet
# SECURITY NOTE(review): exec of a generated parameters file; the
# content must be trusted/escaped upstream.
exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
except Exception as e:
res = {
'error': str(e),
}
# NOTE(review): within this excerpt ``res`` is only assigned in the except
# branch -- presumably it is initialized earlier (out of view) or the lines
# below are nested inside the handler; otherwise the success path would raise
# NameError. Verify against the full file.
if run_with_mpi and 'error' in res:
# Prefer the final traceback message from the MPI output file, when found.
text = pkio.read_text('mpi_run.out')
m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE|re.DOTALL)
if m:
res['error'] = m.group(1)
# remove output file - write_result() will not overwrite an existing error output
pkio.unchecked_remove(simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
simulation_db.write_result(res)
# Run a zgoubi simulation: execute the generated python parameters file in
# this function's namespace, then invoke the zgoubi executable in cfg_dir.
# Left byte-identical: the exec(..., locals(), locals()) namespace sharing is
# deliberate and order-sensitive.
def _run_zgoubi(cfg_dir, python_file=template_common.PARAMETERS_PYTHON_FILE):
with pkio.save_chdir(cfg_dir):
# SECURITY NOTE(review): exec of a generated script; content must be
# trusted/escaped upstream.
exec(pkio.read_text(python_file), locals(), locals())
# Return code is ignored -- presumably failures are detected from the
# output files instead; TODO confirm.
subprocess.call([_EXE_PATH])
def run_dir_status(self, run_dir):
    """Get the current status of whatever's happening in run_dir.

    Job state is recorded either on disk (in.json + status files) or in
    memory in ``self.report_jobs``, never both.

    Args:
        run_dir: the job's run directory (py.path-like)

    Returns:
        Tuple of (jhash or None, status of that job)
    """
    disk_in_path = run_dir.join('in.json')
    disk_status_path = run_dir.join('status')
    if disk_in_path.exists() and disk_status_path.exists():
        # status should be recorded on disk XOR in memory
        assert run_dir not in self.report_jobs
        disk_in_text = pkio.read_text(disk_in_path)
        disk_jhash = pkjson.load_any(disk_in_text).reportParametersHash
        disk_status = pkio.read_text(disk_status_path)
        if disk_status == 'pending':
            # We never write this, so it must be stale, in which case
            # the job is no longer pending...
            pkdlog(
                'found "pending" status, treating as "error" ({})',
                disk_status_path,
            )
            disk_status = runner_client.JobStatus.ERROR
        return disk_jhash, runner_client.JobStatus(disk_status)
    if run_dir in self.report_jobs:
        job_info = self.report_jobs[run_dir]
        return job_info.jhash, job_info.status
    # Resolved TODO(robnagler): dropped the redundant ``else:`` around this
    # final return -- both prior branches return, so it was unreachable as
    # an alternative and only added nesting.
    return None, runner_client.JobStatus.MISSING
# Entry point for an SRW report run: load in.json, build the SRW option
# object via the generated srw_parameters.py, and dispatch on report type.
# NOTE(review): this excerpt lost its indentation and is truncated mid-elif
# chain; code is kept byte-identical, comments only.
def main():
with open('in.json') as f:
data = json.load(f)
#TODO(pjm): need to properly escape data values, untrusted from client
# this defines the get_srw_params() and get_beamline_optics() functions
exec(pkio.read_text('srw_parameters.py'), locals(), locals())
v = srwl_bl.srwl_uti_parse_options(get_srw_params())
mag = setup_magnetic_field(v)
op = None
# Each report type enables exactly one SRW calculation flag and selects
# the corresponding output filename from the option object.
if data['report'] == 'intensityReport':
v.ss = True
outfile = v.ss_fn
elif data['report'] == 'fluxReport':
v.sm = True
outfile = v.sm_fn
elif data['report'] == 'powerDensityReport':
v.pw = True
outfile = v.pw_fn
elif data['report'] == 'initialIntensityReport':
v.si = True
outfile = v.si_fn
# Watchpoint reports are matched by prefix (continues past this excerpt).
elif re.search('^watchpointReport', data['report']):