def test_exception():
    Peaks.parallel = True
    with tempfile.TemporaryDirectory() as temp_dir:
        st = strax.Context(storage=strax.DataDirectory(temp_dir),
                           register=[Records, Peaks],
                           config=dict(crash=True))

        # Check correct exception is thrown
        with pytest.raises(SomeCrash):
            st.make(run_id=run_id, targets='peaks',
                    max_workers=2)

        # Check exception is recorded in metadata
        # in both its original data type and dependents
        for target in ('peaks', 'records'):
            assert 'SomeCrash' in st.get_meta(run_id, target)['exception']

        # Check corrupted data does not load
        st.context_config['forbid_creation_of'] = ('peaks',)
        with pytest.raises(strax.DataNotAvailable):
            # The snippet is cut off here; loading the forbidden target
            # is the natural body (assumed, not from the original):
            st.get_array(run_id, 'peaks')

def test_processing():
    """Test ParallelSource plugin under several conditions"""
    # It's always harder with a small mailbox:
    # a two-message mailbox makes producers block almost immediately,
    # exercising strax's backpressure handling.
    strax.Mailbox.DEFAULT_MAX_MESSAGES = 2

    for request_peaks in (True, False):
        for peaks_parallel in (True, False):
            for max_workers in (1, 2):
                Peaks.parallel = peaks_parallel
                print(f"\nTesting with request_peaks {request_peaks}, "
                      f"peaks_parallel {peaks_parallel}, "
                      f"max_workers {max_workers}")

                mystrax = strax.Context(storage=[],
                                        register=[Records, Peaks])
                bla = mystrax.get_array(
                    run_id=run_id,
                    targets='peaks' if request_peaks else 'records',
                    max_workers=max_workers)
                assert len(bla) == recs_per_chunk * n_chunks
                assert bla.dtype == (
                    strax.peak_dtype() if request_peaks else strax.record_dtype())
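

# The tests above reference fixtures that are not part of this snippet.
# A minimal sketch of what they could look like (the names come from the
# tests; the bodies here are illustrative assumptions, not strax's code):
import numpy as np
import strax

run_id = '0'
recs_per_chunk = 10
n_chunks = 10


class Records(strax.Plugin):
    provides = 'records'
    depends_on = tuple()
    dtype = strax.record_dtype()

    def iter(self, *args, **kwargs):
        # Emit n_chunks chunks of recs_per_chunk empty records each
        for _ in range(n_chunks):
            yield np.zeros(recs_per_chunk, self.dtype)


class SomeCrash(Exception):
    pass


@strax.takes_config(
    strax.Option('crash', default=False))
class Peaks(strax.Plugin):
    parallel = True
    provides = 'peaks'
    depends_on = ('records',)
    dtype = strax.peak_dtype()

    def compute(self, records):
        # Crash on demand so test_exception can check error propagation
        if self.config['crash']:
            raise SomeCrash
        return np.zeros(len(records), self.dtype)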

def build_datastructure_doc():
    out = page_header
    pd.set_option('display.max_colwidth', None)  # don't truncate cell contents

    st = strax.Context(register_all=strax.xenon.plugins)

    # Too lazy to write proper graph sorter:
    # iterating data types from most to fewest dependencies
    # approximates a topological order.
    plugins_by_deps = defaultdict(list)
    for pn, p in st._plugin_class_registry.items():
        plugins = st._get_plugins((pn,), run_id='0')
        plugins_by_deps[len(plugins)].append(pn)

    os.makedirs(this_dir + '/graphs', exist_ok=True)
    for n_deps in sorted(plugins_by_deps, reverse=True):
        for data_type in plugins_by_deps[n_deps]:
            plugins = st._get_plugins((data_type,), run_id='0')

            # Create dependency graph
            g = graphviz.Digraph(format='svg')
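            # The example is cut off here. A plausible continuation
            # (an assumption, not the original code): add a node per
            # data type and an edge for each dependency, then render.
            for d, p in plugins.items():
                g.node(d)
                for dep in strax.to_str_tuple(p.depends_on):
                    g.edge(d, dep)
            g.render(this_dir + f'/graphs/{data_type}')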

# (The argparse setup is elided in this snippet; `parser` is assumed
# to be an argparse.ArgumentParser defined earlier in the script.)
parser.add_argument('--sync_chunk_duration', default=0.2, type=float,
                    help='Synchronization chunk size in sec')
args = parser.parse_args()
n_readers = 8
n_channels = len(strax.xenon.common.to_pe)
channels_per_reader = np.ceil(n_channels / n_readers)
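# Presumably each reader is assigned a contiguous block of this many channels.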
output_dir = './from_fake_daq'
if args.shm:
    output_dir = '/dev/shm/from_fake_daq'
if os.path.exists(output_dir):
    shutil.rmtree(output_dir)
os.makedirs(output_dir)
st = strax.Context(storage='./test_input_data')
st.register(strax.xenon.pax_interface.RecordsFromPax)

@numba.njit
def restore_baseline(records):
    """Restore raw waveform polarity: each stored sample is inverted
    around a 16000 ADC-count baseline."""
    for r in records:
        r['data'][:r['length']] = 16000 - r['data'][:r['length']]

def write_to_dir(c, outdir):
    tempdir = outdir + '_temp'
    os.makedirs(tempdir)
    for reader_i, x in enumerate(c):
        with open(f'{tempdir}/reader_{reader_i}', 'wb') as f:
            f.write(copy(x))  # Copy needed for honest shm writing?
    os.rename(tempdir, outdir)
run_id = '180423_1021'
if args.shm:
    in_dir = '/dev/shm/from_fake_daq'
else:
    in_dir = args.input
out_dir = args.output

# Clean all output dirs. This is of course temporary!
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
os.makedirs(out_dir)

if args.no_super_raw:
    strax.xenon.plugins.DAQReader.save_meta_only = True

st = strax.Context(
    storage=strax.DataDirectory(out_dir),
    config=dict(input_dir=in_dir,
                erase=args.erase),
    allow_rechunk=not args.norechunk)
st.register_all(strax.xenon.plugins)
gil_load.start(av_sample_interval=0.05)
start = time.time()

for i, events in enumerate(
        st.get_iter(run_id, args.target,
                    max_workers=args.n)):
    print(f"\t{i}: Found {len(events)} events")

end = time.time()
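
# The snippet ends here; a plausible wrap-up (an assumption, not part of
# the original) would stop the GIL monitor and report the timing:
gil_load.stop()
print(f"Processing took {end - start:.1f} seconds")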