Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Each bound gets its own pulse, in its own channel
recs = np.zeros(len(bs), dtype=strax.record_dtype(n_samples))
for i, (l, r) in enumerate(bs):
# Add waveform roughly in the center
length = r - l # Exclusive right bound, no + 1
pad = (n_samples - (r - l)) // 2
recs[i]['time'] = l
recs[i]['length'] = pad + length
recs[i]['data'][pad:pad+length] = 1
assert recs[i]['data'].sum() == length
recs[i]['channel'] = 0 if single_channel else i
if not single_channel:
assert len(np.unique(recs['channel'])) == len(bs)
else:
# Make a single record with 1 inside the bounds, 0 outside
recs = np.zeros(1, dtype=strax.record_dtype(n_samples))
for l, r in bs:
recs[0]['data'][l:r] = 1
recs[0]['time'] = 0
recs[0]['length'] = n_samples
recs['dt'] = 1
return recs
This fetches from run metadata, and if this fails, it
estimates it using data metadata from targets.
"""
try:
# Use run metadata, if it is available, to get
# the run start time (floored to seconds)
t0 = self.run_metadata(run_id, 'start')['start']
t0 = t0.replace(tzinfo=datetime.timezone.utc)
return int(t0.timestamp()) * int(1e9)
except (strax.RunMetadataNotAvailable, KeyError):
pass
# Get an approx start from the data itself,
# then floor it to seconds for consistency
if targets:
for t in strax.to_str_tuple(targets):
try:
t0 = self.get_meta(run_id, t)['chunks'][0]['start']
return (int(t0) // int(1e9)) * int(1e9)
except strax.DataNotAvailable:
pass
warnings.warn(
"Could not estimate run start time from "
"run metadata: assuming it is 0",
UserWarning)
return 0
assert strax.shm_pop(x, keep=False).sum() == 45
assert len(SharedArray.list()) == 0
# Support for dicts of arrays
inp = dict(x=np.arange(10), y='something')
f = ex.submit(square_2, inp)
result = f.result()
assert isinstance(result, dict)
assert result['x'].sum() == 285
assert result['y'] == 'something'
assert len(result) == 2
assert len(SharedArray.list()) == 0
# Support for structured arrays
x = np.zeros(2, dtype=strax.record_dtype())
x[0]['time'] = 42
f = ex.submit(nop, x)
result = f.result()
assert result[0]['time'] == 42
def test_overlap_plugin(input_peaks, split_i):
"""Counting the number of nearby peaks should not depend on how peaks are
chunked.
"""
chunks = np.split(input_peaks, [split_i])
chunks = [c for c in chunks]
class Peaks(strax.Plugin):
depends_on = tuple()
dtype = strax.interval_dtype
def compute(self, chunk_i):
return chunks[chunk_i]
# Hack to make peak output stop after a few chunks
def is_ready(self, chunk_i):
return chunk_i < len(chunks)
def source_finished(self):
return True
window = 10
# Note we must apply this to endtime, not time, since
def mailbox_tester(messages,
numbers=None,
reader_sleeps=0.,
max_messages=100,
expected_result=None,
timeout=SHORT_TIMEOUT,
result_timeout=LONG_TIMEOUT):
if numbers is None:
numbers = np.arange(len(messages))
if expected_result is None:
messages = np.asarray(messages)
expected_result = messages[np.argsort(numbers)]
mb = strax.Mailbox(max_messages=max_messages, timeout=timeout)
n_readers = 2
with concurrent.futures.ThreadPoolExecutor() as tp:
futures = [tp.submit(reader,
source=mb.subscribe(),
reader_sleeps=reader_sleeps)
for _ in range(n_readers)]
for i in range(len(messages)):
mb.send(messages[i], msg_number=numbers[i])
print(f"Sent message {i}. Now {len(mb._mailbox)} ms in mailbox.")
mb.close()
# Results must be equal
return window
def compute(self, peaks):
    """For each peak, count how many peaks end within the window."""
    counts = count_in_window(strax.endtime(peaks))
    return {'n_within_window': counts}
def iter(self, *args, **kwargs):
    # Transparent pass-through to the base class iterator; presumably
    # kept as an override hook for the test — TODO confirm intent.
    yield from super().iter(*args, **kwargs)
# Build a storage-less context with the two test plugins and run it.
st = strax.Context(storage=[])
st.register(Peaks)
st.register(WithinWindow)
result = st.get_array(run_id='some_run', targets='within_window')
# Compute the expected counts directly on the unchunked input:
# chunking must not change the answer.
expected = count_in_window(strax.endtime(input_peaks))
assert len(expected) == len(input_peaks), "WTF??"
assert isinstance(result, np.ndarray), "Did not get an array"
assert len(result) == len(expected), "Result has wrong length"
np.testing.assert_equal(result['n_within_window'], expected,
                        "Counting went wrong")
def test_dsi(intvs):
    """Check that the generated intervals are sorted and disjoint."""
    # Convert the structured intervals to plain (start, end) tuples;
    # strax.endtime gives each interval's exclusive right bound.
    bs = list(zip(intvs['time'].tolist(), strax.endtime(intvs).tolist()))
    assert is_sorted(bs)
    assert is_disjoint(bs)
disjoint=True).map(
partial(bounds_to_records, single_channel=True))
##
# Basic test plugins
##
@strax.takes_config(
    strax.Option('crash', default=False),
    strax.Option('secret_time_offset', default=0, track=False)
)
class Records(strax.Plugin):
    """Test source plugin emitting fixed-size chunks of synthetic records.

    Options:
      crash: if True, compute raises SomeCrash instead of producing data.
      secret_time_offset: untracked shift added to every record time.
    """
    provides = 'records'
    parallel = 'process'
    depends_on = tuple()
    dtype = strax.record_dtype()

    def source_finished(self):
        # The synthetic source always knows when it is done.
        return True

    def is_ready(self, chunk_i):
        # Produce exactly n_chunks chunks (module-level constant).
        return chunk_i < n_chunks

    def compute(self, chunk_i):
        if self.config['crash']:
            raise SomeCrash("CRASH!!!!")
        records = np.zeros(recs_per_chunk, self.dtype)
        # One sample per record; time encodes the chunk number.
        records['time'] = chunk_i + self.config['secret_time_offset']
        records['dt'] = 1
        records['length'] = 1
        records['channel'] = np.arange(recs_per_chunk)
        return records
def test_find_hits():
    """Tests the hitfinder with simple example pulses"""
    # Each case: (waveform samples, expected (left, right) hit bounds).
    cases = [
        ([], []),
        ([1], [(0, 1)]),
        ([1, 0], [(0, 1)]),
        ([1, 0, 1], [(0, 1), (2, 3)]),
        ([1, 0, 1, 0], [(0, 1), (2, 3)]),
        ([1, 0, 1, 0, 1], [(0, 1), (2, 3), (4, 5)]),
        ([0, 1, 2, 0, 4, -1, 60, 700, -4], [(1, 3), (4, 5), (6, 8)]),
        ([1, 1, 2, 0, 4, -1, 60, 700, -4], [(0, 3), (4, 5), (6, 8)]),
        ([1, 0, 2, 3, 4, -1, 60, 700, -4], [(0, 1), (2, 5), (6, 8)]),
        ([1, 0, 2, 3, 4, -1, 60, 700, 800], [(0, 1), (2, 5), (6, 9)]),
        ([0, 0, 2, 3, 4, -1, 60, 700, 800], [(2, 5), (6, 9)]),
    ]
    for waveform, expected in cases:
        # Build a single 9-sample record holding the waveform,
        # zero-padded on the right.
        rec = np.zeros(1, strax.record_dtype(9))
        rec[0]['data'][:len(waveform)] = waveform
        rec['dt'] = 1
        rec['length'] = 9
        found = _find_hits(rec)
        assert len(found) == len(expected)
        assert found == expected
r = np.zeros(recs_per_chunk, self.dtype)
r['time'] = chunk_i + self.config['secret_time_offset']
r['length'] = r['dt'] = 1
r['channel'] = np.arange(len(r))
return r
class SomeCrash(Exception):
    """Marker exception raised by test plugins to simulate a compute crash."""
    pass
@strax.takes_config(
    strax.Option('base_area', default=0),
    strax.Option('give_wrong_dtype', default=False),
    strax.Option('bonus_area', default_by_run=[(0, 0), (1, 1)]))
class Peaks(strax.Plugin):
    """Test plugin building one trivial peak per input record.

    Options:
      base_area: constant added to every peak's area.
      give_wrong_dtype: if True, return an array whose dtype does not
        match self.dtype (to exercise the context's dtype check).
      bonus_area: run-dependent extra area.
    """
    provides = 'peaks'
    data_kind = 'peaks'
    depends_on = ('records',)
    dtype = strax.peak_dtype()
    parallel = True

    def compute(self, records):
        if self.config['give_wrong_dtype']:
            # Deliberately wrong dtype. Use builtin int/float: the
            # np.int / np.float aliases (which meant exactly these
            # builtins) were deprecated in NumPy 1.20 and removed in 1.24.
            return np.zeros(5, [('a', int), ('b', float)])
        p = np.zeros(len(records), self.dtype)
        # One peak per record, inheriting the record's time.
        p['time'] = records['time']
        p['length'] = p['dt'] = 1
        p['area'] = self.config['base_area'] + self.config['bonus_area']
        return p