# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
import strax
export, __all__ = strax.exporter()
__all__ += ['RUN_DEFAULTS_KEY']
RUN_DEFAULTS_KEY = 'strax_defaults'
@strax.takes_config(
    strax.Option(name='storage_converter', default=False,
                 help='If True, save data that is loaded from one frontend '
                      'through all willing other storage frontends.'),
    strax.Option(name='fuzzy_for', default=tuple(),
                 help='Tuple of plugin names for which no checks for version, '
                      'providing plugin, and config will be performed when '
                      'looking for data.'),
    strax.Option(name='fuzzy_for_options', default=tuple(),
                 help='Tuple of config options for which no checks will be '
                      'performed when looking for data.'),
    strax.Option(name='allow_incomplete', default=False,
                 help="Allow loading of incompletely written data, if the "
                      "storage systems support it"),
    strax.Option(name='allow_rechunk', default=True,
                 help="Allow rechunking of data during writing."),
    strax.Option(name='allow_multiprocess', default=False,
                 help="Allow multiprocessing."
                      "If False, will use multithreading only."),
    strax.Option(name='allow_shm', default=False,
                 help="Allow use of /dev/shm for interprocess communication."),
    strax.Option(name='allow_lazy', default=True,
                 help='Allow "lazy" processing. Saves memory, but incompatible '
                      'with multiprocessing and perhaps slightly slower.'),
    # Fix: the original contained a truncated duplicate paste of the
    # allow_shm / allow_lazy / forbid_creation_of options here (the first
    # forbid_creation_of Option call was never closed -> SyntaxError).
    # The duplicates were removed; one copy of each option is kept.
    strax.Option(name='forbid_creation_of', default=tuple(),
                 help="If any of the following datatypes is requested to be "
                      "created, throw an error instead. Useful to limit "
                      "descending too far into the dependency graph."),
    strax.Option(name='store_run_fields', default=tuple(),
                 help="Tuple of run document fields to store "
                      "during scan_run."),
    strax.Option(name='check_available', default=tuple(),
                 help="Tuple of data types to scan availability for "
                      "during scan_run."),
    strax.Option(name='max_messages', default=4,
                 help="Maximum number of mailbox messages, i.e. size of buffer "
                      "between plugins. Too high = RAM blows up. "
                      "Too low = likely deadlocks."),
    strax.Option(name='timeout', default=24 * 3600,
                 help="Terminate processing if any one mailbox receives "
                      "no result for more than this many seconds"),
    strax.Option(name='use_per_run_defaults', default=False,
                 help='Scan the run db for per-run defaults. '
                      'This is an experimental strax feature that will '
                      'possibly be removed, see issue #246'),
    strax.Option(name='free_options', default=tuple(),
                 help='Do not warn if any of these options are passed, '
                      'even when no registered plugin takes them.')
)
@export
class Context:
    """Strax processing context (implementation missing from this chunk).

    NOTE(review): the statements that followed the ``class Context:``
    header in this chunk are a misplaced fragment of a neural-net
    position-reconstruction plugin's ``compute`` (they reference
    ``self.nn`` / ``self.graph``), not Context code. They also contained
    a ``return`` at class scope, a SyntaxError. The fragment is preserved
    below as a private method so the file parses; restore the real
    Context implementation from upstream.
    """

    def _misplaced_nn_fragment(self, peaks, peak_mask, x):
        # NaN-filled (n_peaks, 2) result; only rows selected by peak_mask
        # receive the network prediction.
        result = np.ones((len(peaks), 2), dtype=np.float32) * float('nan')
        with self.graph.as_default():
            result[peak_mask, :] = self.nn.predict(x)
        # Convert from mm to cm... why why why
        result /= 10
        return dict(x=result[:, 0], y=result[:, 1])
@export
@strax.takes_config(
    strax.Option('s1_max_width', default=150,
                 help="Maximum (IQR) width of S1s"),
    strax.Option('s1_min_n_channels', default=3,
                 help="Minimum number of PMTs that must contribute to a S1"),
    strax.Option('s2_min_area', default=10,
                 help="Minimum area (PE) for S2s"),
    strax.Option('s2_min_width', default=200,
                 help="Minimum width for S2s"))
class PeakClassification(strax.Plugin):
    """Label each peak as S1 (type 1), S2 (type 2), or unknown (0)."""
    __version__ = '0.0.1'
    depends_on = ('peak_basics',)
    dtype = [
        ('type', np.int8, 'Classification of the peak.')]
    parallel = True

    def compute(self, peaks):
        cfg = self.config
        result = np.zeros(len(peaks), dtype=self.dtype)

        # S1: enough contributing PMTs and narrow enough (IQR width).
        s1_mask = ((peaks['n_channels'] > cfg['s1_min_n_channels'])
                   & (peaks['range_50p_area'] < cfg['s1_max_width']))
        result['type'][s1_mask] = 1

        # S2: large and wide enough. Written after the S1 labels, so a
        # peak satisfying both conditions ends up labeled 2 (same
        # precedence as the original sequential assignment).
        s2_mask = ((peaks['area'] > cfg['s2_min_area'])
                   & (peaks['range_50p_area'] > cfg['s2_min_width']))
        result['type'][s2_mask] = 2
        return result
@strax.takes_config(
    strax.Option('min_area_fraction', default=0.5,
                 help='The area of competing peaks must be at least '
                      'this fraction of that of the considered peak'),
    strax.Option('nearby_window', default=int(1e7),
                 help='Peaks starting within this time window (on either side)'
                      'in ns count as nearby.'),
)
class NCompeting(strax.OverlapWindowPlugin):
    """Count, for each peak, nearby larger or slightly smaller peaks."""
    depends_on = ('peak_basics',)
    dtype = [
        ('n_competing', np.int32,
         'Number of nearby larger or slightly smaller peaks')]

    def get_window_size(self):
        # Neighbours can start nearby_window ns on either side.
        return 2 * self.config['nearby_window']

    def compute(self, peaks):
        # Fix: the call below was truncated mid-arguments in this chunk
        # (unclosed parentheses -> SyntaxError). The closing argument is
        # reconstructed from the min_area_fraction option declared above.
        # NOTE(review): confirm the keyword name against upstream
        # find_n_competing.
        return dict(n_competing=self.find_n_competing(
            peaks,
            window=self.config['nearby_window'],
            fraction=self.config['min_area_fraction']))
class Records(strax.Plugin):
    # NOTE(review): the class header was missing from this chunk, leaving
    # these attributes and compute() as orphan top-level statements. The
    # header is reconstructed here: the Peaks plugin below declares
    # depends_on = ('records',), so a plugin providing 'records' must
    # exist. Upstream likely also declared depends_on = ('raw_records',),
    # __version__, and an @export decorator -- confirm and restore.
    data_kind = 'records'  # TODO: indicate cuts have been done?
    compressor = 'zstd'
    parallel = True
    rechunk_on_save = False
    dtype = strax.record_dtype()

    def compute(self, raw_records):
        # Remove record tails, then zero out samples outside found hits.
        # to_pe is a module-level gain array defined elsewhere in the file.
        r = strax.exclude_tails(raw_records, to_pe)
        hits = strax.find_hits(r)
        strax.cut_outside_hits(r, hits)
        return r
@export
@strax.takes_config(
    strax.Option('diagnose_sorting', track=False, default=False,
                 help="Enable runtime checks for sorting and disjointness"))
class Peaks(strax.Plugin):
    """Cluster hits found in records into peaks and sum their waveforms."""
    depends_on = ('records',)
    data_kind = 'peaks'
    parallel = True
    rechunk_on_save = True
    dtype = strax.peak_dtype(n_channels=len(to_pe))

    def compute(self, records):
        r = records
        hits = strax.find_hits(r)  # TODO: Duplicate work
        hits = strax.sort_by_time(hits)
        peaks = strax.find_peaks(hits, to_pe,
                                 result_dtype=self.dtype)
        strax.sum_waveform(peaks, r, to_pe)
        # Fix: the original compute fell off the end without a return, so
        # the plugin would emit None. Return the built peaks array.
        # NOTE(review): the diagnose_sorting option is unused in the
        # visible code, and upstream performs extra steps (peak splitting,
        # sorting checks) here -- this chunk may be truncated; confirm.
        return peaks
@export
@strax.takes_config(
    strax.Option('trigger_min_area', default=100,
                 help='Peaks must have more area (PE) than this to '
                      'cause events'),
    strax.Option('trigger_max_competing', default=7,
                 help='Peaks must have FEWER nearby larger or slightly smaller'
                      ' peaks to cause events'),
    strax.Option('left_event_extension', default=int(1e6),
                 help='Extend events this many ns to the left from each '
                      'triggering peak'),
    strax.Option('right_event_extension', default=int(1e6),
                 help='Extend events this many ns to the right from each '
                      'triggering peak'),
    strax.Option('max_event_duration', default=int(1e7),
                 help='Events longer than this are forcefully ended, '
                      'triggers in the truncated part are lost!'),
)
class Events(strax.OverlapWindowPlugin):
    """Group triggering peaks into events with configurable extensions.

    NOTE(review): no compute() is visible in this chunk -- the
    implementation appears to have been truncated; confirm against
    upstream.
    """
    depends_on = ['peak_basics', 'n_competing']
    data_kind = 'events'
    dtype = [
        ('event_number', np.int64, 'Event number in this dataset'),
        ('time', np.int64, 'Event start time in ns since the unix epoch'),
        ('endtime', np.int64, 'Event end time in ns since the unix epoch')]
    # Running tally of events produced so far.
    events_seen = 0

    def get_window_size(self):
        # The overlap window must cover twice the left extension plus the
        # right extension.
        left = self.config['left_event_extension']
        right = self.config['right_event_extension']
        return 2 * left + right
# NOTE(review): orphan fragment. These lines are the tail of a pax
# conversion iterator (they reference p.raw_data, records, results,
# finish_results(), mypax, and events_per_chunk, all defined in the
# missing enclosing function). As written, `yield` appears outside any
# function -- a SyntaxError. The enclosing def was lost from this chunk;
# restore it from upstream. Indentation below is reconstructed.
r['data'][:n_store] = p.raw_data[offset:offset + n_store]
output_record_index += 1
results.append(records)
# Flush a chunk once enough events are buffered.
if len(results) >= events_per_chunk:
    yield finish_results()
mypax.shutdown()
# Flush any remaining buffered events.
if len(results):
    yield finish_results()
@export
@strax.takes_config(
    strax.Option('pax_raw_dir', default='/data/xenon/raw', track=False,
                 help="Directory with raw pax datasets"),
    strax.Option('stop_after_zips', default=0, track=False,
                 help="Convert only this many zip files. 0 = all."),
    strax.Option('events_per_chunk', default=10, track=False,
                 help="Number of events to yield per chunk")
)
class RecordsFromPax(strax.Plugin):
    # Source plugin: converts pax-format raw data into strax raw_records.
    provides = 'raw_records'
    data_kind = 'raw_records'
    depends_on = tuple()
    dtype = strax.record_dtype()
    parallel = False

    def iter(self, *args, **kwargs):
        # Fail fast if the configured pax directory does not exist.
        if not os.path.exists(self.config['pax_raw_dir']):
            raise FileNotFoundError(self.config['pax_raw_dir'])
        # NOTE(review): the remainder of iter() (scanning zip files and
        # yielding record chunks) is missing from this chunk -- it appears
        # truncated; restore from upstream.
@strax.takes_config(
    strax.Option(
        's1_relative_lce_map',
        help="S1 relative LCE(x,y,z) map",
        default_by_run=[
            (0, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR0_pax-680_fdc-3d_v0.json')),  # noqa
            (first_sr1_run, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR1_pax-680_fdc-3d_v0.json'))]),  # noqa
    strax.Option(
        's2_relative_lce_map',
        help="S2 relative LCE(x, y) map",
        default_by_run=[
            (0, pax_file('XENON1T_s2_xy_ly_SR0_24Feb2017.json')),
            # Fix: this entry used the bare numeric literal 170118_1327,
            # which Python parses as the int 1701181327. Every sibling
            # default_by_run in this file marks the SR1 boundary with the
            # first_sr1_run identifier, so it is restored here.
            (first_sr1_run, pax_file('XENON1T_s2_xy_ly_SR1_v2.2.json'))]),
    strax.Option(
        'electron_lifetime',
        help="Electron lifetime (ns)",
        default_by_run=get_elife)
)
class CorrectedAreas(strax.Plugin):
    """Provide corrected S1/S2 areas (cs1, cs2) for events.

    NOTE(review): no compute() is visible in this chunk -- the
    implementation appears truncated; confirm against upstream.
    """
    depends_on = ['event_basics', 'event_positions']
    dtype = [('cs1', np.float32, 'Corrected S1 area (PE)'),
             ('cs2', np.float32, 'Corrected S2 area (PE)')]

    def setup(self):
        # Load the position-dependent light-collection maps once.
        from .itp_map import InterpolatingMap
        self.s1_map = InterpolatingMap(
            get_resource(self.config['s1_relative_lce_map']))
        self.s2_map = InterpolatingMap(
            get_resource(self.config['s2_relative_lce_map']))
# NOTE(review): orphan fragment. These lines are the tail of a
# peak-basics compute() (they reference p, r, and to_pe defined in the
# missing enclosing function). As written, `return` appears outside any
# function -- a SyntaxError. The enclosing def was lost from this chunk;
# restore it from upstream. Indentation below is reconstructed.
r['range_50p_area'] = p['width'][:, 5]
r['max_pmt'] = np.argmax(p['area_per_channel'], axis=1)
r['max_pmt_area'] = np.max(p['area_per_channel'], axis=1)
# TODO: get n_top_pmts from config...
area_top = (p['area_per_channel'][:, :127]
            * to_pe[:127].reshape(1, -1)).sum(axis=1)
# Negative-area peaks get 0 AFT - TODO why not NaN?
m = p['area'] > 0
r['area_fraction_top'][m] = area_top[m]/p['area'][m]
return r
@export
@strax.takes_config(
strax.Option(
'nn_architecture',
help='Path to JSON of neural net architecture',
default_by_run=[
(0, pax_file('XENON1T_tensorflow_nn_pos_20171217_sr0.json')),
(first_sr1_run, pax_file('XENON1T_tensorflow_nn_pos_20171217_sr1.json'))]), # noqa
strax.Option(
'nn_weights',
help='Path to HDF5 of neural net weights',
default_by_run=[
(0, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr0.h5')),
(first_sr1_run, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr1.h5'))]), # noqa
strax.Option('min_reconstruction_area',
help='Skip reconstruction if area (PE) is less than this',
default=10)
strax.Option(name='storage_converter', default=False,
help='If True, save data that is loaded from one frontend '
'through all willing other storage frontends.'),
strax.Option(name='fuzzy_for', default=tuple(),
help='Tuple of plugin names for which no checks for version, '
'providing plugin, and config will be performed when '
'looking for data.'),
strax.Option(name='fuzzy_for_options', default=tuple(),
help='Tuple of config options for which no checks will be '
'performed when looking for data.'),
strax.Option(name='allow_incomplete', default=False,
help="Allow loading of incompletely written data, if the "
"storage systems support it"),
strax.Option(name='allow_rechunk', default=True,
help="Allow rechunking of data during writing."),
strax.Option(name='allow_multiprocess', default=False,
help="Allow multiprocessing."
"If False, will use multithreading only."),
strax.Option(name='allow_shm', default=False,
help="Allow use of /dev/shm for interprocess communication."),
strax.Option(name='allow_lazy', default=True,
help='Allow "lazy" processing. Saves memory, but incompatible '
'with multiprocessing and perhaps slightly slower.'),
strax.Option(name='forbid_creation_of', default=tuple(),
help="If any of the following datatypes is requested to be "
"created, throw an error instead. Useful to limit "
"descending too far into the dependency graph."),
strax.Option(name='store_run_fields', default=tuple(),
help="Tuple of run document fields to store "
"during scan_run."),
strax.Option(name='check_available', default=tuple(),
help="Tuple of data types to scan availability for "