# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Parameter extraction for the whitening / spike-detection stage. ---
# NOTE(review): this fragment is the interior of a larger routine --
# ``params``, ``comm``, ``N_e``, ``data_file``, ``detect_memory`` and
# ``get_nodes_and_edges`` are defined outside this chunk. Indentation was
# restored; tokens are unchanged.
hdf5_compress = params.getboolean('data', 'hdf5_compress')
N_total = params.nb_channels
N_t = params.getint('detection', 'N_t')                      # template width, in samples
dist_peaks = params.getint('detection', 'dist_peaks')
template_shift = params.getint('detection', 'template_shift')
file_out_suff = params.get('data', 'file_out_suff')
spike_thresh = params.getfloat('detection', 'spike_thresh')
spike_width = params.getfloat('detection', 'spike_width')
matched_filter = params.getboolean('detection', 'matched-filter')
matched_thresh = params.getfloat('detection', 'matched_thresh')
sign_peaks = params.get('detection', 'peaks')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
chunk_size = detect_memory(params, whitening=True)
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
nodes, edges = get_nodes_and_edges(params)
safety_time = params.getint('whitening', 'safety_time')
safety_space = params.getboolean('whitening', 'safety_space')
# Number of channels used to estimate the whitening matrix: at least 20,
# at most one per MPI rank, capped by the electrode count N_e.
nb_temp_white = min(max(20, comm.size), N_e)
# Per-rank share of silent samples -- assumes ``params.rate`` is the sampling
# rate in Hz, i.e. 20 s of data split across ranks (TODO confirm).
max_silence_1 = int(20*params.rate // comm.size)
max_silence_2 = 5000
# Inverse lookup: global channel id -> position in the ``nodes`` subset.
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.arange(len(nodes))
jitter_range = params.getint('detection', 'jitter_range')
template_shift_2 = template_shift + jitter_range
use_hanning = params.getboolean('detection', 'hanning')
rejection_threshold = params.getfloat('detection', 'rejection_threshold')
data_file.open()
#################################################################
if use_hanning:
    # Window applied to extracted waveforms when hanning smoothing is on.
    hanning_filter = numpy.hanning(N_t)
def test_work_if_secondary_source(setup, launcher):
    # Integration test: three drivers are registered, driver 'B' is created
    # with ``speed_bump=True`` (presumably an artificial slowdown -- TODO
    # confirm against TestDriver), then 'B' is stopped through the circus
    # client and a file is copied between the remaining drivers A and C.
    #
    # NOTE(review): this function is TRUNCATED in this chunk -- the ``try``
    # below has no matching except/finally here and the body is cut off
    # right after ``loop`` is created. Left byte-identical apart from
    # restored indentation and these comments.
    A = TestDriver('A')
    B = TestDriver('B', speed_bump=True)
    C = TestDriver('C')
    setup.add(A)
    setup.add(B)
    setup.add(C)
    # Client talking to the circus process manager for this setup.
    circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
    try:
        launcher()
        # Synchronously stop watcher 'B' ('waiting': True blocks until done).
        circus.call({
            'command': "stop",
            'properties': {
                'name': 'B',
                'waiting': True
            }
        })
        # Copy a 20-unit file from A to C while B is down.
        launcher.copy_file('default', 'test', 20, A, C)
        loop = BooleanLoop()
def test_velocity(setup, launcher):
    # Integration test: four drivers with different transfer velocities are
    # registered; the slowest one ('A') is stopped, then a file is copied to
    # the three remaining drivers B, C and D.
    #
    # NOTE(review): this function is TRUNCATED in this chunk -- the ``try``
    # below has no matching except/finally here and the body is cut off
    # right after ``loop`` is created. Left byte-identical apart from
    # restored indentation and these comments.
    A = TestDriver('A', velocity=0.1)
    B = TestDriver('B', velocity=0.4)
    C = TestDriver('C', velocity=0.8)
    D = TestDriver('D', velocity=0.7)
    setup.add(A)
    setup.add(B)
    setup.add(C)
    setup.add(D)
    circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
    try:
        launcher()
        # Synchronously stop watcher 'A' ('waiting': True blocks until done).
        circus.call({
            'command': "stop",
            'properties': {
                'name': 'A',
                'waiting': True
            }
        })
        # Copy a 20-unit file to the three remaining drivers.
        launcher.copy_file('default', 'test', 20, B, C, D)
        # Expect two completion events (CounterLoop of 2).
        loop = CounterLoop(2)
def _check_filename(self, file_name):
if not os.path.exists(file_name):
if self.is_master:
print_and_log(["The file %s can not be found!" %file_name], 'error', logger)
sys.exit(1)
# NOTE(review): fragment TRUNCATED at both ends -- it opens with a dangling
# ``else:`` whose ``if`` is outside this chunk, and ends on a body-less
# ``if``. ``all_times``, ``elec``, ``min_times``, ``max_times``, ``midx``,
# ``elt_count``, ``rejected``, ``nb_elts``, ``smart_search``, ``gpass``,
# ``nb_repeats`` and ``isolation`` are defined elsewhere. Left byte-identical
# apart from restored indentation and these comments.
else:
    # Mark the peri-spike window on this electrode as already used.
    all_times[elec, min_times[midx]:max_times[midx]] = True
comm.Barrier()
sys.stderr.flush()
print_and_log(['Node %d has collected %d spikes and rejected %d spikes' % (comm.rank, elt_count, rejected)], 'debug', logger)
# Aggregate per-rank spike counts: gdata is visible on all ranks,
# gdata2 only on rank 0.
gdata = all_gather_array(numpy.array([elt_count], dtype=numpy.float32), comm, 0)
gdata2 = gather_array(numpy.array([rejected], dtype=numpy.float32), comm, 0)
nb_elements = numpy.int64(numpy.sum(gdata))
nb_rejected = numpy.int64(numpy.sum(gdata2))
nb_total = numpy.int64(nb_elts*comm.size)
# First real collection pass found nothing at all: the recording is
# probably not loaded correctly, so abort.
if ((smart_search and (gpass == 0)) or (not smart_search and (gpass == 1))) and nb_elements == 0:
    if comm.rank == 0:
        print_and_log(['No waveforms found! Are the data properly loaded??'], 'error', logger)
        sys.exit(0)
if nb_elements == 0:
    # Nothing left to collect: skip the remaining repeats.
    gpass = nb_repeats
if comm.rank == 0:
    if gpass != 1:
        print_and_log(["Found %d spikes over %d requested" %(nb_elements, nb_total)], 'default', logger)
        if nb_elements == 0:
            print_and_log(["No more spikes in the recording, stop searching"], 'info', logger)
    else:
        if isolation:
            print_and_log(["Found %d isolated spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
        else:
            print_and_log(["Found %d spikes over %d requested (%d rejected)" %(nb_elements, nb_total, nb_rejected)], 'default', logger)
    # Fewer than 20% of the requested spikes were found -- the body of this
    # branch (presumably a warning) is cut off by the chunk boundary.
    if nb_elements < 0.2*nb_total:
# --- Load artefact trigger times and removal windows from text files. ---
# NOTE(review): interior of a larger routine (``params``, ``comm``,
# ``data_file`` defined elsewhere). Expected file layout, inferred from the
# indexing below: column 0 = stimulus id, column 1 = time -- TODO confirm.
trig_in_ms = params.getboolean('triggers', 'trig_in_ms')
artefacts = numpy.loadtxt(params.get('triggers', 'trig_file'), comments =['#','//'])
windows = numpy.loadtxt(params.get('triggers', 'trig_windows'), comments =['#','//'])
make_plots = params.get('triggers', 'make_plots')
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
# A single-row file comes back 1-D from loadtxt; normalize to (n, 2).
if len(windows.shape) == 1:
    windows = windows.reshape(1, 2)
if len(artefacts.shape) == 1:
    artefacts = artefacts.reshape(1, 2)
if trig_in_ms:
    if comm.rank == 0:
        print_and_log(['Artefact times are read in ms'], 'debug', logger)
    # Convert ms -> samples. NOTE(review): unlike the else-branch these
    # arrays are NOT cast to int64 here (the float dtype is kept after the
    # in-place multiply) -- verify downstream indexing tolerates floats.
    artefacts[:, 1] *= numpy.int64(data_file.sampling_rate*1e-3)
    windows[:, 1] *= numpy.int64(data_file.sampling_rate*1e-3)
else:
    if comm.rank == 0:
        print_and_log(['Artefact times are read in timesteps'], 'debug', logger)
    artefacts = artefacts.astype(numpy.int64)
    windows = windows.astype(numpy.int64)
nb_stimuli = len(numpy.unique(artefacts[:, 0]))
# Every stimulus id appearing in the artefact file must have a window.
mytest = numpy.all(numpy.in1d(numpy.unique(artefacts[:, 0]), numpy.unique(windows[:, 0])))
if not mytest:
    if comm.rank == 0:
        print_and_log(['Error in the trigger file: not all artefacts are defined'], 'error', logger)
    sys.exit(0)
# --- Build and display a peri-trigger spike-count image. ---
# NOTE(review): interior of a larger (plotting) routine -- ``triggers`` and
# ``lims`` come from outside this chunk. Left byte-identical apart from
# restored indentation and these comments.
N_total = params.getint('data', 'N_total')
sampling_rate = params.getint('data', 'sampling_rate')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
spike_thresh = params.getfloat('detection', 'spike_thresh')
file_out_suff = params.get('data', 'file_out_suff')
N_t = params.getint('detection', 'N_t')
nodes, edges = get_nodes_and_edges(params)
chunk_size = N_t
if do_spatial_whitening:
    spatial_whitening = load_data(params, 'spatial_whitening')
if do_temporal_whitening:
    temporal_whitening = load_data(params, 'temporal_whitening')
thresholds = load_data(params, 'thresholds')
try:
    result = load_data(params, 'results')
except Exception:
    # No fitted results yet: fall back to empty containers.
    result = {'spiketimes' : {}, 'amplitudes' : {}}
# curve[trigger, electrode, lag]: spike counts around each trigger time.
curve = numpy.zeros((len(triggers), len(result['spiketimes'].keys()), lims[1]+lims[0]), dtype=numpy.int32)
count = 0
for count, t_spike in enumerate(triggers):
    for key in result['spiketimes'].keys():
        # Keys look like 'temp_<elec>'; take the numeric suffix.
        elec = int(key.split('_')[1])
        # NOTE(review): the upper bound uses lims[0], not lims[1], although
        # the lag axis is sized lims[1]+lims[0]; and the computed index
        # t_spike - spiketime can be negative (wrapping via numpy negative
        # indexing). Possibly intentional, possibly a latent bug -- confirm
        # against the duplicate fragment below before changing.
        idx = numpy.where((result['spiketimes'][key] > t_spike - lims[0]) & (result['spiketimes'][key] < t_spike + lims[0]))
        curve[count, elec, t_spike - result['spiketimes'][key][idx]] += 1
pylab.subplot(111)
# Average over triggers -> (electrode, lag) image.
pylab.imshow(numpy.mean(curve, 0), aspect='auto')
# --- Near-duplicate of the previous fragment, ending with ``return curve``:
# this is the TAIL of a function whose ``def`` line is outside this chunk.
# ``triggers`` and ``lims`` also come from outside. Left byte-identical
# apart from restored indentation and these comments.
do_spatial_whitening = params.getboolean('whitening', 'spatial')
spike_thresh = params.getfloat('detection', 'spike_thresh')
file_out_suff = params.get('data', 'file_out_suff')
N_t = params.getint('detection', 'N_t')
nodes, edges = get_nodes_and_edges(params)
chunk_size = N_t
if do_spatial_whitening:
    spatial_whitening = load_data(params, 'spatial_whitening')
# NOTE(review): do_temporal_whitening is read outside this fragment here.
if do_temporal_whitening:
    temporal_whitening = load_data(params, 'temporal_whitening')
thresholds = load_data(params, 'thresholds')
try:
    result = load_data(params, 'results')
except Exception:
    result = {'spiketimes' : {}, 'amplitudes' : {}}
# curve[trigger, electrode, lag]: spike counts around each trigger time.
curve = numpy.zeros((len(triggers), len(result['spiketimes'].keys()), lims[1]+lims[0]), dtype=numpy.int32)
count = 0
for count, t_spike in enumerate(triggers):
    for key in result['spiketimes'].keys():
        elec = int(key.split('_')[1])
        # Same lims[0]-as-upper-bound / possibly-negative-index pattern as
        # the fragment above -- see the NOTE there.
        idx = numpy.where((result['spiketimes'][key] > t_spike - lims[0]) & (result['spiketimes'][key] < t_spike + lims[0]))
        curve[count, elec, t_spike - result['spiketimes'][key][idx]] += 1
pylab.subplot(111)
pylab.imshow(numpy.mean(curve, 0), aspect='auto')
return curve
# --- Tail of a circus-plugin command-line entry point. ---
# NOTE(review): ``args``, ``parser``, ``logger``, ``resolve_name``,
# ``configure_logger`` and ``_str2cfg`` are defined before this chunk.
# Left byte-identical apart from restored indentation and these comments.
if args.version:
    print(__version__)
    sys.exit(0)
if args.plugin is None:
    # A plugin dotted name is mandatory; show usage and exit cleanly.
    parser.print_usage()
    sys.exit(0)
# Resolve the dotted path given on the command line to a plugin class.
factory = resolve_name(args.plugin)
# configure the logger
configure_logger(logger, args.loglevel, args.logoutput, name=factory.name)
# load the plugin and run it.
logger.info('Loading the plugin...')
logger.info('Endpoint: %r' % args.endpoint)
logger.info('Pub/sub: %r' % args.pubsub)
plugin = factory(args.endpoint, args.pubsub,
                 args.check_delay, args.ssh,
                 **_str2cfg(args.config))
logger.info('Starting')
try:
    # Blocks until interrupted; Ctrl-C is treated as a normal shutdown.
    plugin.start()
except KeyboardInterrupt:
    pass
finally:
    logger.info('Stopping')
    plugin.stop()
sys.exit(0)
help="log output")
parser.add_argument('--version', action='store_true',
default=False,
help='Displays Circus version and exits.')
parser.add_argument('--ssh', default=None, help='SSH Server')
args = parser.parse_args()
if args.version:
print(__version__)
sys.exit(0)
# configure the logger
configure_logger(logger, args.loglevel, args.logoutput)
stats = StatsStreamer(args.endpoint, args.pubsub, args.statspoint,
args.ssh)
# Register some sighandlers to stop the loop when killed
for sig in SysHandler.SIGNALS:
signal.signal(
sig, lambda *_: stats.loop.add_callback_from_signal(stats.stop)
)
try:
stats.start()
finally:
stats.stop()
sys.exit(0)