# compute rates
auth_rates = get_auth_rates(TP, FP, TN, FN, thresholds).as_dict()
id_rates = get_id_rates(H, M, R, ns, thresholds).as_dict()
output = {
'authentication': {
'confusionMatrix': {'TP': TP, 'FP': FP, 'TN': TN, 'FN': FN},
'rates': auth_rates,
},
'identification': {
'confusionMatrix': {'H': H, 'M': M, 'R': R, 'CM': CM},
'rates': id_rates,
},
}
return utils.ReturnTuple((output,), ('assessment',))
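# A minimal sketch, assuming per-threshold count arrays, of how rates such as
# those returned by get_auth_rates() could be derived from the confusion
# matrix above. The helper name and the exact rate set are illustrative, not
# biosppy's implementation.
import numpy as np

def sketch_auth_rates(TP, FP, TN, FN):
    TP, FP, TN, FN = (np.asarray(a, dtype=float) for a in (TP, FP, TN, FN))
    with np.errstate(divide='ignore', invalid='ignore'):
        FAR = FP / (FP + TN)                    # false acceptance rate
        FRR = FN / (FN + TP)                    # false rejection rate
        ACC = (TP + TN) / (TP + FP + TN + FN)   # overall accuracy
    return {'FAR': FAR, 'FRR': FRR, 'Acc': ACC}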
if alarm is False:
    if tf >= threshold:  # alarm time
        alarm_time = k
        alarm = True
else:  # now we have to check for the remaining rule to be met - duration of active state
    if tf >= threshold:
        state_duration += 1
        if state_duration == active_state_duration:
            onset_time_list.append(alarm_time)
            onset = True
            alarm = False
            state_duration = 0
onsets = np.union1d(onset_time_list, offset_time_list)
return utils.ReturnTuple((onsets, tf_list), ('onsets', 'processed'))
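# Hedged, self-contained sketch of the threshold + active-state-duration rule
# used above: an onset is only confirmed once the test function stays at or
# above the threshold for `active_state_duration` consecutive samples. All
# names here are illustrative, not the library's API.
import numpy as np

def sketch_onsets(tf_list, threshold, active_state_duration):
    onsets = []
    alarm = False
    state_duration = 0
    alarm_time = 0
    for k, tf in enumerate(tf_list):
        if tf >= threshold:
            if not alarm:
                alarm_time = k                      # candidate onset
                alarm = True
            state_duration += 1
            if state_duration == active_state_duration:
                onsets.append(alarm_time)           # duration rule met: confirm onset
        else:
            alarm = False
            state_duration = 0
    return np.array(onsets, dtype='int')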
Parameters
----------
fraction : float, optional
Fraction of samples to select from training data.
"""
ths = self.get_thresholds(force=True)
# gather data to test
data = {}
for subject, label in six.iteritems(self._subject2label):
# select a random fraction of the training data
aux = self.io_load(label)
indx = list(range(len(aux)))
use, _ = utils.random_fraction(indx, fraction, sort=True)
data[subject] = aux[use]
# evaluate classifier
_, res = self.evaluate(data, ths)
# choose thresholds at EER
for subject, label in six.iteritems(self._subject2label):
EER_auth = res['subject'][subject]['authentication']['rates']['EER']
self.set_auth_thr(label, EER_auth[self.EER_IDX, 0], ready=True)
EER_id = res['subject'][subject]['identification']['rates']['EER']
self.set_id_thr(label, EER_id[self.EER_IDX, 0], ready=True)
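# Minimal sketch (an assumption, not biosppy's internals) of how an EER
# operating point can be located: the threshold at which the false acceptance
# and false rejection curves cross.
import numpy as np

def sketch_eer(thresholds, FAR, FRR):
    FAR, FRR = np.asarray(FAR), np.asarray(FRR)
    idx = np.argmin(np.abs(FAR - FRR))              # closest crossing point
    return thresholds[idx], (FAR[idx] + FRR[idx]) / 2.0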
Notes
-----
.. If 'path' is a file handler, 'efile' will be ignored.
.. Creates file with automatic name generation if only an output path is provided.
.. Output file name may vary from the provided file name due to changes made to avoid overwriting existing
   files (your results are important, after all!).
.. Existing files will not be overwritten; instead, an (incremented) identifier (e.g. '_1') will be appended
   to the end of the provided file name.
.. You can find the documentation for this function here:
https://pyhrv.readthedocs.io/en/latest/_pages/api/tools.html#hrv-export-hrv-export
"""
# Check input (must be a dict or a biosppy.utils.ReturnTuple object)
if results is None:
    raise TypeError("No results data provided. Please specify input data.")
elif type(results) is not dict and not isinstance(results, utils.ReturnTuple):
    raise TypeError("Unsupported data format: %s. "
                    "Please provide input data as Python dictionary or "
                    "biosppy.utils.ReturnTuple object." % type(results))
if path is None:
raise TypeError("No file name or directory provided. Please specify at least an output directory.")
elif type(path) is str:
if efile is None:
# Generate automatic file name
efile = 'hrv_export' + dt.datetime.now().strftime('_%Y-%m-%d_%H-%M-%S') + '.json'
path += efile
else:
# Check if the file name has a '.json' extension (note: splitext keeps the leading dot)
_, fformat = os.path.splitext(efile)
if fformat != '.json':
path = path + efile + '.json'
else:
    path += efile
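# Hedged sketch of the non-overwriting behaviour described in the notes
# above: if the target file already exists, append an incrementing
# identifier ('_1', '_2', ...) to the file name. The helper name is
# illustrative.
import os

def sketch_unique_path(path):
    base, ext = os.path.splitext(path)
    candidate, i = path, 0
    while os.path.isfile(candidate):
        i += 1
        candidate = '%s_%d%s' % (base, i, ext)
    return candidate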
"""Deserialize data from a file using sklearn's joblib.
Parameters
----------
path : str
Source path.
Returns
-------
data : object
Deserialized object.
"""
# normalize path
path = utils.normpath(path)
return joblib.load(path)
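# Companion sketch: the matching serializer would use joblib.dump. The
# function name is an assumption mirroring `deserialize` above.
import joblib
from biosppy import utils

def serialize(data, path):
    """Serialize data to a file using sklearn's joblib."""
    path = utils.normpath(path)
    joblib.dump(data, path)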
nperseg = 300
# Compute power spectral density estimation (where the magic happens)
frequencies, powers = welch(
x=nn_interpol,
fs=fs,
window=window,
nperseg=nperseg,
nfft=nfft,
scaling='density'
)
# Metadata
args = (nfft, window, fs, 'cubic')
names = ('fft_nfft', 'fft_window', 'fft_resampling_frequency', 'fft_interpolation',)
meta = utils.ReturnTuple(args, names)
if mode not in ['normal', 'dev', 'devplot']:
warnings.warn("Unknown mode '%s'. Will proceed with 'normal' mode." % mode, stacklevel=2)
mode = 'normal'
# Normal Mode:
# Returns frequency parameters, PSD plot figure and no frequency & power series/arrays
if mode == 'normal':
# Compute frequency parameters
params, freq_i = _compute_parameters('fft', frequencies, powers, fbands)
# Plot PSD
figure = _plot_psd('fft', frequencies, powers, freq_i, params, show, show_param, legend)
figure = utils.ReturnTuple((figure, ), ('fft_plot', ))
# Output
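# Hedged sketch of the band-power step that a helper like
# _compute_parameters() performs on the Welch PSD above: integrate the power
# density over each frequency band. The VLF/LF/HF limits below are the
# conventional HRV values and are an assumption here.
import numpy as np

def sketch_band_powers(frequencies, powers, fbands=None):
    if fbands is None:
        fbands = {'vlf': (0.00, 0.04), 'lf': (0.04, 0.15), 'hf': (0.15, 0.40)}
    out = {}
    for band, (lo, hi) in fbands.items():
        sel = (frequencies >= lo) & (frequencies < hi)
        out[band] = np.trapz(powers[sel], frequencies[sel])  # area under the PSD
    return out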
# plot
if show:
plotting.plot_emg(ts=ts,
sampling_rate=1000.,
raw=signal,
filtered=filtered,
processed=None,
onsets=onsets,
path=None,
show=True)
# output
args = (ts, filtered, onsets)
names = ('ts', 'filtered', 'onsets')
return utils.ReturnTuple(args, names)
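# Illustrative end-to-end call, assuming the standard biosppy entry point
# emg.emg(); the synthetic burst signal below exists only for demonstration.
import numpy as np
from biosppy.signals import emg

fs = 1000.0
demo = np.random.randn(5 * int(fs)) * 0.05
demo[2000:2500] += np.random.randn(500)             # simulated muscle activation burst
ts, filtered, onsets = emg.emg(signal=demo, sampling_rate=fs, show=False)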
if metric_args is None:
metric_args = {}
# compute distances
D = metrics.pdist(data, metric=metric, **metric_args)
D = metrics.squareform(D)
# fit
db = skc.DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed')
labels = db.fit_predict(D)
# get cluster indices
clusters = _extract_clusters(labels)
return utils.ReturnTuple((clusters,), ('clusters',))
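# Minimal usage sketch of the same precomputed-distance DBSCAN idea, calling
# scipy and scikit-learn directly on toy data (the eps/min_samples values are
# illustrative).
import numpy as np
from scipy.spatial import distance
from sklearn import cluster as skc

X = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 5.0])
D = distance.squareform(distance.pdist(X, metric='euclidean'))
labels = skc.DBSCAN(eps=1.0, min_samples=5, metric='precomputed').fit_predict(D)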
start = idx[-1] + wrange
else:
start += size
# stop condition
if start > length:
break
# update stop
stop += size
if stop > length:
stop = length
idx = np.array(idx, dtype='int')
return utils.ReturnTuple((idx,), ('onsets',))
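# Self-contained sketch of the windowed search pattern above: slide a window
# of `size` samples over the signal, record the first detection in each
# window, and jump ahead by `wrange` after a hit to avoid duplicate onsets.
# All names are illustrative.
import numpy as np

def sketch_windowed_search(signal, size, threshold, wrange):
    length = len(signal)
    idx = []
    start, stop = 0, size
    while start < length:
        hits = np.nonzero(signal[start:stop] >= threshold)[0]
        if len(hits) > 0:
            idx.append(start + hits[0])
            start = idx[-1] + wrange                # skip past the detection
        else:
            start += size
        stop = min(start + size, length)            # update stop, clipped to signal end
    return np.array(idx, dtype='int')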