# (scrape artifact, not part of the tutorial) Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
for u in [15,16,17]:
st = np.random.randint(0, high=nframes, size=35)
sorting_err.add_unit(u, st)
return sorting_true, sorting_err
if __name__ == '__main__':
    # Quick sanity check when this module is run directly: build the
    # ground-truth / erroneous sorting pair and show the ordered agreement
    # matrix so the injected errors are visible.
    # NOTE: the body below was de-indented in the original (SyntaxError);
    # indentation restored, statements unchanged.
    sorting_true, sorting_err = generate_erroneous_sorting()
    comp = sc.compare_sorter_to_ground_truth(sorting_true, sorting_err, exhaustive_gt=True)
    sw.plot_agreement_matrix(comp, ordered=True)
    plt.show()
from generate_erroneous_sorting import generate_erroneous_sorting
##############################################################################
# Here the agreement matrix between the ground truth and the erroneous
# sorting, with units left in their original (unordered) arrangement.
sorting_true, sorting_err = generate_erroneous_sorting()
comp = sc.compare_sorter_to_ground_truth(sorting_true, sorting_err, exhaustive_gt=True)
sw.plot_agreement_matrix(comp, ordered=False)
##############################################################################
# Here the same matrix but **ordered**.
# It is now quite trivial to check that the fake injected errors are highlighted here.
sw.plot_agreement_matrix(comp, ordered=True)
##############################################################################
# Here we can see which units are well detected with 'accuracy' > 0.75
# (with the erroneous sorting above, only units 1, 2 and 3 qualify).
print('well_detected', comp.get_well_detected_units(well_detected_score=0.75))
##############################################################################
# Here we can explore **"false positive units"**: units that do not exist in the ground truth.
print('false_positive', comp.get_false_positive_units(redundant_score=0.2))
##############################################################################
# Here we can explore **"redundant units"**: units that match the same
# ground-truth unit as another tested unit (duplicates), above `redundant_score`.
print('redundant', comp.get_redundant_units(redundant_score=0.2))
##############################################################################
# Generate a small synthetic recording plus its ground-truth sorting, then
# run MountainSort4 on it so we can compare a real sorter's output.
recording, sorting_true = se.example_datasets.toy_example(num_channels=4, duration=10, seed=0)
sorting_MS4 = sorters.run_mountainsort4(recording)
##############################################################################
# Compare the MountainSort4 output against the ground truth; `exhaustive_gt=True`
# means the ground truth is assumed to contain every unit present in the recording.
cmp_gt_MS4 = sc.compare_sorter_to_ground_truth(sorting_true, sorting_MS4, exhaustive_gt=True)
##############################################################################
# To have an overview of the match we can use the unordered agreement matrix
sw.plot_agreement_matrix(cmp_gt_MS4, ordered=False)
##############################################################################
# or ordered
sw.plot_agreement_matrix(cmp_gt_MS4, ordered=True)
##############################################################################
# This function first matches the ground-truth and spike sorted units, and
# then it computes several performance metrics.
#
# Once the spike trains are matched, each spike is labelled as:
#
# - true positive (tp): spike found both in :code:`gt_sorting` and :code:`tested_sorting`
# - false negative (fn): spike found in :code:`gt_sorting`, but not in :code:`tested_sorting`
# - false positive (fp): spike found in :code:`tested_sorting`, but not in :code:`gt_sorting`
# - misclassification errors (cl): spike found in :code:`gt_sorting`, not in
#   :code:`tested_sorting`, found in another unit
comp_MS4 = sc.compare_sorter_to_ground_truth(sorting_true, sorting_MS4)
# NOTE(review): `sorting_KL` is never defined in this file — presumably a
# Klusta run (e.g. `sorters.run_klusta(recording)`) was dropped; confirm and restore it.
comp_KL = sc.compare_sorter_to_ground_truth(sorting_true, sorting_KL)
##############################################################################
# plot_confusion_matrix()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# Confusion matrices for each sorter vs ground truth (cell counts hidden).
w_comp_MS4 = sw.plot_confusion_matrix(comp_MS4, count_text=False)
w_comp_KL = sw.plot_confusion_matrix(comp_KL, count_text=False)
##############################################################################
# plot_agreement_matrix()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
w_agr_MS4 = sw.plot_agreement_matrix(comp_MS4, count_text=False)
##############################################################################
# plot_sorting_performance()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can also plot a performance metric (e.g. accuracy, recall, precision) with respect to a quality metric, for
# example signal-to-noise ratio. Quality metrics can be computed using the :code:`toolkit.validation` submodule
import spikeinterface.toolkit as st
# `save_as_property=True` stores the SNR on the sorting's units so the widget
# below can look it up via property_name='snr'.
snrs = st.validation.compute_snrs(sorting_true, recording, save_as_property=True)
w_perf = sw.plot_sorting_performance(comp_MS4, property_name='snr', metric='accuracy')
##############################################################################
# Widgets using MultiSortingComparison