from . import evaluations as ev
from ..datasets.bnci import BNCI2014001
from ..viz import Results
from .motor_imagery import LeftRightImagery
import unittest
from pyriemann.spatialfilters import CSP
from pyriemann.estimation import Covariances
from sklearn.pipeline import make_pipeline
import os
from collections import OrderedDict
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
pipelines = OrderedDict()
pipelines['C'] = make_pipeline(Covariances('oas'), CSP(8), LDA())
d = BNCI2014001()
d.selected_events = {k: d.event_id[k] for k in ['left_hand', 'right_hand']}
class Test_CrossSess(unittest.TestCase):

    def tearDown(self):
        if os.path.isfile('results.hd5'):
            os.remove('results.hd5')

    def return_eval(self):
        return ev.CrossSessionEvaluation()

    def test_eval_results(self):
        e = self.return_eval()
        r = Results(e, 'results.hd5')
        p = LeftRightImagery(pipelines, e, [d])
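
# Standard entry point so the test module can be run directly.
if __name__ == '__main__':
    unittest.main()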
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1] - 2
# get epochs
epochs_data = epochs.get_data()
###############################################################################
# Pairwise distance based permutation test
###############################################################################
covest = Covariances()
Fs = 160
window = 2 * Fs
Nwindow = 20
Ns = epochs_data.shape[2]
step = int((Ns - window) / Nwindow)
time_bins = range(0, Ns - window, step)
pv = []
Fv = []
# For each time bin, estimate the stats
t_init = time()
for t in time_bins:
    covmats = covest.fit_transform(epochs_data[:, ::1, t:(t + window)])
    p_test = PermutationDistance(1000, metric='riemann', mode='pairwise')
    p, F = p_test.test(covmats, labels, verbose=False)
    pv.append(p)
    Fv.append(F)
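
# Report timing for the sliding-window test; a small addition mirroring the
# duration bookkeeping of the pairwise-distance example below.
duration = time() - t_init
print('elapsed: %.2f sec.' % duration)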
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1] - 2
# get epochs
epochs_data = epochs.get_data()
# compute covariance matrices
covmats = Covariances().fit_transform(epochs_data)
n_perms = 500
###############################################################################
# Pairwise distance based permutation test
###############################################################################
t_init = time()
p_test = PermutationDistance(n_perms, metric='riemann', mode='pairwise')
p, F = p_test.test(covmats, labels)
duration = time() - t_init
fig, axes = plt.subplots(1, 1, figsize=[6, 3], sharey=True)
p_test.plot(nbins=10, axes=axes)
plt.title('Pairwise distance - %.2f sec.' % duration)
print('p-value: %.3f' % p)
sns.despine()
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                   exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1] - 2
# get epochs
epochs_data = epochs.get_data()
# compute covariance matrices
covmats = Covariances().fit_transform(epochs_data)
session = np.array([1, 2, 3]).repeat(15)
p_test = PermutationTestTwoWay(5000)
p, F = p_test.test(covmats, session, labels, ['session', 'handsVsFeets'])
p_test.plot()
print(p_test.summary())
plt.show()
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from collections import OrderedDict

from sklearn.pipeline import make_pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from pyriemann.classification import MDM, TSclassifier

from moabb.datasets.bnci import BNCI2014001
from moabb.datasets.alex_mi import AlexMI
from moabb.datasets.physionet_mi import PhysionetMI
# NOTE: the context import path below is an assumption; it may differ across
# moabb versions
from moabb.contexts.motor_imagery import MotorImageryMultiClasses
datasets = [AlexMI(with_rest=True),
            BNCI2014001(),
            PhysionetMI(with_rest=True, feets=False)]
pipelines = OrderedDict()
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM())
pipelines['TS'] = make_pipeline(Covariances('oas'), TSclassifier())
pipelines['CSP+LDA'] = make_pipeline(Covariances('oas'), CSP(8), LDA())
context = MotorImageryMultiClasses(datasets=datasets, pipelines=pipelines)
results = context.evaluate(verbose=True)
for p in results.keys():
    results[p].to_csv('../../results/MotorImagery/MultiClass/%s.csv' % p)
results = pd.concat(results.values())
print(results.groupby('Pipeline').mean())
res = results.pivot(values='Score', columns='Pipeline')
sns.lmplot(data=res, x='CSP+LDA', y='TS', fit_reg=False)
plt.xlim(0.25, 1)
plt.ylim(0.25, 1)
plt.plot([0.25, 1], [0.25, 1], ls='--', c='k')
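
# Show the pipeline comparison figure when running as a script.
plt.show()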
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from pyriemann.spatialfilters import CSP
from pyriemann.estimation import Covariances
from sklearn.pipeline import make_pipeline
parameters = {'kernel': ('linear', 'rbf'), 'C': [0.1, 1, 10]}
clf = GridSearchCV(SVC(), parameters, cv=3)
pipe = make_pipeline(Covariances('oas'), CSP(6), clf)
# this is what will be loaded
PIPELINE = {'name': 'CSP + optSVM',
            'paradigms': ['LeftRightImagery'],
            'pipeline': pipe}
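
# Illustrative sketch of how a file exposing a PIPELINE dict like the one
# above can be consumed; the file name 'csp_optsvm.py' is hypothetical and
# this is not necessarily the loader moabb itself uses.
import importlib.util

spec = importlib.util.spec_from_file_location('pipe_def', 'csp_optsvm.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
print(module.PIPELINE['name'], module.PIPELINE['paradigms'])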
# ----------------
#
# Pipelines must be a dict of scikit-learn Pipeline instances.
#
# The CSP implementation from MNE is used. We selected 8 CSP components, as
# usually done in the literature.
#
# The Riemannian geometry pipeline consists of covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.
pipelines = {}
pipelines['CSP + LDA'] = make_pipeline(CSP(n_components=8),
                                       LDA())
pipelines['RG + LR'] = make_pipeline(Covariances(),
                                     TangentSpace(),
                                     LogisticRegression())
pipelines['CSP + LR'] = make_pipeline(CSP(n_components=8),
                                      LogisticRegression())
pipelines['RG + LDA'] = make_pipeline(Covariances(),
                                      TangentSpace(),
                                      LDA())
##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001).
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                baseline=None, preload=True, verbose=False)
labels = epochs.events[:, -1] - 2
# cross validation
cv = KFold(n_splits=10, shuffle=True, random_state=42)
# get epochs
epochs_data_train = 1e6 * epochs.get_data()
# compute covariance matrices
cov_data_train = Covariances().fit_transform(epochs_data_train)
###############################################################################
# Classification with Minimum distance to mean
mdm = MDM(metric=dict(mean='riemann', distance='riemann'))
# Use scikit-learn Pipeline with cross_val_score function
scores = cross_val_score(mdm, cov_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("MDM Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
###############################################################################
# Classification with Tangent Space Logistic Regression
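
# A minimal sketch of this step, reusing the covariance features and CV
# defined above: pyriemann's TSclassifier chains tangent-space mapping with a
# logistic regression.
from pyriemann.classification import TSclassifier

ts_clf = TSclassifier()
scores = cross_val_score(ts_clf, cov_data_train, labels, cv=cv, n_jobs=1)
print("TS + LR Classification accuracy: %f / Chance level: %f" %
      (np.mean(scores), class_balance))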
# avoid classification of evoked responses by using epochs that start 1s
# after cue onset.
tmin, tmax = 1., 2.
event_id = dict(hands=2, feet=3)
subjects = list(range(1, 110))
# remove the subjects whose files MNE cannot read
subject_to_remove = [88, 89, 92, 100]
for s in subject_to_remove:
    if s in subjects:
        subjects.remove(s)
runs = [6, 10, 14] # motor imagery: hands vs feet
classifiers = {
    'mdm': make_pipeline(Covariances(), MDM(metric='riemann')),
    'fgmdm': make_pipeline(Covariances(), FgMDM(metric='riemann')),
    'tsLR': make_pipeline(Covariances(), TangentSpace(), LogisticRegression()),
    'csp': make_pipeline(CSP(n_components=4, reg=None, log=True), LDA())
}
# cross validation
results = np.zeros((len(subjects), len(classifiers)))
for s, subject in enumerate(subjects):
    print('Processing Subject %s' % subject)
    raw_files = [read_raw_edf(f, preload=True, verbose=False)
                 for f in eegbci.load_data(subject, runs)]
    raw = concatenate_raws(raw_files)
    picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
                       exclude='bads')
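    # Assumed continuation of the per-subject loop, following the epoching and
    # cross-validation pattern of the snippets above; the stim channel name,
    # event codes and 10-fold CV are assumptions.
    events = find_events(raw, shortest_event=0, stim_channel='STI 014')
    epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=None, preload=True, verbose=False)
    epochs_data = epochs.get_data()
    labels = epochs.events[:, -1] - 2
    for c, clf in enumerate(classifiers.values()):
        results[s, c] = cross_val_score(clf, epochs_data, labels,
                                        cv=10, n_jobs=1).mean()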
# ----------------
#
# Pipelines must be a dict of scikit-learn Pipeline instances.
#
# The CSP implementation from MNE is used. We selected 8 CSP components, as
# usually done in the literature.
#
# The Riemannian geometry pipeline consists of covariance estimation, tangent
# space mapping and finally a logistic regression for the classification.
pipelines = {}
pipelines['CSP + LDA'] = make_pipeline(CSP(n_components=8),
                                       LDA())
pipelines['RG + LR'] = make_pipeline(Covariances(),
                                     TangentSpace(),
                                     LogisticRegression(solver='lbfgs'))
##############################################################################
# Evaluation
# ----------
#
# We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001).
# The evaluation will return a dataframe containing a single AUC score for
# each subject / session of the dataset, and for each pipeline.
#
# Results are saved into the database, so that if you add a new pipeline, the
# evaluation will not run again unless a parameter has changed. Results can
# be overwritten if necessary.
paradigm = LeftRightImagery()
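
# A minimal sketch of the evaluation described above (assumed moabb API:
# CrossSessionEvaluation.process() returns the score dataframe; the import
# paths and argument names may differ across moabb versions).
from moabb.datasets import BNCI2014001
from moabb.evaluations import CrossSessionEvaluation

dataset = BNCI2014001()
evaluation = CrossSessionEvaluation(paradigm=paradigm, datasets=[dataset],
                                    overwrite=False)
results = evaluation.process(pipelines)
print(results.groupby('pipeline')['score'].mean())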