How to use braindecode - 10 common examples

To help you get started, we’ve selected a few braindecode examples based on popular ways the library is used in public projects.


Example 1: TNTLFreiburg/braindecode, braindecode/models/eegnet.py
        model.add_module(
            "conv_3",
            nn.Conv2d(
                n_filters_2,
                n_filters_3,
                self.third_kernel_size,
                stride=1,
                padding=(self.third_kernel_size[0] // 2, 0),
                bias=True,
            ),
        )
        model.add_module(
            "bnorm_3",
            nn.BatchNorm2d(n_filters_3, momentum=0.01, affine=True, eps=1e-3),
        )
        model.add_module("elu_3", Expression(elu))
        model.add_module(
            "pool_3", pool_class(kernel_size=(2, 4), stride=(2, 4))
        )
        model.add_module("drop_3", nn.Dropout(p=self.drop_prob))

        out = model(
            np_to_var(
                np.ones(
                    (1, self.in_chans, self.input_time_length, 1),
                    dtype=np.float32,
                )
            )
        )
        n_out_virtual_chans = out.cpu().data.numpy().shape[2]

        if self.final_conv_length == "auto":
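The fragment above builds the last EEGNet block and then pushes a dummy array of ones through the partially built network so the output shape can be read off and used to size the final layer. A minimal stand-alone sketch of that trick in plain PyTorch (the layer sizes below are illustrative, not EEGNet's actual configuration):

import numpy as np
import torch
from torch import nn

model = nn.Sequential(
    nn.Conv2d(22, 16, kernel_size=(11, 1), stride=1, padding=(5, 0), bias=True),
    nn.BatchNorm2d(16, momentum=0.01, affine=True, eps=1e-3),
    nn.ELU(),
    nn.MaxPool2d(kernel_size=(4, 1), stride=(4, 1)),
    nn.Dropout(p=0.25),
)

# Dummy input shaped like the snippet's: (batch, in_chans, input_time_length, 1)
dummy = torch.from_numpy(np.ones((1, 22, 450, 1), dtype=np.float32))
with torch.no_grad():
    out = model(dummy)
n_out_virtual_chans = out.shape[2]  # read the remaining time dimension, as in the snippet
print(out.shape, n_out_virtual_chans)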
Example 2: TNTLFreiburg/braindecode, test/acceptance_tests/from_notebooks/test_cropped_decoding.py
eog=False,
                                      exclude='bads')

    # Extract trials, only using EEG channels
    epoched = mne.Epochs(raw, events, dict(hands=2, feet=3), tmin=1, tmax=4.1,
                         proj=False, picks=eeg_channel_inds,
                         baseline=None, preload=True)
    import numpy as np
    from braindecode.datautil.signal_target import SignalAndTarget
    # Convert data from volts to microvolts (multiply by 1e6)
    # PyTorch expects float32 inputs and int64 labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])
    from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds
    from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
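This example (and several below) converts MNE epochs into plain numpy arrays, scales them, casts them to the dtypes PyTorch expects, and wraps the first 60 trials as a training set. A compact stand-in with random data instead of real recordings (the shapes are assumptions for illustration only):

import numpy as np
from braindecode.datautil.signal_target import SignalAndTarget

n_trials, n_chans, n_times = 90, 64, 497        # trials x channels x time (assumed)
data_volts = np.random.randn(n_trials, n_chans, n_times) * 1e-5   # fake EEG in volts
labels = np.random.randint(0, 2, size=n_trials)

X = (data_volts * 1e6).astype(np.float32)   # scale to microvolts, float32 for PyTorch
y = labels.astype(np.int64)                 # int64 class labels

train_set = SignalAndTarget(X[:60], y=y[:60])
test_set = SignalAndTarget(X[60:], y=y[60:])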
Example 3: TNTLFreiburg/braindecode, test/unit_tests/datautil/test_trial_segment.py
def check_signal_target_from_start_and_ival(data, events, fs, name_to_codes,
                                            epoch_ival_ms, expected_X, expected_y):
    data = np.array(data)
    events = np.array(events)
    name_to_codes = OrderedDict(name_to_codes)
    out_set = _create_signal_target_from_start_and_ival(
        data, events, fs, name_to_codes, epoch_ival_ms,
        one_hot_labels=False, one_label_per_trial=True)
    np.testing.assert_array_equal(out_set.y, expected_y)
    np.testing.assert_allclose(out_set.X, expected_X)
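The helper above exercises _create_signal_target_from_start_and_ival, which cuts trials out of a continuous signal using start markers plus an interval given in milliseconds. The core arithmetic is a ms-to-samples conversion; a simplified illustration (not braindecode's implementation):

import numpy as np

fs = 100.0                      # sampling rate in Hz
epoch_ival_ms = [-200, 400]     # window around each start marker, in ms
start_sample = 50               # sample index of one event marker

i_start = start_sample + int(np.round(epoch_ival_ms[0] * fs / 1000.0))  # 30
i_stop = start_sample + int(np.round(epoch_ival_ms[1] * fs / 1000.0))   # 90

data = np.random.randn(1000, 3)   # time x channels continuous signal
trial = data[i_start:i_stop]      # 60 samples = 600 ms at 100 Hz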
Example 4: TNTLFreiburg/braindecode, test/unit_tests/datautil/test_trial_segment.py
events,
        fs,
        break_start_code,
        break_stop_code,
        name_to_start_codes,
        name_to_stop_codes,
        min_break_length_ms,
        max_break_length_ms,
        break_start_offset_ms,
        break_stop_offset_ms,
        expected_events,
        ):
    events = np.array(events)
    name_to_start_codes = OrderedDict(name_to_start_codes)
    name_to_stop_codes = OrderedDict(name_to_stop_codes)
    events_with_breaks = add_breaks(
        events, fs, break_start_code, break_stop_code, name_to_start_codes,
        name_to_stop_codes, min_break_length_ms=min_break_length_ms,
        max_break_length_ms=max_break_length_ms,
        break_start_offset_ms=break_start_offset_ms, break_stop_offset_ms=break_stop_offset_ms)
    np.testing.assert_array_equal(events_with_breaks,
                                  expected_events)
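add_breaks inserts artificial break markers into the event array when the pause between one trial's stop marker and the next trial's start marker falls inside the configured length bounds. The length check itself is simple; a toy illustration with assumed [sample_index, code] event rows:

import numpy as np

fs = 250.0
events = np.array([[100, 1],     # trial 1 start
                   [350, -1],    # trial 1 stop
                   [900, 1],     # trial 2 start
                   [1150, -1]])  # trial 2 stop

gap_samples = events[2, 0] - events[1, 0]   # samples between stop and next start
gap_ms = gap_samples / fs * 1000.0          # 2200 ms here
min_break_length_ms, max_break_length_ms = 1000, 5000
is_break = min_break_length_ms <= gap_ms <= max_break_length_ms   # True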
Example 5: TNTLFreiburg/braindecode, test/acceptance_tests/from_notebooks/test_cropped_decoding.py
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds
    from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    from torch import optim

    optimizer = optim.Adam(model.parameters())
    from braindecode.torch_ext.util import np_to_var
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
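set_random_seeds fixes the random state so runs are reproducible. If you want the same effect without braindecode, the usual plain-PyTorch equivalent looks roughly like this (a generic sketch, not braindecode's implementation):

import random
import numpy as np
import torch

seed = 20170629
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)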
Example 6: TNTLFreiburg/braindecode, test/acceptance_tests/from_notebooks/test_cropped_decoding.py
from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    from torch import optim

    optimizer = optim.Adam(model.parameters())
    from braindecode.torch_ext.util import np_to_var
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))
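The printed n_preds_per_input drives the cropped-decoding bookkeeping: with dense predictions the model emits one output per time step, so each prediction effectively sees a window of input_time_length - n_preds_per_input + 1 samples. A back-of-the-envelope sketch (the concrete numbers below are assumptions; take n_preds_per_input from the print above):

input_time_length = 450
n_preds_per_input = 97            # placeholder; use the value printed above
receptive_field = input_time_length - n_preds_per_input + 1   # samples seen per prediction

n_trial_samples = 497             # e.g. ~3.1 s of data at 160 Hz (assumed)
# Densely tiled crops give roughly one prediction per sample after warm-up:
n_preds_per_trial = n_trial_samples - receptive_field + 1
print(receptive_field, n_preds_per_trial)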
Example 7: TNTLFreiburg/braindecode, test/unit_tests/datautil/test_trial_segment.py
def check_cnt_y_start_stop_samples(n_samples, events, fs, epoch_ival_ms,
                                   name_to_start_codes,
                                   name_to_stop_codes, cnt_y, start_stop):

    cnt_y = np.array(cnt_y).T
    real_cnt_y, real_start_stop = _create_cnt_y_and_trial_bounds_from_start_stop(
        n_samples, events, fs, name_to_start_codes, epoch_ival_ms,
        name_to_stop_codes)
    np.testing.assert_array_equal(cnt_y, real_cnt_y)
    np.testing.assert_array_equal(start_stop, real_start_stop)
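_create_cnt_y_and_trial_bounds_from_start_stop produces a continuous per-sample label array (cnt_y) plus start/stop sample bounds for each trial. A toy numpy illustration of what such a label array looks like (values made up):

import numpy as np

n_samples, n_classes = 20, 2
cnt_y = np.zeros((n_samples, n_classes), dtype=np.int64)
start_stop = [(3, 8, 0), (12, 17, 1)]    # (i_start, i_stop, class) per trial
for i_start, i_stop, i_class in start_stop:
    cnt_y[i_start:i_stop, i_class] = 1   # mark samples belonging to each trial's class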
Example 8: TNTLFreiburg/braindecode, test/acceptance_tests/from_notebooks/test_cropped_decoding.py
    # Convert data from volts to microvolts (multiply by 1e6)
    # PyTorch expects float32 inputs and int64 labels.
    X = (epoched.get_data() * 1e6).astype(np.float32)
    y = (epoched.events[:, 2] - 2).astype(np.int64)  # 2,3 -> 0,1

    train_set = SignalAndTarget(X[:60], y=y[:60])
    test_set = SignalAndTarget(X[60:], y=y[60:])
    from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
    from torch import nn
    from braindecode.torch_ext.util import set_random_seeds
    from braindecode.models.util import to_dense_prediction_model

    # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = False
    set_random_seeds(seed=20170629, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_time_length = 450
    n_classes = 2
    in_chans = train_set.X.shape[1]
    # final_conv_length determines the size of the receptive field of the ConvNet
    model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
                            input_time_length=input_time_length,
                            final_conv_length=12).create_network()
    to_dense_prediction_model(model)

    if cuda:
        model.cuda()

    from torch import optim
Example 9: TNTLFreiburg/braindecode, test/acceptance_tests/from_notebooks/test_cropped_decoding.py
model.cuda()

    from torch import optim

    optimizer = optim.Adam(model.parameters())
    from braindecode.torch_ext.util import np_to_var
    # determine output size
    test_input = np_to_var(
        np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
    if cuda:
        test_input = test_input.cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    print("{:d} predictions per input/trial".format(n_preds_per_input))
    from braindecode.datautil.iterators import CropsFromTrialsIterator
    iterator = CropsFromTrialsIterator(batch_size=32,
                                       input_time_length=input_time_length,
                                       n_preds_per_input=n_preds_per_input)
    from braindecode.torch_ext.util import np_to_var, var_to_np
    import torch.nn.functional as F
    from numpy.random import RandomState
    import torch as th
    from braindecode.experiments.monitors import compute_preds_per_trial_from_crops
    rng = RandomState((2017, 6, 30))
    losses = []
    accuracies = []
    for i_epoch in range(4):
        # Set model to training mode
        model.train()
        for batch_X, batch_y in iterator.get_batches(train_set, shuffle=False):
            net_in = np_to_var(batch_X)
            if cuda:
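The training loop is cut off by the example listing right after preparing the batch. In the cropped-decoding tutorials, one training step typically finishes by moving the data to the GPU, averaging the per-crop log-probabilities over the time dimension, and taking an optimizer step. A hedged sketch of that remainder, reusing the imports already shown above and assuming the dense model output has shape (batch, classes, n_preds_per_input):

            if cuda:
                net_in = net_in.cuda()
            net_targets = np_to_var(batch_y)
            if cuda:
                net_targets = net_targets.cuda()
            optimizer.zero_grad()
            outputs = model(net_in)
            # average per-crop log-probabilities over the time dimension before the loss
            outputs = th.mean(outputs, dim=2, keepdim=False)
            loss = F.nll_loss(outputs, net_targets)
            loss.backward()
            optimizer.step()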
Example 10: TNTLFreiburg/braindecode, examples/bcic_iv_2a.py
)
    assert len(train_cnt.ch_names) == 22
    # let's convert to microvolts for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
            high_cut_hz,
            train_cnt.info["sfreq"],
            filt_order=3,
            axis=1,
        ),
        train_cnt,
    )
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T,
            factor_new=factor_new,
            init_block_size=init_block_size,
            eps=1e-4,
        ).T,
        train_cnt,
    )

    test_cnt = test_cnt.drop_channels(["EOG-left", "EOG-central", "EOG-right"])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(
            a,
            low_cut_hz,
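This last example bandpass-filters the continuous signal before standardizing it. bandpass_cnt is braindecode's own helper; if you only need the general idea, a Butterworth bandpass along the time axis looks like this (a scipy-based sketch, not braindecode's implementation; the cut-off values are placeholders):

import numpy as np
from scipy.signal import butter, filtfilt

def bandpass(data, low_hz, high_hz, fs, order=3, axis=1):
    # zero-phase Butterworth bandpass applied along the time axis
    nyq = fs / 2.0
    b, a = butter(order, [low_hz / nyq, high_hz / nyq], btype="bandpass")
    return filtfilt(b, a, data, axis=axis)

fake_cnt = np.random.randn(22, 5000)                 # channels x time (assumed layout)
filtered = bandpass(fake_cnt, 4.0, 38.0, fs=250.0)   # e.g. 4-38 Hz at 250 Hz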