How to use skorch - 10 common examples

To help you get started, we’ve selected a few skorch examples, based on popular ways it is used in public projects.


github spring-epfl / mia / tests / test_estimators.py
def torch_attack_model_fn():
    model = skorch.NeuralNetClassifier(
        module=AttackNet, max_epochs=5, criterion=nn.BCELoss, train_split=None
    )
    return model
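
The factory returns a scikit-learn compatible classifier, so it can be fit and scored like any sklearn estimator. A minimal usage sketch with placeholder data (``AttackNet`` is the module defined in that test suite; ``nn.BCELoss`` expects float targets shaped like the module's output):

import numpy as np

model = torch_attack_model_fn()
X = np.random.rand(128, 3).astype(np.float32)         # placeholder attack features
y = np.random.randint(0, 2, 128).astype(np.float32)   # placeholder membership labels
model.fit(X, y)
scores = model.predict_proba(X)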
github skorch-dev / skorch / skorch / callbacks / training.py
    Examples
    --------

    Use ``Initializer`` to initialize all dense layer weights with
    values sampled from a uniform distribution at the beginning of
    the first epoch:

    >>> init_fn = partial(torch.nn.init.uniform_, a=-1e-3, b=1e-3)
    >>> cb = Initializer('dense*.weight', fn=init_fn)
    >>> net = Net(myModule, callbacks=[cb])
    """
    def __init__(self, *args, **kwargs):
        kwargs['at'] = kwargs.get('at', 1)
        super().__init__(*args, **kwargs)


class LoadInitState(Callback):
    """Loads the model, optimizer, and history from a checkpoint into a
    :class:`.NeuralNet` when training begins.

    Examples
    --------

    Consider running the following example multiple times:

    >>> cp = Checkpoint(monitor='valid_loss_best')
    >>> load_state = LoadInitState(cp)
    >>> net = NeuralNet(..., callbacks=[cp, load_state])
    >>> net.fit(X, y)

    On the first run, the :class:`.Checkpoint` saves the model, optimizer, and
    history when the validation loss is minimized. During the first run,
    there are no files on disk, thus :class:`.LoadInitState` will
    not do anything.
github skorch-dev / skorch / skorch / callbacks / logging.py
    def on_epoch_end(self, net, **kwargs):
        data = net.history[-1]
        verbose = net.verbose
        tabulated = self.table(data)

        if self.first_iteration_:
            header, lines = tabulated.split('\n', 2)[:2]
            self._sink(header, verbose)
            self._sink(lines, verbose)
            self.first_iteration_ = False

        self._sink(tabulated.rsplit('\n', 1)[-1], verbose)
        if self.sink is print:
            sys.stdout.flush()


class ProgressBar(Callback):
    """Display a progress bar for each epoch.

    The progress bar includes elapsed and estimated remaining time for
    the current epoch, the number of batches processed, and other
    user-defined metrics. The progress bar is erased once the epoch is
    completed.

    ``ProgressBar`` needs to know the total number of batches per
    epoch in order to display a meaningful progress bar. By default,
    this number is determined automatically using the dataset length
    and the batch size. If this heuristic does not work for some
    reason, you may either specify the number of batches explicitly
    or let the ``ProgressBar`` count the actual number of batches in
    the previous epoch.

    For jupyter notebooks a non-ASCII progress bar can be printed
    instead.
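
The batch-counting behavior described above is exposed through the ``batches_per_epoch`` argument. A minimal hedged sketch (net and module names illustrative):

from skorch.callbacks import ProgressBar

# 'count' reuses the number of batches observed in the previous epoch;
# an integer fixes it explicitly; the default 'auto' derives it from
# dataset length and batch size.
pbar = ProgressBar(batches_per_epoch='count')
net = NeuralNetClassifier(MyModule, callbacks=[pbar])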
github probml / pyprobml / scripts / mnist_skorch.py
class ClassifierModule(nn.Module):
    def __init__(
            self,
            input_dim=784,     # flattened 28x28 MNIST images
            hidden_dim=98,
            output_dim=10,     # ten digit classes
            dropout=0.5,
    ):
        super(ClassifierModule, self).__init__()
        self.dropout = nn.Dropout(dropout)

        self.hidden = nn.Linear(input_dim, hidden_dim)
        self.output = nn.Linear(hidden_dim, output_dim)

    def forward(self, X, **kwargs):
        X = F.relu(self.hidden(X))
        X = self.dropout(X)
        X = F.softmax(self.output(X), dim=-1)
        return X


net = NeuralNetClassifier(
    ClassifierModule,
    max_epochs=10,
    lr=0.1,
    device=device,
)

time_start = time()
net.fit(X_train, y_train)
print('time spent training {}'.format(time() - time_start))
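
After training, the net behaves like any scikit-learn classifier; a short follow-up sketch, assuming a held-out ``X_test``/``y_test`` split exists alongside the training data:

from sklearn.metrics import accuracy_score

y_pred = net.predict(X_test)
print('test accuracy: {:.4f}'.format(accuracy_score(y_test, y_pred)))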
github fancompute / wavetorch / study / vowel_train_sklearn.py
probes = wavetorch.utils.setup_probe_coords(
                    N_classes, cfg['geom']['px'], cfg['geom']['py'], cfg['geom']['pd'],
                    cfg['geom']['Nx'], cfg['geom']['Ny'], cfg['geom']['pml']['N']
                    )
source = wavetorch.utils.setup_src_coords(
                    cfg['geom']['src_x'], cfg['geom']['src_y'], cfg['geom']['Nx'],
                    cfg['geom']['Ny'], cfg['geom']['pml']['N']
                    )

design_region = torch.zeros(cfg['geom']['Nx'], cfg['geom']['Ny'], dtype=torch.uint8)
design_region[source[0].x.item()+5:probes[0].x.item()-5] = 1

def my_train_split(ds, y):
    return ds, skorch.dataset.Dataset(corpus.valid[:200], y=None)
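
skorch calls ``train_split(dataset, y)`` and expects a (train, valid) pair in return, so this helper trains on the full dataset and validates on a fixed slice. A minimal hedged sketch of wiring such a helper in (the later snippet from this file happens to pass ``train_split=None`` instead):

net = skorch.NeuralNetClassifier(
    module=wavetorch.WaveCell,
    train_split=my_train_split,  # replaces the default validation split
)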

github probml / pyprobml / scripts / skorch_demo.py
class MyModule(nn.Module):
    def __init__(self, num_units=10, nonlin=F.relu):
        super(MyModule, self).__init__()

        self.dense0 = nn.Linear(20, num_units)
        self.nonlin = nonlin
        self.dropout = nn.Dropout(0.5)
        self.dense1 = nn.Linear(num_units, 10)
        self.output = nn.Linear(10, 2)

    def forward(self, X, **kwargs):
        X = self.nonlin(self.dense0(X))
        X = self.dropout(X)
        X = F.relu(self.dense1(X))
        X = F.softmax(self.output(X), dim=-1)
        return X


net = NeuralNetClassifier(
    MyModule,
    max_epochs=3,
    lr=0.1,
)

net.fit(X, y)
y_proba = net.predict_proba(X) # (1000, 2)
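
Since the net is a scikit-learn compatible estimator, it also composes with sklearn's model selection tools. A hedged sketch of a grid search over both net- and module-level parameters (grid values illustrative; the double underscore routes ``num_units`` to ``MyModule``):

from sklearn.model_selection import GridSearchCV

params = {
    'lr': [0.05, 0.1],
    'max_epochs': [10, 20],
    'module__num_units': [10, 20],
}
gs = GridSearchCV(net, params, refit=False, cv=3, scoring='accuracy')
gs.fit(X, y)
print(gs.best_score_, gs.best_params_)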
github skorch-dev / skorch / examples / benchmarks / mnist.py
def performance_skorch(
        X_train,
        X_test,
        y_train,
        y_test,
        batch_size,
        device,
        lr,
        max_epochs,
):
    torch.manual_seed(0)
    net = NeuralNetClassifier(
        ClassifierModule,
        batch_size=batch_size,
        optimizer=torch.optim.Adadelta,
        lr=lr,
        device=device,
        max_epochs=max_epochs,
        callbacks=[
            ('tr_acc', EpochScoring(
                'accuracy',
                lower_is_better=False,
                on_train=True,
                name='train_acc',
            )),
        ],
    )
    net.fit(X_train, y_train)
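
The scraped snippet stops at ``fit``; a plausible completion that scores the held-out split, assuming scikit-learn's ``accuracy_score`` is imported as in the original benchmark file:

    y_pred = net.predict(X_test)
    return accuracy_score(y_test, y_pred)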
github fancompute / wavetorch / study / vowel_train_sklearn.py
### Perform training
net = skorch.NeuralNetClassifier(
    module=wavetorch.WaveCell,

    # Training configuration
    max_epochs=cfg['training']['N_epochs'],
    batch_size=cfg['training']['batch_size'],
    lr=cfg['training']['lr'],
    # train_split=skorch.dataset.CVSplit(cfg['training']['N_folds'], stratified=True, random_state=cfg['seed']),
    optimizer=torch.optim.Adam,
    criterion=torch.nn.CrossEntropyLoss,
    callbacks=[
        ClipDesignRegion,
        skorch.callbacks.EpochScoring('accuracy', lower_is_better=False, on_train=True, name='train_acc'),
        skorch.callbacks.Checkpoint(monitor=None, fn_prefix='1234_', dirname='test', f_params="params_{last_epoch[epoch]}.pt", f_optimizer='optimizer.pt', f_history='history.json')
        ],
    callbacks__print_log__keys_ignored=None,
    train_split=None,

    # These all get passed as options to WaveCell
    module__Nx=cfg['geom']['Nx'],
    module__Ny=cfg['geom']['Ny'],
    module__h=cfg['geom']['h'],
    module__dt=cfg['geom']['dt'],
    module__init=cfg['geom']['init'], 
    module__c0=cfg['geom']['c0'], 
    module__c1=cfg['geom']['c1'], 
    module__sigma=cfg['geom']['pml']['max'], 
    module__N=cfg['geom']['pml']['N'], 
    module__p=cfg['geom']['pml']['p'],
    module__design_region=design_region,
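
The ``module__``-prefixed arguments are skorch's parameter routing: everything after the double underscore is forwarded to the module's constructor, here ``WaveCell``. The same names remain settable later through the scikit-learn API; a minimal sketch:

# Routed parameters can be changed after construction via set_params,
# e.g. to retry training with a different PML exponent.
net.set_params(module__p=4.0)  # value illustrative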
github skorch-dev / skorch / examples / word_language_model / train.py
                    help='Limit the input data to length N.')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--no-cuda', dest='cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--save', type=str,  default='model.pt',
                    help='path to save the final model')
args = parser.parse_args()

torch.manual_seed(args.seed)

corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
device = 'cuda' if args.cuda else 'cpu'

class LRAnnealing(skorch.callbacks.Callback):
    def on_epoch_end(self, net, **kwargs):
        if not net.history[-1]['valid_loss_best']:
            net.lr /= 4.0

class ExamplePrinter(skorch.callbacks.Callback):
    def on_epoch_end(self, net, **kwargs):
        seed_sentence = "the meaning of"
        indices = [corpus.dictionary.word2idx[n] for n in seed_sentence.split()]
        indices = skorch.utils.to_tensor(
            torch.LongTensor([indices]).t(), device=device)
        sentence, _ = net.sample_n(num_words=10, input=indices)
        print(seed_sentence,
              " ".join([corpus.dictionary.idx2word[n] for n in sentence]))


def my_train_split(ds, y):
    return ds, skorch.dataset.Dataset(corpus.valid[:200], y=None)
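
The callbacks and the split are then wired into the net at construction time. A hedged sketch (the upstream script actually subclasses ``skorch.NeuralNet`` to add the ``sample_n`` method used above, and ``RNNModel`` is assumed from the same example):

net = skorch.NeuralNet(
    module=RNNModel,
    callbacks=[LRAnnealing(), ExamplePrinter()],
    train_split=my_train_split,
    device=device,
)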
github dmlc / dgl / _backup / topdown / topdown.py
    for reg_coef in [0, 100, 1e-2, 0.1, 1, 1e-3]:
        print('Trying reg coef', reg_coef)
        net = Net(
                module=DFSGlimpseSingleObjectClassifier,
                criterion=None,
                max_epochs=50,
                reg_coef=reg_coef,
                optimizer=T.optim.RMSprop,
                #optimizer__weight_decay=1e-4,
                lr=1e-5,
                batch_size=batch_size,
                device='cuda' if USE_CUDA else 'cpu',
                callbacks=[
                    Dump(),
                    skorch.callbacks.Checkpoint(monitor='acc_best'),
                    skorch.callbacks.ProgressBar(postfix_keys=['train_loss', 'valid_loss', 'acc', 'reg']),
                    skorch.callbacks.GradientNormClipping(0.01),
                    #skorch.callbacks.LRScheduler('ReduceLROnPlateau'),
                    ],
                iterator_train=data_generator,
                iterator_train__shuffle=True,
                iterator_valid=data_generator,
                iterator_valid__shuffle=False,
                )

        #net.fit((mnist_train, mnist_valid), pretrain=True, epochs=50)
        net.partial_fit((mnist_train, mnist_valid), pretrain=False, epochs=500)
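
Note the choice of ``partial_fit`` over ``fit`` in the loop above: in skorch, ``fit`` re-initializes the module, optimizer, and history before training, while ``partial_fit`` continues from the current state. A minimal sketch:

# fit() would re-initialize weights on every reg_coef iteration;
# partial_fit() keeps the current parameters and simply trains on.
# Extra keyword arguments such as epochs are forwarded to the fit loop,
# as the call above with epochs=500 shows.
net.partial_fit(X, y, epochs=10)  # X, y placeholders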