How to use Poutyne - common examples

To help you get started, we've selected a few Poutyne examples based on popular ways it is used in public projects.


github GRAAL-Research / poutyne / poutyne / framework / callbacks / policies.py View on Github external

# ``Phase`` and ``linspace`` are defined earlier in policies.py; the function
# signature is inferred from the docstring and body of this snippet.
def one_cycle_phases(steps, lr, momentum, finetune_lr, finetune_fraction):
    """Build the phases of a "one-cycle" policy: a linear ramp-up of the
    learning rate, a linear ramp-down, then a short annealing to a small
    fine-tuning learning rate.

    Args:
        steps (int): total number of training steps.
        lr (Tuple[float, float]): lower and peak learning rates of the cycle.
        momentum (Tuple[float, float]): upper and lower momenta of the cycle.
        finetune_lr (float): learning rate used for the final fine tuning.
            Should be smaller than `min(lr)`.
        finetune_fraction (float): fraction of steps used for the fine tuning.
            Must be between 0 and 1.

    Returns:
        A list of configured :class:`~poutyne.framework.callbacks.policies.Phase` instances.

    References:
        `Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates
        <https://arxiv.org/abs/1708.07120>`_
    """
    steps_annealing = int(steps * finetune_fraction)
    steps_up = (steps - steps_annealing) // 2
    steps_down = steps - steps_annealing - steps_up
    return [
        # Ramp the learning rate up while the momentum comes down.
        Phase(
            lr=linspace(lr[0], lr[1], steps_up),
            momentum=linspace(momentum[0], momentum[1], steps_up),
        ),
        # Ramp the learning rate back down while the momentum recovers.
        Phase(
            lr=linspace(lr[1], lr[0], steps_down),
            momentum=linspace(momentum[1], momentum[0], steps_down),
        ),
        # Anneal to the fine-tuning learning rate at constant momentum.
        Phase(
            lr=linspace(lr[0], finetune_lr, steps_annealing),
            momentum=linspace(momentum[0], momentum[0], steps_annealing),
        ),
    ]
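In training code, the returned phases are handed to poutyne's OptimizerPolicy callback, which steps through them one batch at a time. Below is a minimal, hypothetical sketch on toy data, assuming the OptimizerPolicy and one_cycle_phases exports from poutyne.framework:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from poutyne.framework import Model, OptimizerPolicy, one_cycle_phases

# Toy dataset and network, for illustration only.
dataset = TensorDataset(torch.randn(320, 10), torch.randint(0, 2, (320,)))
train_loader = DataLoader(dataset, batch_size=32)
network = nn.Linear(10, 2)

epochs = 5
steps = epochs * len(train_loader)  # one Phase step is consumed per batch
policy = OptimizerPolicy(one_cycle_phases(steps, lr=(0.01, 0.1), momentum=(0.95, 0.85),
                                          finetune_lr=0.001, finetune_fraction=0.1))

model = Model(network, 'sgd', 'cross_entropy')
model.fit_generator(train_loader, epochs=epochs, callbacks=[policy])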
github GRAAL-Research / poutyne / tests / framework / callbacks / test_lr_scheduler_checkpoint.py View on Github external
def _test_checkpointer(self, checkpointer, lr_scheduler):
    scheduler_states = {}
    generator = some_data_generator(OptimizerCheckpointTest.batch_size)

    checkpointer.set_params({'epochs': OptimizerCheckpointTest.epochs, 'steps': 1})
    checkpointer.set_model(self.model)
    checkpointer.on_train_begin({})
    for epoch in range(1, OptimizerCheckpointTest.epochs + 1):
        # Simulate one single-batch epoch by firing the callback hooks by hand.
        checkpointer.on_epoch_begin(epoch, {})
        checkpointer.on_batch_begin(1, {})
        loss = self._update_model(generator)
        checkpointer.on_batch_end(1, {'batch': 1, 'size': OptimizerCheckpointTest.batch_size, 'loss': loss})
        checkpointer.on_epoch_end(epoch, {'epoch': epoch, 'loss': loss, 'val_loss': 1})
        # A scheduler checkpoint file must exist after every epoch.
        filename = self.checkpoint_filename.format(epoch=epoch)
        self.assertTrue(os.path.isfile(filename))
        # Snapshot the scheduler state for comparison against the saved file.
        scheduler_states[epoch] = torch_to_numpy(lr_scheduler.scheduler.state_dict(), copy=True)
    checkpointer.on_train_end({})

    self._test_checkpoint(scheduler_states, lr_scheduler)
github GRAAL-Research / poutyne / tests / framework / callbacks / test_lr_scheduler_checkpoint.py View on Github external
def _test_checkpoint(self, scheduler_states, lr_scheduler):
    # Reload each epoch's checkpoint file and check that it matches the state
    # captured during training.
    for epoch, epoch_scheduler_state in scheduler_states.items():
        filename = self.checkpoint_filename.format(epoch=epoch)
        lr_scheduler.load_state(filename)
        saved_scheduler_state = torch_to_numpy(lr_scheduler.scheduler.state_dict())

        self.assertEqual(epoch_scheduler_state, saved_scheduler_state)
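Together, these two helpers exercise poutyne's LRSchedulerCheckpoint callback: the first drives it through a simulated training loop and records the scheduler state after every epoch, the second reloads each checkpoint and asserts it matches. In regular training code the callback is simply passed to fit_generator. A hedged sketch, assuming the LRSchedulerCheckpoint and ExponentialLR exports from poutyne.framework and reusing model and train_loader from the sketch above:

from poutyne.framework import ExponentialLR, LRSchedulerCheckpoint

# poutyne's ExponentialLR wraps torch.optim.lr_scheduler.ExponentialLR as a
# callback; the optimizer argument is bound by the Model, so only the
# scheduler's own parameters are given here.
lr_scheduler = ExponentialLR(gamma=0.99)
checkpoint = LRSchedulerCheckpoint(lr_scheduler, 'scheduler_epoch_{epoch}.ckpt', verbose=True)

model.fit_generator(train_loader, epochs=epochs, callbacks=[checkpoint])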
github GRAAL-Research / poutyne / tests / framework / callbacks / test_best_model_restore.py View on Github external
def _test_restore_with_val_losses(self, checkpointer, val_losses, best_epoch):
    generator = some_data_generator(BestModelRestoreTest.batch_size)

    best_epoch_weights = None
    checkpointer.set_params({'epochs': len(val_losses), 'steps': 1})
    checkpointer.set_model(self.model)
    checkpointer.on_train_begin({})
    for epoch, val_loss in enumerate(val_losses, 1):
        # Feed the callback a scripted validation loss for each epoch.
        checkpointer.on_epoch_begin(epoch, {})
        checkpointer.on_batch_begin(1, {})
        loss = self._update_model(generator)
        checkpointer.on_batch_end(1, {'batch': 1, 'size': BestModelRestoreTest.batch_size, 'loss': loss})
        checkpointer.on_epoch_end(epoch, {'epoch': epoch, 'loss': loss, 'val_loss': val_loss})
        # Snapshot the weights at the epoch that should be restored.
        if epoch == best_epoch:
            best_epoch_weights = torch_to_numpy(self.model.get_weight_copies())
    checkpointer.on_train_end({})

    # After training, the callback must have restored the best epoch's weights.
    final_weights = torch_to_numpy(self.model.get_weight_copies())
    self.assertEqual(best_epoch_weights, final_weights)
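This test verifies poutyne's BestModelRestore callback, which tracks the monitored metric and puts the best epoch's weights back into the model when training ends. A minimal hedged sketch of normal usage, reusing model and train_loader from the first sketch; valid_loader stands in for a hypothetical validation DataLoader:

from poutyne.framework import BestModelRestore

# Track the epoch with the lowest val_loss and restore its weights at the
# end of training.
restore = BestModelRestore(monitor='val_loss', mode='min', verbose=True)
model.fit_generator(train_loader, valid_loader, epochs=epochs, callbacks=[restore])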
github GRAAL-Research / poutyne / tests / test_utils.py View on Github external
def test_tuple_2(self):
    """
    Test concatenation of a list of (array, (array, array)) tuples.
    """
    obj = [(np.arange(5), (np.ones(5) * 2, np.ones(5) * 3))] * 5
    concat = _concat(obj)
    # Each position in the nested structure is concatenated along axis 0.
    self.assertEqual(concat[0].shape, (25,))
    self.assertEqual(concat[1][0].shape, (25,))
    self.assertEqual(concat[1][1].shape, (25,))
    # The first element of each tuple was arange(5), repeated five times.
    for i in range(5):
        for j in range(5):
            self.assertTrue(concat[0][i * 5 + j] == j)
    self.assertTrue((concat[1][0] == 2).all())
    self.assertTrue((concat[1][1] == 3).all())
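_concat is the private helper poutyne uses to merge per-batch outputs (for example from predict_generator) while preserving nested tuple structure. A standalone sketch of the behaviour the test checks; the poutyne.utils import path is an assumption and may vary by version:

import numpy as np

from poutyne.utils import _concat  # private helper, not part of the public API

# Five "batches", each a (predictions, (aux1, aux2)) tuple of length-5 arrays.
batches = [(np.arange(5), (np.ones(5) * 2, np.ones(5) * 3)) for _ in range(5)]

merged = _concat(batches)
assert merged[0].shape == (25,)      # arrays are concatenated along axis 0
assert merged[1][0].shape == (25,)   # the nested tuple structure is preserved
assert merged[1][1].shape == (25,)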