How to use the baal.utils.cuda_utils.to_cuda function in baal

To help you get started, we've selected a few baal examples based on popular ways the function is used in public projects.

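Before the ModelWrapper snippets below, here is a minimal sketch of calling to_cuda directly. It assumes only that to_cuda moves a Tensor onto the current CUDA device; the shape is an arbitrary placeholder.

# Minimal sketch: move a placeholder batch to the GPU with to_cuda.
import torch

from baal.utils.cuda_utils import to_cuda

x = torch.randn(4, 3, 32, 32)  # placeholder image batch
if torch.cuda.is_available():  # to_cuda requires a CUDA device
    x = to_cuda(x)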

github ElementAI / baal / src / baal / modelwrapper.py
def predict_on_batch(self, data, iterations=1, cuda=False):
        """
        Get the model's predictions on a batch, repeated `iterations` times.

        Args:
            data (Tensor): the model input
            iterations (int): number of predictions to perform
            cuda (bool): use CUDA or not

        Returns:
            Tensor, the model predictions.
                    shape = (batch_size, n_classes, iterations)

        Raises:
            RuntimeError if CUDA runs out of memory during data replication.
        """
        with torch.no_grad():
            if cuda:
                data = to_cuda(data)
            if self.replicate_in_memory:
                input_shape = data.size()
                batch_size = input_shape[0]
                try:
                    data = torch.stack([data] * iterations)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
                data = data.view(batch_size * iterations, *input_shape[1:])
                try:
                    out = self.model(data)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
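A hedged usage sketch for predict_on_batch, assuming the ModelWrapper(model, criterion) constructor these snippets reflect; the model and shapes are placeholders.

# Hedged sketch: model, criterion, and shapes are placeholders.
import torch
from torch import nn
from baal.modelwrapper import ModelWrapper

model = nn.Linear(10, 2)
wrapper = ModelWrapper(model, criterion=nn.CrossEntropyLoss())

data = torch.randn(8, 10)
# 20 forward passes over the same batch; with cuda=True the batch is
# first moved to the GPU through to_cuda, per the snippet above.
preds = wrapper.predict_on_batch(data, iterations=20, cuda=False)
# Per the docstring, preds has shape (batch_size, n_classes, iterations).

For a deterministic model all 20 passes are identical; stochastic layers such as MC-Dropout are what make the iterations differ.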
github ElementAI / baal / src / baal / modelwrapper.py
def train_on_batch(self, data, target, optimizer, cuda=False):
        """
        Train the current model on a batch using `optimizer`.

        Args:
            data (Tensor): the model input
            target (Tensor): the ground truth
            optimizer (optim.Optimizer): an optimizer
            cuda (bool): use CUDA or not

        Returns:
            Tensor, the loss computed from the criterion.
        """

        if cuda:
            data, target = to_cuda(data), to_cuda(target)
        optimizer.zero_grad()
        output = self.model(data)
        loss = self.criterion(output, target)
        loss.backward()
        optimizer.step()
        self._update_metrics(output, target, loss, filter='train')
        return loss
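A hedged usage sketch for train_on_batch, under the same placeholder setup.

# Hedged sketch: model, optimizer, and shapes are placeholders.
import torch
from torch import nn, optim
from baal.modelwrapper import ModelWrapper

model = nn.Linear(10, 2)
wrapper = ModelWrapper(model, criterion=nn.CrossEntropyLoss())
optimizer = optim.SGD(model.parameters(), lr=0.01)

data = torch.randn(8, 10)
target = torch.randint(0, 2, (8,))
# With cuda=True, train_on_batch moves the batch via to_cuda but not the
# model, so move the model to the GPU yourself before enabling it.
loss = wrapper.train_on_batch(data, target, optimizer, cuda=False)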
github ElementAI / baal / src / baal / modelwrapper.py
"""
        Test the current model on a batch.

        Args:
            data (Tensor): the model input
            target (Tensor): the ground truth
            cuda (bool): use cuda or not
            average_predictions (int): The number of predictions to average to
                compute the test loss.

        Returns:
            Tensor, the loss computed from the criterion.
        """
        with torch.no_grad():
            if cuda:
                data, target = to_cuda(data), to_cuda(target)
            if average_predictions == 1:
                preds = self.model(data)
                loss = self.criterion(preds, target)
            elif average_predictions > 1:
                preds = map_on_tensor(lambda p: p.mean(-1),
                                      self.predict_on_batch(data,
                                                            iterations=average_predictions,
                                                            cuda=cuda))
                loss = self.criterion(preds, target)
            self._update_metrics(preds, target, loss, 'test')
            return loss
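Finally, a hedged sketch of test_on_batch with the same placeholders; setting average_predictions above 1 routes through predict_on_batch, as the snippet shows.

# Hedged sketch: model, criterion, and shapes are placeholders.
import torch
from torch import nn
from baal.modelwrapper import ModelWrapper

wrapper = ModelWrapper(nn.Linear(10, 2), criterion=nn.CrossEntropyLoss())
data, target = torch.randn(8, 10), torch.randint(0, 2, (8,))

loss = wrapper.test_on_batch(data, target, cuda=False)
# Average ten predictions before computing the loss:
loss_avg = wrapper.test_on_batch(data, target, cuda=False, average_predictions=10)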