How to use baal - common examples

To help you get started, we've selected a few baal examples based on popular ways it is used in public projects. The snippets below come from the ModelWrapper class and the experiment scripts in the ElementAI/baal repository.

Example 1: ModelWrapper.test_on_batch (ElementAI/baal, src/baal/modelwrapper.py)
def test_on_batch(self, data, target, cuda=False, average_predictions=1):
        """
        Test the current model on a batch.

        Args:
            data (Tensor): the model input
            target (Tensor): the ground truth
            cuda (bool): use cuda or not
            average_predictions (int): The number of predictions to average to
                compute the test loss.

        Returns:
            Tensor, the loss computed from the criterion.
        """
        with torch.no_grad():
            if cuda:
                data, target = to_cuda(data), to_cuda(target)
            if average_predictions == 1:
                preds = self.model(data)
                loss = self.criterion(preds, target)
            elif average_predictions > 1:
                preds = map_on_tensor(lambda p: p.mean(-1),
                                      self.predict_on_batch(data,
                                                            iterations=average_predictions,
                                                            cuda=cuda))
                loss = self.criterion(preds, target)
            self._update_metrics(preds, target, loss, 'test')
            return loss
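
As quick orientation, here is a minimal usage sketch for test_on_batch. It is hypothetical: the toy linear model, criterion, and random tensors are placeholders, not part of baal.

import torch
from torch import nn
from baal.modelwrapper import ModelWrapper

# Placeholder model and loss; any nn.Module/criterion pair works.
wrapper = ModelWrapper(nn.Linear(16, 4), nn.CrossEntropyLoss())

data = torch.randn(8, 16)
target = torch.randint(0, 4, (8,))

loss = wrapper.test_on_batch(data, target)  # single forward pass
# Average ten stochastic predictions before computing the loss.
loss_avg = wrapper.test_on_batch(data, target, average_predictions=10)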
Example 2: ModelWrapper.predict_on_dataset_generator (ElementAI/baal, src/baal/modelwrapper.py)
def predict_on_dataset_generator(self, dataset, batch_size, iterations, use_cuda,
                                 workers=4, collate_fn=None, half=False):
        """
        Use the model to predict on a dataset `iterations` times.

        Returns:
            Generators [batch_size, n_classes, ..., n_iterations]
        """
        self.eval()
        if len(dataset) == 0:
            return None

        log.info("Start Predict", dataset=len(dataset))
        collate_fn = collate_fn or default_collate
        loader = DataLoader(dataset,
                            batch_size,
                            False, num_workers=workers,
                            collate_fn=collate_fn)
        for idx, (data, _) in enumerate(tqdm(loader, total=len(loader), file=sys.stdout)):
            pred = self.predict_on_batch(data, iterations, use_cuda)
            pred = map_on_tensor(lambda x: x.detach(), pred)
            if half:
                pred = map_on_tensor(lambda x: x.half(), pred)
            yield map_on_tensor(lambda x: x.cpu().numpy(), pred)
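
Here is a hedged sketch of calling this generator; the toy dataset and the shapes in the comments are illustrative only.

import torch
from torch import nn
from torch.utils.data import TensorDataset
from baal.modelwrapper import ModelWrapper

wrapper = ModelWrapper(nn.Linear(16, 4), nn.CrossEntropyLoss())
# A throwaway dataset of 100 random (input, label) pairs.
ds = TensorDataset(torch.randn(100, 16), torch.randint(0, 4, (100,)))

# Each yielded numpy array has shape [batch_size, n_classes, n_iterations].
for pred in wrapper.predict_on_dataset_generator(ds, batch_size=32,
                                                 iterations=10, use_cuda=False):
    print(pred.shape)  # (32, 4, 10); the final partial batch is (4, 4, 10)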
Example 3: ModelWrapper.predict_on_batch (ElementAI/baal, src/baal/modelwrapper.py)
def predict_on_batch(self, data, iterations=1, cuda=False):
        """
        Get the model's prediction on a batch.

        Args:
            data (Tensor): the model input
            iterations (int): number of predictions to perform.
            cuda (bool): use cuda or not

        Returns:
            Tensor, the predictions.
                    shape = [batch_size, n_classes, ..., n_iterations]

        Raises:
            RuntimeError if CUDA runs out of memory during data replication.
        """
        with torch.no_grad():
            if cuda:
                data = to_cuda(data)
            if self.replicate_in_memory:
                input_shape = data.size()
                batch_size = input_shape[0]
                try:
                    data = torch.stack([data] * iterations)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
                data = data.view(batch_size * iterations, *input_shape[1:])
                try:
                    out = self.model(data)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
                out = map_on_tensor(lambda o: o.view([iterations, batch_size, *o.size()[1:]]), out)
                out = map_on_tensor(lambda o: o.permute(1, 2, *range(3, o.ndimension()), 0), out)
            else:
                out = [self.model(data) for _ in range(iterations)]
                if isinstance(out[0], Sequence):
                    out = [torch.stack(ts, dim=-1) for ts in zip(*out)]
                else:
                    out = torch.stack(out, dim=-1)
            return out
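
The sketch below exercises the replicate_in_memory path, assuming baal's MCDropoutModule wrapper, which keeps dropout active at evaluation time so the stacked predictions actually differ; the tiny network is a placeholder.

import torch
from torch import nn
from baal.modelwrapper import ModelWrapper
from baal.bayesian.dropout import MCDropoutModule

# Placeholder network whose Dropout stays stochastic under MCDropoutModule.
model = MCDropoutModule(nn.Sequential(nn.Linear(16, 32), nn.Dropout(0.5), nn.Linear(32, 4)))
wrapper = ModelWrapper(model, nn.CrossEntropyLoss())

out = wrapper.predict_on_batch(torch.randn(8, 16), iterations=20)
print(out.shape)  # torch.Size([8, 4, 20]): one slice per stochastic forward pass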
Example 4: ModelWrapper.train_on_batch (ElementAI/baal, src/baal/modelwrapper.py)
def train_on_batch(self, data, target, optimizer, cuda=False):
        """
        Train the current model on a batch using `optimizer`.

        Args:
            data (Tensor): the model input
            target (Tensor): the ground truth
            optimizer (optim.Optimizer): an optimizer
            cuda (bool): use cuda or not

        Returns:
            Tensor, the loss computed from the criterion.
        """

        if cuda:
            data, target = to_cuda(data), to_cuda(target)
        optimizer.zero_grad()
        output = self.model(data)
        loss = self.criterion(output, target)
        loss.backward()
        optimizer.step()
        self._update_metrics(output, target, loss, filter='train')
        return loss
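
A minimal training-loop sketch; the toy tensors and the choice of SGD are arbitrary.

import torch
from torch import nn, optim
from baal.modelwrapper import ModelWrapper

model = nn.Linear(16, 4)
wrapper = ModelWrapper(model, nn.CrossEntropyLoss())
optimizer = optim.SGD(model.parameters(), lr=0.01)

data, target = torch.randn(8, 16), torch.randint(0, 4, (8,))
for step in range(5):
    loss = wrapper.train_on_batch(data, target, optimizer)
    print(step, float(loss))  # the loss should shrink on this fixed batch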
Example 5: dataset setup for active learning (ElementAI/baal, experiments/vgg_mcdropout_cifar10.py)
def get_datasets(initial_pool):
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         transforms.Normalize(3 * [0.5], 3 * [0.5]), ])
    test_transform = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(3 * [0.5], 3 * [0.5]),
        ]
    )
    # Note: We use the test set here as an example. You should make your own validation set.
    train_ds = datasets.CIFAR10('.', train=True,
                                transform=transform, target_transform=None, download=True)
    test_set = datasets.CIFAR10('.', train=False,
                                transform=test_transform, target_transform=None, download=True)

    active_set = ActiveLearningDataset(train_ds, pool_specifics={'transform': test_transform})

    # We start labeling randomly.
    active_set.label_randomly(initial_pool)
    return active_set, test_set
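
To show where these datasets go, here is a hedged sketch of a single active-learning step, assuming a `wrapper` built as in the earlier examples (a ModelWrapper around an MC-Dropout model) and baal's BALD heuristic; the query size of 100 is arbitrary.

from baal.active.heuristics import BALD

active_set, test_set = get_datasets(initial_pool=1000)

# Rank the unlabelled pool by BALD uncertainty and label the top queries.
heuristic = BALD()
predictions = wrapper.predict_on_dataset(active_set.pool, batch_size=32,
                                         iterations=20, use_cuda=False)
active_set.label(heuristic(predictions)[:100])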