How to use the baal.utils.iterutils.map_on_tensor function in baal

To help you get started, we've selected a few baal examples based on popular ways the function is used in public projects.

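map_on_tensor applies a callable to a tensor, or to every tensor inside a sequence of tensors, which is why the snippets below can post-process model outputs the same way whether the model returns one tensor or several. A minimal sketch of how it is used:

    import torch
    from baal.utils.iterutils import map_on_tensor

    # A single tensor: the callable is applied directly.
    averaged = map_on_tensor(lambda t: t.mean(-1), torch.rand(4, 10, 20))

    # A sequence of tensors (e.g. a multi-output model): applied to each one.
    heads = [torch.rand(4, 10), torch.rand(4, 5)]
    detached = map_on_tensor(lambda t: t.detach().cpu(), heads)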

From ElementAI/baal, src/baal/modelwrapper.py:

    def test_on_batch(self, data, target, cuda=False, average_predictions=1):
        """
        Test the current model on a batch.

        Args:
            data (Tensor): The model input.
            target (Tensor): The ground truth.
            cuda (bool): Use CUDA or not.
            average_predictions (int): The number of predictions to average to
                compute the test loss.

        Returns:
            Tensor, the loss computed from the criterion.
        """
        with torch.no_grad():
            if cuda:
                data, target = to_cuda(data), to_cuda(target)
            if average_predictions == 1:
                preds = self.model(data)
                loss = self.criterion(preds, target)
            elif average_predictions > 1:
                preds = map_on_tensor(lambda p: p.mean(-1),
                                      self.predict_on_batch(data,
                                                            iterations=average_predictions,
                                                            cuda=cuda))
                loss = self.criterion(preds, target)
            self._update_metrics(preds, target, loss, 'test')
            return loss
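When average_predictions is greater than 1, predict_on_batch returns outputs with the iterations stacked on the last axis, and map_on_tensor averages that axis away so the criterion sees a single prediction per sample. A hypothetical call, with wrapper standing in for a ModelWrapper instance:

    # Hypothetical usage sketch: average 10 stochastic forward passes
    # (e.g. MC-Dropout) before computing the test loss.
    loss = wrapper.test_on_batch(data, target, cuda=False, average_predictions=10)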
From ElementAI/baal, src/baal/modelwrapper.py:

        Returns:
            Generators [batch_size, n_classes, ..., n_iterations]
        """
        self.eval()
        if len(dataset) == 0:
            return None

        log.info("Start Predict", dataset=len(dataset))
        collate_fn = collate_fn or default_collate
        loader = DataLoader(dataset,
                            batch_size,
                            False, num_workers=workers,
                            collate_fn=collate_fn)
        for idx, (data, _) in enumerate(tqdm(loader, total=len(loader), file=sys.stdout)):
            pred = self.predict_on_batch(data, iterations, use_cuda)
            pred = map_on_tensor(lambda x: x.detach(), pred)
            if half:
                pred = map_on_tensor(lambda x: x.half(), pred)
            yield map_on_tensor(lambda x: x.cpu().numpy(), pred)
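Each yielded item is the batch prediction converted to numpy, with the iterations dimension last. Assuming this generator is ModelWrapper.predict_on_dataset_generator, and using placeholder names (wrapper, pool), a consumer might look like:

    # Usage sketch (hypothetical names): stream predictions over an
    # unlabelled pool, 20 stochastic passes per sample.
    for pred in wrapper.predict_on_dataset_generator(pool, batch_size=16,
                                                     iterations=20, use_cuda=False):
        # For a single-output classifier: (batch_size, n_classes, 20)
        print(pred.shape)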
From ElementAI/baal, src/baal/modelwrapper.py:

        with torch.no_grad():
            if cuda:
                data = to_cuda(data)
            if self.replicate_in_memory:
                # Fast path: repeat the batch `iterations` times, run a single
                # forward pass, then fold the repeats back into a trailing axis.
                input_shape = data.size()
                batch_size = input_shape[0]
                try:
                    data = torch.stack([data] * iterations)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
                data = data.view(batch_size * iterations, *input_shape[1:])
                try:
                    out = self.model(data)
                except RuntimeError as e:
                    raise RuntimeError(
                        '''CUDA ran out of memory while BaaL tried to replicate data. See the exception above.
                    Use `replicate_in_memory=False` in order to reduce the memory requirements.
                    Note that there will be some speed trade-offs''') from e
                # [iterations * batch, ...] -> [iterations, batch, ...] -> [batch, classes, ..., iterations]
                out = map_on_tensor(lambda o: o.view([iterations, batch_size, *o.size()[1:]]), out)
                out = map_on_tensor(lambda o: o.permute(1, 2, *range(3, o.ndimension()), 0), out)
            else:
                # Memory-friendly path: run `iterations` separate forward passes
                # and stack the outputs along a new last dimension.
                out = [self.model(data) for _ in range(iterations)]
                if isinstance(out[0], Sequence):
                    out = [torch.stack(ts, dim=-1) for ts in zip(*out)]
                else:
                    out = torch.stack(out, dim=-1)
            return out
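Both branches end up with the same layout: each output tensor gains a trailing axis of size iterations, either via the view/permute mapped over the outputs or via torch.stack(..., dim=-1) in the loop fallback. A sketch of the caller's view, with wrapper as a placeholder:

    # Sketch (hypothetical names): 20 stochastic passes on a single batch.
    out = wrapper.predict_on_batch(data, iterations=20, cuda=False)
    # A classifier that returns [batch, n_classes] logits now yields
    # [batch, n_classes, 20]; reduce the last axis for an averaged prediction.
    mean_probs = out.softmax(1).mean(-1)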