How to use the torchgan.models.model.Generator class in torchgan

To help you get started, we’ve selected a few torchgan examples, based on popular ways it is used in public projects.

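Every concrete generator shipped with torchgan subclasses the abstract base class torchgan.models.model.Generator, so the examples below identify generator networks with a simple isinstance check. A minimal sketch of that idiom, assuming the default constructor arguments of DCGANGenerator:

from torchgan.models import Generator, DCGANGenerator

# DCGANGenerator (like every built-in generator) derives from the abstract
# Generator base class, which is what the isinstance checks below rely on.
netG = DCGANGenerator()
print(isinstance(netG, Generator))  # True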
github torchgan / torchgan / torchgan / logging / visualize.py
def __init__(
        self,
        trainer,
        visdom_port=8097,
        log_dir=None,
        writer=None,
        test_noise=None,
        nrow=8,
    ):
        super(ImageVisualize, self).__init__(
            [], visdom_port=visdom_port, log_dir=log_dir, writer=writer
        )
        self.test_noise = []
        for model in trainer.model_names:
            if isinstance(getattr(trainer, model), Generator):
                self.test_noise.append(
                    getattr(trainer, model).sampler(trainer.sample_size, trainer.device)
                    if test_noise is None
                    else test_noise
                )
        self.step = 1
        self.nrow = nrow
github torchgan / torchgan / torchgan / logging / visualize.py
def __call__(self, trainer, **kwargs):
        pos = 0
        for model in trainer.model_names:
            if isinstance(getattr(trainer, model), Generator):
                generator = getattr(trainer, model)
                with torch.no_grad():
                    image = generator(*self.test_noise[pos])
                    image = torchvision.utils.make_grid(
                        image, nrow=self.nrow, normalize=True, range=(-1, 1)
                    )
                    super(ImageVisualize, self).__call__(
                        trainer, image, model, **kwargs
                    )
                self.step -= 1
                pos = pos + 1
        self.step += 1 if pos > 0 else 0
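In the visualizer above, each entry of self.test_noise is produced by Generator.sampler(sample_size, device), which returns the generator inputs as a list (just a noise tensor for unconditional models), and that list is unpacked into the forward pass as generator(*noise). A rough sketch of the same round trip outside the trainer, with DCGANGenerator standing in for any Generator subclass:

import torch
import torchvision
from torchgan.models import DCGANGenerator

netG = DCGANGenerator()
noise = netG.sampler(8, torch.device("cpu"))   # list of input tensors for 8 samples

with torch.no_grad():
    images = netG(*noise)                      # same unpacking as generator(*self.test_noise[pos])
    grid = torchvision.utils.make_grid(images, nrow=8, normalize=True)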
github torchgan / torchgan / torchgan / models / dcgan.py
from math import ceil, log2

import torch.nn as nn
import torch.nn.functional as F

from .model import Discriminator, Generator

__all__ = ["DCGANGenerator", "DCGANDiscriminator"]


class DCGANGenerator(Generator):
    r"""Deep Convolutional GAN (DCGAN) generator from
    `"Unsupervised Representation Learning With Deep Convolutional Generative Aversarial Networks
    by Radford et. al. " `_ paper

    Args:
        encoding_dims (int, optional): Dimension of the encoding vector sampled from the noise prior.
        out_size (int, optional): Height and width of the input image to be generated. Must be at
            least 16 and should be an exact power of 2.
        out_channels (int, optional): Number of channels in the output Tensor.
        step_channels (int, optional): Number of channels in multiples of which the DCGAN steps up
            the convolutional features. The step up is done as dim :math:`z \rightarrow d \rightarrow
            2 \times d \rightarrow 4 \times d \rightarrow 8 \times d` where :math:`d` = step_channels.
        batchnorm (bool, optional): If True, use batch normalization in the convolutional layers of
            the generator.
        nonlinearity (torch.nn.Module, optional): Nonlinearity to be used in the intermediate
            convolutional layers. Defaults to ``LeakyReLU(0.2)`` when None is passed.
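Putting those arguments together, constructing a DCGANGenerator and pushing a noise batch through it could look like the sketch below; the concrete values are illustrative and the defaults may differ between torchgan versions:

import torch
from torchgan.models import DCGANGenerator

netG = DCGANGenerator(
    encoding_dims=100,   # size of the noise vector z
    out_size=64,         # output height/width, a power of 2 and at least 16
    out_channels=3,      # e.g. RGB output
    step_channels=64,    # d in the z -> d -> 2d -> 4d -> 8d progression
    batchnorm=True,
)

z = torch.randn(16, 100)   # batch of 16 encoding vectors
fake = netG(z)
print(fake.shape)          # expected: torch.Size([16, 3, 64, 64])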
github torchgan / torchgan / torchgan / trainer / base_trainer.py
)
                    else:
                        # NOTE(avik-pal): We assume that it is a Discriminator Loss by default.
                        ldis, dis_iter = ldis + cur_loss, dis_iter + 1
                for model_name in self.model_names:
                    grad_logs.update_grads(model_name, getattr(self, model_name))
            elif isinstance(loss, GeneratorLoss):
                if self.loss_information["discriminator_iters"] % self.ncritic == 0:
                    cur_loss = loss.train_ops(
                        **self._get_arguments(self.loss_arg_maps[name])
                    )
                    loss_logs.logs[name].append(cur_loss)
                    lgen, gen_iter = lgen + cur_loss, gen_iter + 1
                for model_name in self.model_names:
                    model = getattr(self, model_name)
                    if isinstance(model, Generator):
                        grad_logs.update_grads(model_name, model)
            elif isinstance(loss, DiscriminatorLoss):
                if self.loss_information["generator_iters"] % self.ngen == 0:
                    cur_loss = loss.train_ops(
                        **self._get_arguments(self.loss_arg_maps[name])
                    )
                    loss_logs.logs[name].append(cur_loss)
                    ldis, dis_iter = ldis + cur_loss, dis_iter + 1
                for model_name in self.model_names:
                    model = getattr(self, model_name)
                    if isinstance(model, Discriminator):
                        grad_logs.update_grads(model_name, model)
        return lgen, ldis, gen_iter, dis_iter
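The loop above is what the high-level Trainer runs on every batch: it invokes train_ops on each GeneratorLoss and DiscriminatorLoss and logs gradients for the matching Generator or Discriminator instances. A rough end-to-end configuration in the dictionary format used by the torchgan documentation; the argument values, optimizer settings and the dataloader variable are placeholders:

from torch.optim import Adam
from torchgan.models import DCGANGenerator, DCGANDiscriminator
from torchgan.losses import MinimaxGeneratorLoss, MinimaxDiscriminatorLoss
from torchgan.trainer import Trainer

network_config = {
    "generator": {
        "name": DCGANGenerator,
        "args": {"out_channels": 1, "step_channels": 16},
        "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
    },
    "discriminator": {
        "name": DCGANDiscriminator,
        "args": {"in_channels": 1, "step_channels": 16},
        "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
    },
}
losses = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()]

trainer = Trainer(network_config, losses, sample_size=64, epochs=10)
trainer(dataloader)  # dataloader: a torch.utils.data.DataLoader yielding real images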
github torchgan / torchgan / torchgan / models / autoencoding.py
from math import ceil, log

import torch
import torch.nn as nn
import torch.nn.functional as F

from .model import Discriminator, Generator

__all__ = ["AutoEncodingGenerator", "AutoEncodingDiscriminator"]


class AutoEncodingGenerator(Generator):
    r"""Autoencoding Generator for Boundary Equilibrium GAN (BEGAN) from
    `"BEGAN : Boundary Equilibrium Generative Adversarial Networks
    by Berthelot et. al." `_ paper

    Args:
        encoding_dims (int, optional): Dimension of the encoding vector sampled from the noise prior.
        out_size (int, optional): Height and width of the input image to be generated. Must be at
            least 16 and should be an exact power of 2.
        out_channels (int, optional): Number of channels in the output Tensor.
        step_channels (int, optional): Number of channels in multiples of which the DCGAN steps up
            the convolutional features. The step up is done as dim :math:`z \rightarrow d \rightarrow
            2 \times d \rightarrow 4 \times d \rightarrow 8 \times d` where :math:`d` = step_channels.
        scale_factor (int, optional): The scale factor is used to infer properties of the model like
            ``upsample_pad``, ``upsample_filters``, ``upsample_stride`` and ``upsample_output_pad``.
        batchnorm (bool, optional): If True, use batch normalization in the convolutional layers of
            the generator.
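As with the DCGAN generator, the autoencoding generator can be instantiated directly from these arguments; a brief sketch with illustrative values:

import torch
from torchgan.models import AutoEncodingGenerator

netG = AutoEncodingGenerator(
    encoding_dims=100,
    out_size=32,
    out_channels=3,
    step_channels=64,
    scale_factor=2,
)
fake = netG(torch.randn(8, 100))   # expected shape: (8, 3, 32, 32)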
github torchgan / torchgan / torchgan / models / model.py
def __init__(self, encoding_dims, label_type="none"):
        super(Generator, self).__init__()
        self.encoding_dims = encoding_dims
        self.label_type = label_type
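This base constructor is what a custom generator invokes through super(); besides storing encoding_dims and label_type, the base class provides the default noise sampler used by the trainer and visualizer snippets above. A minimal custom subclass might look like the following sketch, with arbitrary layer sizes:

import torch
import torch.nn as nn
from torchgan.models import Generator

class TinyGenerator(Generator):
    def __init__(self, encoding_dims=100, out_features=784):
        super(TinyGenerator, self).__init__(encoding_dims, label_type="none")
        self.net = nn.Sequential(
            nn.Linear(encoding_dims, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, out_features),
            nn.Tanh(),
        )

    def forward(self, z):
        return self.net(z)

netG = TinyGenerator()
samples = netG(*netG.sampler(4, torch.device("cpu")))   # inherited noise sampler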