How to use the torchgan.models.dcgan.DCGANDiscriminator class in torchgan

To help you get started, we’ve selected a few torchgan examples, based on popular ways it is used in public projects.

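Before diving into the snippets below, here is a minimal sketch of constructing a DCGANDiscriminator directly and scoring a batch of images. The constructor arguments mirror the dcgan.py snippet further down; the exact shape of the forward output is an assumption and may vary slightly between torchgan versions.

import torch
from torchgan.models.dcgan import DCGANDiscriminator

# 64x64 RGB inputs; feature widths step up in multiples of step_channels
discriminator = DCGANDiscriminator(in_size=64, in_channels=3, step_channels=64)

images = torch.randn(16, 3, 64, 64)  # dummy batch of 16 images
scores = discriminator(images)       # one realness score per image (assumed shape: (16,))
print(scores.shape)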

github torchgan / torchgan / torchgan / models / conditional.py View on Github
        Returns:
            A 4D torch.Tensor of the generated Images conditioned on ``y``.
        """
        y_emb = self.label_embeddings(y.type(torch.LongTensor).to(y.device))
        return super(ConditionalGANGenerator, self).forward(
            torch.cat((z, y_emb), dim=1)
        )

    def sampler(self, sample_size, device):
        return [
            torch.randn(sample_size, self.encoding_dims, device=device),
            torch.randint(0, self.num_classes, (sample_size,), device=device),
        ]


class ConditionalGANDiscriminator(DCGANDiscriminator):
    r"""Condititional GAN (CGAN) discriminator based on a DCGAN model from
    `"Conditional Generative Adversarial Nets
    by Mirza et. al. " `_ paper

    Args:
        num_classes (int): Total classes present in the dataset.
        in_size (int, optional): Height and width of the input image to be evaluated. Must be at
            least 16 and should be an exact power of 2.
        in_channels (int, optional): Number of channels in the input Tensor.
        step_channels (int, optional): Number of channels in multiples of which the DCGAN steps up
            the convolutional features. The step up is done as dim :math:`z \rightarrow d \rightarrow
            2 \times d \rightarrow 4 \times d \rightarrow 8 \times d` where :math:`d` = step_channels.
        batchnorm (bool, optional): If True, use batch normalization in the convolutional layers of
            the discriminator.
        nonlinearity (torch.nn.Module, optional): Nonlinearity to be used in the intermediate
            convolutional layers. Defaults to ``LeakyReLU(0.2)`` when None is passed.
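
Below is a minimal sketch of how the two classes in this snippet fit together, assuming a 10-class dataset and the default constructor settings; the discriminator's (image, label) call signature mirrors the generator's forward shown above and is an assumption about the current torchgan API.

import torch
from torchgan.models import ConditionalGANGenerator, ConditionalGANDiscriminator

num_classes = 10
generator = ConditionalGANGenerator(num_classes=num_classes)
discriminator = ConditionalGANDiscriminator(num_classes=num_classes)

# sampler() returns the noise vector and class labels that forward() expects
z, y = generator.sampler(sample_size=8, device=torch.device("cpu"))
fake_images = generator(z, y)           # 4D tensor of images conditioned on y
scores = discriminator(fake_images, y)  # assumed (image, label) signature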

github torchgan / torchgan / torchgan / models / infogan.py View on Github
            last_nonlinearity,
        )
        self.encoding_dims = encoding_dims
        self.dim_cont = dim_cont
        self.dim_dis = dim_dis

    def forward(self, z, c_dis=None, c_cont=None):
        z_cat = (
            torch.cat([z, c_dis, c_cont], dim=1)
            if c_dis is not None and c_cont is not None
            else z
        )
        return super(InfoGANGenerator, self).forward(z_cat)


class InfoGANDiscriminator(DCGANDiscriminator):
    r"""Discriminator for InfoGAN based on the Deep Convolutional GAN (DCGAN) architecture, from
    `"InfoGAN : Interpretable Representation Learning With Information Maximizing Generative Aversarial Nets
    by Chen et. al. " `_ paper

    The approximate conditional probability distribution over the latent code :math:`Q(c|x)` is chosen to be a
    factored Gaussian for the continuous latent code and a categorical distribution for the discrete latent code.

    Args:
        dim_dis (int): Dimension of the discrete latent code sampled from the prior.
        dim_cont (int): Dimension of the continuous latent code sampled from the prior.
        encoding_dims (int, optional): Dimension of the encoding vector sampled from the noise prior.
        in_size (int, optional): Height and width of the input image to be evaluated. Must be at
            least 16 and should be an exact power of 2.
        in_channels (int, optional): Number of channels in the input Tensor.
        step_channels (int, optional): Number of channels in multiples of which the DCGAN steps up
            the convolutional features. The step up is done as dim :math:`z \rightarrow d \rightarrow
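
The following is a minimal sketch of driving the InfoGAN generator with both latent codes. It assumes 10 discrete categories, 2 continuous dimensions, that dim_dis and dim_cont are accepted as constructor keywords as in the discriminator's Args above, and that the discrete code is passed one-hot (the torch.cat in forward above requires c_dis and c_cont to be 2D float tensors).

import torch
import torch.nn.functional as F
from torchgan.models import InfoGANGenerator

generator = InfoGANGenerator(dim_dis=10, dim_cont=2)

batch_size = 8
z = torch.randn(batch_size, generator.encoding_dims)
# one-hot discrete code and a uniform continuous code in [-1, 1] (assumed priors)
c_dis = F.one_hot(torch.randint(0, 10, (batch_size,)), num_classes=10).float()
c_cont = torch.rand(batch_size, 2) * 2 - 1

fake_images = generator(z, c_dis, c_cont)  # 4D tensor of generated images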

github torchgan / torchgan / torchgan / models / acgan.py View on Github
        y (torch.Tensor): The labels corresponding to the encoding ``z``.

        Returns:
            A 4D torch.Tensor of the generated Images conditioned on ``y``.
        """
        y_emb = self.label_embeddings(y.type(torch.LongTensor).to(y.device))
        return super(ACGANGenerator, self).forward(torch.mul(y_emb, z))

    def sampler(self, sample_size, device):
        return [
            torch.randn(sample_size, self.encoding_dims, device=device),
            torch.randint(0, self.num_classes, (sample_size,), device=device),
        ]


class ACGANDiscriminator(DCGANDiscriminator):
    r"""Auxiliary Classifier GAN (ACGAN) discriminator based on a DCGAN model from
    `"Conditional Image Synthesis With Auxiliary Classifier GANs
    by Odena et. al. " `_ paper

    Args:
        num_classes (int): Total classes present in the dataset.
        in_size (int, optional): Height and width of the input image to be evaluated. Must be at
            least 16 and should be an exact power of 2.
        in_channels (int, optional): Number of channels in the input Tensor.
        step_channels (int, optional): Number of channels in multiples of which the DCGAN steps up
            the convolutional features. The step up is done as dim :math:`z \rightarrow d \rightarrow
            2 \times d \rightarrow 4 \times d \rightarrow 8 \times d` where :math:`d` = step_channels.
        batchnorm (bool, optional): If True, use batch normalization in the convolutional layers of
            the discriminator.
        nonlinearity (torch.nn.Module, optional): Nonlinearity to be used in the intermediate
            convolutional layers. Defaults to ``LeakyReLU(0.2)`` when None is passed.
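
As with the conditional example earlier, here is a minimal sketch pairing the ACGAN generator and discriminator from this snippet, assuming a 10-class dataset and default settings. Note that the generator multiplies the label embedding into the noise vector (the torch.mul above) rather than concatenating it; what the discriminator returns (realness score, class logits, or both) depends on the torchgan version, so the last call is only an illustration.

import torch
from torchgan.models import ACGANGenerator, ACGANDiscriminator

num_classes = 10
generator = ACGANGenerator(num_classes=num_classes)
discriminator = ACGANDiscriminator(num_classes=num_classes)

# reuse the generator's own sampler for the noise vector and the target labels
z, y = generator.sampler(sample_size=8, device=torch.device("cpu"))
fake_images = generator(z, y)

out = discriminator(fake_images)  # realness/classification output; exact form is version-dependent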

github torchgan / torchgan / torchgan / models / dcgan.py View on Github
    def __init__(
        self,
        in_size=32,
        in_channels=3,
        step_channels=64,
        batchnorm=True,
        nonlinearity=None,
        last_nonlinearity=None,
        label_type="none",
    ):
        super(DCGANDiscriminator, self).__init__(in_channels, label_type)
        if in_size < 16 or ceil(log2(in_size)) != log2(in_size):
            raise Exception(
                "Input Image Size must be at least 16*16 and an exact power of 2"
            )
        num_repeats = in_size.bit_length() - 4
        self.n = step_channels
        use_bias = not batchnorm
        nl = nn.LeakyReLU(0.2) if nonlinearity is None else nonlinearity
        last_nl = nn.LeakyReLU(0.2) if last_nonlinearity is None else last_nonlinearity
        d = self.n
        model = [nn.Sequential(nn.Conv2d(self.input_dims, d, 4, 2, 1, bias=True), nl)]
        if batchnorm is True:
            for i in range(num_repeats):
                model.append(
                    nn.Sequential(
                        nn.Conv2d(d, d * 2, 4, 2, 1, bias=use_bias),