How to use the torchgan.utils.reduce function in torchgan

To help you get started, we've selected a few examples that show popular ways `torchgan.utils.reduce` is used in public projects.
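All of the snippets below come from the torchgan source tree and assume its module-level imports: `torch`, `torch.nn.functional` (as `F`), `torch.autograd` (as `autograd`), and `reduce` itself from `torchgan.utils`. For reference, `reduce` simply collapses a tensor according to a reduction mode; a minimal sketch consistent with how the snippets call it (mirroring `torchgan/utils.py`) is:

import torch

def reduce(x, reduction=None):
    # Collapse x to a scalar according to the reduction mode; any other
    # value (including None) returns the tensor unchanged.
    if reduction == "mean":
        return torch.mean(x)
    elif reduction == "sum":
        return torch.sum(x)
    return x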


From torchgan/torchgan (torchgan/metrics/classifierscore.py):
def calculate_score(self, x):
    r"""
    Computes the Inception Score for the input.

    Args:
        x (torch.Tensor): Classifier output (logits) for a batch of images.

    Returns:
        The Inception Score.
    """
    p = F.softmax(x, dim=1)  # per-image class probabilities p(y|x)
    q = torch.mean(p, dim=0)  # marginal class distribution p(y)
    kl = torch.sum(p * (F.log_softmax(x, dim=1) - torch.log(q)), dim=1)
    return torch.exp(reduce(kl, "mean")).data
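A hypothetical call, assuming `metric` is an instance of the class defining `calculate_score` and `x` holds classifier logits of shape `(N, num_classes)`:

# Hypothetical usage: random logits for 16 images over 10 classes.
logits = torch.randn(16, 10)
score = metric.calculate_score(logits)  # scalar tensor: exp(mean per-image KL)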
From torchgan/torchgan (torchgan/losses/functional.py):
def dragan_gradient_penalty(interpolate, d_interpolate, k=1.0, reduction="mean"):
    grad_outputs = torch.ones_like(d_interpolate)
    gradients = autograd.grad(
        outputs=d_interpolate,
        inputs=interpolate,
        grad_outputs=grad_outputs,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
        allow_unused=True,
    )[0]

    gradient_penalty = (gradients.norm(2) - k) ** 2
    return reduce(gradient_penalty, reduction)
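A sketch of how this could be called during discriminator training. `real_images` and `discriminator` are assumed names; the perturbation follows the DRAGAN recipe of jittering real samples before measuring the gradient norm:

# Hypothetical usage: perturb real samples, then penalize gradient norm deviations from k.
interpolate = real_images + 0.5 * real_images.std() * torch.rand_like(real_images)
interpolate.requires_grad_(True)
d_interpolate = discriminator(interpolate)
penalty = dragan_gradient_penalty(interpolate, d_interpolate, k=1.0)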
From torchgan/torchgan (torchgan/losses/functional.py):
def wasserstein_discriminator_loss(fx, fgz, reduction="mean"):
    return reduce(fgz - fx, reduction)
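This is the WGAN critic objective E[D(G(z))] - E[D(x)], so `fx` are critic scores on real samples and `fgz` scores on generated ones. A hypothetical call:

# Hypothetical usage: fx = D(real), fgz = D(G(z)), both of shape (N,).
loss = wasserstein_discriminator_loss(fx, fgz)  # scalar under the default reduction="mean"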
From torchgan/torchgan (torchgan/losses/functional.py):
def energy_based_discriminator_loss(dx, dgz, margin, reduction="mean"):
    return reduce(dx + F.relu(-dgz + margin), reduction)
From torchgan/torchgan (torchgan/losses/functional.py):
def energy_based_generator_loss(dgz, reduction="mean"):
    return reduce(dgz, reduction)
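Taken together, the two functions above implement the EBGAN objectives: the discriminator pays the energy `dx` on real samples plus a hinge `max(0, margin - dgz)` on fake ones, while the generator simply minimizes the energy of its samples. A hypothetical call, assuming `dx` and `dgz` are reconstruction energies from an autoencoder-style discriminator:

# Hypothetical usage with autoencoder reconstruction energies.
d_loss = energy_based_discriminator_loss(dx, dgz, margin=1.0)
g_loss = energy_based_generator_loss(dgz)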
From torchgan/torchgan (torchgan/losses/functional.py):
def mutual_information_penalty(c_dis, c_cont, dist_dis, dist_cont, reduction="mean"):
    log_probs = torch.Tensor(
        [
            torch.mean(dist.log_prob(c))
            for dist, c in zip((dist_dis, dist_cont), (c_dis, c_cont))
        ]
    )
    return reduce(-1.0 * log_probs, reduction)
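This is the InfoGAN mutual-information term: it scores the sampled discrete and continuous latent codes under the distributions predicted by the recognition network and penalizes low log-likelihood. A sketch, where `q_logits`, `q_mean`, and `q_std` are assumed outputs of that network:

# Hypothetical usage: build torch.distributions objects from the Q-network outputs.
dist_dis = torch.distributions.OneHotCategorical(logits=q_logits)
dist_cont = torch.distributions.Normal(q_mean, q_std)
penalty = mutual_information_penalty(c_dis, c_cont, dist_dis, dist_cont)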
From torchgan/torchgan (torchgan/losses/functional.py):
def wasserstein_gradient_penalty(interpolate, d_interpolate, reduction="mean"):
    grad_outputs = torch.ones_like(d_interpolate)
    gradients = autograd.grad(
        outputs=d_interpolate,
        inputs=interpolate,
        grad_outputs=grad_outputs,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]

    gradient_penalty = (gradients.norm(2) - 1) ** 2
    return reduce(gradient_penalty, reduction)
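Unlike the DRAGAN variant above, WGAN-GP evaluates the penalty at points interpolated between real and fake samples. A sketch, with `real_images`, `fake_images`, and `discriminator` as assumed names:

# Hypothetical usage: interpolate between real and fake samples.
eps = torch.rand(real_images.size(0), 1, 1, 1, device=real_images.device)
interpolate = (eps * real_images + (1.0 - eps) * fake_images).detach().requires_grad_(True)
d_interpolate = discriminator(interpolate)
penalty = wasserstein_gradient_penalty(interpolate, d_interpolate)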
From torchgan/torchgan (torchgan/losses/functional.py):
def boundary_equilibrium_discriminator_loss(dx, dgz, k, reduction="mean"):
    # NOTE(avik-pal): This is a bit peculiar compared to the other losses as it must return 3 values.
    loss_real = reduce(dx, reduction)
    loss_fake = reduce(dgz, reduction)
    loss_total = loss_real - k * loss_fake
    return loss_total, loss_real, loss_fake
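Here `dx` and `dgz` are reconstruction losses from BEGAN's autoencoder discriminator, and `k` is the equilibrium balance term; all three values are returned because the trainer needs `loss_real` and `loss_fake` to update `k`. A hypothetical training-step fragment, with `gamma` and `lambda_k` as assumed BEGAN hyperparameters:

# Hypothetical usage: update the balance term k after each discriminator step.
loss_total, loss_real, loss_fake = boundary_equilibrium_discriminator_loss(dx, dgz, k)
k = min(max(k + lambda_k * (gamma * loss_real.item() - loss_fake.item()), 0.0), 1.0)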
github torchgan / torchgan / torchgan / losses / functional.py View on Github external
def least_squares_generator_loss(dgz, c=1.0, reduction="mean"):
    return 0.5 * reduce((dgz - c) ** 2, reduction)
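This is the LSGAN generator objective 0.5 * E[(D(G(z)) - c)^2], where `c` is the label the generator wants the discriminator to assign to its samples. A hypothetical call:

# Hypothetical usage: push D(G(z)) toward the "real" label c = 1.0.
g_loss = least_squares_generator_loss(dgz)  # defaults: c=1.0, reduction="mean"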