How to use the torch.nn.Module class in PyTorch

To help you get started, we’ve selected a few examples showing how torch.nn.Module is used in popular open-source projects.

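All of the examples below follow the same basic pattern: subclass nn.Module, register layers as attributes in __init__, and implement the computation in forward. A minimal sketch of that pattern (names and sizes here are illustrative, not taken from any of the projects below):

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(8, 16)  # layers assigned as attributes are
        self.fc2 = nn.Linear(16, 2)  # registered automatically by nn.Module

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

net = TinyNet()
out = net(torch.randn(4, 8))  # calling the module dispatches to forward()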

github eriklindernoren / PyTorch-GAN / implementations / ebgan / ebgan.py
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),  # note: the second positional argument is eps, not momentum
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, noise):
        out = self.l1(noise)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img
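
# Usage sketch (hypothetical batch size; opt comes from the file's argparse setup):
#   z = torch.randn(16, opt.latent_dim)
#   imgs = Generator()(z)  # -> (16, opt.channels, opt.img_size, opt.img_size)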


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        # Downsampling
        self.down = nn.Sequential(nn.Conv2d(opt.channels, 64, 3, 2, 1), nn.ReLU())
        # Fully-connected layers
        self.down_size = opt.img_size // 2
        down_dim = 64 * (opt.img_size // 2) ** 2

        self.embedding = nn.Linear(down_dim, 32)

        self.fc = nn.Sequential(
            nn.BatchNorm1d(32, 0.8),
            nn.ReLU(inplace=True),
            nn.Linear(32, down_dim),
            nn.BatchNorm1d(down_dim),
            nn.ReLU(inplace=True),
        )
github yossigandelsman / DoubleDIP / net / layers.py
class VarianceLayer(nn.Module):
    # TODO: make it pad-able
    def __init__(self, patch_size=5, channels=1):
        self.patch_size = patch_size
        super(VarianceLayer, self).__init__()
        mean_mask = np.ones((channels, channels, patch_size, patch_size)) / (patch_size * patch_size)
        self.mean_mask = nn.Parameter(data=torch.cuda.FloatTensor(mean_mask), requires_grad=False)
        mask = np.zeros((channels, channels, patch_size, patch_size))
        mask[:, :, patch_size // 2, patch_size // 2] = 1.
        self.ones_mask = nn.Parameter(data=torch.cuda.FloatTensor(mask), requires_grad=False)

    def forward(self, x):
        Ex_E = F.conv2d(x, self.ones_mask) - F.conv2d(x, self.mean_mask)
        return F.conv2d((Ex_E) ** 2, self.mean_mask)


class CovarianceLayer(nn.Module):
    def __init__(self, patch_size=5, channels=1):
        self.patch_size = patch_size
        super(CovarianceLayer, self).__init__()
        mean_mask = np.ones((channels, channels, patch_size, patch_size)) / (patch_size * patch_size)
        self.mean_mask = nn.Parameter(data=torch.cuda.FloatTensor(mean_mask), requires_grad=False)
        mask = np.zeros((channels, channels, patch_size, patch_size))
        mask[:, :, patch_size // 2, patch_size // 2] = 1.
        self.ones_mask = nn.Parameter(data=torch.cuda.FloatTensor(mask), requires_grad=False)

    def forward(self, x, y):
        return F.conv2d((F.conv2d(x, self.ones_mask) - F.conv2d(x, self.mean_mask)) *
                        (F.conv2d(y, self.ones_mask) - F.conv2d(y, self.mean_mask)), self.mean_mask)

class GrayscaleLayer(nn.Module):
    def __init__(self):
        super(GrayscaleLayer, self).__init__()

    def forward(self, x):
        return torch.mean(x, 1, keepdim=True)  # average over the channel dimension
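A hypothetical usage sketch for the layers above; they require a CUDA device, since the fixed masks are created with torch.cuda.FloatTensor:

var_layer = VarianceLayer(patch_size=5, channels=1)
img = torch.randn(1, 1, 64, 64, device="cuda")
local_var = var_layer(img)  # (1, 1, 56, 56): each of the two valid 5x5 convs crops 4 pixels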
github drscotthawley / signaltrain / signaltrain / transforms.py
    def scott_method(freq_subbands, window_size, w):
        # Generate Matrices
        kvec = np.arange(0, freq_subbands) + 0.5
        nvec = np.arange(0, window_size) + 0.5 + freq_subbands/2
        cos_an = w * np.cos(np.pi / freq_subbands * kvec[np.newaxis].T * nvec) * np.sqrt(2. / freq_subbands)
        return cos_an

    method = 'scott'
    if method == 'scott':
        cos_an = scott_method(freq_subbands, window_size, w)
    else:
        cos_an = orig_method(freq_subbands, window_size, w)

    return cos_an.astype(np.float32)


class Analysis(nn.Module):
    """
        Class for building the analysis part
        of the Front-End ('Fe').
    """
    def __init__(self, ft_size=1024, w_size=2048, hop_size=1024, shrink=False):
        super(Analysis, self).__init__()

        # Parameters
        self.batch_size = None
        self.time_domain_samples = None
        self.sz = ft_size
        self.wsz = w_size
        self.hop = hop_size
        self.shrink = shrink

        # Activation Function
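The cosine analysis matrix built near the top of this excerpt has one row per sub-band and one column per window sample; a standalone shape check with illustrative sizes:

import numpy as np

freq_subbands, window_size = 1024, 2048
w = np.ones(window_size)  # stand-in for the real analysis window
kvec = np.arange(0, freq_subbands) + 0.5
nvec = np.arange(0, window_size) + 0.5 + freq_subbands / 2
cos_an = w * np.cos(np.pi / freq_subbands * kvec[np.newaxis].T * nvec) * np.sqrt(2. / freq_subbands)
print(cos_an.shape)  # (1024, 2048)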
github pytorch / botorch / botorch / models / model.py
r"""
Abstract base module for all BoTorch models.
"""

from abc import ABC, abstractmethod
from typing import Any, List, Optional

from torch import Tensor
from torch.nn import Module

from .. import settings
from ..posteriors import Posterior
from ..sampling.samplers import MCSampler


class Model(Module, ABC):
    r"""Abstract base class for BoTorch models."""

    @abstractmethod
    def posterior(
        self,
        X: Tensor,
        output_indices: Optional[List[int]] = None,
        observation_noise: bool = False,
        **kwargs: Any,
    ) -> Posterior:
        r"""Computes the posterior over model outputs at the provided points.

        Args:
            X: A `b x q x d`-dim Tensor, where `d` is the dimension of the
                feature space, `q` is the number of points considered jointly,
                and `b` is the batch dimension.
        """
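For concreteness, a tensor matching the documented b x q x d convention (sizes arbitrary):

import torch

X = torch.rand(5, 2, 3)  # b=5 batches, q=2 jointly considered points, d=3 features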
github castorini / hedwig / vdpwi / model.py
        self.output = nn.Linear(n_maps, n_labels)
        self.input_len = None

    def forward(self, x):
        x = F.relu(self.conv0(x))
        old_x = x
        for i, conv in enumerate(self.convs):
            x = F.relu(conv(x))
            if i % 2 == 1:
                x += old_x
                old_x = x
        x = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
        return F.log_softmax(self.output(x), 1)


class VDPWIConvNet(nn.Module):

    def __init__(self, config):
        super().__init__()
        def make_conv(n_in, n_out):
            conv = nn.Conv2d(n_in, n_out, 3, padding=1)
            conv.bias.data.zero_()
            nn.init.xavier_normal_(conv.weight)
            return conv
        self.conv1 = make_conv(12, 128)
        self.conv2 = make_conv(128, 164)
        self.conv3 = make_conv(164, 192)
        self.conv4 = make_conv(192, 192)
        self.conv5 = make_conv(192, 128)
        self.maxpool2 = nn.MaxPool2d(2, ceil_mode=True)
        self.dnn = nn.Linear(128, 128)
        self.output = nn.Linear(128, config['n_labels'])
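Because the residual classifier's forward near the top of this excerpt returns F.log_softmax outputs, the matching training loss is nn.NLLLoss; a minimal illustration with stand-in values:

import torch
import torch.nn as nn

log_probs = torch.randn(4, 3).log_softmax(dim=1)  # stand-in for 4 samples, 3 classes
targets = torch.tensor([0, 2, 1, 1])
loss = nn.NLLLoss()(log_probs, targets)  # same as CrossEntropyLoss on raw logits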
github kenshohara / 3D-ResNets-PyTorch / models / densenet.py
class _Transition(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm3d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv',
                        nn.Conv3d(
                            num_input_features,
                            num_output_features,
                            kernel_size=1,
                            stride=1,
                            bias=False))
        self.add_module('pool', nn.AvgPool3d(kernel_size=2, stride=2))
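
# Usage sketch for _Transition (hypothetical shapes): the 1x1x1 conv reduces the
# channel count and the stride-2 average pool halves depth, height, and width:
#   t = _Transition(num_input_features=64, num_output_features=32)
#   t(torch.randn(1, 64, 8, 28, 28)).shape  # -> torch.Size([1, 32, 4, 14, 14])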


class DenseNet(nn.Module):
    """Densenet-BC model class
    Args:
        growth_rate (int) - how many filters to add each layer (k in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for the number of bottleneck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self,
                 sample_size,
                 sample_duration,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
github ssp573 / Contrastive-Predictive-Coding / resnet.py
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
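A construction sketch for the class above, assuming the Bottleneck block whose tail is excerpted at the top of this snippet (with the usual expansion = 4):

net = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=10)  # ResNet-50-style configuration
print(sum(p.numel() for p in net.parameters()))  # quick parameter-count sanity check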
github naver / claf / claf / config / factory / optimizer.py
    def create(self, model):
        if not isinstance(model, torch.nn.Module):
            raise ValueError("optimizer model must be an instance of torch.nn.Module.")

        if getattr(model, "bert", None):  # use bert or not
            model_parameters = self._group_parameters_for_bert(model)
        else:
            model_parameters = [param for param in model.parameters() if param.requires_grad]

        optimizer = get_optimizer_by_name(self.op_type)(model_parameters, **self.optimizer_params)
        op_dict = {"optimizer": optimizer}

        # learning_rate_scheduler
        if self.lr_scheduler_type:
            self.lr_scheduler_config["optimizer"] = op_dict["optimizer"]
            lr_scheduler = self.lr_schedulers[self.lr_scheduler_type](**self.lr_scheduler_config)

            if self.lr_scheduler_type == "reduce_on_plateau":
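The non-BERT branch above is plain PyTorch; a self-contained equivalent of the parameter filtering, optimizer construction, and a reduce-on-plateau scheduler (model and hyperparameters are illustrative):

import torch

model = torch.nn.Linear(10, 2)
model_parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(model_parameters, lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)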
github vsimkus / voice-conversion / models / common_networks.py
            out = self.layer_dict['conv_{}'.format(i)](out)

        out = self.final_conv(out)

        return out

    def reset_parameters(self):
        """
        Re-initializes the network's parameters
        """
        for item in self.layer_dict.children():
            item.reset_parameters()
        
        self.final_conv.reset_parameters()

class QuantisedInputModuleWrapper(nn.Module):
    """
    Wrapper for any module that should take quantised (mu-law encoded) inputs
    """
    def __init__(self, num_input_quantization_channels, model):
        super(QuantisedInputModuleWrapper, self).__init__()
        print('Building Quantised input module.')
        self.d2a = Digital2Analog(num_input_quantization_channels)
        self.model = model
    
    def forward(self, digital_input, speaker):
        analog_input = self.d2a(digital_input)
        return self.model(analog_input, speaker)
    
    def reset_parameters(self):
        self.d2a.reset_parameters()
        self.model.reset_parameters()
github soeaver / pytorch-priv / models / imagenet / se_densenet.py
class Transition(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1,
                               bias=False)
        self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        out = self.bn(x)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.avgpool(out)
        return out


class SE_DenseNet(nn.Module):
    def __init__(self, growthRate=32, head7x7=True, dropRate=0,
                 increasingRate=1, compressionRate=2, layers=(6, 12, 24, 16), num_classes=1000):
        """ Constructor
        Args:
            layers: config of layers, e.g., (6, 12, 24, 16)
            num_classes: number of classes
        """
        super(SE_DenseNet, self).__init__()

        block = SEDenseBottleneck
        self.growthRate = growthRate
        self.dropRate = dropRate
        self.increasingRate = increasingRate
        headplanes = growthRate * pow(increasingRate, 2)
        self.inplanes = headplanes * 2  # default 64