How to use torch - 10 common examples

To help you get started, we've selected ten torch examples based on popular ways the library is used in public projects. Each excerpt links back to its source on GitHub; several are cut off mid-function, and a short standalone sketch after each one distills the torch pattern it shows.

github HazyResearch / learning-circuits / reconstruct_matrix.py (view on GitHub)
# Imports this excerpt relies on (validate() is defined elsewhere in the same file):
import argparse
import torch


def main():
    print('Initializing...')

    # TODO..
    parser = argparse.ArgumentParser('Butterfly matrix recovery')
    parser.add_argument('--method', type=str, choices=['none', 'butterfly', 'lowrank', 'sparse'], default='none', help='Compression method')
    parser.add_argument('--steps', type=int, default=25000, help='Number of training steps')
    args = parser.parse_args()

    state_dict = torch.load('model_optimizer.pth')['model']
    print('Loaded model')

    if args.method == 'none':
        print('Validating...')
        print(validate(state_dict))
        exit()

    for layer in range(4, 5):
        for block in range(2):
            for conv in range(1, 3):
                weight_name = f'layer{layer}.{block}.conv{conv}.weight'
                weight = state_dict[weight_name]
                print('Weight:', weight_name)
                for mat_id in range(weight[0,0].numel()):
                    mat_id_x = mat_id // weight[0,0,0].numel()
                    mat_id_y = mat_id % weight[0,0,0].numel()
                    # ... excerpt truncated; see the full file on GitHub
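
The torch pattern at the heart of this example is checkpoint loading: torch.load deserializes a saved dictionary, and individual weight tensors are then looked up in the state dict by name. A minimal, self-contained sketch of that round trip (the file name and layer are made up for illustration):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save({'model': model.state_dict()}, 'checkpoint.pth')   # save a dict of named tensors

state_dict = torch.load('checkpoint.pth')['model']            # load it back
weight = state_dict['weight']                                 # index a tensor by parameter name
print(weight.shape)                                           # torch.Size([2, 4])
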
github WenchaoliuMUC / Detection-of-Multiclass-Objects-in-Optical-Remote-Sensing-Images / deform_darknet.py (view on GitHub)
                # excerpt picks up mid-loop: each parsed cfg block appends its module and filter count
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size - 1) // 2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module('conv{0}'.format(conv_id),
                                     nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
                    model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
                else:
                    model.add_module('conv{0}'.format(conv_id),
                                     nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
                elif activation == 'relu':
                    model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'trans_conv':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                # ... excerpt truncated; see the full file on GitHub
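
This excerpt builds each stage as an nn.Sequential and registers layers under explicit names with add_module, which is convenient when names are generated in a loop. A minimal sketch of the same pattern with invented sizes:

import torch
import torch.nn as nn

model = nn.Sequential()
model.add_module('conv1', nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False))
model.add_module('bn1', nn.BatchNorm2d(16))
model.add_module('leaky1', nn.LeakyReLU(0.1, inplace=True))

x = torch.randn(1, 3, 32, 32)
print(model(x).shape)   # torch.Size([1, 16, 32, 32])
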
github blackfeather-wang / ISDA-for-Deep-Networks / networks / shake_shake.py (view on GitHub)
            # excerpt picks up inside forward() of a custom autograd Function:
            # sample a random mixing coefficient per example during training
            alpha = torch.cuda.FloatTensor(x1.size(0)).uniform_()
            alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
        else:
            alpha = 0.5
        return alpha * x1 + (1 - alpha) * x2

    @staticmethod
    def backward(ctx, grad_output):
        beta = torch.cuda.FloatTensor(grad_output.size(0)).uniform_()
        beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
            beta = Variable(beta)  # Variable is the legacy (pre-0.4) autograd wrapper; plain tensors suffice in modern torch

        return beta * grad_output, (1 - beta) * grad_output, None


class Shortcut(nn.Module):

    def __init__(self, in_ch, out_ch, stride):
        super(Shortcut, self).__init__()
        self.stride = stride
        self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1,
                               stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1,
                               stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(out_ch)

    def forward(self, x):
        h = F.relu(x)

        h1 = F.avg_pool2d(h, 1, self.stride)
        h1 = self.conv1(h1)
        # ... excerpt truncated; see the full file on GitHub
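
The backward method above returns one gradient per input of forward (plus None for any non-tensor argument). Here is a minimal custom torch.autograd.Function illustrating that contract; ScaleBy is a made-up example, and it uses the modern static-method API rather than the legacy Variable wrapper:

import torch

class ScaleBy(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha        # stash the constant for backward
        return x * alpha

    @staticmethod
    def backward(ctx, grad_output):
        # one gradient per forward() input; None for the non-tensor alpha
        return grad_output * ctx.alpha, None

x = torch.randn(3, requires_grad=True)
ScaleBy.apply(x, 0.5).sum().backward()
print(x.grad)   # every entry equals 0.5
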
github StanfordVL / GibsonEnvV2 / gibson2 / learn / completion.py (view on GitHub)
                # excerpt picks up inside an nn.Sequential decoder; norm, nf, and alpha are defined earlier in the file
                norm(nf * 4, momentum=alpha),
                nn.LeakyReLU(0.1),

                nn.ConvTranspose2d(nf * 4, nf, kernel_size=4, stride=2, padding=1),
                norm(nf, momentum=alpha),
                nn.LeakyReLU(0.1),

                nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1),
                norm(nf, momentum=alpha),
                nn.LeakyReLU(0.1),

                nn.ConvTranspose2d(nf, nf // 4, kernel_size=4, stride=2, padding=1),
                norm(nf // 4, momentum=alpha),
                nn.LeakyReLU(0.1),

                nn.Conv2d(nf // 4, nf // 4, kernel_size=3, stride=1, padding=1),
                norm(nf // 4, momentum=alpha),
                nn.LeakyReLU(0.1),
                nn.Conv2d(nf // 4, 3, kernel_size=3, stride=1, padding=1),
            )
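
The decoder alternates nn.ConvTranspose2d(kernel_size=4, stride=2, padding=1) blocks, a combination that exactly doubles the spatial resolution, with stride-1 convolutions that preserve it. A quick shape check of that upsampling step (channel counts are arbitrary):

import torch
import torch.nn as nn

up = nn.ConvTranspose2d(16, 8, kernel_size=4, stride=2, padding=1)   # doubles H and W
x = torch.randn(1, 16, 32, 32)
print(up(x).shape)   # torch.Size([1, 8, 64, 64])
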
github tribhuvanesh / knockoffnets / knockoff / models / cifar / wrn.py (view on GitHub)
        # excerpt from a WideResNet __init__; depth, nChannels, dropRate, and num_classes come from the constructor
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
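
The closing loop walks self.modules() and initializes each layer by type: He-style normal init for convolutions, unit scale and zero shift for batch norm. The same logic as a standalone helper applied to a toy network (the helper name is our own):

import math
import torch.nn as nn

def init_weights(module):
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            # He/Kaiming-style init based on fan-out
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
init_weights(net)
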
github eriklindernoren / PyTorch-GAN / implementations / ebgan / ebgan.py (view on GitHub)
            # excerpt picks up inside the Generator's conv_blocks nn.Sequential
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),  # note: the second positional argument is eps, not momentum
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, noise):
        out = self.l1(noise)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        # Downsampling
        self.down = nn.Sequential(nn.Conv2d(opt.channels, 64, 3, 2, 1), nn.ReLU())
        # Fully-connected layers
        self.down_size = opt.img_size // 2
        down_dim = 64 * (opt.img_size // 2) ** 2

        self.embedding = nn.Linear(down_dim, 32)

        self.fc = nn.Sequential(
            nn.BatchNorm1d(32, 0.8),
            nn.ReLU(inplace=True),
            nn.Linear(32, down_dim),
            nn.BatchNorm1d(down_dim),
            # ... excerpt truncated; see the full file on GitHub
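
In the Generator's forward, the output of the fully-connected l1 layer is reshaped with view into a (B, C, H, W) feature map before entering the conv stack. A minimal sketch of that Linear-then-view handoff with made-up sizes:

import torch
import torch.nn as nn

init_size = 8                                  # hypothetical spatial size
l1 = nn.Linear(100, 128 * init_size ** 2)

noise = torch.randn(4, 100)
out = l1(noise)
out = out.view(out.shape[0], 128, init_size, init_size)   # (B, C, H, W) for the conv blocks
print(out.shape)   # torch.Size([4, 128, 8, 8])
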
github yossigandelsman / DoubleDIP / net / layers.py (view on GitHub)
class VarianceLayer(nn.Module):  # class line restored from the super() call below
    # TODO: make it pad-able
    def __init__(self, patch_size=5, channels=1):
        self.patch_size = patch_size
        super(VarianceLayer, self).__init__()
        mean_mask = np.ones((channels, channels, patch_size, patch_size)) / (patch_size * patch_size)
        self.mean_mask = nn.Parameter(data=torch.cuda.FloatTensor(mean_mask), requires_grad=False)
        mask = np.zeros((channels, channels, patch_size, patch_size))
        mask[:, :, patch_size // 2, patch_size // 2] = 1.
        self.ones_mask = nn.Parameter(data=torch.cuda.FloatTensor(mask), requires_grad=False)

    def forward(self, x):
        Ex_E = F.conv2d(x, self.ones_mask) - F.conv2d(x, self.mean_mask)
        return F.conv2d((Ex_E) ** 2, self.mean_mask)


class CovarianceLayer(nn.Module):
    def __init__(self, patch_size=5, channels=1):
        self.patch_size = patch_size
        super(CovarianceLayer, self).__init__()
        mean_mask = np.ones((channels, channels, patch_size, patch_size)) / (patch_size * patch_size)
        self.mean_mask = nn.Parameter(data=torch.cuda.FloatTensor(mean_mask), requires_grad=False)
        mask = np.zeros((channels, channels, patch_size, patch_size))
        mask[:, :, patch_size // 2, patch_size // 2] = 1.
        self.ones_mask = nn.Parameter(data=torch.cuda.FloatTensor(mask), requires_grad=False)

    def forward(self, x, y):
        return F.conv2d((F.conv2d(x, self.ones_mask) - F.conv2d(x, self.mean_mask)) *
                        (F.conv2d(y, self.ones_mask) - F.conv2d(y, self.mean_mask)), self.mean_mask)

class GrayscaleLayer(nn.Module):
    def __init__(self):
        super(GrayscaleLayer, self).__init__()
        # ... excerpt truncated; see the full file on GitHub
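
Both layers use F.conv2d with fixed, non-trainable kernels (nn.Parameter with requires_grad=False) to compute local statistics: a box filter gives each patch's mean, and a one-hot center mask picks out the pixel itself. A minimal sketch of the box-filter half, on CPU for simplicity:

import torch
import torch.nn.functional as F

patch = 5
mean_mask = torch.full((1, 1, patch, patch), 1.0 / (patch * patch))   # box filter

x = torch.randn(1, 1, 32, 32)
local_mean = F.conv2d(x, mean_mask)   # valid convolution, as in the layers above
print(local_mean.shape)   # torch.Size([1, 1, 28, 28])
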
github marsermd / DeepestScatter / DeepestScatter_Train / DisneyDescriptorDataset.py (view on GitHub)
def __getLightAngle(self, sampleId):
        sceneId = sampleId // BATCH_SIZE
        scene = self.lmdbDataset.get(SceneSetup, sceneId)

        sample = self.lmdbDataset.get(ScatterSample, sampleId)

        lightDirection = np.array([scene.light_direction.x, scene.light_direction.y, scene.light_direction.z])
        viewDirection = np.array([sample.view_direction.x, sample.view_direction.y, sample.view_direction.z])

        angle = np.math.acos(np.dot(normalized(lightDirection), normalized(viewDirection)))  # normalized() is a repo helper; np.math is just the stdlib math module (prefer math.acos in newer NumPy)
        return torch.tensor(angle, dtype=torch.float32)
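
The method ends by wrapping a plain Python float in torch.tensor with an explicit dtype, which yields a 0-dimensional float32 tensor. The same conversion in isolation:

import math
import torch

angle = math.acos(0.5)                         # plain Python float
t = torch.tensor(angle, dtype=torch.float32)   # 0-dim float32 tensor
print(t, t.shape)   # tensor(1.0472) torch.Size([])
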
github AceCoooool / LEDNet / model / basic.py (view on GitHub)
import torch.nn as nn


def basic_conv(in_channel, channel, kernel=3, stride=1):
    return nn.Sequential(
        nn.Conv2d(in_channel, channel, kernel, stride, kernel // 2, bias=False),
        nn.BatchNorm2d(channel), nn.ReLU(inplace=True)
    )
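
Note that padding=kernel // 2 preserves the spatial size for odd kernels at stride 1. A quick usage check of basic_conv as defined above (the input shape is arbitrary):

import torch

conv = basic_conv(3, 16)        # Conv2d -> BatchNorm2d -> ReLU
x = torch.randn(2, 3, 64, 64)
print(conv(x).shape)   # torch.Size([2, 16, 64, 64])
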
github espnet / espnet / espnet / nets / pytorch_backend / e2e_tts_fastspeech.py (view on GitHub)
def _forward(self, xs, ilens, ys=None, olens=None, spembs=None, is_inference=False):
        # forward encoder
        x_masks = self._source_mask(ilens)
        hs, _ = self.encoder(xs, x_masks)  # (B, Tmax, adim)

        # integrate speaker embedding
        if self.spk_embed_dim is not None:
            hs = self._integrate_with_spk_embed(hs, spembs)

        # forward duration predictor and length regulator
        d_masks = make_pad_mask(ilens).to(xs.device)
        if is_inference:
            d_outs = self.duration_predictor.inference(hs, d_masks)  # (B, Tmax)
            hs = self.length_regulator(hs, d_outs, ilens)  # (B, Lmax, adim)
        else:
            with torch.no_grad():
                ds = self.duration_calculator(xs, ilens, ys, olens, spembs)  # (B, Tmax)
            d_outs = self.duration_predictor(hs, d_masks)  # (B, Tmax)
            hs = self.length_regulator(hs, ds, ilens)  # (B, Lmax, adim)

        # forward decoder
        if olens is not None:
            if self.reduction_factor > 1:
                olens_in = olens.new([olen // self.reduction_factor for olen in olens])
            else:
                olens_in = olens
            h_masks = self._source_mask(olens_in)
        else:
            h_masks = None
        zs, _ = self.decoder(hs, h_masks)  # (B, Lmax, adim)
        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)  # (B, Lmax, odim)
        # ... excerpt truncated; see the full file on GitHub
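
In the training branch, the duration targets ds are computed under torch.no_grad() so no gradients flow back into the duration calculator; only the duration predictor is trained against them. A minimal sketch of that teacher-style pattern, with a stand-in Linear module:

import torch
import torch.nn as nn

teacher = nn.Linear(8, 8)        # stand-in for the duration calculator
x = torch.randn(2, 8)

with torch.no_grad():            # targets carry no gradient history
    targets = teacher(x)

print(targets.requires_grad)     # False
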