How to use the torch.nn.Sequential container in PyTorch

To help you get started, we’ve selected a few examples of torch.nn.Sequential, based on popular ways it is used in public projects.
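
Before diving into the project snippets, here is a minimal, self-contained sketch of what nn.Sequential does: it chains modules in order, feeding each module's output to the next, so the whole chain can be called like a single module.

import torch
import torch.nn as nn

# minimal sketch: three modules chained in order
model = nn.Sequential(
    nn.Linear(16, 32),
    nn.ReLU(),
    nn.Linear(32, 4),
)

x = torch.randn(8, 16)   # a batch of 8 feature vectors
y = model(x)             # same as applying the three modules one after another
print(y.shape)           # torch.Size([8, 4])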

github shirgur / UnsupervisedDepthFromFocus / Models / resnet.py
def _make_MG_unit(self, block, planes, blocks=[1, 2, 4], stride=1, rate=1):
    # 1x1 conv + BatchNorm shortcut, used when the spatial or channel shape changes
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )

    layers = []
    layers.append(block(self.inplanes, planes, stride, rate=blocks[0]*rate, downsample=downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, len(blocks)):
        layers.append(block(self.inplanes, planes, stride=1, rate=blocks[i]*rate))

    return nn.Sequential(*layers)
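
A pattern worth noting here (it recurs in several snippets below) is collecting modules in a plain Python list and unpacking it with * into the container, since nn.Sequential takes its modules as positional arguments. A standalone sketch of the same idea:

import torch.nn as nn

# build the stage programmatically, then unpack the list into Sequential
layers = []
for _ in range(3):
    layers.append(nn.Conv2d(64, 64, kernel_size=3, padding=1))
    layers.append(nn.ReLU(inplace=True))
stage = nn.Sequential(*layers)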
github spino17 / PyGlow / glow / models / network.py
def _make_layer_unit(self, layer_obj):
    # wrap a single layer object in a Sequential container
    layers = []
    layers.append(layer_obj)
    return nn.Sequential(*layers)
github neptune-ml / steppy-toolkit / toolkit / pytorch_transformers / architectures / unet.py
        if self.batch_norm:  # NOTE: this "if" line is cut off in the excerpt; the flag name is assumed
            input_block = nn.Sequential(nn.Conv2d(in_channels=self.in_channels, out_channels=self.n_filters,
                                                  kernel_size=(self.conv_kernel, self.conv_kernel),
                                                  stride=stride, padding=padding),
                                        nn.BatchNorm2d(num_features=self.n_filters),
                                        nn.ReLU(),

                                        nn.Conv2d(in_channels=self.n_filters, out_channels=self.n_filters,
                                                  kernel_size=(self.conv_kernel, self.conv_kernel),
                                                  stride=stride, padding=padding),
                                        nn.BatchNorm2d(num_features=self.n_filters),
                                        nn.ReLU(),

                                        nn.Dropout(self.dropout),
                                        )
        else:
            input_block = nn.Sequential(nn.Conv2d(in_channels=self.in_channels, out_channels=self.n_filters,
                                                  kernel_size=(self.conv_kernel, self.conv_kernel),
                                                  stride=stride, padding=padding),
                                        nn.ReLU(),

                                        nn.Conv2d(in_channels=self.n_filters, out_channels=self.n_filters,
                                                  kernel_size=(self.conv_kernel, self.conv_kernel),
                                                  stride=stride, padding=padding),
                                        nn.ReLU(),

                                        nn.Dropout(self.dropout),
                                        )
        return input_block
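
An alternative to duplicating the whole block in each branch is to assemble a list and append the normalization layer only when it is wanted; a hedged sketch (the helper name and arguments here are illustrative, not part of the original project):

import torch.nn as nn

def conv_block(in_ch, out_ch, kernel=3, batch_norm=True, dropout=0.1):
    # illustrative helper: add BatchNorm only when requested, then unpack the list
    layers = [nn.Conv2d(in_ch, out_ch, kernel_size=kernel, padding=kernel // 2)]
    if batch_norm:
        layers.append(nn.BatchNorm2d(out_ch))
    layers += [nn.ReLU(), nn.Dropout(dropout)]
    return nn.Sequential(*layers)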
github linbo0518 / BLSeg / llseg / unet / backbone.py
def _add_stage(self, block, in_ch, out_ch, stride, repeat_times):
    assert repeat_times > 0 and isinstance(repeat_times, int)
    # the first block may change channels/stride; the remaining blocks keep the shape
    layers = [block(in_ch, out_ch, stride)]
    for _ in range(repeat_times - 1):
        layers.append(block(out_ch, out_ch, 1))
    return nn.Sequential(*layers)
github xingyizhou / CenterNet / src / lib / models / networks / large_hourglass.py
        # excerpt from exkp.__init__; the start of the argument list is omitted
        make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
        make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
        kp_layer=residual
    ):
        super(exkp, self).__init__()

        self.nstack    = nstack
        self.heads     = heads

        curr_dim = dims[0]

        self.pre = nn.Sequential(
            convolution(7, 3, 128, stride=2),
            residual(3, 128, 256, stride=2)
        ) if pre is None else pre

        self.kps  = nn.ModuleList([
            kp_module(
                n, dims, modules, layer=kp_layer,
                make_up_layer=make_up_layer,
                make_low_layer=make_low_layer,
                make_hg_layer=make_hg_layer,
                make_hg_layer_revr=make_hg_layer_revr,
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(nstack)
        ])
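
Note the two containers used side by side in this snippet: self.pre is an nn.Sequential, which defines its own forward and can be called directly, while self.kps is an nn.ModuleList, which only registers its children and leaves the call order to the parent module's forward. A minimal illustration of the difference:

import torch
import torch.nn as nn

seq = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
mlist = nn.ModuleList([nn.Linear(4, 4), nn.ReLU()])

x = torch.randn(2, 4)
y = seq(x)        # Sequential chains its children for you
# mlist(x)        # would raise: ModuleList does not implement forward
for m in mlist:   # iterate explicitly instead
    x = m(x)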
github yossigandelsman / DoubleDIP / net / layers.py
            # excerpt from a conv-block factory; the earlier downsampling branches are cut off here
            downsampler = nn.MaxPool2d(stride, stride)
        elif downsample_mode in ['lanczos2', 'lanczos3']:
            downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5,
                                      preserve_size=True)
        stride = 1

    padder = None
    to_pad = int((kernel_size - 1) / 2)
    if pad == 'reflection':
        padder = nn.ReflectionPad2d(to_pad)
        to_pad = 0

    convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)

    layers = [x for x in [padder, convolver, downsampler] if x is not None]
    return nn.Sequential(*layers)
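
The list comprehension above is what lets optional pieces (the padder and the downsampler) drop out cleanly: a stray None in the argument list would break the container's forward pass, so only real modules are passed on. The same idiom in isolation:

import torch.nn as nn

maybe_bn = None  # e.g. normalization disabled by a config flag
candidates = [nn.Conv2d(3, 16, kernel_size=3, padding=1), maybe_bn, nn.ReLU()]
block = nn.Sequential(*[m for m in candidates if m is not None])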
github lzx1413 / PytorchSSD / models / RFB_Net_mobile.py
def conv_dw(inp, oup, stride):
    # depthwise 3x3 convolution followed by a pointwise 1x1 convolution
    return nn.Sequential(
        Conv2dDepthwise(inp, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU(inplace=True),

        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    )
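
If you prefer named submodules over integer indices (model.pointwise instead of model[3]), nn.Sequential also accepts an OrderedDict; a small sketch of the same depthwise-separable pattern with names:

from collections import OrderedDict

import torch.nn as nn

block = nn.Sequential(OrderedDict([
    ('depthwise', nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32, bias=False)),
    ('bn1', nn.BatchNorm2d(32)),
    ('relu1', nn.ReLU(inplace=True)),
    ('pointwise', nn.Conv2d(32, 64, kernel_size=1, bias=False)),
    ('bn2', nn.BatchNorm2d(64)),
    ('relu2', nn.ReLU(inplace=True)),
]))

print(block.pointwise)  # submodules are reachable by name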
github freewym / espresso / fairseq / modules / downsampled_multihead_attention.py
        if self.downsample:  # NOTE: this opening line is truncated in the excerpt
            k_layers.append(Downsample(self.head_index))
            v_layers.append(Downsample(self.head_index))
            out_proj_size = self.head_dim
        else:
            out_proj_size = self.head_dim * self.num_heads
        if self.gated:
            k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
            self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias)
            v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
        else:
            k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
            self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias)
            v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))

        self.in_proj_k = nn.Sequential(*k_layers)
        self.in_proj_v = nn.Sequential(*v_layers)

        if self.downsample:
            self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias)
        else:
            self.out_proj = Linear(out_proj_size, out_channels, bias=bias)

        self.scaling = self.head_dim**-0.5
github lukasc-ch / CBinfer / pycbinfer / __init__.py
def mergeReLURecur(m):
    mout = nn.Sequential()
    for i, (nodeName, node) in enumerate(m.named_children()):
        
        # handle nn.Sequential containers through recursion
        if type(node) in [nn.Sequential]:
            mout.add_module(nodeName, mergeReLURecur(node))
            continue
        # enable the built-in ReLU of CBConv2d when the next module is a ReLU
        elif type(node) in [CBConv2d]:
            chldrn = list(m.children())
            if len(chldrn) > i+1 and type(chldrn[i+1]) is torch.nn.modules.activation.ReLU:
                node.withReLU = True
        # drop a standalone ReLU that directly follows a CBConv2d layer (already merged above)
        elif type(node) is torch.nn.modules.activation.ReLU and i >= 1 and type(list(m.children())[i-1]) is CBConv2d:
            print('merging ReLU layer')
            continue  # i.e. don't add the module
        # any other module is kept unchanged (tail reconstructed; the excerpt cuts off here)
        mout.add_module(nodeName, node)
    return mout
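
mergeReLURecur relies on the fact that an empty nn.Sequential() can be filled incrementally with add_module, which also preserves the original child names; the same mechanism in isolation:

import torch.nn as nn

src = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.ReLU())
dst = nn.Sequential()                  # start empty
for name, child in src.named_children():
    dst.add_module(name, child)        # copy children over, keeping their names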
github Yonv1943 / DL_RL_Zoo / RL / Beta / AgentNetwork.py
def __init__(self, state_dim, action_dim, mid_dim, use_densenet, use_sn):
    super(CriticSN, self).__init__()

    if use_densenet:
        self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                 DenseNet(mid_dim),
                                 nn.Linear(mid_dim * 4, 1), )
    else:
        self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, 1), )

    if use_sn:  # NOTICE: spectral normalization conflicts with soft target updates.
        # self.net[-1] = nn.utils.spectral_norm(nn.Linear(...)),
        self.net[-1] = nn.utils.spectral_norm(self.net[-1])
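
The last snippet also shows that nn.Sequential supports integer indexing, so a submodule can be read or replaced after construction; wrapping self.net[-1] with spectral_norm is just an assignment to the final layer. A standalone sketch:

import torch.nn as nn

net = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 1))
print(net[-1])                              # the final Linear layer
net[-1] = nn.utils.spectral_norm(net[-1])   # replace it with a spectrally normalized version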