How to use the torch.nn.Linear module in PyTorch

To help you get started, we've selected a few torch.nn.Linear examples drawn from popular public projects.

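For orientation before the project snippets: nn.Linear applies an affine transformation y = x W^T + b over the last dimension of its input. A minimal sketch of basic usage (the layer sizes here are arbitrary):

    import torch
    import torch.nn as nn

    layer = nn.Linear(20, 5)    # in_features=20, out_features=5
    x = torch.randn(128, 20)    # a batch of 128 feature vectors
    y = layer(x)                # y.shape == torch.Size([128, 5])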

github tribhuvanesh / knockoffnets / knockoff / models / cifar / wrn.py View on Github
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
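The constructor stores nChannels[3] so the forward pass (elided in this snippet) can flatten the globally pooled features before the final nn.Linear. A hedged sketch of how that forward tail typically looks for 32x32 inputs, assuming torch.nn.functional is imported as F; this is illustrative, not the project's exact code:

    def forward(self, x):
        out = self.conv1(x)
        out = self.block3(self.block2(self.block1(out)))
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)           # global average pooling over the 8x8 feature map
        out = out.view(-1, self.nChannels)   # flatten to (batch, nChannels[3])
        return self.fc(out)                  # nn.Linear(nChannels[3], num_classes)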
github muhanzhang / pytorch_DGCNN / mlp_dropout.py View on Github
def __init__(self, input_size, hidden_size, num_class, with_dropout=False):
        super(MLPClassifier, self).__init__()

        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        self.with_dropout = with_dropout

        weights_init(self)
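The classifier wires two nn.Linear layers but its forward is elided here. A plausible sketch, assuming torch.nn.functional is imported as F (illustrative only, not the project's code):

    def forward(self, x):
        h1 = F.relu(self.h1_weights(x))
        if self.with_dropout:
            h1 = F.dropout(h1, training=self.training)
        logits = self.h2_weights(h1)
        return F.log_softmax(logits, dim=1)   # per-class log-probabilities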
github radiodee1 / awesome-chatbot / model / seq_2_seq_single.py View on Github
        self.embed = embed  # nn.Embedding(target_vocab_size, embed_dim, padding_idx=1)
        self.attention_mod = Attn(hidden_dim)
        self.hidden_dim = hidden_dim
        self.word_mode = cancel_attention #False
        #self.word_mode_b = cancel_attention #False

        gru_in_dim = hidden_dim
        linear_in_dim = hidden_dim * 2
        if cancel_attention:
            gru_in_dim = hidden_dim
            linear_in_dim = hidden_dim

        batch_first = False #self.word_mode

        self.gru = nn.GRU(gru_in_dim, hidden_dim, self.n_layers, dropout=dropout, batch_first=batch_first, bidirectional=False)
        self.out_target = nn.Linear(hidden_dim, target_vocab_size)
        self.out_target_b = nn.Linear(self.hidden_dim, target_vocab_size)

        self.out_concat = nn.Linear(linear_in_dim, hidden_dim)
        self.out_attn = nn.Linear(hidden_dim * 3, hparams['tokens_per_sentence'])
        self.out_combine = nn.Linear(hidden_dim * 3, hidden_dim)
        self.out_concat_b = nn.Linear(hidden_dim * 2, hidden_dim)
        self.maxtokens = hparams['tokens_per_sentence']
        self.cancel_attention = cancel_attention
        self.decoder_hidden_z = None
        self.dropout_o = nn.Dropout(dropout)
        self.dropout_e = nn.Dropout(dropout)

        self.reset_parameters()
github nikhilbarhate99 / Deterministic-GAIL-PyTorch / GAIL.py View on Github
def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        
        self.l1 = nn.Linear(state_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, action_dim)
        
        self.max_action = max_action
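Actor networks like this one usually squash the last nn.Linear output and scale it to the action range. A hedged sketch of the matching forward, assuming torch and torch.nn.functional (as F) are imported; not quoted from the project:

    def forward(self, state):
        a = F.relu(self.l1(state))
        a = F.relu(self.l2(a))
        return self.max_action * torch.tanh(self.l3(a))   # actions in [-max_action, max_action]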
github hunkim / PyTorchZeroToAll / 08_2_dataset_loade_logistic.py View on Github
def __init__(self):
        """
        In the constructor we instantiate two nn.Linear module
        """
        super(Model, self).__init__()
        self.l1 = nn.Linear(8, 6)
        self.l2 = nn.Linear(6, 4)
        self.l3 = nn.Linear(4, 1)

        self.sigmoid = nn.Sigmoid()
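Chaining the three nn.Linear layers through the stored sigmoid gives a small logistic-regression-style model. A minimal sketch of the corresponding forward (illustrative):

    def forward(self, x):
        out1 = self.sigmoid(self.l1(x))
        out2 = self.sigmoid(self.l2(out1))
        return self.sigmoid(self.l3(out2))   # a probability in (0, 1)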
github neale / HyperGAN / gnn_single_code_cifar4.py View on Github
def __init__(self, args):
        super(DiscriminatorZ, self).__init__()
        for k, v in vars(args).items():
            setattr(self, k, v)
        
        self.name = 'Discriminator_wae'
        self.linear0 = nn.Linear(self.lcd*self.lcd*10, self.lcd*self.lcd)
        self.linear1 = nn.Linear(self.lcd*self.lcd, 256)
        #self.linear1 = nn.Linear(self.dim, 256)
        self.linear2 = nn.Linear(256, 1)
        self.relu = nn.LeakyReLU(.2, inplace=True)
        self.sigmoid = nn.Sigmoid()
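A discriminator like this typically flattens its input to match linear0 and chains the layers with the stored activations; a minimal assumed sketch, not the project's code:

    def forward(self, z):
        z = z.view(-1, self.lcd * self.lcd * 10)   # flatten to linear0's input size
        z = self.relu(self.linear0(z))
        z = self.relu(self.linear1(z))
        return self.sigmoid(self.linear2(z))       # probability that z is 'real'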
github hsgodhia / squad_rasor_nn / rasor_model.py View on Github
        self.embed = nn.Embedding(config.vocab_size, config.emb_dim)
        self.embed.weight.requires_grad = False  # do not propagate into the pre-trained word embeddings
        self.embed.weight.data.copy_(emb_data)
        # used for eq(6) does FFNN(p_i)*FFNN(q_j)
        self.ff_align = nn.Linear(config.emb_dim, config.ff_dim)
        # used for eq(10) does FFNN(q_j')
        self.ff_q_indep = nn.Linear(2*config.hidden_dim, config.ff_dim)
        # used for eq(2) does FFNN(h_a) in a simplified form so that it can be re-used,
        # note: h_a = [u,v] where u and v are start and end words respectively
        # we have 2*config.hidden_dim since we are using a bi-directional LSTM
        self.p_end_ff = nn.Linear(2 * config.hidden_dim, config.ff_dim)
        self.p_start_ff = nn.Linear(2 * config.hidden_dim, config.ff_dim)
        # used for eq(2) plays the role of w_a
        self.w_a = nn.Linear(config.ff_dim, 1, bias=False)
        # used for eq(10) plays the role of w_q
        self.w_q = nn.Linear(config.ff_dim, 1, bias=False)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax()        # note: modern PyTorch expects an explicit dim argument
        self.logsoftmax = nn.LogSoftmax()  # (e.g. nn.Softmax(dim=1)); the implicit dim is deprecated
        self.dropout = nn.Dropout(0.6)

        self.hidden_qindp = self.init_hidden(config.num_layers, config.hidden_dim, config.batch_size)
        self.hidden = self.init_hidden(config.num_layers, config.hidden_dim, config.batch_size)
        # since we are using q_align and p_emb as p_star we have input as 2*emb_dim
        # num_layers = config.num_layers and dropout = 0.6, matching the LSTMs below
        self.gru = nn.LSTM(input_size = 2*config.emb_dim + 2*config.hidden_dim, hidden_size = config.hidden_dim, num_layers = config.num_layers, dropout=0.6, bidirectional=True)
        self.q_indep_bilstm = nn.LSTM(input_size = config.emb_dim, hidden_size = config.hidden_dim, num_layers = config.num_layers, dropout=0.6, bidirectional=True)
        #change init_hidden when you change this gru/lstm

        parameters = filter(lambda p: p.requires_grad, self.parameters())  # skip frozen params (e.g. the word embeddings)
        for p in parameters:
            self.init_param(p)
github yjxiong / action-detection / ssn_models.py View on Github
def _prepare_ssn(self, num_class, stpp_cfg):
        feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features
        if self.dropout == 0:
            setattr(self.base_model, self.base_model.last_layer_name, Identity())
        else:
            setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))

        self.stpp = StructuredTemporalPyramidPooling(feature_dim, True, configs=stpp_cfg)
        self.activity_fc = nn.Linear(self.stpp.activity_feat_dim(), num_class + 1)
        self.completeness_fc = nn.Linear(self.stpp.completeness_feat_dim(), num_class)

        nn.init.normal_(self.activity_fc.weight.data, 0, 0.001)   # in-place init (normal / constant without
        nn.init.constant_(self.activity_fc.bias.data, 0)          # the trailing underscore are deprecated)
        nn.init.normal_(self.completeness_fc.weight.data, 0, 0.001)
        nn.init.constant_(self.completeness_fc.bias.data, 0)

        self.test_fc = None
        if self.with_regression:
            self.regressor_fc = nn.Linear(self.stpp.completeness_feat_dim(), 2 * num_class)
            nn.init.normal_(self.regressor_fc.weight.data, 0, 0.001)
            nn.init.constant_(self.regressor_fc.bias.data, 0)
        else:
            self.regressor_fc = None

        return feature_dim
github ETIP-team / ETIP-Project / TOI-CNN / ETIP / 11-19 version_global_negative / model.py View on Github
def __init__(self, pos_loss_method="smoothl1", lambd=1.0, prevent_over_fitting_method="l2_penalty"):
        super(RCNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1,
                      out_channels=feature_maps_number,
                      kernel_size=(kernal_length, word_embedding_dim),
                      stride=1,
                      padding=(1, 0)
                      ),
            nn.ReLU(),
        )
        self.roi_pool = ROIPooling(output_size=(1, pooling_out))
        self.flatten_feature = feature_maps_number * pooling_out
        self.cls_fc1 = nn.Linear(self.flatten_feature, self.flatten_feature)
        self.cls_score = nn.Linear(self.flatten_feature, classes_num + 1)
        # self.cls_dropout =
        self.bbox_fc1 = nn.Linear(self.flatten_feature, self.flatten_feature)
        # note: the bbox head has only 2 * (classes_num + 1) outputs
        self.bbox = nn.Linear(self.flatten_feature, 2 * (classes_num + 1))

        self.cross_entropy_loss = nn.CrossEntropyLoss()

        self.smooth_l1_loss = nn.SmoothL1Loss()
        self.mse_loss = nn.MSELoss()  # modify the loss calculation

        self.pos_loss_method = pos_loss_method
        self.prevent_over_fitting_method = prevent_over_fitting_method
        self.lambd = lambd
        self.optimizer = None
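The two nn.Linear heads share pooled region features. A hedged sketch of how they might connect in forward; the ROIPooling call signature and the use of F.relu are assumptions, not the project's code:

    def forward(self, x, rois):
        feat = self.conv1(x)                             # (batch, feature_maps_number, H, 1)
        pooled = self.roi_pool(feat, rois)               # one fixed-size vector per region
        pooled = pooled.view(-1, self.flatten_feature)
        cls_score = self.cls_score(F.relu(self.cls_fc1(pooled)))   # (num_rois, classes_num + 1)
        bbox_pred = self.bbox(F.relu(self.bbox_fc1(pooled)))       # (num_rois, 2 * (classes_num + 1))
        return cls_score, bbox_pred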
github singnet / semantic-vision / experiments / vqa / hypernets / basic_model / HyperNet.py View on Github
        inter_control_dim = control_dims[1]
        control_mat_size = control_dims[2]
        
        v_in_dim = main_dims[0]
        out_dim = main_dims[1]
        
        #control part
        hyper_control_layers = []
        hyper_control_layers.append(nn.Linear(q_in_dim, inter_control_dim))
        hyper_control_layers.append(nn.ReLU())
        hyper_control_layers.append(nn.Linear(inter_control_dim, control_mat_size*control_mat_size))
        self.hyper_control = nn.Sequential(*hyper_control_layers)

        #left half of main network
        hyper_main_layers_in = []
        hyper_main_layers_in.append(nn.Linear(v_in_dim, control_mat_size))
        hyper_main_layers_in.append(nn.ReLU())
        self.hyper_main_in = nn.Sequential(*hyper_main_layers_in)

        #right part of main network
        hyper_main_layers_out = []
        hyper_main_layers_out.append(nn.Linear(control_mat_size, out_dim))
        hyper_main_layers_out.append(nn.ReLU())
        self.hyper_main_out = nn.Sequential(*hyper_main_layers_out)
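The three stacks form a classic hypernetwork pattern: the control branch predicts a control_mat_size x control_mat_size matrix from the question vector q, and that matrix is applied between the two main-branch projections of v. A hedged sketch of how they might compose; it assumes control_mat_size was saved on self in __init__, which the snippet does not show:

    def forward(self, q, v):
        m = self.control_mat_size                    # assumed attribute, not shown above
        ctrl = self.hyper_control(q).view(-1, m, m)  # per-example control matrix
        h = self.hyper_main_in(v).unsqueeze(1)       # (batch, 1, m)
        h = torch.bmm(h, ctrl).squeeze(1)            # apply the predicted matrix
        return self.hyper_main_out(h)                # (batch, out_dim)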