How to use the dnn.pytorch.layer.RNN_layer function in dnn

To help you get started, we’ve selected a few RNN_layer examples based on popular ways it is used in public projects.
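Before diving into the project snippets below, here is a minimal instantiation sketch. Both the import path (taken from the title, dnn.pytorch.layer) and the constructor signature (inferred from the calls in the snippets: input dimension, hidden size, number of layers, dropout probability, bidirectional flag, cell type) are assumptions rather than confirmed API, and the hyper-parameters are made up for illustration.

from dnn.pytorch import layer  # assumed import path, matching dnn.pytorch.layer.RNN_layer

# Hypothetical hyper-parameters, chosen only for illustration.
emb_dim, n_hidden, n_layer = 300, 128, 1
drop_prob, bi_direction = 0.1, True

# Signature inferred from the project snippets below:
#   RNN_layer(input_dim, n_hidden, n_layer, drop_prob, bi_direction, mode)
rnn = layer.RNN_layer(emb_dim, n_hidden, n_layer, drop_prob, bi_direction, mode="LSTM")
print(rnn)  # a bidirectional layer produces 2 * n_hidden features per position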


github NUSTM / pytorch-dnnnlp / pytorch / model.py (view on GitHub)
        * emb_matrix [np.array]: word embedding matrix
        * args [dict]: all model arguments
        * mode [str]: use 'classify'/'sequence' to get the result
        """
        nn.Module.__init__(self)
        base.base.__init__(self, args)

        self.n_hierarchy = n_hierarchy
        self.mode = mode
        self.bi_direction_num = 2 if self.bi_direction else 1

        self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
        self.drop_out = nn.Dropout(self.drop_prob)

        rnn_params = (self.n_hidden, self.n_layer, self.drop_prob, self.bi_direction, self.rnn_type)
        self.rnn = nn.ModuleList([layer.RNN_layer(self.emb_dim, *rnn_params)])
        self.att = nn.ModuleList([layer.self_attention_layer(self.bi_direction_num * self.n_hidden)])
        for _ in range(self.n_hierarchy - 1):
            self.rnn.append(layer.RNN_layer(self.bi_direction_num * self.n_hidden, *rnn_params))
            if self.use_attention:
                self.att.append(layer.self_attention_layer(self.bi_direction_num * self.n_hidden))
        self.predict = layer.softmax_layer(self.n_hidden * self.bi_direction_num, self.n_class)
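The hierarchy above chains one RNN_layer into the next, which is why every layer after the first takes bi_direction_num * n_hidden input features: a bidirectional recurrent layer concatenates its forward and backward hidden states. RNN_layer presumably wraps one of torch's recurrent modules, so the same shape arithmetic can be checked with a plain torch.nn.LSTM (shown only to illustrate the dimensions, not the project's own code):

import torch
import torch.nn as nn

emb_dim, n_hidden = 300, 128
lstm = nn.LSTM(input_size=emb_dim, hidden_size=n_hidden,
               num_layers=1, batch_first=True, bidirectional=True)
x = torch.randn(4, 20, emb_dim)  # (batch, seq_len, emb_dim)
out, _ = lstm(x)
print(out.shape)  # torch.Size([4, 20, 256]), i.e. 2 * n_hidden per time step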
github NUSTM / pytorch-dnnnlp / learning_demo.py (view on GitHub)
    def __init__(self, emb_matrix, args):
        super(LSTM_model, self).__init__()

        # Embedding layer
        self.emb_mat = layer.embedding_layer(emb_matrix, 'const')
        # Drop-out layer
        self.drop_out = nn.Dropout(args.drop_prob)
        # LSTM layer
        self.lstm = layer.RNN_layer(args.emb_dim, args.n_hidden, args.n_layer,
                                    args.drop_prob, args.bi_direction, mode="LSTM")
        # SoftMax layer
        bi_direction_num = 2 if args.bi_direction else 1
        self.predictor = layer.softmax_layer(bi_direction_num * args.n_hidden, args.n_class)
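The constructor above only reads a handful of attributes from args, so a simple namespace is enough to try it out. The vocabulary size and hyper-parameters below are invented for illustration, and the example assumes embedding_layer accepts a NumPy matrix, as the snippet suggests:

import numpy as np
from types import SimpleNamespace

# Only the fields read in __init__ above are required; the values are hypothetical.
args = SimpleNamespace(emb_dim=300, n_hidden=128, n_layer=1,
                       drop_prob=0.1, bi_direction=True, n_class=2)
emb_matrix = np.random.randn(10000, args.emb_dim).astype(np.float32)  # fake pretrained embeddings

model = LSTM_model(emb_matrix, args)
print(model)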
github NUSTM / pytorch-dnnnlp / pytorch / model.py (view on GitHub)
    def init_weights(self):
        # Walk every sub-module and let each custom layer re-initialize its own weights.
        for m in self.modules():
            if isinstance(m, layer.RNN_layer):
                m.init_weights()
            if isinstance(m, layer.self_attention_layer):
                m.init_weights()
            if isinstance(m, layer.softmax_layer):
                m.init_weights()
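Nothing calls this method automatically, so weight re-initialization is an explicit step after construction. The same traversal can also be written with PyTorch's Module.apply, which visits every sub-module; the sketch below assumes a constructed model and the same custom layer classes:

def reinit_custom_layers(m):
    # Re-initialize only the project's custom layers; leave other modules untouched.
    if isinstance(m, (layer.RNN_layer, layer.self_attention_layer, layer.softmax_layer)):
        m.init_weights()

model.init_weights()               # the method shown above
model.apply(reinit_custom_layers)  # equivalent traversal via nn.Module.apply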