How to use the dnn.pytorch.base module in dnn

To help you get started, we’ve selected a few dnn.pytorch.base examples, based on popular ways it is used in public projects.

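Every example below follows the same pattern: a model or execution class inherits from base.base (often together with nn.Module) and calls base.base.__init__(self, args), after which the entries of the args dict appear to be available as attributes such as self.n_hidden, self.drop_prob, or self.n_class. A minimal sketch of that pattern, using hypothetical argument names and values, looks like this:

# Minimal sketch of the shared pattern. The argument names and values are
# illustrative, and the attribute-style access assumes base.base.__init__
# copies each key of `args` onto the instance, as the snippets below suggest.
import torch.nn as nn
from dnn.pytorch import base

args = {
    "n_hidden": 128,       # hidden size of the RNN/CNN layers
    "n_class": 2,          # number of output classes
    "emb_dim": 300,        # word embedding dimension
    "drop_prob": 0.5,      # dropout probability
    "bi_direction": True,  # whether the RNN runs in both directions
    "emb_type": "const",   # embedding mode passed to embedding_layer (value illustrative)
}

class my_model(nn.Module, base.base):
    def __init__(self, args):
        nn.Module.__init__(self)
        base.base.__init__(self, args)  # args entries become attributes
        self.drop_out = nn.Dropout(self.drop_prob)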

github NUSTM / pytorch-dnnnlp / pytorch / exec.py
def __init__(self, data_dict, args, class_name=None, device_id=0):
        """
        Initialize execution functions
        * data_dict [dict]: use key like 'x'/'vx'/'ty'/'lq' to store the data
        * args [dict]: all model arguments
        * class_name [list]: name of each class
        """
        base.base.__init__(self, args)

        self.data_dict = data_dict
        self.class_name = class_name
        self.device_id = device_id
        self.device = torch.device(device_id) if self.n_gpu and self.space_turbo else torch.device("cpu")
        self._init_display()

github NUSTM / pytorch-dnnnlp / pytorch / contrib.py
def __init__(self, emb_matrix, args, n_time):
        nn.Module.__init__(self)
        base.base.__init__(self, args)

        self.n_time = n_time
        self.bi_direction_num = 2 if self.bi_direction else 1
        out_n_hidden = self.n_hidden * self.bi_direction_num
        self.drop_out = nn.Dropout(self.drop_prob)
        self.embedding_layer(emb_matrix)

        self.extractors = nn.ModuleList()
        self.attentions = nn.ModuleList()
        self.predictors = nn.ModuleList()
        for _ in range(n_time):
            self.extractors.append(
                nn.ModuleList([layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw) for kw in range(1, 3)])
            )  # index 0 -> (nt-1)
            self.attentions.append(layer.self_attention_layer(out_n_hidden))
            self.predictors.append(layer.softmax_layer(out_n_hidden, self.n_class))  # index 0 -> (nt-1)

github NUSTM / pytorch-dnnnlp / pytorch / contrib.py
def __init__(self, data_dict, emb_matrix, args, n_time):
        self.data_dict = data_dict
        self.n_time = n_time
        base.base.__init__(self, args)

        self.model = RNN_diachronic_model(emb_matrix, args, n_time)
        if self.cuda_enable:
            self.model.cuda()
        self.model_init = deepcopy(self.model.state_dict())

        self.optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=self.learning_rate,
            weight_decay=self.l2_reg
        )
        self._init_display()

github NUSTM / pytorch-dnnnlp / pytorch / exec.py
Ubuntu 16.04 & PyTorch 1.0
Last update: KzXuan, 2019.04.09
"""
import torch
import numpy as np
import torch.nn as nn
import easy_function as ef
import torch.utils.data as Data
import torch.nn.functional as F
from copy import deepcopy
from dnn.pytorch import base, model
from step_print import table_print, percent
from predict_analysis import predict_analysis


class exec(base.base):
    def __init__(self, data_dict, args, class_name=None, device_id=0):
        """
        Initialize execution functions
        * data_dict [dict]: use key like 'x'/'vx'/'ty'/'lq' to store the data
        * args [dict]: all model arguments
        * class_name [list]: name of each class
        """
        base.base.__init__(self, args)

        self.data_dict = data_dict
        self.class_name = class_name
        self.device_id = device_id
        self.device = torch.device(device_id) if self.n_gpu and self.space_turbo else torch.device("cpu")
        self._init_display()

    def _model_to_cuda(self):

github NUSTM / pytorch-dnnnlp / pytorch / model.py
* seq_len [tensor]: sequence length
        - pred [tensor]: prediction of the model
        """
        if self.emb_type is not None:
            inputs = self.emb_mat(inputs.long())
        now_batch_size, max_seq_len, emb_dim = inputs.size()

        outputs = self.drop_out(inputs)
        seq_len = torch.reshape(seq_len, [-1])
        outputs = torch.cat([c(outputs, seq_len, out_type='max') for c in self.cnn], -1)

        pred = self.predict(outputs)
        return pred


class RNN_model(nn.Module, base.base):
    def __init__(self, emb_matrix, args, n_hierarchy=1, mode='classify'):
        """
        Initialize the model data and layers
        * emb_matrix [np.array]: word embedding matrix
        * args [dict]: all model arguments
        * mode [str]: use 'classify'/'sequence' to get the result
        """
        nn.Module.__init__(self)
        base.base.__init__(self, args)

        self.n_hierarchy = n_hierarchy
        self.mode = mode
        self.bi_direction_num = 2 if self.bi_direction else 1

        self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
        self.drop_out = nn.Dropout(self.drop_prob)

github NUSTM / pytorch-dnnnlp / pytorch / model.py
def __init__(self, emb_matrix, args, kernel_widths):
        """
        Initialize the model data and layers
        * emb_matrix [np.array]: word embedding matrix
        * args [dict]: all model arguments
        * kernel_widths [list]: list of kernel widths for cnn kernel
        """
        nn.Module.__init__(self)
        base.base.__init__(self, args)

        self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
        self.drop_out = nn.Dropout(self.drop_prob)
        self.cnn = nn.ModuleList()
        for kw in kernel_widths:
            self.cnn.append(layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw))
        self.predict = layer.softmax_layer(self.n_hidden * len(kernel_widths), self.n_class)
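
Assuming the last snippet belongs to a class named CNN_model (the class header is not shown above) and reusing the hypothetical args dict from the sketch at the top of this page, instantiation and a forward pass could look roughly like this; the embedding matrix, batch shapes, and kernel widths are placeholders:

# Hedged usage sketch: CNN_model, the tensor shapes, and the kernel widths
# are assumptions for illustration, not taken from the snippets above.
import torch
import numpy as np

emb_matrix = np.random.rand(10000, args["emb_dim"]).astype(np.float32)
inputs = torch.randint(0, 10000, (8, 50))   # batch of 8 sequences, max length 50
seq_len = torch.randint(1, 51, (8,))        # true length of each sequence

# One CNN_layer per kernel width; the softmax_layer predicts over the
# concatenated max-pooled features, as in the forward pass shown earlier.
model = CNN_model(emb_matrix, args, kernel_widths=[2, 3, 4])
pred = model(inputs, seq_len)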