How to use the nemo.backends.pytorch.nm.TrainableNM.__init__ function in NeMo

To help you get started, we've selected a few NeMo examples based on popular ways TrainableNM.__init__ is used in public projects.
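
Every example below follows the same pattern: a neural module subclasses TrainableNM, forwards **kwargs to TrainableNM.__init__, builds its PyTorch submodules, and (when it has parameters) moves itself onto the device the base class resolved. A minimal sketch of that skeleton, assuming a hypothetical module with a single linear layer (the class name and dimensions are illustrative, not from NeMo):

    import torch.nn as nn
    from nemo.backends.pytorch.nm import TrainableNM

    class MyProjectionNM(TrainableNM):  # hypothetical example module
        def __init__(self, *, in_dim, out_dim, **kwargs):
            # Forward factory/placement kwargs to the base class first,
            # so attributes like self._device are set before layers are built.
            TrainableNM.__init__(self, **kwargs)
            self.projection = nn.Linear(in_dim, out_dim)
            # Move the module's parameters onto the resolved device.
            self.to(self._device)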


From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/bert.py):
def __init__(self,
                 hidden_size,
                 num_intents,
                 num_slots,
                 dropout,
                 **kwargs):
        TrainableNM.__init__(self, **kwargs)
        self.hidden_size = hidden_size
        self.num_intents = num_intents
        self.num_slots = num_slots
        self.dropout = nn.Dropout(dropout)
        self.intent_dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.intent_classifier = nn.Linear(self.hidden_size, self.num_intents)
        self.slot_dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.slot_classifier = nn.Linear(self.hidden_size, self.num_slots)
        # Initialize all submodules transformer-style, then move the whole
        # module onto the device resolved by TrainableNM.
        self.apply(
            lambda module: transformer_weights_init(module, xavier=False))
        self.to(self._device)

From NVIDIA/NeMo (nemo/nemo/backends/pytorch/common/other.py):
def __init__(self, *, from_dim, to_dim, dropout=0.0, **kwargs):
        TrainableNM.__init__(self, **kwargs)

        self.from_dim = from_dim
        self.to_dim = to_dim
        self.dropout = dropout
        self.projection = nn.Linear(self.from_dim, self.to_dim, bias=False)
        if self.dropout != 0.0:
            self.embedding_dropout = nn.Dropout(self.dropout)
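
Note the bare * in the signature: from_dim, to_dim, and dropout are keyword-only, so callers must name each argument explicitly. A hedged instantiation sketch (the class name Projection is a placeholder; the snippet does not show the real one):

    # Placeholder class name; the keyword-only call style is the point here.
    proj = Projection(from_dim=512, to_dim=256, dropout=0.1)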

From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/bert.py):
def __init__(self, *, d_model, num_classes, **kwargs):
        TrainableNM.__init__(self, **kwargs)

        self.log_softmax = ClassificationLogSoftmax(
            hidden_size=d_model,
            num_classes=num_classes
        )

        self.log_softmax.apply(transformer_weights_init)
        self.log_softmax.to(self._device)

From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/huggingface/bert.py):
def __init__(self, *,
                 pretrained_model_name=None,
                 config_filename=None,
                 vocab_size=None,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 max_position_embeddings=512,
                 **kwargs):
        TrainableNM.__init__(self, **kwargs)

        # Check that only one of pretrained_model_name, config_filename, and
        # vocab_size was passed in
        total = 0
        if pretrained_model_name is not None:
            total += 1
        if config_filename is not None:
            total += 1
        if vocab_size is not None:
            total += 1

        if total != 1:
            raise ValueError("Only one of pretrained_model_name, vocab_size, "
                             + "or config_filename should be passed into the "
                             + "BERT constructor.")

From NVIDIA/NeMo (nemo/nemo/backends/pytorch/common/other.py):
def __init__(self, *, voc_size, hidden_size, dropout=0.0, **kwargs):
        TrainableNM.__init__(self, **kwargs)

        self.voc_size = voc_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.embedding = nn.Embedding(self.voc_size, self.hidden_size)
        if self.dropout != 0.0:
            self.embedding_dropout = nn.Dropout(self.dropout)

From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/bert.py):
def __init__(self, **kwargs):
        TrainableNM.__init__(self, **kwargs)

From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/transformer_nm.py):
def __init__(self, *, vocab_size, d_model, **kwargs):
        TrainableNM.__init__(self, **kwargs)

        self.log_softmax = TransformerLogSoftmax(
            vocab_size=vocab_size,
            hidden_size=d_model)

        self.log_softmax.apply(transformer_weights_init)
        self.log_softmax.to(self._device)

From NVIDIA/NeMo (collections/nemo_asr/nemo_asr/greedy_ctc_decoder.py):
def __init__(self, **kwargs):
        TrainableNM.__init__(self, **kwargs)

From NVIDIA/NeMo (collections/nemo_nlp/nemo_nlp/modules/transformer_nm.py):
def __init__(self,
                 vocab_size,
                 d_model,
                 d_inner,
                 num_layers,
                 max_seq_length,
                 num_attn_heads,
                 ffn_dropout=0.0,
                 embedding_dropout=0.0,
                 attn_score_dropout=0.0,
                 attn_layer_dropout=0.0,
                 learn_positional_encodings=False,
                 hidden_act='relu',
                 **kwargs):
        TrainableNM.__init__(self, **kwargs)

        self.embedding_layer = TransformerEmbedding(
            vocab_size=vocab_size,
            hidden_size=d_model,
            max_sequence_length=max_seq_length,
            embedding_dropout=embedding_dropout,
            learn_positional_encodings=learn_positional_encodings
        )
        self.decoder = TransformerDecoder(
            num_layers=num_layers,
            hidden_size=d_model,
            num_attention_heads=num_attn_heads,
            inner_size=d_inner,
            ffn_dropout=ffn_dropout,
            hidden_act=hidden_act,
            attn_score_dropout=attn_score_dropout,
            attn_layer_dropout=attn_layer_dropout)
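
The embedding layer and the decoder are deliberately built from the same d_model, so the embedding width matches the decoder's hidden size. A hedged instantiation sketch (the class name TransformerDecoderNM is assumed from the file name transformer_nm.py; all values are illustrative):

    decoder_nm = TransformerDecoderNM(  # class name is an assumption
        vocab_size=32000,
        d_model=512,
        d_inner=2048,
        num_layers=6,
        max_seq_length=256,
        num_attn_heads=8)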