How to use the allennlp.models.model.Model.register function in allennlp

To help you get started, we’ve selected a few allennlp examples showing how Model.register is used in popular public projects.

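Before the project examples, here is a minimal sketch of what Model.register does: it files a Model subclass in AllenNLP's registry under a string key, so that a configuration's "type" field (or a direct Model.by_name lookup) can resolve that key back to the class. The class and key below are illustrative, not taken from any of the projects that follow.

from allennlp.data import Vocabulary
from allennlp.models.model import Model


# Registering under "my_tiny_model" makes the class reachable from config
# files ("model": {"type": "my_tiny_model", ...}) and from Model.by_name.
@Model.register("my_tiny_model")
class MyTinyModel(Model):
    def __init__(self, vocab: Vocabulary) -> None:
        super().__init__(vocab)

    def forward(self):  # a real model takes tensors and returns an output dict
        return {}


# The registry maps the string key back to the class itself.
assert Model.by_name("my_tiny_model") is MyTinyModel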

github allenai / scibert / scibert / models / text_classifier.py View on GitHub
from typing import Dict, Optional, List, Any

import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides


@Model.register("text_classifier")
class TextClassifier(Model):
    """
    Implements a basic text classifier:
    1) Embed tokens using `text_field_embedder`
    2) Seq2SeqEncoder, e.g. BiLSTM
    3) Append the first and last encoder states
    4) Final feedforward layer

    Optimized with CrossEntropyLoss.  Evaluated with CategoricalAccuracy & F1.
    """
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: FeedForward,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
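The registered name is what a training configuration's "model" section points at. Below is a rough sketch of such a config for the class above, written as a Python dict; the embedder, encoder, and feedforward settings are illustrative placeholders, not values from the scibert repository.

# Keys mirror the constructor parameters of TextClassifier; "type" is the
# string passed to Model.register.  All nested values here are placeholders.
model_config = {
    "type": "text_classifier",
    "text_field_embedder": {"tokens": {"type": "embedding", "embedding_dim": 100}},
    "text_encoder": {"type": "lstm", "input_size": 100, "hidden_size": 100, "bidirectional": True},
    "classifier_feedforward": {"input_dim": 200, "num_layers": 1, "hidden_dims": [2], "activations": "linear"},
}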
github raylin1000 / drop-bert / drop_bert / augmented_bert_templated_old.py View on GitHub
import logging
from typing import List, Optional

import torch

from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from allennlp.training.metrics.drop_em_and_f1 import DropEmAndF1
from pytorch_pretrained_bert import BertModel, BertTokenizer
import pickle

from drop_bert.nhelpers import tokenlist_to_passage, beam_search, evaluate_postfix

logger = logging.getLogger(__name__)

@Model.register("nabertT")
class NumericallyAugmentedBERTT(Model):
    """
    This class augments BERT with some rudimentary numerical reasoning abilities. This is based on
    NAQANet, as published in the original DROP paper. The code is based on the AllenNLP 
    implementation of NAQANet
    """
    def __init__(self, 
                 vocab: Vocabulary, 
                 bert_pretrained_model: str, 
                 dropout_prob: float = 0.1, 
                 max_count: int = 10,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 answering_abilities: List[str] = None,
                 number_rep: str = 'first',
                 special_numbers : List[int] = None) -> None:
github raylin1000 / drop-bert / drop_bert / augmented_bert_templated.py View on GitHub
import logging
from typing import List, Optional

import torch

from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from allennlp.training.metrics.drop_em_and_f1 import DropEmAndF1
from pytorch_pretrained_bert import BertModel, BertTokenizer
import pickle

from drop_bert.nhelpers import tokenlist_to_passage, beam_search, evaluate_postfix

logger = logging.getLogger(__name__)

@Model.register("nabert+T")
class NumericallyAugmentedBERTT(Model):
    """
    This class augments BERT with some rudimentary numerical reasoning abilities. This is based on
    NAQANet, as published in the original DROP paper. The code is based on the AllenNLP 
    implementation of NAQANet
    """
    def __init__(self, 
                 vocab: Vocabulary, 
                 bert_pretrained_model: str, 
                 dropout_prob: float = 0.1, 
                 max_count: int = 10,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 answering_abilities: List[str] = None,
                 number_rep: str = 'first',
                 special_numbers : List[int] = None) -> None:
github raylin1000 / drop-bert / drop_bert / augmented_bert_plus.py View on GitHub
import logging
from typing import List, Optional

import torch

from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import masked_softmax
from allennlp.training.metrics.drop_em_and_f1 import DropEmAndF1
from pytorch_pretrained_bert import BertModel, BertTokenizer
import pickle

from drop_bert.nhelpers import tokenlist_to_passage, beam_search, evaluate_postfix

logger = logging.getLogger(__name__)

@Model.register("nabert+")
class NumericallyAugmentedBERTPlus(Model):
    """
    This class augments BERT with some rudimentary numerical reasoning abilities. This is based on
    NAQANet, as published in the original DROP paper. The code is based on the AllenNLP 
    implementation of NAQANet
    """
    def __init__(self, 
                 vocab: Vocabulary, 
                 bert_pretrained_model: str, 
                 dropout_prob: float = 0.1, 
                 max_count: int = 10,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 answering_abilities: List[str] = None,
                 number_rep: str = 'first',
                 arithmetic: str = 'base',
github allenai / vampire / vae / models / baselines / seq2seq_classifier.py View on GitHub
from typing import Any, Dict, List, Optional

import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import (get_final_encoder_states, get_text_field_mask,
                              masked_max, masked_mean)
from allennlp.training.metrics import CategoricalAccuracy

from vae.models.classifier import Classifier


@Classifier.register("seq2seq_classifier")
@Model.register("seq2seq_classifier")
class Seq2SeqClassifier(Classifier):
    """
    This ``Model`` implements a classifier with a seq2seq encoder of text.

    See allennlp.modules.seq2seq_encoders for available encoders.

    Parameters
    ----------
    vocab : ``Vocabulary``
    input_embedder : ``TextFieldEmbedder``
        Used to embed the ``TextField`` we get as input to the model.
    encoder : ``Seq2SeqEncoder``
        Used to encode the text
    classification_layer : ``FeedForward``
        This feedforward network computes the output logits.
    dropout : ``float``, optional (default=0.5)
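Note the stacked decorators in this example: because each register call returns the class unchanged, Seq2SeqClassifier ends up in both the Classifier registry and the top-level Model registry under the same key. A minimal illustration of why stacking works, using an illustrative stand-in for the vampire Classifier base class:

from allennlp.data import Vocabulary
from allennlp.models.model import Model


class Classifier(Model):
    """Illustrative stand-in for vae.models.classifier.Classifier."""


# Each decorator registers the class in its own registry and hands it back,
# so both lookups resolve to the same class object.
@Classifier.register("dummy_classifier")
@Model.register("dummy_classifier")
class DummyClassifier(Classifier):
    def __init__(self, vocab: Vocabulary) -> None:
        super().__init__(vocab)


assert Model.by_name("dummy_classifier") is Classifier.by_name("dummy_classifier")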
github allenai / allennlp / allennlp / models / bert_for_classification.py View on GitHub
from typing import Dict, Union, Optional

from overrides import overrides
import torch
from pytorch_pretrained_bert.modeling import BertModel

from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.token_embedders.bert_token_embedder import PretrainedBertModel
from allennlp.nn.initializers import InitializerApplicator
from allennlp.nn import RegularizerApplicator
from allennlp.training.metrics import CategoricalAccuracy


@Model.register("bert_for_classification")
class BertForClassification(Model):
    """
    An AllenNLP Model that runs pretrained BERT,
    takes the pooled output, and adds a Linear layer on top.
    If you want an easy way to use BERT for classification, this is it.
    Note that this is a somewhat non-AllenNLP-ish model architecture,
    in that it essentially requires you to use the "bert-pretrained"
    token indexer, rather than configuring whatever indexing scheme you like.

    See `allennlp/tests/fixtures/bert/bert_for_classification.jsonnet`
    for an example of what your config might look like.

    Parameters
    ----------
    vocab : ``Vocabulary``
    bert_model : ``Union[str, BertModel]``
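As the docstring notes, this model is meant to be driven entirely by configuration. A hedged sketch of the "model" block such a config might contain follows; the pretrained model name is a placeholder, and the real example is the fixture file referenced above.

# Placeholder values; the real fixture lives at
# allennlp/tests/fixtures/bert/bert_for_classification.jsonnet.
model_config = {
    "type": "bert_for_classification",  # the key passed to Model.register
    "bert_model": "bert-base-uncased",  # illustrative pretrained model name
}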
github allenai / vampire / vampire / models / classifier.py View on GitHub
from typing import Dict, List

import torch
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy

from vampire.modules.encoder import Encoder

@Model.register("classifier")
class Classifier(Model):

    def __init__(self,
                 vocab: Vocabulary,
                 input_embedder: TextFieldEmbedder,
                 encoder: Encoder,
                 output_layer: FeedForward,
                 dropout: float = None,
                 initializer: InitializerApplicator = InitializerApplicator()
                ) -> None:
        super().__init__(vocab)
        self._input_embedder = input_embedder
        if dropout:
            self._dropout = torch.nn.Dropout(dropout)
        else:
            self._dropout = None
github plasticityai / magnitude / pymagnitude / third_party / allennlp / models / reading_comprehension / bidaf.py View on GitHub
        for b in range(batch_size):  # pylint: disable=invalid-name
            for j in range(passage_length):
                val1 = span_start_logits[b, span_start_argmax[b]]
                if val1 < span_start_logits[b, j]:
                    span_start_argmax[b] = j
                    val1 = span_start_logits[b, j]

                val2 = span_end_logits[b, j]

                if val1 + val2 > max_span_log_prob[b]:
                    best_word_span[b, 0] = span_start_argmax[b]
                    best_word_span[b, 1] = j
                    max_span_log_prob[b] = val1 + val2
        return best_word_span

BidirectionalAttentionFlow = Model.register(u"bidaf")(BidirectionalAttentionFlow)
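The magnitude snippet shows the same registry being used without decorator syntax (this file is Python-2 compatible, hence the u-prefixed string): Model.register(name) returns a decorator, so calling it and applying it to an existing class by hand is equivalent to the usual @Model.register form. A small sketch with an illustrative class name:

from allennlp.data import Vocabulary
from allennlp.models.model import Model


class PlainModel(Model):
    """An unregistered model standing in for BidirectionalAttentionFlow."""
    def __init__(self, vocab: Vocabulary) -> None:
        super().__init__(vocab)


# Model.register("plain") returns a decorator; applying it by hand registers
# the class and returns it, exactly as the @-syntax would.
PlainModel = Model.register("plain")(PlainModel)

assert Model.by_name("plain") is PlainModel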
github huggingface / hmtl / hmtl / models / layerNerEmdCoref.py View on GitHub
import logging

from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import RegularizerApplicator, InitializerApplicator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules import FeedForward
from allennlp.models.crf_tagger import CrfTagger

from hmtl.modules.text_field_embedders import ShortcutConnectTextFieldEmbedder
from hmtl.models import CoreferenceCustom

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name


@Model.register("ner_emd_coref")
class LayerNerEmdCoref(Model):
    """
    A class that implements three tasks of the HMTL model: NER (CRF Tagger), EMD (CRF Tagger) and Coreference Resolution.
    
    Parameters
    ----------
    vocab: ``allennlp.data.Vocabulary``, required.
        The vocabulary fitted on the data.
    params: ``allennlp.common.Params``, required.
        Configuration parameters for the multi-task model.
    regularizer: ``allennlp.nn.RegularizerApplicator``, optional (default = None)
        A regularizer to apply to the model's layers.
    """

    def __init__(self, vocab: Vocabulary, params: Params, regularizer: RegularizerApplicator = None):
github XinnuoXu / CVAE_Dial / coherence / models / dialogue_context_coherence_attention_classifier.py View on GitHub
from typing import Optional

from overrides import overrides

from allennlp.common import Params
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2VecEncoder, \
    TextFieldEmbedder, Seq2SeqEncoder, SimilarityFunction, \
    TimeDistributed, MatrixAttention
from allennlp.nn import InitializerApplicator, RegularizerApplicator

from allennlp.nn.util import get_text_field_mask, last_dim_softmax, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy


@Model.register("dialogue_context_coherence_attention_classifier")
class DialogueContextCoherenceAttentionClassifier(Model):

    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 attend_feedforward: FeedForward,
                 similarity_function: SimilarityFunction,
                 compare_feedforward: FeedForward,
                 classifier_feedforward: FeedForward,
                 context_encoder: Optional[Seq2SeqEncoder] = None,
                 response_encoder: Optional[Seq2SeqEncoder] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(DialogueContextCoherenceAttentionClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
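Finally, since every call to Model.register feeds the same registry, you can check which names are available at runtime. Registration happens when a module is imported, so only models whose modules have actually been imported will show up; this sketch assumes the relevant packages are installed and imported.

from allennlp.models.model import Model

# Prints the registered model names, e.g. "text_classifier" or "nabert+",
# once the corresponding modules have been imported.
print(Model.list_available())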