How to use the deeppavlov.core.common.log.get_logger function in deeppavlov

To help you get started, we've selected a few deeppavlov examples that show popular ways the function is used in public projects.

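Every snippet below follows the same pattern: call get_logger(__name__) once at module level and log through the returned object, which behaves like a standard logging.Logger configured with DeepPavlov's log settings. A minimal sketch of that pattern (the load_resources function is hypothetical, and recent DeepPavlov releases deprecate get_logger in favor of the standard logging.getLogger):

from deeppavlov.core.common.log import get_logger

# Module-level logger shared by the whole module.
log = get_logger(__name__)


def load_resources(path: str) -> None:
    log.info('Loading resources from %s', path)
    try:
        with open(path) as f:
            f.read()
    except OSError:
        # exception() logs the message together with the current traceback.
        log.exception('Could not read %s', path)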

deepmipt/DeepPavlov: utils/server_utils/server.py (view on GitHub)
from flasgger import Swagger, swag_from
from flask import Flask, request, jsonify, redirect, Response
from flask_cors import CORS

from deeppavlov.core.commands.infer import build_model
from deeppavlov.core.commands.utils import parse_config
from deeppavlov.core.common.chainer import Chainer
from deeppavlov.core.common.file import read_json
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.common.paths import get_settings_path
from deeppavlov.core.agent.dialog_logger import DialogLogger
from deeppavlov.core.data.utils import check_nested_dict_keys, jsonify_data

SERVER_CONFIG_FILENAME = 'server_config.json'

log = get_logger(__name__)

app = Flask(__name__)
Swagger(app)
CORS(app)

dialog_logger = DialogLogger(agent_name='dp_api')


def get_server_params(server_config_path, model_config):
    server_config = read_json(server_config_path)
    model_config = parse_config(model_config)

    server_params = server_config['common_defaults']

    if check_nested_dict_keys(model_config, ['metadata', 'labels', 'server_utils']):
        model_tag = model_config['metadata']['labels']['server_utils']
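
The excerpt above stops before the module-level log and dialog_logger are used. A hypothetical route handler in the same style (the /probe endpoint and its payload are invented for illustration; DialogLogger.log_in is assumed to record incoming utterances as in the real server module):

@app.route('/probe', methods=['POST'])
def probe() -> Response:
    data = request.get_json()
    dialog_logger.log_in(data)
    log.info('Received request: %r', data)
    return jsonify({'status': 'ok'})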

deepmipt/DeepPavlov: deeppavlov/models/preprocessors/personachat_preprocessor.py (view on GitHub)
import pickle

from nltk import word_tokenize
import numpy as np
from tqdm import tqdm

from deeppavlov.core.common.log import get_logger
from deeppavlov.core.common.registry import register
from deeppavlov.core.commands.utils import expand_path
from deeppavlov.core.data.utils import download
from deeppavlov.core.models.component import Component
from deeppavlov.core.models.estimator import Estimator

from deeppavlov.core.common.metrics_registry import register_metric

logger = get_logger(__name__)


def map_fn(fn, obj):
    # Apply fn to every leaf of an arbitrarily nested list/tuple structure.
    if isinstance(obj, (list, tuple)):
        return [map_fn(fn, o) for o in obj]
    return fn(obj)


def get_shape(obj):
    # Infer the shape of a nested list/tuple, assuming it is rectangular.
    if isinstance(obj, (list, tuple)):
        return (len(obj), *get_shape(obj[0]))
    return ()

deepmipt/DeepPavlov: deeppavlov/models/kg_ranker/kg_manager.py (view on GitHub)

import random
import collections
import numpy as np

from deeppavlov.core.models.component import Component
from deeppavlov.core.common.registry import register
from deeppavlov.core.common.log import get_logger


log = get_logger(__name__)


@register('kg_manager')
class KudaGoDialogueManager(Component):
    def __init__(self, cluster_policy, n_top, min_num_events, max_num_filled_slots,
                 *args, **kwargs):
        self.cluster_policy = cluster_policy
        self.num_top = n_top
        self.min_num_events = min_num_events
        self.max_num_filled_slots = max_num_filled_slots
        self.thanks = ['спасибо', 'спс', 'пока', 'до свидания']  # Russian for 'thank you', 'thanks', 'bye', 'goodbye'

    def __call__(self, events, slots, utter_history):
        messages, out_events, new_slots, cluster_ids = [], [], [], []
        for events, slots, utter_history in zip(events, slots, utter_history):
            m = ""

deepmipt/DeepPavlov: deeppavlov/models/classifiers/cos_sim_classifier.py (view on GitHub)
from typing import List, Tuple, Union

import numpy as np
from scipy.sparse.linalg import norm as sparse_norm
from scipy.sparse import vstack
from scipy.sparse import csr_matrix

from deeppavlov.core.common.registry import register
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.models.estimator import Estimator
from deeppavlov.core.common.file import save_pickle
from deeppavlov.core.common.file import load_pickle
from deeppavlov.core.commands.utils import expand_path, make_all_dirs
from deeppavlov.core.models.serializable import Serializable

logger = get_logger(__name__)


@register("cos_sim_classifier")
class CosineSimilarityClassifier(Estimator, Serializable):
    """
    Classifier based on cosine similarity between vectorized sentences

    Parameters:
        save_path: path to save the model
        load_path: path to load the model

    Returns:
        None
    """

    def __init__(self, top_n: int = 1, save_path: str = None, load_path: str = None, **kwargs) -> None:

deepmipt/DeepPavlov: deeppavlov/skills/odqa/basic_neural_ranker.py (view on GitHub)

from typing import Union, List
import copy
import tensorflow as tf
import tensorflow_hub as hub
from nltk.tokenize import sent_tokenize

from deeppavlov.core.common.registry import register
from deeppavlov.core.models.tf_model import TFModel
from deeppavlov.core.common.log import get_logger

logger = get_logger(__name__)


@register('ranker_encoder')
class BasicNeuralRankerEncoder(TFModel):
    def __init__(self, save_path=None, load_path=None, **kwargs):
        self.hidden_size_dense_1 = 300
        self.hidden_size_dense_2 = 300
        self.hidden_size_dense_3 = 512
        self.learning_rate = 0.01
        self.question_pad_size = 3
        self.context_pad_size = 40
        self.emb_size = 512
        self.n_epochs = 1000

        # self.mode = kwargs.get('mode', None)

deepmipt/DeepPavlov: deeppavlov/models/evolution/evolution_many_inputs_model.py (view on GitHub)
from deeppavlov.core.models.keras_model import KerasModel
from deeppavlov.models.classifiers.intents.intent_model import KerasIntentModel
from deeppavlov.models.classifiers.intents.utils import labels2onehot, log_metrics, proba2labels
from deeppavlov.models.embedders.fasttext_embedder import FasttextEmbedder
from deeppavlov.models.classifiers.intents.utils import md5_hashsum
from deeppavlov.models.tokenizers.nltk_tokenizer import NLTKTokenizer
from deeppavlov.core.common.log import get_logger
from deeppavlov.models.evolution.check_binary_mask import number_to_type_layer, \
    find_sources_and_sinks, get_digraph_from_binary_mask, get_graph_and_plot
from deeppavlov.models.evolution.utils import expand_tile
from deeppavlov.core.common.file import save_json, read_json
from deeppavlov.core.layers.keras_layers import multiplicative_self_attention_init, \
    multiplicative_self_attention_get_output


log = get_logger(__name__)


@register('evolution_many_inputs_classification_model')
class KerasEvolutionClassificationManyInputsModel(KerasIntentModel):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.opt["binary_mask"] = np.array(self.opt["binary_mask"])
        get_graph_and_plot(self.opt["nodes"], self.opt["binary_mask"], self.opt["n_types"],
                           path=str(self.save_path.resolve().parent))

    def texts2vec(self, sentences, i):
        """
        Convert texts to vector representations using embedder and padding up to self.opt["text_size"] tokens
        Args:
            sentences: list of lists of tokens

deepmipt/DeepPavlov: deeppavlov/models/ranking/matching_models/matching_predictor.py (view on GitHub)

import numpy as np
from typing import List, Iterable

from deeppavlov.core.common.log import get_logger
from deeppavlov.core.models.component import Component
from deeppavlov.models.ranking.matching_models.tf_base_matching_model import TensorflowBaseMatchingModel
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.utils import zero_pad_truncate

log = get_logger(__name__)


@register('matching_predictor')
class MatchingPredictor(Component):
    """The class for ranking of the response given N context turns
    using the trained SMN or DAM neural network in the ``interact`` mode.

    Args:
        num_context_turns (int): A number N of ``context`` turns in data samples.
        max_sequence_length (int): A maximum length of text sequences in tokens.
            Longer sequences will be truncated and shorter ones will be padded.
        *args, **kwargs: Other parameters.
    """

    def __init__(self,
                 model: TensorflowBaseMatchingModel,

deepmipt/DeepPavlov: deeppavlov/models/ranking/ranking_dict.py (view on GitHub)
from abc import ABCMeta, abstractmethod
import numpy as np
from deeppavlov.core.commands.utils import expand_path
from keras.preprocessing.sequence import pad_sequences
from deeppavlov.core.common.log import get_logger


log = get_logger(__name__)


class RankingDict(metaclass=ABCMeta):

    def __init__(self, save_path, load_path,
                 max_sequence_length, padding, truncating):

        self.max_sequence_length = max_sequence_length
        self.padding = padding
        self.truncating = truncating

        save_path = expand_path(save_path).resolve().parent
        load_path = expand_path(load_path).resolve().parent

        self.tok_save_path = save_path / "tok2int.dict"
        self.tok_load_path = load_path / "tok2int.dict"

deepmipt/DeepPavlov: deeppavlov/models/state_tracker/network.py (view on GitHub)

import json
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer as xav
import numpy as np
from time import time
from typing import Tuple

from deeppavlov.core.common.registry import register
from deeppavlov.core.common.errors import ConfigError
from deeppavlov.core.models.tf_model import EnhancedTFModel
from deeppavlov.core.common.log import get_logger


log = get_logger(__name__)


@register("dst_network")
class StateTrackerNetwork(EnhancedTFModel):
    """
    Parameters:
        hidden_size: RNN hidden layer size.
        dense_sizes: sizes of the dense layers.
        num_slot_values: number of distinct slot values.
        embedding_matrix: matrix of pretrained token embeddings.
        **kwargs: parameters passed to a parent
                  :class:`~deeppavlov.core.models.tf_model.TFModel` class.
    """

    GRAPH_PARAMS = ['hidden_size', 'dense_sizes', 'embedding_size',
                    'num_user_actions', 'num_system_actions', 'num_slot_values']

deepmipt/DeepPavlov: deeppavlov/dataset_readers/google_dial_reader.py (view on GitHub)
import json
from pathlib import Path
from typing import Dict, List, Tuple
from abc import abstractmethod

from overrides import overrides

from deeppavlov.core.common.registry import register
from deeppavlov.core.data.dataset_reader import DatasetReader
from deeppavlov.core.data.utils import download_decompress, mark_done
from deeppavlov.core.common.log import get_logger


log = get_logger(__name__)


class GoogleDialogsDatasetReader(DatasetReader):

    url = 'http://files.deeppavlov.ai/datasets/google_simulated_dialogues.tar.gz'

    @staticmethod
    @abstractmethod
    def _data_fname(datatype):
        pass

    @classmethod
    @overrides
    def read(cls, data_path: str, mode: str = "basic") -> Dict[str, List]:
        """
        Parameters: