How to use the nemo.core module in NeMo

To help you get started, we've selected a few NeMo examples based on popular ways nemo.core is used in public projects.

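All of the examples below share the same pattern: configure a nemo.core.NeuralModuleFactory, wire neural modules into a graph by calling them on each other's outputs, attach nemo.core callbacks, and invoke a training or inference action. A minimal, hedged sketch of the factory configuration (the argument values are illustrative, not taken from any single example):

import nemo

# factory configured explicitly; the examples below pass similar arguments
nf = nemo.core.NeuralModuleFactory(
    backend=nemo.core.Backend.PyTorch,                  # execution backend
    placement=nemo.core.DeviceType.GPU,                 # or DeviceType.CPU / DeviceType.AllGpu
    optimization_level=nemo.core.Optimization.mxprO0,   # mixed-precision level (O0 = off)
    create_tb_writer=True)                              # exposes nf.tb_writer and nf.logger

# training-loop hooks also live in nemo.core, e.g. SimpleLossLoggerCallback,
# EvaluatorCallback and CheckpointCallback; all are shown or sketched below.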

NVIDIA / NeMo / examples / tts / waveglow.py (view on GitHub)
            batch_size=eval_batch_size,
            num_workers=cpu_per_dl,
            **eval_dl_params,
        )

        audio, audio_len = data_layer_eval()
        spec_target, spec_target_len = data_preprocessor(
            input_signal=audio,
            length=audio_len)

        audio_pred, log_s_list, log_det_W_list = waveglow(
            mel_spectrogram=spec_target, audio=audio)

        # create corresponding eval callback
        tagname = os.path.basename(eval_dataset).split(".")[0]
        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=[audio_pred, spec_target, spec_target_len],
            user_iter_callback=waveglow_process_eval_batch,
            user_epochs_done_callback=lambda x: x,
            tb_writer_func=partial(
                waveglow_eval_log_to_tb_func,
                tag=tagname,
                mel_fb=data_preprocessor.filter_banks),
            eval_step=eval_freq,
            tb_writer=neural_factory.tb_writer)

        callbacks.append(eval_callback)
    return callbacks
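
The callbacks list built here is returned to the caller, which normally appends a training-loss logger and hands everything to the factory's train action. A hedged sketch of that call site (train_loss, neural_factory and the optimizer settings are assumptions, not taken from waveglow.py):

# at the call site: add a loss logger, then launch training
callbacks.append(nemo.core.SimpleLossLoggerCallback(
    tensors=[train_loss],
    print_func=lambda x: print(f"Loss: {x[0].item()}"),
    tb_writer=neural_factory.tb_writer))

neural_factory.train([train_loss],
                     callbacks=callbacks,
                     optimizer="adam",
                     optimization_params={"num_epochs": 100, "lr": 1e-4})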

NVIDIA / NeMo / examples / start_here / simplest_example.py (view on GitHub)
nf = nemo.core.NeuralModuleFactory()

# instantiate necessary neural modules
# RealFunctionDataLayer defaults to f=torch.sin, sampling from x=[-4, 4]
dl = nemo.tutorials.RealFunctionDataLayer(
    n=10000, batch_size=128)
fx = nemo.tutorials.TaylorNet(dim=4)
loss = nemo.tutorials.MSELoss()

# describe the flow of activations
x, y = dl()
p = fx(x=x)
lss = loss(predictions=p, target=y)

# SimpleLossLoggerCallback will print loss values to console.
callback = nemo.core.SimpleLossLoggerCallback(
    tensors=[lss],
    print_func=lambda x: print(f'Train Loss: {str(x[0].item())}'))

# Invoke "train" action
nf.train([lss], callbacks=[callback],
         optimization_params={"num_epochs": 3, "lr": 0.0003},
         optimizer="sgd")

NVIDIA / NeMo / collections / nemo_cv / nemo_cv / examples / mnist_lenet5.py (view on GitHub)
import math
import torch

import nemo

from nemo.core import NeuralType, DeviceType

from nemo_cv.modules.mnist_datalayer import MNISTDataLayer
from nemo_cv.modules.lenet5 import LeNet5
from nemo_cv.modules.nll_loss import NLLLoss


# 0. Instantiate Neural Factory with supported backend
nf = nemo.core.NeuralModuleFactory(placement=DeviceType.CPU)

# 1. Instantiate necessary neural modules
dl = MNISTDataLayer(
    batch_size=64,
    root="~/data/mnist",
    train=True,
    shuffle=True
)

lenet5 = LeNet5()

nll_loss = NLLLoss()

# 2. Describe the flow of activations
x, y = dl()
p = lenet5(images=x)
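
The snippet stops before the loss and training steps; following the pattern of simplest_example.py above, the rest would look roughly like this (the NLLLoss argument names are an assumption):

# 3. Compute loss and attach a logger, mirroring the simpler examples above
loss = nll_loss(predictions=p, targets=y)   # argument names assumed

callback = nemo.core.SimpleLossLoggerCallback(
    tensors=[loss],
    print_func=lambda x: print(f'Train Loss: {x[0].item()}'))

# 4. Invoke the "train" action on the factory from step 0
nf.train([loss], callbacks=[callback],
         optimization_params={"num_epochs": 5, "lr": 0.001},
         optimizer="adam")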

NVIDIA / NeMo / examples / asr / experimental / garnet.py (view on GitHub)
def main():
    # Parse args
    args = parse_args()
    cfg = parse_cfg(args)
    name = construct_name(args, cfg)
    # instantiate Neural Factory with supported backend
    neural_factory = nemo.core.NeuralModuleFactory(
        backend=nemo.core.Backend.PyTorch,
        local_rank=args.local_rank,
        optimization_level=args.amp_opt_level,
        log_dir=name,
        checkpoint_dir=args.checkpoint_dir,
        create_tb_writer=args.create_tb_writer,
        files_to_copy=[args.model_config, __file__],
        cudnn_benchmark=args.cudnn_benchmark,
        tensorboard_dir=args.tensorboard_dir)

    logger = neural_factory.logger
    tb_writer = neural_factory.tb_writer
    args.checkpoint_dir = neural_factory.checkpoint_dir

    logger.info(f'Name:\n{name}')
    logger.info(f'Args to be passed to job #{args.local_rank}:')
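
The logger, tb_writer and checkpoint_dir handles pulled off the factory here are normally threaded into nemo.core callbacks further down the script. A hedged sketch of that wiring (the loss tensor and the frequencies are illustrative):

    # illustrative continuation inside main(): callbacks consume the handles above
    train_callback = nemo.core.SimpleLossLoggerCallback(
        tensors=[train_loss],                 # assumed training-loss tensor
        print_func=lambda x: logger.info(f"Loss: {x[0].item()}"),
        tb_writer=tb_writer,
        step_freq=100)

    ckpt_callback = nemo.core.CheckpointCallback(
        folder=args.checkpoint_dir,
        step_freq=1000)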

NVIDIA / NeMo / examples / nlp / lm_tutorial.py (view on GitHub)
parser.add_argument("--label_smoothing", default=0.1, type=float)
parser.add_argument("--beam_size", default=4, type=int)
parser.add_argument("--tokenizer_model", default="vocab.txt", type=str)
parser.add_argument("--predict_last_k", default=16, type=int)
parser.add_argument("--interactive", action="store_true")
args = parser.parse_args()

# create TensorboardX logger to log training statistics
name = f"transformer-lm-lr_{args.lr}-optim_{args.optimizer}-" \
    f"warmup_{args.warmup_steps}-bs_{args.batch_size}"
tb_writer = None  # SummaryWriter(name)

# instantiate Neural Factory with supported backend
device = nemo.core.DeviceType.AllGpu if args.local_rank is not None \
    else nemo.core.DeviceType.GPU
neural_factory = nemo.core.NeuralModuleFactory(
    backend=nemo.core.Backend.PyTorch,
    local_rank=args.local_rank,
    optimization_level=nemo.core.Optimization.mxprO2,
    placement=device)

# define tokenizer, in this example we use word-level tokenizer
# we also adjust the vocabulary size to make it a multiple of 8 to accelerate
# training in fp16 mode with the use of Tensor Cores
tokenizer = nemo_nlp.WordTokenizer(f"{args.data_root}/{args.tokenizer_model}")
vocab_size = 8 * math.ceil(tokenizer.vocab_size / 8)

# instantiate necessary modules for the whole translation pipeline, namely
# data layers, encoder, decoder, output log_softmax, beam_search_translator
# and loss function
train_data_layer = nemo_nlp.LanguageModelingDataLayer(
    factory=neural_factory,
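
To make the Tensor Core padding above concrete, here is the rounding with an assumed vocabulary size (the number is illustrative only):

import math

raw_vocab_size = 31999                            # assumed tokenizer.vocab_size
vocab_size = 8 * math.ceil(raw_vocab_size / 8)    # -> 32000, a multiple of 8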

NVIDIA / NeMo / examples / nlp / joint_intent_slot_infer.py (view on GitHub)
parser.add_argument("--dataset_name", default='snips-all', type=str)
parser.add_argument("--data_dir", default='data/nlu/snips', type=str)
parser.add_argument("--work_dir",
                    default='outputs/SNIPS-ALL/20191014-104316/checkpoints',
                    type=str)
parser.add_argument("--eval_file_prefix", default='test', type=str)
parser.add_argument("--amp_opt_level", default="O0",
                    type=str, choices=["O0", "O1", "O2"])
parser.add_argument("--do_lower_case", action='store_false')

args = parser.parse_args()

if not os.path.exists(args.data_dir):
    raise ValueError(f'Data not found at {args.data_dir}')

nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch,
                                   local_rank=args.local_rank,
                                   optimization_level=args.amp_opt_level,
                                   log_dir=None)

""" Load the pretrained BERT parameters
To see the list of pretrained models, call:
nemo_nlp.huggingface.BERT.list_pretrained_models()
"""
pretrained_bert_model = nemo_nlp.huggingface.BERT(
    pretrained_model_name=args.pretrained_bert_model)
hidden_size = pretrained_bert_model.local_parameters["hidden_size"]
tokenizer = BertTokenizer.from_pretrained(args.pretrained_bert_model)


data_desc = JointIntentSlotDataDesc(args.data_dir,
                                    args.do_lower_case,
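
Since this is an inference script, the wired-up tensors are ultimately evaluated with the factory's infer action rather than train. A hedged sketch of that final step (intent_logits and slot_logits are assumed output tensors; the exact infer signature depends on the NeMo version):

# illustrative final step: restore weights from work_dir and run the graph
evaluated_tensors = nf.infer(
    tensors=[intent_logits, slot_logits],
    checkpoint_dir=args.work_dir)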