How to use the nemo.core.EvaluatorCallback class in NeMo

To help you get started, we've selected a few NeMo examples based on popular ways EvaluatorCallback is used in public projects.

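EvaluatorCallback belongs to NeMo's callback-based training API (NeMo 0.x). You build an evaluation DAG, pass its output tensors to the callback, and supply two functions: user_iter_callback accumulates per-batch results, and user_epochs_done_callback aggregates them into metrics once the evaluation pass finishes. Below is a minimal sketch of the pattern, not a verbatim recipe: eval_loss stands in for an output tensor of your own evaluation DAG, and the two placeholder callbacks assume the NeMo 0.x signatures (a dict of tensor values plus a shared global_vars dict per batch, and a dict of scalars returned at the end of evaluation).

import nemo

nf = nemo.core.NeuralModuleFactory()

# Placeholder per-batch callback: collect loss values into global_vars.
# `tensors` maps tensor names to lists of per-batch values.
def my_iter_callback(tensors, global_vars):
    for name, values in tensors.items():
        if "loss" in name:
            global_vars.setdefault("eval_loss", []).append(values[0].item())

# Placeholder end-of-evaluation callback: average and report the loss.
def my_epochs_done_callback(global_vars):
    avg_loss = sum(global_vars["eval_loss"]) / len(global_vars["eval_loss"])
    print(f"Evaluation loss: {avg_loss:.4f}")
    return {"Evaluation loss": avg_loss}  # scalars for TensorBoard, if enabled

eval_callback = nemo.core.EvaluatorCallback(
    eval_tensors=[eval_loss],             # outputs of the evaluation DAG
    user_iter_callback=my_iter_callback,
    user_epochs_done_callback=my_epochs_done_callback,
    eval_step=100,                        # evaluate every 100 training steps
    tb_writer=nf.tb_writer)               # may be None if TB logging is off

# The callback is then passed to training together with any others, e.g.:
# nf.train(tensors_to_optimize=[train_loss],
#          callbacks=[train_callback, eval_callback], ...)

In practice, as the examples below show, the two user callbacks are usually taken from NeMo's collection helpers (for instance process_evaluation_batch and process_evaluation_epoch in the ASR recipes) rather than written from scratch.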

NVIDIA/NeMo: examples/asr/quartznet.py (view on GitHub)
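
Here an evaluation DAG (preprocessor, encoder, decoder, greedy decoder, CTC loss) is built for each evaluation dataset, and one EvaluatorCallback is registered per dataset; the iteration and epoch callbacks come from NeMo's ASR helpers and aggregate loss and WER against the transcripts.
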
            length=a_sig_length_e)
        encoded_e, encoded_len_e = encoder(
            audio_signal=processed_signal_e,
            length=p_length_e)
        log_probs_e = decoder(encoder_output=encoded_e)
        predictions_e = greedy_decoder(log_probs=log_probs_e)
        loss_e = ctc_loss(
            log_probs=log_probs_e,
            targets=transcript_e,
            input_length=encoded_len_e,
            target_length=transcript_len_e)

        # create corresponding eval callback
        tagname = os.path.basename(args.eval_datasets[i]).split(".")[0]

        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=[loss_e, predictions_e,
                          transcript_e, transcript_len_e],
            user_iter_callback=partial(
                process_evaluation_batch,
                labels=vocab),
            user_epochs_done_callback=partial(
                process_evaluation_epoch,
                tag=tagname,
                logger=neural_factory.logger),
            eval_step=args.eval_freq,
            tb_writer=neural_factory.tb_writer)

        callbacks.append(eval_callback)

    return loss_t, callbacks, steps_per_epoch

NVIDIA/NeMo: examples/tts/tacotron2.py (view on GitHub)
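
A Tacotron 2 recipe. Note the additional tb_writer_func argument, which lets the callback log audio-specific artifacts such as spectrograms and alignments to TensorBoard in addition to scalar metrics.
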
            mel_target=spec_target,
            target_len=spec_target_len)
        loss = t2_loss(
            mel_out=mel_decoder,
            mel_out_postnet=mel_postnet,
            gate_out=gate,
            mel_target=spec_target,
            gate_target=gate_target,
            target_len=spec_target_len,
            seq_len=audio_len)

        # create corresponding eval callback
        tagname = os.path.basename(eval_dataset).split(".")[0]
        eval_tensors = [loss, spec_target, mel_postnet, gate, gate_target,
                        alignments]
        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=eval_tensors,
            user_iter_callback=tacotron2_process_eval_batch,
            user_epochs_done_callback=partial(
                tacotron2_process_final_eval,
                tag=tagname,
                logger=neural_factory.logger),
            tb_writer_func=partial(
                tacotron2_eval_log_to_tb_func,
                tag=tagname),
            eval_step=eval_freq,
            tb_writer=neural_factory.tb_writer)

        callbacks.append(eval_callback)
    return callbacks

NVIDIA/NeMo: examples/asr/jasper.py (view on GitHub)
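
The Jasper recipe follows the same per-dataset pattern as the QuartzNet example above, tagging each callback with the evaluation dataset's filename.
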
            input_signal=audio_signal_e,
            length=a_sig_length_e)
        encoded_e, encoded_len_e = jasper_encoder(
            audio_signal=processed_signal_e,
            length=p_length_e)
        log_probs_e = jasper_decoder(encoder_output=encoded_e)
        predictions_e = greedy_decoder(log_probs=log_probs_e)
        loss_e = ctc_loss(
            log_probs=log_probs_e,
            targets=transcript_e,
            input_length=encoded_len_e,
            target_length=transcript_len_e)

        # create corresponding eval callback
        tagname = os.path.basename(args.eval_datasets[i]).split(".")[0]
        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=[loss_e, predictions_e,
                          transcript_e, transcript_len_e],
            user_iter_callback=partial(
                process_evaluation_batch,
                labels=vocab),
            user_epochs_done_callback=partial(
                process_evaluation_epoch,
                tag=tagname,
                logger=logger),
            eval_step=args.eval_freq,
            tb_writer=neural_factory.tb_writer)

        callbacks.append(eval_callback)
    return loss_t, callbacks, steps_per_epoch

NVIDIA/NeMo: examples/asr/jasper_an4.py (view on GitHub)
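
An end-to-end AN4 example showing the EvaluatorCallback alongside the other standard callbacks (SimpleLossLoggerCallback for training progress, CheckpointCallback for saving), all passed together to nf.train.
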
    nf.logger.info(
        "Num of params in encoder: {0}".format(jasper_encoder.num_weights))

    # Callbacks to print info to console and Tensorboard
    train_callback = nemo.core.SimpleLossLoggerCallback(
        tensors=[loss, predictions, transcript, transcript_len],
        print_func=lambda x: monitor_asr_train_progress(x, labels=vocab),
        get_tb_values=lambda x: [["loss", x[0]]],
        tb_writer=tb_writer,
    )

    checkpointer_callback = nemo.core.CheckpointCallback(
        folder=checkpoint_dir, step_freq=args.checkpoint_save_freq)

    eval_tensors = [loss_e, predictions_e, transcript_e, transcript_len_e]
    eval_callback = nemo.core.EvaluatorCallback(
        eval_tensors=eval_tensors,
        user_iter_callback=lambda x, y: process_evaluation_batch(
            x, y, labels=vocab),
        user_epochs_done_callback=process_evaluation_epoch,
        eval_step=args.eval_freq,
        tb_writer=tb_writer)

    nf.train(
        tensors_to_optimize=[loss],
        callbacks=[train_callback, eval_callback, checkpointer_callback],
        optimizer=args.optimizer,
        lr_policy=CosineAnnealing(total_steps=total_steps),
        optimization_params={
            "num_epochs": args.num_epochs,
            "max_steps": args.max_steps,
            "lr": args.lr,

NVIDIA/NeMo: examples/image/transfer_learning.py (view on GitHub)
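
An image-classification fine-tuning example. The evaluation callback receives the loss, model outputs, and labels so that the user callbacks can compute accuracy on the evaluation set.
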
# NOTICE: unfreeze top classification layer for fine-tuning
resnet.unfreeze(set(["fc.weight", "fc.bias"]))

images, labels = dl_train()
outputs = resnet(x=images)
train_loss = L_train(predictions=outputs, labels=labels)

e_images, e_labels = dl_eval()
e_outputs = resnet(x=e_images)
e_loss = L_eval(predictions=e_outputs, labels=e_labels)

callback = nemo.core.SimpleLossLoggerCallback(
    step_freq=20,
    tb_writer=tb_writer,
    tensor_list2str=lambda x: str(x[0].item()),
    tensor_list2str_evl=lambda x: compute_accuracy(x))

callback_eval = nemo.core.EvaluatorCallback(
    eval_tensors=[e_loss, e_outputs, e_labels],
    user_iter_callback=eval_iter_callback,
    user_epochs_done_callback=eval_epochs_done_callback,
    eval_step=30,
    tb_writer=tb_writer)


optimizer = neural_factory.get_trainer(
    params={
        "optimization_params": {
            "num_epochs": num_epochs,
            "lr": learning_rate,
            "max_steps": max_steps,
            "weight_decay": weight_decay,
            "momentum": momentum}})

NVIDIA/NeMo: examples/nlp/lm_tutorial.py (view on GitHub)
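
A language-modeling tutorial in which the evaluation callback tracks only the evaluation loss, running every args.eval_freq training steps.
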
def print_loss(x):
    loss = str(x[0].item())
    print(f"Training loss: {loss}")


# callback which prints training loss once in a while
callback_train = nemo.core.SimpleLossLoggerCallback(
    tensors=[train_loss],
    step_freq=100,
    print_func=print_loss,
    get_tb_values=lambda x: [["loss", x[0]]],
    tb_writer=tb_writer)

# callback which calculates evaluation loss
callback_eval = nemo.core.EvaluatorCallback(
    eval_tensors=[eval_loss],
    user_iter_callback=eval_iter_callback,
    user_epochs_done_callback=eval_epochs_done_callback,
    eval_step=args.eval_freq,
    tb_writer=tb_writer)

# callback which saves checkpoints once in a while
callback_ckpt = nemo.core.CheckpointCallback(
    folder=args.checkpoint_dir,
    step_freq=args.checkpoint_save_freq,
    checkpoints_to_keep=-1)

# define learning rate decay policy
lr_policy = CosineAnnealing(args.max_steps, warmup_steps=args.warmup_steps)

# define and launch training algorithm (optimizer)

NVIDIA/NeMo: examples/nlp/sentence_classification_with_bert.py (view on GitHub)
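
A BERT sentence-classification example. The user callbacks are supplied as lambdas, and eval_step=steps_per_epoch makes evaluation run once per training epoch.
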
eval_tensors, _, _, data_layer =\
    create_pipeline(num_samples=args.num_eval_samples,
                    batch_size=args.batch_size,
                    num_gpus=args.num_gpus,
                    local_rank=args.local_rank,
                    mode='eval')

# Create callbacks for train and eval modes
train_callback = nemo.core.SimpleLossLoggerCallback(
    tensors=train_tensors,
    print_func=lambda x: str(np.round(x[0].item(), 3)),
    tb_writer=nf.tb_writer,
    get_tb_values=lambda x: [["loss", x[0]]],
    step_freq=steps_per_epoch)

eval_callback = nemo.core.EvaluatorCallback(
    eval_tensors=eval_tensors,
    user_iter_callback=lambda x, y: eval_iter_callback(
        x, y, data_layer),
    user_epochs_done_callback=lambda x: eval_epochs_done_callback(
        x, f'{nf.work_dir}/graphs'),
    tb_writer=nf.tb_writer,
    eval_step=steps_per_epoch)

# Create callback to save checkpoints
ckpt_callback = nemo.core.CheckpointCallback(
    folder=nf.checkpoint_dir,
    epoch_freq=args.save_epoch_freq,
    step_freq=args.save_step_freq)

lr_policy_fn = get_lr_policy(args.lr_policy,
                             total_steps=args.num_epochs * steps_per_epoch,

NVIDIA/NeMo: examples/nlp/glue_with_BERT.py (view on GitHub)
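
The GLUE recipe keeps a list of evaluation callbacks; for MNLI, which has matched and mismatched dev sets, it appends a second EvaluatorCallback for the mismatched set.
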
    eval_tensors=eval_tensors,
    user_iter_callback=lambda x, y: eval_iter_callback(x, y),
    user_epochs_done_callback=lambda x:
        eval_epochs_done_callback(x, args.work_dir, eval_task_names[0]),
    tb_writer=nf.tb_writer,
    eval_step=steps_per_epoch)]

"""
MNLI task has two dev sets: matched and mismatched
Create additional callback and data layer for MNLI mismatched dev set
"""
if args.task_name == 'mnli':
    _, _, eval_data_layer_mm, eval_tensors_mm = create_pipeline(
                                                evaluate=True,
                                                processor=task_processors[1])
    callbacks_eval.append(nemo.core.EvaluatorCallback(
        eval_tensors=eval_tensors_mm,
        user_iter_callback=lambda x, y: eval_iter_callback(x, y),
        user_epochs_done_callback=lambda x:
            eval_epochs_done_callback(x, args.work_dir, eval_task_names[1]),
        tb_writer=nf.tb_writer,
        eval_step=steps_per_epoch))

nf.logger.info(f"steps_per_epoch = {steps_per_epoch}")
callback_train = nemo.core.SimpleLossLoggerCallback(
    tensors=[train_loss],
    print_func=lambda x: print("Loss: {:.3f}".format(x[0].item())),
    get_tb_values=lambda x: [["loss", x[0]]],
    step_freq=args.loss_step_freq,
    tb_writer=nf.tb_writer)

ckpt_callback = nemo.core.CheckpointCallback(

NVIDIA/NeMo: examples/asr/experimental/garnet_rnnlm.py (view on GitHub)
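
An experimental recipe that loops over several named evaluation sets, building one EvaluatorCallback per set and using functools.partial to bind labels, special-token ids, and a tag into the shared callback functions.
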
    dag_callbacks[0].tb_writer = tb_writer

    # Callbacks
    train_callback = nemo.core.SimpleLossLoggerCallback(
        tensors=[train_loss],
        print_func=lambda x: logger.info(f"Loss: {x[0].item()}"),
        get_tb_values=lambda x: [("loss", x[0])],
        tb_writer=tb_writer
    )
    log_callbacks = [train_callback]
    target = cfg['target']
    labels = target['labels']
    specials = {f'{ss.name}_id': target[f'{ss.name}_id'] for ss in sss}
    for name, tensors in evals:
        eval_callback = nemo.core.EvaluatorCallback(
            # TODO: should be fixed soon so we don't have to pass an explicit list
            eval_tensors=list(tensors),
            user_iter_callback=partial(
                process_evaluation_batch,
                labels=labels,
                specials=specials,
                write_attn=False
            ),
            user_epochs_done_callback=partial(
                process_evaluation_epoch,
                tag=os.path.basename(name),
                logger=logger
            ),
            eval_step=args.eval_freq,
            tb_writer=tb_writer
        )

NVIDIA/NeMo: examples/asr/experimental/garnet.py (view on GitHub)
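
Nearly identical to garnet_rnnlm.py above, except that the epochs-done callback is bound with calc_wer=True so word error rate is reported as well.
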
    dag_callbacks[0].tb_writer = tb_writer
    dag_callbacks[1].tb_writer = tb_writer

    # Callbacks
    train_callback = nemo.core.SimpleLossLoggerCallback(
        tensors=[train_loss],
        print_func=lambda x: logger.info(f"Loss: {x[0].item()}"),
        get_tb_values=lambda x: [("loss", x[0])],
        tb_writer=tb_writer
    )
    log_callbacks = [train_callback]
    target = cfg['target']
    labels = target['labels']
    specials = {f'{ss.name}_id': target[f'{ss.name}_id'] for ss in sss}
    for name, tensors in evals:
        eval_callback = nemo.core.EvaluatorCallback(
            # TODO: should be fixed soon so we don't have to pass an explicit list
            eval_tensors=list(tensors),
            user_iter_callback=partial(
                process_evaluation_batch,
                labels=labels,
                specials=specials,
                tb_writer=tb_writer,
                write_attn=False
            ),
            user_epochs_done_callback=partial(
                process_evaluation_epoch,
                tag=os.path.basename(name),
                calc_wer=True,
                logger=logger
            ),
            eval_step=args.eval_freq,