How to use the fairseq.options module in fairseq

To help you get started, we've selected a few fairseq.options examples, based on popular ways the module is used in public projects.
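The excerpts below all assume fairseq's options module (and, where tensors appear, PyTorch) has been imported, roughly with this preamble:

import torch
from fairseq import options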


github microsoft / MASS / MASS-fairseq / fairseq / tasks / xmass_seq2seq.py
@classmethod
def prepare(cls, args, **kwargs):
    # eval_bool turns string flags such as "True"/"False" into real booleans
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)
    args.lang_tok = options.eval_bool(args.lang_tok)

    # --word-mask-keep-rand is a comma-separated triple of probabilities: mask,rand,keep
    s = [float(x) for x in args.word_mask_keep_rand.split(',')]
    setattr(args, 'word_mask', s[0])
    setattr(args, 'word_rand', s[1])
    setattr(args, 'word_keep', s[2])
    setattr(args, 'pred_probs', torch.FloatTensor([s[0], s[1], s[2]]))

    # normalize the language lists and sanity-check them
    args.langs = sorted(args.langs.split(','))
    args.source_langs = sorted(args.source_langs.split(','))
    args.target_langs = sorted(args.target_langs.split(','))

    for lang in args.source_langs:
        assert lang in args.langs
    for lang in args.target_langs:
        assert lang in args.langs
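prepare() above leans on options.eval_bool, which evaluates a string flag such as 'True' or 'False' into a real boolean and falls back to a default when the value cannot be evaluated. A minimal sketch of the behavior, assuming the eval_bool(x, default=False) helper shipped in the fairseq versions these projects use:

from fairseq import options

# eval_bool turns string-valued CLI flags into real booleans
assert options.eval_bool('True') is True
assert options.eval_bool('False') is False
assert options.eval_bool(None, default=True) is True  # unevaluable input falls back to the default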
github ecchochan / roberta-squad / fairseq_train_embed_cn.py
def cli_main():
    parser = options.get_training_parser()

    # extra arguments layered on top of the stock fairseq training parser
    parser.add_argument('--lr_decay', default=1, type=float,
                        help='Learning rate decay factor, 1.0 = no decay')
    parser.add_argument('--lr_decay_layers', default=24, type=int,
                        help='Number of layers for learning rate decay')
    parser.add_argument('--freeze_transformer', dest='freeze_transformer', action='store_true',
                        help='Whether to freeze the weights in transformer')
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        ...  # distributed launch logic elided in this excerpt
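The shape of cli_main is the standard way to extend fairseq's training CLI: get_training_parser() supplies the stock flags, custom add_argument calls layer project options on top, and parse_args_and_arch() resolves --arch and applies architecture defaults. A minimal, self-contained sketch of the same pattern (the extra flag is illustrative, not from the project above):

from fairseq import options

def cli_main():
    parser = options.get_training_parser()
    # hypothetical extra flag, for illustration only
    parser.add_argument('--my-extra-flag', action='store_true',
                        help='example of a project option layered on fairseq')
    # parse_args_and_arch resolves --arch and fills in architecture-specific defaults
    args = options.parse_args_and_arch(parser)
    print(args.arch, args.my_extra_flag)

if __name__ == '__main__':
    cli_main()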
github facebookresearch / ParlAI / parlai / agents / fairseq / fairseq.py
        # (opening of this call restored; the excerpt began mid-argument)
        agent.add_argument(
            '--seed',
            default=1,
            type=int,
            metavar='N',
            help='pseudo random number generator seed',
        )
        agent.add_argument(
            '--skip-generation',
            default=False,
            type='bool',  # ParlAI registers a custom 'bool' argument type
            metavar='BOOL',
            help='Skips test time beam search. Much faster if you only need PPL',
        )

        # Check subargs for generation, optimizers, criterions, archs, etc.
        options.add_generation_args(argparser)
        options.add_optimization_args(argparser)
        options.add_checkpoint_args(argparser)

        # restore any user-set defaults that fairseq possibly overrode
        argparser.set_defaults(**old_defaults)
        known_args = argparser.parse_known_args(nohelp=True)[0]

        if hasattr(known_args, "optimizer"):
            optimizer = known_args.optimizer
            opt_group = argparser.add_argument_group(
                '{} optimizer arguments'.format(optimizer)
            )
            optim.OPTIMIZER_REGISTRY[optimizer].add_args(opt_group)
        if hasattr(known_args, "lr_scheduler"):
            lr_scheduler = known_args.lr_scheduler
            lr_group = argparser.add_argument_group(
                '{} scheduler arguments'.format(lr_scheduler)
            )
            # completed to mirror the optimizer branch above; the excerpt broke off here
            optim.lr_scheduler.LR_SCHEDULER_REGISTRY[lr_scheduler].add_args(lr_group)
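The three options.add_*_args helpers attach fairseq's generation, optimization, and checkpoint argument groups to an existing argparse parser, which is how ParlAI grafts them onto its own parser here. A minimal sketch with a plain argparse parser, assuming these helpers only touch the standard argparse API (as they do in the fairseq versions above):

import argparse
from fairseq import options

parser = argparse.ArgumentParser(description='demo')
options.add_generation_args(parser)    # --beam, --nbest, sampling flags, ...
options.add_optimization_args(parser)  # --lr, --max-epoch, ...
options.add_checkpoint_args(parser)    # --save-dir, --restore-file, ...
args, _unknown = parser.parse_known_args(['--beam', '10'])
print(args.beam)  # -> 10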
github shibing624 / pycorrector / pycorrector / conv_seq2seq / infer.py
def infer(model_path, vocab_dir, arch, test_data, max_len, temperature):
    # interactive=True builds the parser used by fairseq-interactive,
    # including the --input flag that test_data is mapped to below
    parser = options.get_generation_parser(interactive=True)
    parser.set_defaults(arch=arch,
                        input=test_data,
                        max_tokens=max_len,
                        temperature=temperature,
                        path=model_path)
    # the binarized-data/vocabulary directory is the sole positional argument
    args = options.parse_args_and_arch(parser, input_args=[vocab_dir])
    return interactive.main(args)
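A hypothetical call, to make the argument roles concrete (all paths below are placeholders, not from pycorrector):

infer(
    model_path='checkpoints/checkpoint_best.pt',  # trained conv seq2seq checkpoint
    vocab_dir='data-bin',                         # binarized data / dictionary dir
    arch='fconv',                                 # architecture the model was trained with
    test_data='test.txt',                         # file fed to --input
    max_len=128,
    temperature=1.0,
)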
github pytorch / translate / pytorch_translate / research / knowledge_distillation / collect_top_k_probs.py
def main():
    parser = get_parser_with_args()  # pytorch_translate parser built on fairseq.options
    args = options.parse_args_and_arch(parser)
    save_top_k(args)
github zhiqwang / sightseq / sightseq / models / text_recognition_attn.py
        encoder = TextRecognitionEncoder(
            args=args,
        )
        decoder = LSTMDecoder(
            dictionary=task.target_dictionary,
            embed_dim=encoder.embed_dim,
            hidden_size=args.decoder_hidden_size,
            out_embed_dim=args.decoder_out_embed_dim,
            num_layers=args.decoder_layers,
            dropout_in=args.decoder_dropout_in,
            dropout_out=args.decoder_dropout_out,
            # string-valued flags are converted here: eval_bool for the attention
            # switch, eval_str_list for the adaptive-softmax cutoffs
            attention=options.eval_bool(args.decoder_attention),
            encoder_output_units=encoder.embed_dim,
            adaptive_softmax_cutoff=(
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
                if args.criterion == 'adaptive_loss' else None
            ),
        )
        return cls(encoder, decoder)
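options.eval_str_list is the list-valued counterpart of eval_bool: it evaluates a string such as '10000,50000,200000' and casts every element, which is how the --adaptive-softmax-cutoff flag above becomes a list of ints. A quick sketch, assuming the eval_str_list(x, type=float) helper from these fairseq versions:

from fairseq import options

# the cutoff flag arrives as a plain string and is parsed into ints
cutoffs = options.eval_str_list('10000,50000,200000', type=int)
print(cutoffs)  # [10000, 50000, 200000]

print(options.eval_str_list(None))               # None is passed through
print(options.eval_str_list('0.1', type=float))  # a scalar becomes [0.1]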
github freewym / espresso / fairseq / models / transformer.py
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayer(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])

        self.adaptive_softmax = None

        # project decoder states back to the output-embedding size when the
        # two dims differ (skipped when adaptive weights are tied)
        self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
            if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None

        if args.adaptive_softmax_cutoff is not None:
            # --adaptive-softmax-cutoff arrives as a string; eval_str_list parses it to ints
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary),
                self.output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
                adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
                factor=args.adaptive_softmax_factor,
                tie_proj=args.tie_adaptive_proj,
            )
        elif not self.share_input_output_embed:
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)

        if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        if getattr(args, 'layernorm_embedding', False):
            self.layernorm_embedding = LayerNorm(embed_dim)
        else:
            self.layernorm_embedding = None  # (completed; the excerpt broke off after `else:`)
github pytorch / translate / pytorch_translate / generate.py
def get_parser_with_args():
    parser = options.get_parser("Generation", default_task="pytorch_translate")
    pytorch_translate_options.add_verbosity_args(parser)
    pytorch_translate_options.add_dataset_args(parser, gen=True)
    generation_group = options.add_generation_args(parser)
    pytorch_translate_options.expand_generation_args(generation_group)

    # Adds args used by the standalone generate binary.
    generation_group.add_argument(
        "--source-vocab-file",
        default="",
        metavar="FILE",
        help="Path to text file representing the Dictionary to use.",
    )
    generation_group.add_argument(
        "--char-source-vocab-file",
        default="",
        metavar="FILE",
        # ... (help text and any remaining arguments elided in this excerpt)
    )
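options.get_parser gives back the bare top-level fairseq parser (common flags plus a --task default) with no train/generate groups attached, which is why get_parser_with_args then adds exactly the groups it needs and hangs project flags off the group object that add_generation_args returns. A minimal sketch of the same pattern outside pytorch_translate (the extra flag is illustrative):

from fairseq import options

def get_demo_parser():
    # bare parser: just the common fairseq flags and a default --task
    parser = options.get_parser('Generation', default_task='translation')
    group = options.add_generation_args(parser)  # returns the new argument group
    group.add_argument('--my-vocab-file', default='', metavar='FILE',
                       help='hypothetical project flag, for illustration')
    return parser

args, _unknown = get_demo_parser().parse_known_args(['--my-vocab-file', 'vocab.txt'])
print(args.my_vocab_file)  # -> vocab.txt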