How to use the fire.Fire function in fire

To help you get started, we’ve selected a few fire.Fire examples based on popular ways the library is used in public projects.
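
fire.Fire turns almost any Python object (a function, a class, a dict of commands) into a command-line interface: positional parameters become positional arguments, keyword parameters become --flags, and the return value is printed to stdout. A minimal sketch before the real-world examples (the greet function and file name are illustrative, not from the projects below):

import fire

def greet(name, excited=False):
    """Return a greeting for `name`."""
    return 'Hello, {}{}'.format(name, '!' if excited else '.')

if __name__ == '__main__':
    fire.Fire(greet)

Saved as greet.py, this runs as python greet.py World --excited and prints Hello, World!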


github openai / gpt-2 / src / interactive_conditional_samples.py
            raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text)
            generated = 0
            for _ in range(nsamples // batch_size):
                out = sess.run(output, feed_dict={
                    context: [context_tokens for _ in range(batch_size)]
                })[:, len(context_tokens):]
                for i in range(batch_size):
                    generated += 1
                    text = enc.decode(out[i])
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                    print(text)
            print("=" * 80)

if __name__ == '__main__':
    fire.Fire(interact_model)
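
Here fire.Fire(interact_model) maps interact_model's keyword parameters (model_name, seed, nsamples, batch_size, and so on in the full gpt-2 source) to command-line flags, so the script can be launched as, e.g., python src/interactive_conditional_samples.py --model_name=124M --nsamples=2. The exact flag names depend on the function's signature, which is truncated in this excerpt.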
github fastai / fastai / courses / dl2 / imdb_scripts / train_clas.py
        print('Fine-tuning only the last layer...')
        learn.freeze_to(-1)

    if use_regular_schedule:
        print('Using regular schedule. Setting use_clr=None, n_cycles=cl, cycle_len=None.')
        use_clr = None
        n_cycles = cl
        cl = None
    else:
        n_cycles = 1
    learn.fit(lrs, n_cycles, wds=wd, cycle_len=cl, use_clr=(8,8) if use_clr else None)
    print('Plotting lrs...')
    learn.sched.plot_lr()
    learn.save(final_clas_file)

if __name__ == '__main__': fire.Fire(train_clas)
github RhinoSecurityLabs / ccat / modules / k8s__enum_subjects_roles_rolebindings / main.py
    out += 'Total {} K8s Cluster Role Bindings Enumerated\n'.format(data['payload']['cluster_role_bindings']['count'])
    out += 'K8s resources saved under {}.\n'.format(module_info['data_saved'])

    return out


def set_args():
    args = {}

    return args


if __name__ == "__main__":
    print('Running module {}...'.format(module_info['name']))

    args = fire.Fire(set_args)
    data = main(args)

    if data is not None:
        summary = summary(data)
        # Check the type before measuring length, so a non-str result fails with the right error.
        if not isinstance(summary, str):
            raise TypeError('The {} module\'s summary is {}-type instead of str. Make summary return a string.'.format(module_info['name'], type(summary)))
        if len(summary) > 1000:
            raise ValueError('The {} module\'s summary is too long ({} characters). Reduce it to 1000 characters or fewer.'.format(module_info['name'], len(summary)))
        
        # print('RESULT:')
        # print(json.dumps(data, indent=4, default=str))

        print('{} completed.\n'.format(module_info['name']))
        print('MODULE SUMMARY:\n\n{}\n'.format(summary.strip('\n')))
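
A detail worth noting in this module: fire.Fire returns whatever the invoked component returns, so args = fire.Fire(set_args) both parses the command line and hands back the dict that set_args builds. A minimal sketch of the same pattern (the names are illustrative):

import fire

def build_args(region='us-east-1', verbose=False):
    # Fire fills these keyword parameters from --region / --verbose flags.
    return {'region': region, 'verbose': verbose}

if __name__ == '__main__':
    # Fire prints the dict and also returns it, so it can be reused in code.
    args = fire.Fire(build_args)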
github AppleHolic / source_separation / source_separation / train.py
        if case_name == 'dsd100':
            dataset_func = dsd100.get_datasets
        elif case_name == 'musdb18':
            dataset_func = musdb18.get_datasets
        elif case_name == 'voice_bank':
            dataset_func = voice_bank.get_datasets
            sr = 22050
        train_loader, valid_loader = dataset_func(
            meta_dir, batch_size=batch_size, num_workers=num_workers, fix_len=int(fix_len * sr), audio_mask=True
        )

    return train_loader, valid_loader, sr


if __name__ == '__main__':
    fire.Fire(main)
github fastai / fastai_dev / dev_course / dl2-moved / run_notebook.py
#!/usr/bin/env python

import nbformat,fire
from nbconvert.preprocessors import ExecutePreprocessor

def run_notebook(path):
    "Executes notebook `path` and shows any exceptions. Useful for testing"
    nb = nbformat.read(open(path), as_version=nbformat.NO_CONVERT)
    ExecutePreprocessor(timeout=600).preprocess(nb, {})
    print('done')

if __name__ == '__main__': fire.Fire(run_notebook)
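
Because run_notebook takes a single positional parameter, the resulting CLI is simply python run_notebook.py some_notebook.ipynb (the notebook path here is a placeholder); Fire passes the positional argument through as path.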
github lopuhin / transformer-lm / lm / main.py
def fire_main():
    fire.Fire(only_allow_defined_args(main))
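
only_allow_defined_args is a helper defined in the transformer-lm project, not part of fire itself: it wraps main so that arguments outside main's signature are rejected instead of silently accepted. A rough, hypothetical sketch of such a wrapper, assuming that behavior (this is not the project's actual code):

import functools
import inspect

def only_allow_defined_args(func):
    """Reject keyword arguments absent from func's signature."""
    allowed = set(inspect.signature(func).parameters)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        unknown = set(kwargs) - allowed
        if unknown:
            raise ValueError('Unknown argument(s): {}'.format(', '.join(sorted(unknown))))
        return func(*args, **kwargs)

    return wrapper

functools.wraps keeps the original signature visible to Fire's introspection, so the generated CLI and its --help output stay the same.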
github andabi / parallel-wavenet-vocoder / train.py
    train_conf.session_init = SaverRestore(ckpt)

    if gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, gpu))
        train_conf.nr_tower = len(gpu)

    if hp.train.num_gpu <= 1:
        trainer = SimpleTrainer()
    else:
        trainer = SyncMultiGPUTrainerReplicated(gpus=hp.train.num_gpu)

    launch_train_with_config(train_conf, trainer=trainer)


if __name__ == '__main__':
    fire.Fire(train)
github danieljl / keras-image-captioning / keras_image_captioning / training.py
        logging('Stopping training..')
        logging('(Training will stop after the current epoch)')
        try:
            training.stop_training()
        except:
            traceback.print_exc(file=sys.stderr)
    signal.signal(signal.SIGINT, handler)

    training.run()

    if _unit_test:
        return training


if __name__ == '__main__':
    fire.Fire(main)