How to use the configargparse.ArgParser class in ConfigArgParse

To help you get started, we’ve selected a few ConfigArgParse examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github petko-nikolov / pysemseg / pysemseg / train.py View on Github external
def define_args():
    """Build the configargparse parser for the PyTorch segmentation training CLI.

    Options can come from the command line or from a YAML config file
    supplied via --config (YAMLConfigFileParser is installed below).

    NOTE(review): this snippet is truncated by the page scrape — the final
    add_argument call is cut off mid-statement, and the (presumable) return
    of the parser is not visible.
    """
    parser = configargparse.ArgParser(description='PyTorch Segmentation Framework',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
    )
    # Optional config file; every option below may also be set there.
    parser.add_argument(
        '--config', is_config_file=True, required=False, help='Config file')
    parser.add_argument('--model', type=str, required=True,
                        help=('A path to the model including the module. '
                              'Should be resolvable'))
    # ast.literal_eval safely parses dict/list literals passed on the CLI
    # (unlike eval, it only accepts Python literal syntax).
    parser.add_argument('--model-args', type=ast.literal_eval, required=False, default={},
                        help=('Args passed to the model constructor'))
    parser.add_argument('--data-dir', type=str, required=True,
                        help='Path to the dataset root dir.')
    parser.add_argument('--model-dir', type=str, required=True,
                        help='Path to store output data.')
    parser.add_argument('--dataset', type=str, required=True,
                        help=('Path to the dataset class including the module'))
    parser.add_argument('--dataset-args', type=ast.literal_eval, default={},
github danielhers / tupa / semstr / scripts / domain_similarity.py View on Github external
# NOTE(review): fragment starts mid-function — the enclosing def, the `try:`
# that pairs with the `except IOError:` below, and the open() binding `f`
# are cut off above this line by the page scrape.
counts = dict((key, int(value)) for key, value in csv.reader(f))
        print("Loaded '%s'" % filename)
    except IOError:
        # No cached counts file: count every token over all passages under
        # directory `d`, then cache the result as CSV for the next run.
        counts = Counter()
        for p in tqdm(read_files_and_dirs(d), unit=" passages", desc="Reading %s" % d):
            for t in p.layer(layer0.LAYER_ID).all:
                counts[t.text] += 1
        with open(filename, "w", encoding="utf-8") as f:
            csv.writer(f).writerows(counts.most_common())
        print("Saved '%s'" % filename)
    # Normalize raw counts into a probability distribution over tokens.
    s = sum(counts.values())
    return {key: float(value) / s for key, value in counts.items()}


if __name__ == '__main__':
    # Command-line entry point: build the parser, then hand the parsed
    # namespace to main().
    parser = configargparse.ArgParser(description=desc)
    parser.add_argument("dirs", nargs="+", help="directories with passages to compare")
    parser.add_argument("-s", "--similarity", choices=("var", "cos", "euc", "js"), default="var")
    args = parser.parse_args()
    main(args)
github danielhers / tupa / tupa / scripts / visualize_learning_curve.py View on Github external
def main():
    """Plot dev/test score curves for each model matching the given glob patterns.

    For every pattern on the command line, every matching file (or the
    literal pattern if nothing matches) is stripped of its extension and
    its "dev" and "test" score files are loaded and visualized; divisions
    whose score files are missing are silently skipped.
    """
    argparser = ArgParser(description="Visualize scores of a model over the dev set, saving to .png file.")
    argparser.add_argument("models", nargs="+", help="model file basename(s) to load")
    args = argparser.parse_args()
    for pattern in args.models:
        # Fall back to the raw pattern when the glob matches nothing.
        matches = sorted(glob(pattern)) or [pattern]
        for filename in matches:
            basename = os.path.splitext(filename)[0]
            for div in ("dev", "test"):
                try:
                    scores = load_scores(basename, div=div)
                except OSError:
                    # No scores recorded for this division — skip it.
                    continue
                visualize(scores, basename, div=div)
github Mouse-Imaging-Centre / pydpiper / pydpiper / core / arguments.py View on Github external
def _mk_lsq12_parser():
    """Build an ArgParser holding the 12-parameter (LSQ12) registration options.

    NOTE(review): this snippet is truncated by the page scrape — the final
    --lsq12-likefile add_argument call is cut off mid-statement and no
    return is visible (presumably `return p`).
    """
    p = ArgParser(add_help=False)
    # group = parser.add_argument_group("LSQ12 registration options",
    #                                  "Options for performing a pairwise, affine registration")
    # Defaults: run the LSQ12 stage; don't build a tournament-style average.
    p.set_defaults(run_lsq12=True)
    p.set_defaults(generate_tournament_style_lsq12_avg=False)
    # --run-lsq12 / --no-run-lsq12 form an on/off pair writing the same dest.
    p.add_argument("--run-lsq12", dest="run_lsq12",
                   action="store_true",
                   help="Actually run the 12 parameter alignment [default = %(default)s]")
    p.add_argument("--no-run-lsq12", dest="run_lsq12",
                   action="store_false",
                   help="Opposite of --run-lsq12")
    p.add_argument("--lsq12-max-pairs", dest="max_pairs",
                   type=parse_nullable_int, default=25,
                   help="Maximum number of pairs to register together ('None' implies all pairs). "
                        "[Default = %(default)s]")
    p.add_argument("--lsq12-likefile", dest="like_file",
                   type=str, default=None,
github Mouse-Imaging-Centre / pydpiper / pydpiper / minc / beast.py View on Github external
def _mk_beast_parser(parser : ArgParser):
    group = parser.add_argument_group("BEaST options", "Options for running mincbeast.")
    group.add_argument("--library-dir", dest="library_dir",
                       type=str, help="Path to BEaST library dir, configured for mincbeast but with additional "
                                      "library.stx.native and library.masks.native files containing the template "
                                      "images and segmentations/masks at native resolution and such that "
                                      "'minc_modify_header -dinsert xspace:step=1 "
                                      "-dinsert yspace:step=1 -dinsert zspace:step=1' produces the 1mm images.")
    group.add_argument("--model-path", dest="model_path",
                       type=str, help="Path to '1mm' model (in mincbeast-compatible model dir).")
    group.add_argument("--beast-flags", dest="beast_flags", default="", help="Extra flags to pass to mincbeast")
    return parser


# Module-level BEaST parser: wrap the plain ArgParser produced by
# _mk_beast_parser in the project's BaseParser/AnnotatedParser machinery,
# exposing its options under the "beast" namespace.
beast_parser = AnnotatedParser(parser=BaseParser(_mk_beast_parser(ArgParser(add_help=False)),
                                                 "beast"),
                               namespace="beast")
github fiaas / fiaas-deploy-daemon / fiaas_deploy_daemon / config.py View on Github external
def _parse_args(self, args):
        """Parse deploy-daemon options from config file, env vars and CLI.

        NOTE(review): this snippet is truncated by the page scrape — the
        --debug add_argument call is cut off mid-statement and the rest of
        the method (including the actual parse of `args`) is not visible.
        Indentation here is also mangled by the scrape (def at column 0,
        body at 8 spaces).
        """
        # Options may come from (in precedence order) the command line,
        # environment variables (auto_env_var_prefix="" means the option
        # name itself is the variable name) or a YAML config file.
        parser = configargparse.ArgParser(auto_env_var_prefix="",
                                          add_config_file_help=False,
                                          add_env_var_help=False,
                                          config_file_parser_class=configargparse.YAMLConfigFileParser,
                                          default_config_files=[DEFAULT_CONFIG_FILE],
                                          args_for_setting_config_path=["-c", "--config-file"],
                                          ignore_unknown_config_file_keys=True,
                                          description="%(prog)s deploys applications to Kubernetes",
                                          epilog=EPILOG,
                                          formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--secrets-directory", help="Load secrets from this directory (default: %(default)s)",
                            default=DEFAULT_SECRETS_DIR)
        parser.add_argument("--log-format", help="Set logformat (default: %(default)s)", choices=self.VALID_LOG_FORMAT,
                            default="plain")
        # env_var lets the conventional http_proxy variable set this option.
        parser.add_argument("--proxy", help="Use proxy for requests to pipeline and getting fiaas-artifacts",
                            env_var="http_proxy")
        parser.add_argument("--debug", help="Enable a number of debugging options (including disable SSL-verification)",
github mattloose / RUscripts / ampliconSPLIT.py View on Github external
# NOTE(review): fragment starts mid-if — the branch paired with this `else:`
# (presumably an OS detection check) is cut off above by the page scrape.
# This is Python 2 code (print statement at the bottom), and it is also
# truncated at the end.
else:
        oper = 'linux'  # MS


    ## linux version
    # BUG(review): `is` compares object identity, not string equality; this
    # only works because CPython interns short string literals. Should be
    # `oper == "linux"` (and likewise below).
    # BUG(review): os.path.realpath('__file__') resolves the literal string
    # '__file__' relative to the CWD, not the script's own path — the
    # intent was almost certainly os.path.realpath(__file__) (no quotes).
    if (oper is "linux"):
            config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'amp.config')

    ## windows version
    if (oper is "windows"):
            config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'ampW.config')

    __version__ = "1.1"
    __date__ = "1st May 2016"

    # Options may come from the command line or from the platform-specific
    # config file selected above.
    parser = configargparse.ArgParser(description='ampliconSPLIT: A program designed to identify and group individual amplicons from minION reads prior to base calling. The depth setting limits the number of reads copied to each sub folder. Developed by Matt Loose @mattloose or matt.loose@nottingham.ac.uk for help!',default_config_files=[config_file])
    parser.add('-fasta', '--reference_fasta_file', type=str, dest='fasta', required=True, default=None, help="The fasta format file for the reference sequence for your organism.")
    parser.add('-ids', '--reference_amplicon_positions', type=str, required=True, default=None, help="A file containing a list of amplicon positions defined for the reference sequence. 1 amplicon per line in the format fasta_sequence_name:start-stop e.g EM_079517:27-1938", dest='ids')
    parser.add('-w', '--watch-dir', type=str, required=True, default=None, help="The path to the folder containing the downloads directory with fast5 reads to analyse - e.g. C:\data\minion\downloads (for windows).", dest='watchdir')
    parser.add('-o', '--output-dir', type=str, required=True, default="prefiltered", help="The path to the destination folder for the preprocessed reads" , dest="targetpath")
    parser.add('-d', '--depth',type=int, required=True, default=None, help = 'The desired coverage depth for each amplicon. Note this is unlikely to be achieved for each amplicon and should probably be an overestimate of the minimum coverage required.', dest='depth')
    parser.add('-procs', '--proc_num', type=int, dest='procs',required=True, help = 'The number of processors to run this on.')
    parser.add('-t', '--template_model',type=str, required=True, help = 'The appropriate template model file to use. This file can be generated uing the getmodels.py script.', dest='temp_model')
    parser.add('-v', '--verbose-true', action='store_true', help="Print detailed messages while processing files.", default=False, dest='verbose')
    parser.add_argument('-ver', '--version', action='version',version=('%(prog)s version={version} date={date}').format(version=__version__,date=__date__))
    args = parser.parse_args()

    # Validate the reference inputs before watching for reads.
    check_files((args.fasta,args.temp_model))
    checkfasta(args.fasta)

    if not os.path.isdir(args.watchdir):
        print "**! Sorry, but the folder "+args.watchdir+" cannot be found.\n\n**!  Please check you have entered the path correctly and try again.\n\n**!  This script will now terminate.\n"
github SouthEugeneRoboticsTeam / vision / vision / utils.py View on Github external
def get_args():
    """Build the vision CLI parser (CLI flags, VISION_* env vars, config file).

    NOTE(review): this snippet is truncated by the page scrape — the final
    --upper-color add_argument call is cut off mid-statement and no
    parse/return is visible.
    """
    default_config = []

    # Only fall back to the default config file when the user did not pass
    # one explicitly (the '-cf'/'--config' flag itself is presumably
    # registered by configargparse elsewhere — not visible here).
    if '-cf' not in sys.argv and '--config' not in sys.argv:
        default_config = [os.getenv('VISION_CONFIG', os.path.join(os.path.dirname(__file__), '../config/config.ini'))]

    # auto_env_var_prefix='VISION_' lets every option also be set via a
    # VISION_<OPTION> environment variable.
    parser = configargparse.ArgParser(default_config_files=default_config, auto_env_var_prefix='VISION_')

    parser.add_argument('-i', '--image', help='path to image')
    parser.add_argument('-n', '--name', type=str, default='camera', help='the name of this camera')
    parser.add_argument('-s', '--source', type=str, default='/dev/video0',
                        help='path to video source (default=/dev/video0)')
    # NOTE(review): help text says default=camera/camera1_4-3.cam but the
    # actual default is cameras/camera1_16-9.cam — mismatch in the original.
    parser.add_argument('-c', '--camera', type=str, default='cameras/camera1_16-9.cam',
                        help='path to camera file (default=camera/camera1_4-3.cam)')
    parser.add_argument('-t', '--team', type=int, help='the team of the target roboRIO')
    parser.add_argument('-d', '--display', action='store_true', help='display results of processing in a new window')
    parser.add_argument('-na', '--min-area', type=int, help='minimum area for blobs')
    parser.add_argument('-xa', '--max-area', type=int, help='maximum area for blobs')
    parser.add_argument('-nf', '--min-full', type=float, help='minimum fullness of blobs')
    parser.add_argument('-xf', '--max-full', type=float, help='maximum fullness of blobs')
    parser.add_argument('-l', '--lower-color', action='append', nargs='+',
                        type=int, help='lower color threshold in HSV')
    parser.add_argument('-u', '--upper-color', action='append', nargs='+',
github megagonlabs / sato / model / train_sherlock.py View on Github external
# NOTE(review): fragment is truncated at both ends by the page scrape —
# earlier imports are cut off above, and the __main__ block continues past
# the last visible p.add(...) call.
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import ConcatDataset

# Fix the random seed and force deterministic cuDNN kernels (disabling the
# auto-tuner) so training runs are reproducible.
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# =============

if __name__ == "__main__":


    #################### 
    # Load configs
    #################### 
    # Options come from a required config file (-c) and/or the command line;
    # TYPENAME may also come from the environment.
    p = configargparse.ArgParser()
    p.add('-c', '--config_file', required=True, is_config_file=True, help='config file path')

    # general configs
    p.add('--n_worker', type=int, default=4, help='# of workers for dataloader')
    p.add('--TYPENAME', type=str, help='Name of valid types', env_var='TYPENAME')

    # NN configs
    p.add('--epochs', type=int, default=100)
    p.add('--learning_rate', type=float, default=1e-4)
    p.add('--decay', type=float, default=1e-4)
    p.add('--dropout_rate', type=float, default=0.35)
    p.add('--batch_size', type=int, default=256, help='# of col in a batch')
    p.add('--patience', type=int, default=100, help='patience for early stopping')

    # sherlock configs
    p.add('--sherlock_feature_groups', nargs='+', default=['char','rest','par','word'])
github zimeon / iiif / iiif_reference_server.py View on Github external
def get_config(base_dir=''):
    """Get config from defaults, config file and/or parse arguments.

    Uses configargparse to allow arguments to be set from a config file
    or via command line arguments.

      base_dir - set a specific base directory for file/path defaults.

    NOTE(review): this snippet is truncated by the page scrape — it ends
    just after the "# Split list arguments" comment below.
    """
    p = configargparse.ArgParser(description='IIIF Image API Reference Service',
                                 default_config_files=[os.path.join(base_dir, 'iiif_reference_server.cfg')],
                                 formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # Options shared with other IIIF tools (defined elsewhere), then the
    # reference-server-specific options.
    add_shared_configs(p, base_dir)
    p.add('--scale-factors', default='auto',
          help="Set of tile scale factors or 'auto' to calculate for each image "
               "such that there are tiles up to the full image")
    p.add('--api-versions', default='1.0,1.1,2.0,2.1,3.0',
          help="Set of API versions to support")
    args = p.parse_args()

    # --debug implies --verbose; --verbose implies not --quiet
    # (these flags are presumably added by add_shared_configs — not
    # visible here).
    if (args.debug):
        args.verbose = True
    elif (args.verbose):
        args.quiet = False

    # Split list arguments