How to use the mmcv.Config.fromfile function in mmcv

To help you get started, we’ve selected a few mmcv examples based on popular ways it is used in public projects.

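The snippets below all follow the same basic pattern: load a config file with mmcv.Config.fromfile, then read or override its entries as attributes. Here is a minimal, self-contained sketch of that pattern; the config path and the overridden keys are placeholders, not files from the projects below.

import mmcv

# Load a config file (Python, JSON, or YAML) into a Config object.
# 'configs/example_config.py' is an illustrative placeholder path.
cfg = mmcv.Config.fromfile('configs/example_config.py')

# Entries defined in the config file become attributes of the Config object.
# They can be overridden at runtime or queried dict-style with a default.
cfg.work_dir = './work_dirs/example'
if cfg.get('cudnn_benchmark', False):
    print('cudnn benchmark is enabled in this config')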

github musket-ml / segmentation_training_pipeline / segmentation_pipeline / mmdetection.py (View on Github)
def initNativeConfig(self):

        atrs = self.all
        self.nativeConfig = Config.fromfile(self.getNativeConfigPath())
        cfg = self.nativeConfig
        cfg.gpus = self.gpus

        wd = os.path.dirname(self.path)
        cfg.work_dir = wd

        # Adjust head class counts to the dataset; the +1 accounts for the
        # background class expected by the detection heads.
        if 'bbox_head' in cfg.model and hasattr(atrs, 'classes'):
            setCfgAttr(cfg.model.bbox_head, 'num_classes', atrs['classes'] + 1)

        if 'mask_head' in cfg.model and hasattr(atrs, 'classes'):
            setCfgAttr(cfg.model.mask_head, 'num_classes', atrs['classes'] + 1)

        cfg.load_from = self.getWeightsPath()
        cfg.model.pretrained = self.getWeightsPath()
        cfg.total_epochs = None  # needs to allow more epochs than the checkpoint was generated for
        cfg.data.imgs_per_gpu = max(1, self.batch // cfg.gpus)  # batch size per GPU

github open-mmlab / mmaction / tools / test_localizer.py (View on Github)
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # reorganize stpp
    # Exclude the background class from the count when the classification head has one.
    num_classes = (cfg.model.cls_head.num_classes - 1
                   if cfg.model.cls_head.with_bg
                   else cfg.model.cls_head.num_classes)
    stpp_feat_multiplier = 0
    for stpp_subcfg in cfg.model.segmental_consensus.stpp_cfg:
        _, mult = parse_stage_config(stpp_subcfg)
        stpp_feat_multiplier += mult
    cfg.model.segmental_consensus = dict(
        type="STPPReorganized",
        standalong_classifier=cfg.model.

github open-mmlab / mmaction / tools / test_recognizer.py (View on Github)
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
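    # Single-GPU path: build the recognizer, load the checkpoint, and wrap it for inference.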
    if args.gpus == 1:
        model = build_recognizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(

github open-mmlab / mmdetection / tools / voc_eval.py (View on Github)
def main():
    parser = ArgumentParser(description='VOC Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('config', help='config file path')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for evaluation')
    args = parser.parse_args()
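    # Load the config, build the test dataset it describes, and run VOC-style evaluation.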
    cfg = mmcv.Config.fromfile(args.config)
    test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
    voc_eval(args.result, test_dataset, args.iou_thr)

github open-mmlab / mmcv / examples / train_cifar10.py (View on Github)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    logger = get_logger(cfg.log_level)

    # init distributed environment if necessary
    if args.launcher == 'none':
        dist = False
        logger.info('Disabled distributed training.')
    else:
        dist = True
        init_dist(**cfg.dist_params)
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        if rank != 0:
            logger.setLevel('ERROR')
        logger.info('Enabled distributed training.')

github open-mmlab / mmaction / tools / train_localizer.py (View on Github)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmaction version in checkpoints as metadata
        cfg.checkpoint_config.meta = dict(
            mmact_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':

github open-mmlab / mmaction / tools / eval_localize_results.py (View on Github)
def main():
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    output_list = []
    for out in args.outputs:
        output_list.append(mmcv.load(out))

    if args.score_weights:
        weights = np.array(args.score_weights) / sum(args.score_weights)
    else:
        weights = [1. / len(output_list) for _ in output_list]

    def merge_scores(idx):
        def merge_part(arrs, index, weights):
            if arrs[0][index] is not None:
                return np.sum([a[index] * w for a, w in zip(arrs, weights)],

github open-mmlab / mmaction / tools / train_recognizer.py (View on Github)
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmaction version in checkpoints as metadata
        cfg.checkpoint_config.meta = dict(
            mmact_version=__version__, config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':