How to use mmcv - 9 common examples

To help you get started, we’ve selected a few mmcv examples based on popular ways it is used in public projects.
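Most of them lean on a small set of mmcv primitives: mmcv.Config for loading Python/JSON/YAML config files, mmcv.ProgressBar for progress reporting, mmcv.dump for serializing results, plus the MMDataParallel and load_checkpoint helpers for model testing. As a quick orientation, here is a minimal, self-contained sketch of the first three (the file paths and loop body are placeholders):

import mmcv

# Parse a config file (.py, .json or .yaml) into a dot-accessible object
cfg = mmcv.Config.fromfile('configs/example.py')  # placeholder path

# Report progress over a known number of tasks
results = []
prog_bar = mmcv.ProgressBar(task_num=100)
for i in range(100):
    results.append(i * i)  # placeholder work
    prog_bar.update()

# Serialize results; the format is inferred from the extension (.pkl here)
mmcv.dump(results, 'results.pkl')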


github musket-ml / segmentation_training_pipeline / segmentation_pipeline / mmdetection.py
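This helper loads a native mmdetection config with Config.fromfile, then patches it in place: GPU count, work directory, the number of classes for the bbox/mask heads, the checkpoint to load, and the per-GPU batch size.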
def initNativeConfig(self):

        atrs = self.all
        self.nativeConfig = Config.fromfile(self.getNativeConfigPath())
        cfg = self.nativeConfig
        cfg.gpus = self.gpus

        wd = os.path.dirname(self.path)
        cfg.work_dir = wd

        # mmdetection v1 counts the background as a class, hence the +1
        if 'bbox_head' in cfg.model and hasattr(atrs, 'classes'):
            setCfgAttr(cfg.model.bbox_head, 'num_classes', atrs['classes'] + 1)

        if 'mask_head' in cfg.model and hasattr(atrs, 'classes'):
            setCfgAttr(cfg.model.mask_head, 'num_classes', atrs['classes'] + 1)

        cfg.load_from = self.getWeightsPath()
        cfg.model.pretrained = self.getWeightsPath()
        cfg.total_epochs = None  # allow more epochs than the checkpoint was trained for
        cfg.data.imgs_per_gpu = max(1, self.batch // cfg.gpus)  # batch size per GPU
github open-mmlab / mmaction / tools / test_localizer.py
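This excerpt picks up at the multi-GPU branch of mmaction's localizer test script: parallel_test fans the checkpoint out over the available GPUs, mmcv.dump persists the raw outputs, and the detections are optionally refined by location regression before evaluation.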
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(localizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    eval_type = args.eval
    if eval_type:
        print('Starting evaluate {}'.format(eval_type))

        detections = results2det(
            dataset, outputs, **cfg.test_cfg.ssn.evaluater)

        if not args.no_regression:
            print("Performing location regression")
            for cls in range(len(detections)):
                detections[cls] = {
                    k: perform_regression(v)
                    for k, v in detections[cls].items()
                }
            print("Regression finished")
github open-mmlab / mmdetection / tools / test_robustness.py
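A distributed test loop built on mmcv.runner.get_dist_info and mmcv.ProgressBar; collect_results is a helper from the same repository, not part of mmcv. (An identical copy of this function also appears in kemaloksuz / BoundingBoxGenerator / tools / test_robustness.py.)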
def multi_gpu_test(model, data_loader, tmpdir=None):
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)

        if rank == 0:
            # one update per sample across all ranks approximates global progress
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    results = collect_results(results, len(dataset), tmpdir)

    return results
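Worth noting: get_dist_info comes from mmcv.runner and degrades gracefully. When no torch.distributed process group is initialized it reports rank 0 and world size 1, so the same loop shape also works in a single process. A minimal runnable sketch (the task count is arbitrary):

import mmcv
from mmcv.runner import get_dist_info

rank, world_size = get_dist_info()  # (0, 1) without an initialized process group
if rank == 0:
    prog_bar = mmcv.ProgressBar(task_num=4)
    for _ in range(4 * world_size):
        prog_bar.update()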
github open-mmlab / mmaction / tools / test_localizer.py
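The single-process counterpart of the loop above: one progress-bar update per sample in each batch.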
def single_test(model, data_loader):
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, **data)
        results.append(result)

        # unwrap the DataContainer to get the tensor batch for the first GPU
        batch_size = data['img_group_0'].data[0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return results
github open-mmlab / mmaction / tools / test_recognizer.py
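Test setup for an action recognizer: the config controls cudnn benchmarking and test-time oversampling, and the script picks between single-GPU testing with MMDataParallel and multi-GPU parallel_test.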
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
github open-mmlab / mmfashion / mmfashion / apis / test_retriever.py
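Non-distributed retrieval testing in mmfashion: query and gallery embeddings are computed with an MMDataParallel-wrapped model, saved to .mat files via scipy.io, and scored by the project's Evaluator.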
def _non_dist_test(model, query_set, gallery_set, cfg, validate=False):

    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()

    query_embeds = _process_embeds(query_set, model, cfg)
    gallery_embeds = _process_embeds(gallery_set, model, cfg)

    query_embeds_np = np.array(query_embeds)
    print('query_embeds', query_embeds_np.shape)
    sio.savemat('query_embeds.mat', {'embeds': query_embeds_np})

    gallery_embeds_np = np.array(gallery_embeds)
    print('gallery_embeds', gallery_embeds_np.shape)
    sio.savemat('gallery_embeds.mat', {'embeds': gallery_embeds_np})

    e = Evaluator(cfg.data.query.id_file, cfg.data.gallery.id_file)
    e.evaluate(query_embeds_np, gallery_embeds_np)
github open-mmlab / mmaction / tools / test_localizer.py
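Rewrites the segmental_consensus section of the config into an 'STPPReorganized' head (note that 'standalong_classifier' is the upstream key name), then builds the dataset and model the same way as the recognizer script.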
    cfg.model.segmental_consensus = dict(
        type="STPPReorganized",
        standalong_classifier=cfg.model.segmental_consensus.standalong_classifier,
        feat_dim=num_classes + 1 + num_classes * 3 * stpp_feat_multiplier,
        act_score_len=num_classes + 1,
        comp_score_len=num_classes,
        reg_score_len=num_classes * 2,
        stpp_cfg=cfg.model.segmental_consensus.stpp_cfg)

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_localizer(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(localizers, model_args.pop('type'))
        outputs = parallel_test(
            model_type,
            model_args,
            args.checkpoint,
            dataset,
            _data_func,
            range(args.gpus),
            workers_per_gpu=args.proc_per_gpu)
github open-mmlab / mmfashion / mmfashion / apis / test_predictor.py
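Non-distributed attribute prediction in mmfashion: an mmcv-style dataloader feeds batches to an MMDataParallel model and a Calculator accumulates the results.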
def _non_dist_test(model, dataset, cfg, validate=False):
    data_loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        cfg.gpus.test,
        dist=False,
        shuffle=False)

    print('dataloader built')

    model = MMDataParallel(model, device_ids=range(cfg.gpus.test)).cuda()
    model.eval()

    # collector = build_collecter(cfg.class_num)
    calculator = Calculator(cfg.class_num)

    for batch_idx, testdata in enumerate(data_loader):
        imgs = testdata['img']
        landmarks = testdata['landmark']
        labels = testdata['label']
        
        predict = model(imgs, labels, landmarks, return_loss=False)
        print('predict')
        print(predict.size())
        print(predict)
        calculator.collect_result(predict, labels)
github open-mmlab / mmaction / tools / test_localizer.py
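The entry point that leads into the STPP snippet above: it validates the output path, loads the config with mmcv.Config.fromfile, and derives the reorganized consensus dimensions from the stpp_cfg stages.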
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # reorganize stpp
    num_classes = (cfg.model.cls_head.num_classes -
                   1 if cfg.model.cls_head.with_bg
                   else cfg.model.cls_head.num_classes)
    stpp_feat_multiplier = 0
    for stpp_subcfg in cfg.model.segmental_consensus.stpp_cfg:
        _, mult = parse_stage_config(stpp_subcfg)
        stpp_feat_multiplier += mult
    cfg.model.segmental_consensus = dict(
        type="STPPReorganized",
        standalong_classifier=cfg.model.segmental_consensus.standalong_classifier,
        feat_dim=num_classes + 1 + num_classes * 3 * stpp_feat_multiplier,
        act_score_len=num_classes + 1,
        comp_score_len=num_classes,
        reg_score_len=num_classes * 2,
        stpp_cfg=cfg.model.segmental_consensus.stpp_cfg)