# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _non_dist_test(model, query_set, gallery_set, cfg, validate=False):
    """Run single-machine (non-distributed) retrieval evaluation.

    Wraps ``model`` in ``MMDataParallel``, embeds the query and gallery
    sets, dumps both embedding matrices to ``.mat`` files, and scores
    them with ``Evaluator``.

    Args:
        model: the network to evaluate.
        query_set / gallery_set: datasets fed to ``_process_embeds``.
        cfg: config providing ``gpus.test`` and the id-file paths.
        validate (bool): unused here; kept for interface parity.
    """
    model = MMDataParallel(model, device_ids=cfg.gpus.test).cuda()
    model.eval()

    # Embed both splits up front; conversion to ndarray is pure.
    q_embeds = np.array(_process_embeds(query_set, model, cfg))
    g_embeds = np.array(_process_embeds(gallery_set, model, cfg))

    print('query_embeds', q_embeds.shape)
    sio.savemat('query_embeds.mat', {'embeds': q_embeds})
    print('gallery_embeds', g_embeds.shape)
    sio.savemat('gallery_embeds.mat', {'embeds': g_embeds})

    evaluator = Evaluator(cfg.data.query.id_file, cfg.data.gallery.id_file)
    evaluator.evaluate(q_embeds, g_embeds)
# NOTE(review): this span is a fragment of a larger test entry point --
# `num_classes`, `stpp_feat_multiplier`, `stpp_feat_multiplier`, `args`,
# `single_test` and `parallel_test` are defined outside the visible code,
# and the trailing `parallel_test(...)` call is truncated mid-arguments.
# Left byte-identical; comments only.

# Swap the consensus module for the reorganized STPP variant used at
# test time; score-length fields are derived from the class count.
cfg.model.segmental_consensus = dict(
    type="STPPReorganized",
    standalong_classifier=cfg.model.
    segmental_consensus.standalong_classifier,
    feat_dim=num_classes + 1 + num_classes * 3 * stpp_feat_multiplier,
    act_score_len=num_classes + 1,
    comp_score_len=num_classes,
    reg_score_len=num_classes * 2,
    stpp_cfg=cfg.model.segmental_consensus.stpp_cfg)
# Build the test dataset in test mode.
dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
if args.gpus == 1:
    # Single-GPU path: build, load weights strictly, wrap and run.
    model = build_localizer(
        cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, strict=True)
    model = MMDataParallel(model, device_ids=[0])

    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        num_gpus=1,
        dist=False,
        shuffle=False)
    outputs = single_test(model, data_loader)
else:
    # Multi-GPU path: each worker constructs the model from its args.
    model_args = cfg.model.copy()
    model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
    model_type = getattr(localizers, model_args.pop('type'))
    # NOTE(review): call truncated here in the visible source.
    outputs = parallel_test(
        model_type,
        model_args,
def _non_dist_test(model, dataset, cfg, validate=False):
    """Non-distributed test loop that feeds predictions to a Calculator.

    Builds a test dataloader, wraps ``model`` in ``MMDataParallel``, and
    for every batch runs a forward pass (``return_loss=False``) and
    accumulates the result via ``Calculator.collect_result``.

    Args:
        model: network under test.
        dataset: test dataset for ``build_dataloader``.
        cfg: config providing data/gpu settings and ``class_num``.
        validate (bool): unused here; kept for interface parity.
    """
    data_loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        cfg.gpus.test,
        dist=False,
        shuffle=False)
    print('dataloader built')

    model = MMDataParallel(model, device_ids=range(cfg.gpus.test)).cuda()
    model.eval()
    calculator = Calculator(cfg.class_num)

    for batch in data_loader:
        imgs = batch['img']
        landmarks = batch['landmark']
        labels = batch['label']
        predict = model(imgs, labels, landmarks, return_loss=False)
        # Debug output of the raw prediction tensor.
        print('predict')
        print(predict.size())
        print(predict)
        calculator.collect_result(predict, labels)
def _non_dist_train(model, dataset, cfg, validate=False):
    """Train on a single machine without torch.distributed.

    Builds one dataloader, data-parallelizes the model over
    ``cfg.gpus`` devices, and drives training through an mmcv
    ``Runner`` for ``cfg.total_epochs`` epochs.

    Args:
        model: network to train.
        dataset: training dataset.
        cfg: full training config (optimizer, hooks, workflow, ...).
        validate (bool): unused here; kept for interface parity.
    """
    loader = build_dataloader(
        dataset,
        cfg.data.videos_per_gpu,
        cfg.data.workers_per_gpu,
        cfg.gpus,
        dist=False)

    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)

    # Resuming a previous run takes precedence over plain weight loading.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)

    runner.run([loader], cfg.workflow, cfg.total_epochs)
def main():
    """Export a single-stage detector to ONNX with a dummy input.

    Loads the detector from ``args.config``/``args.checkpoint``, rebinds
    export-friendly ``forward``/head methods onto the live modules, and
    calls ``onnx_export`` writing to ``args.output``.
    """
    args = parse_args()
    model = init_detector(args.config, args.checkpoint)
    cfg = model.cfg
    # The export path below is written for SingleStageDetector only.
    assert getattr(detectors, cfg.model['type']) is detectors.SingleStageDetector

    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    # Dummy input: NCHW tensor plus the img_meta the detector expects.
    size = cfg.input_size
    dummy = torch.FloatTensor(1, 3, size, size).cuda()
    meta = {'img_shape': (size, size, 3),
            'scale_factor': np.array([1, 1, 1, 1], dtype=np.float32)}
    data = dict(img=dummy, img_meta=[meta])

    # Rebind export-aware implementations onto the wrapped modules;
    # `__get__` turns each plain function into a bound method.
    core = model.module
    head = core.bbox_head
    core.onnx_export = onnx_export.__get__(core)
    core.forward = forward.__get__(core)
    core.forward_export = forward_export_detector.__get__(core)
    head.export_forward = export_forward_ssd_head.__get__(head)
    head._prepare_cls_scores_bbox_preds = prepare_cls_scores_bbox_preds_ssd_head.__get__(head)
    head.get_bboxes = get_bboxes_ssd_head.__get__(head)

    core.onnx_export(export_name=args.output, **data)
def _non_dist_train(model, dataset, cfg, validate=False):
    """Single-machine training over the GPUs listed in ``cfg.gpus.train``.

    Builds one dataloader, data-parallelizes the model on the configured
    device ids, constructs the optimizer via ``build_optimizer``, and
    runs training through an mmcv ``Runner``.

    Args:
        model: network to train.
        dataset: training dataset.
        cfg: full training config (data, optimizer, hooks, workflow).
        validate (bool): unused here; kept for interface parity.
    """
    loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        len(cfg.gpus.train),
        dist=False)
    print('dataloader built')

    model = MMDataParallel(model, device_ids=cfg.gpus.train).cuda()
    print('model paralleled')

    optimizer = build_optimizer(model, cfg.optimizer)
    trainer = Runner(model, batch_processor, optimizer, cfg.work_dir,
                     cfg.log_level)
    trainer.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                    cfg.checkpoint_config, cfg.log_config)

    # Resuming a previous run takes precedence over plain weight loading.
    if cfg.resume_from:
        trainer.resume(cfg.resume_from)
    elif cfg.load_from:
        trainer.load_checkpoint(cfg.load_from)

    trainer.run([loader], cfg.workflow, cfg.total_epochs)
def _non_dist_train(model, dataset, cfg, validate=False):
    """Single-machine training driven by ``MultiLRRunner``.

    Identical in shape to the plain-Runner variant: one dataloader,
    ``MMDataParallel`` over ``cfg.gpus`` devices, hooks registered from
    the config, optional resume/load, then the training workflow.

    Args:
        model: network to train.
        dataset: training dataset.
        cfg: full training config.
        validate (bool): unused here; kept for interface parity.
    """
    loader = build_dataloader(
        dataset,
        cfg.data.imgs_per_gpu,
        cfg.data.workers_per_gpu,
        cfg.gpus,
        dist=False)

    model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # MultiLRRunner handles per-parameter-group learning-rate schedules.
    trainer = MultiLRRunner(model, batch_processor, cfg.optimizer,
                            cfg.work_dir, cfg.log_level)
    trainer.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                    cfg.checkpoint_config, cfg.log_config)

    # Resuming a previous run takes precedence over plain weight loading.
    if cfg.resume_from:
        trainer.resume(cfg.resume_from)
    elif cfg.load_from:
        trainer.load_checkpoint(cfg.load_from)

    trainer.run([loader], cfg.workflow, cfg.total_epochs)
def _non_dist_train(
model, train_dataset, cfg,
eval_dataset=None, vis_dataset=None, validate=False, logger=None
):
# prepare data loaders
data_loaders = [
build_data_loader(
train_dataset,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
cfg.gpus,
dist=False)
]
# put model on gpus
model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
runner = Runner(
model, batch_processor, optimizer, cfg.work_dir, cfg.log_level, logger
)
logger.info("Register Optimizer Hook...")
runner.register_training_hooks(
cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config
)
logger.info("Register EmptyCache Hook...")
runner.register_hook(
EmptyCacheHook(before_epoch=True, after_iter=False, after_epoch=True),
priority='VERY_LOW'
)
if cfg.resume_from: