def multi_gpu_test(model, data_loader, tmpdir=None):
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    # only rank 0 owns the progress bar; the other ranks run silently
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)

        if rank == 0:
            # every rank processed one batch, so advance by batch_size * world_size
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    results = collect_results(results, len(dataset), tmpdir)

    return results
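Every snippet on this page reduces to the same core pattern: construct mmcv.ProgressBar with the total number of tasks and call update() once per completed item. Below is a minimal, self-contained sketch of that pattern; do_work and num_tasks are placeholders for illustration, not taken from any of the projects quoted here.

import time

import mmcv


def do_work(idx):
    time.sleep(0.01)  # stand-in for the per-item work (inference, I/O, ...)


num_tasks = 100
prog_bar = mmcv.ProgressBar(num_tasks)
for i in range(num_tasks):
    do_work(i)
    prog_bar.update()  # advances the counter and refreshes the ETA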
def single_test(model, data_loader):
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, **data)
        results.append(result)

        batch_size = data['img_group_0'].data[0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return results
def test_start(self):
    out = StringIO()
    bar_width = 20
    # without total task num
    prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
    assert out.getvalue() == 'completed: 0, elapsed: 0s'
    reset_string_io(out)
    prog_bar = mmcv.ProgressBar(bar_width=bar_width, start=False, file=out)
    assert out.getvalue() == ''
    reset_string_io(out)
    prog_bar.start()
    assert out.getvalue() == 'completed: 0, elapsed: 0s'
    # with total task num
    reset_string_io(out)
    prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
    assert out.getvalue() == '[{}] 0/10, elapsed: 0s, ETA:'.format(
        ' ' * bar_width)
    reset_string_io(out)
    prog_bar = mmcv.ProgressBar(
        10, bar_width=bar_width, start=False, file=out)
    assert out.getvalue() == ''
    reset_string_io(out)
    prog_bar.start()
    assert out.getvalue() == '[{}] 0/10, elapsed: 0s, ETA:'.format(
        ' ' * bar_width)
if args.ResNet101:
    dfc_resnet101 = resnet_models.Flow_Branch(66, 4)
    dfc_resnet = nn.DataParallel(dfc_resnet101).cuda()
else:
    dfc_resnet50 = resnet_models.Flow_Branch_Multi(input_chanels=66, NoLabels=4)
    dfc_resnet = nn.DataParallel(dfc_resnet50).cuda()

dfc_resnet.eval()

resume_iter = load_ckpt(args.PRETRAINED_MODEL,
                        [('model', dfc_resnet)], strict=True)
print('Load Pretrained Model from', args.PRETRAINED_MODEL)

task_bar = ProgressBar(eval_dataset.__len__())
for i, item in enumerate(eval_dataloader):
    with torch.no_grad():
        input_x = item[0].cuda()
        flow_masked = item[1].cuda()
        gt_flow = item[2].cuda()
        mask = item[3].cuda()
        output_dir = item[4][0]

        res_flow = dfc_resnet(input_x)
        res_flow_f = res_flow[:, :2, :, :]
        res_flow_r = res_flow[:, 2:, :, :]

        res_complete_f = res_flow_f * mask[:, 10:11, :, :] + flow_masked[:, 10:12, :, :] * (1. - mask[:, 10:11, :, :])
        res_complete_r = res_flow_r * mask[:, 32:34, :, :] + flow_masked[:, 32:34, :, :] * (1. - mask[:, 32:34, :, :])
def test_update(self):
    out = StringIO()
    bar_width = 20
    # without total task num
    prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
    time.sleep(1)
    reset_string_io(out)
    prog_bar.update()
    assert out.getvalue() == 'completed: 1, elapsed: 1s, 1.0 tasks/s'
    reset_string_io(out)
    # with total task num
    prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
    time.sleep(1)
    reset_string_io(out)
    prog_bar.update()
    assert out.getvalue() == ('\r[{}] 1/10, 1.0 task/s, '
                              'elapsed: 1s, ETA: 9s'.format('>' * 2 +
                                                            ' ' * 18))
workers = [
    ctx.Process(
        target=worker_func,
        args=(model_cls, model_kwargs, checkpoint, dataset, data_func,
              gpus[i % len(gpus)], idx_queue, result_queue))
    for i in range(num_workers)
]
for w in workers:
    w.daemon = True
    w.start()

for i in range(len(dataset)):
    idx_queue.put(i)

results = [None for _ in range(len(dataset))]
prog_bar = mmcv.ProgressBar(task_num=len(dataset))
for _ in range(len(dataset)):
    idx, res = result_queue.get()
    results[idx] = res
    prog_bar.update()
print('\n')
for worker in workers:
    worker.terminate()

return results
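Rather than wiring up the bar by hand, mmcv also ships thin wrappers around this create-and-update pattern. The sketch below uses mmcv.track_iter_progress and mmcv.track_parallel_progress from mmcv's progress utilities, assuming a recent mmcv release where they live alongside ProgressBar; the square task and the sizes are placeholders for illustration.

import mmcv


def square(x):
    return x * x  # placeholder task; any picklable function works


if __name__ == '__main__':
    # sequential loop with an automatic progress bar
    results = [square(x) for x in mmcv.track_iter_progress(list(range(100)))]

    # worker-pool variant: nproc processes share one progress bar
    results = mmcv.track_parallel_progress(square, list(range(100)), nproc=4)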
    outfile_prefix (str): The filename prefix of the txt files.
        If the prefix is "somepath/xxx",
        the txt files will be named "somepath/xxx.txt".

Returns:
    list[str]: Result txt files which contain the corresponding
        instance segmentation images.
"""
try:
    import cityscapesscripts.helpers.labels as CSLabels
except ImportError:
    raise ImportError('Please run "pip install cityscapesscripts" to '
                      'install cityscapesscripts first.')
result_files = []
os.makedirs(outfile_prefix, exist_ok=True)
prog_bar = mmcv.ProgressBar(len(self))
for idx in range(len(self)):
    result = results[idx]
    filename = self.img_infos[idx]['filename']
    basename = osp.splitext(osp.basename(filename))[0]
    pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')

    bbox_result, segm_result = result
    bboxes = np.vstack(bbox_result)
    segms = mmcv.concat_list(segm_result)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    assert len(bboxes) == len(segms) == len(labels)
def after_train_epoch(self, runner):
    if not self.every_n_epochs(runner, self.interval):
        return
    runner.model.eval()
    results = [None for _ in range(len(self.dataset))]
    if runner.rank == 0:
        prog_bar = mmcv.ProgressBar(len(self.dataset))
    for idx in range(runner.rank, len(self.dataset), runner.world_size):
        data = self.dataset[idx]
        data_gpu = scatter(
            collate([data], samples_per_gpu=1),
            [torch.cuda.current_device()])[0]

        # compute output
        with torch.no_grad():
            result = runner.model(
                return_loss=False, rescale=True, **data_gpu)
        results[idx] = result

        batch_size = runner.world_size
        if runner.rank == 0:
            for _ in range(batch_size):
                prog_bar.update()
def after_train_epoch(self, runner: Runner):
    runner.model.eval()
    results = [None for _ in range(len(self.indices))]
    if runner.rank == 0:
        prog_bar = mmcv.ProgressBar(len(self.indices))
    for idx in self.indices:
        pi = self.dataset.ds[idx]
        data = self.dataset[idx]
        data_gpu = scatter(
            collate([data], samples_per_gpu=1),
            [torch.cuda.current_device()])[0]

        # compute output
        with torch.no_grad():
            result = runner.model(
                return_loss=False, rescale=True, **data_gpu)
        results[idx] = (result, pi)

        # batch_size = runner.world_size
        if runner.rank == 0:
            prog_bar.update()