import os
import random

import torch
from tqdm import tqdm

from asteroid.data import LibriMix
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
from asteroid.models import ConvTasNet
from asteroid.utils import tensors_to_device


def main(conf):
    model_path = os.path.join(conf['exp_dir'], 'best_model.pth')
    model = ConvTasNet.from_pretrained(model_path)
    # Handle device placement.
    if conf['use_gpu']:
        model.cuda()
    model_device = next(model.parameters()).device
    test_set = LibriMix(csv_dir=conf['test_dir'],
                        task=conf['task'],
                        sample_rate=conf['sample_rate'],
                        n_src=conf['train_conf']['data']['n_src'],
                        segment=None)  # Use the full signal length.
    # Used only to reorder the estimated sources.
    loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
    # Randomly choose the indexes of the examples to save.
    eval_save_dir = os.path.join(conf['exp_dir'], conf['out_dir'])
    ex_save_dir = os.path.join(eval_save_dir, 'examples/')
    if conf['n_save_ex'] == -1:
        conf['n_save_ex'] = len(test_set)
    save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
    series_list = []
    with torch.no_grad():
        for idx in tqdm(range(len(test_set))):
            # Forward the network on the mixture.
            mix, sources = tensors_to_device(test_set[idx], device=model_device)
            est_sources = model(mix.unsqueeze(0))
            # PIT resolves the permutation and returns the reordered estimates.
            loss, reordered_sources = loss_func(est_sources, sources[None],
                                                return_est=True)
            mix_np = mix.cpu().data.numpy()
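A script like this is usually driven from an argument parser; the following is a minimal hand-built configuration sketch for calling main. All concrete values (paths, sample rate, number of sources) are illustrative placeholders, not taken from the source.

if __name__ == '__main__':
    conf = {
        'exp_dir': 'exp/train_convtasnet',  # hypothetical experiment dir
        'out_dir': 'eval_results',          # hypothetical output subdir
        'test_dir': 'data/wav8k/min/test',  # hypothetical LibriMix csv dir
        'task': 'sep_clean',
        'sample_rate': 8000,
        'use_gpu': torch.cuda.is_available(),
        'n_save_ex': 10,                    # -1 would save every example
        'train_conf': {'data': {'n_src': 2}},
    }
    main(conf)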
from asteroid.data import WhamRDataset
from model import load_best_model  # Recipe-local helper (defined in model.py).


def main(conf):
    model = load_best_model(conf['train_conf'], conf['exp_dir'])
    # Handle device placement.
    if conf['use_gpu']:
        model.cuda()
    model_device = next(model.parameters()).device
    test_set = WhamRDataset(conf['test_dir'], conf['task'],
                            sample_rate=conf['sample_rate'],
                            nondefault_nsrc=model.n_src,
                            segment=None)  # Use the full signal length.
    # Used only to reorder the estimated sources.
    loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
    # Randomly choose the indexes of the examples to save.
    ex_save_dir = os.path.join(conf['exp_dir'], 'examples/')
    if conf['n_save_ex'] == -1:
        conf['n_save_ex'] = len(test_set)
    save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
    series_list = []
    with torch.no_grad():
        for idx in tqdm(range(len(test_set))):
            # Forward the network on the mixture.
            mix, sources = tensors_to_device(test_set[idx], device=model_device)
            est_sources = model(mix[None, None])
            loss, reordered_sources = loss_func(est_sources, sources[None],
                                                return_est=True)
            mix_np = mix[None].cpu().data.numpy()
            sources_np = sources.squeeze().cpu().data.numpy()
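Both evaluation scripts use PITLossWrapper only to put the estimated sources back into the target order. A self-contained sketch of that behaviour follows; the shapes and tensors are illustrative, not from the source.

import torch
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr

loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
targets = torch.randn(1, 2, 8000)  # (batch, n_src, time)
# Swap the source order and add a little noise to simulate estimates.
estimates = targets[:, [1, 0], :] + 0.01 * torch.randn(1, 2, 8000)
loss, reordered = loss_func(estimates, targets, return_est=True)
# `reordered` is `estimates` permuted back to match the target ordering.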
import os

import torch
import yaml
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from asteroid.losses import PITLossWrapper, PairwiseNegSDR

# make_model_and_optimizer, get_encoded_paths and SystemTwoStep below are
# recipe-local helpers.
model, optimizer = make_model_and_optimizer(
    conf, model_part=train_part, pretrained_filterbank=pretrained_filterbank
)
# Define the learning-rate scheduler.
scheduler = None
if conf[train_part + '_training'][train_part[0] + '_half_lr']:
    scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5,
                                  patience=5)
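# For concreteness (illustrative expansion, not from the source): with
# train_part='filterbank' the lookup above reads
# conf['filterbank_training']['f_half_lr'], and with train_part='separator'
# it reads conf['separator_training']['s_half_lr'].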
# Just after instantiating, save the args for easy loading in the future.
exp_dir, checkpoint_dir = get_encoded_paths(conf, train_part)
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
    yaml.safe_dump(conf, outfile)
# Define the loss function.
loss_func = PITLossWrapper(PairwiseNegSDR('sisdr', zero_mean=False),
                           pit_from='pw_mtx')
system = SystemTwoStep(model=model, loss_func=loss_func,
                       optimizer=optimizer, train_loader=train_loader,
                       val_loader=val_loader, scheduler=scheduler,
                       config=conf, module=train_part)
# Define callbacks.
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
                             mode='min', save_top_k=1, verbose=True)
early_stopping = False
if conf[train_part + '_training'][train_part[0] + '_early_stop']:
    early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                                   verbose=True)
# Don't request GPUs if none are available.
if not torch.cuda.is_available():
    print('No GPU available, setting gpus to None.')
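The system and callbacks built above are typically handed to a pytorch_lightning Trainer. A minimal sketch under a recent pytorch_lightning API follows; the epoch budget and device settings are placeholders, not values from the source.

from pytorch_lightning import Trainer

callbacks = [checkpoint]
if early_stopping:
    callbacks.append(early_stopping)
trainer = Trainer(max_epochs=200,  # placeholder epoch budget
                  callbacks=callbacks,
                  accelerator='gpu' if torch.cuda.is_available() else 'cpu')
trainer.fit(system)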