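# The snippets below are evaluation loops for source-separation / speech-
# enhancement models: each one forwards a test mixture through a model,
# computes per-utterance metrics with get_metrics, and saves a few example
# wav files. They are fragments and omit their imports; the block below is a
# sketch of what they appear to rely on. Treat the `asteroid` origin of
# get_metrics and tensors_to_device as an assumption based on the helper
# names, not something stated in the snippets.
import os
import random

import pandas as pd
import soundfile as sf
import torch
from tqdm import tqdm

from asteroid.metrics import get_metrics       # assumed provider of get_metrics
from asteroid.utils import tensors_to_device   # assumed provider of tensors_to_device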
ex_save_dir = os.path.join(conf['exp_dir'], 'examples/')
if conf['n_save_ex'] == -1:
    conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
# Disable gradient tracking for the whole evaluation loop.
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
    # Forward the network on the mixture.
    mix, sources = tensors_to_device(test_set[idx], device=model_device)
    est_sources = model(mix[None, None])
    loss, reordered_sources = loss_func(est_sources, sources[None],
                                        return_est=True)
    mix_np = mix[None].cpu().data.numpy()
    sources_np = sources.squeeze().cpu().data.numpy()
    est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
    utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
                              sample_rate=conf['sample_rate'])
    utt_metrics['mix_path'] = test_set.mix[idx][0]
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "mixture.wav", mix_np[0],
                 conf['sample_rate'])
        # Loop over the sources and estimates
        for src_idx, src in enumerate(sources_np):
            sf.write(local_save_dir + "s{}.wav".format(src_idx + 1), src,
                     conf['sample_rate'])
        for src_idx, est_src in enumerate(est_sources_np):
            sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
                     est_src, conf['sample_rate'])
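
# After a loop like the one above, the per-utterance Series collected in
# series_list are typically stacked into a DataFrame and averaged. A minimal
# sketch; the CSV filename, the example metric names and the "input_" prefix
# for mixture-level metrics are assumptions about get_metrics' output keys,
# not taken from the snippet above.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf['exp_dir'], 'all_metrics.csv'))
final_results = {}
for metric_name in ['si_sdr', 'sdr', 'stoi']:  # example metric names
    input_col = 'input_' + metric_name
    if metric_name in all_metrics_df and input_col in all_metrics_df:
        final_results[metric_name] = all_metrics_df[metric_name].mean()
        # Improvement of the estimates over the unprocessed mixture.
        final_results[metric_name + '_imp'] = (
            all_metrics_df[metric_name] - all_metrics_df[input_col]
        ).mean()
print(final_results)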
ex_save_dir = os.path.join(conf['exp_dir'], 'examples/')
if conf['n_save_ex'] == -1:
    conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
    # Forward the network on the mixture.
    mix, sources = tensors_to_device(test_set[idx], device=model_device)
    est_sources = model(mix.unsqueeze(0))
    loss, reordered_sources = loss_func(est_sources, sources[None],
                                        return_est=True)
    mix_np = mix.cpu().data.numpy()
    sources_np = sources.cpu().data.numpy()
    est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
    # For each utterance, we get a dictionary with the mixture path,
    # the input and output metrics.
    utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
                              sample_rate=conf['sample_rate'],
                              metrics_list=compute_metrics)
    utt_metrics['mix_path'] = test_set.mixture_path
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "mixture.wav", mix_np,
                 conf['sample_rate'])
        # Loop over the sources and estimates
        for src_idx, src in enumerate(sources_np):
            sf.write(local_save_dir + "s{}.wav".format(src_idx), src,
                     conf['sample_rate'])
        for src_idx, est_src in enumerate(est_sources_np):
            sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx),
                     est_src, conf['sample_rate'])
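
# The "Wav files and metrics as text" comment above suggests also writing the
# per-utterance metrics next to the saved wavs, but none of the snippets shows
# that part. A hypothetical helper that could be called inside the
# `if idx in save_idx:` block; the "metrics.json" filename is an arbitrary
# choice, not from the snippets.
import json

def save_metrics_as_text(metrics_dict, out_dir, filename='metrics.json'):
    """Dump a per-utterance metrics dict (floats plus path strings) to JSON."""
    serializable = {k: v if isinstance(v, str) else float(v)
                    for k, v in metrics_dict.items()}
    with open(os.path.join(out_dir, filename), 'w') as f:
        json.dump(serializable, f, indent=2)

# Usage inside the saving block above:
#     save_metrics_as_text(utt_metrics, local_save_dir)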
series_list = []
torch.no_grad().__enter__()
cnt = 0
for idx in tqdm(range(len(test_set))):
    # Forward the network on the mixture.
    mix, sources = tensors_to_device(test_set[idx], device=model_device)
    est_sources = model(mix.unsqueeze(0))
    # Trim estimates, targets and mixture to a common length.
    min_len = min(est_sources.shape[-1], sources.shape[-1], mix.shape[-1])
    est_sources = est_sources[..., :min_len]
    mix, sources = mix[..., :min_len], sources[..., :min_len]
    loss, reordered_sources = loss_func(est_sources, sources[None],
                                        return_est=True)
    mix_np = mix[None].cpu().data.numpy()
    sources_np = sources.cpu().data.numpy()
    est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
    utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
                              sample_rate=conf['sample_rate'])
    utt_metrics['mix_path'] = test_set.mix[idx][0]
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "mixture.wav", mix_np[0],
                 conf['sample_rate'])
        # Loop over the sources and estimates
        for src_idx, src in enumerate(sources_np):
            sf.write(local_save_dir + "s{}.wav".format(src_idx + 1), src,
                     conf['sample_rate'])
        for src_idx, est_src in enumerate(est_sources_np):
            sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
                     est_src, conf['sample_rate'])
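
# The min_len trimming above aligns estimates, targets and mixture when the
# model's output length differs from the input length (e.g., because of
# encoder/decoder padding). The same logic as a hypothetical helper:
def trim_to_shortest(*tensors):
    """Trim every tensor to the shortest last-axis length among them."""
    min_len = min(t.shape[-1] for t in tensors)
    return tuple(t[..., :min_len] for t in tensors)

# Equivalent to the three trimming lines above:
#     est_sources, mix, sources = trim_to_shortest(est_sources, mix, sources)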
for idx in tqdm(range(len(test_set))):
    # Forward the network on the mixture.
    mix, sources = tensors_to_device(test_set[idx], device=model_device)
    # Keep only the first channel (index 0 along the last axis).
    mix = mix[..., 0]
    sources = sources[..., 0]
    # noise = noise[..., 0]
    if conf['train_conf']['training']['loss_alpha'] == 1:
        # If Deep clustering only, use DC masks.
        est_sources, dic_out = model.dc_head_separate(mix[None, None])
    else:
        # If Chimera, use mask-inference head masks.
        est_sources, dic_out = model.separate(mix[None, None])
    loss, reordered_sources = loss_func(est_sources, sources[None],
                                        return_est=True)
    mix_np = mix[None].cpu().data.numpy()
    sources_np = sources.squeeze().cpu().data.numpy()
    est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
    utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
                              sample_rate=conf['sample_rate'],
                              metrics_list=compute_metrics)
    utt_metrics['mix_path'] = test_set.mix[idx][0]
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "mixture.wav", mix_np[0],
                 conf['sample_rate'])
        # Loop over the sources and estimates
        for src_idx, src in enumerate(sources_np):
            sf.write(local_save_dir + "s{}.wav".format(src_idx + 1), src,
                     conf['sample_rate'])
        for src_idx, est_src in enumerate(est_sources_np):
            sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
                     est_src, conf['sample_rate'])
for idx in tqdm(range(len(test_set))):
    # Forward the network on the mixture.
    mix, sources = tensors_to_device(test_set[idx], device=model_device)
    if conf['train_conf']['training']['loss_alpha'] == 1:
        # If Deep clustering only, use DC masks.
        est_sources, dic_out = model.dc_head_separate(mix[None, None])
    else:
        # If Chimera, use mask-inference head masks.
        est_sources, dic_out = model.separate(mix[None, None])
    loss, reordered_sources = loss_func(est_sources, sources[None],
                                        return_est=True)
    mix_np = mix[None].cpu().data.numpy()
    sources_np = sources.squeeze().cpu().data.numpy()
    est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
    utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
                              sample_rate=conf['sample_rate'],
                              metrics_list=compute_metrics)
    utt_metrics['mix_path'] = test_set.mix[idx][0]
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "mixture.wav", mix_np[0],
                 conf['sample_rate'])
        # Loop over the sources and estimates
        for src_idx, src in enumerate(sources_np):
            sf.write(local_save_dir + "s{}.wav".format(src_idx + 1), src,
                     conf['sample_rate'])
        for src_idx, est_src in enumerate(est_sources_np):
            sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
                     est_src, conf['sample_rate'])
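
# The two Chimera-style loops above choose a separation head from the training
# configuration: loss_alpha == 1 means deep-clustering-only training, so the
# DC-head masks are used; otherwise the mask-inference head is used. The same
# dispatch as a hypothetical helper (dc_head_separate and separate are the
# model methods called above; the helper itself is illustrative):
def separate_with_configured_head(model, mix, loss_alpha):
    """Return (est_sources, dic_out) from the head selected by loss_alpha."""
    if loss_alpha == 1:
        # Deep-clustering-only model: use the DC head.
        return model.dc_head_separate(mix[None, None])
    # Chimera model: use the mask-inference head.
    return model.separate(mix[None, None])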
model_device = next(model.parameters()).device
# Randomly choose the indexes of sentences to save.
if save_dir is None:
    conf['n_save_ex'] = 0
if conf['n_save_ex'] == -1:
    conf['n_save_ex'] = len(dict_list)
save_idx = random.sample(range(len(dict_list)), conf['n_save_ex'])
series_list = []
for idx, wav_dic in enumerate(tqdm(dict_list)):
    # Forward the network on the mixture.
    noisy_np, clean_np, fs = load_wav_dic(wav_dic)
    with torch.no_grad():
        net_input = torch.tensor(noisy_np)[None, None].to(model_device)
        est_clean_np = model.denoise(net_input).squeeze().cpu().data.numpy()
    utt_metrics = get_metrics(mix=noisy_np, clean=clean_np,
                              estimate=est_clean_np,
                              sample_rate=fs,
                              metrics_list=COMPUTE_METRICS)
    utt_metrics['noisy_path'] = wav_dic['noisy']
    utt_metrics['clean_path'] = wav_dic['clean']
    series_list.append(pd.Series(utt_metrics))
    # Save some examples in a folder. Wav files and metrics as text.
    if idx in save_idx:
        local_save_dir = os.path.join(save_dir, 'ex_{}/'.format(idx))
        os.makedirs(local_save_dir, exist_ok=True)
        sf.write(local_save_dir + "noisy.wav", noisy_np, fs)
        sf.write(local_save_dir + "clean.wav", clean_np, fs)
        sf.write(local_save_dir + "estimate.wav", est_clean_np, fs)
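
# load_wav_dic is called in the loop above but not defined in the snippet.
# From its use, it takes a dict with 'noisy' and 'clean' paths and returns
# (noisy, clean, sample_rate). A hypothetical implementation consistent with
# that usage, assuming soundfile I/O and matching sample rates:
def load_wav_dic(wav_dic):
    """Load the noisy/clean pair described by wav_dic."""
    noisy_np, fs_noisy = sf.read(wav_dic['noisy'], dtype='float32')
    clean_np, fs_clean = sf.read(wav_dic['clean'], dtype='float32')
    assert fs_noisy == fs_clean, "Noisy and clean files should share a sample rate."
    return noisy_np, clean_np, fs_noisy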