How to use the librosa.output.write_wav function in librosa

To help you get started, we’ve selected a few librosa.output.write_wav examples, drawn from popular ways the function is used in public projects.
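
Before the project snippets, here is a minimal, self-contained sketch of the call itself. The file names are placeholders, and the soundfile line applies only to librosa 0.8 and later, where the librosa.output module (deprecated in 0.7) has been removed and soundfile.write is the documented replacement:

import librosa
import soundfile as sf  # only needed for librosa >= 0.8

# Load an audio file as a float32 signal at its native sample rate (placeholder path).
y, sr = librosa.load("input.wav", sr=None)

# librosa < 0.8: write the floating-point signal back out as a WAV file.
librosa.output.write_wav("output.wav", y, sr)

# librosa >= 0.8: librosa.output no longer exists; use soundfile instead.
sf.write("output.wav", y, sr)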


github Hiroshiba / realtime-yukarin / test_scripts / test_voice_changer.py
start_time = 0
for i in range(len(raw_wave) // audio_config.chunk + 1):
    feature_out = wrapper.convert_next(time_length=audio_config.chunk / audio_config.in_rate)
    wrapper.voice_changer_stream.add_out_feature(start_time=start_time, feature=feature_out, frame_period=frame_period)
    start_time += audio_config.chunk / audio_config.in_rate
    print('cent', i, flush=True)

start_time = 0
for i in range(len(raw_wave) // audio_config.chunk + 1):
    wave_out = wrapper.post_convert_next(time_length=audio_config.chunk / audio_config.out_rate)
    wave_out_list.append(wave_out)
    start_time += audio_config.chunk / audio_config.out_rate
    print('post', i, flush=True)

out_wave = numpy.concatenate([w.wave for w in wave_out_list]).astype(numpy.float32)
librosa.output.write_wav(str(test_output_path), out_wave, sr=audio_config.out_rate)
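
Note the cast to numpy.float32 before the call above: librosa.output.write_wav validates its input with librosa.util.valid_audio, which rejects non-floating-point arrays.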
github r9y9 / wavenet_vocoder / tests / test_model.py
plt.figure(figsize=(16, 10))
plt.subplot(4, 1, 1)
librosa.display.waveplot(x_org, sr=sr)
plt.subplot(4, 1, 2)
librosa.display.waveplot(y_offline, sr=sr)
plt.subplot(4, 1, 3)
librosa.display.waveplot(y_online, sr=sr)
plt.subplot(4, 1, 4)
librosa.display.waveplot(y_inference, sr=sr)
plt.show()

save_audio = False
if save_audio:
    librosa.output.write_wav("target.wav", x_org, sr=sr)
    librosa.output.write_wav("online.wav", y_online, sr=sr)
    librosa.output.write_wav("inference.wav", y_inference, sr=sr)
github bfs18 / nsynth_wavenet / tests / test_reader.py
def test_tf_reader():
    dataset = reader.Dataset(tfr_path, is_training=False)
    inputs = dataset.get_batch(8)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    tf.train.start_queue_runners(sess=sess)

    for i in range(10):
        in_vals = sess.run(inputs)
        print(in_vals['key'])
        wp = os.path.join('test_data', in_vals['key'][0].decode('utf-8') + '.wav')
        librosa.output.write_wav(wp, in_vals['wav'][0], sr=16000)
github NVIDIA / OpenSeq2Seq / scripts / change_sample_rate.py
def convert_to_wav(flac_files,sample_rate,target_dir):
  """This function converts flac input to wav output of given sample rate"""
  for sound_file in flac_files:
    dir_tree = sound_file.split("/")[-4:]
    save_path = '/'.join(dir_tree[:-1])
    name = dir_tree[-1][:-4] + "wav"
    if not os.path.isdir(save_path):
      os.makedirs(save_path)
    sig, sr = librosa.load(sound_file, sample_rate)
    output_dir = target_dir+save_path
    if not os.path.isdir(output_dir):
      os.makedirs(output_dir)
    librosa.output.write_wav(output_dir + "/" + name, sig, sample_rate)
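
Because librosa.output was removed in librosa 0.8, the conversion above no longer runs on current librosa releases. The following is a minimal sketch of the same flac-to-wav conversion using soundfile, reusing the snippet's names (convert_to_wav, flac_files, sample_rate, target_dir); it is an illustrative rewrite, not part of the OpenSeq2Seq script:

import os
import librosa
import soundfile as sf

def convert_to_wav(flac_files, sample_rate, target_dir):
    """Convert FLAC inputs to WAV at the given sample rate (librosa >= 0.8)."""
    for sound_file in flac_files:
        # Keep the last few path components, mirroring the original script.
        dir_tree = sound_file.split("/")[-4:]
        out_dir = os.path.join(target_dir, *dir_tree[:-1])
        os.makedirs(out_dir, exist_ok=True)

        # librosa.load resamples to sample_rate and returns a float32 signal.
        sig, _ = librosa.load(sound_file, sr=sample_rate)

        name = os.path.splitext(dir_tree[-1])[0] + ".wav"
        sf.write(os.path.join(out_dir, name), sig, sample_rate)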
github leimao / Singing_Voice_Separation_RNN / evaluate.py
stft_mono_magnitude = np.array([stft_mono_magnitude])

y1_pred, y2_pred = model.test(x = stft_mono_magnitude)

# ISTFT with the phase from mono
y1_stft_hat = combine_magnitdue_phase(magnitudes = y1_pred[0], phases = stft_mono_phase)
y2_stft_hat = combine_magnitdue_phase(magnitudes = y2_pred[0], phases = stft_mono_phase)

y1_stft_hat = y1_stft_hat.transpose()
y2_stft_hat = y2_stft_hat.transpose()

y1_hat = librosa.istft(y1_stft_hat, hop_length = hop_length)
y2_hat = librosa.istft(y2_stft_hat, hop_length = hop_length)

librosa.output.write_wav(wav_mono_filepath, wav_mono, mir1k_sr)
librosa.output.write_wav(wav_src1_hat_filepath, y1_hat, mir1k_sr)
librosa.output.write_wav(wav_src2_hat_filepath, y2_hat, mir1k_sr)
github drscotthawley / panotti / utils / split_audio.py
            indices = np.arange(stride, signal_length, stride)      # where to split
            clip_list = np.split( signal, indices, axis=axis)       # do the splitting
            intended_length = stride
            clips = fix_last_element(clip_list, intended_length, axis) # what to do with last clip

            sections = int( np.ceil( signal.shape[axis] / stride) ) # just to check 
            if( sections != clips.shape[0]):                        # just in case
                print("              **** Warning: sections = "+str(sections)+", but clips.shape[0] = "+str(clips.shape[0]) )
            ndigits = len(str(sections))   # find out # digits needed to print section #s
            for i in range(sections):
                clip = clips[i]
                filename_no_ext = os.path.splitext(infile)[0]
                ext = os.path.splitext(infile)[1]
                outfile = filename_no_ext+"_s"+'{num:{fill}{width}}'.format(num=i+1, fill='0', width=ndigits)+ext
                print("        Saving file",outfile)
                librosa.output.write_wav(outfile,clip,sr)

            if remove_orig:
                os.remove(infile)
        else:
            print("     *** File",infile,"does not exist.  Skipping.")
    return
github HaiFengZeng / clari_wavenet_vocoder / train_student.py
y = P.inv_mulaw(y, hparams.quantize_channels)

# Mask by length
y_hat[length:] = 0
y[length:] = 0

y_student = y_student[idx].view(-1).data.cpu().numpy()
y_student[length:] = 0

# Save audio
audio_dir = join(checkpoint_dir, "audio")
os.makedirs(audio_dir, exist_ok=True)
path = join(audio_dir, "step{:09d}_teacher_predicted.wav".format(global_step))
librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)
path = join(audio_dir, "step{:09d}_student_predicted.wav".format(global_step))
librosa.output.write_wav(path, y_student, sr=hparams.sample_rate)
path = join(audio_dir, "step{:09d}_target.wav".format(global_step))
librosa.output.write_wav(path, y, sr=hparams.sample_rate)
path = join(audio_dir, "step{:09d}.jpg".format(global_step))
save_waveplot(path, y_teacher=y_hat, y_student=y_student, y_target=y, writer=writer, global_step=global_step)
github sjlee7 / speech-dereverberation / model.py
Sl = self.get_spectrum(x_lr, n_fft=2048)
Sh = self.get_spectrum(x_hr, n_fft=2048)
Sp = self.get_spectrum(x_pr, n_fft=2048)

S = np.concatenate((Sl.reshape(Sh.shape[0], Sh.shape[1]), Sh, Sp), axis=1)
fig = Figure(figsize=S.shape[::-1], dpi=1, frameon=False)
canvas = FigureCanvas(fig)
fig.figimage(S, cmap='jet')
fig.savefig(save_path + '/spec/' + 'valid_batch_index' + str(counter) + '-th_pr.png')

librosa.output.write_wav(save_path + '/wav/' + str(counter) + '_dereverb.wav', x_pr, 16000)
librosa.output.write_wav(save_path + '/wav/' + str(counter) + '_reverb.wav', x_lr, 16000)
librosa.output.write_wav(save_path + '/wav/' + str(counter) + '_orig.wav', x_hr, 16000)

canvas_w, s_reverb, s_nonreverb = self.sess.run([self.GG[0], self.gt_reverb[0], self.gt_nonreverb[0]], feed_dict={self.is_valid: False, self.is_train: True})

print('max :', np.max(canvas_w[0]), 'min :', np.min(canvas_w[0]))

x_pr = canvas_w.flatten()
x_pr = x_pr[:int(len(x_pr)/8)]
x_lr = s_reverb.flatten()[:len(x_pr)]
x_hr = s_nonreverb.flatten()[:len(x_pr)]

Sl = self.get_spectrum(x_lr, n_fft=2048)
Sh = self.get_spectrum(x_hr, n_fft=2048)
Sp = self.get_spectrum(x_pr, n_fft=2048)
github zhf459 / P_wavenet_vocoder / train_student.py
y_hat = P.inv_mulaw(y_hat, hparams.quantize_channels)
y = P.inv_mulaw(y, hparams.quantize_channels)

# Mask by length
y_hat[length:] = 0
y[length:] = 0
y_student = y_student.data.cpu().numpy()
y_student = y_student[idx].reshape(y_student.shape[-1])
mu = to_numpy(mu)
# Save audio
audio_dir = join(checkpoint_dir, "audio")
if global_step % 1000 == 0:
    audio_dir = join(checkpoint_dir, "audio")
    os.makedirs(audio_dir, exist_ok=True)
    path = join(audio_dir, "step{:09d}_teacher.wav".format(global_step))
    librosa.output.write_wav(path, y_hat, sr=hparams.sample_rate)
    path = join(audio_dir, "step{:09d}_target.wav".format(global_step))
    librosa.output.write_wav(path, y, sr=hparams.sample_rate)
    path = join(audio_dir, "step{:09d}_student.wav".format(global_step))
    librosa.output.write_wav(path, y_student, sr=hparams.sample_rate)
# TODO save every 200 step,
if global_step % 200 == 0:
    path = join(audio_dir, "wave_step{:09d}.png".format(global_step))
    save_waveplot(path, y_student=y_student, y_target=y, y_teacher=y_hat, student_mu=mu)