How to use the pyroomacoustics.normalize function in pyroomacoustics

To help you get started, we’ve selected a few pyroomacoustics examples, based on popular ways it is used in public projects.
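
pra.normalize rescales a signal so that its peak amplitude fills a target range. As a quick orientation before the snippets below, here is a minimal sketch of the two ways the function is used on this page; the sample rate and the random test signal are stand-ins rather than part of any example:

import numpy as np
import pyroomacoustics as pra

fs = 16000
signal = np.random.randn(fs)  # one second of stand-in audio

# scale to unit peak amplitude (float output in [-1, 1])
unit_peak = pra.normalize(signal)

# scale to the 16-bit integer range, then cast for writing a PCM WAV file
pcm16 = pra.normalize(signal, bits=16).astype(np.int16)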

LCAV / pyroomacoustics / examples / noise_reduction_subspace.py (View on GitHub)
# process the noisy signal block by block
n = 0  # current sample index
hop = frame_len // 2
start_time = time.time()
while noisy_signal.shape[0] - n >= hop:

    processed_audio[n:n + hop] = scnr.apply(noisy_signal[n:n + hop])

    # advance to the next block
    n += hop

proc_time = time.time() - start_time
print("Processing time: {:.2f} minutes".format(proc_time / 60))
# save to output file
enhanced_signal_fp = os.path.join(os.path.dirname(__file__), 'output_samples',
                                  'denoise_output_Subspace.wav')
wavfile.write(enhanced_signal_fp, fs,
              pra.normalize(processed_audio).astype(np.float32))


"""
Plot spectrogram
"""
print("Noisy and denoised file written to: '%s'" %
      os.path.join(os.path.dirname(__file__), 'output_samples'))

signal_norm = signal / np.abs(signal).max()

if plot_spec:
    min_val = -80
    max_val = -40
    plt.figure()
    plt.subplot(3, 1, 1)
    plt.specgram(noisy_signal[:n - hop], NFFT=256, Fs=fs,
                 vmin=min_val, vmax=max_val)
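
In this denoising example, pra.normalize rescales the processed audio to unit peak before it is cast to np.float32, keeping the samples of the output WAV file within [-1, 1].
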
LCAV / pyroomacoustics / examples / bss_live.py (View on GitHub)
def play(self, src):
    sd.play(pra.normalize(src) * 0.75, samplerate=self.fs, blocking=False)
LCAV / pyroomacoustics / examples / bss_example.py (View on GitHub)
def play(self, src):
    sd.play(pra.normalize(src) * 0.75, samplerate=self.fs, blocking=False)
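
In both play helpers above, the source is normalized to unit peak and then scaled by 0.75 to leave some headroom before the samples are handed to sounddevice for playback.
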
LCAV / pyroomacoustics / examples / beamforming_time_domain.py (View on GitHub)
# Define the FFT length
N = 1024

# Create a microphone array
if shape == 'Circular':
    R = pra.circular_2D_array(mic1, M, phi, d*M/(2*np.pi))
else:
    R = pra.linear_2D_array(mic1, M, phi, d)

# path to samples
path = os.path.dirname(__file__)

# The first signal (of interest) is singing
rate1, signal1 = wavfile.read(path + '/input_samples/singing_' + str(Fs) + '.wav')
signal1 = np.array(signal1, dtype=float)
signal1 = pra.normalize(signal1)
signal1 = pra.highpass(signal1, Fs)
delay1 = 0.

# The second signal (interferer) is some German speech
rate2, signal2 = wavfile.read(path + '/input_samples/german_speech_' + str(Fs) + '.wav')
signal2 = np.array(signal2, dtype=float)
signal2 = pra.normalize(signal2)
signal2 = pra.highpass(signal2, Fs)
delay2 = 1.

# Create the room
room_dim = [4, 6]
room1 = pra.ShoeBox(
    room_dim,
    absorption=absorption,
    fs=Fs,
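
Here pra.normalize is part of the input preprocessing: each source signal is converted to float, normalized to unit peak, and high-passed with pra.highpass before being fed to the room simulation.
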
LCAV / pyroomacoustics / examples / bss_example.py (View on GitHub)
    plt.plot(np.arange(b.shape[0]) * 10, b[:,1], label='SIR Source 1', c='b', marker='o')
    plt.legend()

    plt.tight_layout(pad=0.5)

    ## GUI
    if not args.gui:
        plt.show()
    else:
        plt.show(block=False)

    if args.save:
        from scipy.io import wavfile

        wavfile.write('bss_iva_mix.wav', room.fs,
                pra.normalize(mics_signals[0,:], bits=16).astype(np.int16))
        for i, sig in enumerate(y):
            wavfile.write('bss_iva_source{}.wav'.format(i+1), room.fs,
                    pra.normalize(sig, bits=16).astype(np.int16))

    if args.gui:

        # Make a simple GUI to listen to the separated samples
        from tkinter import Tk, Button, Label
        import sounddevice as sd

        # Now comes the GUI part
        class PlaySoundGUI(object):
            def __init__(self, master, fs, mix, sources):
                self.master = master
                self.fs = fs
                self.mix = mix
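
Passing bits=16 makes pra.normalize scale the signal to the 16-bit integer range, so the result can be cast to np.int16 and written out as a standard PCM WAV file.
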
LCAV / pyroomacoustics / examples / beamforming_time_domain.py (View on GitHub)
# compute beamforming filters
mics = pra.Beamformer(R, Fs, N, Lg=Lg)
room1.add_microphone_array(mics)
room1.compute_rir()
room1.simulate()
mics.rake_perceptual_filters(good_sources,
                             bad_sources,
                             sigma2_n * np.eye(mics.Lg * mics.M),
                             delay=delay)

# process the signal
output = mics.process()

# save to output file
out_RakePerceptual = pra.normalize(pra.highpass(output, Fs))
wavfile.write(path + '/output_samples/output_RakePerceptual.wav', Fs, out_RakePerceptual)

'''
Plot all the spectrograms
'''

dSNR = pra.dB(room1.direct_snr(mics.center[:,0], source=0), power=True)
print('The direct SNR for the good source is ' + str(dSNR))

# remove a bit of signal at the end
n_lim = int(np.ceil(len(input_mic) - t_cut*Fs))
input_clean = signal1[:n_lim]
input_mic = input_mic[:n_lim]
out_DirectMVDR = out_DirectMVDR[:n_lim]
out_RakeMVDR = out_RakeMVDR[:n_lim]
out_DirectPerceptual = out_DirectPerceptual[:n_lim]
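
On the output side, the beamformed signal is high-passed and normalized in a single line, pra.normalize(pra.highpass(output, Fs)), before being written to disk.
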
onolab-tmu / overiva / overiva_oneshot.py (View on GitHub)
    plt.legend()
    plt.tight_layout(pad=0.5)

    if not args.gui:
        plt.show()
    else:
        plt.show(block=False)

    if args.save:
        from scipy.io import wavfile

        wavfile.write(
            "bss_iva_mix.wav",
            room.fs,
            pra.normalize(mics_signals[0, :], bits=16).astype(np.int16),
        )
        for i, sig in enumerate(y_hat):
            wavfile.write(
                "bss_iva_source{}.wav".format(i + 1),
                room.fs,
                pra.normalize(sig, bits=16).astype(np.int16),
            )

    if args.gui:

        from tkinter import Tk

        # Make a simple GUI to listen to the separated samples
        root = Tk()
        my_gui = PlaySoundGUI(
            root, room.fs, mics_signals[0, :], y_hat.T, references=ref[:, :, 0]
        )
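
This last snippet, from the third-party overiva project, applies the same bits=16 normalization pattern when saving the mixed and separated signals.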