How to use the sounddevice.stop function from the sounddevice package

To help you get started, we’ve selected a few sounddevice examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github qobi / ece57000 / speech_classifier.py View on Github external
def stop_recording():
    """Stop audio capture, replay the recording, and show its spectrogram.

    Trims the global ``waveform`` buffer to the frames actually captured
    before the stop request, plays the trimmed audio back (blocking until
    playback finishes), renders a spectrogram on the shared axes, and
    returns the transposed spectrum array.
    """
    global waveform
    elapsed = time.time() - start_time
    sd.stop()
    # Keep only the frames recorded before the stop was requested;
    # never exceed what the buffer actually holds.
    n_frames = min(int(elapsed * sd.default.samplerate), len(waveform))
    waveform = waveform[:n_frames, 0]
    sd.play(waveform)
    sd.wait()  # block until playback has finished
    get_axes().clear()
    spectrum, _freqs, _t, _im = get_axes().specgram(
        waveform, Fs=sd.default.samplerate)
    redraw()
    return np.transpose(spectrum)
github qobi / ece57000 / speech_classifier_gui.py View on Github external
def stop_recording():
    """Stop capture, display the recording's spectrogram, then play it back.

    Trims the global ``waveform`` buffer to the frames captured before the
    stop request, draws a spectrogram on the shared axes, plays the trimmed
    audio (blocking until playback ends), and returns the transposed
    spectrum array.
    """
    global waveform
    actual_time = time.time()-start_time
    sd.stop()
    # Keep only the frames recorded before the stop was requested.
    samples = min(int(actual_time*sd.default.samplerate), len(waveform))
    waveform = waveform[0:samples, 0]
    get_axes().clear()
    spectrum, freqs, t, im = get_axes().specgram(waveform,
                                                 Fs=sd.default.samplerate)
    redraw()
    sd.play(waveform)
    # sd.wait() blocks until playback actually finishes; the previous
    # time.sleep(len(waveform)/samplerate) only approximated the duration
    # and ignored output latency, so it could cut playback short.
    sd.wait()
    return np.transpose(spectrum)
github qobi / ece57000 / speech_clusterer_gui.py View on Github external
def stop_recording():
    """Stop capture, display the recording's spectrogram, then play it back.

    Trims the global ``waveform`` buffer to the frames captured before the
    stop request, draws a spectrogram on the shared axes, plays the trimmed
    audio (blocking until playback ends), and returns both the trimmed
    waveform and the transposed spectrum array.
    """
    global waveform
    actual_time = time.time()-start_time
    sd.stop()
    # Keep only the frames recorded before the stop was requested.
    samples = min(int(actual_time*sd.default.samplerate), len(waveform))
    waveform = waveform[0:samples, 0]
    get_axes().clear()
    spectrum, freqs, t, im = get_axes().specgram(waveform,
                                                 Fs=sd.default.samplerate)
    redraw()
    sd.play(waveform)
    # sd.wait() blocks until playback actually finishes; the previous
    # time.sleep(len(waveform)/samplerate) only approximated the duration
    # and ignored output latency, so it could cut playback short.
    sd.wait()
    return waveform, np.transpose(spectrum)
github Clockmender / My-AN-Nodes / nodes / audio / audio_init.py View on Github external
# NOTE(review): fragment of a larger method — the enclosing def (and the
# first line's original indentation) are outside this view. streamI is a
# sounddevice input stream; self.runSD toggles capture on/off.
self.chans = streamI.channels
        if self.runSD:
            # Start the input stream if it is not already running.
            if not streamI.active:
                streamI.start()
            self.message = str(streamI)
            # Read one frame; myTuple[0] is the sample data — indexing
            # [0][0][ch] suggests shape (1, channels). TODO confirm.
            myTuple = streamI.read(1)
            l_chan = round(myTuple[0][0][0] * self.multI,5)
            if self.chans == 2:
                r_chan = round(myTuple[0][0][1] * self.multI,5)
            else:
                # Mono input: no right channel.
                r_chan = 0
        else:
            if streamI.active:
                # Shut down the input stream and stop sounddevice playback,
                # suppressing any errors during teardown.
                streamI.abort(ignore_errors=True)
                sd.stop(ignore_errors=True)
            self.message = 'Input Stream Closed'
            l_chan = 0
            r_chan = 0

        return l_chan, r_chan
github onolab-tmu / overiva / routines.py View on Github external
# NOTE(review): fragment of a larger GUI-builder method — the enclosing def,
# the loop header over i, and the matching `if` for the `else` below are
# outside this view.
ref_sig = self.references[i, :]
                self.buttons.append(
                    Button(
                        master,
                        text="Ref " + str(i + 1),
                        # Bind the current reference row as a default arg so
                        # each button plays its own signal (avoids the
                        # late-binding closure pitfall with loop variables).
                        command=lambda rs=self.references[i, :]: self.play(rs),
                    )
                )
                self.buttons[-1].grid(row=nrow, column=0)

            else:
                self.buttons[-1].grid(row=nrow, columnspan=2)

            nrow += 1

        # Stop button halts any in-progress sounddevice playback immediately.
        self.stop_button = Button(master, text="Stop", command=sd.stop)
        self.stop_button.grid(row=nrow, columnspan=2)
        nrow += 1

        # Close button quits the Tk main loop.
        self.close_button = Button(master, text="Close", command=master.quit)
        self.close_button.grid(row=nrow, columnspan=2)
        nrow += 1
github samuelgarcia / HearingLossSimulator / hearinglosssimulator / gui / guitools.py View on Github external
# NOTE(review): fragment of a test-tone function — the enclosing def (with
# parameters duration, freq, nb_channel, dbgain) is outside this view.
device='default'
    # Query the chosen device to learn its native sample rate.
    dev = sd.query_devices(device=device)
    print(dev)

    sample_rate = dev['default_samplerate']


    # Build the test tone: `duration` seconds at `freq` Hz via the project
    # helper hls.several_sinus (presumably returns shape (length,) — TODO
    # confirm), tiled across nb_channel channels.
    length = int(sample_rate * duration)+1
    sound = hls.several_sinus(length, freqs=[freq], sample_rate=sample_rate, ampl = 1.)
    sound = np.tile(sound[:, None],(1, nb_channel))
    # Convert the dB gain to a linear amplitude factor.
    gain = 10**(dbgain/20.)
    sound *= gain

    # blocking=True makes play() wait until the tone has finished.
    sd.play(sound, device=device, blocking=True)

    sd.stop()
github CorentinJ / Real-Time-Voice-Cloning / sv2tts / toolbox / demo_umap.py View on Github external
        # NOTE(review): fragment of a larger Qt layout method. Clicking the
        # Stop button aborts any in-progress sounddevice playback.
        stop_button.clicked.connect(lambda: sd.stop())
        browser_grid.addWidget(QLabel("Dataset", ), 0, 0)