How to use the sounddevice.default.samplerate attribute in sounddevice

To help you get started, we’ve selected a few sounddevice examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github castorini / honk / keyword_spotting_data_generator / evaluation / evaluation_data_generator.py View on Github external
parser.add_argument(
        "-c",
        "--continue_from",
        type=str,
        help="url to start from in the given url file")

    parser.add_argument(
        "-o",
        "--output_file",
        type=str,
        help="csv file to append output to")

    args = parser.parse_args()
    keyword = args.keyword.lower()
    sd.default.samplerate = SAMPLE_RATE
    cp.print_progress("keyword is ", keyword)

    plural = inflect.engine()

    if args.url_file:
        # read in from the file
        print('fetching urls from the given file : ', args.url_file)
        url_fetcher = FileReader(args.url_file)
    else:
        # fetch using keywords
        print('fetching urls by searching youtube with keywords : ', keyword)
        url_fetcher = YoutubeSearcher(args.api_key, keyword)

    csv_writer = CsvWriter(keyword, args.output_file)

    total_cc_count = 0
github qobi / ece57000 / speech_classifier_gui.py View on Github external
def stop_recording():
    """Stop the in-progress recording, trim it to the elapsed time,
    display its spectrogram, play it back, and return the spectrum
    (transposed so rows are time frames)."""
    global waveform
    elapsed = time.time() - start_time
    sd.stop()
    # Keep only the frames actually captured; recording may have been
    # stopped before the pre-allocated buffer filled.
    n_frames = min(int(elapsed * sd.default.samplerate), len(waveform))
    waveform = waveform[:n_frames, 0]
    get_axes().clear()
    spec, _freqs, _times, _img = get_axes().specgram(
        waveform, Fs=sd.default.samplerate)
    redraw()
    # Play the trimmed audio and block until playback should be done.
    sd.play(waveform)
    time.sleep(len(waveform) / float(sd.default.samplerate))
    return np.transpose(spec)
github qobi / ece57000 / speech_clusterer_gui.py View on Github external
def stop_recording():
    """Stop recording, trim the buffer to the elapsed time, show a
    spectrogram, play the audio back, and return both the trimmed
    waveform and its (transposed) spectrum."""
    global waveform
    elapsed = time.time() - start_time
    sd.stop()
    # Clamp to the buffer length in case the timer overshot the
    # pre-allocated recording buffer.
    n_frames = min(int(elapsed * sd.default.samplerate), len(waveform))
    waveform = waveform[:n_frames, 0]
    get_axes().clear()
    spec, _freqs, _times, _img = get_axes().specgram(
        waveform, Fs=sd.default.samplerate)
    redraw()
    # Play the trimmed audio and block until playback should be done.
    sd.play(waveform)
    time.sleep(len(waveform) / float(sd.default.samplerate))
    return waveform, np.transpose(spec)
github qobi / ece57000 / speech_classifier_gui.py View on Github external
from gui import *
from distances import *
from nearest_neighbor_classifier import *
import sounddevice as sd
import numpy as np
import time

# Global audio defaults: record and play mono audio at 8 kHz.
sd.default.samplerate = 8000
sd.default.channels = 1
# Training data accumulated through the GUI: recorded utterances and
# their user-assigned labels (parallel lists).
points = []
labels = []

# Utterance distance: dynamic time warping over per-frame L2 vector
# distances built from a scalar L2 distance (helpers from `distances`).
distance = dtw(L2_vector(L2_scalar))

def start_recording(maximum_duration, for_classify):
    """Build a GUI callback that starts an asynchronous recording of at
    most `maximum_duration` seconds and notes the start time.

    When `for_classify` is true the callback does nothing unless at
    least one training point has already been collected.
    """
    def begin():
        global waveform, start_time
        if for_classify and len(points) == 0:
            # Nothing to classify against yet; ignore the request.
            return
        message("")  # clear any previous status message
        n_frames = maximum_duration * sd.default.samplerate
        # sd.rec returns immediately; stop_recording() later trims the
        # buffer to the frames actually captured.
        waveform = sd.rec(n_frames)
        start_time = time.time()
    return begin

def stop_recording():
github qobi / ece57000 / speech_classifier.py View on Github external
from gui import *
from distances_and_classifiers import *
import sounddevice as sd
import numpy as np
import time

# Global audio defaults: record and play mono audio at 8 kHz.
sd.default.samplerate = 8000
sd.default.channels = 1
# Training data accumulated at runtime: recorded utterances and their
# user-assigned labels (parallel lists).
points = []
labels = []

def start_recording(maximum_duration):
    """Return a GUI callback that kicks off an asynchronous recording
    of at most `maximum_duration` seconds and remembers when it began."""
    def begin():
        global waveform, start_time
        message("")  # clear any previous status message
        n_frames = maximum_duration * sd.default.samplerate
        # sd.rec returns immediately; stop_recording() later trims the
        # buffer to the frames actually captured.
        waveform = sd.rec(n_frames)
        start_time = time.time()
    return begin

def stop_recording():
    global waveform
    actual_time = time.time()-start_time
    sd.stop()
github rhelmot / sound-machine / sound / __init__.py View on Github external
# pylint: disable=wildcard-import,redefined-builtin
import sounddevice as sd
# Package-wide playback/synthesis defaults: CD-quality rate, mono.
SAMPLE_RATE = 44100
sd.default.samplerate = SAMPLE_RATE
sd.default.channels = 1

from . import sample, envelope, filter, instrument, notes, asyncplayer
github unique1o1 / Meta-Music / Metamusic / recognize.py View on Github external
def __init__(self, dejavu):
        """Create a microphone recognizer bound to `dejavu`, pushing the
        class-level default_samplerate / default_channels /
        default_format into the global sounddevice defaults.

        NOTE(review): method of MicrophoneRecognizer; the class header
        is outside this listing and the indentation is preserved as-is.
        """
        super(MicrophoneRecognizer, self).__init__(dejavu)
        sd.default.samplerate = MicrophoneRecognizer.default_samplerate
        sd.default.channels = MicrophoneRecognizer.default_channels
        sd.default.dtype = MicrophoneRecognizer.default_format
        # Capture buffers (presumably one list per channel — TODO
        # confirm against the recording callback) and a completion flag.
        self.data = [[], []]
        self.recorded = False
github qobi / ece57000 / speech_classifier.py View on Github external
def internal():
        """Start an asynchronous recording of at most
        `maximum_duration` seconds and remember when it started.

        NOTE(review): closure extracted from start_recording(); the odd
        indentation is preserved from the source listing.
        """
        global waveform, start_time
        message("")
        # sd.rec returns immediately; the buffer is trimmed later in
        # stop_recording() using start_time.
        waveform = sd.rec(maximum_duration*sd.default.samplerate)
        start_time = time.time()
    return internal
github qobi / ece57000 / speech_classifier_gui.py View on Github external
def internal():
        """Start an asynchronous recording unless classification was
        requested while no training points exist yet.

        NOTE(review): closure extracted from start_recording(); the odd
        indentation is preserved from the source listing.
        """
        if (not for_classify) or len(points)>0:
            global waveform, start_time
            message("")
            # sd.rec returns immediately; trimmed later via start_time.
            waveform = sd.rec(maximum_duration*sd.default.samplerate)
            start_time = time.time()
    return internal
github castorini / honkling / personalization / collect_audio.py View on Github external
# ANSI terminal escape codes for colored/styled console output.
# NOTE(review): fragment of a bcolors-style class whose header is
# outside this listing; indentation preserved as-is.
OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

# Audio capture configuration.
CHUNK = 1000
FORMAT = pyaudio.paInt16  # 16-bit signed samples
SAMPLE_SIZE = 2           # bytes per sample (matches paInt16)
CHANNELS = 1
SAMPLE_RATE = 44100
# Leading portion of each recording to discard, presumably microphone
# start-up noise (seconds, and the corresponding sample index) —
# TODO confirm against the recording code.
INITIAL_NOISE_DROP_RATE = 0.045
INITIAL_NOISE_INDEX = math.floor(SAMPLE_RATE * INITIAL_NOISE_DROP_RATE)
RECORD_SECONDS = 1
sd.default.samplerate = SAMPLE_RATE

# Number of recordings to collect for positive and negative examples.
POS_COUNT = 50
NEG_COUNT = 10

# Full keyword vocabulary, and the subset treated as positive targets.
KEYWORDS = ['bird', 'dog', 'eight', 'four', 'happy', 'left', 'marvin', 'no', 'on', 'seven', 'six', 'tree', 'up', 'wow', 'zero', 'bed', 'cat', 'down', 'five', 'go', 'house', 'nine', 'off', 'one', 'right', 'sheila', 'stop', 'three', 'two', 'yes']
POS_KEYWORDS = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go']

def play_audio(keyword, file_name):
    """Load the recording stored in `file_name` at SAMPLE_RATE, report
    its length, and play it back for the given keyword."""
    samples, _sr = librosa.core.load(file_name, SAMPLE_RATE)
    print(len(samples))

    print("\n--- playing recorded audio for " + keyword)
    # blocking=True waits for playback to finish before returning.
    sd.play(samples, SAMPLE_RATE, blocking=True)
    sd.stop()

def record_audio(keyword):