How to use stt - common examples

To help you get started, we've selected a few stt examples based on popular ways it is used in public projects.


github thomasweng15 / E.V.E. / jarvis / jarvis.py
def main():
	speaker = tts.Google()

	# test internet connection
	if not internet_on():
		speaker.play_wav("./jarvis/wav/internet_err.wav")
		return

	try:
		speaker.play_wav("./jarvis/wav/yes.wav")

		audioInput = Microphone()
		audioInput.listen()

		# init new Voice class associated with audio 
		speech_to_text = stt.Google(audioInput)

		# convert audio file into text and init a new Job class with text
		recorded_text = speech_to_text.get_text()
		job = Job(recorded_text)

		# parse commands 
		first_word = (recorded_text.split(' ')[0]).lower() 
		second_word = ""

		if recorded_text.find(' ') >= 1:
			second_word = (recorded_text.split(' ')[1]).lower()

		# initialize controller for web browser
		controller = webbrowser.get()

		# execute commands based on first word in query
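
The snippet above is truncated before the dispatch logic, but it already shows the pattern every E.V.E. entry point repeats: record with Microphone, transcribe with stt.Google, then branch on the first one or two words. A minimal distillation of that flow (Microphone, stt, and Job come from the project; the helper name transcribe_command is ours, not E.V.E.'s):

def transcribe_command():
	# record a phrase from the default microphone
	audio = Microphone()
	audio.listen()

	# hand the recording to Google's recognizer and fetch the transcript
	recorded_text = stt.Google(audio).get_text()

	# split out the first two words for command dispatch
	words = recorded_text.lower().split(' ')
	first_word = words[0] if words else ""
	second_word = words[1] if len(words) > 1 else ""
	return Job(recorded_text), first_word, second_word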
github thomasweng15 / E.V.E. / brain / listen.py
def listen(self):
		try:
			audioInput = Microphone()
			audioInput.listen()
	 
			speech_to_text = stt.Google(audioInput)
			recorded_text = speech_to_text.get_text()
			job = Job(recorded_text)

			controller = webbrowser.get() # initialize controller for web browser

			# parse first and second words in command
			first_word = (recorded_text.split(' ')[0]).lower() 
			if recorded_text.find(' ') >= 1:
				second_word = (recorded_text.split(' ')[1]).lower()
			else:
				second_word = ""

			# execute commands based on first word in query
			if first_word == "stop" or first_word == "no" \
					or recorded_text.find('no stop') != -1:
				print "---Accidental recording---"
github thomasweng15 / E.V.E. / hal-e / hal-e.py
def listen():
	print "Initializing..."
	speaker = tts.Google()
	
	if not internet_on():
		print("No Internet connection.")
		speaker.play_wav("./hal-e/wav/internet_err.wav")
		return

	speaker.play_wav("./hal-e/wav/yes.wav")

	try:
		audioInput = Microphone()
		audioInput.listen()
 
		speech_to_text = stt.Google(audioInput)

		recorded_text = speech_to_text.get_text()
		job = Job(recorded_text)

		# parse first and second words in command
		first_word = (recorded_text.split(' ')[0]).lower() 
		if recorded_text.find(' ') >= 1:
			second_word = (recorded_text.split(' ')[1]).lower()
		else:
			second_word = ""

		# initialize controller for web browser
		controller = webbrowser.get()

		# execute commands based on first word in query
		if first_word == "open":
github thomasweng15 / E.V.E. / brain / brain.py
def set_job(self):
		"""
		Send audio input to Google's Speech to text
		engine to be interpreted, and then init a Job class 
		with the returned text if successful. 

		"""
		speech_to_text = stt.Google(self.audioInput)
		try:
			recorded_text = speech_to_text.get_text().lower()
			return Job(recorded_text)
		except NotUnderstoodException:
			print "Sorry, I didn't get that."
			self.speaker.play_wav("./wav/didntget.wav")
			return None
		except ConnectionLostException:
			print "No connection."
			self.speaker.play_wav("./wav/internet_err.wav")
			return None
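
Because set_job returns None on both recognition and connection failures, callers only need one null check before acting on the result. A sketch of the calling side (the loop and the handle method are assumptions about the surrounding Brain class, not code from the project):

while True:
	job = brain.set_job()
	if job is None:
		continue  # recognition failed; the error wav has already played
	brain.handle(job)  # hypothetical dispatch of the parsed Job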
github NaomiProject / Naomi / client / musicmode.py
with open("sentences_spotify.txt", "w") as f:
            f.write("\n".join(original) + "\n")
            f.write("<s> \n </s> \n")
            f.close()

        # make language model
        os.system(
            "text2idngram -vocab sentences_spotify.txt &lt; sentences_spotify.txt -idngram spotify.idngram")
        os.system(
            "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm")

        # create a new mic with the new music models
        self.mic = Mic(
            mic.speaker,
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm", dictd_music="dictionary_spotify.dic"),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm", dictd_music="dictionary_spotify.dic")
        )
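
The two os.system calls drive the CMU-Cambridge SLM toolkit: text2idngram counts id n-grams over the corpus, and idngram2lm turns those counts into an ARPA-format language model that PocketSphinxSTT can load. If you adapt this, subprocess.run gives the same pipeline with failure detection, which os.system silently swallows (a sketch under that assumption, not what musicmode.py itself does):

import subprocess

# text2idngram reads the corpus on stdin; check=True raises
# CalledProcessError if either toolkit command exits non-zero
with open("sentences_spotify.txt") as corpus:
    subprocess.run(
        ["text2idngram", "-vocab", "sentences_spotify.txt",
         "-idngram", "spotify.idngram"],
        stdin=corpus, check=True)

subprocess.run(
    ["idngram2lm", "-idngram", "spotify.idngram",
     "-vocab", "sentences_spotify.txt",
     "-arpa", "languagemodel_spotify.lm"],
    check=True)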
github NaomiProject / Naomi / client / main.py
print "==========================================================="

    profile = yaml.safe_load(open("profile.yml", "r"))

    try:
        api_key = profile['keys']['GOOGLE_SPEECH']
    except KeyError:
        api_key = None

    try:
        stt_engine_type = profile['stt_engine']
    except KeyError:
        print "stt_engine not specified in profile, defaulting to PocketSphinx"
        stt_engine_type = "sphinx"

    mic = Mic(speaker.newSpeaker(), stt.PocketSphinxSTT(),
              stt.newSTTEngine(stt_engine_type, api_key=api_key))

    addendum = ""
    if 'first_name' in profile:
        addendum = ", %s" % profile["first_name"]
    mic.say("How can I be of service%s?" % addendum)

    conversation = Conversation("JASPER", mic, profile)

    conversation.handleForever()
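
Working backwards from the lookups above, profile.yml needs at most three entries: an optional first_name used in the greeting, an stt_engine name that falls back to "sphinx" when absent, and a Google Speech key under keys. This is what yaml.safe_load would hand back for a minimal profile (the values are placeholders we invented):

profile = {
    "first_name": "Tom",                  # optional; appended to the greeting
    "stt_engine": "google",               # omit to default to "sphinx"
    "keys": {
        "GOOGLE_SPEECH": "your-api-key",  # optional; api_key becomes None when absent
    },
}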
github cnbeining / autotimecode / autotimecode_worker / task.py
shutil.rmtree(tmp_dir, ignore_errors = True)
        return False

    try:
        request_srt_content = json.loads('{"srt":"' + stt_task.request_srt_content + '"}')['srt']
    except (ValueError, KeyError):  # malformed JSON string content; avoid a bare except
        stt_task.add_step(TaskStep(comment = 'SRT cannot be parsed'))
        shutil.rmtree(tmp_dir, ignore_errors = True)
        return False
    
    ## Conduct STT
    # convert SRT to list of Subtitle
    subtitle = list(parse(request_srt_content))
    
    # segment audio into smaller chunks
    flac_path_list = generate_audio_segments(media_file_path, subtitle, elastic = 0.5)
    stt_task.add_step(TaskStep(comment = 'File segmented'))
    
    lang = 'en'
    if stt_task.lang:
        lang = stt_task.lang
    
    # Call API
    logging.warning(flac_path_list)
    result_recognize = recognize_segments(flac_path_list, lang_code = lang)
    logging.warning(result_recognize)
    new_subtitle = merge_result(subtitle, result_recognize)
    stt_task.add_step(TaskStep(comment = 'STT conducted'))
    
    # Clean up and put back punctuations
    if lang == 'en' and stt_task.correct:
        new_subtitle = segment_subtitle(new_subtitle)

stt

A library for doing speech recognition using a Coqui STT model

License: MPL-2.0
Latest version published 2 years ago
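
Unlike the Google and PocketSphinx wrappers in the examples above, the stt package on PyPI is Coqui STT: it runs a local model instead of calling a web service. A minimal transcription sketch (the model and wav filenames are placeholders; the audio must be 16-bit PCM mono at the model's sample rate, typically 16 kHz):

import wave

import numpy as np
from stt import Model

model = Model("model.tflite")  # path to a downloaded Coqui STT model

with wave.open("audio.wav", "rb") as wav:
    frames = wav.readframes(wav.getnframes())

# view the raw frames as 16-bit samples, the input format model.stt expects
audio = np.frombuffer(frames, dtype=np.int16)
print(model.stt(audio))  # the transcript as a plain string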
