How to use the microsoft-cognitiveservices-speech-sdk.SpeechRecognizer class in microsoft-cognitiveservices-speech-sdk

To help you get started, we’ve selected a few microsoft-cognitiveservices-speech-sdk examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Azure-Samples / cognitive-services-speech-sdk / quickstart / js-node / index.js View on Github external
pushStream.close();
  });
  
  // we are done with the setup
  console.log("Now recognizing from: " + filename);
  
  // now create the audio-config pointing to our stream and
  // the speech config specifying the language.
  var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
  var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
  
  // setting the recognition language to English.
  speechConfig.speechRecognitionLanguage = "en-US";
  
  // create the speech recognizer.
  var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
  
  // start the recognizer and wait for a result.
  // NOTE(review): recognizeOnceAsync takes a success and an error callback;
  // both paths close the recognizer so the underlying audio/network
  // resources are released whether recognition succeeds or fails.
  recognizer.recognizeOnceAsync(
    function (result) {
      // result is the SDK's recognition result object; logged raw here.
      console.log(result);
  
      // release native resources and drop the reference so it cannot
      // be reused after close.
      recognizer.close();
      recognizer = undefined;
    },
    function (err) {
      console.trace("err - " + err);
  
      // close on the error path too, to avoid leaking the recognizer.
      recognizer.close();
      recognizer = undefined;
    });
  // 
github ruslang02 / atomos / apps / official / start / menu.js View on Github external
root.AssistantButton.classList.remove("text-danger");

            }
        }
        // NOTE(review): the "text-danger" CSS class on the assistant button
        // appears to double as the "currently listening" flag — confirm
        // against the rest of menu.js.
        if (root.AssistantButton.classList.contains("text-danger")) {
            // already listening: tear down the active recognizer and reset
            // the UI instead of starting a second session.
            if (recognizer) recognizer.dispose();
            root.AssistantButton.classList.remove("text-danger");
            if (root.Assistant.childNodes.length > 1) {
                root.Search.Input.placeholder = "Press on the microphone and start speaking".toLocaleString();
            } else showSection(root.AppsSection);
        }

        // enter listening mode: mark the button, prompt the user, and show
        // the assistant panel.
        root.AssistantButton.classList.add("text-danger");
        root.Search.Input.placeholder = "Talk in the microphone...".toLocaleString();
        showSection(root.Assistant);
        recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
        // interim hypothesis: mirror the partial transcript in the search box.
        recognizer.recognizing = (sender, event) => {
            root.Search.Input.placeholder = event.result.text + "...";
        };
        // final result for an utterance: hand it to the assistant UI.
        recognizer.recognized = (sender, event) => {
            root.Search.Input.placeholder = "";
            root.AssistantButton.classList.remove("text-danger");
            // NOTE(review): event.result is an object and is likely always
            // truthy; a reason/text check (e.g. ResultReason.RecognizedSpeech)
            // may have been intended — confirm against the SDK docs.
            if (event.result)
                root.Assistant.new(event.result);
            else {
                if (root.Assistant.childNodes.length > 1) {
                    root.Search.Input.placeholder = "Press on the microphone and start speaking".toLocaleString();
                } else showSection(root.AppsSection);
            }
        };
        // single-shot recognition; the success callback continues past this
        // visible chunk.
        recognizer.recognizeOnceAsync(
            function (result) {