How to use the microsoft-cognitiveservices-speech-sdk.SpeechConfig function in microsoft-cognitiveservices-speech-sdk

To help you get started, we’ve selected a few examples of the SpeechConfig function from microsoft-cognitiveservices-speech-sdk, based on popular ways it is used in public projects.

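All three examples share the same entry point: create a SpeechConfig from a subscription key and service region, then hand it to a recognizer or synthesizer together with an AudioConfig. A minimal sketch (the key and region are placeholders you must supply):

  var sdk = require("microsoft-cognitiveservices-speech-sdk");

  // placeholder credentials -- substitute your own subscription key and region
  var speechConfig = sdk.SpeechConfig.fromSubscription("<subscription-key>", "<region>");
  speechConfig.speechRecognitionLanguage = "en-US";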

Azure-Samples/cognitive-services-speech-sdk – quickstart/js-node/index.js (view on GitHub)
  var sdk = require("microsoft-cognitiveservices-speech-sdk");
  var fs = require("fs");

  // subscriptionKey, serviceRegion, and filename are defined earlier in the full sample.
  var pushStream = sdk.AudioInputStream.createPushStream();
  
  // open the file and push it to the push stream.
  fs.createReadStream(filename).on('data', function(arrayBuffer) {
    pushStream.write(arrayBuffer.slice());
  }).on('end', function() {
    pushStream.close();
  });
  
  // we are done with the setup
  console.log("Now recognizing from: " + filename);
  
  // now create the audio-config pointing to our stream and
  // the speech config specifying the language.
  var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
  var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
  
  // setting the recognition language to English.
  speechConfig.speechRecognitionLanguage = "en-US";
  
  // create the speech recognizer.
  var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
  
  // start the recognizer and wait for a result.
  recognizer.recognizeOnceAsync(
    function (result) {
      console.log(result);
  
      recognizer.close();
      recognizer = undefined;
    },
    function (err) {
      console.trace("err - " + err);

      recognizer.close();
      recognizer = undefined;
    });
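If the input is a single complete WAV file, a simpler alternative to the push stream is to hand the SDK the file contents directly. This is a sketch, not part of the original sample; recent versions of the JS SDK accept a Node Buffer here:

  // read the whole file up front instead of streaming it chunk by chunk
  var audioConfig = sdk.AudioConfig.fromWavFileInput(fs.readFileSync(filename));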
Azure-Samples/cognitive-services-speech-sdk – samples/js/node/synthesis.js (view on GitHub)
    main: function(settings, filename) {
        // assumes, earlier in the file:
        //   var sdk = require("microsoft-cognitiveservices-speech-sdk");
        //   var readline = require("readline");

        // now create the audio-config pointing to the output file.
        // You can also use an audio output stream to initialize the audio config; see the docs for details.
        var audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
        var speechConfig = sdk.SpeechConfig.fromSubscription(settings.subscriptionKey, settings.serviceRegion);

        // setting the synthesis language, voice name, and output audio format.
        // see https://aka.ms/speech/tts-languages for available languages and voices
        speechConfig.speechSynthesisLanguage = settings.language;
        speechConfig.speechSynthesisVoiceName = "en-US-AriaRUS";
        speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3;

        var rl = readline.createInterface({
            input: process.stdin,
            output: process.stdout
        });

        // create the speech synthesizer.
        var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);

        // Before beginning speech synthesis, set up the callbacks to be invoked when an event occurs.
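The excerpt ends here; in the full sample the readline prompt supplies the text to synthesize. A minimal continuation along those lines (the result handling is a sketch, not the sample's exact code):

        rl.question("Type some text that you want to speak...\n> ", function (text) {
            rl.close();
            synthesizer.speakTextAsync(
                text,
                function (result) {
                    // on success the audio has been written to the output file
                    if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
                        console.log("Synthesis finished: " + filename);
                    } else {
                        console.error("Synthesis failed: " + result.errorDetails);
                    }
                    synthesizer.close();
                },
                function (err) {
                    console.trace("err - " + err);
                    synthesizer.close();
                });
        });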
ruslang02/atomos – apps/official/start/menu.js (view on GitHub)
case "ready":
                        assistantWorker.postMessage({
                            action: "sendKeys",
                            keys: {
                                AccuWeatherkey,
                                SpeechSDKkey
                            }
                        });
                        break;
                    case "reply":
                        root.Assistant.add(e.data.reply, true);
                }
            };
            try {
                audioConfig = sdk.AudioConfig.fromDefaultMicrophoneInput();
                speechConfig = sdk.SpeechConfig.fromSubscription(SpeechSDKkey, serviceRegion);
                speechConfig.speechRecognitionLanguage = "en-US";
            } catch (e) {
                root.Assistant.add("Uh oh!<br><br>There is no <b>Azure Cognitive Services key</b> present in system. Add it in Settings and try again.", true);
                root.Search.Input.placeholder = "";
                root.AssistantButton.classList.remove("text-danger");

            }
        }
        if (root.AssistantButton.classList.contains("text-danger")) {
            if (recognizer) recognizer.close();
            root.AssistantButton.classList.remove("text-danger");
            if (root.Assistant.childNodes.length > 1) {
                root.Search.Input.placeholder = "Press on the microphone and start speaking".toLocaleString();
            } else showSection(root.AppsSection);
        }