"use strict";
// pull in the required packages.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");
// replace with your own subscription key,
// service region (e.g., "westus"), and
// the name of the file you want to run
// through the speech recognizer.
var subscriptionKey = "YourSubscriptionKey";
var serviceRegion = "YourServiceRegion"; // e.g., "westus"
var filename = "YourAudioFile.wav"; // 16000 Hz, Mono
// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream();
// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function(arrayBuffer) {
  pushStream.write(arrayBuffer.slice());
}).on('end', function() {
  pushStream.close();
});
// we are done with the setup
console.log("Now recognizing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
// setting the recognition language to English.
speechConfig.speechRecognitionLanguage = "en-US";
// create the speech recognizer.
var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
// start the recognizer and wait for a result.
recognizer.recognizeOnceAsync(
  function (result) {
    console.log(result);
    recognizer.close();
    recognizer = undefined;
  },
  function (err) {
    console.trace("err - " + err);
    recognizer.close();
    recognizer = undefined;
  });
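// A sketch (not part of the original sample) of a more selective result callback that
// could be passed to recognizeOnceAsync instead of dumping the whole result object.
// ResultReason, result.text, and result.errorDetails are standard Speech SDK members;
// the branching below is only illustrative.
function onRecognitionResult(result) {
  if (result.reason === sdk.ResultReason.RecognizedSpeech) {
    console.log("RECOGNIZED: " + result.text);
  } else if (result.reason === sdk.ResultReason.NoMatch) {
    console.log("NOMATCH: speech could not be recognized.");
  } else if (result.reason === sdk.ResultReason.Canceled) {
    console.log("CANCELED: " + result.errorDetails);
  }
}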
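//
// The fragment below (from botframework-directlinespeech) builds the BotFrameworkConfig
// used for a Direct Line Speech connection. BotFrameworkConfig and PropertyId come from
// microsoft-cognitiveservices-speech-sdk; variables such as textNormalization, userID,
// username, authorizationToken, subscriptionKey, region, and speechRecognitionLanguage
// are assumed to be supplied by the surrounding factory function.
//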
if (textNormalization) {
  console.warn(
    'botframework-directlinespeech: Text normalization is currently not supported; ignoring "textNormalization".'
  );
}
if (userID || username) {
  console.warn(
    'botframework-directlinespeech: Custom "userId" and "username" are currently not supported and are ignored.'
  );
}
let config;
if (authorizationToken) {
  config = BotFrameworkConfig.fromAuthorizationToken(authorizationToken, region);
} else {
  config = BotFrameworkConfig.fromSubscription(subscriptionKey, region);
}
// Supported options can be found in DialogConnectorFactory.js.
// Set the language used for recognition.
config.setProperty(PropertyId.SpeechServiceConnection_RecoLanguage, speechRecognitionLanguage);
// The following code sets the output format.
// As advised by the Speech team, this API may be subject to future changes.
// We are not enabling output format option because it does not send detailed output format to the bot, rendering this option useless.
// config.setProperty(PropertyId.SpeechServiceResponse_OutputFormatOption, OutputFormat[OutputFormat.Detailed]);
// Set the user ID for starting the conversation.
userID && config.setProperty(PropertyId.Conversation_From_Id, userID);
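// A hedged sketch of how a BotFrameworkConfig like this is typically consumed: the Speech
// SDK's DialogServiceConnector (imported from microsoft-cognitiveservices-speech-sdk, like
// BotFrameworkConfig) is created from the config plus an AudioConfig and then listens for a
// single utterance. The adapter wiring used by the real library is omitted; this is only an
// illustration.
const dialogAudioConfig = AudioConfig.fromDefaultMicrophoneInput();
const dialogServiceConnector = new DialogServiceConnector(config, dialogAudioConfig);
dialogServiceConnector.connect();
dialogServiceConnector.listenOnceAsync(
  result => console.log("Recognized: " + result.text),
  error => console.error(error)
);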
//
// Speech synthesis to an audio file. This fragment is the body of the sample's
// main(settings, filename) function, so settings and filename are supplied by the caller.
// now create the audio-config pointing to the output file.
// You can also use audio output stream to initialize the audio config, see the docs for details.
var audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
var speechConfig = sdk.SpeechConfig.fromSubscription(settings.subscriptionKey, settings.serviceRegion);
// setting the synthesis language, voice name, and output audio format.
// see https://aka.ms/speech/tts-languages for available languages and voices
speechConfig.speechSynthesisLanguage = settings.language;
speechConfig.speechSynthesisVoiceName = "en-US-AriaRUS";
speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3;
var readline = require("readline");
var rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});
// create the speech synthesizer.
var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
// Before beginning speech synthesis, set up the callbacks to be invoked when an event occurs.
// The synthesizing event signals that a synthesized audio chunk has been received.
// You will receive one or more synthesizing events as a speech phrase is synthesized.
// You can use this callback to receive the synthesized audio as it streams in.
synthesizer.synthesizing = function (s, e) {
  var str = "(synthesizing) Reason: " + sdk.ResultReason[e.result.reason] + " Audio chunk length: " + e.result.audioData.byteLength;
  console.log(str);
};
// The synthesisCompleted event signals that the synthesis has finished.
synthesizer.synthesisCompleted = function (s, e) {
  console.log("(synthesized) Reason: " + sdk.ResultReason[e.result.reason] + " Audio length: " + e.result.audioData.byteLength);
};
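// A sketch of how the synthesizer above is typically driven, closely mirroring the SDK
// sample but simplified here: prompt for text on stdin, synthesize it to the output file,
// then release the synthesizer. Error handling is minimal and for illustration only.
rl.question("Type some text that you want to speak...\n> ", function (text) {
  rl.close();
  synthesizer.speakTextAsync(
    text,
    function (result) {
      if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
        console.log("Synthesis finished; audio written to " + filename);
      } else {
        console.log("Synthesis failed: " + result.errorDetails);
      }
      synthesizer.close();
      synthesizer = undefined;
    },
    function (err) {
      console.trace("err - " + err);
      synthesizer.close();
      synthesizer = undefined;
    });
});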
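//
// Helper used by the recognition sample above: wrap a WAV file in a push stream so it can
// be passed to sdk.AudioConfig.fromStreamInput.
//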
function openPushStream(filename) {
  // create the push stream we need for the speech sdk.
  var pushStream = sdk.AudioInputStream.createPushStream();
  // open the file and push it to the push stream.
  fs.createReadStream(filename).on('data', function(arrayBuffer) {
    pushStream.write(arrayBuffer.slice());
  }).on('end', function() {
    pushStream.close();
  });
  return pushStream;
}
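// Example usage (illustrative): build an AudioConfig for a recognizer from the helper above.
var fileAudioConfig = sdk.AudioConfig.fromStreamInput(openPushStream("YourAudioFile.wav"));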