How to use the v1p1beta1 API namespace of the @google-cloud/speech package

To help you get started, we’ve selected a few @google-cloud/speech examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github googleapis / nodejs-speech / samples / recognize.js View on Github external
/**
 * Transcribes an audio file stored in Google Cloud Storage using a
 * caller-selected recognition model (e.g. phone_call, video, default).
 *
 * @param {string} gcsUri - GCS URI of the audio file, e.g. gs://my-bucket/audio.raw
 * @param {string} model - Recognition model to use, e.g. phone_call, video, default
 * @param {string} encoding - Encoding of the audio file, e.g. LINEAR16
 * @param {number} sampleRateHertz - Sample rate of the audio, e.g. 16000
 * @param {string} languageCode - BCP-47 language code, e.g. en-US
 */
async function syncRecognizeModelSelectionGCS(
  gcsUri,
  model,
  encoding,
  sampleRateHertz,
  languageCode
) {
  // [START speech_transcribe_model_selection_gcs]
  // Imports the Google Cloud client library for Beta API
  /**
   * TODO(developer): Update client library import to use new
   * version of API when desired features become available
   */
  const speech = require('@google-cloud/speech').v1p1beta1;

  // Creates a client
  const client = new speech.SpeechClient();

  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const gcsUri = 'gs://my-bucket/audio.raw';
  // const model = 'Model to use, e.g. phone_call, video, default';
  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
  // const sampleRateHertz = 16000;
  // const languageCode = 'BCP-47 language code, e.g. en-US';

  // Recognition settings built from the caller-supplied arguments.
  const config = {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
  // NOTE(review): this excerpt is truncated here; the full sample continues
  // with the model/languageCode settings and the client.recognize() call.
github dtinth / discord-transcriber / bot.js View on Github external
// @ts-check
const Discord = require('discord.js')
const fs = require('fs')
const { execFile } = require('child_process')

// Read the bot's settings from the JSON config file resolved relative
// to this script.
const config = JSON.parse(
  fs.readFileSync(require.resolve('./discord.config.json'), 'utf8')
)

// Speech-to-Text client using the beta (v1p1beta1) API surface,
// authenticated with a service-account key file.
// @ts-ignore
const speech = require('@google-cloud/speech').v1p1beta1
const speechClient = new speech.SpeechClient({
  keyFilename: 'google-cloud.credentials.json'
})

// Application-wide logger.
const pino = require('pino')({ prettyPrint: true, level: 'trace' })

// Fail fast on unhandled promise rejections so an external process
// manager (e.g. pm2 or Docker) can restart the bot in a clean state.
process.on('unhandledRejection', (reason) => {
  throw reason
})
github googleapis / nodejs-speech / samples / v1p1beta1 / speech_contexts_classes_beta.js View on Github external
// DO NOT EDIT! This is a generated sample ("Request",  "speech_contexts_classes_beta")
'use strict';

// sample-metadata:
//   title:
//   description: Performs synchronous speech recognition with static context classes.
//   usage: node samples/v1p1beta1/speech_contexts_classes_beta.js [--sample_rate_hertz 24000] [--language_code "en-US"] [--phrase "$TIME"] [--uri_path "gs://cloud-samples-data/speech/time.mp3"]

// [START speech_contexts_classes_beta]
// [START speech_contexts_classes_beta_core]

// Beta (v1p1beta1) surface of the Speech-to-Text client library.
const speech = require('@google-cloud/speech').v1p1beta1;

/**
 * Performs synchronous speech recognition with static context classes.
 *
 * @param sampleRateHertz {number} Sample rate in Hertz of the audio data sent in all
 * `RecognitionAudio` messages. Valid values are: 8000-48000.
 * @param languageCode {string} The language of the supplied audio.
 * @param phrase {string} Phrase "hints" help Speech-to-Text API recognize the specified phrases from
 * your audio data. In this sample we are using a static class phrase ($TIME). Classes represent
 * groups of words that represent common concepts that occur in natural language. We recommend
 * checking out the docs page for more info on static classes.
 * @param uriPath {string} Path to the audio file stored on GCS.
 */
function sampleRecognize(sampleRateHertz, languageCode, phrase, uriPath) {
  const client = new speech.SpeechClient();
  // const sampleRateHertz = 24000;
  // NOTE(review): this excerpt is truncated here; the full sample builds the
  // request config (including speechContexts) and calls client.recognize().
github googleapis / nodejs-speech / samples / recognize.v1p1beta1.js View on Github external
/**
 * Transcribes a local audio file, attaching recognition metadata
 * (interaction type, microphone distance, ...) to the request.
 *
 * @param {string} filename - Local path to the audio file, e.g. /path/to/audio.raw
 * @param {string} encoding - Encoding of the audio file, e.g. LINEAR16
 * @param {number} sampleRateHertz - Sample rate of the audio, e.g. 16000
 * @param {string} languageCode - BCP-47 language code, e.g. en-US
 */
async function syncRecognizeWithMetaData(
  filename,
  encoding,
  sampleRateHertz,
  languageCode
) {
  // [START speech_transcribe_recognition_metadata_beta]
  // Imports the Google Cloud client library for Beta API
  /**
   * TODO(developer): Update client library import to use new
   * version of API when desired features become available
   */
  const speech = require('@google-cloud/speech').v1p1beta1;
  const fs = require('fs');

  // Creates a client
  const client = new speech.SpeechClient();

  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const filename = 'Local path to audio file, e.g. /path/to/audio.raw';
  // const encoding = 'Encoding of the audio file, e.g. LINEAR16';
  // const sampleRateHertz = 16000;
  // const languageCode = 'BCP-47 language code, e.g. en-US';

  // Extra context about how the audio was captured; sent with the request.
  const recognitionMetadata = {
    interactionType: 'DISCUSSION',
    microphoneDistance: 'NEARFIELD',
  // NOTE(review): this excerpt is truncated here; the full sample completes
  // the metadata object and passes it in the recognize() request config.
github googleapis / nodejs-speech / samples / betaFeatures.js View on Github external
/**
 * Transcribes a local multi-channel audio file, producing a separate
 * recognition result for each channel.
 *
 * @param {string} fileName - Local path to the audio file, e.g. /path/to/audio.raw
 */
async function speechTranscribeMultiChannel(fileName) {
  // [START speech_transcribe_multichannel_beta]
  const fs = require('fs');

  // Imports the Google Cloud client library
  const speech = require('@google-cloud/speech').v1p1beta1;

  // Creates a client
  const client = new speech.SpeechClient();

  /**
   * TODO(developer): Uncomment the following lines before running the sample.
   */
  // const fileName = 'Local path to audio file, e.g. /path/to/audio.raw';

  // Two-channel audio with per-channel recognition enabled.
  const config = {
    encoding: `LINEAR16`,
    languageCode: `en-US`,
    audioChannelCount: 2,
    enableSeparateRecognitionPerChannel: true,
  };
  // NOTE(review): this excerpt is truncated here; the full sample reads the
  // file, builds the request, and calls client.recognize().
github googleapis / nodejs-speech / samples / infiniteStreaming.js View on Github external
// [START speech_transcribe_infinite_streaming]
// NOTE(review): this excerpt is the interior of a function; `encoding`,
// `sampleRateHertz`, `languageCode` and `streamingLimit` are supplied by
// the enclosing scope, which is not shown here.

  // const encoding = 'LINEAR16';
  // const sampleRateHertz = 16000;
  // const languageCode = 'en-US';
  // const streamingLimit = 10000; // ms - set to low number for demo purposes

  const chalk = require('chalk');
  const {Transform} = require('stream');

  // Node-Record-lpcm16
  const recorder = require('node-record-lpcm16');

  // Imports the Google Cloud client library
  // Currently, only v1p1beta1 contains result-end-time
  const speech = require('@google-cloud/speech').v1p1beta1;

  const client = new speech.SpeechClient();

  // Audio format settings for the streaming request.
  const config = {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
    languageCode: languageCode,
  };

  // interimResults streams partial hypotheses before the final transcript.
  const request = {
    config,
    interimResults: true,
  };

  // Mutable streaming state: the active stream (null until started) and a
  // counter of how many times the stream has been restarted.
  let recognizeStream = null;
  let restartCounter = 0;
github googleapis / nodejs-speech / samples / v1p1beta1 / speech_quickstart_beta.js View on Github external
// DO NOT EDIT! This is a generated sample ("Request",  "speech_quickstart_beta")
'use strict';

// sample-metadata:
//   title:
//   description: Performs synchronous speech recognition on an audio file.
//   usage: node samples/v1p1beta1/speech_quickstart_beta.js [--sample_rate_hertz 44100] [--language_code "en-US"] [--uri_path "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"]

// [START speech_quickstart_beta]
// [START speech_quickstart_beta_core]

// Beta (v1p1beta1) surface of the Speech-to-Text client library.
const speech = require('@google-cloud/speech').v1p1beta1;

/**
 * Performs synchronous speech recognition on an audio file.
 *
 * @param sampleRateHertz {number} Sample rate in Hertz of the audio data sent in all
 * `RecognitionAudio` messages. Valid values are: 8000-48000.
 * @param languageCode {string} The language of the supplied audio.
 * @param uriPath {string} Path to the audio file stored on GCS.
 */
function sampleRecognize(sampleRateHertz, languageCode, uriPath) {
  const client = new speech.SpeechClient();
  // const sampleRateHertz = 44100;
  // const languageCode = 'en-US';
  // const uriPath = 'gs://cloud-samples-data/speech/brooklyn_bridge.mp3';
  const encoding = 'MP3';
  const config = {
  // NOTE(review): this excerpt is truncated here; the full sample finishes
  // the config object and calls client.recognize().
github googleapis / nodejs-speech / samples / v1p1beta1 / speech_adaptation_beta.js View on Github external
// DO NOT EDIT! This is a generated sample ("Request",  "speech_adaptation_beta")
'use strict';

// sample-metadata:
//   title:
//   description: Performs synchronous speech recognition with speech adaptation.
//   usage: node samples/v1p1beta1/speech_adaptation_beta.js [--sample_rate_hertz 44100] [--language_code "en-US"] [--phrase "Brooklyn Bridge"] [--boost 20.0] [--uri_path "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"]

// [START speech_adaptation_beta]
// [START speech_adaptation_beta_core]

// Beta (v1p1beta1) surface of the Speech-to-Text client library.
const speech = require('@google-cloud/speech').v1p1beta1;

/**
 * Performs synchronous speech recognition with speech adaptation.
 *
 * @param sampleRateHertz {number} Sample rate in Hertz of the audio data sent in all
 * `RecognitionAudio` messages. Valid values are: 8000-48000.
 * @param languageCode {string} The language of the supplied audio.
 * @param phrase {string} Phrase "hints" help Speech-to-Text API recognize the specified phrases from
 * your audio data.
 * @param boost {number} Positive value will increase the probability that a specific phrase will be
 * recognized over other similar sounding phrases.
 * @param uriPath {string} Path to the audio file stored on GCS.
 */
function sampleRecognize(
  sampleRateHertz,
  languageCode,
  // NOTE(review): this excerpt is truncated here; the full signature also
  // takes phrase, boost, and uriPath (see the JSDoc above).
github dtinth / vx / vx.js View on Github external
#!/usr/bin/env node
// Loads environment variables from the .env file next to this script.
require('dotenv').config({ path: __dirname + '/.env' })
const record = require('node-record-lpcm16')
// Beta (v1p1beta1) surface of the Speech-to-Text client library.
const speech = require('@google-cloud/speech').v1p1beta1
const ora = require('ora')

/**
 * Streams microphone audio to the Speech-to-Text API.
 *
 * @param {object} opts - Options; `opts.th` switches to Thai recognition
 *   (with en-US as an alternative language) instead of English.
 */
function streamingMicRecognize(opts) {
  const client = new speech.SpeechClient()
  const spinner = ora('Initializing...').start()
  const request = {
    config: {
      encoding: 'LINEAR16',
      sampleRateHertz: 16000,
      // Thai mode uses the 'default' model; English uses the enhanced
      // 'video' model.
      languageCode: opts.th ? 'th' : 'en-US',
      model: opts.th ? 'default' : 'video',
      useEnhanced: true,
      enableAutomaticPunctuation: true,
      alternativeLanguageCodes: opts.th ? ['en-US'] : []
    },
    interimResults: true
    // NOTE(review): this excerpt is truncated here; the full script starts
    // the recording stream and pipes it into a streaming recognize request.
github CitizensFoundation / active-citizen / workers / speech_to_text.js View on Github external
const i18n = require('../utils/i18n');
const toJson = require('../utils/to_json');
const _ = require('lodash');
const getAnonymousUser = require('../utils/get_anonymous_system_user');
// NOTE(review): legacy `var`; elsewhere this file uses `const`.
var downloadFile = require('download-file');
const fs = require('fs');

// Google clients are only loaded when the required credentials and bucket
// environment variables are present; otherwise these stay undefined.
let speech, Storage;
let GOOGLE_APPLICATION_CREDENTIALS;
if (process.env.GOOGLE_APPLICATION_CREDENTIALS_JSON && process.env.GOOGLE_TRANSCODING_FLAC_BUCKET) {
  const config = {
    projectId: 'neon-particle-735',
    credentials: JSON.parse(process.env.GOOGLE_APPLICATION_CREDENTIALS_JSON)
  };
  GOOGLE_APPLICATION_CREDENTIALS=JSON.parse(process.env.GOOGLE_APPLICATION_CREDENTIALS_JSON);
  speech = require('@google-cloud/speech').v1p1beta1;
  Storage = require('@google-cloud/storage').Storage;
}

// Optional error reporting, enabled only when Airbrake is configured.
let airbrake = null;
if(process.env.AIRBRAKE_PROJECT_ID) {
  airbrake = require('../utils/airbrake');
}

let VoiceToTextWorker = function () {};

// Language table: [BCP-47 code, flag, flag]. NOTE(review): misspelled
// identifier ("Languges") kept as-is — other code references it. The meaning
// of the two boolean columns is not visible in this excerpt — TODO confirm.
const supportedGoogleLanguges = [
  ['af-ZA',true,false],
  ['am-ET',true,false],
  ['hy-AM',true,false],
  ['az-AZ',true,false],
  ['id-ID',true,false],
  // NOTE(review): this excerpt is truncated here; the table continues.