How to use the @tensorflow/tfjs.nextFrame function in @tensorflow/tfjs

To help you get started, we’ve selected a few @tensorflow/tfjs.nextFrame examples, based on popular ways the function is used in public projects.

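tf.nextFrame() returns a promise that resolves when the browser is ready to render the next frame, i.e. at the next requestAnimationFrame() callback. Awaiting it inside a long-running loop hands control back to the main thread between iterations, so the page can repaint and respond to input instead of freezing. All of the examples below use it this way. As a minimal sketch of the pattern (the matrix multiply and the step count are just placeholders standing in for real per-iteration work, such as a training step):

import * as tf from '@tensorflow/tfjs';

async function runWithoutFreezing() {
  for (let step = 0; step < 100; step += 1) {
    // One slice of heavy work per iteration; tf.tidy() disposes the intermediates.
    tf.tidy(() => tf.randomNormal([500, 500]).matMul(tf.randomNormal([500, 500])));
    // Resolves at the next requestAnimationFrame(), letting the browser
    // repaint and handle events before the next iteration starts.
    await tf.nextFrame();
  }
}

runWithoutFreezing();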

github ml5js / ml5-library / src / ObjectDetector / CocoSsd / index.js
async detect(inputOrCallback, cb) {
        await this.ready;
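        // Yield one frame before running detection so any pending UI work can complete.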
        await tf.nextFrame();

        let imgToPredict;
        let callback = cb;

        if (isInstanceOfSupportedElement(inputOrCallback)) {
            imgToPredict = inputOrCallback;
        } else if (typeof inputOrCallback === "object" && isInstanceOfSupportedElement(inputOrCallback.elt)) {
            imgToPredict = inputOrCallback.elt; // Handle p5.js image and video.
        } else if (typeof inputOrCallback === "object" && isInstanceOfSupportedElement(inputOrCallback.canvas)) {
            imgToPredict = inputOrCallback.canvas; // Handle p5.js graphics objects backed by a canvas.
        } else if (typeof inputOrCallback === "function") {
            imgToPredict = this.video;
            callback = inputOrCallback;
        }

        return callCallback(this.detectInternal(imgToPredict), callback);
}
github brangerbriz / tf-electron / examples / mnist / index.js
accuracyValues.push({'batch': i, 'accuracy': accuracy, 'set': 'train'});
      ui.plotAccuracies(accuracyValues);
    }

    // Call dispose on the training/test tensors to free their GPU memory.
    batch.xs.dispose();
    batch.labels.dispose();
    if (testBatch != null) {
      testBatch.xs.dispose();
      testBatch.labels.dispose();
    }

    // tf.nextFrame() returns a promise that resolves at the next call to
    // requestAnimationFrame(). By awaiting this promise we keep our model
    // training from blocking the main UI thread and freezing the browser.
    await tf.nextFrame();
  }
}
github ml5js / ml5-library / src / SoundClassifier / index.js
async classifyInternal(numberOfClasses, callback) {
    // Wait for the model to be ready
    await this.ready;
    await tf.nextFrame();

    return this.model.classify(numberOfClasses, callback);
  }
github ml5js / ml5-library / src / YOLO / index.js
async detectInternal(imgToPredict) {
    await this.ready;
    await tf.nextFrame();

    this.isPredicting = true;
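    // Preprocess the image, run the model, and decode the raw activations into
    // corner boxes inside tf.tidy() so intermediate tensors are disposed.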
    const [allBoxes, boxConfidence, boxClassProbs] = tf.tidy(() => {
      const input = imgToTensor(imgToPredict, [imageSize, imageSize]);
      const activation = this.model.predict(input);
      const [boxXY, boxWH, bConfidence, bClassProbs] = head(activation, ANCHORS, 80);
      const aBoxes = boxesToCorners(boxXY, boxWH);
      return [aBoxes, bConfidence, bClassProbs];
    });

    const [boxes, scores, classes] = await filterBoxes(allBoxes, boxConfidence, boxClassProbs, this.filterBoxesThreshold);

    // If all boxes have been filtered out
    if (boxes == null) {
      return [];
    }
github ml5js / ml5-library / src / FaceApi / index.js
async detectSingleInternal(imgToClassify, faceApiOptions) {
        await this.ready;
        await tf.nextFrame();

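        // If running on a video element that has no data yet, wait for the first frame to load.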
        if (this.video && this.video.readyState === 0) {
            await new Promise(resolve => {
                this.video.onloadeddata = () => resolve();
            });
        }

        // sets the return options if any are passed in during .detect() or .detectSingle()
        this.config = this.setReturnOptions(faceApiOptions);

        const {
            withLandmarks,
            withExpressions,
            withDescriptors
        } = this.config;
github Machine-Learning-Tokyo / tfjs-workshop / char-rnn / src / index.js
console.log('generating');
    let generated = this.inputSeed.value;
    this.generatedSentence.innerText = generated;
    this.generateButton.disabled = true;
    this.generateButton.innerText = "Pay attention to Nietzsche's words";
    for (let i = 0; i < CHARS_TO_GENERATE; i++) {
      const indexTensor = tf.tidy(() => {
        const input = this.convert(generated);
        const prediction = this.model.predict(input).squeeze();
        return this.sample(prediction);
      });
      const index = await indexTensor.data();
      indexTensor.dispose();
      generated += indices_char[index];
      this.generatedSentence.innerText = generated;
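      // Let the browser repaint so the new character is visible before generating the next one.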
      await tf.nextFrame();
    }
    this.enableGeneration();
  }
github ml5js / ml5-library / src / FeatureExtractor / Mobilenet.js
async classifyInternal(imgToPredict) {
    if (this.usageType !== 'classifier') {
      throw new Error('Mobilenet Feature Extraction has not been set to be a classifier.');
    }
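    // Yield one frame before predicting so pending UI updates can paint.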
    await tf.nextFrame();
    this.isPredicting = true;
    const predictedClasses = tf.tidy(() => {
      const imageResize = (imgToPredict === this.video) ? null : [IMAGE_SIZE, IMAGE_SIZE];
      const processedImg = imgToTensor(imgToPredict, imageResize);
      const predictions = this.jointModel.predict(processedImg);
      return Array.from(predictions.as1D().dataSync());
    });
    const results = predictedClasses.map((confidence, index) => {
      const label = (this.mapStringToIndex.length > 0 && this.mapStringToIndex[index]) ? this.mapStringToIndex[index] : index;
      return {
        label,
        confidence,
      };
    }).sort((a, b) => b.confidence - a.confidence);
    return results;
  }
github ml5js / ml5-library / src / Pix2pix / index.js
const layerInput = tf.concat([layers[layers.length - 1], layers[0]], 2);
      let rectified2 = tf.relu(layerInput);
      filter = this.variables['generator/decoder_1/conv2d_transpose/kernel'];
      const bias3 = this.variables['generator/decoder_1/conv2d_transpose/bias'];
      convolved = Pix2pix.deconv2d(rectified2, filter, bias3);
      rectified2 = tf.tanh(convolved);
      layers.push(rectified2);

      const output = layers[layers.length - 1];
      const deprocessedOutput = Pix2pix.deprocess(output);

      return deprocessedOutput;
    }));

    await tf.nextFrame();
    return result;
  }
github ixartz / handwritten-digit-recognition-tensorflowjs / src / classifiers / handwriting-digits-classifier.js
validationData,
        epochs: 1,
      });

      const loss = history.history.loss[0];
      const accuracy = history.history.acc[0];

      yield put(addLossPoint(i, loss));

      if (validationData != null) {
        yield put(addAccuracyPoint(i, accuracy));
      }

      tf.dispose([batch, validationData]);

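      // Yielding the tf.nextFrame() promise pauses the saga until the next animation
      // frame, keeping the page responsive between training iterations.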
      yield tf.nextFrame();
    }

    if (save) {
      yield this.model.save(`downloads://${HandwritingDigitsClassifier.CLASSIFIER_NAME}`);
    }
  }
github MindExMachina / smartgeometry / projects / tiny-yolo-follow / src / index.js
}

                log__coordinates.innerHTML = '[' + Math.round(relativeX) + ', ' + Math.round(relativeY) + ']';
                log__actions.innerHTML = actionsText;
            }
        });

        if (!foundTrackingClass) {

            log__coordinates.innerHTML = 'Looking for ' + trackingClass + '...';
            log__actions.innerHTML = '<span class="u-idle">idle</span>';

        }

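        // Wait for the next animation frame before running the tracking loop again.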
        await tf.nextFrame();
    }
}