How to use the tf.browser namespace in @tensorflow/tfjs

To help you get started, we've selected a few examples of tf.browser in action, based on popular ways @tensorflow/tfjs is used in public projects.

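Nearly every snippet below revolves around two calls: tf.browser.fromPixels(), which converts an image, canvas, or video element into a tensor, and tf.browser.toPixels(), which renders a tensor back to a canvas. A minimal round-trip sketch (the element IDs are placeholders):

import * as tf from '@tensorflow/tfjs';

async function demo() {
  const img = document.getElementById('source');     // hypothetical <img> element
  const canvas = document.getElementById('target');  // hypothetical <canvas> element

  // fromPixels returns an int32 tensor of shape [height, width, 3].
  const input = tf.tidy(() => tf.browser.fromPixels(img).toFloat().div(255));

  // toPixels expects floats in [0, 1] (or ints in [0, 255]).
  await tf.browser.toPixels(input, canvas);
  input.dispose();
}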

Machine-Learning-Tokyo / tfjs-workshop / web-js / src / index.js (view on GitHub)
const logits = tf.tidy(() => {
    // tf.browser.fromPixels() returns a Tensor from an image element.
    const img = tf.browser.fromPixels(imgElement).toFloat();

    const offset = tf.scalar(127.5);
    // Normalize the image from [0, 255] to [-1, 1].
    const normalized = img.sub(offset).div(offset);

    // Resize to the model's expected input size if necessary.
    let resized = normalized;
    if (img.shape[0] !== IMAGE_SIZE || img.shape[1] !== IMAGE_SIZE) {
      const alignCorners = true;
      resized = tf.image.resizeBilinear(
          normalized, [IMAGE_SIZE, IMAGE_SIZE], alignCorners);
    }

    // Reshape to a single-element batch so we can pass it to predict.
    const batched = resized.reshape([-1, IMAGE_SIZE, IMAGE_SIZE, 3]);

    startTime2 = performance.now();
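The pattern above (cast to float, shift to [-1, 1], resize, batch) is standard MobileNet-style preprocessing. Packaged as a reusable helper, it might look like this sketch (IMAGE_SIZE = 224 is an assumption about the model):

const IMAGE_SIZE = 224;  // assumed model input size

function preprocess(imgElement) {
  return tf.tidy(() => {
    const img = tf.browser.fromPixels(imgElement).toFloat();
    const normalized = img.sub(127.5).div(127.5);  // [0, 255] -> [-1, 1]
    const resized = tf.image.resizeBilinear(
        normalized, [IMAGE_SIZE, IMAGE_SIZE], /* alignCorners= */ true);
    return resized.reshape([1, IMAGE_SIZE, IMAGE_SIZE, 3]);
  });
}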
tensorflow / tfjs-models / posenet / demos / coco.js (view on GitHub)
async function testImageAndEstimatePoses(net) {
  setStatusText('Predicting...');
  document.getElementById('results').style.display = 'none';

  // Purge previous variables and free up GPU memory
  disposePoses();

  // Load an example image
  image = await loadImage(guiState.image);

  // Creates a tensor from an image
  const input = tf.browser.fromPixels(image);

  // Estimates poses
  const poses = await net.estimatePoses(input, {
    flipHorizontal: false,
    decodingMethod: 'multi-person',
    maxDetections: guiState.multiPoseDetection.maxDetections,
    scoreThreshold: guiState.multiPoseDetection.minPartConfidence,
    nmsRadius: guiState.multiPoseDetection.nmsRadius
  });
  predictedPoses = poses;
  // Free the input tensor now that the poses have been extracted.
  input.dispose();

  // Draw poses.
  drawMultiplePosesResults();

  setStatusText('');
  document.getElementById('results').style.display = 'block';
}
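estimatePoses() also accepts the image or video element directly (ImageData, HTMLImageElement, HTMLCanvasElement, HTMLVideoElement, or a Tensor3D), which sidesteps manual tensor management entirely. A single-person variant:

const poses = await net.estimatePoses(image, {
  flipHorizontal: false,
  decodingMethod: 'single-person'
});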
cloud-annotations / object-tracking-js / src / index.js (view on GitHub)
const [_rect, _Ai, _Bi, gaussFourier, fourierMatrix] = tf.tidy(() => {
      // Process image.
      const image = tf.browser.fromPixels(frame)
      const greyscaleImage = np.rgbToGrayscale(image)
      const imageCrop = greyscaleImage.slice([ymin, xmin], [height, width])
      const processedImage = np.preprocessImage(imageCrop)

      // Create a Gaussian response map centered at the region of interest.
      const center = [ymin + height / 2, xmin + width / 2]
      const gaussTensor = np.gauss(image.shape, center, SIGMA)
      const gaussCrop = gaussTensor.slice([ymin, xmin], [height, width])

      // The rectangle is always the same size, so we can calculate the
      // Fourier matrix once.
      const fourierMatrix = np.calculateFourierMatrix([height, width])

      // Calculate Ai and Bi.
      const gaussFourier = np.dft(gaussCrop, fourierMatrix)
      const imageFourier = np.dft(imageCrop, fourierMatrix)
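Note that np here is a project-local helper module, not NumPy. Its grayscale conversion can be approximated with stock tfjs ops using the usual luminance weights; a sketch (frame and the crop bounds come from the excerpt):

const imageCrop = tf.tidy(() => {
  const rgb = tf.browser.fromPixels(frame).toFloat();
  const weights = tf.tensor1d([0.2989, 0.587, 0.114]);
  const gray = rgb.mul(weights).sum(2);  // [height, width] luminance
  return gray.slice([ymin, xmin], [height, width]);
});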
tensorflow / tfjs-models / knn-classifier / demo / index.js (view on GitHub)
async function animate() {
  stats.begin();

  // Get image data from video element
  const image = tf.browser.fromPixels(video);
  let logits;
  // 'conv_preds' is the logits activation of MobileNet.
  const infer = () => mobilenet.infer(image, 'conv_preds');

  // Train class if one of the buttons is held down
  if (training !== -1) {
    logits = infer();
    // Add current image to classifier
    classifier.addExample(logits, training);

    // Reset the training bit so we only collect during clicks.
    training = -1;
  }

  // If the classifier has examples for any classes, make a prediction!
  const numClasses = classifier.getNumClasses();
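When at least one class has examples, predictClass() returns the nearest-neighbor label along with per-class confidences. A sketch of how the prediction branch typically continues (variable names follow the excerpt):

if (numClasses > 0) {
  logits = infer();
  const result = await classifier.predictClass(logits);
  console.log(`predicted ${result.label} ` +
      `(confidence ${result.confidences[result.label].toFixed(2)})`);
}

// Dispose per-frame tensors to avoid leaking GPU memory.
image.dispose();
if (logits != null) logits.dispose();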
ml5js / ml5-library / src / UNET / index.js (view on GitHub)
} = tf.tidy(() => {
      // preprocess the input image
      const tfImage = tf.browser.fromPixels(imgToPredict).toFloat();
      const resizedImg = tf.image.resizeBilinear(tfImage, [this.config.imageSize, this.config.imageSize]);
      let normTensor = resizedImg.div(tf.scalar(255));
      const batchedImage = normTensor.expandDims(0);
      // get the segmentation
      const pred = this.model.predict(batchedImage);
      
      // Add back the alpha channel to the normalized input image.
      const alpha = tf.ones([128, 128, 1]);
      normTensor = normTensor.concat(alpha, 2);

      // TODO: optimize the redundancies below, e.g. the repeated squeeze() calls.
      // Get the background mask.
      let maskBackgroundInternal = pred.squeeze([0]);
      maskBackgroundInternal = maskBackgroundInternal.tile([1, 1, 4]);
      maskBackgroundInternal = maskBackgroundInternal.sub(0.3).sign().relu().neg().add(1);
      const featureMaskInternal = maskBackgroundInternal.mul(normTensor);
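The sub(0.3).sign().relu().neg().add(1) chain is a hand-built step function: it yields 1 where the prediction is below 0.3 and 0 elsewhere. The same mask can be written more directly; a behavior-equivalent sketch:

const maskBackground = tf.tidy(() =>
    pred.squeeze([0])
        .less(0.3)      // true where the prediction falls below the threshold
        .toFloat()      // booleans -> 1.0 / 0.0
        .tile([1, 1, 4]));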
tensorflow / tfjs-examples / fashion-mnist-vae / client.js (view on GitHub)
cols.each(async function(colZ, colIndex) {
      const canvas = d3Select(this).select('canvas').node();
      const imageTensor = imageTensors[colIndex];

      // Render the results to the canvas
      tf.browser.toPixels(imageTensor, canvas).then(() => {
        tf.dispose([imageTensor]);
      });
    });
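tf.browser.toPixels() returns a promise that resolves after the pixel data has been read back from the GPU and drawn, so the .then() callback above is a safe place to dispose the tensor. Inside an async function the same pattern reads:

await tf.browser.toPixels(imageTensor, canvas);
imageTensor.dispose();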
tensorflow / tfjs-examples / mnist-acgan / index.js (view on GitHub)
// Generate one fake image for each digit.
    const sampledLabels = tf.tensor2d([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 1]);
    // The output has pixel values in the [-1, 1] interval. Normalize it
    // to the unit interval ([0, 1]).
    const t0 = tf.util.now();
    const generatedImages =
        generator.predict([latentVectors, sampledLabels]).add(1).div(2);
    generatedImages.dataSync();  // For accurate timing benchmark.
    const elapsed = tf.util.now() - t0;
    fakeImagesSpan.textContent =
        `Fake images (generation took ${elapsed.toFixed(2)} ms)`;
    // Concatenate the images horizontally into a single image.
    return tf.concat(tf.unstack(generatedImages), 1);
  });

  await tf.browser.toPixels(combinedFakes, fakeCanvas);
  tf.dispose(combinedFakes);
}
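The tf.unstack()/tf.concat() pair is a handy montage trick: unstack splits the [10, h, w] batch into ten [h, w] images, and concat along axis 1 joins them side by side. A self-contained sketch (stripCanvas is a hypothetical <canvas> element):

const batch = tf.randomUniform([10, 28, 28]);   // stand-in for generator output in [0, 1]
const strip = tf.concat(tf.unstack(batch), 1);  // shape [28, 280]
await tf.browser.toPixels(strip, stripCanvas);
tf.dispose([batch, strip]);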
vzhou842 / easy-VQA-demo / src / model.js (view on GitHub)
return loadModelPromise.then(model => {
    let imageTensor = tf.browser.fromPixels(imageData, 3);
    imageTensor = imageTensor.expandDims(0);
    imageTensor = imageTensor.div(255).sub(0.5);

    let questionTensor = tf.tensor(questionBOW);
    questionTensor = questionTensor.expandDims(0);

    let output = model.predict([imageTensor, questionTensor]);

    let [answerIndex] = output.argMax(1).arraySync();
    return ANSWERS[answerIndex];
  })
  .catch(console.error);
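One caveat: the tensors above are created outside tf.tidy(), so they remain allocated after the prediction. A sketch of the same steps with automatic cleanup (tf.tidy() may return plain values such as strings):

return loadModelPromise.then(model => tf.tidy(() => {
  const imageTensor = tf.browser.fromPixels(imageData, 3)
      .expandDims(0).div(255).sub(0.5);
  const questionTensor = tf.tensor(questionBOW).expandDims(0);
  const output = model.predict([imageTensor, questionTensor]);
  const [answerIndex] = output.argMax(1).arraySync();
  return ANSWERS[answerIndex];  // a string survives tidy's cleanup
}));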
ml5js / ml5-library / src / StyleTransfer / index.js (view on GitHub)
async transferInternal(input) {
    const image = tf.browser.fromPixels(input);
    const result = array3DToImage(tf.tidy(() => {
      // Run the image through the transformer network: three conv layers,
      // five residual blocks, then transposed convs back up to full resolution.
      const conv1 = this.convLayer(image, 1, true, 0);
      const conv2 = this.convLayer(conv1, 2, true, 3);
      const conv3 = this.convLayer(conv2, 2, true, 6);
      const res1 = this.residualBlock(conv3, 9);
      const res2 = this.residualBlock(res1, 15);
      const res3 = this.residualBlock(res2, 21);
      const res4 = this.residualBlock(res3, 27);
      const res5 = this.residualBlock(res4, 33);
      const convT1 = this.convTransposeLayer(res5, 64, 2, 39);
      const convT2 = this.convTransposeLayer(convT1, 32, 2, 42);
      const convT3 = this.convLayer(convT2, 1, false, 45);
      const outTanh = tf.tanh(convT3);
      const scaled = tf.mul(this.timesScalar, outTanh);
      const shifted = tf.add(this.plusScalar, scaled);
      const clamped = tf.clipByValue(shifted, 0, 255);
piximi / application / src / network.ts (view on GitHub)
return tensorflow.tidy(() => {
    return tensorflow.browser
      .fromPixels(imageToSquare(data.getCanvas(), 224))
      .toFloat()
      .sub(tensorflow.scalar(127.5))
      .div(tensorflow.scalar(127.5))  // map [0, 255] to [-1, 1]
      .reshape([1, 224, 224, 3]);     // single-image batch
  });
};
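The resulting [1, 224, 224, 3] tensor is ready to feed straight into a 224x224 model. A hypothetical usage sketch (the helper name preprocessImage and the model path are placeholders, not piximi's actual API; a single-output model is assumed):

const model = await tensorflow.loadLayersModel('/model/model.json');  // placeholder path
const input = preprocessImage(data);  // the helper excerpted above
const scores = model.predict(input);
scores.print();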