How to use the layers API (tf.layers) in @tensorflow/tfjs-node-gpu

To help you get started, we’ve selected a few @tensorflow/tfjs-node-gpu examples, based on popular ways the library is used in public projects.

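As a quick orientation before the snippets below, here is a minimal, self-contained sketch (illustrative, not taken from tfjs-examples) of using tf.layers with @tensorflow/tfjs-node-gpu: the package is required in place of @tensorflow/tfjs so that layer operations run on the GPU-backed Node.js backend.

// Illustrative sketch: build and compile a small model with the layers API.
// The architecture here is arbitrary and only demonstrates the calls.
const tf = require('@tensorflow/tfjs-node-gpu');

const model = tf.sequential();
model.add(tf.layers.dense({units: 32, activation: 'relu', inputShape: [10]}));
model.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));
model.compile({optimizer: 'adam', loss: 'binaryCrossentropy'});
model.summary();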

github tensorflow / tfjs-examples / mnist-acgan / gan.js View on Github
  // Upsample from [3, 3, ...] to [7, 7, ...].
  cnn.add(tf.layers.conv2dTranspose({
    filters: 192,
    kernelSize: 5,
    strides: 1,
    padding: 'valid',
    activation: 'relu',
    kernelInitializer: 'glorotNormal'
  }));
  cnn.add(tf.layers.batchNormalization());

  // Upsample to [14, 14, ...].
  cnn.add(tf.layers.conv2dTranspose({
    filters: 96,
    kernelSize: 5,
    strides: 2,
    padding: 'same',
    activation: 'relu',
    kernelInitializer: 'glorotNormal'
  }));
  cnn.add(tf.layers.batchNormalization());

  // Upsample to [28, 28, ...].
  cnn.add(tf.layers.conv2dTranspose({
    filters: 1,
    kernelSize: 5,
    strides: 2,
    padding: 'same',
    activation: 'tanh',
    kernelInitializer: 'glorotNormal'
  }));

  // Unlike most TensorFlow.js models, the generator part of an ACGAN has
  // two inputs:
  //   1. The latent vector that is used as the "seed" of the fake image
  //      generation.
  //   2. A class label that controls which of the ten MNIST digit classes
  //      the generated image should belong to.
github tensorflow / tfjs-examples / mnist-acgan / gan.js View on Github
  // The latent vector ("z-space") that seeds the fake-image generation.
  const latent = tf.input({shape: [latentSize]});

  // The desired label of the generated image, an integer in the interval
  // [0, NUM_CLASSES).
  const imageClass = tf.input({shape: [1]});

  // The desired label is converted to a vector of length `latentSize`
  // through embedding lookup.
  const classEmbedding = tf.layers.embedding({
    inputDim: NUM_CLASSES,
    outputDim: latentSize,
    embeddingsInitializer: 'glorotNormal'
  }).apply(imageClass);

  // Hadamard product between z-space and a class conditional embedding.
  const h = tf.layers.multiply().apply([latent, classEmbedding]);

  const fakeImage = cnn.apply(h);
  return tf.model({inputs: [latent, imageClass], outputs: fakeImage});
}
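Once buildGenerator returns this two-input model, fake images can be produced with a single predict call. A hedged usage sketch (the latentSize value and the tensor sampling are assumptions for illustration, not code from gan.js):

// Illustrative usage: sample latent vectors and digit labels, then generate
// a batch of 28x28x1 images in the [-1, 1] range (tanh output).
const latentSize = 100;                        // assumed value
const generator = buildGenerator(latentSize);  // function shown above
const batchSize = 16;
const z = tf.randomUniform([batchSize, latentSize], -1, 1);
const sampledLabels = tf.randomUniform([batchSize, 1], 0, 10, 'int32');
const fakeImages = generator.predict([z, sampledLabels]);
console.log(fakeImages.shape);                 // [16, 28, 28, 1]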
github tensorflow / tfjs-examples / mnist-acgan / gan.js View on Github
  cnn.add(tf.layers.conv2d(
      {filters: 64, kernelSize: 3, padding: 'same', strides: 1}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 128, kernelSize: 3, padding: 'same', strides: 2}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 256, kernelSize: 3, padding: 'same', strides: 1}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.flatten());

  const image = tf.input({shape: [IMAGE_SIZE, IMAGE_SIZE, 1]});
  const features = cnn.apply(image);

  // Unlike most TensorFlow.js models, the discriminator has two outputs.

  // The 1st output is the probability score the discriminator assigns to the
  // input example being a real MNIST image (as opposed to a "fake" one
  // generated by the generator).
  const realnessScore =
      tf.layers.dense({units: 1, activation: 'sigmoid'}).apply(features);
  // The 2nd output is the softmax probabilities assigned by the discriminator
  // for the 10 MNIST digit classes (0 through 9). "aux" stands for "auxiliary"
  // (the namesake of ACGAN) and refers to the fact that unlike a standard GAN
  // (which performs just binary real/fake classification), the discriminator
  // part of ACGAN also performs multi-class classification.
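The snippet stops just before that second output is defined. A minimal sketch of the auxiliary classification head and the resulting two-output model (reconstructed to match the surrounding names, not quoted verbatim from gan.js):

  // Sketch: softmax head over the NUM_CLASSES digit classes, plus the final
  // model with both the realness score and the class probabilities as outputs.
  const aux = tf.layers.dense({units: NUM_CLASSES, activation: 'softmax'})
      .apply(features);
  return tf.model({inputs: image, outputs: [realnessScore, aux]});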
github tensorflow / tfjs-examples / mnist-acgan / gan.js View on Github
function buildGenerator(latentSize) {
  tf.util.assert(
      latentSize > 0 && Number.isInteger(latentSize),
      `Expected latent-space size to be a positive integer, but ` +
          `got ${latentSize}.`);

  const cnn = tf.sequential();

  // The number of units is chosen so that when the output is reshaped
  // and fed through the subsequent conv2dTranspose layers, the tensor
  // that comes out at the end has the exact shape that matches MNIST
  // images ([28, 28, 1]).
  cnn.add(tf.layers.dense(
      {units: 3 * 3 * 384, inputShape: [latentSize], activation: 'relu'}));
  cnn.add(tf.layers.reshape({targetShape: [3, 3, 384]}));

  // Upsample from [3, 3, ...] to [7, 7, ...].
  cnn.add(tf.layers.conv2dTranspose({
    filters: 192,
    kernelSize: 5,
    strides: 1,
    padding: 'valid',
    activation: 'relu',
    kernelInitializer: 'glorotNormal'
  }));
  cnn.add(tf.layers.batchNormalization());

  // Upsample to [14, 14, ...].
  cnn.add(tf.layers.conv2dTranspose({
    filters: 96,
    kernelSize: 5,
    strides: 2,
    padding: 'same',
    activation: 'relu',
    kernelInitializer: 'glorotNormal'
  }));
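To see why 3 * 3 * 384 dense units are the right starting point, the upsampling arithmetic can be checked directly: a transposed convolution grows the spatial size to (in - 1) * stride + kernel with 'valid' padding and to in * stride with 'same' padding. A standalone check (illustrative only, not code from gan.js):

// Quick arithmetic check of the generator's spatial shape progression.
const afterValid = (3 - 1) * 1 + 5;   // 3x3 -> 7x7 (kernel 5, stride 1, 'valid')
const afterSame1 = 7 * 2;             // 7x7 -> 14x14 (stride 2, 'same')
const afterSame2 = 14 * 2;            // 14x14 -> 28x28 (stride 2, 'same')
console.log(afterValid, afterSame1, afterSame2);  // 7 14 28, matching [28, 28, 1]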
github tensorflow / tfjs-examples / mnist-acgan / gan.js View on Github
function buildDiscriminator() {
  const cnn = tf.sequential();

  cnn.add(tf.layers.conv2d({
    filters: 32,
    kernelSize: 3,
    padding: 'same',
    strides: 2,
    inputShape: [IMAGE_SIZE, IMAGE_SIZE, 1]
  }));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 64, kernelSize: 3, padding: 'same', strides: 1}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 128, kernelSize: 3, padding: 'same', strides: 2}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 256, kernelSize: 3, padding: 'same', strides: 1}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));
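Because the finished discriminator has two outputs (the realness score and the digit-class probabilities shown earlier), it is typically compiled with one loss per output. A hedged sketch of such a compile call (the optimizer and its hyperparameters are assumptions; the exact settings in gan.js may differ):

// Illustrative sketch: one loss per output of the two-output discriminator.
const discriminator = buildDiscriminator();
discriminator.compile({
  optimizer: tf.train.adam(2e-4, 0.5),  // assumed learning rate and beta1
  loss: ['binaryCrossentropy', 'sparseCategoricalCrossentropy']
});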