How to use the tf.model function in @tensorflow/tfjs

To help you get started, we've selected a few tf.model examples, based on popular ways it is used in public projects.

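All of the snippets below follow the same pattern: build a symbolic graph by wiring layers together with apply(), then turn that graph into a trainable LayersModel with tf.model(). Here is a minimal, self-contained sketch of the pattern; the layer sizes, optimizer, and loss are illustrative only and not taken from any of the projects below.

const tf = require('@tensorflow/tfjs');

// A symbolic input tensor; layers are wired to it with `apply()`.
const input = tf.input({shape: [10]});
const hidden = tf.layers.dense({units: 4, activation: 'relu'}).apply(input);
const output = tf.layers.dense({units: 1, activation: 'sigmoid'}).apply(hidden);

// `tf.model()` turns the symbolic graph into a trainable LayersModel.
const model = tf.model({inputs: input, outputs: output});
model.compile({optimizer: 'adam', loss: 'binaryCrossentropy'});
model.predict(tf.zeros([1, 10])).print();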

github tensorflow / tfjs-examples / fashion-mnist-vae / model.js
// Builds the decoder half of the VAE (the counterpart of `encoder()` below).
function decoder(opts) {
  const {originalDim, intermediateDim, latentDim} = opts;

  // The decoder model has a linear topology and hence could be constructed
  // with `tf.sequential()`. But we use the functional-model API (i.e.,
  // `tf.model()`) here nonetheless, for consistency with the encoder model
  // (see `encoder()` above).
  const input = tf.input({shape: [latentDim]});
  let y = tf.layers.dense({
    units: intermediateDim,
    activation: 'relu'
  }).apply(input);
  y = tf.layers.dense({
    units: originalDim,
    activation: 'sigmoid'
  }).apply(y);
  const dec = tf.model({inputs: input, outputs: y});

  // console.log('Decoder Summary');
  // dec.summary();
  return dec;
}
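A call like the following would build this decoder; the dimensions are illustrative placeholders rather than the example's actual configuration.

const dec = decoder({originalDim: 784, intermediateDim: 64, latentDim: 2});
dec.summary();  // two dense layers, relu -> sigmoid, assembled with tf.model()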
github victordibia / anomagram / app / src / components / train / Train.jsx
mse.array().then(array => {
            array.forEach((element, i) => {
                // console.log({ "mse": element, "label": yTest[i] });
                mseDataHolder.push({ "mse": element, "label": this.yTest[i] })
                // console.log(mseDataHolder.length)
            });
            self.setState({ mseData: mseDataHolder })

            // console.log(mseDataHolder); 

        });



        // Generate encoder output 
        this.encoder = tf.model({ inputs: this.createdModel.inputs, outputs: this.createdModel.getLayer("encoder").getOutputAt(1) });
        let encoderPredictions = this.encoder.predict(this.xsTest)


        let encPredHolder = []
        encoderPredictions.array().then(array => {
            array.forEach((element, i) => {
                encPredHolder.push({ x: element[0], y: element[1], "label": this.yTest[i] })
            });
            self.setState({ encodedData: encPredHolder })
        })


        preds.dispose()
        encoderPredictions.dispose()
        mse.dispose()
        // console.log(tf.memory());
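The preds and mse tensors disposed above are produced earlier in the same method; a plausible sketch of that step, computing a per-example reconstruction error from the autoencoder output (the axis and variable wiring are assumptions, not copied from the project):

const preds = this.createdModel.predict(this.xsTest);
// Mean squared error for each test example, averaged over the feature axis.
const mse = tf.sub(preds, this.xsTest).square().mean(1);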
github tensorflow / tfjs-examples / simple-object-detection / train.js
async function buildObjectDetectionModel() {
  const {truncatedBase, fineTuningLayers} = await loadTruncatedBase();

  // Build the new head model.
  const newHead = buildNewHead(truncatedBase.outputs[0].shape.slice(1));
  const newOutput = newHead.apply(truncatedBase.outputs[0]);
  const model = tf.model({inputs: truncatedBase.inputs, outputs: newOutput});

  return {model, fineTuningLayers};
}
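A hedged sketch of how a model built this way is typically fine-tuned in two phases, first with the truncated base frozen and then with the fineTuningLayers unfrozen; the optimizer, loss, epoch counts, and the images/targets tensors are placeholders, not the example's actual settings:

const {model, fineTuningLayers} = await buildObjectDetectionModel();

// Phase 1: train only the new head while the truncated base stays frozen.
model.compile({optimizer: tf.train.rmsprop(5e-3), loss: 'meanSquaredError'});
await model.fit(images, targets, {epochs: initialEpochs});

// Phase 2: unfreeze the last few base layers and recompile before continuing.
fineTuningLayers.forEach(layer => layer.trainable = true);
model.compile({optimizer: tf.train.rmsprop(2e-3), loss: 'meanSquaredError'});
await model.fit(images, targets, {epochs: fineTuningEpochs});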
github tensorflow / tfjs-examples / visualize-convnet / filters.js
return tf.tidy(() => {
    const imageH = model.inputs[0].shape[1];
    const imageW = model.inputs[0].shape[2];
    const imageDepth = model.inputs[0].shape[3];

    // Create an auxiliary model of which input is the same as the original
    // model but the output is the output of the convolutional layer of
    // interest.
    const layerOutput = model.getLayer(layerName).output;
    const auxModel = tf.model({inputs: model.inputs, outputs: layerOutput});

    // This function calculates the value of the convolutional layer's
    // output at the designated filter index.
    const lossFunction = (input) =>
        auxModel.apply(input, {training: true}).gather([filterIndex], 3);

    // This returned function (`gradFunction`) calculates the gradient of the
    // convolutional filter's output with respect to the input image.
    const gradFunction = tf.grad(lossFunction);

    // Form a random image as the starting point of the gradient ascent.
    let image = tf.randomUniform([1, imageH, imageW, imageDepth], 0, 1)
                    .mul(20)
                    .add(128);

    for (let i = 0; i < iterations; ++i) {
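      // The remainder of the loop is not shown in this snippet; a minimal
      // sketch of the gradient-ascent step it performs, using the gradFunction
      // and image defined above (the 1e-7 normalization constant is a common
      // choice, not necessarily the example's):
      const scaledGrads = tf.tidy(() => {
        const grads = gradFunction(image);
        // Normalize so each ascent step has a comparable magnitude.
        const norm = tf.sqrt(tf.mean(tf.square(grads))).add(1e-7);
        return grads.div(norm);
      });
      // Take one gradient-ascent step on the input image.
      image = image.add(scaledGrads);
    }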
github tensorflow / tfjs-examples / webcam-transfer-learning / index.js
async function loadTruncatedMobileNet() {
  const mobilenet = await tf.loadLayersModel(
      'https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json');

  // Return a model that outputs an internal activation.
  const layer = mobilenet.getLayer('conv_pw_13_relu');
  return tf.model({inputs: mobilenet.inputs, outputs: layer.output});
}
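The returned truncated model then acts as a fixed feature extractor; a usage sketch, where frameTensor stands in for a preprocessed [1, 224, 224, 3] webcam frame:

const truncatedMobileNet = await loadTruncatedMobileNet();
// Embed one frame into the internal conv_pw_13_relu activation, which a small
// trainable head consumes instead of raw pixels.
const embedding = truncatedMobileNet.predict(frameTensor);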
github ml5js / ml5-library / src / FeatureExtractor / Mobilenet.js
async loadModel() {
    this.mobilenet = await tf.loadLayersModel(`${BASE_URL}${this.config.version}_${this.config.alpha}_${IMAGE_SIZE}/model.json`);
    this.model = await tf.loadGraphModel(this.url, {fromTFHub: true});


    const layer = this.mobilenet.getLayer(this.config.layer);
    this.mobilenetFeatures = await tf.model({ inputs: this.mobilenet.inputs, outputs: layer.output });
    if (this.video) {
      await this.mobilenetFeatures.predict(imgToTensor(this.video)); // Warm up
    }
    return this;
  }
github lucylow / salty-wet-man / filters.js
return tf.tidy(() => {
    const imageH = model.inputs[0].shape[1];
    const imageW = model.inputs[0].shape[2];
    const imageDepth = model.inputs[0].shape[3];

    // Create an auxiliary model of which input is the same as the original
    // model but the output is the output of the convolutional layer of
    // interest.
    const layerOutput = model.getLayer(layerName).output;
    const auxModel = tf.model({inputs: model.inputs, outputs: layerOutput});

    // This function calculates the value of the convolutional layer's
    // output at the designated filter index.
    const lossFunction = (input) =>
        auxModel.apply(input, {training: true}).gather([filterIndex], 3);

    // This returned function (`gradFunction`) calculates the gradient of the
    // convolutional filter's output with respect to the input image.
    const gradFunction = tf.grad(lossFunction);

    // Form a random image as the starting point of the gradient ascent.
    let image = tf.randomUniform([1, imageH, imageW, imageDepth], 0, 1)
                    .mul(20)
                    .add(128);

    for (let i = 0; i < iterations; ++i) {
github tensorflow / tfjs-examples / translation / index.js
    const decoderStateInputH =
        tf.input({shape: [latentDim], name: 'decoder_state_input_h'});
    const decoderStateInputC =
        tf.input({shape: [latentDim], name: 'decoder_state_input_c'});
    const decoderStateInputs = [decoderStateInputH, decoderStateInputC];

    const decoderLSTM = model.layers[3];
    const decoderInputs = decoderLSTM.input[0];
    const applyOutputs =
        decoderLSTM.apply(decoderInputs, {initialState: decoderStateInputs});
    let decoderOutputs = applyOutputs[0];
    const decoderStateH = applyOutputs[1];
    const decoderStateC = applyOutputs[2];
    const decoderStates = [decoderStateH, decoderStateC];

    const decoderDense = model.layers[4];
    decoderOutputs = decoderDense.apply(decoderOutputs);
    this.decoderModel = tf.model({
      inputs: [decoderInputs].concat(decoderStateInputs),
      outputs: [decoderOutputs].concat(decoderStates)
    });
  }
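With the decoder assembled this way, one inference step feeds the previous target token together with the current LSTM state and returns the next-token distribution plus the updated state; a sketch with assumed tensor names:

const [outputTokens, stateH, stateC] =
    this.decoderModel.predict([targetSeq, prevStateH, prevStateC]);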
github rodrigopivi / aida / typescript / src / pipelines / zebraWings / embeddings / EmbeddingsModel.ts
public modelInput = () => {
        if (!this.inputModel) {
            const input = tf.layers.input({ shape: [this.maxWords, this.maxNgrams], dtype: 'int32' });
            const embedded = this.model.apply(input) as tf.SymbolicTensor;
            this.inputModel = tf.model({ inputs: input, outputs: embedded });
        }
        return this.inputModel;
    };
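A usage sketch for the memoized sub-model returned by modelInput(); the instance name and shape values are placeholders:

const embedder = embeddings.modelInput();
const vectors = embedder.predict(tf.zeros([1, maxWords, maxNgrams], 'int32'));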