How to use @tensorflow/tfjs-node - 10 common examples

To help you get started, we’ve selected a few @tensorflow/tfjs-node examples, based on popular ways it is used in public projects.

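Before the project snippets below, here is a minimal, self-contained sketch of the typical @tensorflow/tfjs-node workflow (define, compile, fit, predict). The toy data and layer sizes are illustrative and not taken from any of the projects.

// Requiring @tensorflow/tfjs-node binds the native TensorFlow C++ backend.
const tf = require('@tensorflow/tfjs-node');

async function main() {
  // Toy regression data approximating y = 2x - 1 (illustrative values only).
  const xs = tf.tensor2d([[0], [1], [2], [3]]);
  const ys = tf.tensor2d([[-1], [1], [3], [5]]);

  const model = tf.sequential();
  model.add(tf.layers.dense({ units: 1, inputShape: [1] }));
  model.compile({ optimizer: 'sgd', loss: 'meanSquaredError' });

  await model.fit(xs, ys, { epochs: 200, verbose: 0 });
  model.predict(tf.tensor2d([[10]])).print();
}

main();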

github tensorflow / tfjs / tfjs / integration_tests / models / common.ts (View on GitHub)
export async function loadGraphModel(modelName: string):
    Promise<tfconverter.GraphModel> {
  if (inNodeJS()) {
    // tslint:disable-next-line:no-require-imports
    const fileSystem = require('@tensorflow/tfjs-node/dist/io/file_system');
    return tfconverter.loadGraphModel(
        fileSystem.fileSystem(`./data/${modelName}/model.json`));
  } else {
    return tfconverter.loadGraphModel(
        `${DATA_SERVER_ROOT}/${modelName}/model.json`);
  }
}
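A related note on this example: when @tensorflow/tfjs-node is imported, it registers a file:// IO handler, so a local graph model can usually be loaded by URL without reaching into dist/io/file_system. A hedged sketch, assuming a recent tfjs-node release; the model path is illustrative.

// Sketch: load a local graph model through the file:// handler that
// @tensorflow/tfjs-node registers. The path below is an assumption.
const tf = require('@tensorflow/tfjs-node');

async function loadLocalGraphModel(modelName) {
  return tf.loadGraphModel(`file://./data/${modelName}/model.json`);
}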
github victordibia / anomagram / experiments / ecg.js (View on GitHub)
[model, encoder, decoder] = ae_model.buildModel(modelParams)
encoder.summary()
decoder.summary()
model.summary()

console.log(" >> Train/Test Split | Train:", trainEcg.length, " Test:", testEcg.length);
// console.log(" >> Features per data point ", ecg[0].data.length)
// console.log(trainEcg[0]);

// Build [numExamples, numFeatures] tensors for the train and test splits.
const xs = tf.tensor2d(trainEcg.map(item => item.data), [trainEcg.length, trainEcg[0].data.length])

const xsTest = tf.tensor2d(testEcg.map(item => item.data), [testEcg.length, testEcg[0].data.length])

// Binary labels for evaluation: a target of 1 maps to 0, every other target maps to 1.
yTest = testEcg.map(item => item.target + "" === 1 + "" ? 0 : 1)

// console.log(xs, xsTest);

async function train_data(model) {
    for (let i = 0; i < numSteps; i++) {
        startTime = new Date();
        // Autoencoder training: the model is fit to reconstruct its own input (xs -> xs).
        const res = await model.fit(xs,
            xs, { epochs: numEpochs, verbose: 0, batchSize: batchSize, validationData: [xsTest, xsTest] });
        endTime = new Date();
        elapsedTime = (endTime - startTime) / 1000
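Once training finishes, a plausible way to use the autoencoder for scoring (not shown in the excerpt) is to compare each test example with its reconstruction. The sketch below reuses model and xsTest from above; the 0.05 threshold is an arbitrary assumption.

// Per-example reconstruction error (mean squared error along the feature axis).
function reconstructionErrors(model, xsTest) {
    return tf.tidy(() => {
        const preds = model.predict(xsTest);
        return preds.sub(xsTest).square().mean(1); // shape: [numExamples]
    });
}

const errors = reconstructionErrors(model, xsTest);
const flagged = errors.greater(tf.scalar(0.05)); // 1 = anomalous under the assumed threshold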
github victordibia / anomagram / experiments / ecg.js (View on GitHub)
async function loadSavedModel() {
    model = await tf.loadLayersModel(modelSavePath + "/model.json");
    console.log("model loaded");

    // const ae = tf.model({ inputs: input, outputs: output, name: "autoencoder" })
    // Recompile the loaded model with a fresh optimizer before continuing training.
    const optimizer = tf.train.adam(modelParams.learningRate, modelParams.adamBeta1)

    model.compile({ optimizer: optimizer, loss: "meanSquaredError" })

    for (let i = 0; i < numSteps; i++) {
        const res = await model.fit(xs,
            xs, { epochs: numEpochs, verbose: 0, batchSize: batchSize });
        console.log("Step loss", i, res.history.loss[0]);
    }

    // Save both the experiment copy and the copy served by the web app.
    await model.save(modelSavePath);
    await model.save("file://../app/public/webmodel/ecg");
}
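Because this retraining loop runs in Node, progress can also be streamed to TensorBoard with tfjs-node's built-in callback. A sketch of how the fit() call inside loadSavedModel could be extended; the log directory is an arbitrary choice.

// Inside an async function such as loadSavedModel above.
// tf.node.tensorBoard() returns a fit() callback that writes TensorBoard logs.
await model.fit(xs, xs, {
    epochs: numEpochs,
    batchSize: batchSize,
    verbose: 0,
    callbacks: tf.node.tensorBoard('/tmp/ecg_logs')
});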
github adwellj / node-tfjs-retrain / model.js (View on GitHub)

        // We parameterize batch size as a fraction of the entire dataset because the
        // number of examples that are collected depends on how many examples the user
        // collects. This allows us to have a flexible batch size.
        const batchSize = Math.floor(
            dataset.images.shape[0] * trainingParams.batchSizeFraction
        );
        if (!(batchSize > 0)) {
            throw new Error(
                `Batch size is 0 or NaN. Please choose a non-zero fraction.`
            );
        }

        const shuffledIndices = new Int32Array(
            tf.util.createShuffledIndices(dataset.labels.shape[0])
        );

        // Train the model! Model.fit() will shuffle xs & ys so we don't have to.
        console.time("Training Time");
        return this.model.fit(
            dataset.images.gather(shuffledIndices),
            dataset.labels.gather(shuffledIndices),
            {
                batchSize,
                epochs: trainingParams.epochs,
                validationSplit: 0.15,
                callbacks: {
                    onBatchEnd: async (batch, logs) => {
                        trainingParams.trainStatus(
                            "Loss: " + logs.loss.toFixed(5)
                        );
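The shuffling idiom above (tf.util.createShuffledIndices plus gather) also works on its own. A small standalone sketch with made-up data:

// Shuffle the rows of a dataset tensor by gathering along axis 0.
const data = tf.tensor2d([[1, 2], [3, 4], [5, 6], [7, 8]]); // illustrative values
const indices = new Int32Array(tf.util.createShuffledIndices(data.shape[0]));
const shuffled = data.gather(tf.tensor1d(indices, 'int32'));
shuffled.print();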
github loretoparisi / tensorflow-node-examples / sentiment / index.js (View on GitHub)
        let wordIndex = sentimentMetadata['word_index'];

        console.log('indexFrom = ' + indexFrom);
        console.log('maxLen = ' + maxLen);

        console.log('model_type', sentimentMetadata['model_type']);
        console.log('vocabulary_size', sentimentMetadata['vocabulary_size']);
        console.log('max_len', sentimentMetadata['max_len']);

        const inputText =
            text.trim().toLowerCase().replace(/(\.|\,|\!)/g, '').split(/\s+/g); // tokenized

        console.log(inputText);

        // Look up word indices.
        const inputBuffer = tf.buffer([1, maxLen], 'float32');
        for (let i = 0; i < inputText.length; ++i) {
            const word = inputText[i];
            if (typeof wordIndex[word] == 'undefined') { // TODO(cais): Deal with OOV words.
                console.log(word, wordIndex[word]);
            }
            inputBuffer.set(wordIndex[word] + indexFrom, 0, i);
        }
        const input = inputBuffer.toTensor();

        console.log(text, "\n", input);

        const beginMs = performance.now();
        const predictOut = model.predict(input);
        const score = predictOut.dataSync()[0];
        predictOut.dispose();
        const endMs = performance.now();
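The lookup loop above leaves the TODO about out-of-vocabulary words open. One hedged variant of that loop, with an assumed OOV fallback index and truncation to maxLen (both are assumptions, not part of the project's code):

        const OOV_INDEX = 2; // assumed fallback index; confirm against the metadata's conventions
        const tokens = inputText.slice(0, maxLen); // truncate so the buffer write stays in range
        for (let i = 0; i < tokens.length; ++i) {
            const id = wordIndex[tokens[i]];
            inputBuffer.set(id === undefined ? OOV_INDEX : id + indexFrom, 0, i);
        }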
github charliegerard / gestures-ml-js / daydream / examples / harry-potter / train.js (View on GitHub)
const createModel = async(xTrain, yTrain, xTest, yTest) => {
  const params = {learningRate: 0.1, epochs: 40};
  // Define the topology of the model: two dense layers.
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 10, activation: 'sigmoid', inputShape: [xTrain.shape[1]]}));
  model.add(tf.layers.dense({units: numClasses, activation: 'softmax'}));
  model.summary();

  const optimizer = tf.train.adam(params.learningRate);
  model.compile({
    optimizer: optimizer,
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });

  await model.fit(xTrain, yTrain, {
    epochs: params.epochs,
    validationData: [xTest, yTest],
  });
  
  await model.save('file://model');
  return model;
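After model.save('file://model'), the model can be reloaded in Node for inference. A sketch; the sample values are placeholders and the feature count must match xTrain.shape[1]:

const loadAndPredict = async () => {
  // Reload the model saved above and run one prediction.
  const model = await tf.loadLayersModel('file://model/model.json');
  const sample = tf.tensor2d([[0.1, 0.2, 0.3]]); // placeholder values; shape [1, xTrain.shape[1]]
  model.predict(sample).print(); // softmax class probabilities
};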
github victordibia / anomagram / experiments / models / ae.js (View on GitHub)
    const hiddenLayers = params.hiddenLayers
    const latentDim = params.latentDim
    const hiddenDim = params.hiddenDim
    const learningRate = params.learningRate, adamBeta1 = params.adamBeta1
    const outputActivation = "sigmoid"
    // console.log(numFeatures);

    // Specify encoder
    const input = tf.input({ shape: [numFeatures] })
    let encoderHidden = tf.layers.dense({ units: hiddenDim[0], activation: "relu" }).apply(input);
    let i = 1
    while (i < hiddenDim.length) {
        encoderHidden = tf.layers.dense({ units: hiddenDim[i], activation: "relu" }).apply(encoderHidden);
        i++
    }
    const z_ = tf.layers.dense({ units: latentDim }).apply(encoderHidden);
    const encoder = tf.model({ inputs: input, outputs: z_, name: "encoder" })


    // Specify decoder
    const latentInput = tf.input({ shape: [latentDim] })
    let decoderHidden = tf.layers.dense({ units: hiddenDim[hiddenDim.length - 1], activation: "relu" }).apply(latentInput);
    let j = hiddenDim.length - 1
    while (j > 0) {
        j--;
        decoderHidden = tf.layers.dense({ units: hiddenDim[j], activation: "relu" }).apply(decoderHidden);

    }

    const decoderOutput = tf.layers.dense({ units: numFeatures, activation: outputActivation }).apply(decoderHidden);
    const decoder = tf.model({ inputs: latentInput, outputs: decoderOutput, name: "decoder" })
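The excerpt builds the encoder and decoder separately; one plausible way to chain them into the end-to-end autoencoder and compile it with the parameters above is:

    // Chain encoder and decoder into a single trainable model.
    // LayersModel.apply() accepts symbolic tensors, so the composition stays symbolic.
    const output = decoder.apply(encoder.apply(input));
    const autoencoder = tf.model({ inputs: input, outputs: output, name: "autoencoder" })
    autoencoder.compile({
        optimizer: tf.train.adam(learningRate, adamBeta1),
        loss: "meanSquaredError"
    })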
github charliegerard / gestures-ml-js / phone / examples / game / train.js (View on GitHub)
const createModel = async(xTrain, yTrain, xTest, yTest) => {
  const params = {learningRate: 0.1, epochs: 40};
  // Define the topology of the model: two dense layers.
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 10, activation: 'sigmoid', inputShape: [xTrain.shape[1]]}));
  model.add(tf.layers.dense({units: numClasses, activation: 'softmax'}));
  model.summary();

  const optimizer = tf.train.adam(params.learningRate);
  model.compile({
    optimizer: optimizer,
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });

  await model.fit(xTrain, yTrain, {
    epochs: params.epochs,
    validationData: [xTest, yTest],
  });
  
  await model.save('file://model');
  return model;
github jainsamyak / Stockifier / src / js / prediction.js (View on GitHub)
        for (let index = 10; index < prices.length; index++) {
            lookbackPrices[index - 10] = prices.slice(index - 10, index);
            targets.push(prices[index]);
        }
        tfPrices = tf.tensor2d(lookbackPrices);
        global.pred = tf.tensor2d(lookbackPrices[0], [1, 10]);
        global.pred = tf.reshape(global.pred, [1, 10, 1]);
        tfTargets = tf.tensor1d(targets);
        tfPrices = tf.reshape(tfPrices, [prices.length - 10, 10, 1]);
        //tfPrices.print();
        //tfTargets.print();


        const model = tf.sequential();
        model.add(tf.layers.lstm({ units: 32, inputShape: [10, 1] }));
        model.add(tf.layers.dense({ units: 1, activation: 'linear' }));
        $lr = parseFloat($('#txtLearningRate').val());
        const lr = $lr;
        const opt = tf.train.adam(lr);
        const loss = 'meanSquaredError';
        openSnackbar("Compiling model");
        model.compile({ optimizer: opt, loss: loss, metrics: ['mae', 'mse'] }); /* Using Mean Absolute Error as metrics for accuracy of model */

        async function fit() {
            t = targets.map((el) => minMaxInverseScaler(el, min, max));
            t = t.slice(t.length - 100, t.length);
            predictChart.data.labels = dates.slice(dates.length - 100, dates.length);

            var loss = Infinity;
            var epochs = 1;
            var targetEpochs = parseFloat($('#txtNumEpochs').val());
            while (epochs < targetEpochs && window.startStop == 1) {
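The excerpt ends inside the training loop; a hypothetical shape for the loop body (not the project's actual code) is one epoch of fit() per iteration, followed by a prediction for the chart:

                // Hypothetical continuation: one epoch per iteration so the UI can update.
                const res = await model.fit(tfPrices, tfTargets, { epochs: 1, verbose: 0 });
                loss = res.history.loss[0];
                epochs++;

                // Predict the next value from the most recent lookback window and rescale it.
                const nextPrice = model.predict(global.pred).dataSync()[0];
                console.log("epoch", epochs, "loss", loss, "next", minMaxInverseScaler(nextPrice, min, max));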