const [model, encoder, decoder] = ae_model.buildModel(modelParams)
encoder.summary()
decoder.summary()
model.summary()
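// --- Sketch (not in the original snippet): one plausible shape for
// ae_model.buildModel. A single-hidden-layer autoencoder; the parameter
// names (inputDim, latentDim) and layer sizes are assumptions.
function buildModel({ inputDim, latentDim }) {
  const input = tf.input({ shape: [inputDim] });
  const encoded = tf.layers.dense({ units: latentDim, activation: 'relu' }).apply(input);
  const decoderLayer = tf.layers.dense({ units: inputDim, activation: 'sigmoid' });
  const decoded = decoderLayer.apply(encoded);
  const encoder = tf.model({ inputs: input, outputs: encoded });
  const model = tf.model({ inputs: input, outputs: decoded });
  // Stand-alone decoder that shares weights with the trained decoder layer.
  const latentInput = tf.input({ shape: [latentDim] });
  const decoder = tf.model({ inputs: latentInput, outputs: decoderLayer.apply(latentInput) });
  return [model, encoder, decoder];
}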
console.log(" >> Train/Test Split | Train:", trainEcg.length, " Test:", testEcg.length);
// console.log(" >> Features per data point ", ecg[0].data.length)
// console.log(trainEcg[0]);
const xs = tf.tensor2d(trainEcg.map(item => item.data), [trainEcg.length, trainEcg[0].data.length])
const xsTest = tf.tensor2d(testEcg.map(item => item.data), [testEcg.length, testEcg[0].data.length])
// Map target class "1" to label 0 and every other class to label 1.
const yTest = testEcg.map(item => (String(item.target) === "1" ? 0 : 1))
// console.log(xs, xsTest);
async function train_data(model) {
  for (let i = 0; i < numSteps; i++) {
    const startTime = new Date();
    const res = await model.fit(xs, xs, {
      epochs: numEpochs,
      verbose: 0,
      batchSize: batchSize,
      validationData: [xsTest, xsTest]
    });
    const endTime = new Date();
    const elapsedTime = (endTime - startTime) / 1000;
    console.log(` >> Step ${i + 1}/${numSteps} | loss: ${res.history.loss[0]} | ${elapsedTime}s`);
  }
}
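// --- Sketch (not in the original snippet): scoring anomalies with the
// trained autoencoder. The threshold value is an assumption to be tuned
// on a validation split.
const reconstructed = model.predict(xsTest);
const errors = xsTest.sub(reconstructed).square().mean(1); // per-sample MSE
const threshold = 0.01; // assumed; pick from validation data in practice
const flagged = errors.greater(threshold).cast('float32');
const accuracy = flagged.equal(tf.tensor1d(yTest, 'float32')).cast('float32').mean();
console.log('Anomaly detection accuracy:', accuracy.dataSync()[0]);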
function convertToTensors(featuresData, labelData, testSplit) {
if (featuresData.length !== labelData.length) {
throw new Error('features set and labels set have different numbers of examples');
}
const [shuffledFeatures, shuffledLabels] = shuffleData(featuresData, labelData);
const featuresTensor = tf.tensor2d(shuffledFeatures, [numSamplesPerGesture, totalNumDataPerFile]);
// Create a 1D `tf.Tensor` to hold the labels, and convert the number label
// from the set {0, 1, 2} into one-hot encoding (e.g., 0 --> [1, 0, 0]).
const labelsTensor = tf.oneHot(tf.tensor1d(shuffledLabels).toInt(), numClasses);
return split(featuresTensor, labelsTensor, testSplit);
}
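// --- Sketch (not in the original snippet): minimal versions of the two
// helpers convertToTensors relies on. The split convention (the last
// testSplit fraction becomes the test set) is an assumption.
function shuffleData(features, labels) {
  const indices = tf.util.createShuffledIndices(features.length);
  const shuffledFeatures = [];
  const shuffledLabels = [];
  indices.forEach((i) => {
    shuffledFeatures.push(features[i]);
    shuffledLabels.push(labels[i]);
  });
  return [shuffledFeatures, shuffledLabels];
}

function split(featuresTensor, labelsTensor, testSplit) {
  const numExamples = featuresTensor.shape[0];
  const numTest = Math.round(numExamples * testSplit);
  const numTrain = numExamples - numTest;
  const [xTrain, xTest] = tf.split(featuresTensor, [numTrain, numTest]);
  const [yTrain, yTest] = tf.split(labelsTensor, [numTrain, numTest]);
  return [xTrain, yTrain, xTest, yTest];
}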
// The dataset has 4177 rows. Split them into two groups: roughly 3500 rows
// for training and the rest for validation.
const trainBatches = Math.floor(3500 / batchSize);
const dataset = datasetObj.dataset.shuffle(1000).batch(batchSize);
const trainDataset = dataset.take(trainBatches);
const validationDataset = dataset.skip(trainBatches);
await model.fitDataset(
trainDataset, {epochs: epochs, validationData: validationDataset});
await model.save(savePath);
const loadedModel = await tf.loadLayersModel(savePath + '/model.json');
const result = loadedModel.predict(
tf.tensor2d([[0, 0.625, 0.495, 0.165, 1.262, 0.507, 0.318, 0.39]]));
console.log(
'The actual test abalone age is 10, the inference result from the model is ' +
result.dataSync());
}
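// --- Sketch (not in the original snippet): one way to build the datasetObj
// used above with tf.data.csv. The file path and the 'rings' label column
// are assumptions based on the abalone data this snippet mentions.
const csvDataset = tf.data.csv('file://./abalone.csv', {
  columnConfigs: { rings: { isLabel: true } }
});
const datasetObj = {
  dataset: csvDataset.map(({ xs, ys }) =>
    // Plain number arrays are converted to tensors when batch() is applied.
    ({ xs: Object.values(xs), ys: Object.values(ys) }))
};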
// const tf = require('@tensorflow/tfjs');
const _ = require('lodash');
const tf = require('@tensorflow/tfjs-node');
let iris = require("./data/iris.json")
iris = _.shuffle(iris)
const iris_train = iris.slice(0, 130)
const iris_test = iris.slice(130, iris.length)
const trainingData = tf.tensor2d(iris_train.map(item => [
  item.sepalLength, item.sepalWidth, item.petalLength, item.petalWidth
]), [iris_train.length, 4])
const testData = tf.tensor2d(iris_test.map(item => [
  item.sepalLength, item.sepalWidth, item.petalLength, item.petalWidth
]), [iris_test.length, 4])
const outputData = tf.tensor2d(iris_train.map(item => [
item.species === 'setosa' ? 1 : 0,
item.species === 'virginica' ? 1 : 0,
item.species === 'versicolor' ? 1 : 0
]), [iris_train.length, 3])
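// --- Sketch (not in the original snippet): the same one-hot encoding via
// tf.oneHot, assuming the class order matches the columns above.
const speciesIndex = { setosa: 0, virginica: 1, versicolor: 2 };
const outputDataAlt = tf.oneHot(
  tf.tensor1d(iris_train.map(item => speciesIndex[item.species]), 'int32'), 3);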
stockapi.getStockHistoricalDaily($stock, (data) => {
  let prices = data[0];
  const min = Math.min.apply(null, prices);
  const max = Math.max.apply(null, prices);
  // Scale every price into [0, 1] before training.
  prices = prices.map((el) => minMaxScaler(el, min, max));
  const dates = data[1];
  const lookbackPrices = [];
  const targets = [];
  // Sliding window: each run of 10 consecutive prices predicts the next one.
  for (let index = 10; index < prices.length; index++) {
    lookbackPrices[index - 10] = prices.slice(index - 10, index);
    targets.push(prices[index]);
  }
  let tfPrices = tf.tensor2d(lookbackPrices);
  global.pred = tf.reshape(tf.tensor2d(lookbackPrices[0], [1, 10]), [1, 10, 1]);
  const tfTargets = tf.tensor1d(targets);
  // LSTMs expect input of shape [samples, timesteps, features].
  tfPrices = tf.reshape(tfPrices, [prices.length - 10, 10, 1]);
  //tfPrices.print();
  //tfTargets.print();
  const model = tf.sequential();
  model.add(tf.layers.lstm({ units: 32, inputShape: [10, 1] }));
  model.add(tf.layers.dense({ units: 1, activation: 'linear' }));
  const lr = parseFloat($('#txtLearningRate').val());
  const opt = tf.train.adam(lr);
  const loss = 'meanSquaredError';
  openSnackbar("Compiling model");
  // Mean absolute error and MSE serve as the accuracy proxies for this regression.
  model.compile({ optimizer: opt, loss: loss, metrics: ['mae', 'mse'] });
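  // --- Sketch (not in the original snippet): training and a one-step
  // forecast. Epochs/batch size are assumptions, as is the form of
  // minMaxScaler(x, min, max) = (x - min) / (max - min), which the last
  // line inverts to recover a raw price.
  model.fit(tfPrices, tfTargets, { epochs: 10, batchSize: 32 }).then(() => {
    const scaled = model.predict(global.pred).dataSync()[0];
    const predictedPrice = scaled * (max - min) + min;
    console.log('Next price estimate:', predictedPrice);
  });
}); // closes the getStockHistoricalDaily callback (truncated in the original)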
// One-hot encode the digit labels (0-9) for categorical cross-entropy.
const outputs = [];
for (let ix = 0; ix < trainLabels.length; ix++) {
const output = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
output[trainLabels[ix]] = 1;
outputs.push(output);
}
const trainLabelsTensor = tf.tensor2d(outputs);
const testOutputs = [];
for (let ix = 0; ix < testLabels.length; ix++) {
const output = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
output[testLabels[ix]] = 1;
testOutputs.push(output);
}
const testLabelsTensor = tf.tensor2d(testOutputs);
const model = createConvModel();
model.compile({
optimizer: 'rmsprop',
loss: 'categoricalCrossentropy',
metrics: ['accuracy'],
});
const batchSize = 320;
const trainEpochs = 20;
await model.fit(trainDigitsTensor, trainLabelsTensor, {
batchSize,
validationData: [testDigitsTensor, testLabelsTensor],
shuffle: true,
epochs: trainEpochs,
});
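// --- Sketch (not in the original snippet): evaluating the trained digit
// classifier on the held-out tensors defined above.
const [testLoss, testAcc] = model.evaluate(
  testDigitsTensor, testLabelsTensor, { batchSize });
console.log('Test loss:', testLoss.dataSync()[0],
  '| test accuracy:', testAcc.dataSync()[0]);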
const y_test = tf.tensor2d(iris_test.map(item => [
item.species === 'setosa' ? 1 : 0,
item.species === 'virginica' ? 1 : 0,
item.species === 'versicolor' ? 1 : 0
]), [iris_test.length, 3])
const model = tf.sequential();
model.add(tf.layers.dense({
inputShape: [4],
activation: "sigmoid",
units: 10,
name: "layer1"
}))
model.add(tf.layers.dense({
  // The original snippet is truncated here; a 3-unit softmax output layer
  // is inferred from the 3-column one-hot targets above.
  activation: "softmax",
  units: 3,
  name: "layer2"
}))
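// --- Sketch (not in the original snippet): compiling and training the
// iris model. Optimizer, learning rate, and epoch count are assumptions.
model.compile({
  optimizer: tf.train.adam(0.06),
  loss: 'categoricalCrossentropy',
  metrics: ['accuracy']
});
model.fit(trainingData, outputData, { epochs: 100 }).then(() => {
  const [loss, acc] = model.evaluate(testData, y_test);
  console.log('Iris test loss:', loss.dataSync()[0],
    '| accuracy:', acc.dataSync()[0]);
});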
// The top of this snippet is truncated; the train-split loaders below are
// assumed to mirror the test-split ones.
const trainLabels = await mnist.getTrainLabels();
const trainDigits = await mnist.getTrainImages();
const testLabels = await mnist.getTestLabels();
const testDigits = await mnist.getTestImages();
const trainDigitsTensor = tf.tensor4d(trainDigits.map(mnist.normalize),
[trainDigits.length / (28 * 28), 28, 28, 1]);
const testDigitsTensor = tf.tensor4d(testDigits.map(mnist.normalize),
[testDigits.length / (28 * 28), 28, 28, 1]);