How to use numjs - 10 common examples

To help you get started, we’ve selected a few numjs examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github huan / node-facenet / src / cache / embedding-cache.spec.ts View on Github external
// NOTE(review): truncated excerpt — the test body continues past this
// fragment (the cache assertions and sandbox.restore() are cut off here).
test('Cache', async t => {
  // 128-element ramp [0, 1, ..., 127] standing in for a real face embedding.
  const EXPECTED_EMBEDDING = nj.arange(128)

  // Sinon sandbox so every stub/spy created below can be restored together.
  const sandbox = sinon.createSandbox()

  // Replace Facenet#embedding so the test never runs the real model.
  const embeddingStub = sandbox.stub(
    Facenet.prototype,
    'embedding',
  )
  // embeddingStub.returns(Promise.resolve(EXPECTED_EMBEDDING))
  embeddingStub.callsFake(() => {
    // console.log('fake')
    return Promise.resolve(EXPECTED_EMBEDDING)
  })

  // Spies, presumably wired to the cache's hit/miss notifications further
  // down — the wiring is outside this excerpt, so confirm against the repo.
  const hitSpy  = sandbox.spy()
  const missSpy = sandbox.spy()
github grimmer0125 / alphago-zero-tictactoe-js / src / tictactoe / TicTacToeGame.js View on Github external
getNextState(boardNdArray, player, action) {
    // # if player takes action on board, return next (board,player)
    // # action must be a valid move
    if (action === this.n * this.n) {
      // return (board, -player)
      console.log('invalid action');
      return { boardNdArray, player: -player };
    }

    const b = new Board(this.n);
    // b.pieces = np.copy(board), Python
    b.pieces = boardNdArray.tolist();

    const move = { x: Math.floor(action / this.n), y: (action % this.n) };
    b.execute_move(move, player);
    return { boardNdArray: nj.array(b.pieces), curPlayer: -player };
  }
github eimg / hello-nn-js / hello.dnn.gd.js View on Github external
}
    }

    return result;
}

/* === Training === */
// Run gradient descent for 60000 iterations over the module-level
// inputs / test_result data.
train(inputs, test_result, 60000);

/* === Testing === */
// Two 3-feature input rows to run through the trained network.
var test_data = [
    [1, 0, 0],
    [1, 1, 0]
];

// Forward-propagate the test rows and print the network's predictions.
// NOTE(review): think() is defined elsewhere in this file — confirm its
// output format there.
console.log( think( nj.array(test_data) ) );
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): truncated excerpt — train() continues past this fragment
// (output layer, backpropagation and weight updates are cut off).
function train(X, y, hidden_neurons, alpha, epochs, dropout, dropout_percent) {
    var start_time = new Date();

    // nj ndarray -> nested JS array; used below only for its dimensions.
    var X_arr = X.tolist();

    console.log("training with " + hidden_neurons + " neurons, alpha: " + alpha);
    console.log("input matrix: " + X_arr.length + "x" + X_arr[0].length);
    console.log("output matrix: 1x" + classes.length);
    console.log('------');

    // Sentinel: the convergence check below treats 1 as "no error yet".
    var last_mean_error = 1;

    // Randomly initialised weights: input->hidden and hidden->output.
    var synapse_0 = nj.array( rand(X_arr[0].length, hidden_neurons) );
    var synapse_1 = nj.array( rand(hidden_neurons, classes.length) );

    // Zero-initialised book-keeping matrices, same shapes as the weights.
    var prev_synapse_0_weight_update = nj.zeros(synapse_0.shape);
    var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass: input layer -> hidden layer (sigmoid activation).
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): duplicate excerpt of train() from train.js, starting
// mid-function (the first line lost its indentation in extraction) and
// truncated before the weight-update code.
var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass through both sigmoid layers.
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        // Error against the training targets y.
        var layer_2_error = y.subtract(layer_2);

        if( (j % 10000) == 0 && j > 5000 ) {
            // if this 10k iteration's error is greater than
            // the last iteration, break out
            if (nj.mean(nj.abs(layer_2_error)) < last_mean_error) {
                console.log("delta after " + j + " iterations:" + nj.mean(nj.abs(layer_2_error)) );
                last_mean_error = nj.mean(nj.abs(layer_2_error));
            } else {
                console.log ("break:" + nj.mean(nj.abs(layer_2_error)) + ">" + last_mean_error );
                break;
            }
        }

        // Backpropagation: output-layer delta, then hidden-layer error.
        var layer_2_delta = layer_2_error.multiply( curve(layer_2) );
        var layer_1_error = layer_2_delta.dot(synapse_1.T);
github eimg / hello-nn-js / hello.dnn.gd.js View on Github external
// NOTE(review): truncated excerpt — the weights_zero update and the
// function's closing braces are cut off here.
function train(inputs, test_result, iterations) {
    for(var i = 0; i < iterations; i++) {
        // Forward pass: two sigmoid layers over module-level weight matrices.
        var layer_zero = inputs;

        var layer_one = nj.sigmoid( layer_zero.dot(weights_zero) );
        var layer_two = nj.sigmoid( layer_one.dot(weights_one) );

        // Error against the expected outputs.
        var layer_two_error = test_result.subtract(layer_two);

        if ((i % 10000) == 0) {
            console.log(i + " - Error: " + nj.mean(nj.abs(layer_two_error)));
        }

        // Backpropagation (sending back layer_two errors to layer_one)
        var layer_two_delta = layer_two_error.multiply( curve(layer_two) );
        var layer_one_error = layer_two_delta.dot( weights_one.T );
        var layer_one_delta = layer_one_error.multiply( curve(layer_one) );

        // Adjusting weights
        weights_one = weights_one.add(
            layer_one.T.dot(layer_two_delta).multiply(alpha)
        );
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): duplicate excerpt of train() from train.js, starting
// mid-function and truncated inside the convergence check.
var last_mean_error = 1;

    // Randomly initialised weights: input->hidden and hidden->output.
    var synapse_0 = nj.array( rand(X_arr[0].length, hidden_neurons) );
    var synapse_1 = nj.array( rand(hidden_neurons, classes.length) );

    var prev_synapse_0_weight_update = nj.zeros(synapse_0.shape);
    var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass through both sigmoid layers.
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        var layer_2_error = y.subtract(layer_2);

        if( (j % 10000) == 0 && j > 5000 ) {
            // if this 10k iteration's error is greater than
            // the last iteration, break out
            if (nj.mean(nj.abs(layer_2_error)) < last_mean_error) {
                console.log("delta after " + j + " iterations:" + nj.mean(nj.abs(layer_2_error)) );
                last_mean_error = nj.mean(nj.abs(layer_2_error));
            } else {
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): duplicate excerpt of the same train() shown earlier in
// this listing, truncated just after the output-layer forward pass.
function train(X, y, hidden_neurons, alpha, epochs, dropout, dropout_percent) {
    var start_time = new Date();

    // nj ndarray -> nested JS array; used below only for its dimensions.
    var X_arr = X.tolist();

    console.log("training with " + hidden_neurons + " neurons, alpha: " + alpha);
    console.log("input matrix: " + X_arr.length + "x" + X_arr[0].length);
    console.log("output matrix: 1x" + classes.length);
    console.log('------');

    var last_mean_error = 1;

    // Randomly initialised weights: input->hidden and hidden->output.
    var synapse_0 = nj.array( rand(X_arr[0].length, hidden_neurons) );
    var synapse_1 = nj.array( rand(hidden_neurons, classes.length) );

    var prev_synapse_0_weight_update = nj.zeros(synapse_0.shape);
    var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass: input layer -> hidden layer (sigmoid activation).
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): duplicate excerpt of train() from train.js, starting
// mid-function (first line de-indented by extraction) and truncated
// inside the periodic error check.
console.log("training with " + hidden_neurons + " neurons, alpha: " + alpha);
    console.log("input matrix: " + X_arr.length + "x" + X_arr[0].length);
    console.log("output matrix: 1x" + classes.length);
    console.log('------');

    var last_mean_error = 1;

    // Randomly initialised weights: input->hidden and hidden->output.
    var synapse_0 = nj.array( rand(X_arr[0].length, hidden_neurons) );
    var synapse_1 = nj.array( rand(hidden_neurons, classes.length) );

    var prev_synapse_0_weight_update = nj.zeros(synapse_0.shape);
    var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass through both sigmoid layers.
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        var layer_2_error = y.subtract(layer_2);

        if( (j % 10000) == 0 && j > 5000 ) {
            // if this 10k iteration's error is greater than
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): duplicate excerpt of train() from train.js, starting
// mid-function (first line de-indented by extraction) and truncated at
// the periodic error check.
var X_arr = X.tolist();

    console.log("training with " + hidden_neurons + " neurons, alpha: " + alpha);
    console.log("input matrix: " + X_arr.length + "x" + X_arr[0].length);
    console.log("output matrix: 1x" + classes.length);
    console.log('------');

    var last_mean_error = 1;

    // Randomly initialised weights: input->hidden and hidden->output.
    var synapse_0 = nj.array( rand(X_arr[0].length, hidden_neurons) );
    var synapse_1 = nj.array( rand(hidden_neurons, classes.length) );

    var prev_synapse_0_weight_update = nj.zeros(synapse_0.shape);
    var prev_synapse_1_weight_update = nj.zeros(synapse_1.shape);

    var synapse_0_direction_count = nj.zeros(synapse_0.shape);
    var synapse_1_direction_count = nj.zeros(synapse_1.shape);

    for(var j = 0; j < epochs + 1; j++) {

        // Forward pass through both sigmoid layers.
        var layer_0 = X;
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // I don't understand what this does yet
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        var layer_2_error = y.subtract(layer_2);

        if( (j % 10000) == 0 && j > 5000 ) {