How to use the `abs` function from the numjs library

To help you get started, we’ve selected a few numjs examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github eimg / hello-nn-js / hello.dnn.js View on Github external
// Trains a simple 2-layer neural network with full-batch gradient descent.
//
// Relies on outer-scope state: `nj` (numjs), the weight matrices
// `weights_zero` / `weights_one` (reassigned on every iteration), and the
// activation-derivative helper `curve`.
//
// @param {NdArray} inputs      - training input matrix (one row per sample)
// @param {NdArray} test_result - expected outputs for `inputs`
// @param {number}  iterations  - number of full-batch training passes
function train(inputs, test_result, iterations) {
    for (let i = 0; i < iterations; i++) {
        const layer_zero = inputs;

        // Forward pass: two sigmoid-activated layers.
        const layer_one = nj.sigmoid(layer_zero.dot(weights_zero));
        const layer_two = nj.sigmoid(layer_one.dot(weights_one));

        // How far the prediction is from the expected output.
        const layer_two_error = test_result.subtract(layer_two);

        // Progress report every 10k iterations.
        if (i % 10000 === 0) {
            console.log(i + " - Error: " + nj.mean(nj.abs(layer_two_error)));
        }

        // Backpropagation (sending back layer_two errors to layer_one).
        // Note: layer_one_error must use weights_one BEFORE it is updated below.
        const layer_two_delta = layer_two_error.multiply(curve(layer_two));
        const layer_one_error = layer_two_delta.dot(weights_one.T);
        const layer_one_delta = layer_one_error.multiply(curve(layer_one));

        // Adjusting weights (plain gradient step, no learning rate).
        weights_one = weights_one.add(layer_one.T.dot(layer_two_delta));
        weights_zero = weights_zero.add(layer_zero.T.dot(layer_one_delta));
    }
}
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): fragment of a training loop — the enclosing function and
// `for` header (loop counter `j`) and the closing braces lie outside this
// excerpt, as do `X`, `y`, `synapse_0`, `synapse_1`, `last_mean_error`,
// `dropout`, and `curve`.
var layer_0 = X;
        // Forward pass: input -> hidden -> output, sigmoid activations.
        var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // Inverted dropout, not yet ported from the original Python:
            // presumably zeroes hidden units with probability dropout_percent
            // and rescales survivors by 1/(1-dropout_percent) — TODO confirm
            // against the upstream numpy source before enabling.
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        // Output-layer error against the expected labels y.
        var layer_2_error = y.subtract(layer_2);

        // Every 10k iterations (after a 5k warm-up), early-stop when the
        // mean absolute error stops improving.
        if( (j % 10000) == 0 && j > 5000 ) {
            // if this 10k iteration's error is greater than
            // the last iteration, break out
            if (nj.mean(nj.abs(layer_2_error)) < last_mean_error) {
                console.log("delta after " + j + " iterations:" + nj.mean(nj.abs(layer_2_error)) );
                last_mean_error = nj.mean(nj.abs(layer_2_error));
            } else {
                console.log ("break:" + nj.mean(nj.abs(layer_2_error)) + ">" + last_mean_error );
                break;
            }
        }

        // Backpropagation: output delta -> hidden error -> hidden delta.
        var layer_2_delta = layer_2_error.multiply( curve(layer_2) );
        var layer_1_error = layer_2_delta.dot(synapse_1.T);
        var layer_1_delta = layer_1_error.multiply( curve(layer_1) );

        // Raw (unscaled) gradient contributions for each weight matrix.
        var synapse_1_weight_update = (layer_1.T.dot(layer_2_delta));
        var synapse_0_weight_update = (layer_0.T.dot(layer_1_delta));

        // Body of this branch (gradient-direction bookkeeping) is truncated
        // in this excerpt.
        if(j > 0) {
github eimg / hello-nn-js / hello.dnn.gd.js View on Github external
// Trains a 2-layer network with gradient descent, scaling each weight
// update by the learning rate `alpha` (defined outside this excerpt,
// as are `nj`, `weights_zero`, `weights_one`, and `curve`).
// NOTE(review): the function's closing brace lies outside this excerpt.
function train(inputs, test_result, iterations) {
    for(var i = 0; i < iterations; i++) {
        var layer_zero = inputs;

        // Forward pass: two sigmoid-activated layers.
        var layer_one = nj.sigmoid( layer_zero.dot(weights_zero) );
        var layer_two = nj.sigmoid( layer_one.dot(weights_one) );

        // Prediction error against the expected outputs.
        var layer_two_error = test_result.subtract(layer_two);

        // Log the mean absolute error every 10k iterations.
        if ((i % 10000) == 0) {
            console.log(i + " - Error: " + nj.mean(nj.abs(layer_two_error)));
        }

        // Backpropagation (sending back layer_two errors to layer_one)
        var layer_two_delta = layer_two_error.multiply( curve(layer_two) );
        var layer_one_error = layer_two_delta.dot( weights_one.T );
        var layer_one_delta = layer_one_error.multiply( curve(layer_one) );

        // Adjusting weights — each gradient step scaled by alpha.
        weights_one = weights_one.add(
            layer_one.T.dot(layer_two_delta).multiply(alpha)
        );

        weights_zero = weights_zero.add(
            layer_zero.T.dot(layer_one_delta).multiply(alpha)
        );
    }
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): fragment of the same training loop as above — the enclosing
// function/`for` header (`j`), `layer_0`, `y`, `synapse_0`, `synapse_1`,
// `last_mean_error`, `dropout`, and `curve` are defined outside this excerpt.
var layer_1 = nj.sigmoid(nj.dot(layer_0, synapse_0));

        if(dropout) {
            // Inverted dropout, not yet ported from the original Python:
            // presumably zeroes hidden units with probability dropout_percent
            // and rescales survivors by 1/(1-dropout_percent) — TODO confirm.
            // layer_1 *= nj.random.binomial([np.ones((len(X),hidden_neurons))], 1-dropout_percent)[0] * (1.0/(1-dropout_percent));
        }

        // Forward pass to the output layer, then compute the error.
        var layer_2 = nj.sigmoid(nj.dot(layer_1, synapse_1));
        var layer_2_error = y.subtract(layer_2);

        // Every 10k iterations (after a 5k warm-up), early-stop when the
        // mean absolute error stops improving.
        if( (j % 10000) == 0 && j > 5000 ) {
            // if this 10k iteration's error is greater than
            // the last iteration, break out
            if (nj.mean(nj.abs(layer_2_error)) < last_mean_error) {
                console.log("delta after " + j + " iterations:" + nj.mean(nj.abs(layer_2_error)) );
                last_mean_error = nj.mean(nj.abs(layer_2_error));
            } else {
                console.log ("break:" + nj.mean(nj.abs(layer_2_error)) + ">" + last_mean_error );
                break;
            }
        }

        // Backpropagation: output delta -> hidden error -> hidden delta.
        var layer_2_delta = layer_2_error.multiply( curve(layer_2) );
        var layer_1_error = layer_2_delta.dot(synapse_1.T);
        var layer_1_delta = layer_1_error.multiply( curve(layer_1) );

        // Raw (unscaled) gradient contributions for each weight matrix.
        var synapse_1_weight_update = (layer_1.T.dot(layer_2_delta));
        var synapse_0_weight_update = (layer_0.T.dot(layer_1_delta));

        // Gradient-direction bookkeeping — truncated mid-expression in this
        // excerpt (continues outside the visible span).
        if(j > 0) {
            synapse_0_direction_count = synapse_0_direction_count.add(
                nj.abs(
github eimg / burmese-text-classifier / train.js View on Github external
// NOTE(review): fragment — opens inside an early-stopping `if`/`else` whose
// condition lies outside this excerpt; `j`, `layer_0`, `layer_1`, `layer_2`,
// `layer_2_error`, the synapse matrices/counters, `binary_array`, `curve`,
// and `alpha` are all defined outside the visible span.
} else {
                console.log ("break:" + nj.mean(nj.abs(layer_2_error)) + ">" + last_mean_error );
                break;
            }
        }

        // Backpropagation: output delta -> hidden error -> hidden delta.
        var layer_2_delta = layer_2_error.multiply( curve(layer_2) );
        var layer_1_error = layer_2_delta.dot(synapse_1.T);
        var layer_1_delta = layer_1_error.multiply( curve(layer_1) );

        // Raw (unscaled) gradient contributions for each weight matrix.
        var synapse_1_weight_update = (layer_1.T.dot(layer_2_delta));
        var synapse_0_weight_update = (layer_0.T.dot(layer_1_delta));

        // Count sign flips between this update and the previous one —
        // presumably used to detect oscillating gradients; `binary_array`
        // is defined elsewhere (TODO confirm it maps sign -> {0,1}).
        if(j > 0) {
            synapse_0_direction_count = synapse_0_direction_count.add(
                nj.abs(
                    binary_array(synapse_0_weight_update).subtract(
                        binary_array(prev_synapse_0_weight_update)
                    )
                )
            );

            synapse_1_direction_count = synapse_1_direction_count.add(
                nj.abs(
                    binary_array(synapse_1_weight_update).subtract(
                        binary_array(prev_synapse_1_weight_update)
                    )
                )
            );
        }

        // Apply the learning-rate-scaled update to the output-layer weights.
        synapse_1 = synapse_1.add( synapse_1_weight_update.multiply(alpha) );