How to use the nengo.decoders module in nengo

To help you get started, we've selected a few nengo.decoders examples based on popular ways it is used in public projects.

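Before the snippets, here is a minimal, self-contained sketch of the solver interface they all rely on. Calling nengo.decoders.LstsqL2() builds a regularized least-squares solver; calling that solver with an activity matrix and a target matrix returns a decoder matrix plus an info dict whose 'rmses' entry holds the root-mean-square errors. The random activities and targets below are placeholders chosen purely for illustration, and newer Nengo releases expose the same solver as nengo.solvers.LstsqL2.

# Hedged sketch of the LstsqL2 call pattern used in the examples below;
# A (activities) and Y (targets) are random stand-ins, not real data.
import numpy as np
import nengo

rng = np.random.RandomState(0)
A = rng.uniform(0, 100, size=(500, 50))    # 500 samples x 50 "neurons"
Y = rng.uniform(-1, 1, size=(500, 10))     # 500 samples x 10 target dimensions

solver = nengo.decoders.LstsqL2()          # L2-regularized least-squares solver
decoders, info = solver(A, Y)              # decoders has shape (50, 10)

print("mean RMSE:", info['rmses'].mean())  # per-dimension RMS errors
Yhat = np.dot(A, decoders)                 # decoded estimate of Y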

github hunse / nef-rbm / nlif-deep.py (view on GitHub)
if 1:
    # deep.backprop(train, test, n_epochs=100)
    deep.sgd(train, test, n_epochs=50)
    print "mean error", deep.test(test).mean()

# --- try to get autoencoder back
if 0:
    deep.auto_sgd_down(train_images, test_images, rate=0.6, n_epochs=30)
    print "recons error", rms(test_images - recons, axis=1).mean()

if 0:
    # Try to learn linear reconstructor (doesn't work too well)
    import nengo

    codes = deep.encode(train_images)
    decoders, info = nengo.decoders.LstsqL2()(codes, train_images)
    print(info['rmses'].mean())

    recons = np.dot(codes, decoders)
    print(rms(train_images - recons, axis=1).mean())

    plt.figure(99)
    plt.clf()
    show_recons(test_images, recons)

if 0:
    # save parameters
    d = {}
    d['weights'] = [auto.W.get_value() for auto in deep.autos]
    d['biases'] = [auto.c.get_value() for auto in deep.autos]
    if all(hasattr(auto, 'V') for auto in deep.autos):
        d['rec_weights'] = [auto.V.get_value() for auto in deep.autos]

github hunse / nef-rbm / auto / train_lif.py (view on GitHub)
# deep.backprop(train, test, n_epochs=50, noise=0.5, shift=True)

deep.sgd(train, test, n_epochs=50, tradeoff=1, noise=0.3, shift=True)
print("mean error", deep.test(test).mean())

# --- try to get autoencoder back
if 0:
    deep.auto_sgd_down(train_images, test_images, rate=0.6, n_epochs=30)
    print "recons error", rms(test_images - recons, axis=1).mean()

if 0:
    # Try to learn linear reconstructor (doesn't work too well)
    import nengo

    codes = deep.encode(train_images)
    decoders, info = nengo.decoders.LstsqL2()(codes, train_images)
    print(info['rmses'].mean())

    recons = np.dot(codes, decoders)
    print(rms(train_images - recons, axis=1).mean())

    plt.figure(99)
    plt.clf()
    show_recons(test_images, recons)

if 0:
    # save parameters
    d = {}
    d['weights'] = [auto.W.get_value() for auto in deep.autos]
    d['biases'] = [auto.c.get_value() for auto in deep.autos]
    if all(hasattr(auto, 'V') for auto in deep.autos):
        d['rec_weights'] = [auto.V.get_value() for auto in deep.autos]

github hunse / nef-rbm / sigmoid-rbm / find_neuron_params.py (view on GitHub)
def residual(encoders, max_rates, intercepts, eval_points, show=False):
    radius = 5
    neurons = nengo.LIF(N)
    gains, biases = neurons.gain_bias(max_rates, intercepts)
    A = neurons.rates(np.dot(eval_points, encoders.T), gains, biases)
    y = sigmoid_radius(eval_points)
    d, _ = nengo.decoders.LstsqL2()(A, y)  # solve for decoders with regularized least squares
    r = np.dot(A, d) - y                   # decoding residual at the evaluation points
    r2 = np.sqrt(np.dot(r.T, r))           # L2 norm of the residual

    if show:
        plt.figure(101)
        plt.clf()
        x = np.linspace(-1, 1, 501).reshape(-1, 1)
        a = neurons.rates(np.dot(x, encoders.T), gains, biases)
        y = sigmoid_radius(x)
        yhat = np.dot(a, d)
        plt.plot(x, y, 'k--')
        plt.plot(x, yhat)

    return r2

github hunse / nef-rbm / deep-auto.py (view on GitHub)
def pretrain(self, images):
    acts = self.encode(images)
    solver = nengo.decoders.LstsqL2()
    decoders, info = solver(acts, images)

    decoders = decoders.astype(theano.config.floatX)
    self.decoders = theano.shared(decoders, name='decoders')

    print("Trained RBM: %0.3f" % (info['rmses'].mean()))

github hunse / nef-rbm / encoder-learning.py (view on GitHub)
mask = create_mask(n_hid, (28, 28), rf_shape)
weights = rng.normal(size=(n_hid, n_vis)) * mask

weights = weights.T
mask = mask.T
weights /= norm(weights, axis=0, keepdims=True)

neurons = nengo.LIF()
gain, bias = neurons.gain_bias(200, -0.5)

def encode(x):
    return neurons.rates(np.dot(x, weights), gain, bias)

# --- determine initial decoders
x = train_images[:1000]
decoders, _ = nengo.decoders.LstsqL2()(encode(x), x)

# x = train_images[:1000]
# A = encode(x)
# dshape = (n_hid, n_vis)

# def func(d):
#     n = x.shape[0]
#     xhat = np.dot(A, d.reshape(dshape))
#     E = xhat - x
#     error = 0.5 * (E**2).sum() / n
#     grad = np.dot(A.T, E) / n
#     return error, grad.flatten()

# from scipy.optimize.lbfgsb import fmin_l_bfgs_b as lbfgsb
# d0 = np.random.normal(scale=0.000001, size=dshape).flatten()
# decoders, _, _ = lbfgsb(func, d0, maxiter=100, iprint=2)