How to use the cgt.matrix function in cgt

To help you get started, we’ve selected a few cgt examples, based on popular ways it is used in public projects.
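
Before the project snippets, here is a minimal sketch of the basic pattern (our own illustration, not taken from the projects below): cgt.matrix declares a symbolic 2-D variable, expressions built from it form a computation graph, and cgt.function compiles that graph into a callable.

import cgt
import numpy as np

# declare symbolic 2-D inputs; shapes and values are supplied at call time
X = cgt.matrix("X")
Y = cgt.matrix("Y")

# build an expression on the symbolic matrices and compile it
loss = X.dot(Y).sum()
f = cgt.function([X, Y], loss)

print(f(np.ones((2, 3)), np.ones((3, 4))))  # (2x3).dot(3x4), summed -> 24.0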

github joschu / cgt / test / test_multi_output.py
def runTest(self):
    # check multi-output ops against numpy for 0-d, 1-d, and 2-d inputs
    for x in (cgt.scalar('x'), cgt.vector('x'), cgt.matrix('x')):
        for cls in (SinCos, SinCos2):
            y, z = core.unpack(core.Result(cls(), [x]))
            xnum = np.ones((3,) * x.ndim, cgt.floatX)
            correct = (np.sin(xnum), np.cos(xnum))
            yznum = cgt.numeric_eval([y, z], {x: xnum})
            np.testing.assert_allclose(yznum, correct)
            f = cgt.function([x], [y, z])
            np.testing.assert_allclose(f(xnum), correct)
github joschu / cgt / examples / bench / cgt_gru.py
import time

import cgt
import numpy as np
from cgt.utils import Message

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--horizon",type=int)
    args = parser.parse_args()
    horizon = args.horizon
    assert horizon is not None
    size = 128
    batchsize = 64
    # GRUCell is defined earlier in this example file (not shown in this excerpt)
    cell = GRUCell([size], size)
    X = cgt.tensor3()
    init = cgt.matrix()

    prev_h = init
    for i in xrange(horizon):
        prev_h = cell(X[i], prev_h)
    loss = prev_h.sum()

    with Message("compiling"):
        f = cgt.function([X, init], cgt.grad(loss, cell.params()))
    with Message("running"):
        xval = np.zeros((horizon, batchsize, size), cgt.floatX)
        initval = np.zeros((batchsize, size), cgt.floatX)
        for i in xrange(100):
            f(xval, initval)
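
To run this benchmark, the horizon must be passed explicitly on the command line, e.g. python cgt_gru.py --horizon 20; the assert near the top fails if --horizon is omitted.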
github joschu / cgt / examples / demo_char_rnn.py
    final_hiddens = cur_hiddens

    loss = loss / (n_unroll * size_batch)

    params = network.get_parameters()
    gradloss = cgt.grad(loss, params)

    flatgrad = flatcat(gradloss)

    with utils.Message("compiling loss+grad"):
        f_loss_and_grad = cgt.function([x_tnk, targ_tnk] + init_hiddens, [loss, flatgrad] + final_hiddens)
    f_loss = cgt.function([x_tnk, targ_tnk] + init_hiddens, loss)

    assert len(init_hiddens) == len(final_hiddens)

    x_nk = cgt.matrix('x')
    outputs = network([x_nk] + init_hiddens)

    f_step = cgt.function([x_nk]+init_hiddens, outputs)

    # print "node count", cgt.count_nodes(flatgrad)
    return network, f_loss, f_loss_and_grad, f_step
github dementrock / tensorfuse / tensorfuse / backend / cgt / tensor / api.py
def imatrix(name):
    return cgt.matrix(name, dtype='int32')
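
Here imatrix is simply cgt.matrix with an integer element type: imatrix('idx') yields a symbolic int32 matrix, which suits index data, in contrast to the float matrices in the examples above.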
github joschu / cgt / examples / demo_variational_autoencoder.py
def __init__(self, xdim, args, dec="bernoulli"):
        self.xdim = xdim
        self.hdim = args.hdim
        self.zdim = args.zdim
        self.lmbda = args.lmbda  # weight decay coefficient * 2
        self.x = cgt.matrix("x", dtype=cgt.floatX)
        self.eps = cgt.matrix("eps", dtype=cgt.floatX)

        self.enc_mlp = GaussianMLP(self.x, self.xdim, self.hdim, self.zdim, nlayers=args.nlayers, eps=self.eps)
        if dec == "bernoulli":
            # log p(x | z) defined as -CE(x, y) = dec_mlp.cost(y)
            self.dec_mlp = BernoulliMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        elif dec == "gaussian":
            self.dec_mlp = GaussianMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        else:
            raise RuntimeError("unrecognized decoder %s" % dec)

        self.cost = (-cgt.sum(kld_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var)) + self.dec_mlp.cost) / args.batch_size
        self.params = self.enc_mlp.params + self.dec_mlp.params
        # L2 regularization
        self.gparams = [cgt.grad(self.cost, [p])[0] + self.lmbda * p for p in self.params]
        self.gaccums = [cgt.shared(np.zeros(p.op.get_value().shape, dtype=cgt.floatX)) for p in self.params]
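
Note how the L2 regularization is applied here: instead of adding a penalty term to the cost, each parameter's gradient gets self.lmbda * p added directly. Since the gradient of (lmbda/2) * ||p||^2 is lmbda * p, this is equivalent to penalizing the cost, which is why the comment describes lmbda as the weight decay coefficient times 2.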
github dementrock / tensorfuse / tensorfuse / backend / cgt / tensor / api.py
def icol(name):
    return cgt.matrix(name, dtype='int32', fixed_shape=(None, 1))
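
Compared to imatrix above, icol additionally passes fixed_shape=(None, 1): the variable is pinned to a single column while the row count stays unspecified, so cgt's shape inference can exploit the known dimension.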
github alexlee-gk / visual_dynamics / visual_dynamics / predictors / net_cgt.py
# requires cgt, collections.OrderedDict, and the Bilinear layer defined
# elsewhere in this package (imports not shown in this excerpt)
def build_bilinear_net(input_shapes, **kwargs):
    x_shape, u_shape = input_shapes
    X = cgt.tensor4('X', fixed_shape=(None,) + x_shape)
    U = cgt.matrix('U', fixed_shape=(None,) + u_shape)

    X_diff_pred = Bilinear(input_shapes, b=None, name='bilinear')(X, U)
    X_next_pred = X + X_diff_pred
    Y = X.reshape((X.shape[0], cgt.mul_multi(X.shape[1:])))
    Y_diff_pred = X_diff_pred.reshape((X_diff_pred.shape[0], cgt.mul_multi(X_diff_pred.shape[1:])))

    X_diff = cgt.tensor4('X_diff', fixed_shape=(None,) + x_shape)
    X_next = X + X_diff
    loss = ((X_next - X_next_pred) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X, U, X_diff]])
    pred_vars = OrderedDict([('Y_diff_pred', Y_diff_pred), ('Y', Y), ('X_next_pred', X_next_pred)])
    return net_name, input_vars, pred_vars, loss
github joschu / cgt / sandbox / parallel_interp.py
def matmuls(seq):
    m = 8
    d = 1000

    # build graph

    X = cgt.matrix("X")
    Y = cgt.matrix("Y")
    loss = 0
    for k in xrange(m):
        # loss = loss + cgt.sin(X*Y + k).sum()
        loss = loss + (X.dot(Y + k)).sum()

    with cgt.scoped_update_config(parallel_interp=not seq):
        f = cgt.function([X, Y], loss)

    # test things out!
    # (seed and randn come from numpy.random, imported at the top of the
    # original file, not shown in this excerpt)
    seed(0)

    X_val = randn(d, d)
    Y_val = randn(d, d)
    vals = [X_val, Y_val]