How to use the cgt.nn.SpatialConvolution function in cgt

To help you get started, we’ve selected a few cgt examples based on popular ways it is used in public projects.

Secure your code as it’s written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

Source: github.com/joschu/cgt — examples/cgt_theano_feedforward_comparison.py (view on GitHub)
def build_convnet_return_loss(X, y):
    """Build a small two-conv-layer network on X and return the NLL loss.

    Architecture: [3x3 conv (no pad) -> relu -> 3x3 max-pool stride 2] x 2,
    then flatten -> Affine(.., 10) -> logsoftmax.  The loss is the mean
    negative log-probability of the true labels y.
    """
    np.random.seed(0)  # deterministic weight initialization

    def _conv_relu_pool(inp, n_in, n_out):
        # One stage: unpadded 3x3 convolution with small Gaussian init,
        # rectification, then 3x3 max-pooling with stride 2.
        activated = nn.rectify(
            nn.SpatialConvolution(n_in, n_out, kernelshape=(3,3), pad=(0,0),
                                  weight_init=nn.IIDGaussian(std=.1))(inp))
        return nn.max_pool_2d(activated, kernelshape=(3,3), stride=(2,2))

    stage1 = _conv_relu_pool(X, 1, 32)
    stage2 = _conv_relu_pool(stage1, 32, 32)

    # Flatten (batch, c, h, w) -> (batch, c*h*w) for the linear classifier.
    b, c, h, w = stage2.shape
    flat = stage2.reshape([b, c*h*w])
    nfeats = cgt.infer_shape(flat)[1]

    # 10-way linear classifier with log-softmax output.
    logprobs = nn.logsoftmax(nn.Affine(nfeats, 10)(flat))

    # Mean negative log-likelihood of the correct class per example.
    return -logprobs[cgt.arange(X.shape[0]), y].mean()
Source: github.com/alexlee-gk/visual_dynamics — visual_dynamics/predictors/net_cgt.py (view on GitHub)
# NOTE(review): excerpt from the interior of a larger function -- the
# enclosing `def`, and the names x_shape, u_shape, levels, x_c_dim,
# x1_c_dim, input_shapes, and Bilinear, are defined outside this view.
# The first line's indentation was lost by the page scrape.
X = cgt.tensor4('X', fixed_shape=(None,) + x_shape)
    # Symbolic inputs: X is a 4-D image batch (leading dim = batch size),
    # U is a 2-D batch of control/action vectors.
    U = cgt.matrix('U', fixed_shape=(None,) + u_shape)

    # encoding
    # Build a convolutional pyramid.  Level 0 is the raw input; every higher
    # level applies two 3x3 pad-1 stride-1 convolutions (relu) and then a
    # 2x2 stride-2 max-pool, so spatial resolution halves per level.
    Xlevels = {}
    for level in range(levels[-1]+1):
        if level == 0:
            Xlevel = X
        else:
            # Channel bookkeeping: level 1 maps x_c_dim -> x1_c_dim;
            # deeper levels double the channel count each time.
            if level == 1:
                xlevelm1_c_dim = x_c_dim
                xlevel_c_dim = x1_c_dim
            else:
                xlevelm1_c_dim = xlevel_c_dim
                xlevel_c_dim = 2 * xlevel_c_dim
            Xlevel_1 = nn.rectify(nn.SpatialConvolution(xlevelm1_c_dim, xlevel_c_dim, kernelshape=(3,3), pad=(1,1), stride=(1,1), name='conv%d_1'%level,
                                                        weight_init=nn.IIDGaussian(std=0.01))(Xlevels[level-1]))
            Xlevel_2 = nn.rectify(nn.SpatialConvolution(xlevel_c_dim, xlevel_c_dim, kernelshape=(3,3), pad=(1,1), stride=(1,1), name='conv%d_2'%level,
                                                        weight_init=nn.IIDGaussian(std=0.01))(Xlevel_1))
            Xlevel = nn.max_pool_2d(Xlevel_2, kernelshape=(2,2), pad=(0,0), stride=(2,2))
        Xlevels[level] = Xlevel 

    # bilinear
    # For each selected pyramid level, predict the U-induced change in the
    # features with a bilinear form, and keep flattened (batch, features)
    # copies of both the features and the predicted change -- presumably for
    # a loss computed further down (outside this excerpt).
    Xlevels_next_pred_0 = {}
    Ylevels = OrderedDict()
    Ylevels_diff_pred = OrderedDict()
    for level in levels:
        Xlevel = Xlevels[level]
        Xlevel_diff_pred = Bilinear(input_shapes, b=None, axis=2, name='bilinear%d'%level)(Xlevel, U)
        Xlevels_next_pred_0[level] = Xlevel + Xlevel_diff_pred
        Ylevels[level] = Xlevel.reshape((Xlevel.shape[0], cgt.mul_multi(Xlevel.shape[1:])))
        Ylevels_diff_pred[level] = Xlevel_diff_pred.reshape((Xlevel_diff_pred.shape[0], cgt.mul_multi(Xlevel_diff_pred.shape[1:])))
Source: github.com/joschu/cgt — examples/cgt_theano_feedforward_comparison.py (view on GitHub)
def build_convnet_return_loss(X, y):
        """Build a 2-conv-layer net on X and return mean NLL against labels y.

        Pipeline: [3x3 conv (no pad) -> relu -> 3x3 max-pool stride 2] x 2,
        then flatten -> Affine -> logsoftmax.  (The 8-space body indent is a
        scrape artifact; the code is otherwise intact.)
        """
        np.random.seed(0)        
        # Stage 1: 1 input channel -> 32 feature maps, small Gaussian init.
        conv1 = nn.rectify(
            nn.SpatialConvolution(1, 32, kernelshape=(3,3), pad=(0,0), 
            weight_init=nn.IIDGaussian(std=.1))(X))
        pool1 = nn.max_pool_2d(conv1, kernelshape=(3,3), stride=(2,2))
        # Stage 2: 32 -> 32 feature maps, same recipe.
        conv2 = nn.rectify(
            nn.SpatialConvolution(32, 32, kernelshape=(3,3), pad=(0,0), 
            weight_init=nn.IIDGaussian(std=.1))(pool1))
        pool2 = nn.max_pool_2d(conv2, kernelshape=(3,3), stride=(2,2))
        # Flatten (batch, c, h, w) -> (batch, c*h*w).
        d0,d1,d2,d3 = pool2.shape
        flatlayer = pool2.reshape([d0,d1*d2*d3])
        nfeats = cgt.infer_shape(flatlayer)[1]
        # 10-way linear classifier with log-softmax output.
        logprobs = nn.logsoftmax(nn.Affine(nfeats, 10)(flatlayer))
        # Mean negative log-likelihood of the true class per example.
        loss = -logprobs[cgt.arange(X.shape[0]), y].mean()
        return loss
Source: github.com/joschu/cgt — examples/demo_cifar.py (view on GitHub)
def main():
    """Build a 3-conv-layer CIFAR-10 classifier graph in cgt.

    NOTE(review): truncated excerpt -- the function continues past this
    snippet (the loss and training loop are not shown here).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--profile",action="store_true")
    parser.add_argument("--unittest",action="store_true")
    parser.add_argument("--epochs",type=int,default=10)
    parser.add_argument("--devtype",choices=["cpu","gpu"],default="cpu")
    args = parser.parse_args()

    # Select the compute device and the native (compiled) backend.
    cgt.update_config(default_device=cgt.core.Device(devtype=args.devtype), backend="native")

    # Symbolic inputs: CIFAR images are 3x32x32; labels are int32 class ids.
    batchsize = 64
    Xshape = (batchsize, 3, 32, 32)
    X = cgt.tensor4("X", fixed_shape = Xshape)
    y = cgt.vector("y", fixed_shape = (batchsize,), dtype='i4')

    # Three conv blocks: 5x5 kernels with pad 2 (convolution preserves the
    # spatial size), each followed by a 3x3 stride-2 max-pool.
    conv1 = nn.SpatialConvolution(3, 32, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=1e-4))(X)
    relu1 = nn.rectify(conv1)
    pool1 = nn.max_pool_2d(relu1, kernelshape=(3,3), stride=(2,2))
    conv2 = nn.SpatialConvolution(32, 32, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=0.01))(pool1)
    relu2 = nn.rectify(conv2)
    pool2 = nn.max_pool_2d(relu2, kernelshape=(3,3), stride=(2,2))
    conv3 = nn.SpatialConvolution(32, 64, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=0.01))(pool2)
    # NOTE: here pooling precedes rectification (conv1/conv2 rectify first);
    # since relu is monotone non-decreasing, max-pool and relu commute, so
    # the result is equivalent.
    pool3 = nn.max_pool_2d(conv3, kernelshape=(3,3), stride=(2,2))
    relu3 = nn.rectify(pool3)
    # Flatten (batch, c, h, w) -> (batch, c*h*w) for the affine classifier.
    d0,d1,d2,d3 = relu3.shape
    flatlayer = relu3.reshape([d0,d1*d2*d3])
    nfeats = cgt.infer_shape(flatlayer)[1]
    ip1 = nn.Affine(nfeats, 10)(flatlayer)
    logprobs = nn.logsoftmax(ip1)
Source: github.com/joschu/cgt — examples/demo_cifar.py (view on GitHub)
# NOTE(review): fragment starting mid-function -- the `def main():` line and
# the argparse setup above it are outside this excerpt, and the first line's
# indentation was lost by the scrape.  Same network as the snippet above.
parser.add_argument("--epochs",type=int,default=10)
    parser.add_argument("--devtype",choices=["cpu","gpu"],default="cpu")
    args = parser.parse_args()

    # Select the compute device and the native (compiled) backend.
    cgt.update_config(default_device=cgt.core.Device(devtype=args.devtype), backend="native")

    # Symbolic inputs: CIFAR images are 3x32x32; labels are int32 class ids.
    batchsize = 64
    Xshape = (batchsize, 3, 32, 32)
    X = cgt.tensor4("X", fixed_shape = Xshape)
    y = cgt.vector("y", fixed_shape = (batchsize,), dtype='i4')

    # Three conv blocks: 5x5 kernels with pad 2, each followed by a 3x3
    # stride-2 max-pool.
    conv1 = nn.SpatialConvolution(3, 32, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=1e-4))(X)
    relu1 = nn.rectify(conv1)
    pool1 = nn.max_pool_2d(relu1, kernelshape=(3,3), stride=(2,2))
    conv2 = nn.SpatialConvolution(32, 32, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=0.01))(pool1)
    relu2 = nn.rectify(conv2)
    pool2 = nn.max_pool_2d(relu2, kernelshape=(3,3), stride=(2,2))
    conv3 = nn.SpatialConvolution(32, 64, kernelshape=(5,5), pad=(2,2), 
        weight_init=nn.IIDGaussian(std=0.01))(pool2)
    # NOTE: pool-then-rectify here; relu is monotone so this is equivalent
    # to rectify-then-pool as used for conv1/conv2.
    pool3 = nn.max_pool_2d(conv3, kernelshape=(3,3), stride=(2,2))
    relu3 = nn.rectify(pool3)
    # Flatten (batch, c, h, w) -> (batch, c*h*w) for the affine classifier.
    d0,d1,d2,d3 = relu3.shape
    flatlayer = relu3.reshape([d0,d1*d2*d3])
    nfeats = cgt.infer_shape(flatlayer)[1]
    ip1 = nn.Affine(nfeats, 10)(flatlayer)
    logprobs = nn.logsoftmax(ip1)
    # Mean negative log-likelihood of the true labels.
    loss = -logprobs[cgt.arange(batchsize), y].mean()


    # Collect all trainable parameters reachable from the loss graph.
    params = nn.get_parameters(loss)