How to use the cntk.layers.Convolution2D function in cntk

To help you get started, we’ve selected a few cntk.layers.Convolution2D examples, based on popular ways it is used in public projects.
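Convolution2D builds a 2D convolutional layer as a composable function object: you construct it with a filter shape and a number of output feature maps, then apply it to an input. A minimal sketch of the call pattern (illustrative shapes, not taken from the projects below; assumes CNTK 2.x):

import numpy as np
import cntk as C

x = C.input_variable((1, 28, 28), np.float32)   # shape is (channels, height, width)
conv = C.layers.Convolution2D((5, 5), num_filters=32,
                              activation=C.relu, pad=True)(x)
print(conv.shape)  # (32, 28, 28): pad=True preserves the spatial dimensions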

github microsoft / CNTK / Examples / Image / GettingStarted / 07_Deconvolution_PY.py
def deconv_mnist(max_epochs=3):
    image_height = 28
    image_width  = 28
    num_channels = 1
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variable and normalization
    input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var, name="input_node")

    # Define the auto encoder model
    cMap = 1
    conv1   = C.layers.Convolution2D((5,5), cMap, pad=True, activation=C.ops.relu)(scaled_input)
    pool1   = C.layers.MaxPooling((4,4), (4,4), name="pooling_node")(conv1)
    unpool1 = C.layers.MaxUnpooling((4,4), (4,4))(pool1, conv1)
    z       = C.layers.ConvolutionTranspose2D((5,5), num_channels, pad=True, bias=False, init=C.glorot_uniform(0.001), name="output_node")(unpool1)

    # Define the RMSE loss: the reconstruction error between z and the rescaled
    # input (f2 recomputes scaled_input without the attached node name)
    f2        = C.ops.element_times(C.ops.constant(0.00390625), input_var)
    err       = C.ops.reshape(C.ops.minus(z, f2), (784))
    sq_err    = C.ops.element_times(err, err)
    mse       = C.ops.reduce_mean(sq_err)
    rmse_loss = C.ops.sqrt(mse)
    rmse_eval = C.ops.sqrt(mse)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'), True, input_dim, num_output_classes)

    # training config
    epoch_size = 60000
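The snippet pairs each Convolution2D/MaxPooling stage with a MaxUnpooling/ConvolutionTranspose2D stage so the decoder restores the encoder's shapes (the 0.00390625 constant is 1/256, rescaling pixel values into [0, 1)). A hedged sketch of the shape round trip, assuming the same 28x28 single-channel input:

import numpy as np
import cntk as C

x = C.input_variable((1, 28, 28), np.float32)
c = C.layers.Convolution2D((5, 5), 1, pad=True)(x)           # -> (1, 28, 28)
p = C.layers.MaxPooling((4, 4), (4, 4))(c)                   # -> (1, 7, 7)
u = C.layers.MaxUnpooling((4, 4), (4, 4))(p, c)              # -> (1, 28, 28)
d = C.layers.ConvolutionTranspose2D((5, 5), 1, pad=True)(u)  # -> (1, 28, 28)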
github mdabros / SharpLearning / python / src / CntkPython / ConvNet_MNIST.py
    # Input variables denoting the features and label data
    input_var = C.ops.input_variable(input_shape, np.float32)
    label_var = C.ops.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)

    # setup initializer
    init = C.initializer.uniform(scale=0.1, seed=32)

    with C.layers.default_options(activation=C.ops.relu, pad=False):
        conv1 = C.layers.Convolution2D((5,5), 32, init=init, bias=False, pad=True)(scaled_input)
        pool1 = C.layers.MaxPooling((3,3), (2,2))(conv1)
        conv2 = C.layers.Convolution2D((3,3), 48, init=init, bias=False)(pool1)
        pool2 = C.layers.MaxPooling((3,3), (2,2))(conv2)
        conv3 = C.layers.Convolution2D((3,3), 64, init=init, bias=False)(pool2)
        dense4 = C.layers.Dense(96, init=init, bias=False)(conv3)
        drop4 = C.layers.Dropout(0.5, seed=32)(dense4)
        model = C.layers.Dense(num_output_classes, activation=None, init=init, bias=False)(drop4)
    
    # Define loss and error metric.
    ce = C.losses.cross_entropy_with_softmax(model, label_var)
    pe = C.metrics.classification_error(model, label_var)

    # Training config.
    minibatch_size = 64
    minibatch_iterations = 200

    # Instantiate progress writers.
    training_progress_output_freq = 100

    # Instantiate the trainer object to drive the model training.
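Note how default_options supplies activation=relu and pad=False to every layer inside the with block, while conv1 overrides pad=True explicitly. A small sketch of that mechanism (assumes CNTK 2.x):

import cntk as C

with C.layers.default_options(activation=C.relu, pad=False):
    conv_a = C.layers.Convolution2D((3, 3), 16)            # picks up relu, pad=False
    conv_b = C.layers.Convolution2D((3, 3), 16, pad=True)  # relu, padding overridden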
github Azure / BatchAI / recipes / CNTK / CNTK-GPU-Python-Distributed / ConvNet_CIFAR10_DataAug_Distributed.py
For(range(2), lambda: [
    Convolution2D((3,3), 64),
    Convolution2D((3,3), 64),
    MaxPooling((3,3), strides=2)
]),
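For(range(2), ...) instantiates the lambda's layer list twice and chains the copies, so the fragment above is shorthand for stacking four convolutions and two poolings. A sketch of the hand-expanded equivalent (assumes the bare names come from cntk.layers):

from cntk.layers import Sequential, Convolution2D, MaxPooling

block = Sequential([
    Convolution2D((3,3), 64), Convolution2D((3,3), 64), MaxPooling((3,3), strides=2),
    Convolution2D((3,3), 64), Convolution2D((3,3), 64), MaxPooling((3,3), strides=2),
])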
github microsoft / CNTK / Examples / Image / Classification / AlexNet / Python / AlexNet_ImageNet_Distributed.py
    feature_var = input((num_channels, image_height, image_width))
    label_var = input((num_classes))

    # apply model to input
    # remove mean value 
    mean_removed_features = minus(feature_var, constant(114), name='mean_removed_input')
    
    with default_options(activation=None, pad=True, bias=True):
        z = Sequential([
            # we separate Convolution and ReLU to name the output for feature extraction (usually before ReLU) 
            Convolution2D((11,11), 96, init=normal(0.01), pad=False, strides=(4,4), name='conv1'),
            Activation(activation=relu, name='relu1'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm1'),
            MaxPooling((3,3), (2,2), name='pool1'),

            Convolution2D((5,5), 192, init=normal(0.01), init_bias=0.1, name='conv2'), 
            Activation(activation=relu, name='relu2'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm2'),
            MaxPooling((3,3), (2,2), name='pool2'),

            Convolution2D((3,3), 384, init=normal(0.01), name='conv3'), 
            Activation(activation=relu, name='relu3'),
            Convolution2D((3,3), 384, init=normal(0.01), init_bias=0.1, name='conv4'), 
            Activation(activation=relu, name='relu4'),
            Convolution2D((3,3), 256, init=normal(0.01), init_bias=0.1, name='conv5'), 
            Activation(activation=relu, name='relu5'), 
            MaxPooling((3,3), (2,2), name='pool5'), 

            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc6'),
            Activation(activation=relu, name='relu6'),
            Dropout(0.5, name='drop6'),
            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc7'),
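Because each convolution is named and kept separate from its Activation, the pre-ReLU output can be retrieved by name later for feature extraction. A hedged sketch, assuming the AlexNet model z above has been fully assembled and using CNTK's find_by_name graph helper:

from cntk import combine
from cntk.logging.graph import find_by_name

conv1_node = find_by_name(z, 'conv1')     # the output named 'conv1' (pre-ReLU)
extractor  = combine([conv1_node.owner])  # wrap it as an evaluable CNTK function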
github Azure / MachineLearningNotebooks / how-to-use-azureml / training-with-deep-learning / distributed-cntk-with-custom-docker / cntk_distr_mnist.py
"""Creates and trains a feedforward classification model for MNIST images."""
    image_height = 28
    image_width = 28
    num_channels = 1
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
    label_var = C.ops.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)

    with C.layers.default_options(activation=C.ops.relu, pad=False):
        conv1 = C.layers.Convolution2D((5, 5), 32, pad=True)(scaled_input)
        pool1 = C.layers.MaxPooling((3, 3), (2, 2))(conv1)
        conv2 = C.layers.Convolution2D((3, 3), 48)(pool1)
        pool2 = C.layers.MaxPooling((3, 3), (2, 2))(conv2)
        conv3 = C.layers.Convolution2D((3, 3), 64)(pool2)
        f4 = C.layers.Dense(96)(conv3)
        drop4 = C.layers.Dropout(0.5)(f4)
        z = C.layers.Dense(num_output_classes, activation=None)(drop4)

    ce = C.losses.cross_entropy_with_softmax(z, label_var)
    pe = C.metrics.classification_error(z, label_var)

    # Load train data
    reader_train = create_reader(os.path.join(data_dir, 'Train-28x28_cntk_text.txt'), True,
                                 input_dim, num_output_classes, max_epochs * epoch_size)
    # Load test data
    reader_test = create_reader(os.path.join(data_dir, 'Test-28x28_cntk_text.txt'), False,
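The create_reader helper these MNIST snippets call is defined elsewhere in each file. A sketch of its usual shape in the standard CNTK MNIST examples (the signature and stream fields here are assumptions based on the CTF-format data files):

import cntk.io as io

def create_reader(path, is_training, input_dim, label_dim,
                  max_samples=io.INFINITELY_REPEAT):
    return io.MinibatchSource(
        io.CTFDeserializer(path, io.StreamDefs(
            features=io.StreamDef(field='features', shape=input_dim),
            labels=io.StreamDef(field='labels', shape=label_dim))),
        randomize=is_training,
        max_samples=max_samples if is_training else io.FULL_DATA_SWEEP)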
github microsoft / samples-for-ai / examples / cntk / python / MNIST / ConvNet_MNIST.py
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
    label_var = C.ops.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)

    with C.layers.default_options(activation=C.ops.relu, pad=False):
        conv1 = C.layers.Convolution2D((5,5), 32, pad=True)(scaled_input)
        pool1 = C.layers.MaxPooling((3,3), (2,2))(conv1)
        conv2 = C.layers.Convolution2D((3,3), 48)(pool1)
        pool2 = C.layers.MaxPooling((3,3), (2,2))(conv2)
        conv3 = C.layers.Convolution2D((3,3), 64)(pool2)
        f4    = C.layers.Dense(96)(conv3)
        drop4 = C.layers.Dropout(0.5)(f4)
        z     = C.layers.Dense(num_output_classes, activation=None)(drop4)

    ce = C.losses.cross_entropy_with_softmax(z, label_var)
    pe = C.metrics.classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'), True, input_dim, num_output_classes)

    # Set learning parameters
    lr_per_sample    = [0.001]*10 + [0.0005]*10 + [0.0001]
    lr_schedule      = C.learning_rate_schedule(lr_per_sample, C.learners.UnitType.sample, epoch_size)
    mm_time_constant = [0]*5 + [1024]
    mm_schedule      = C.learners.momentum_as_time_constant_schedule(mm_time_constant, epoch_size)

    # Instantiate the trainer object to drive the model training
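The snippet stops just before the trainer is built. A hedged completion (a sketch, not the original file's exact code) wiring the loss, metric, and schedules into a Trainer:

    learner  = C.learners.momentum_sgd(z.parameters, lr_schedule, mm_schedule)
    progress = C.logging.ProgressPrinter(tag='Training')
    trainer  = C.Trainer(z, (ce, pe), [learner], [progress])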
github Azure / BatchAI / recipes / CNTK / CNTK-GPU-Python / ConvNet_MNIST.py
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = cntk.ops.input((num_channels, image_height, image_width), np.float32)
    label_var = cntk.ops.input(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = cntk.ops.element_times(cntk.ops.constant(0.00390625), input_var)

    with cntk.layers.default_options(activation=cntk.ops.relu, pad=False): 
        conv1 = cntk.layers.Convolution2D((5,5), 32, pad=True)(scaled_input)
        pool1 = cntk.layers.MaxPooling((3,3), (2,2))(conv1)
        conv2 = cntk.layers.Convolution2D((3,3), 48)(pool1)
        pool2 = cntk.layers.MaxPooling((3,3), (2,2))(conv2)
        conv3 = cntk.layers.Convolution2D((3,3), 64)(pool2)
        f4    = cntk.layers.Dense(96)(conv3)
        drop4 = cntk.layers.Dropout(0.5)(f4)
        z     = cntk.layers.Dense(num_output_classes, activation=None)(drop4)

    ce = cntk.losses.cross_entropy_with_softmax(z, label_var)
    pe = cntk.metrics.classification_error(z, label_var)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'), True, input_dim, num_output_classes)

    # Training config
    epoch_size = 60000                    # For now we manually specify epoch size
    minibatch_size = 64
    max_epochs = 40

    # Set learning parameters
    lr_per_sample    = [0.001]*10 + [0.0005]*10 + [0.0001]
github microsoft / CNTK / Examples / Image / Classification / ConvNet / Python / ConvNet_CIFAR10_DataAug_Distributed.py
cntk.layers.For(range(2), lambda: [
    cntk.layers.Convolution2D((3,3), 64),
    cntk.layers.Convolution2D((3,3), 64),
    cntk.layers.MaxPooling((3,3), (2,2))
]),