How to use the cntk.layers.Sequential function in cntk

To help you get started, we've selected a few cntk examples based on popular ways cntk.layers.Sequential is used in public projects.
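cntk.layers.Sequential composes a list of layers (or any callables) into a single function that applies them in order. Before the project snippets, here is a minimal, self-contained sketch; the layer sizes are illustrative assumptions, not taken from any project below.

import cntk as C

# Sequential chains the layers: the output of each feeds the next.
model = C.layers.Sequential([
    C.layers.Dense(200, activation=C.relu),
    C.layers.Dense(10)
])
x = C.input_variable((784,))
z = model(x)  # a CNTK Function mapping 784-dim inputs to 10 outputs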


github microsoft / CNTK / bindings / python / cntk / contrib / deeprl / agent / shared / customized_models.py View on Github
Returns: a Python dictionary with string-valued keys including
        'inputs', 'outputs', 'loss' and 'f'.
    """
    # input/output
    inputs = C.ops.placeholder(shape=shape_of_inputs) \
        if use_placeholder_for_input \
        else C.ops.input_variable(shape=shape_of_inputs, dtype=np.float32)
    outputs = C.ops.input_variable(
        shape=(number_of_outputs,), dtype=np.float32)

    # network structure
    centered_inputs = inputs - 128
    scaled_inputs = centered_inputs / 256

    with C.layers.default_options(activation=C.ops.relu):
        q = C.layers.Sequential([
            C.layers.Convolution((8, 8), 32, strides=4),
            C.layers.Convolution((4, 4), 64, strides=2),
            C.layers.Convolution((3, 3), 64, strides=2),
            C.layers.Dense((512,)),
            C.layers.Dense(number_of_outputs, activation=None)
        ])(scaled_inputs)

    if loss_function is None:
        loss = C.losses.squared_error(q, outputs)
    else:
        loss = loss_function(q, outputs)

    return {
        'inputs': inputs,
        'outputs': outputs,
        'loss': loss,
        'f': q,
    }
github microsoft / samples-for-ai / examples / cntk / python / ptb / word_rnn.py View on Github
def create_model(input_sequence, label_sequence, vocab_dim, hidden_dim):
    # Create the rnn that computes the latent representation for the next token.
    rnn_with_latent_output = Sequential([
        C.layers.Embedding(hidden_dim),
        For(range(num_layers), lambda:
            Sequential([Stabilizer(), Recurrence(LSTM(hidden_dim), go_backwards=False)])),
    ])

    # Apply it to the input sequence.
    latent_vector = rnn_with_latent_output(input_sequence)

    # Connect the latent output to (sampled/full) softmax.
    if use_sampled_softmax:
        weights = load_sampling_weights(token_frequencies_file_path)
        smoothed_weights = np.float32(np.power(weights, alpha))
        sampling_weights = C.reshape(C.Constant(smoothed_weights), shape=(1, vocab_dim))
        z, ce, errs = cross_entropy_with_sampled_softmax(
            latent_vector, label_sequence, vocab_dim, hidden_dim,
            softmax_sample_size, sampling_weights)
    else:
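The For(range(num_layers), ...) idiom above repeats an identical recurrent block num_layers times inside the outer Sequential. A stripped-down sketch of the same stacking pattern; the dimensions are illustrative assumptions:

import cntk as C
from cntk.layers import Sequential, For, Stabilizer, Recurrence, LSTM, Embedding

# Two stacked LSTM layers over an embedded input sequence.
stack = Sequential([
    Embedding(150),
    For(range(2), lambda: Sequential([Stabilizer(), Recurrence(LSTM(150))])),
])
tokens = C.sequence.input_variable(1000)  # vocabulary of 1000, illustrative
hidden = stack(tokens)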
github microsoft / CNTK / Examples / Text / BidirectionalAttentionFlow / msmarco / team_xyz / script / polymath.py View on Github
def modeling_layer(self, attention_context):
    att_context = C.placeholder(shape=(8*self.hidden_dim,))
    # modeling layer
    # todo: use dropout in optimized_rnn_stack from cudnn once API exposes it
    mod_context = C.layers.Sequential([
        C.layers.Dropout(self.dropout),
        OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='model_rnn0'),
        C.layers.Dropout(self.dropout),
        OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='model_rnn1')])(att_context)

    return C.as_block(
        mod_context,
        [(att_context, attention_context)],
        'modeling_layer',
        'modeling_layer')
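The snippet wires Sequential to a C.placeholder and only later binds the real input through C.as_block, which keeps the sub-network reusable as a named block. A stripped-down sketch of that placeholder-then-bind pattern; the shapes and block name are assumptions:

import cntk as C

ph = C.placeholder(shape=(64,))
block = C.layers.Sequential([
    C.layers.Dense(64, activation=C.relu),
    C.layers.Dense(64)
])(ph)
real_input = C.input_variable((64,))
# Bind the placeholder to the actual input variable.
wired = C.as_block(block, [(ph, real_input)], 'my_block', 'my_block')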
github microsoft / CNTK / Examples / Image / Detection / FastRCNN / FastRCNN_train.py View on Github
def clone_conv_layers(base_model, cfg):
    feature_node_name = cfg["MODEL"].FEATURE_NODE_NAME
    start_train_conv_node_name = cfg["MODEL"].START_TRAIN_CONV_NODE_NAME
    last_conv_node_name = cfg["MODEL"].LAST_CONV_NODE_NAME
    if not cfg.TRAIN_CONV_LAYERS:
        conv_layers = clone_model(base_model, [feature_node_name], [last_conv_node_name], CloneMethod.freeze)
    elif feature_node_name == start_train_conv_node_name:
        conv_layers = clone_model(base_model, [feature_node_name], [last_conv_node_name], CloneMethod.clone)
    else:
        fixed_conv_layers = clone_model(base_model, [feature_node_name], [start_train_conv_node_name],
                                        CloneMethod.freeze)
        train_conv_layers = clone_model(base_model, [start_train_conv_node_name], [last_conv_node_name],
                                        CloneMethod.clone)
        conv_layers = Sequential([fixed_conv_layers, train_conv_layers])
    return conv_layers
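Note that Sequential composes arbitrary CNTK Functions, not just layer factories, which is what lets clone_conv_layers chain a frozen clone with a trainable one. A minimal sketch of that mechanism with stand-in layers; the names and shapes are assumptions:

import cntk as C

frozen_part = C.layers.Dense(32, activation=C.relu)  # stands in for the frozen clone
trainable_part = C.layers.Dense(16)                  # stands in for the trainable clone
conv_layers = C.layers.Sequential([frozen_part, trainable_part])
output = conv_layers(C.input_variable((8,)))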
github zlsh80826 / MSMARCO / script / polymath.py View on Github
def charcnn(self, x):
    embedding = C.layers.Embedding(self.char_emb_dim)
    dropout = C.layers.Dropout(self.dropout)
    conv2d = C.layers.Convolution2D((5, self.char_emb_dim),
                                    self.convs,
                                    activation=C.relu,
                                    init=C.glorot_uniform(),
                                    bias=True,
                                    init_bias=0,
                                    name='charcnn_conv')
    conv_out = C.layers.Sequential([
        embedding,
        dropout,
        conv2d])(x)
    return C.reduce_max(conv_out, axis=1)
github microsoft / CNTK / Examples / Image / Detection / YOLOv2 / YOLOv2.py View on Github
    model_fn = os.path.normpath(os.path.join(par.par_abs_path, "..", "..",
                                             "PretrainedModels", "ResNet18_ImageNet_CNTK.model"))
    if not os.path.exists(model_fn):
        raise ValueError('Model %s does not exist'%model_fn)
    loaded_model = load_model(model_fn)

    feature_layer = find_by_name(loaded_model, "features")
    fe_output_layer = find_by_name(loaded_model, "z.x.x.r")
    #ph = placeholder(shape=(par.par_num_channels, par.par_image_width, par.par_image_height), name="input_ph")
    #net = combine([fe_output_layer.owner]).clone(CloneMethod.clone, {feature_layer: ph})
    ph = placeholder(shape=(100, 100, 100), name="input_ph")
    net = combine([fe_output_layer.owner]).clone(CloneMethod.freeze, {feature_layer: ph})

    #plot(net, "ResNet18_s.pdf")

    return Sequential([
        [lambda x: x - par.par_input_bias],
        net])
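As the return statement above shows, Sequential also accepts plain Python callables alongside layers and Functions, which is convenient for inline preprocessing such as bias removal. A minimal sketch; the bias value and shapes are illustrative assumptions:

import cntk as C

model = C.layers.Sequential([
    lambda x: x - 114.0,  # inline mean/bias removal; 114.0 is an illustrative value
    C.layers.Convolution2D((3, 3), 16, activation=C.relu)
])
image = C.input_variable((3, 32, 32))
features = model(image)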
github microsoft / CNTK / Examples / Text / BidirectionalAttentionFlow / squad / polymath.py View on Github
def charcnn(self, x):
    conv_out = C.layers.Sequential([
        C.layers.Embedding(self.char_emb_dim),
        C.layers.Dropout(self.dropout),
        C.layers.Convolution2D((5, self.char_emb_dim), self.convs, activation=C.relu,
                               init=C.glorot_uniform(), bias=True, init_bias=0,
                               name='charcnn_conv')])(x)
    return C.reduce_max(conv_out, axis=1)  # workaround cudnn failure in GlobalMaxPooling
github microsoft / CNTK / Examples / Image / Classification / VGG / Python / VGG16_ImageNet_Distributed.py View on Github
def create_vgg16():

    # Input variables denoting the features and label data
    feature_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # apply model to input: subtract the per-channel mean values
    input = minus(feature_var, constant([[[104]], [[117]], [[124]]]), name='mean_removed_input')

    with default_options(activation=None, pad=True, bias=True):
        z = Sequential([
            # we separate Convolution and ReLU to name the output for feature extraction (usually before ReLU) 
            For(range(2), lambda i: [
                Convolution2D((3,3), 64, name='conv1_{}'.format(i)), 
                Activation(activation=relu, name='relu1_{}'.format(i)), 
            ]),
            MaxPooling((2,2), (2,2), name='pool1'),

            For(range(2), lambda i: [
                Convolution2D((3,3), 128, name='conv2_{}'.format(i)), 
                Activation(activation=relu, name='relu2_{}'.format(i)), 
            ]),
            MaxPooling((2,2), (2,2), name='pool2'),

            For(range(3), lambda i: [
                Convolution2D((3,3), 256, name='conv3_{}'.format(i)), 
                Activation(activation=relu, name='relu3_{}'.format(i)),
github microsoft / CNTK / Examples / Text / WordLMWithSampledSoftmax / word_rnn.py View on Github
For(range(num_layers), lambda: 
            Sequential([Stabilizer(), Recurrence(LSTM(hidden_dim), go_backwards=False)])),
        ])
github microsoft / CNTK / Examples / Image / Classification / MLP / Python / SimpleMNIST.py View on Github
def simple_mnist(tensorboard_logdir=None):
    input_dim = 784
    num_output_classes = 10
    num_hidden_layers = 1
    hidden_layers_dim = 200

    # Input variables denoting the features and label data
    feature = C.input_variable(input_dim, np.float32)
    label = C.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = element_times(constant(0.00390625), feature)  # scale pixel values by 1/256

    z = Sequential([For(range(num_hidden_layers), lambda i: Dense(hidden_layers_dim, activation=relu)),
                    Dense(num_output_classes)])(scaled_input)

    ce = cross_entropy_with_softmax(z, label)
    pe = classification_error(z, label)

    data_dir = os.path.join(abs_path, "..", "..", "..", "DataSets", "MNIST")

    path = os.path.normpath(os.path.join(data_dir, "Train-28x28_cntk_text.txt"))
    check_path(path)

    reader_train = create_reader(path, True, input_dim, num_output_classes)

    input_map = {
        feature: reader_train.streams.features,
        label: reader_train.streams.labels
    }