How to use the cntk.ops module in cntk

To help you get started, we've selected a few cntk.ops examples, based on popular ways it is used in public projects.

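Note that cntk.ops is a module of graph-building primitives (most of them are also re-exported at the top level of the cntk package). For orientation, here is a minimal self-contained sketch, assuming CNTK 2.x and NumPy; the names are illustrative:

import numpy as np
import cntk as C

# Declare a 3-dimensional input and compose two cntk.ops primitives.
x = C.input_variable(3)
y = C.ops.element_times(C.ops.plus(x, 1.0), 0.5)

# Evaluate on a single sample: prints [[1.  1.5 2. ]]
print(y.eval({x: np.array([[1.0, 2.0, 3.0]], dtype=np.float32)}))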

github microsoft / CNTK / contrib / Python / cntk / examples / MNIST / mnist_one_layer.py
    labels = C.input(label_dim)
    labels.tag = 'label'
    labels.name = 'labels'

    training_reader = C.CNTKTextFormatReader(training_filename)
    test_reader = C.CNTKTextFormatReader(test_filename)

    # One hidden sigmoid layer followed by a linear output layer
    h1 = add_dnn_sigmoid_layer(feat_dim, hidden_dim, feats_scaled, 1)
    out = add_dnn_layer(hidden_dim, label_dim, h1, 1)
    out.tag = 'output'

    ec = C.cross_entropy_with_softmax(labels, out)
    ec.name = criterion_name
    ec.tag = 'criterion'

    eval = C.ops.square_error(labels, out)
    eval.name = eval_name
    eval.tag = 'eval'

    # Specify the training parameters (settings are scaled down)
    my_sgd = C.SGDParams(epoch_size=600, minibatch_size=32,
                         learning_rates_per_mb=0.1, max_epochs=5, momentum_per_mb=0)

    # Create a context or re-use if already there
    with C.LocalExecutionContext('mnist_one_layer', clean_up=True) as ctx:
        # CNTK actions
        ctx.train(
            root_nodes=[ec, eval],
            training_params=my_sgd,
            input_map=training_reader.map(labels, alias='labels', dim=label_dim).map(features, alias='features', dim=feat_dim))

        result = ctx.test(
github microsoft / CNTK / Examples / ReasoNet / reasonet_stop_gradient.py
def seq_softmax(x, name = ''):
  # Subtract the running max (see seq_max below) for numerical stability;
  # the factor of 10 sharpens the resulting distribution.
  x_exp = ops.exp((x - seq_max(x)) * 10)
  # Normalize by the sum over the sequence, broadcast back to every step.
  x_softmax = ops.element_divide(x_exp, sequence.broadcast_as(sequence.reduce_sum(x_exp), x), name = name)
  return x_softmax
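
A hypothetical usage sketch (seq_max is defined in the same file and shown further below; the sequence input shape here is an assumption):

import cntk as C
from cntk import ops, sequence

scores = C.sequence.input_variable(1)          # one score per sequence step
attention = seq_softmax(scores, name='attn')   # non-negative, sums to 1 over the sequence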
github microsoft / CNTK / Examples / Image / Classification / GoogLeNet / InceptionV3 / Python / InceptionV3_ImageNet.py
def create_inception_v3():

    # Input variables denoting the features and label data
    feature_var = C.ops.input_variable((NUM_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH))
    label_var = C.ops.input_variable((NUM_CLASSES))

    drop_rate = 0.2
    bn_time_const = 4096
    out = inception_v3_norm_model(feature_var, NUM_CLASSES, drop_rate, bn_time_const)

    # loss and metric
    aux_weight = 0.3
    ce_aux = C.losses.cross_entropy_with_softmax(out['aux'], label_var)
    ce_z = C.losses.cross_entropy_with_softmax(out['z'], label_var)
    ce = C.ops.plus(C.ops.element_times(ce_aux, aux_weight), ce_z)
    pe = C.metrics.classification_error(out['z'], label_var)
    pe5 = C.metrics.classification_error(out['z'], label_var, topN=5)

    C.logging.log_number_of_parameters(out['z'])
    print()

    return {
        'feature'   : feature_var,
        'label'     : label_var,
        'ce'        : ce,
        'pe'        : pe,
        'pe5'       : pe5,
        'output'    : out['z'],
        'outputAux' : out['aux']
    }
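
The returned dictionary can then be wired into a Trainer. A minimal sketch, assuming CNTK 2.x learner APIs (the schedule values are illustrative, not the sample's real settings):

network = create_inception_v3()
lr_schedule = C.learning_parameter_schedule(0.01)   # illustrative value
mm_schedule = C.momentum_schedule(0.9)              # illustrative value
learner = C.learners.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule)
trainer = C.Trainer(network['output'], (network['ce'], network['pe']), [learner])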
github Azure-Samples / MachineLearningSamples-AerialImageClassification / Code / 01_Data_Acquisition_and_Understanding / 02_Batch_AI_Training_Provisioning / retrain_model_distributed.py
def load_alexnet_model(image_input, num_classes, model_filename,
					   retraining_type):
	''' Load pretrained AlexNet for desired level of retraining '''
	loaded_model = cntk.load_model(model_filename)

	# Load the convolutional layers, freezing if desired
	feature_node = cntk.logging.graph.find_by_name(loaded_model, 'features')
	last_conv_node = cntk.logging.graph.find_by_name(loaded_model, 'conv5.y')
	conv_layers = cntk.ops.combine([last_conv_node.owner]).clone(
		cntk.ops.functions.CloneMethod.clone if retraining_type == 'all' \
			else cntk.ops.functions.CloneMethod.freeze,
		{feature_node: cntk.ops.placeholder()})

	# Load the fully connected layers, freezing if desired
	last_node = cntk.logging.graph.find_by_name(loaded_model, 'h2_d')
	fully_connected_layers = cntk.ops.combine([last_node.owner]).clone(
		cntk.ops.functions.CloneMethod.freeze if retraining_type == \
			'last_only' else cntk.ops.functions.CloneMethod.clone,
		{last_conv_node: cntk.ops.placeholder()})

	# Define the network using the loaded layers
	feat_norm = image_input - cntk.layers.Constant(114)
	conv_out = conv_layers(feat_norm)
	fc_out = fully_connected_layers(conv_out)
	new_model = cntk.layers.Dense(shape=num_classes, name='last_layer')(fc_out)
	return new_model
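
A hypothetical call site for this helper (the input shape, file name, and class count are assumptions, not taken from the sample):

image_input = cntk.ops.input_variable((3, 224, 224))  # assumed NCHW image shape
model = load_alexnet_model(image_input, num_classes=10,
                           model_filename='AlexNet.model',  # assumed path
                           retraining_type='last_only')     # or 'all'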
github microsoft / CNTK / Examples / ReasoNet / reasonet_stop_gradient.py
def seq_max(x):
  # Backward recurrence: each step takes the max of itself and the next step,
  # so the first step ends up holding the max over the whole sequence.
  m = placeholder_variable(shape=(1,), dynamic_axes = x.dynamic_axes, name='max')
  o = element_select(greater(x, ops.future_value(m)), x, ops.future_value(m))
  rlt = o.replace_placeholders({m: utils.sanitize_input(o)})
  # Forward recurrence: broadcast the first step's value to all later steps.
  pv = placeholder_variable(shape=(1,), dynamic_axes = x.dynamic_axes, name='max_seq')
  max_seq = element_select(sequence.is_first(x), utils.sanitize_input(rlt), ops.past_value(pv))
  max_br = max_seq.replace_placeholders({pv:utils.sanitize_input(max_seq)})
  return utils.sanitize_input(max_br)
github microsoft / CNTK / Examples / Image / GettingStarted / 07_Deconvolution_PY.py
    # Input variable and normalization
    input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var, name="input_node")

    # Define the auto encoder model
    cMap = 1
    conv1   = C.layers.Convolution2D  ((5,5), cMap, pad=True, activation=C.ops.relu)(scaled_input)
    pool1   = C.layers.MaxPooling   ((4,4), (4,4), name="pooling_node")(conv1)
    unpool1 = C.layers.MaxUnpooling ((4,4), (4,4))(pool1, conv1)
    z       = C.layers.ConvolutionTranspose2D((5,5), num_channels, pad=True, bias=False, init=C.glorot_uniform(0.001), name="output_node")(unpool1)

    # define rmse loss function (f2 recomputes scaled_input, so this is effectively err = C.ops.minus(z, scaled_input))
    f2        = C.ops.element_times(C.ops.constant(0.00390625), input_var)
    err       = C.ops.reshape(C.ops.minus(z, f2), (784))
    sq_err    = C.ops.element_times(err, err)
    mse       = C.ops.reduce_mean(sq_err)
    rmse_loss = C.ops.sqrt(mse)
    rmse_eval = C.ops.sqrt(mse)

    reader_train = create_reader(os.path.join(data_path, 'Train-28x28_cntk_text.txt'), True, input_dim, num_output_classes)

    # training config
    epoch_size = 60000
    minibatch_size = 64

    # Set learning parameters
    lr_schedule = C.learning_parameter_schedule_per_sample([0.00015], epoch_size=epoch_size)
    mm_schedule = C.learners.momentum_schedule_per_sample([0.9983347214509387], epoch_size=epoch_size)

    # Instantiate the trainer object to drive the model training
    learner = C.learners.momentum_sgd(z.parameters, lr_schedule, mm_schedule, unit_gain=True)
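
From here a Trainer can drive the loop; a minimal continuation sketch (the stream name 'features' is an assumption about create_reader, which is defined elsewhere in the sample):

    trainer = C.Trainer(z, (rmse_loss, rmse_eval), [learner])
    input_map = {input_var: reader_train.streams.features}   # assumed stream name
    for _ in range(epoch_size // minibatch_size):            # one assumed epoch
        data = reader_train.next_minibatch(minibatch_size, input_map=input_map)
        trainer.train_minibatch(data)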
github microsoft / samples-for-ai / examples / cntk / python / MNIST / ConvNet_MNIST.py
def convnet_mnist(debug_output=False, epoch_size=60000, minibatch_size=64, max_epochs=40):
    image_height = 28
    image_width  = 28
    num_channels = 1
    input_dim = image_height * image_width * num_channels
    num_output_classes = 10

    # Input variables denoting the features and label data
    input_var = C.ops.input_variable((num_channels, image_height, image_width), np.float32)
    label_var = C.ops.input_variable(num_output_classes, np.float32)

    # Instantiate the feedforward classification model
    scaled_input = C.ops.element_times(C.ops.constant(0.00390625), input_var)

    with C.layers.default_options(activation=C.ops.relu, pad=False):
        conv1 = C.layers.Convolution2D((5,5), 32, pad=True)(scaled_input)
        pool1 = C.layers.MaxPooling((3,3), (2,2))(conv1)
        conv2 = C.layers.Convolution2D((3,3), 48)(pool1)
        pool2 = C.layers.MaxPooling((3,3), (2,2))(conv2)
        conv3 = C.layers.Convolution2D((3,3), 64)(pool2)
        f4    = C.layers.Dense(96)(conv3)
        drop4 = C.layers.Dropout(0.5)(f4)
        z     = C.layers.Dense(num_output_classes, activation=None)(drop4)

    ce = C.losses.cross_entropy_with_softmax(z, label_var)
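
The sample goes on to pair this loss with a classification-error metric; a minimal hedged sketch of that follow-on wiring (the schedule value is illustrative):

    pe = C.metrics.classification_error(z, label_var)

    lr_schedule = C.learning_parameter_schedule_per_sample(0.001)  # illustrative value
    learner = C.learners.sgd(z.parameters, lr_schedule)
    trainer = C.Trainer(z, (ce, pe), [learner])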
github microsoft / CNTK / Examples / ReasoNet / reasonet_stop_gradient.py
def contractive_reward(labels, predAndStop):
  base = None
  avg_rewards = None
  for step in range(len(predAndStop)):
    pred = predAndStop[step][0]
    stop = predAndStop[step][1]
    if base is None:
      base = ops.element_times(pred, stop)
    else:
      base = ops.plus(ops.element_times(pred, stop), base)
  avg_rewards = ops.stop_gradient(sequence.reduce_sum(base*labels))
  base_reward = sequence.broadcast_as(avg_rewards, base, name = 'base_line')
  # Maximum rewards => minimal -rewards
  step_cr = ops.stop_gradient(1- ops.element_divide(labels, base_reward))
  cr = ops.element_times(base, step_cr)
  rewards = sequence.reduce_sum(cr) + avg_rewards
  return rewards
github microsoft / CNTK / Examples / LanguageUnderstanding / ReasoNet / reasonet.py
"""
  base = None
  avg_rewards = None
  for step in range(len(predictions_and_stop_probabilities)):
    pred = predictions_and_stop_probabilities[step][0]
    stop = predictions_and_stop_probabilities[step][1]
    if base is None:
      base = ops.element_times(pred, stop)
    else:
      base = ops.plus(ops.element_times(pred, stop), base)
  avg_rewards = ops.stop_gradient(sequence.reduce_sum(base*labels))
  base_reward = sequence.broadcast_as(avg_rewards, base, name = 'base_line')
  # While the learner will minimize the loss by default, we want it to maximize the rewards.
  # Maximum rewards => minimal -rewards
  # So we use (1-r/b) as the rewards instead of (r/b-1)
  step_cr = ops.stop_gradient(1- ops.element_divide(labels, base_reward))
  normalized_contractive_rewards = ops.element_times(base, step_cr)
  rewards = sequence.reduce_sum(normalized_contractive_rewards) + avg_rewards
  return rewards
github microsoft / CNTK / Examples / LanguageUnderstanding / ReasoNet / reasonet.py
def accuracy_func(prediction, label, name='accuracy'):
  """
  Compute the accuracy of the prediction against a one-hot label
  """
  pred_max = ops.hardmax(prediction, name='pred_max')     # one-hot argmax of the prediction
  norm_label = ops.equal(label, [1], name='norm_label')   # normalize the label to exact 0/1 values
  acc = ops.times_transpose(pred_max, norm_label, name='accuracy')  # inner product: 1 if matched, else 0
  return acc
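
A quick numeric sketch of what accuracy_func computes (assuming dense one-hot labels): hardmax turns [0.2, 0.5, 0.3] into [0, 1, 0], equal(label, [1]) leaves a one-hot label unchanged, and times_transpose takes their inner product, giving 1 for a correct prediction and 0 otherwise:

import numpy as np
import cntk as C
from cntk import ops

pred = C.input_variable(3)
lab = C.input_variable(3)
acc = ops.times_transpose(ops.hardmax(pred), ops.equal(lab, [1]))
print(acc.eval({pred: np.array([[0.2, 0.5, 0.3]], dtype=np.float32),
                lab: np.array([[0.0, 1.0, 0.0]], dtype=np.float32)}))  # -> [[1.]]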