How to use the cntk.input_variable function in cntk

To help you get started, we’ve selected a few cntk examples based on popular ways cntk.input_variable is used in public projects.

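cntk.input_variable declares a symbolic input to a computation graph: you give it a static shape (an int or a tuple), and optionally a dtype, sparsity, and dynamic axes, then bind real NumPy data to it when evaluating or training. A minimal sketch, with placeholder sizes (784 features, 10 classes) that are purely illustrative:

import numpy as np
import cntk as C

# Declare a dense feature input and a sparse one-hot label input.
features = C.input_variable(784, dtype=np.float32, name='features')
labels = C.input_variable(10, is_sparse=True, name='labels')

# Build a small graph on the input and bind real data at evaluation time.
z = C.layers.Dense(10)(features)
batch = np.random.rand(2, 784).astype(np.float32)
print(z.eval({features: batch}).shape)   # -> (2, 10)

The examples below show the same function in larger, real-world models.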

github zlsh80826 / MSMARCO / script / polymath.py
def model(self):
        c = C.Axis.new_unique_dynamic_axis('c')
        q = C.Axis.new_unique_dynamic_axis('q')
        b = C.Axis.default_batch_axis()
        self.c_axis = c
        self.q_axis = q
        self.b_axis = b
        cgw = C.input_variable(self.wg_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cgw')
        cnw = C.input_variable(self.wn_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cnw')
        qgw = C.input_variable(self.wg_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qgw')
        qnw = C.input_variable(self.wn_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qnw')
        cc = C.input_variable((1,self.word_size), dynamic_axes=[b,c], name='cc')
        qc = C.input_variable((1,self.word_size), dynamic_axes=[b,q], name='qc')
        ab = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ab')
        ae = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ae')

        # input layer
        c_processed, q_processed = self.input_layer(cgw,cc,qgw,qc,qnw,cnw).outputs

        # attention layer
        att_context_0 = self.attention_layer(c_processed, q_processed, 'attention0')
        att_context_1 = self.attention_layer(att_context_0, q_processed, 'attention1')
        att_context_2 = self.attention_layer(att_context_1, q_processed, 'attention2')

        # output layer
        start_logits, end_logits = self.rnet_output_layer(att_context_2, q_processed).outputs

        # loss
        loss = all_spans_loss(start_logits, ab, end_logits, ae)

        return C.combine([start_logits, end_logits]), loss
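
This snippet declares sparse word inputs on two separate sequence axes (one for the context, one for the question) that share the default batch axis, so the two sequences can vary in length independently. A stripped-down sketch of the same pattern, with a placeholder vocabulary size:

import cntk as C

vocab_size = 10000   # placeholder vocabulary size
b = C.Axis.default_batch_axis()
c = C.Axis.new_unique_dynamic_axis('context')
q = C.Axis.new_unique_dynamic_axis('question')

# Sparse one-hot word inputs on independent sequence axes.
context_words = C.input_variable(vocab_size, dynamic_axes=[b, c], is_sparse=True, name='context_words')
question_words = C.input_variable(vocab_size, dynamic_axes=[b, q], is_sparse=True, name='question_words')
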
github Hzzone / CNTK_Realtime_Multi-Person_Pose_Estimation / python / demo.py
import math
import time
import util
import matplotlib
import pylab as plt
import os
from numpy import ma
from scipy.ndimage.filters import gaussian_filter
import cntk as C
from cntk import combine, load_model, placeholder
from cntk.ops.functions import CloneMethod
from cntk.logging.graph import find_by_name


base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# base_model = load_model("../model/pose_net.cntkmodel")
base_model = load_model(os.path.join(base_dir, "model", "pose_net.cntkmodel"))
data = C.input_variable(shape=(3, C.FreeDimension, C.FreeDimension), name="data")
def clone_model(base_model, from_node_names, to_node_names, clone_method):
    from_nodes = [find_by_name(base_model, node_name) for node_name in from_node_names]
    if None in from_nodes:
        print("Error: could not find all specified 'from_nodes' in clone. Looking for {}, found {}"
              .format(from_node_names, from_nodes))
    to_nodes = [find_by_name(base_model, node_name) for node_name in to_node_names]
    if None in to_nodes:
        print("Error: could not find all specified 'to_nodes' in clone. Looking for {}, found {}"
              .format(to_node_names, to_nodes))
    input_placeholders = dict(zip(from_nodes, [placeholder() for x in from_nodes]))
    cloned_net = combine(to_nodes).clone(clone_method, input_placeholders)
    return cloned_net

predictor = clone_model(base_model, ['data'], ["Mconv7_stage6_L1", "Mconv7_stage6_L2"], CloneMethod.freeze)
pred_net = predictor(data)
Mconv7_stage6_L1 = pred_net.outputs[0]
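
Here the data input is declared with C.FreeDimension for its spatial axes, so the cloned pose network can be evaluated on images of arbitrary height and width. A minimal sketch of the idea, where a simple elementwise op stands in for the real network:

import numpy as np
import cntk as C

# Leave height and width open; they are bound by whatever data is fed in.
x = C.input_variable((3, C.FreeDimension, C.FreeDimension), name='image')
y = C.relu(x)   # placeholder for the cloned network

small = np.random.rand(1, 3, 32, 32).astype(np.float32)
large = np.random.rand(1, 3, 64, 48).astype(np.float32)
print(y.eval({x: small}).shape)   # -> (1, 3, 32, 32)
print(y.eval({x: large}).shape)   # -> (1, 3, 64, 48)
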
github microsoft / CNTK / Examples / Image / Classification / ResNet / Python / TrainResNet_ImageNet_Distributed.py
def create_resnet_network(network_name, fp16):
    # Input variables denoting the features and label data
    input_var = C.input_variable((num_channels, image_height, image_width))
    label_var = C.input_variable((num_classes))

    dtype = np.float16 if fp16 else np.float32
    if fp16:
        graph_input = C.cast(input_var, dtype=np.float16)
        graph_label = C.cast(label_var, dtype=np.float16)
    else:
        graph_input = input_var
        graph_label = label_var

    with C.default_options(dtype=dtype):
        stride1x1 = (1, 1)
        stride3x3 = (2, 2)

        # create model, and configure learning parameters
        if network_name == 'resnet18':
            z = create_imagenet_model_basic(graph_input, [2, 1, 1, 2], num_classes)
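
The feature and label inputs are created in float32 and, when half-precision training is requested, cast to float16 inside the graph. A minimal sketch of that boundary cast, with a single Dense layer standing in for the ResNet body:

import numpy as np
import cntk as C

x = C.input_variable((3, 224, 224))      # input_variable defaults to float32
x_fp16 = C.cast(x, dtype=np.float16)     # cast at the graph boundary
with C.default_options(dtype=np.float16):
    z = C.layers.Dense(10)(x_fp16)       # the rest of the model runs in half precision
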
github microsoft / DistributedDeepLearning / CNTK / src / imagenet_cntk.py
def model_fn():
    # Input variables denoting the features and label data
    graph_input = C.input_variable((_CHANNELS, _HEIGHT, _WIDTH))
    graph_label = C.input_variable((_NUMCLASSES))

    with C.default_options(dtype=np.float32):
        stride1x1 = (1, 1)
        stride3x3 = (2, 2)

        # create model, and configure learning parameters for ResNet50
        z = create_imagenet_model_bottleneck(graph_input, [2, 3, 5, 2],
                                             _NUMCLASSES, stride1x1, stride3x3)

        # loss and metric
        ce = cross_entropy_with_softmax(z, graph_label)
        errs = classification_error(z, graph_label, topN=1)

    return {
        'name': 'resnet50',
        'feature': graph_input,
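
As in the previous example, the feature and label input variables are plain placeholders that the loss and metric consume. A reduced sketch of that wiring, with a single Dense layer standing in for the ResNet-50 body and placeholder sizes:

import numpy as np
import cntk as C

features = C.input_variable((3, 224, 224), name='features')
labels = C.input_variable(1000, name='labels')

with C.default_options(dtype=np.float32):
    z = C.layers.Dense(1000)(features)                 # stand-in for the real model
    ce = C.cross_entropy_with_softmax(z, labels)       # training loss
    errs = C.classification_error(z, labels, topN=1)   # evaluation metric
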
github microsoft / CNTK / Examples / Image / Detection / FasterRCNN / FasterRCNN.py
def eval_faster_rcnn_mAP(eval_model):
    img_map_file = globalvars['test_map_file']
    roi_map_file = globalvars['test_roi_file']
    classes = globalvars['classes']
    image_input = input_variable((num_channels, image_height, image_width), dynamic_axes=[Axis.default_batch_axis()], name=feature_node_name)
    roi_input = input_variable((cfg["CNTK"].INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    frcn_eval = eval_model(image_input, dims_input)

    # Create the minibatch source
    minibatch_source = ObjectDetectionMinibatchSource(
        img_map_file, roi_map_file,
        max_annotations_per_image=cfg["CNTK"].INPUT_ROIS_PER_IMAGE,
        pad_width=image_width, pad_height=image_height, pad_value=img_pad_value,
        randomize=False, use_flipping=False,
        max_images=cfg["CNTK"].NUM_TEST_IMAGES)

    # define mapping from reader streams to network inputs
    input_map = {
        minibatch_source.image_si: image_input,
        minibatch_source.roi_si: roi_input,
        minibatch_source.dims_si: dims_input
github microsoft / CNTK / Examples / LanguageUnderstanding / ReasoNet / reasonet.py View on Github external
def predict(model, params):
  """
  Compute the prediction result of the given model
  """
  model_args = {arg.name:arg for arg in model.arguments}
  context = model_args['context']
  entity_ids_mask = model_args['entity_ids_mask']
  entity_condition = greater(entity_ids_mask, 0, name='condidion')
  # Get all the entities in the paragraph via the gather operator, which creates a new dynamic sequence axis
  entities_all = sequence.gather(entity_condition, entity_condition, name='entities_all')
  # The generated dynamic axis has the same length as the input entity id sequence,
  # so we assign it as the entity ids' dynamic axis.
  entity_ids = C.input_variable(shape=(params.entity_dim), is_sparse=True,
                              dynamic_axes=entities_all.dynamic_axes, name='entity_ids')
  wordvocab_dim = params.vocab_dim
  answers = sequence.scatter(sequence.gather(model.outputs[-1], entity_condition), entities_all, name='Final_Ans')
  entity_id_matrix = ops.slice(ops.reshape(entity_ids, params.entity_dim), -1, 1, params.entity_dim)
  expand_pred = sequence.reduce_sum(element_times(answers, entity_id_matrix))
  pred_max = ops.hardmax(expand_pred, name='pred_max')
  return pred_max
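
The notable detail here is that entity_ids is declared directly on the dynamic axes produced by sequence.gather, so it lines up element-for-element with the gathered entities. A stripped-down sketch of that pattern, with illustrative shapes and names:

import cntk as C
from cntk import sequence

x = sequence.input_variable(1, name='x')            # some input sequence
condition = C.greater(x, 0, name='condition')
selected = sequence.gather(condition, condition)    # creates a new, data-dependent sequence axis
# Declare another input on that same axis so the two can be combined elementwise.
extra = C.input_variable(5, dynamic_axes=selected.dynamic_axes, name='extra')
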
github microsoft / CNTK / bindings / python / cntk / ops / functions.py
def to_input(arg):
            if isinstance(arg, cntk_py.Variable):
                return arg
            else:
                #from cntk import input
                return cntk.input_variable(arg)
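
This helper passes an existing Variable through unchanged and wraps anything else in a new input variable; cntk.input_variable itself accepts either an int or a shape tuple:

import cntk as C

v1 = C.input_variable(10)              # int -> shape (10,)
v2 = C.input_variable((3, 224, 224))   # tuple -> shape (3, 224, 224)
print(v1.shape, v2.shape)
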
github microsoft / CNTK / Examples / Image / Detection / FasterRCNN / FasterRCNN_eval.py
def compute_test_set_aps(eval_model, cfg):
    num_test_images = cfg["DATA"].NUM_TEST_IMAGES
    classes = cfg["DATA"].CLASSES
    image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    roi_input = input_variable((cfg.INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    frcn_eval = eval_model(image_input, dims_input)

    # Create the minibatch source
    minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TEST_MAP_FILE,
        cfg["DATA"].TEST_ROI_FILE,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=False, use_flipping=False,
        max_images=cfg["DATA"].NUM_TEST_IMAGES,
        num_classes=cfg["DATA"].NUM_CLASSES,
        proposal_provider=None)

    # define mapping from reader streams to network inputs