How to use the coremltools.models.datatypes module in coremltools

To help you get started, we’ve selected a few coremltools examples, based on popular ways it is used in public projects.

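Before the excerpts, here is a minimal sketch of the datatypes API itself (only constructors that appear in the excerpts below; the feature names are made up for illustration):

from coremltools.models import datatypes

image_type = datatypes.Array(3, 416, 416)   # multi-dimensional array feature type
scalar_type = datatypes.Double()            # double-precision scalar feature type
dict_type = datatypes.Dictionary(str)       # dictionary feature type with string keys

# Model interfaces are declared as lists of (name, type) pairs:
input_features = [('image', image_type), ('threshold', scalar_type)]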

github ANRGUSC / Jupiter / app_specific_files / demotest_backup_circe / scripts / utils / onnx2coreml.py
        # ]
        #
        # builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
        # builder.add_softmax(name="softmax_pcls",
        #                     dim=(0, 3, 2, 1),
        #                     input_name="scores",
        #                     output_name="permute_scores_output")
        # softmax_model = coremltools.models.MLModel(builder.spec)
        # softmax_model.save("softmax.mlmodel")

        # 4. Pipeline models together
        from coremltools.models import datatypes
        # from coremltools.models import neural_network
        from coremltools.models.pipeline import Pipeline

        input_features = [('0', datatypes.Array(3, 416, 416)),
                          ('iouThreshold', datatypes.Double()),
                          ('confidenceThreshold', datatypes.Double())]

        output_features = ['confidence', 'coordinates']

        pipeline = Pipeline(input_features, output_features)

        # Add 3rd dimension of size 1 (apparently not needed, produces error on compile)
        yolov3_output = yolov3_model._spec.description.output
        yolov3_output[0].type.multiArrayType.shape[:] = [num_anchors, num_classes, 1]
        yolov3_output[1].type.multiArrayType.shape[:] = [num_anchors, 4, 1]

        nms_input = nms_model._spec.description.input
        for i in range(2):
            nms_input[i].type.multiArrayType.shape[:] = yolov3_output[i].type.multiArrayType.shape[:]
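The excerpt stops after the output shapes are aligned. A hedged sketch of how such a pipeline is typically completed (not shown in the excerpt; it assumes yolov3_model and nms_model are the MLModel objects built earlier in the script):

import coremltools

pipeline.add_model(yolov3_model)   # Pipeline.add_model accepts a spec or an MLModel
pipeline.add_model(nms_model)

final_model = coremltools.models.MLModel(pipeline.spec)
final_model.save('yolov3_pipeline.mlmodel')   # the filename is illustrative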
github apple / coremltools / coremltools / models / feature_vectorizer.py
        new_feature = feature_vectorizer.inputList.add()
        new_feature.inputColumn = n
        new_feature.inputDimensions = dim

    if not isinstance(output_feature_name, _string_types):
        if (is_valid_feature_list(output_feature_name) 
                and len(output_feature_name) == 1
                and output_feature_name[0][1] == datatypes.Array(num_output_dimensions)):

            output_feature_name = output_feature_name[0][0]

        else:
            raise TypeError("Output feature must be specified as a "
                    "feature name or correct output feature list.")

    output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))]
    set_transform_interface_params(spec, input_features, output_features)

    return spec, num_output_dimensions
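This excerpt is the tail of the module's create_feature_vectorizer helper. A hedged usage sketch, with invented feature names:

from coremltools.models import datatypes
from coremltools.models.feature_vectorizer import create_feature_vectorizer

# Concatenate a length-1 and a length-4 array feature into one output vector.
input_features = [('age', datatypes.Array(1)), ('scores', datatypes.Array(4))]
spec, num_dims = create_feature_vectorizer(input_features, 'combined')
assert num_dims == 5   # the output dimension is the sum of the input sizes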
github apple / turicreate / src / unity / python / turicreate / toolkits / image_similarity / image_similarity.py
            scene_print.visionFeaturePrint.scene.version = 1

            input = scene_print.description.input.add()
            input.name = self.feature
            input.type.imageType.width = 299
            input.type.imageType.height = 299
            input.type.imageType.colorSpace = BGR_VALUE

            feature_layer = 'VisionFeaturePrint_Scene_output'
            output = scene_print.description.output.add()
            output.name = feature_layer
            output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
            output.type.multiArrayType.shape.append(2048)

            # Neural network builder
            input_features = [(feature_layer, _datatypes.Array(2048))]
            builder = _neural_network.NeuralNetworkBuilder(input_features, output_features)

        # To add the nearest neighbors model we add calculation of the euclidean 
        # distance between the newly extracted query features (denoted by the vector u)
        # and each extracted reference feature (denoted by the rows of matrix V).
        # Calculation of sqrt((v_i-u)^2) = sqrt(v_i^2 - 2v_i*u + u^2) ensues.
        V = reference_data
        v_squared = (V * V).sum(axis=1)
        builder.add_inner_product('v^2-2vu', W=-2 * V, b=v_squared, has_bias=True,
                                  input_channels=embedding_size, output_channels=num_examples,
                                  input_name=feature_layer, output_name='v^2-2vu')

        builder.add_unary('element_wise-u^2', mode='power', alpha=2,
                          input_name=feature_layer, output_name='element_wise-u^2')

        # Produce a vector of length num_examples with all values equal to u^2
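The identity in the comment is easy to verify with a standalone numpy check (not part of the toolkit; the shapes are arbitrary):

import numpy as np

V = np.random.rand(10, 2048)   # reference features, one row per example (v_i)
u = np.random.rand(2048)       # query feature

direct = np.sqrt(((V - u) ** 2).sum(axis=1))
expanded = np.sqrt((V * V).sum(axis=1) - 2 * V.dot(u) + (u * u).sum())
assert np.allclose(direct, expanded)   # sqrt(v^2 - 2vu + u^2) == ||v - u||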
github hpi-xnor / BMXNet-v2 / tools / coreml / converter / _mxnet_converter.py
    shapes = net.infer_shape(**input_shape)
    arg_names = net.list_arguments()
    output_names = net.list_outputs()
    aux_names = net.list_auxiliary_states()
    shape_dict = {}
    for idx, op in enumerate(arg_names):
        shape_dict[op] = shapes[0][idx]
    for idx, op in enumerate(output_names):
        shape_dict[op] = shapes[1][idx]
    for idx, op in enumerate(aux_names):
        shape_dict[op] = shapes[2][idx]

    # Get the inputs and outputs
    output_dims = shapes[1]
    input_types = [_datatypes.Array(*dim) for dim in input_dims]
    output_types = [_datatypes.Array(*dim) for dim in output_dims]

    # Make the builder
    input_features = list(zip(input_names, input_types))    # list() keeps the pairs reusable on Python 3
    output_features = list(zip(output_names, output_types))
    builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
    # Get out the layers
    net = _json.loads(net.tojson())
    nodes = net['nodes']

    for i, node in enumerate(nodes):
        node['id'] = i

        if node['name'] in shape_dict:
            node['shape'] = shape_dict[node['name']]

        node['outputs'] = []
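An aside on the Array(*dim) idiom above: Array takes each dimension as a separate argument, so inferred shape tuples are star-unpacked (the shape below is made up):

from coremltools.models import datatypes

inferred_dims = [(3, 224, 224)]   # hypothetical result of shape inference
types = [datatypes.Array(*dim) for dim in inferred_dims]
assert types[0] == datatypes.Array(3, 224, 224)   # datatypes compare by value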
github apple / coremltools / coremltools / converters / sklearn / _converter_internal.py
    # If the first component of the object list is the sklearn dict vectorizer,
    # which is unique in that it accepts a list of dictionaries, then we can
    # get the feature type mapping from that.  This then may require the addition
    # of several OHE steps, so those need to be processed in the first stage.
    if isinstance(obj_list[0].sk_obj, _dict_vectorizer.sklearn_class):

        dv_obj = obj_list[0].sk_obj
        output_dim = len(_dict_vectorizer.get_input_feature_names(dv_obj))
 
        if not isinstance(input_features, _string_types):
            raise TypeError("If the first transformer in a pipeline is a "
                            "DictVectorizer, then the input feature must be the name "
                            "of the input dictionary.")

        input_features = [(input_features, datatypes.Dictionary(str))]
       
        if len(obj_list) > 1:
            output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME 

        else:
            if output_feature_names is None:
                output_feature_name = "transformed_features"

            elif isinstance(output_feature_names, _string_types):
                output_feature_name = output_feature_names
            
            else:
                raise TypeError(
                    "For a transformer pipeline, the "
                    "output_features needs to be None or a string "
                    "for the predicted value.")
github apple / coremltools / coremltools / converters / keras / _keras_converter.py
    # Retrieve output shapes from model
    if type(model.output_shape) is list:
        output_dims = [list(filter(None, x)) for x in model.output_shape]
    else:
        output_dims = [list(filter(None, model.output_shape[1:]))]

    for idx, dim in enumerate(output_dims):
        if len(dim) == 1:
            output_dims[idx] = dim
        elif len(dim) == 2:  # [Seq, D]
            output_dims[idx] = (dim[1],)
        elif len(dim) == 3:
            output_dims[idx] = (dim[2], dim[1], dim[0])

    input_types = [datatypes.Array(*dim) for dim in input_dims]
    output_types = [datatypes.Array(*dim) for dim in output_dims]

    # Some of the feature handling is sensitive about string vs. unicode
    input_names = map(str, input_names)
    output_names = map(str, output_names)
    is_classifier = class_labels is not None
    if is_classifier:
        mode = 'classifier'
    else:
        mode = None

    # assuming these match
    input_features = list(zip(input_names, input_types))
    output_features = list(zip(output_names, output_types))

    builder = _NeuralNetworkBuilder(input_features, output_features, mode = mode)
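Condensing the shape handling above into one worked case, with a made-up Keras output shape:

from coremltools.models import datatypes

output_shape = (None, 20, 64)               # hypothetical Keras (batch, seq, D) output
dim = list(filter(None, output_shape[1:]))  # drop the batch axis -> [20, 64]
if len(dim) == 2:                           # [Seq, D] keeps only the feature size
    dim = (dim[1],)
output_type = datatypes.Array(*dim)         # -> Array(64)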
github onnx / onnx-coreml / onnx_coreml / converter.py
    features = []
    outputs = graph.outputs
    op_types = graph.blob_from_op_type
    ops_allowing_zerod_output = {'Size'}

    for output_ in outputs:
        if op_types[output_[0]] in ops_allowing_zerod_output and len(output_[2]) == 0:
            output_ = list(output_)
            output_[2] = (1,)

        if disable_coreml_rank5_mapping:
            shape = output_[2]
            if len(shape) > 5:
                raise ValueError('ONNX output %s has a rank greater than 5, which is not supported in CoreML framework' % str(output_[0]))
            else:
                features.append((str(output_[0]), datatypes.Array(*shape)))
            continue

        if not forceShape:
            features.append((str(output_[0]), None))
        else:
            shape = output_[2]
            if len(shape) == 0:
                shape = [1, 1, 1]
            elif len(shape) == 1:
                pass
            elif len(shape) == 3:
                if output_[0] in op_types and \
                        str(op_types[output_[0]]) in _SEQUENCE_LAYERS_REGISTRY:
                    # onnx shape: (Seq,B,C)
                    shape = [shape[2]]
            elif len(shape) == 4:  # (B,C,H,W) --> (C,H,W)
github apple / turicreate / src / unity / python / turicreate / toolkits / object_detector / object_detector.py
        # Copy over params from net
        mod.init_params()
        arg_params, aux_params = mod.get_params()
        net_params = net.collect_params()
        new_arg_params = {}
        for k, param in arg_params.items():
            new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        new_aux_params = {}
        for k, param in aux_params.items():
            new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
        mod.set_params(new_arg_params, new_aux_params)

        input_names = [self.feature]
        input_dims = [list(self.input_image_shape)]
        input_types = [datatypes.Array(*dim) for dim in input_dims]
        input_features = list(zip(input_names, input_types))

        num_spatial = self._grid_shape[0] * self._grid_shape[1]
        num_bounding_boxes = num_anchors * num_spatial
        CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression 
            else "confidence")
        COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression 
            else "coordinates")
        output_names = [
            CONFIDENCE_STR,
            COORDINATES_STR
        ]
        output_dims = [
            (num_bounding_boxes, num_classes),
            (num_bounding_boxes, 4),
        ]
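The excerpt ends just before the output types are assembled; following the same pattern as the Keras converter above, the continuation would plausibly be:

        output_types = [datatypes.Array(*dim) for dim in output_dims]
        output_features = list(zip(output_names, output_types))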