How to use the coremltools.models.datatypes.Array class in coremltools

To help you get started, we’ve selected a few coremltools examples that show how datatypes.Array is used in popular public projects.

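As a quick orientation, here is a minimal sketch that is not taken from any of the projects below; the feature names and the 3-element shape are illustrative. It declares the builder's input and output shapes with datatypes.Array and wires them through a single pass-through layer.

import coremltools
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# Declare a 3-element vector input and output (names and sizes are illustrative).
input_features = [('input', datatypes.Array(3))]
output_features = [('output', datatypes.Array(3))]

# Build a one-layer network that passes the input through a linear activation.
builder = NeuralNetworkBuilder(input_features, output_features)
builder.add_activation(name='identity', non_linearity='LINEAR',
                       input_name='input', output_name='output')

# Wrap the spec in an MLModel; prediction requires macOS, but the spec can be
# saved anywhere with model.save('identity.mlmodel').
model = coremltools.models.MLModel(builder.spec)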

github onnx / onnxmltools / tests / coreml / test_cml_AllNeuralNetworkConverters.py
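This onnxmltools test uses datatypes.Array(*dim) to describe the input and output feature shapes of a split layer built with NeuralNetworkBuilder, then converts the resulting Core ML spec to ONNX.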
def test_split_converter(self):
        input_dim = (8, 1, 1)
        output_dim = (4, 1, 1)
        inputs = [('input', datatypes.Array(*input_dim))]
        outputs = [('output1', datatypes.Array(*output_dim)), ('output2', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        builder.add_split(name='Split', input_name='input', output_names=['output1', 'output2'])
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)
github apache / incubator-tvm / tests / python / frontend / coreml / test_forward.py
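This TVM frontend test declares the convolution's input and output as datatypes.Array features, builds a single convolution layer, and checks the compiled TVM output against a NumPy reference.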
def verify_convolution(input_dim, filter, padding):
    dtype = 'float32'
    N, C, H, W = input_dim
    OC, _, KH, KW = filter
    a_np = np.random.uniform(size=input_dim).astype(dtype)
    w_np = np.random.uniform(size=(OC, C, KH, KW)).astype(dtype)
    w_np_cm = np.transpose(w_np, axes=(2, 3, 1, 0))
    b_np = conv2d_nchw_python(a_np, w_np, [1, 1], padding)
    inputs = [('input1', datatypes.Array(C, H, W))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_convolution(name='conv', kernel_channels=3, output_channels=OC,
                            height=KH, width=KW, stride_height=1, stride_width=1,
                            border_mode=padding.lower(), groups=1,
                            W=w_np_cm, b=None, has_bias=False,
                            is_deconv=False,
                            input_name='input1',
                            output_name='output')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np],
                            ['input1'], output_shape=None)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
github apple / coremltools / coremltools / converters / sklearn / _converter_internal.py
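A fragment of coremltools' own scikit-learn pipeline converter: when the pipeline begins with a DictVectorizer, its output is declared as a datatypes.Array of the vectorizer's output dimension and appended to the pipeline.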
            output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME

        else:
            if output_feature_names is None:
                output_feature_name = "transformed_features"

            elif isinstance(output_feature_names, _string_types):
                output_feature_name = output_feature_names
            
            else:
                raise TypeError(
                    "For a transformer pipeline, the "
                    "output_features needs to be None or a string "
                    "for the predicted value.")
 
        output_features = [(output_feature_name, datatypes.Array(output_dim))]

        spec = _dict_vectorizer.convert(dv_obj, input_features, output_features)._spec
        pipeline_list.append(Output(spec, input_features, output_features) )

        # Set up the environment for the rest of the pipeline
        current_input_features = output_features
        current_num_dimensions = output_dim
    
        # In the corner case that it's only the dict vectorizer here, just return
        # and exit with that at this point. 
        if len(obj_list) == 1:
            return spec
        else:
            del obj_list[0]

    else:
github apple / coremltools / coremltools / converters / sklearn / _converter_internal.py
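Another fragment from the same converter: if the inputs are not already a single Array feature, a feature vectorizer is added to concatenate them into one datatypes.Array of the combined dimension.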
        features = _fm.process_or_validate_features(input_features, num_dimensions)
        current_num_dimensions = _fm.dimension_of_array_features(features)

        # Add in a feature vectorizer that consolidates all of the feature inputs
        # into the form expected by scipy's pipelines.  Essentially this is a
        # translation layer between the coreML form with named arguments and the
        # scikit learn variable form.
        if len(features) == 1 and isinstance(features[0][1], datatypes.Array):
            current_input_features = features
        else:
            spec, _output_dimension = create_feature_vectorizer(
                    features, _PIPELINE_INTERNAL_FEATURE_NAME)

            assert _output_dimension == current_num_dimensions
            ft_out_features = [(_PIPELINE_INTERNAL_FEATURE_NAME, 
                                datatypes.Array(current_num_dimensions))]
            pipeline_list.append( Output(spec, features, ft_out_features) )
            current_input_features = ft_out_features

    # Now, validate the sequence of transformers to make sure we have something
    # that can work with all of this.
    for i, (_, _, m) in enumerate(obj_list[:-1]):
        if m.model_type != "transformer":
            raise ValueError("Only a sequence of transformer classes followed by a "
                    "single transformer, regressor, or classifier is currently supported. "
                    "(object in position %d interpreted as %s)" % (i, m.model_type))

    overall_mode = obj_list[-1].module.model_type
    assert overall_mode in ('transformer', 'regressor', 'classifier')

    # Now, go through each transformer in the sequence of transformers and add
    # it to the pipeline.
github apple / coremltools / coremltools / converters / sklearn / _converter_internal.py
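The final fragment from the converter: the last stage's output is a scalar datatypes.Double for regressors, or a datatypes.Array sized to the transformed dimension for transformers.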
            output_features = [("prediction", datatypes.Double())]
        elif isinstance(output_feature_names, _string_types):
            output_features = [(output_feature_names, datatypes.Double())]
        else:
            raise TypeError("For a regressor object or regressor pipeline, the "
                            "output_features needs to be None or a string for the predicted value.")

    else:   # transformer
        final_output_dimension = last_sk_m.update_dimension(last_sk_obj, current_num_dimensions)

        if output_feature_names is None:
            output_features = [("transformed_features",
                                datatypes.Array(final_output_dimension))]

        elif isinstance(output_feature_names, _string_types):
            output_features = [(output_feature_names, datatypes.Array(final_output_dimension))]

        else:
            raise TypeError("For a transformer object or transformer pipeline, the "
                            "output_features needs to be None or a string for the "
                            "name of the transformed value.")

    last_spec = last_sk_m.convert(last_sk_obj, current_input_features, output_features)._spec

    pipeline_list.append( Output(last_spec, current_input_features, output_features) )

    # Now, create the pipeline and return the spec for it.

    # If it's just one element, we can return it.
    if len(pipeline_list) == 1:
        return pipeline_list[0].spec
github apple / turicreate / src / python / turicreate / toolkits / image_similarity / image_similarity.py
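Turi Create's image similarity exporter declares the 'distance' output as a datatypes.Array with one entry per reference example, and the image input as a datatypes.Array matching the feature extractor's input shape.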
>>> ml_model.predict(data={'image': image})
        {'distance': array([ 0., 28.453125, 24.96875 ])}
        """
        import numpy as _np
        from copy import deepcopy
        import coremltools as _cmt
        from coremltools.models import datatypes as _datatypes, neural_network as _neural_network
        from turicreate.toolkits import _coreml_utils

        # Get the reference data from the model
        proxy = self.similarity_model.__proxy__
        reference_data = _np.array(_tc.extensions._nearest_neighbors._nn_get_reference_data(proxy))
        num_examples, embedding_size = reference_data.shape

        output_name = 'distance'
        output_features = [(output_name, _datatypes.Array(num_examples))]

        if self.model != 'VisionFeaturePrint_Scene':
            # Get the Core ML spec for the feature extractor
            ptModel = _pre_trained_models.IMAGE_MODELS[self.model]()
            feature_extractor = _image_feature_extractor.TensorFlowFeatureExtractor(ptModel)
            feature_extractor_spec = feature_extractor.get_coreml_model().get_spec()

            input_name = feature_extractor.coreml_data_layer
            input_features = [(input_name, _datatypes.Array(*(self.input_image_shape)))]

            # Convert the neuralNetworkClassifier to a neuralNetwork
            layers = deepcopy(feature_extractor_spec.neuralNetworkClassifier.layers)
            for l in layers:
                feature_extractor_spec.neuralNetwork.layers.append(l)

            builder = _neural_network.NeuralNetworkBuilder(input_features, output_features,
github hollance / coreml-survival-guide / NeuralNetworkBuilder / convert_to_coreml.py
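This conversion script from the Core ML Survival Guide declares a 3×32×32 image input with datatypes.Array and assembles a CIFAR-10 classifier using NeuralNetworkBuilder in classifier mode.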
def get_weights(layer_name):
    weights = f[layer_name + "/0"][...]
    
    # Transpose the weights for a convolutional layer.
    if weights.ndim == 4:
        weights = weights.transpose(2, 3, 1, 0)

    biases = f[layer_name + "/1"][...]
    return weights, biases


import coremltools
from coremltools.models import datatypes
from coremltools.models import neural_network

input_features = [ ("image", datatypes.Array(3, 32, 32)) ]
output_features = [ ("labelProbs", None) ]

builder = neural_network.NeuralNetworkBuilder(input_features, 
                                              output_features, 
                                              mode="classifier")

builder.set_pre_processing_parameters(image_input_names=["image"], 
                                      is_bgr=False,
                                      red_bias=-125.3,
                                      green_bias=-122.95,
                                      blue_bias=-113.87)

cifar10_labels = ["airplane", "automobile", "bird", "cat", "deer", 
                  "dog", "frog", "horse", "ship", "truck"]

builder.set_class_labels(class_labels=cifar10_labels,
github tf-coreml / tf-coreml / tfcoreml / _tf_coreml_converter.py
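In tf-coreml, each TensorFlow input tensor's shape is mapped to a Core ML shape and registered as a datatypes.Array input feature before the NeuralNetworkBuilder is constructed (the excerpt picks up mid-function).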
%(input_name))
      shape = context.shape_dict_rank_4[input_name]

    if len(shape) == 4 and shape[0] != 1:
      sequence_inputs[input_name] = shape[0]

    # if the consumer of input_tensor is a one-hot encoding op,
    # treat it as a sequence.
    consumer_op = input_tensor.consumers()[0]
    if consumer_op.type == 'OneHot':
      shape = [1,]
      sequence_inputs[input_name] = -1
    else:
      shape = _infer_coreml_input_shape(shape)
    input_features.append(
        (compat.as_str_any(input_name), datatypes.Array(*shape)))

  # Set classifier flag
  is_classifier = class_labels is not None
  mode = 'classifier' if is_classifier else None

  # Convert the TF graph with builder
  input_features = list(input_features)
  output_features = list(output_features)
  builder = NeuralNetworkBuilder(input_features, output_features, mode=mode)
  context.builder = builder
  context.session = sess
  context.input_feed_dict = input_feed_dict
  context.unused_ops = unused_ops
  context.effectively_constant_ops = effectively_constant_ops
  context.add_custom_layers = add_custom_layers
  context.custom_conversion_functions = custom_conversion_functions
github apple / turicreate / src / python / turicreate / toolkits / image_similarity / image_similarity.py
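A longer excerpt from the same Turi Create exporter as above, which also shows the beginning of the VisionFeaturePrint_Scene pipeline branch.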
# Get the reference data from the model
        proxy = self.similarity_model.__proxy__
        reference_data = _np.array(_tc.extensions._nearest_neighbors._nn_get_reference_data(proxy))
        num_examples, embedding_size = reference_data.shape

        output_name = 'distance'
        output_features = [(output_name, _datatypes.Array(num_examples))]

        if self.model != 'VisionFeaturePrint_Scene':
            # Get the Core ML spec for the feature extractor
            ptModel = _pre_trained_models.IMAGE_MODELS[self.model]()
            feature_extractor = _image_feature_extractor.TensorFlowFeatureExtractor(ptModel)
            feature_extractor_spec = feature_extractor.get_coreml_model().get_spec()

            input_name = feature_extractor.coreml_data_layer
            input_features = [(input_name, _datatypes.Array(*(self.input_image_shape)))]

            # Convert the neuralNetworkClassifier to a neuralNetwork
            layers = deepcopy(feature_extractor_spec.neuralNetworkClassifier.layers)
            for l in layers:
                feature_extractor_spec.neuralNetwork.layers.append(l)

            builder = _neural_network.NeuralNetworkBuilder(input_features, output_features,
                                                            spec=feature_extractor_spec)
            feature_layer = feature_extractor.coreml_feature_layer

        else:     # self.model == VisionFeaturePrint_Scene
            # Create a pipeline that contains a VisionFeaturePrint followed by a
            # neural network.
            BGR_VALUE = _cmt.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
            DOUBLE_ARRAY_VALUE = _cmt.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
            INPUT_IMAGE_SHAPE = 299