How to use the coremltools.proto.FeatureTypes_pb2 module in coremltools

To help you get started, we've selected a few coremltools examples based on popular ways FeatureTypes_pb2 is used in public projects.

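FeatureTypes_pb2 is the protoc-generated module that defines the messages and enums behind a Core ML model's input and output descriptions. Most of the examples below follow one pattern: get the model's spec, assign an enum value such as ArrayFeatureType.DOUBLE or ImageFeatureType.ColorSpace.BGR, and rebuild the model. A minimal sketch of that pattern ('model.mlmodel' is an illustrative path; the calls are the ones the examples below rely on):

# Minimal sketch -- the model path is an assumption.
import coremltools
from coremltools.proto import FeatureTypes_pb2 as ft

model = coremltools.models.MLModel('model.mlmodel')
spec = model.get_spec()

# Retype the first output's multiarray to DOUBLE via the ArrayDataType enum.
output = spec.description.output[0]
if output.type.HasField('multiArrayType'):
    output.type.multiArrayType.dataType = \
        ft.ArrayFeatureType.ArrayDataType.Value('DOUBLE')

model = coremltools.models.MLModel(spec)  # rebuild from the patched spec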

github ANRGUSC / Jupiter / app_specific_files / demotest_backup_circe / scripts / utils / onnx2coreml.py
This script converts an ONNX YOLOv3 model to Core ML, retypes both multiarray outputs to DOUBLE, converts the weights to FP16, and appends explicit output shapes.
        # Excerpt; assumes earlier in the script: import onnx, from onnx import onnx_pb,
        # from onnx_coreml import convert, and a loaded ONNX `model`.
        # Print a human readable representation of the graph
        print(onnx.helper.printable_graph(model.graph))

        with open(f, 'rb') as model_file:
            model_proto = onnx_pb.ModelProto()
            model_proto.ParseFromString(model_file.read())
        yolov3_model = convert(model_proto, image_input_names=['0'], preprocessing_args={'image_scale': 1. / 255})

        # 2. Reduce model to FP16, change outputs to DOUBLE and save
        import coremltools

        spec = yolov3_model.get_spec()
        for i in range(2):
            spec.description.output[i].type.multiArrayType.dataType = \
                coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')

        spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(spec)
        yolov3_model = coremltools.models.MLModel(spec)

        name_out0 = spec.description.output[0].name
        name_out1 = spec.description.output[1].name

        num_classes = 80
        num_anchors = 507  # 507 for yolov3-tiny,
        spec.description.output[0].type.multiArrayType.shape.append(num_anchors)
        spec.description.output[0].type.multiArrayType.shape.append(num_classes)
        # spec.description.output[0].type.multiArrayType.shape.append(1)

        spec.description.output[1].type.multiArrayType.shape.append(num_anchors)
        spec.description.output[1].type.multiArrayType.shape.append(4)
        # spec.description.output[1].type.multiArrayType.shape.append(1)
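The excerpt ends before the patched spec is rebuilt and saved; a minimal continuation under the same assumptions (the output filename is illustrative) would be:

# Sketch: rebuild the model from the patched spec and persist it.
yolov3_model = coremltools.models.MLModel(spec)
yolov3_model.save('yolov3.mlmodel')  # illustrative output path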
github apple / turicreate / src / python / turicreate / toolkits / image_similarity / image_similarity.py
This export path has two branches: one rebuilds the classifier spec as a plain neuralNetwork, the other hand-builds a pipeline around VisionFeaturePrint_Scene, using FeatureTypes_pb2 enums for the BGR image input and DOUBLE multiarray output.
input_name = feature_extractor.coreml_data_layer
            input_features = [(input_name, _datatypes.Array(*(self.input_image_shape)))]

            # Convert the neuralNetworkClassifier to a neuralNetwork
            layers = deepcopy(feature_extractor_spec.neuralNetworkClassifier.layers)
            for l in layers:
                feature_extractor_spec.neuralNetwork.layers.append(l)

            builder = _neural_network.NeuralNetworkBuilder(input_features, output_features,
                                                            spec=feature_extractor_spec)
            feature_layer = feature_extractor.coreml_feature_layer

        else:     # self.model == VisionFeaturePrint_Scene
            # Create a pipeline that contains a VisionFeaturePrint followed by a
            # neural network.
            BGR_VALUE = _cmt.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
            DOUBLE_ARRAY_VALUE = _cmt.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
            INPUT_IMAGE_SHAPE = 299

            top_spec = _cmt.proto.Model_pb2.Model()
            top_spec.specificationVersion = 3
            desc = top_spec.description

            input = desc.input.add()
            input.name = self.feature
            input.type.imageType.width = INPUT_IMAGE_SHAPE
            input.type.imageType.height = INPUT_IMAGE_SHAPE
            input.type.imageType.colorSpace = BGR_VALUE

            output = desc.output.add()
            output.name = output_name
            output.type.multiArrayType.shape.append(num_examples)
            output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
github onnx / onnx-coreml / onnx_coreml / _backend_rep.py
_set_dtypes casts each array in an input feed dict to the numpy dtype declared by the spec's ArrayFeatureType.
import numpy as np
from coremltools.proto import FeatureTypes_pb2 as ft

def _set_dtypes(input_dict, #type: Dict[Text, np._ArrayLike[Any]]
                model, #type: MLModel
                ):
    # type: (...) -> None
    spec = model.get_spec()
    for input_ in spec.description.input:
        if input_.type.HasField('multiArrayType') and input_.name in input_dict:
            if input_.type.multiArrayType.dataType == ft.ArrayFeatureType.INT32:
                input_dict[input_.name] = input_dict[input_.name].astype(np.int32)
            elif input_.type.multiArrayType.dataType == ft.ArrayFeatureType.FLOAT32:
                input_dict[input_.name] = input_dict[input_.name].astype(np.float32)
            elif input_.type.multiArrayType.dataType == ft.ArrayFeatureType.DOUBLE:
                input_dict[input_.name] = input_dict[input_.name].astype(np.float64)
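A hypothetical call pattern (the feature name and array shape are assumptions, and mlmodel is a loaded coremltools.models.MLModel):

# Illustrative usage: numpy allocates float64 by default, so _set_dtypes
# casts the feed dict in place to whatever the spec declares.
inputs = {'image': np.zeros((1, 3, 224, 224))}
_set_dtypes(inputs, mlmodel)
outputs = mlmodel.predict(inputs)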
github apple / coremltools / coremltools / proto / DataStructures_pb2.py
Inside coremltools itself, the protoc-generated DataStructures_pb2 module imports FeatureTypes_pb2 as a public dependency and re-exports its symbols.
# protoc-generated module; the standard descriptor/symbol-database imports are omitted here.
_sym_db = _symbol_database.Default()


from . import FeatureTypes_pb2 as FeatureTypes__pb2

from .FeatureTypes_pb2 import *

DESCRIPTOR = _descriptor.FileDescriptor(
  name='DataStructures.proto',
  package='CoreML.Specification',
  syntax='proto3',
  serialized_pb=_b('\n\x14\x44\x61taStructures.proto\x12\x14\x43oreML.Specification\x1a\x12\x46\x65\x61tureTypes.proto\"|\n\x10StringToInt64Map\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.StringToInt64Map.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"|\n\x10Int64ToStringMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToStringMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x11StringToDoubleMap\x12=\n\x03map\x18\x01 \x03(\x0b\x32\x30.CoreML.Specification.StringToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"|\n\x10Int64ToDoubleMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"\x1e\n\x0cStringVector\x12\x0e\n\x06vector\x18\x01 \x03(\t\"\x1d\n\x0bInt64Vector\x12\x0e\n\x06vector\x18\x01 \x03(\x03\"\x1d\n\x0b\x46loatVector\x12\x0e\n\x06vector\x18\x01 \x03(\x02\"\x1e\n\x0c\x44oubleVector\x12\x0e\n\x06vector\x18\x01 \x03(\x01\"0\n\nInt64Range\x12\x10\n\x08minValue\x18\x01 \x01(\x03\x12\x10\n\x08maxValue\x18\x02 \x01(\x03\"\x1a\n\x08Int64Set\x12\x0e\n\x06values\x18\x01 \x03(\x03\"1\n\x0b\x44oubleRange\x12\x10\n\x08minValue\x18\x01 \x01(\x01\x12\x10\n\x08maxValue\x18\x02 \x01(\x01\x42\x02H\x03P\x00\x62\x06proto3')
  ,
  dependencies=[FeatureTypes__pb2.DESCRIPTOR,],
  public_dependencies=[FeatureTypes__pb2.DESCRIPTOR,])




_STRINGTOINT64MAP_MAPENTRY = _descriptor.Descriptor(
  name='MapEntry',
  full_name='CoreML.Specification.StringToInt64Map.MapEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='CoreML.Specification.StringToInt64Map.MapEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      # ... (generated descriptor code continues)
github tf-coreml / tf-coreml / utils / evaluate_mlmodel.py
An evaluation script for a converted TensorFlow style-transfer model; the disabled block shows how extra DOUBLE multiarray outputs were patched onto the spec.
import coremltools
import numpy as np

# Excerpt: image_input, index_input and output_name are defined earlier in the script.
image_input_coreml = np.expand_dims(image_input, axis = 0) #(1,1,256,256,3)
image_input_coreml = np.transpose(image_input_coreml, (0,1,4,2,3)) #(1,1,3,256,256)
index_input_coreml = np.reshape(index_input, (1,1,1,1,26)) #(seq, batch, C, H, W) == (1,1,1,1,26)

mlmodel = coremltools.models.MLModel('style.mlmodel')

if 0:  # disabled: one-time patch that added the DOUBLE outputs to the spec
    spec = mlmodel.get_spec()
    for out in output_name:
        #add the output name
        new_output = spec.description.output.add()
        new_output.name = out
        new_output_params = new_output.type.multiArrayType
        new_output_params.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
        coremltools.utils.save_spec(spec, "style.mlmodel")

    mlmodel = coremltools.models.MLModel('style.mlmodel')

coreml_input = {'input:0': image_input_coreml, 'style_num:0': index_input_coreml}
coreml_pred = mlmodel.predict(coreml_input)
for out in output_name:
    coreml_out = coreml_pred[out]
    print('coreml_out: ', out, ' : ', coreml_out.shape, coreml_out.flatten()[:10])
github apple / turicreate / src / python / turicreate / toolkits / style_transfer / style_transfer.py
_export_coreml_image stamps a GRAYSCALE or RGB color space plus width and height onto an image feature, via the ImageFeatureType.ColorSpace enum.
def _export_coreml_image(self, image, array_shape):
        from coremltools.proto import FeatureTypes_pb2 as ft

        channels, height, width = array_shape
        if channels == 1:
            image.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
        elif channels == 3:
            image.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value('RGB')
        else:
            raise ValueError("Channel Value %d not supported for image inputs" % channels)

        image.type.imageType.width = width
        image.type.imageType.height = height
github apple / turicreate / src / unity / python / turicreate / toolkits / image_similarity / image_similarity.py
The same export logic in the older src/unity layout: the first branch converts the MXNet feature extractor, the else branch again builds the VisionFeaturePrint_Scene pipeline by hand.
feature_extractor.data_layer,
                feature_extractor.feature_layer,
                feature_extractor.context,
                self.input_image_shape
            )
            batch_input_shape = (1, ) + self.input_image_shape
            _mxnet_converter.convert(mx_feature_extractor, mode=None,
                                     input_shape=[(input_name, batch_input_shape)],
                                     builder=builder, verbose=False)
            feature_layer = feature_extractor.feature_layer

        else:     # self.model == VisionFeaturePrint_Scene
            # Create a pipeline that contains a VisionFeaturePrint followed by a
            # neural network.
            BGR_VALUE = _cmt.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
            DOUBLE_ARRAY_VALUE = _cmt.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
            INPUT_IMAGE_SHAPE = 299

            top_spec = _cmt.proto.Model_pb2.Model()
            top_spec.specificationVersion = 3
            desc = top_spec.description

            input = desc.input.add()
            input.name = self.feature
            input.type.imageType.width = INPUT_IMAGE_SHAPE
            input.type.imageType.height = INPUT_IMAGE_SHAPE
            input.type.imageType.colorSpace = BGR_VALUE

            output = desc.output.add()
            output.name = output_name
            output.type.multiArrayType.shape.append(num_examples)
            output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
github onnx / onnx-coreml / onnx_coreml / converter.py
_update_multiarray_to_float32 retypes a single feature's multiarray to FLOAT32.
# Excerpt; assumes: from coremltools.proto import FeatureTypes_pb2 as ft
def _update_multiarray_to_float32(feature, #type: Any
                                 ): # type: (...) -> None
  if feature.type.HasField('multiArrayType'):
    feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32
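A hedged usage sketch (spec is assumed to be a loaded Model spec; a conversion pass would apply the helper across the spec's features):

# Illustrative only: retype every input and output that holds a multiarray.
for feature in list(spec.description.input) + list(spec.description.output):
    _update_multiarray_to_float32(feature)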
github apple / coremltools / coremltools / models / neural_network / builder.py
Inside NeuralNetworkBuilder, multiarray inputs listed in image_input_names are rewritten as image inputs, with the ColorSpace enum chosen from the channel count and the is_bgr flag, before per-channel scale and bias preprocessing is attached.
        # Excerpt, apparently from NeuralNetworkBuilder.set_pre_processing_parameters;
        # earlier lines normalize the other per-input dicts the same way.
        if not isinstance(image_scale, dict):
            image_scale = dict.fromkeys(image_input_names, image_scale)
        # Add image inputs
        for input_ in spec.description.input:
            if input_.name in image_input_names:
                if input_.type.WhichOneof('Type') == 'multiArrayType':
                    array_shape = tuple(input_.type.multiArrayType.shape)
                    channels, height, width = array_shape
                    if channels == 1:
                        input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
                    elif channels == 3:
                        if input_.name in is_bgr:
                            if is_bgr[input_.name]:
                                input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
                            else:
                                input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')    
                        else:
                            input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
                    else:
                        raise ValueError("Channel Value %d not supported for image inputs" % channels)
                    input_.type.imageType.width = width
                    input_.type.imageType.height = height
                    
                preprocessing = self.nn_spec.preprocessing.add()
                preprocessing.featureName = input_.name
                scaler = preprocessing.scaler
                if input_.name in image_scale:
                    scaler.channelScale = image_scale[input_.name]
                else:
                    scaler.channelScale = 1.0
                if input_.name in red_bias: scaler.redBias = red_bias[input_.name]
                if input_.name in blue_bias: scaler.blueBias = blue_bias[input_.name]
                # ... (excerpt truncated)