How to use the coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value function in coremltools

To help you get started, we've selected a few coremltools examples based on popular ways the library is used in public projects.
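
ColorSpace.Value is the standard protobuf enum helper: it maps a color-space name ('GRAYSCALE', 'RGB', or 'BGR') to the integer stored in a spec's imageType.colorSpace field, and ColorSpace.Name performs the reverse lookup. A minimal sketch:

from coremltools.proto import FeatureTypes_pb2 as ft

# Look up the enum integer for a color-space name...
rgb = ft.ImageFeatureType.ColorSpace.Value('RGB')
# ...and map it back to its name.
assert ft.ImageFeatureType.ColorSpace.Name(rgb) == 'RGB'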


github onnx / onnxmltools / tests / coreml / test_cml_AllNeuralNetworkConverters.py
# Imports this excerpt relies on (added for context):
from distutils.version import StrictVersion

import onnx
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.proto.FeatureTypes_pb2 import ImageFeatureType
from onnxmltools import convert_coreml

def test_image_input_type_converter(self):
    dim = (3, 15, 25)
    inputs = [('input', datatypes.Array(*dim))]
    outputs = [('output', datatypes.Array(*dim))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(name='Identity', input_names=['input'],
                            output_name='output', mode='ADD', alpha=0.0)
    spec = builder.spec
    input = spec.description.input[0]
    input.type.imageType.height = dim[1]
    input.type.imageType.width = dim[2]
    for coreml_colorspace, onnx_colorspace in (('RGB', 'Rgb8'), ('BGR', 'Bgr8'), ('GRAYSCALE', 'Gray8')):
        input.type.imageType.colorSpace = ImageFeatureType.ColorSpace.Value(coreml_colorspace)
        model_onnx = convert_coreml(spec)
        dims = [(d.dim_param or d.dim_value) for d in model_onnx.graph.input[0].type.tensor_type.shape.dim]
        self.assertEqual(dims, ['None', 1 if onnx_colorspace == 'Gray8' else 3, 15, 25])

        if StrictVersion(onnx.__version__) >= StrictVersion('1.2.1'):
            metadata = {prop.key: prop.value for prop in model_onnx.metadata_props}
            self.assertEqual(metadata, {'Image.BitmapPixelFormat': onnx_colorspace})
            self.assertEqual(model_onnx.graph.input[0].type.denotation, 'IMAGE')
            channel_denotations = [d.denotation for d in model_onnx.graph.input[0].type.tensor_type.shape.dim]
            self.assertEqual(channel_denotations, ['DATA_BATCH', 'DATA_CHANNEL', 'DATA_FEATURE', 'DATA_FEATURE'])
github apple / turicreate / src / unity / python / turicreate / toolkits / style_transfer / style_transfer.py
def _export_coreml_image(self, image, array_shape):
    from coremltools.proto import FeatureTypes_pb2 as ft

    channels, height, width = array_shape
    if channels == 1:
        image.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
    elif channels == 3:
        image.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value('RGB')
    else:
        raise ValueError("Channel Value %d not supported for image inputs" % channels)

    image.type.imageType.width = width
    image.type.imageType.height = height
github prisma-ai / torch2coreml / torch2coreml / _utils.py
# Module-level import used below:
from coremltools.proto import FeatureTypes_pb2 as ft

def _convert_multiarray_output_to_image(spec, feature_name, is_bgr=False):
    for output in spec.description.output:
        if output.name != feature_name:
            continue
        if output.type.WhichOneof('Type') != 'multiArrayType':
            raise ValueError(
                "{} is not a multiarray type".format(output.name,)
            )
        array_shape = tuple(output.type.multiArrayType.shape)
        if len(array_shape) == 2:
            height, width = array_shape
            output.type.imageType.colorSpace = \
                ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
        else:
            channels, height, width = array_shape

            if channels == 1:
                output.type.imageType.colorSpace = \
                    ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
            elif channels == 3:
                if is_bgr:
                    output.type.imageType.colorSpace = \
                        ft.ImageFeatureType.ColorSpace.Value('BGR')
                else:
                    output.type.imageType.colorSpace = \
                        ft.ImageFeatureType.ColorSpace.Value('RGB')
            else:
                raise ValueError(
                    "Channel Value {} not supported for image inputs"
                    .format(channels,)
                )

        output.type.imageType.width = width
        output.type.imageType.height = height
github onnx / onnx-coreml / onnx_coreml / converter.py
# ft aliases coremltools.proto.FeatureTypes_pb2 at module scope.
if array_shape[0] != 1:
    raise ValueError(
        "Shape {} is not supported for image output"
        .format(array_shape,)
    )
array_shape = array_shape[1:]

channels, height, width = array_shape

if channels == 1:
    output.type.imageType.colorSpace = \
        ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
elif channels == 3:
    if is_bgr:
        output.type.imageType.colorSpace = \
            ft.ImageFeatureType.ColorSpace.Value('BGR')
    else:
        output.type.imageType.colorSpace = \
            ft.ImageFeatureType.ColorSpace.Value('RGB')
else:
    raise ValueError(
        "Channel Value {} is not supported for image output"
        .format(channels,)
    )

output.type.imageType.width = width
output.type.imageType.height = height
github apple / coremltools / coremltools / models / neural_network / builder.py
# _FeatureTypes_pb2 is the module's alias for coremltools.proto.FeatureTypes_pb2.
if not isinstance(green_bias, dict): green_bias = dict.fromkeys(image_input_names, green_bias)
if not isinstance(gray_bias, dict): gray_bias = dict.fromkeys(image_input_names, gray_bias)
if not isinstance(image_scale, dict): image_scale = dict.fromkeys(image_input_names, image_scale)

# Add image inputs
for input_ in spec.description.input:
    if input_.name in image_input_names:
        if input_.type.WhichOneof('Type') == 'multiArrayType':
            array_shape = tuple(input_.type.multiArrayType.shape)
            channels, height, width = array_shape
            if channels == 1:
                input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
            elif channels == 3:
                if input_.name in is_bgr:
                    if is_bgr[input_.name]:
                        input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
                    else:
                        input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
                else:
                    input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
            else:
                raise ValueError("Channel Value %d not supported for image inputs" % channels)
            input_.type.imageType.width = width
            input_.type.imageType.height = height

        preprocessing = self.nn_spec.preprocessing.add()
        preprocessing.featureName = input_.name
        scaler = preprocessing.scaler
        if input_.name in image_scale:
            scaler.channelScale = image_scale[input_.name]
        else:
            scaler.channelScale = 1.0
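
This excerpt comes from NeuralNetworkBuilder.set_pre_processing_parameters. A minimal usage sketch, assuming a builder whose 'image' input should be treated as an RGB image scaled to [0, 1]:

# Sketch only: marks the 'image' input as an image and sets its channel scale.
builder.set_pre_processing_parameters(image_input_names=['image'],
                                      is_bgr=False,
                                      image_scale=1.0 / 255.0)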
github apple / turicreate / src / python / turicreate / toolkits / image_classifier / image_classifier.py
#
top_spec = coremltools.proto.Model_pb2.Model()
top_spec.specificationVersion = 3

desc = top_spec.description
desc.output.add().name = prob_name
desc.output.add().name = self.target

desc.predictedFeatureName = self.target
desc.predictedProbabilitiesName = prob_name

input = desc.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
input.type.imageType.colorSpace = BGR_VALUE

#
# VisionFeaturePrint extractor
#
pipelineClassifier = top_spec.pipelineClassifier
scene_print = pipelineClassifier.pipeline.models.add()
scene_print.specificationVersion = 3
scene_print.visionFeaturePrint.scene.version = 1

input = scene_print.description.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
input.type.imageType.colorSpace = BGR_VALUE
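
The enum lookup also works in reverse: ColorSpace.Name maps the stored integer back to its name. A small sketch (the helper name is ours) for inspecting a spec's image inputs:

from coremltools.proto import FeatureTypes_pb2 as ft

def describe_image_inputs(spec):
    # Hypothetical helper: print each image input's color space and size.
    for inp in spec.description.input:
        if inp.type.WhichOneof('Type') == 'imageType':
            color = ft.ImageFeatureType.ColorSpace.Name(inp.type.imageType.colorSpace)
            print(inp.name, color, inp.type.imageType.width, inp.type.imageType.height)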