How to use the coremltools.models.neural_network.NeuralNetworkBuilder class in coremltools

To help you get started, we’ve selected a few coremltools examples that show popular ways NeuralNetworkBuilder is used in public projects.
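
Before the project excerpts, here is a minimal, self-contained sketch of the typical workflow: declare the input and output interface with datatypes.Array, add layers to the builder, and wrap the resulting protobuf spec in an MLModel. The layer name, weights, and file path below are purely illustrative; the excerpts that follow assume analogous imports (NeuralNetworkBuilder, datatypes, numpy and, for the onnxmltools tests, convert_coreml).

import numpy as np
from coremltools.models import datatypes, MLModel
from coremltools.models.neural_network import NeuralNetworkBuilder

# Describe the network interface: a 3-element input and a 2-element output.
input_features = [('input', datatypes.Array(3))]
output_features = [('output', datatypes.Array(2))]

builder = NeuralNetworkBuilder(input_features, output_features)

# A single inner-product (fully connected) layer mapping 3 -> 2.
# Core ML expects W with shape (output_channels, input_channels).
builder.add_inner_product(name='fc1',
                          W=np.random.rand(2, 3),
                          b=np.zeros(2),
                          input_channels=3,
                          output_channels=2,
                          has_bias=True,
                          input_name='input',
                          output_name='output')

# The builder exposes the underlying protobuf spec, which can be wrapped and saved.
model = MLModel(builder.spec)
model.save('tiny_fc.mlmodel')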


From onnx/onnxmltools: tests/coreml/test_cml_AllNeuralNetworkConverters.py

def test_flatten_converter(self):
        input_dim = (1, 2, 3)
        output_dim = (6, 1, 1)
        inputs = [('input', datatypes.Array(*input_dim))]
        outputs = [('output', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        builder.add_flatten(name='Flatten', input_name='input', output_name='output', mode=1)
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)
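
Since convert_coreml returns a standard onnx ModelProto, the usual ONNX tooling can be applied to the result. A brief sketch (the output file name is arbitrary):

import onnx
import onnxmltools

# model_onnx is a plain onnx.ModelProto, so it can be validated and serialized.
onnx.checker.check_model(model_onnx)
onnxmltools.utils.save_model(model_onnx, 'flatten.onnx')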

From onnx/onnxmltools: tests/coreml/test_cml_AllNeuralNetworkConverters.py

def test_gru_converter(self):
        input_dim = (1, 8)
        output_dim = (1, 2)
        inputs = [('input', datatypes.Array(*input_dim))]
        outputs = [('output', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        W_h = [numpy.random.rand(2, 2), numpy.random.rand(2, 2), numpy.random.rand(2, 2)]
        W_x = [numpy.random.rand(2, 8), numpy.random.rand(2, 8), numpy.random.rand(2, 8)]
        b = [numpy.random.rand(2, 1), numpy.random.rand(2, 1), numpy.random.rand(2, 1)]
        builder.add_gru(name='GRU', W_h=W_h, W_x=W_x, b=b, hidden_size=2, input_size=8, input_names=['input'],
                        output_names=['output'], activation='TANH', inner_activation='SIGMOID_HARD', output_all=False,
                        reverse_input=False)
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)

From onnx/onnx-coreml: tests/test_mlmodel_passes.py

def test_load_constant_remove(self):
        input_features = [('data', datatypes.Array(*(3, 4)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        builder.add_activation('relu1', 'RELU', 'data', 'relu1')
        builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
        builder.add_activation('relu2', 'RELU', 'relu1', 'out')
        builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
        builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
        spec = builder.spec
        np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
        remove_disconnected_layers(spec)
        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
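
To see which layers a pass kept, the layer names can be read directly off the protobuf spec. A small sketch continuing from the test above:

# Each entry in spec.neuralNetwork.layers is a layer message with a name field.
print([layer.name for layer in spec.neuralNetwork.layers])
# For the graph built above, only the layers feeding the declared output survive,
# i.e. ['relu1', 'relu2'] after remove_disconnected_layers.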

From onnx/onnxmltools: tests/coreml/test_cml_AllNeuralNetworkConverters.py

def test_bias_converter(self):
        input_dim = (2, 1, 1)
        output_dim = (2, 1, 1)
        input = [('input', datatypes.Array(*input_dim))]
        output = [('output', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(input, output)
        bias = numpy.ndarray(shape=(2,))
        bias[:] = [1, 2]
        builder.add_bias(name='Bias', b=bias, input_name='input', output_name='output', shape_bias=[2])
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)

From onnx/onnxmltools: tests/coreml/test_cml_AllNeuralNetworkConverters.py

def test_embedding_converter(self):
        input_dim = (1, 1, 1, 1)
        output_dim = (1, 2, 1, 1)
        input = [('input', datatypes.Array(*input_dim))]
        output = [('output', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(input, output)
        weights = numpy.zeros(shape=(2))
        weights[:] = [-1, 1]
        bias = numpy.zeros(shape=(2))
        bias[:] = [-100, 100]
        builder.add_embedding(name='Embed', input_dim=1, W=weights, b=bias, output_channels=2, has_bias=True,
                              input_name='input', output_name='output')
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)

From onnx/onnxmltools: tests/coreml/test_cml_AllNeuralNetworkConverters.py

def test_unary_function_converter(self):
        input_dim = (3,)
        output_dim = (3,)
        input = [('input', datatypes.Array(*input_dim))]
        output = [('output', datatypes.Array(*output_dim))]
        builder = NeuralNetworkBuilder(input, output)
        builder.add_unary(name='Unary1', input_name='input', output_name='mid1', mode='abs')
        builder.add_unary(name='Unary2', input_name='mid1', output_name='mid2', mode='sqrt')
        builder.add_unary(name='Unary3', input_name='mid2', output_name='mid3', mode='rsqrt')
        builder.add_unary(name='Unary4', input_name='mid3', output_name='mid4', mode='inverse')
        builder.add_unary(name='Unary5', input_name='mid4', output_name='mid5', mode='power', alpha=2)
        builder.add_unary(name='Unary6', input_name='mid5', output_name='mid6', mode='exp')
        builder.add_unary(name='Unary7', input_name='mid6', output_name='mid7', mode='log')
        builder.add_unary(name='Unary8', input_name='mid7', output_name='output', mode='threshold')
        model_onnx = convert_coreml(builder.spec)
        self.assertTrue(model_onnx is not None)

From apple/turicreate: src/python/turicreate/toolkits/image_similarity/image_similarity.py

            input = scene_print.description.input.add()
            input.name = self.feature
            input.type.imageType.width = 299
            input.type.imageType.height = 299
            input.type.imageType.colorSpace = BGR_VALUE

            feature_layer = 'VisionFeaturePrint_Scene_output'
            output = scene_print.description.output.add()
            output.name = feature_layer
            output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
            output.type.multiArrayType.shape.append(2048)

            # Neural network builder
            input_features = [(feature_layer, _datatypes.Array(2048))]
            builder = _neural_network.NeuralNetworkBuilder(input_features, output_features)

        # To add the nearest neighbors model we add calculation of the euclidean
        # distance between the newly extracted query features (denoted by the vector u)
        # and each extracted reference feature (denoted by the rows of matrix V).
        # Calculation of sqrt((v_i-u)^2) = sqrt(v_i^2 - 2v_i*u + u^2) ensues.
        V = reference_data
        v_squared = (V * V).sum(axis=1)
        builder.add_inner_product('v^2-2vu', W=-2 * V, b=v_squared, has_bias=True,
                                  input_channels=embedding_size, output_channels=num_examples,
                                  input_name=feature_layer, output_name='v^2-2vu')

        builder.add_unary('element_wise-u^2', mode='power', alpha=2,
                          input_name=feature_layer, output_name='element_wise-u^2')

        # Produce a vector of length num_examples with all values equal to u^2
        builder.add_inner_product('u^2', W=_np.ones((embedding_size, num_examples)),
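
The excerpt continues in the original source; the inner-product trick it sets up relies on expanding the squared Euclidean distance, ||v_i - u||^2 = ||v_i||^2 - 2 v_i.u + ||u||^2. A quick NumPy check of that identity (the array shapes here are made up):

import numpy as np

V = np.random.rand(5, 3)   # rows are reference feature vectors
u = np.random.rand(3)      # query feature vector

direct = ((V - u) ** 2).sum(axis=1)                          # squared distances, computed directly
expanded = (V * V).sum(axis=1) - 2 * V @ u + (u * u).sum()   # the expansion used by the layers above
assert np.allclose(direct, expanded)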

From cloud-annotations/training: trainer/src/convert/build_decoder.py

    # MLMultiArray inputs of neural networks must have 1 or 3 dimensions.
    # We only have 2, so add an unused dimension of size one at the back.
    input_features = [
        ("scores", datatypes.Array(num_classes + 1, num_anchors, 1)),
        ("boxes", datatypes.Array(4, num_anchors, 1))
    ]

    # The outputs of the decoder model should match the inputs of the next
    # model in the pipeline, NonMaximumSuppression. This expects the number
    # of bounding boxes in the first dimension.
    output_features = [
        ("raw_confidence", datatypes.Array(num_anchors, num_classes)),
        ("raw_coordinates", datatypes.Array(num_anchors, 4))
    ]

    builder = neural_network.NeuralNetworkBuilder(input_features, output_features)

    # (num_classes+1, num_anchors, 1) --> (1, num_anchors, num_classes+1)
    builder.add_permute(
        name="permute_scores",
        dim=(0, 3, 2, 1),
        input_name="scores",
        output_name="permute_scores_output")

    # Strip off the "unknown" class (at index 0).
    builder.add_slice(
        name="slice_scores",
        input_name="permute_scores_output",
        output_name="raw_confidence",
        axis="width",
        start_index=1,
        end_index=num_classes + 1)
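
For intuition, here is a rough NumPy analogue of what the permute and slice layers compute; the shapes follow the comments in the excerpt, and the sizes chosen are arbitrary:

import numpy as np

num_classes, num_anchors = 3, 5
scores = np.random.rand(num_classes + 1, num_anchors, 1)      # Core ML (C, H, W) layout

# add_permute's dim=(0, 3, 2, 1) acts on a (seq, C, H, W) blob; with seq=1 it swaps C and W.
permuted = np.transpose(scores[np.newaxis], (0, 3, 2, 1))[0]  # -> (1, num_anchors, num_classes + 1)

# add_slice along "width" with start_index=1 drops the "unknown" class at index 0.
class_scores = permuted[..., 1:]                              # -> (1, num_anchors, num_classes)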

From mittagessen/kraken: kraken/lib/vgsl.py

def save_model(self, path: str):
        """
        Serializes the model into path.

        Args:
            path (str): Target destination
        """
        inputs = [('input', datatypes.Array(*self.input))]
        outputs = [('output', datatypes.Array(*self.output))]
        net_builder = NeuralNetworkBuilder(inputs, outputs)
        input = 'input'
        prev_device = next(next(self.nn.children()).parameters()).device
        try:
            for name, layer in self.nn.to('cpu').named_children():
                input = layer.serialize(name, input, net_builder)
            mlmodel = MLModel(net_builder.spec)
            mlmodel.short_description = 'kraken recognition model'
            mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']'
            if self.codec:
                mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l)
            if self.user_metadata:
                mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata)
            mlmodel.save(path)
        finally:
            self.nn.to(prev_device)
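
Once serialized, the model can be loaded back with coremltools to confirm the metadata written above. A short sketch (the path is a placeholder):

from coremltools.models import MLModel

loaded = MLModel('kraken_model.mlmodel')           # placeholder path
print(loaded.short_description)                    # 'kraken recognition model'
print(loaded.user_defined_metadata.get('vgsl'))    # the serialized VGSL spec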