How to use the onnxmltools.convert.common.model_util module in onnxmltools

To help you get started, we’ve selected a few onnxmltools examples, based on popular ways model_util is used in public projects.
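
The excerpts below come from the onnxmltools repository itself. As a rough orientation before reading them: the two model_util helpers that appear most often are make_tensor_value_info, which declares a named, typed value (a graph input or output), and make_tensor, which builds a constant tensor that a NodeBuilder can attach as an initializer. The following minimal sketch is assembled from the call patterns in the excerpts; the import paths are assumptions and may differ across onnxmltools versions.

    # Minimal sketch, based on the call patterns shown in the excerpts below.
    # NOTE: the import paths are assumptions; adjust them to your onnxmltools version.
    from onnxmltools.proto import onnx_proto
    from onnxmltools.convert.common import model_util

    # Declare a named 1x3 float value, e.g. to register as a node output.
    value_info = model_util.make_tensor_value_info(
        'probabilities', onnx_proto.TensorProto.FLOAT, [1, 3])

    # Build a constant tensor (name, element type, shape, flat values) that a
    # NodeBuilder can attach as an initializer.
    classes = model_util.make_tensor(
        'classes', onnx_proto.TensorProto.FLOAT, [1, 3], [0.0, 1.0, 2.0])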

github onnx / onnxmltools / tests / common / test_NodeBuilder.py
def test_initializer(self):
        context = ConvertContext()
        nb = NodeBuilder(context, "bar")
        nb.add_input("Input")
        nb.add_output("Output")

        test_array = [1,2,3]
        tensor = model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
        nb.add_initializer(tensor)
        node = nb.make_node()

        self.assertEqual(len(node.initializers), 1)
        self.assertEqual(node.initializers[0].name, 'bar_classes')
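
Note what the assertions check: the tensor is created under the name 'classes', but once attached through add_initializer the resulting node stores it as 'bar_classes', i.e. the initializer name is prefixed with the builder's node name ("bar").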

github onnx / onnxmltools / onnxmltools / convert / sklearn / SVMConverter.py
def convert(context, sk_node, inputs):
        nb = SVMConverter.convert(context, sk_node, inputs, "SVMRegressor")
        nb.add_attribute('post_transform', "NONE")
        output_dim = None
        try:
            if len(inputs[0].type.tensor_type.shape.dim) > 0:
                output_dim = [1, inputs[0].type.tensor_type.shape.dim[0].dim_value]
        except AttributeError as e:
            raise ValueError('Invalid or missing input dimension.')
        nb.add_attribute('n_supports', len(sk_node.support_))
        nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
        return nb.make_node()

github onnx / onnxmltools / onnxmltools / convert / coreml / datatype.py
def _handle_scalar_feature(cm_value, doc_string=''):
    which_type = cm_value.type.WhichOneof('Type')
    onnx_type = _convert(which_type)
    onnx_shape = [1]
    return model_util.make_tensor_value_info(cm_value.name, onnx_type, onnx_shape, doc_string)

github onnx / onnxmltools / onnxmltools / convert / sklearn / DictVectorizerConverter.py
                value_type = onnx_proto.TensorProto.FLOAT
            elif utils.is_numeric_type(feature_name):
                int64_vocabulary.append(feature_name)
                key_type = onnx_proto.TensorProto.INT64
                value_type = onnx_proto.TensorProto.FLOAT
            else:
                raise ValueError("Invalid or unsupported DictVectorizer type.")

        if len(string_vocabulary) > 0:
            nb.add_attribute('string_vocabulary', string_vocabulary)

        if len(int64_vocabulary) > 0:
            nb.add_attribute('int64_vocabulary', int64_vocabulary)

        nb.extend_inputs(inputs)
        nb.add_output(model_util.make_tensor_value_info(nb.name, value_type, [len(sk_node.feature_names_)]))

        return nb.make_node()

github onnx / onnxmltools / onnxmltools / convert / sklearn / GLMRegressorConverter.py
        nb.add_attribute('intercepts', intercepts)

        nb.extend_inputs(inputs)
        try:
            output_type = inputs[0].type.tensor_type.elem_type
        except AttributeError as e:
            raise ValueError('Invalid or missing input type for GLMRegressor.')
        if output_type == onnx_proto.TensorProto.STRING:
            raise ValueError('Invalid or missing input type for GLMRegressor.')
        output_dim = None
        try:
            if len(inputs[0].type.tensor_type.shape.dim) > 0:
                output_dim = [1, len(intercepts)]
        except AttributeError as e:
            raise ValueError('Invalid or missing input dimension for GLMRegressor.')
        nb.add_output(model_util.make_tensor_value_info(nb.name, output_type, output_dim))

        return nb.make_node()

github onnx / onnxmltools / onnxmltools / convert / sklearn / GLMClassifierConverter.py
        # Add normalizer in the case of multi-class.
        if multi_class > 0 and sk_node.__class__.__name__ != 'LinearSVC':
            appended_node_normalizer, output_name = add_normalizer(prob_input, output_type, "L1", context)

        # Add a ZipMap to handle the map output.
        if len(classes) > 2 or sk_node.__class__.__name__ != 'LinearSVC':
            appended_node_zipmap = add_zipmap(output_name, output_type, class_labels, context)
        else:
            score_selector = NodeBuilder(context, 'Slice', op_version=2)
            score_selector.add_input(output_name)
            select_output = context.get_unique_name(output_name)
            score_selector.add_output(select_output)
            score_selector.add_attribute('starts', [0, 1])
            score_selector.add_attribute('ends', [1, 2])
            selector_output = model_util.make_tensor_value_info(select_output, onnx_proto.TensorProto.FLOAT, [1])
            context.add_output(selector_output)
            appended_node_zipmap = score_selector.make_node()

        if appended_node_normalizer is not None:
            return [nb.make_node(), appended_node_normalizer, appended_node_zipmap]
        else:
            return [nb.make_node(), appended_node_zipmap]

github onnx / onnxmltools / onnxmltools / convert / coreml / ArrayFeatureExtractorConverter.py
def convert(context, cm_node, inputs, outputs):
        nb = NodeBuilder(context, 'ArrayFeatureExtractor', op_domain='ai.onnx.ml')
        nb.extend_inputs(inputs)
        target_index = cm_node.arrayFeatureExtractor.extractIndex
        index_tensor = model_util.make_tensor('TargetIndex', onnx_proto.TensorProto.INT64, [len(target_index)], target_index)
        nb.add_initializer(index_tensor)
        nb.extend_outputs(outputs)

        return nb.make_node()
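
Here make_tensor converts the Core ML extractIndex values into an INT64 tensor named 'TargetIndex', and add_initializer bakes those indices into the graph, so the ArrayFeatureExtractor node does not need to receive them as a runtime input.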

github onnx / onnxmltools / onnxmltools / convert / common / ModelBuilder.py
def make_model(self):
        return model_util.make_model(self._name,
                                     onnx_proto.IR_VERSION,
                                     model_util.get_producer(),
                                     model_util.get_producer_version(),
                                     model_util.get_domain(),
                                     model_util.get_model_version(),
                                     self._doc_string,
                                     self._metadata_props,
                                     self._operator_domain_version_pairs,
                                     self._nodes,
                                     self._inputs,
                                     self._outputs,
                                     self._values,
                                     self._initializers)
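
make_model bundles everything the converter has accumulated into an ONNX model: the graph name and doc string, producer and domain metadata supplied by the model_util.get_* helpers, the (operator domain, version) pairs, and the graph components themselves (nodes, inputs, outputs, intermediate values and initializers).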

github onnx / onnxmltools / onnxmltools / convert / sklearn / BinarizerConverter.py
def convert(context, sk_node, inputs):
        nb = NodeBuilder(context, "Binarizer", op_domain='ai.onnx.ml')
        if isinstance(sk_node.threshold, list):
            raise ValueError(
                "Model which we try to convert contains multiple thresholds in Binarizer. "
                "According to the documentation, only one threshold is allowed.")
        nb.add_attribute('threshold', float(sk_node.threshold))

        nb.extend_inputs(inputs)
        try:
            output_type = inputs[0].type.tensor_type.elem_type
            output_dim = [d.dim_value for d in inputs[0].type.tensor_type.shape.dim]
        except AttributeError:
            raise ValueError('Invalid/missing input for Binarizer.')
        nb.add_output(model_util.make_tensor_value_info(nb.name, output_type, output_dim))

        return nb.make_node()

github onnx / onnxmltools / onnxmltools / convert / sklearn / ImputerConverter.py
def convert(context, sk_node, inputs):

        # Always use floats for the imputer -- to ensure this, any integer input
        # will be converted to a float using a scaler operation
        imputer_inputs = []
        nodes = []
        num_features = 0
        for inp in inputs:
            if inp.type.tensor_type.elem_type in model_util.onnx_integer_types:
                # Add the scaler node for int-to-float conversion
                scaler = model_util.create_scaler(inp, inp.name, 1.0, 0.0, context)
                nodes.append(scaler)
                imputer_inputs.append(scaler.outputs[0])
            else:
                imputer_inputs.append(inp)
            num_features += model_util.get_feature_count(imputer_inputs[-1])

        nb = NodeBuilder(context, 'Imputer', op_domain='ai.onnx.ml')
        nb.add_attribute('imputed_value_floats', sk_node.statistics_)

        replaced_value = 0.0
        if isinstance(sk_node.missing_values, str):
            if sk_node.missing_values == 'NaN':
                replaced_value = np.nan
        elif isinstance(sk_node.missing_values, float):
            replaced_value = float(sk_node.missing_values)
        else:
            raise RuntimeError('Unsupported missing value')
        nb.add_attribute('replaced_value_float', replaced_value)

        nb.extend_inputs(imputer_inputs)
        nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, [1, num_features]))
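
Two details worth noting in this excerpt: model_util.onnx_integer_types is used to detect integer inputs, and create_scaler(inp, inp.name, 1.0, 0.0, context) inserts a Scaler node with scale 1.0 and offset 0.0, effectively casting the integer input to float before it reaches the Imputer. The imputer's output is then declared as a [1, num_features] float tensor via make_tensor_value_info.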