How to use the onnxmltools.convert.common.NodeBuilder class in onnxmltools

To help you get started, we’ve selected a few onnxmltools examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github onnx / onnxmltools / tests / common / test_ModelBuilder.py View on Github external
def test_intitializers_on_multiple_nodes(self):
        # Build two chained nodes ('bar' feeds 'bar2'), each carrying its own
        # initializer tensor, and register both with the model builder.
        context = ConvertContext()
        model_builder = ModelBuilder()

        values = [1, 2, 3]
        shape = [1, len(values)]

        first_builder = NodeBuilder(context, 'bar')
        first_builder.add_input('Input')
        first_builder.add_output('Output')
        first_builder.add_initializer(
            model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, shape, values))
        first_node = first_builder.make_node()

        second_builder = NodeBuilder(context, 'bar2')
        second_builder.add_input('Output')
        second_builder.add_output('Output2')
        second_builder.add_initializer(
            model_util.make_tensor('classes2', onnx_proto.TensorProto.FLOAT, shape, values))
        second_node = second_builder.make_node()

        model_builder.add_nodes([first_node.onnx_node, second_node.onnx_node])
github onnx / onnxmltools / tests / common / test_ModelBuilder.py View on Github external
def test_initializers(self):
        # A single node's initializer should surface on the final graph,
        # renamed with the node-name prefix ('bar_classes').
        context = ConvertContext()
        model_builder = ModelBuilder()

        values = [1, 2, 3]
        builder = NodeBuilder(context, 'bar')
        builder.add_input('Input')
        builder.add_output('Output')
        builder.add_initializer(
            model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, [1, len(values)], values))
        node = builder.make_node()

        model_builder.add_nodes([node.onnx_node])
        model_builder.add_initializers(node.initializers)
        model_builder.add_inputs(
            [model_util.make_tensor_value_info('Input', onnx_proto.TensorProto.FLOAT, [1])])
        model_builder.add_outputs(
            [model_util.make_tensor_value_info('Output', onnx_proto.TensorProto.FLOAT, [1])])

        model = model_builder.make_model()
        self.assertEqual(len(model.graph.initializer), 1)
        self.assertEqual(model.graph.initializer[0].name, 'bar_classes')
github onnx / onnxmltools / tests / common / test_NodeBuilder.py View on Github external
def test_add_inputs(self):
        # Inputs accumulate in insertion order: a plain name, an empty slot,
        # a value-info input, then the initializer- and value-backed inputs.
        context = ConvertContext()
        builder = NodeBuilder(context, "foo")
        builder.add_input('test')
        builder.add_empty_input()
        builder.add_input(
            model_util.make_tensor_value_info('value_test', onnx_proto.TensorProto.FLOAT, [1, 3]))

        data = [1, 2, 3]
        builder.add_initializer(
            model_util.make_tensor('init', onnx_proto.TensorProto.FLOAT, [1, len(data)], data))
        builder.add_value(
            model_util.make_tensor('value', onnx_proto.TensorProto.FLOAT, [1, len(data)], data))
        node = builder.make_node()

        input_names = node.input_names
        self.assertEqual(len(input_names), 5)

        # Confirm the order of the names based upon when added
github onnx / onnxmltools / tests / common / test_ModelBuilder.py View on Github external
def test_intitializers_on_multiple_nodes(self):
        # Initializers attached to different nodes must all reach the graph,
        # each renamed with its owning node's prefix.
        context = ConvertContext()
        model_builder = ModelBuilder()

        values = [1, 2, 3]
        shape = [1, len(values)]

        first_builder = NodeBuilder(context, 'bar')
        first_builder.add_input('Input')
        first_builder.add_output('Output')
        first_builder.add_initializer(
            model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, shape, values))
        first_node = first_builder.make_node()

        second_builder = NodeBuilder(context, 'bar2')
        second_builder.add_input('Output')
        second_builder.add_output('Output2')
        second_builder.add_initializer(
            model_util.make_tensor('classes2', onnx_proto.TensorProto.FLOAT, shape, values))
        second_node = second_builder.make_node()

        model_builder.add_nodes([first_node.onnx_node, second_node.onnx_node])
        model_builder.add_initializers(first_node.initializers)
        model_builder.add_initializers(second_node.initializers)
        model_builder.add_inputs(
            [model_util.make_tensor_value_info('Input', onnx_proto.TensorProto.FLOAT, [1])])
        model_builder.add_outputs(
            [model_util.make_tensor_value_info('Output', onnx_proto.TensorProto.FLOAT, [1])])

        model = model_builder.make_model()
        self.assertEqual(len(model.graph.initializer), 2)
        self.assertEqual(model.graph.initializer[0].name, 'bar_classes')
        self.assertEqual(model.graph.initializer[1].name, 'bar2_classes2')
github onnx / onnxmltools / onnxmltools / convert / coreml / NeuralNetwork / biDirectionalLSTM.py View on Github external
post_nb3.add_attribute('axis', 0)
                post_nb3.add_input(Y_h_reshaped_name)
                post_nb3.add_output(outputs[1])
                post_nb3.add_output(outputs[3])

        # When more than two outputs are requested, also expose the cell state:
        # reshape ONNX's Y_c to [2, hidden_size] (presumably one row per
        # direction of the bidirectional LSTM — confirm against the full
        # converter) and split it into outputs[2] and outputs[4].
        if len(outputs) > 2:
            Y_c_name = context.get_unique_name('Y_c')
            nb.add_output(Y_c_name)
            # Reshape node: flatten the raw cell-state tensor to [2, hidden_size].
            post_nb4 = NodeBuilder(context, 'Reshape')
            builder_list.append(post_nb4)
            post_nb4.add_attribute('shape', [2, hidden_size])
            post_nb4.add_input(Y_c_name)
            Y_c_reshaped_name = context.get_unique_name('Y_c_reshaped')
            post_nb4.add_output(Y_c_reshaped_name)

            # Split node (opset 2 signature): cut the reshaped [2, hidden_size]
            # tensor along axis 0 into two [1, hidden_size] pieces.
            post_nb5 = NodeBuilder(context, 'Split', op_version=2)
            builder_list.append(post_nb5)
            post_nb5.add_attribute('split', [1, 1])
            post_nb5.add_attribute('axis', 0)
            post_nb5.add_input(Y_c_reshaped_name)
            post_nb5.add_output(outputs[2])
            post_nb5.add_output(outputs[4])

        # Materialize every accumulated builder into its ONNX node.
        return [builder.make_node() for builder in builder_list]
github onnx / onnxmltools / onnxmltools / convert / coreml / ImputerConverter.py View on Github external
def convert(context, cm_node, inputs, outputs):
        """
        Converts a CoreML Imputer to ONNX.

        Maps the CoreML replacement/imputed values onto the corresponding
        attributes of the ONNX-ML 'Imputer' operator and returns the built
        node, consistent with the other converters in this package.
        """
        nb = NodeBuilder(context, 'Imputer', op_domain='ai.onnx.ml')

        # Scalar sentinel that marks a missing value (float or int64 variant).
        if cm_node.imputer.HasField('replaceDoubleValue'):
            nb.add_attribute('replaced_value_float',
                             cm_node.imputer.replaceDoubleValue)
        elif cm_node.imputer.HasField('replaceInt64Value'):
            nb.add_attribute('replaced_value_int64',
                             cm_node.imputer.replaceInt64Value)
        # Per-feature replacement values (float or int64 variant).
        if cm_node.imputer.HasField('imputedDoubleArray'):
            nb.add_attribute('imputed_value_floats',
                             cm_node.imputer.imputedDoubleArray.vector)
        elif cm_node.imputer.HasField('imputedInt64Array'):
            nb.add_attribute('imputed_value_int64s',
                             cm_node.imputer.imputedInt64Array.vector)

        nb.extend_inputs(inputs)
        nb.extend_outputs(outputs)
        # Fix: previously the function fell off the end and returned None;
        # sibling converters (e.g. DictVectorizer) return the constructed node.
        return nb.make_node()
github onnx / onnxmltools / onnxmltools / convert / sklearn / GLMClassifierConverter.py View on Github external
prob_input = context.get_unique_name('classProbability')
        nb.add_output(prob_input)

        # Track the tensor that downstream nodes should read scores from;
        # a normalizer, if inserted, redirects it to its own output.
        output_name = prob_input
        appended_node_normalizer = None

        # Add normalizer in the case of multi-class.
        if multi_class > 0 and sk_node.__class__.__name__ != 'LinearSVC':
            appended_node_normalizer, output_name = add_normalizer(prob_input, output_type, "L1", context)

        # Add a ZipMap to handle the map output.
        if len(classes) > 2 or sk_node.__class__.__name__ != 'LinearSVC':
            appended_node_zipmap = add_zipmap(output_name, output_type, class_labels, context)
        else:
            # Binary LinearSVC: no ZipMap. Slice (opset 2 signature) picks the
            # second column ([0,1]..[1,2]) of the score tensor — presumably the
            # positive-class score; confirm against the full converter.
            score_selector = NodeBuilder(context, 'Slice', op_version=2)
            score_selector.add_input(output_name)
            select_output = context.get_unique_name(output_name)
            score_selector.add_output(select_output)
            score_selector.add_attribute('starts', [0, 1])
            score_selector.add_attribute('ends', [1, 2])
            selector_output = model_util.make_tensor_value_info(select_output, onnx_proto.TensorProto.FLOAT, [1])
            context.add_output(selector_output)
            appended_node_zipmap = score_selector.make_node()

        # NOTE(review): prefer "is not None" over "!= None" (PEP 8).
        if appended_node_normalizer != None:
            return [nb.make_node(), appended_node_normalizer, appended_node_zipmap]
        else:
            return [nb.make_node(), appended_node_zipmap]
github onnx / onnxmltools / onnxmltools / convert / coreml / NeuralNetwork / pooling.py View on Github external
builders = [nb]
        # Average pooling that must INCLUDE padded cells in the divisor needs a
        # correction, since the base pool node apparently averages without them.
        if params.type == Params.AVERAGE and not params.avgPoolExcludePadding:
            # Case 5. See comment above.
            pooled_buffer_name = context.get_unique_name('pooled_buffer')
            nb.add_output(pooled_buffer_name)

            constant_tensor_name = context.get_unique_name('constant')
            kernel_size_map = context.get_unique_name('kernel_size_map')
            # Affine with alpha=0 ignores the input values and emits a tensor of
            # the same shape filled with beta = 1 / (kernel area).
            scaler_builder = NodeBuilder(context, 'Affine')
            scaler_builder.add_attribute('alpha', 0.)
            scaler_builder.add_attribute('beta', 1. / (kernel_shape[0] * kernel_shape[1]))
            scaler_builder.add_input(inputs[0])
            scaler_builder.add_output(constant_tensor_name)
            builders.append(scaler_builder)

            # LpPool with p=1 over that constant tensor sums 1/(kernel area) per
            # window — presumably yielding the per-window correction factor used
            # below; confirm against the full converter's derivation.
            lp_pool_builder = NodeBuilder(context, 'LpPool', op_version=2)
            lp_pool_builder.add_attribute('kernel_shape', kernel_shape)
            lp_pool_builder.add_attribute('strides', strides)
            lp_pool_builder.add_attribute('p', 1)
            if pads is not None:
                lp_pool_builder.add_attribute('pads', pads)
            if auto_pad is not None:
                lp_pool_builder.add_attribute('auto_pad', auto_pad)
            lp_pool_builder.add_input(constant_tensor_name)
            lp_pool_builder.add_output(kernel_size_map)
            builders.append(lp_pool_builder)

            # Element-wise multiply rescales the pooled buffer into the final output.
            adjuster_builder = NodeBuilder(context, 'Mul')
            adjuster_builder.add_input(pooled_buffer_name)
            adjuster_builder.add_input(kernel_size_map)
            adjuster_builder.add_output(outputs[0])
            builders.append(adjuster_builder)
github onnx / onnxmltools / onnxmltools / convert / coreml / DictVectorizerConverter.py View on Github external
def convert(context, cm_node, inputs, outputs):
        """Convert a CoreML DictVectorizer into the ONNX-ML 'DictVectorizer' node."""
        builder = NodeBuilder(context, 'DictVectorizer', op_domain='ai.onnx.ml')

        # Pick the vocabulary attribute matching the key type of the CoreML model.
        vectorizer = cm_node.dictVectorizer
        if vectorizer.HasField('stringToIndex'):
            builder.add_attribute('string_vocabulary', vectorizer.stringToIndex.vector)
        else:
            builder.add_attribute('int64_vocabulary', vectorizer.int64ToIndex.vector)

        builder.extend_inputs(inputs)
        builder.extend_outputs(outputs)
        return builder.make_node()
github onnx / onnxmltools / onnxmltools / convert / coreml / NeuralNetwork / padding.py View on Github external
def convert(context, cm_node, inputs, outputs):
        """
        Converts a CoreML padding layer to an ONNX 'Pad' node (opset 2 signature).
        """
        extend_inputs_from_2d_to_4d(context, inputs)

        params = cm_node.padding

        nb = NodeBuilder(context, 'Pad', op_version=2)

        # Translate CoreML padding mode names into ONNX 'mode' attribute values.
        pad_table = {'constant': 'constant',
                     'reflection': 'reflect',
                     'replication': 'edge'}

        pad_type = params.WhichOneof('PaddingType')
        if pad_type not in pad_table:
            raise ValueError('Unsupported padding mode: {}'.format(pad_type))
        nb.add_attribute('mode', pad_table[pad_type])

        # CoreML only pads for their H- and W- axes. Here we assume the shape of the tensor to be padded
        # is [N, C, H, W], so we have 8 padding amounts
        #     pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index,
        #             N_end_index,   C_end_index,   H_end_index,   W_end_index]
        # Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros.
        pads = [0, 0, 0, 0, 0, 0, 0, 0]