How to use the skl2onnx.proto.onnx_proto.TensorProto class in skl2onnx

To help you get started, we’ve selected a few skl2onnx examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github onnx / sklearn-onnx / skl2onnx / operator_converters / naive_bayes.py View on Github external
# NOTE(review): fragment of the naive-Bayes converter from
# skl2onnx/operator_converters/naive_bayes.py; the enclosing `def` and the
# definitions of `scope`, `classes`, `nb_op`, `float_dtype`, `operator` and
# `container` are outside this excerpt, and the final `add_initializer`
# call is cut off mid-statement.
argmax_output_name = scope.get_unique_variable_name('argmax_output')
    # Reserve unique names for the intermediate ONNX tensors this
    # converter will produce.
    cast2_result_name = scope.get_unique_variable_name('cast2_result')
    reshaped_result_name = scope.get_unique_variable_name('reshaped_result')
    classes_name = scope.get_unique_variable_name('classes')
    reduce_log_sum_exp_result_name = scope.get_unique_variable_name(
        'reduce_log_sum_exp_result')
    log_prob_name = scope.get_unique_variable_name('log_prob')
    array_feature_extractor_result_name = scope.get_unique_variable_name(
        'array_feature_extractor_result')

    # Pick the ONNX element type of the class-label initializer from the
    # dtype of `classes`. Default is STRING; numeric labels are stored as
    # INT32 instead.
    class_type = onnx_proto.TensorProto.STRING
    if np.issubdtype(classes.dtype, np.floating):
        # Float labels are truncated to int32 here — fractional label
        # values would be silently changed by this cast.
        class_type = onnx_proto.TensorProto.INT32
        classes = classes.astype(np.int32)
    elif np.issubdtype(classes.dtype, np.signedinteger):
        class_type = onnx_proto.TensorProto.INT32
    else:
        # Non-numeric labels: encode each label to UTF-8 bytes for the
        # STRING initializer.
        classes = np.array([s.encode('utf-8') for s in classes])

    # Store the class labels as a graph initializer.
    container.add_initializer(classes_name, class_type, classes.shape, classes)

    if operator.type != 'SklearnGaussianNB':
        # Multinomial/Bernoulli-style models carry log priors and
        # per-feature log probabilities as fitted attributes; embed them
        # as initializers.
        class_log_prior_name = scope.get_unique_variable_name(
            'class_log_prior')
        feature_log_prob_name = scope.get_unique_variable_name(
            'feature_log_prob')

        # Reshape priors to a (1, n_classes) row so they broadcast over
        # the batch dimension.
        class_log_prior = nb_op.class_log_prior_.astype(
            float_dtype).reshape((1, -1))
        # Transposed to (n_features, n_classes) for the MatMul below.
        feature_log_prob = nb_op.feature_log_prob_.T.astype(float_dtype)

        # NOTE(review): call truncated in this excerpt.
        container.add_initializer(
github onnx / sklearn-onnx / skl2onnx / operator_converters / calibrated_classifier_cv.py View on Github external
# NOTE(review): fragment of the CalibratedClassifierCV converter from
# skl2onnx/operator_converters/calibrated_classifier_cv.py; the enclosing
# function, the loop this `break` belongs to, and the definitions of
# `df_inp`, `k`, `cur_k`, `prob_name`, `model` and `n_classes` are outside
# this excerpt. Scraped indentation is preserved as-is.
if op_type in ('SklearnLinearSVC', 'SklearnSVC'):
                # SVC decision_function yields a single column for binary
                # problems; reshape it to (-1, 1) and concatenate it with
                # itself so the downstream column extraction works for
                # both classes.
                df_input_name = scope.get_unique_variable_name('df_input')
                merged_input_name = scope.get_unique_variable_name(
                    'merged_input')

                apply_reshape(scope, df_inp,
                              df_input_name, container,
                              desired_shape=(-1, 1))
                apply_concat(scope, [df_input_name, df_input_name],
                             merged_input_name, container, axis=1)
                df_inp = merged_input_name
        k_name = scope.get_unique_variable_name('k')
        df_col_name = scope.get_unique_variable_name('transposed_df_col')
        prob_name[k] = scope.get_unique_variable_name('prob_{}'.format(k))

        # Scalar INT64 initializer holding the column index to extract.
        container.add_initializer(k_name, onnx_proto.TensorProto.INT64,
                                  [], [cur_k])

        # Extract column `cur_k` of the decision-function tensor.
        container.add_node(
            'ArrayFeatureExtractor', [df_inp, k_name], df_col_name,
            name=scope.get_unique_operator_name('ArrayFeatureExtractor'),
            op_domain='ai.onnx.ml')
        # Apply the fitted calibration to the raw scores: Platt sigmoid
        # or isotonic regression, chosen by the model's `method`.
        T = (_transform_sigmoid(scope, container, model, df_col_name, k)
             if model.method == 'sigmoid' else
             _transform_isotonic(scope, container, model, df_col_name, k))

        prob_name[k] = T
        if n_classes == 2:
            # Binary case: only one calibrated column is needed; the
            # complementary probability is derived after the loop.
            break

    if n_classes == 2:
        zeroth_col_name = scope.get_unique_variable_name('zeroth_col')
github onnx / sklearn-onnx / skl2onnx / operator_converters / k_bins_discretiser.py View on Github external
# NOTE(review): fragment of the KBinsDiscretizer converter from
# skl2onnx/operator_converters/k_bins_discretiser.py; the opening of the
# `add_initializer` call on the first line, the enclosing loop, and the
# definitions of `item`, `column_index_name`, `column_name`,
# `cast_column_name`, `range_column_name`, `less_result_name`,
# `cast_result_name` and `last_column_name` are outside this excerpt.
onnx_proto.TensorProto.FLOAT,
                                  [len(item)], item)

        # Pull one input column out of the feature matrix.
        container.add_node(
            'ArrayFeatureExtractor',
            [operator.inputs[0].full_name, column_index_name], column_name,
            name=scope.get_unique_operator_name('ArrayFeatureExtractor'),
            op_domain='ai.onnx.ml')
        apply_cast(scope, column_name, cast_column_name,
                   container, to=onnx_proto.TensorProto.FLOAT)
        # Compare the column against the bin edges; result is boolean.
        container.add_node(
            'Less', [cast_column_name, range_column_name],
            less_result_name,
            name=scope.get_unique_operator_name('Less'))
        # Re-cast the boolean comparison result to float 0.0/1.0.
        apply_cast(scope, less_result_name, cast_result_name,
                   container, to=onnx_proto.TensorProto.FLOAT)

        if last_column_name is None:
            # Lazily build a constant all-ones column with the same row
            # count as the input: multiply the column by 0 and add 1.
            last_column_name = scope.get_unique_variable_name('last_column')
            zero_float = scope.get_unique_variable_name('zero_float')
            one_float = scope.get_unique_variable_name('one_float')
            zero_column = scope.get_unique_variable_name('zero_column')
            container.add_initializer(
                one_float, onnx_proto.TensorProto.FLOAT,
                [1], np.ones(1))
            container.add_initializer(
                zero_float, onnx_proto.TensorProto.FLOAT,
                [1], np.zeros(1))
            apply_mul(scope, [cast_column_name, zero_float], zero_column,
                      container, broadcast=1)
            apply_add(scope, [zero_column, one_float], last_column_name,
                      container, broadcast=1)
github onnx / sklearn-onnx / skl2onnx / operator_converters / gradient_boosting.py View on Github external
# NOTE(review): fragment of the GradientBoosting converter from
# skl2onnx/operator_converters/gradient_boosting.py; the enclosing `def`
# and the definitions of `op`, `attrs` and `op_type` are outside this
# excerpt.
tree_weight = op.learning_rate
    # `n_estimators_` (fitted count, may differ with early stopping) is
    # preferred over the constructor parameter when present.
    n_est = (op.n_estimators_ if hasattr(op, 'n_estimators_') else
             op.n_estimators)
    for i in range(n_est):
        # Each boosting stage holds its regressor at estimators_[i][0];
        # serialize its tree into the shared attribute dict, weighted by
        # the learning rate.
        tree = op.estimators_[i][0].tree_
        tree_id = i
        add_tree_to_attribute_pairs(attrs, False, tree, tree_id, tree_weight,
                                    0, False, True, dtype=container.dtype)

    input_name = operator.input_full_names
    # NOTE(review): exact-type comparison; isinstance() would be the
    # idiomatic check, but behavior may intentionally exclude subclasses.
    if type(operator.inputs[0].type) == Int64TensorType:
        # ai.onnx.ml tree ensembles consume floats; insert a Cast for
        # integer inputs.
        cast_input_name = scope.get_unique_variable_name('cast_input')

        apply_cast(scope, operator.input_full_names, cast_input_name,
                   container, to=onnx_proto.TensorProto.FLOAT)
        input_name = cast_input_name

    # Emit the final tree-ensemble node with all accumulated attributes.
    container.add_node(op_type, input_name,
                       operator.output_full_names, op_domain='ai.onnx.ml',
                       **attrs)
github onnx / sklearn-onnx / skl2onnx / algebra / graph_state.py View on Github external
# NOTE(review): fragment of skl2onnx/algebra/graph_state.py; this is the
# middle of an if/elif chain mapping a numpy dtype to an ONNX TensorProto
# element type plus a numpy cast target. The leading branches and the
# final line (cut off mid-string-literal by the excerpt) are missing.
elif dtype == np.float64:
                ty = onnx_proto.TensorProto.DOUBLE
                astype = np.float64
            elif dtype == np.int64:
                ty = onnx_proto.TensorProto.INT64
                astype = np.int64
            elif dtype == np.int32:
                ty = onnx_proto.TensorProto.INT32
                # NOTE(review): cast target is int64 while the proto type
                # is INT32 — looks inconsistent; confirm against upstream.
                astype = np.int64
            elif dtype == np.bool:
                # NOTE(review): `np.bool` is deprecated and removed in
                # NumPy >= 1.24; modern code would use plain `bool` or
                # `np.bool_`.
                ty = onnx_proto.TensorProto.BOOL
                astype = np.bool
            else:
                # Fall back to string inspection of the dtype name for
                # types not matched above (unsigned ints, etc.).
                st = str(dtype).lower()
                if st.startswith('u') or st.startswith("
github onnx / sklearn-onnx / skl2onnx / operator_converters / feature_selection.py View on Github external
def convert_sklearn_feature_selection(scope, operator, container):
    """Convert a fitted scikit-learn feature-selection transformer to ONNX.

    Reads the selected column indices from the fitted selector and emits
    an ArrayFeatureExtractor node that picks those columns from the input
    tensor. When the selector keeps no feature at all, a ConstantOfShape
    node is emitted instead. If the input is not a float tensor, the
    result is additionally cast to FLOAT.

    :param scope: skl2onnx Scope used to allocate unique names
    :param operator: skl2onnx Operator wrapping the fitted selector
    :param container: ModelComponentContainer receiving nodes/initializers
    """
    op = operator.raw_operator
    # Indices of the features kept by the selector.
    index = op.get_support(indices=True)
    needs_cast = not isinstance(operator.inputs[0].type,
                                (FloatTensorType, FloatType))
    if needs_cast:
        # Write into a temporary so a Cast can be appended afterwards.
        output_name = scope.get_unique_variable_name('output')
    else:
        output_name = operator.outputs[0].full_name

    # BUG FIX: the previous guard was `if index.any():`, which is False
    # when the only selected feature is column 0 (an all-zero index
    # array), wrongly routing a valid selection into the empty branch.
    # Test the number of selected indices instead.
    if len(index) > 0:
        column_indices_name = scope.get_unique_variable_name('column_indices')

        container.add_initializer(column_indices_name,
                                  onnx_proto.TensorProto.INT64,
                                  [len(index)], index)

        # Gather the selected columns from the input tensor.
        container.add_node(
            'ArrayFeatureExtractor',
            [operator.inputs[0].full_name, column_indices_name],
            output_name, op_domain='ai.onnx.ml',
            name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
    else:
        # No feature selected: produce a constant-filled tensor shaped
        # like the input (opset 9+).
        container.add_node('ConstantOfShape', operator.inputs[0].full_name,
                           output_name, op_version=9)
    if needs_cast:
        apply_cast(scope, output_name, operator.outputs[0].full_name,
                   container, to=onnx_proto.TensorProto.FLOAT)
github onnx / sklearn-onnx / skl2onnx / operator_converters / multilayer_perceptron.py View on Github external
def convert_sklearn_mlp_classifier(scope, operator, container):
    """
    Converter for MLPClassifier.
    This function calls _predict() which returns the probability scores
    of the positive class in case of binary labels and class
    probabilities in case of multi-class. It then calculates probability
    scores for the negative class in case of binary labels. It
    calculates the class labels and sets the output.

    NOTE(review): this excerpt is truncated — the function continues
    past the class-label encoding shown below.
    """
    mlp_op = operator.raw_operator
    classes = mlp_op.classes_
    # Default label element type; numeric labels switch this to INT32.
    class_type = onnx_proto.TensorProto.STRING

    # Unique names for the intermediate ONNX tensors built later.
    classes_name = scope.get_unique_variable_name('classes')
    argmax_output_name = scope.get_unique_variable_name('argmax_output')
    array_feature_extractor_result_name = scope.get_unique_variable_name(
        'array_feature_extractor_result')

    # Emit the probability-computation subgraph for the fitted MLP.
    y_pred = _predict(scope, operator.inputs[0].full_name, container, mlp_op)

    if np.issubdtype(mlp_op.classes_.dtype, np.floating):
        # Float labels are truncated to int32 — fractional label values
        # would be silently changed by this cast.
        class_type = onnx_proto.TensorProto.INT32
        classes = classes.astype(np.int32)
    elif np.issubdtype(mlp_op.classes_.dtype, np.signedinteger):
        class_type = onnx_proto.TensorProto.INT32
    else:
        # Non-numeric labels: UTF-8 encode for the STRING initializer.
        classes = np.array([s.encode('utf-8') for s in classes])
github onnx / sklearn-onnx / skl2onnx / operator_converters / decomposition.py View on Github external
# NOTE(review): fragment of the TruncatedSVD/PCA converter from
# skl2onnx/operator_converters/decomposition.py; the enclosing function
# and the definitions of `input_name`, `mean_name`, `sub_result_name`,
# `svd`, `transform_matrix_name` and `operator` are outside this excerpt.
# Subtract mean from input tensor
            apply_sub(scope, [input_name, mean_name],
                      sub_result_name, container, broadcast=1)
        else:
            # No centering required: feed the raw input forward.
            sub_result_name = input_name
        if svd.whiten:
            explained_variance_name = scope.get_unique_variable_name(
                'explained_variance')
            explained_variance_root_name = scope.get_unique_variable_name(
                'explained_variance_root')
            matmul_result_name = scope.get_unique_variable_name(
                'matmul_result')

            # Embed the fitted explained-variance vector as an initializer.
            container.add_initializer(
                explained_variance_name, onnx_proto.TensorProto.FLOAT,
                svd.explained_variance_.shape, svd.explained_variance_)

            # Project the (centered) input onto the components.
            container.add_node(
                'MatMul', [sub_result_name, transform_matrix_name],
                matmul_result_name,
                name=scope.get_unique_operator_name('MatMul'))
            # Whitening: divide the projection by sqrt(explained_variance_),
            # broadcast across rows.
            apply_sqrt(scope, explained_variance_name,
                       explained_variance_root_name, container)
            apply_div(scope,
                      [matmul_result_name, explained_variance_root_name],
                      operator.outputs[0].full_name, container, broadcast=1)
        else:
            # No whitening: the projection is the final output.
            container.add_node(
                'MatMul', [sub_result_name, transform_matrix_name],
                operator.outputs[0].full_name,
                name=scope.get_unique_operator_name('MatMul'))