How to use the skl2onnx.common._apply_operation.apply_mul function in skl2onnx

To help you get started, we’ve selected a few skl2onnx examples based on popular ways apply_mul is used in public projects.


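Before the project examples, here is a minimal sketch of how apply_mul is typically wired into a custom converter. The converter function convert_scaled_output, the scalar value and the variable names are hypothetical illustrations; only the apply_mul call itself (scope, list of input names, output name, container, broadcast flag) mirrors the usage in the examples below.

from skl2onnx.proto import onnx_proto
from skl2onnx.common._apply_operation import apply_mul


def convert_scaled_output(scope, operator, container):
    # Hypothetical converter body: multiply the operator's input by a
    # scalar initializer and write the product to the operator's output.
    scale_name = scope.get_unique_variable_name('scale')

    container.add_initializer(scale_name, onnx_proto.TensorProto.FLOAT,
                              [], [2.0])

    # apply_mul adds a Mul node to the container; broadcast=1 lets the
    # scalar initializer broadcast against the input tensor.
    apply_mul(scope, [operator.inputs[0].full_name, scale_name],
              operator.outputs[0].full_name, container, broadcast=1)

In the examples below the same pattern appears with broadcast=1 when one input is a scalar initializer and broadcast=0 when both inputs already share a shape.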
github onnx / sklearn-onnx / skl2onnx / operator_converters / calibrated_classifier_cv.py (View on GitHub)

    masked_reduced_prob_name = scope.get_unique_variable_name(
        'masked_reduced_prob')

    container.add_initializer(n_classes_name, onnx_proto.TensorProto.FLOAT,
                              [], [n_classes])

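    # Build a float mask that is 1.0 exactly where reduced_prob is 0:
    # cast to BOOL, negate with a Not node, cast back to FLOAT.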
    apply_cast(scope, reduced_prob_name, cast_prob_name, container,
               to=onnx_proto.TensorProto.BOOL)
    container.add_node('Not', cast_prob_name,
                       bool_not_cast_prob_name,
                       name=scope.get_unique_operator_name('Not'))
    apply_cast(scope, bool_not_cast_prob_name, mask_name, container,
               to=onnx_proto.TensorProto.FLOAT)
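    # Where the mask is set, offset the concatenated probabilities by 1
    # and the reduced (summed) probability by n_classes.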
    apply_add(scope, [concatenated_prob_name, mask_name],
              masked_concatenated_prob_name, container, broadcast=1)
    apply_mul(scope, [mask_name, n_classes_name], reduced_prob_mask_name,
              container, broadcast=1)
    apply_add(scope, [reduced_prob_name, reduced_prob_mask_name],
              masked_reduced_prob_name, container, broadcast=0)
    return masked_concatenated_prob_name, masked_reduced_prob_name

github onnx / sklearn-onnx / skl2onnx / operator_converters / calibrated_classifier_cv.py (View on GitHub)

    exp_parameter_name = scope.get_unique_variable_name(
        'exp_parameter')
    exp_result_name = scope.get_unique_variable_name('exp_result')
    unity_name = scope.get_unique_variable_name('unity')
    denominator_name = scope.get_unique_variable_name('denominator')
    sigmoid_predict_result_name = scope.get_unique_variable_name(
        'sigmoid_predict_result')

    container.add_initializer(a_name, onnx_proto.TensorProto.FLOAT,
                              [], [model.calibrators_[k].a_])
    container.add_initializer(b_name, onnx_proto.TensorProto.FLOAT,
                              [], [model.calibrators_[k].b_])
    container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,
                              [], [1])

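    # Platt scaling: sigmoid_predict_result = 1 / (1 + exp(a * df_col + b)),
    # built from elementwise Mul, Add, Exp and Div nodes.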
    apply_mul(scope, [a_name, df_col_name], a_df_prod_name, container,
              broadcast=0)
    apply_add(scope, [a_df_prod_name, b_name], exp_parameter_name,
              container, broadcast=0)
    apply_exp(scope, exp_parameter_name, exp_result_name, container)
    apply_add(scope, [unity_name, exp_result_name], denominator_name,
              container, broadcast=0)
    apply_div(scope, [unity_name, denominator_name],
              sigmoid_predict_result_name, container, broadcast=0)
    return sigmoid_predict_result_name

github onnx / sklearn-onnx / skl2onnx / operator_converters / sgd_classifier.py (View on GitHub)

    reduced_proba_updated_name = scope.get_unique_variable_name(
        'reduced_proba_updated')

    container.add_initializer(num_classes_name, onnx_proto.TensorProto.FLOAT,
                              [], [num_classes])

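    # Build a float mask that is 1.0 exactly where reduced_proba is 0:
    # cast to BOOL, negate with a Not node, cast back to FLOAT.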
    apply_cast(scope, reduced_proba, bool_reduced_proba_name, container,
               to=onnx_proto.TensorProto.BOOL)
    container.add_node('Not', bool_reduced_proba_name,
                       bool_not_reduced_proba_name,
                       name=scope.get_unique_operator_name('Not'))
    apply_cast(scope, bool_not_reduced_proba_name, not_reduced_proba_name,
               container, to=onnx_proto.TensorProto.FLOAT)
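    # Where the mask is set, offset proba by 1 and reduced_proba by
    # num_classes.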
    apply_add(scope, [proba, not_reduced_proba_name],
              proba_updated_name, container, broadcast=1)
    apply_mul(scope, [not_reduced_proba_name, num_classes_name],
              mask_name, container, broadcast=1)
    apply_add(scope, [reduced_proba, mask_name],
              reduced_proba_updated_name, container, broadcast=0)
    return proba_updated_name, reduced_proba_updated_name

github onnx / sklearn-onnx / skl2onnx / operator_converters / sgd_classifier.py (View on GitHub)

        1. / (1. + exp(-scores))
        multiclass is handled by normalising that over all classes.
    """
    negate_name = scope.get_unique_variable_name('negate')
    negated_scores_name = scope.get_unique_variable_name('negated_scores')
    exp_result_name = scope.get_unique_variable_name('exp_result')
    unity_name = scope.get_unique_variable_name('unity')
    add_result_name = scope.get_unique_variable_name('add_result')
    proba_name = scope.get_unique_variable_name('proba')

    container.add_initializer(negate_name, onnx_proto.TensorProto.FLOAT,
                              [], [-1])
    container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,
                              [], [1])

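    # 1. / (1. + exp(-scores)) expressed with ONNX nodes: multiply the
    # scores by -1, exponentiate, add 1, then take the reciprocal.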
    apply_mul(scope, [scores, negate_name],
              negated_scores_name, container, broadcast=1)
    apply_exp(scope, negated_scores_name, exp_result_name, container)
    apply_add(scope, [exp_result_name, unity_name],
              add_result_name, container, broadcast=1)
    apply_reciprocal(scope, add_result_name, proba_name, container)
    return _normalise_proba(scope, operator, container, proba_name,
                            num_classes, unity_name)

github onnx / sklearn-onnx / skl2onnx / operator_converters / ada_boost.py (View on GitHub)

    container.add_initializer(n_classes_minus_one_name,
                              onnx_proto.TensorProto.FLOAT,
                              [], [n_classes - 1])

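    # SAMME.R term: clip the probabilities away from 0, take the log, and
    # compute (n_classes - 1) * (log_proba - row-sum of log_proba scaled by
    # inverted_n_classes), where inverted_n_classes_name presumably holds
    # 1 / n_classes (its initializer sits outside this excerpt).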
    apply_clip(
        scope, proba_name, clipped_proba_name, container,
        operator_name=scope.get_unique_operator_name('Clip'),
        min=np.finfo(float).eps)
    container.add_node(
        'Log', clipped_proba_name, log_proba_name,
        name=scope.get_unique_operator_name('Log'))
    container.add_node(
        'ReduceSum', log_proba_name, reduced_proba_name, axes=[1],
        name=scope.get_unique_operator_name('ReduceSum'))
    apply_reshape(scope, reduced_proba_name,
                  reshaped_result_name, container,
                  desired_shape=(-1, 1))
    apply_mul(scope, [reshaped_result_name, inverted_n_classes_name],
              prod_result_name, container, broadcast=1)
    apply_sub(scope, [log_proba_name, prod_result_name],
              sub_result_name, container, broadcast=1)
    apply_mul(scope, [sub_result_name, n_classes_minus_one_name],
              samme_proba_name, container, broadcast=1)
    return samme_proba_name