# Fragment (appears to be from the BaggingClassifier converter): the body of
# a loop over the fitted sub-estimators. Each sub-estimator is converted to
# its own sub-graph and its probability (or label) output is reshaped to
# (1, -1, k) so the per-estimator results can be stacked and averaged.
this_operator.inputs = operator.inputs
label_name = scope.declare_local_variable('label_%d' % index)
proba_name = scope.declare_local_variable('proba_%d' % index,
                                          FloatTensorType())
this_operator.outputs.append(label_name)
this_operator.outputs.append(proba_name)
proba_output_name = (proba_name.onnx_name if has_proba
                     else label_name.onnx_name)
reshape_dim_val = len(model.classes_) if has_proba else 1
reshaped_proba_name = scope.get_unique_variable_name('reshaped_proba')
apply_reshape(scope, proba_output_name, reshaped_proba_name,
              container, desired_shape=(1, -1, reshape_dim_val))
proba_list.append(reshaped_proba_name)

# After the loop: stack the reshaped outputs along axis 0 and, when
# probabilities are available, average them with ReduceMean.
merged_proba_name = scope.get_unique_variable_name('merged_proba')
apply_concat(scope, proba_list,
             merged_proba_name, container, axis=0)
if has_proba:
    container.add_node('ReduceMean', merged_proba_name,
                       final_proba_name,
                       name=scope.get_unique_operator_name('ReduceMean'),
                       axes=[0], keepdims=0)
else:
    # No predict_proba on the sub-estimators: fall back to voting on labels.
    n_estimators_name = scope.get_unique_variable_name('n_estimators')
    class_labels_name = scope.get_unique_variable_name('class_labels')
    equal_result_name = scope.get_unique_variable_name('equal_result')
    cast_output_name = scope.get_unique_variable_name('cast_output')
    reduced_proba_name = scope.get_unique_variable_name('reduced_proba')
    container.add_initializer(
        n_estimators_name, onnx_proto.TensorProto.FLOAT, [],
        [len(model.estimators_)])
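# --- Illustrative sketch (not part of the library code) -------------------
# What the Reshape -> Concat -> ReduceMean pattern above computes, written in
# numpy. `probas` stands in for the per-estimator predict_proba outputs; all
# names and shapes here are hypothetical.
import numpy as np

probas = [np.random.rand(5, 3) for _ in range(4)]       # 4 estimators, 3 classes
stacked = np.concatenate([p.reshape(1, -1, 3) for p in probas], axis=0)
averaged = stacked.mean(axis=0)                          # ReduceMean over axis 0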
# Fragment (appears to be from the MLPClassifier converter). String class
# labels are stored as an initializer; for binary problems the single
# positive-class probability column is expanded to (1 - p, p); the predicted
# label is the argmax mapped back through ArrayFeatureExtractor. The `if`
# branch matching the opening `else:` is not part of this excerpt.
else:
    classes = np.array([s.encode('utf-8') for s in classes])
container.add_initializer(classes_name, class_type,
                          classes.shape, classes)
if len(classes) == 2:
    unity_name = scope.get_unique_variable_name('unity')
    negative_class_proba_name = scope.get_unique_variable_name(
        'negative_class_proba')
    container.add_initializer(unity_name, container.proto_dtype,
                              [], [1])
    apply_sub(scope, [unity_name, y_pred],
              negative_class_proba_name, container, broadcast=1)
    apply_concat(scope, [negative_class_proba_name, y_pred],
                 operator.outputs[1].full_name, container, axis=1)
else:
    apply_identity(scope, y_pred,
                   operator.outputs[1].full_name, container)
if mlp_op._label_binarizer.y_type_ == 'multilabel-indicator':
    container.add_node('Binarizer', y_pred, operator.outputs[0].full_name,
                       threshold=0.5, op_domain='ai.onnx.ml')
else:
    container.add_node('ArgMax', operator.outputs[1].full_name,
                       argmax_output_name, axis=1,
                       name=scope.get_unique_operator_name('ArgMax'))
    container.add_node(
        'ArrayFeatureExtractor', [classes_name, argmax_output_name],
        array_feature_extractor_result_name, op_domain='ai.onnx.ml',
        name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
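# --- Illustrative sketch (not part of the library code) -------------------
# The numpy equivalent of the binary-case graph built above: the positive
# class probability p becomes (1 - p, p), and the label is the argmax looked
# up in the stored class list (the ArrayFeatureExtractor step). All names are
# placeholders.
import numpy as np

classes = np.array([b'no', b'yes'])
p = np.array([[0.2], [0.9]])                  # positive-class probability
proba = np.hstack([1.0 - p, p])               # Sub + Concat
labels = classes[np.argmax(proba, axis=1)]    # ArgMax + ArrayFeatureExtractor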
def _normalise_proba(scope, operator, container, proba, num_classes,
                     unity_name):
    # Turn raw per-class scores into probabilities: the binary case builds
    # the two columns (1 - p, p); otherwise each row is divided by its sum,
    # with _handle_zeros guarding against division by zero.
    reduced_proba_name = scope.get_unique_variable_name('reduced_proba')
    sub_result_name = scope.get_unique_variable_name('sub_result')
    if num_classes == 2:
        apply_sub(scope, [unity_name, proba],
                  sub_result_name, container, broadcast=1)
        apply_concat(scope, [sub_result_name, proba],
                     operator.outputs[1].full_name, container, axis=1)
    else:
        container.add_node('ReduceSum', proba,
                           reduced_proba_name, axes=[1],
                           name=scope.get_unique_operator_name('ReduceSum'))
        proba_updated, reduced_proba_updated = _handle_zeros(
            scope, container, proba, reduced_proba_name, num_classes)
        apply_div(scope, [proba_updated, reduced_proba_updated],
                  operator.outputs[1].full_name, container, broadcast=1)
    return operator.outputs[1].full_name
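# --- Illustrative sketch (not part of the library code) -------------------
# A numpy reference for the normalisation _normalise_proba builds in the
# ONNX graph, assuming `scores` holds non-negative per-class scores with one
# column in the binary case. The function name is hypothetical.
import numpy as np

def normalise_proba_reference(scores):
    if scores.shape[1] == 1:                 # binary: one positive-class column
        return np.hstack([1.0 - scores, scores])
    row_sums = scores.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0            # mirrors _handle_zeros
    return scores / row_sums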
# Fragment (appears to be from the SGDClassifier converter); the excerpt
# starts in the middle of a call, so the first line below is a dangling
# continuation. Scores are turned into a two-column matrix for the binary
# case, then the predicted label is the argmax mapped through
# ArrayFeatureExtractor.
        len(classes))
elif sgd_op.loss == 'modified_huber':
    proba = _predict_proba_modified_huber(
        scope, operator, container, scores, len(classes))
else:
    if len(classes) == 2:
        negate_name = scope.get_unique_variable_name('negate')
        negated_scores_name = scope.get_unique_variable_name(
            'negated_scores')
        container.add_initializer(
            negate_name, onnx_proto.TensorProto.FLOAT, [], [-1])
        apply_mul(scope, [scores, negate_name],
                  negated_scores_name, container, broadcast=1)
        apply_concat(scope, [negated_scores_name, scores],
                     operator.outputs[1].full_name, container, axis=1)
    else:
        apply_identity(scope, scores,
                       operator.outputs[1].full_name, container)
    proba = operator.outputs[1].full_name
container.add_node('ArgMax', proba,
                   predicted_label_name,
                   name=scope.get_unique_operator_name('ArgMax'), axis=1)
container.add_node(
    'ArrayFeatureExtractor', [classes_name, predicted_label_name],
    final_label_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
if class_type == onnx_proto.TensorProto.INT32:
    reshaped_final_label_name = scope.get_unique_variable_name(
        'reshaped_final_label')
# Fragment (appears to be from the KBinsDiscretizer converter): for each
# feature column, either one-hot encode the bin index with OneHotEncoder or
# cast it to float, then concatenate all columns into the final output.
if op.encode == 'onehot-dense':
    onehot_result_name = scope.get_unique_variable_name(
        'onehot_result')
    container.add_node(
        'OneHotEncoder', argmax_output_name,
        onehot_result_name,
        name=scope.get_unique_operator_name('OneHotEncoder'),
        cats_int64s=list(range(op.n_bins_[i])),
        op_domain='ai.onnx.ml')
    apply_reshape(scope, onehot_result_name, digitised_output_name[i],
                  container, desired_shape=(-1, op.n_bins_[i]))
else:
    apply_cast(scope, argmax_output_name, digitised_output_name[i],
               container, to=onnx_proto.TensorProto.FLOAT)
apply_concat(scope, digitised_output_name,
             operator.outputs[0].full_name, container, axis=1)
"You may raise an issue at "
"https://github.com/onnx/sklearn-onnx/issues")
proba_list = []
for index, estimator in enumerate(bagging_op.estimators_):
op_type = sklearn_operator_name_map[type(estimator)]
this_operator = scope.declare_local_operator(op_type)
this_operator.raw_operator = estimator
this_operator.inputs = operator.inputs
label_name = scope.declare_local_variable('label_%d' % index)
this_operator.outputs.append(label_name)
reshaped_proba_name = scope.get_unique_variable_name('reshaped_proba')
apply_reshape(scope, label_name.onnx_name, reshaped_proba_name,
container, desired_shape=(1, -1, 1))
proba_list.append(reshaped_proba_name)
merged_proba_name = scope.get_unique_variable_name('merged_proba')
apply_concat(scope, proba_list,
merged_proba_name, container, axis=0)
container.add_node('ReduceMean', merged_proba_name,
operator.outputs[0].full_name,
name=scope.get_unique_operator_name('ReduceMean'),
axes=[0], keepdims=0)
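# --- Illustrative sketch (not part of the library code) -------------------
# How converters like the one above are typically exercised from user code:
# skl2onnx looks up the registered converter for the fitted estimator and
# emits the ONNX graph. A minimal sketch; data and file names are
# placeholders.
import numpy as np
from sklearn.ensemble import BaggingRegressor
from skl2onnx import to_onnx

X = np.random.rand(100, 4).astype(np.float32)
y = np.random.rand(100).astype(np.float32)
model = BaggingRegressor().fit(X, y)
onnx_model = to_onnx(model, X)                # converter builds the graph here
with open("bagging_regressor.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())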
# Fragment: merge per-class probability tensors into a single matrix. The
# binary branch (whose `if` header is not part of this excerpt) builds
# (1 - p, p); the multiclass branch concatenates the per-class columns and
# normalises each row by its sum.
    container.add_initializer(unit_float_tensor_name,
                              onnx_proto.TensorProto.FLOAT, [], [1.0])
    apply_sub(scope, [unit_float_tensor_name, prob_name[0]],
              zeroth_col_name, container, broadcast=1)
    apply_concat(scope, [zeroth_col_name, prob_name[0]],
                 merged_prob_name, container, axis=1)
    class_prob_tensor_name = merged_prob_name
else:
    concatenated_prob_name = scope.get_unique_variable_name(
        'concatenated_prob')
    reduced_prob_name = scope.get_unique_variable_name('reduced_prob')
    calc_prob_name = scope.get_unique_variable_name('calc_prob')
    apply_concat(scope, prob_name, concatenated_prob_name,
                 container, axis=1)
    container.add_node('ReduceSum', concatenated_prob_name,
                       reduced_prob_name, axes=[1],
                       name=scope.get_unique_operator_name('ReduceSum'))
    num, deno = _handle_zeros(scope, container, concatenated_prob_name,
                              reduced_prob_name, n_classes)
    apply_div(scope, [num, deno],
              calc_prob_name, container, broadcast=1)
    class_prob_tensor_name = calc_prob_name
return class_prob_tensor_name
# Fragment: per-output label selection; the opening container.add_node( of
# the first ArrayFeatureExtractor call is not part of this excerpt. ArgMax
# picks the best class index and a second ArrayFeatureExtractor maps it back
# to the class label, which is appended to the list of predictions.
    'ArrayFeatureExtractor', [transposed_result_name, k_name],
    out_k_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
container.add_node(
    'ArgMax', out_k_name, argmax_output_name,
    name=scope.get_unique_operator_name('ArgMax'), axis=1)
apply_reshape(scope, argmax_output_name, reshaped_result_name,
              container, desired_shape=(1, -1))
container.add_node(
    'ArrayFeatureExtractor', [classes_name, reshaped_result_name],
    preds_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
apply_reshape(scope, preds_name, reshaped_preds_name,
              container, desired_shape=(-1, 1))
predictions.append(reshaped_preds_name)
apply_concat(scope, predictions, operator.outputs[0].full_name,
             container, axis=1)
# Fragment: run every fitted sub-estimator on the same input and concatenate
# their label outputs column-wise, a pattern used by ensemble converters.
input_name = operator.inputs
estimators_results_list = []
for i, estimator in enumerate(model.estimators_):
    estimator_label_name = scope.declare_local_variable(
        'est_label_%d' % i, FloatTensorType([None, 1]))
    op_type = sklearn_operator_name_map[type(estimator)]
    this_operator = scope.declare_local_operator(op_type)
    this_operator.raw_operator = estimator
    this_operator.inputs = input_name
    this_operator.outputs.append(estimator_label_name)
    estimators_results_list.append(estimator_label_name.onnx_name)
apply_concat(scope, estimators_results_list, concatenated_labels_name,
             container, axis=1)
return concatenated_labels_name
# Fragment: build a tensor of ones whose shape is only known at runtime,
# using ConstantOfShape (available from opset 9).
container.add_node('ConstantOfShape', shape_name, unit_name,
                   value=make_tensor(
                       'ONE', TensorProto.FLOAT, [1], [1.]),
                   op_version=9)
# Fragment: concatenate the transformed columns; if the original input was
# INT64, cast the concatenated result back to INT64.
if (operator.inputs[0].type._get_element_onnx_type()
        == onnx_proto.TensorProto.INT64):
    concat_result_name = scope.get_unique_variable_name('concat_result')
    apply_concat(scope, [t for t in transformed_columns],
                 concat_result_name, container, axis=1)
    apply_cast(scope, concat_result_name,
               operator.outputs[0].full_name, container,
               to=onnx_proto.TensorProto.INT64)
else:
    apply_concat(scope, [t for t in transformed_columns],
                 operator.outputs[0].full_name, container, axis=1)
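# --- Illustrative sketch (not part of the library code) -------------------
# A typical way to sanity-check a graph produced by converters like the ones
# above: run it with onnxruntime and compare against scikit-learn. The
# estimator and variable names are placeholders.
import numpy as np
import onnxruntime as rt
from sklearn.linear_model import LogisticRegression
from skl2onnx import to_onnx

X = np.random.rand(20, 3).astype(np.float32)
y = (X[:, 0] > 0.5).astype(np.int64)
clf = LogisticRegression().fit(X, y)
onnx_model = to_onnx(clf, X)

sess = rt.InferenceSession(onnx_model.SerializeToString(),
                           providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
onnx_labels = sess.run(None, {input_name: X})[0]
# Borderline samples may differ slightly due to float32 rounding.
print("label agreement:", np.mean(onnx_labels == clf.predict(X)))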