# NOTE: third-party banner text retained as a comment; it is not part of the module's code.
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
def _normalise_proba(scope, operator, container, proba, num_classes,
                     unity_name):
    """Emit ONNX nodes that normalise class scores into probabilities.

    Parameters are converter plumbing: *scope* generates unique
    variable/operator names, *operator* supplies the probability output
    variable (``operator.outputs[1]``), and *container* collects the
    emitted graph nodes. *proba* is the name of the raw score tensor,
    *num_classes* the number of classes, and *unity_name* the name of a
    constant tensor holding 1.0.

    Returns the full name of the operator's probability output.
    """
    reduced_proba_name = scope.get_unique_variable_name('reduced_proba')
    sub_result_name = scope.get_unique_variable_name('sub_result')
    if num_classes == 2:
        # Binary case: *proba* holds the positive-class score; the
        # other column is 1 - proba, concatenated along axis 1 so the
        # output has one column per class.
        apply_sub(scope, [unity_name, proba],
                  sub_result_name, container, broadcast=1)
        apply_concat(scope, [sub_result_name, proba],
                     operator.outputs[1].full_name, container, axis=1)
    else:
        # Multi-class case: divide each row by its row-sum so rows sum
        # to 1; _handle_zeros guards against division by all-zero rows.
        container.add_node('ReduceSum', proba,
                           reduced_proba_name, axes=[1],
                           name=scope.get_unique_operator_name('ReduceSum'))
        proba_updated, reduced_proba_updated = _handle_zeros(
            scope, container, proba, reduced_proba_name, num_classes)
        apply_div(scope, [proba_updated, reduced_proba_updated],
                  operator.outputs[1].full_name, container, broadcast=1)
    return operator.outputs[1].full_name
apply_clip(
scope, proba_name, clipped_proba_name, container,
operator_name=scope.get_unique_operator_name('Clip'),
min=np.finfo(float).eps)
container.add_node(
'Log', clipped_proba_name, log_proba_name,
name=scope.get_unique_operator_name('Log'))
container.add_node(
'ReduceSum', log_proba_name, reduced_proba_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
apply_reshape(scope, reduced_proba_name,
reshaped_result_name, container,
desired_shape=(-1, 1))
apply_mul(scope, [reshaped_result_name, inverted_n_classes_name],
prod_result_name, container, broadcast=1)
apply_sub(scope, [log_proba_name, prod_result_name],
sub_result_name, container, broadcast=1)
apply_mul(scope, [sub_result_name, n_classes_minus_one_name],
samme_proba_name, container, broadcast=1)
return samme_proba_name
_transform_isotonic(scope, container, model, df_col_name, k))
prob_name[k] = T
if n_classes == 2:
break
if n_classes == 2:
zeroth_col_name = scope.get_unique_variable_name('zeroth_col')
merged_prob_name = scope.get_unique_variable_name('merged_prob')
unit_float_tensor_name = scope.get_unique_variable_name(
'unit_float_tensor')
container.add_initializer(unit_float_tensor_name,
onnx_proto.TensorProto.FLOAT, [], [1.0])
apply_sub(scope, [unit_float_tensor_name, prob_name[0]],
zeroth_col_name, container, broadcast=1)
apply_concat(scope, [zeroth_col_name, prob_name[0]],
merged_prob_name, container, axis=1)
class_prob_tensor_name = merged_prob_name
else:
concatenated_prob_name = scope.get_unique_variable_name(
'concatenated_prob')
reduced_prob_name = scope.get_unique_variable_name('reduced_prob')
calc_prob_name = scope.get_unique_variable_name('calc_prob')
apply_concat(scope, prob_name, concatenated_prob_name,
container, axis=1)
container.add_node('ReduceSum', concatenated_prob_name,
reduced_prob_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
num, deno = _handle_zeros(scope, container, concatenated_prob_name,
container.add_initializer(theta_name, proto_type, theta.shape,
theta.ravel())
container.add_initializer(sigma_name, proto_type, sigma.shape,
sigma.ravel())
container.add_initializer(jointi_name, proto_type, [1, jointi.shape[0]],
jointi)
container.add_initializer(
sigma_sum_log_name, proto_type,
[1, sigma_sum_log.shape[0]], sigma_sum_log.ravel())
container.add_initializer(exponent_name, proto_type, [], [2])
container.add_initializer(prod_operand_name, proto_type, [], [0.5])
apply_reshape(scope, input_name, reshaped_input_name, container,
desired_shape=[-1, 1, features])
apply_sub(scope, [reshaped_input_name, theta_name], subtracted_input_name,
container, broadcast=1)
apply_pow(scope, [subtracted_input_name, exponent_name], pow_result_name,
container, broadcast=1)
apply_div(scope, [pow_result_name, sigma_name], div_result_name,
container, broadcast=1)
container.add_node('ReduceSum', div_result_name,
reduced_sum_name, axes=[2], keepdims=0,
name=scope.get_unique_operator_name('ReduceSum'))
apply_mul(scope, [reduced_sum_name, prod_operand_name], mul_result_name,
container, broadcast=1)
apply_sub(scope, [sigma_sum_log_name, mul_result_name],
part_log_likelihood_name,
container, broadcast=1)
apply_add(scope, [jointi_name, part_log_likelihood_name],
sum_result_name, container, broadcast=1)
return sum_result_name
container.add_initializer(prod_operand_name, proto_type, [], [0.5])
apply_reshape(scope, input_name, reshaped_input_name, container,
desired_shape=[-1, 1, features])
apply_sub(scope, [reshaped_input_name, theta_name], subtracted_input_name,
container, broadcast=1)
apply_pow(scope, [subtracted_input_name, exponent_name], pow_result_name,
container, broadcast=1)
apply_div(scope, [pow_result_name, sigma_name], div_result_name,
container, broadcast=1)
container.add_node('ReduceSum', div_result_name,
reduced_sum_name, axes=[2], keepdims=0,
name=scope.get_unique_operator_name('ReduceSum'))
apply_mul(scope, [reduced_sum_name, prod_operand_name], mul_result_name,
container, broadcast=1)
apply_sub(scope, [sigma_sum_log_name, mul_result_name],
part_log_likelihood_name,
container, broadcast=1)
apply_add(scope, [jointi_name, part_log_likelihood_name],
sum_result_name, container, broadcast=1)
return sum_result_name
condition_name, name=scope.get_unique_operator_name('Greater'),
op_version=9)
apply_cast(scope, condition_name, cast_values_name, container,
to=proto_type)
apply_add(scope, [zero_tensor_name, cast_values_name],
binarised_input_name, container, broadcast=1)
input_name = binarised_input_name
apply_exp(scope, feature_log_prob_name, exp_result_name, container)
apply_sub(scope, [constant_name, exp_result_name], sub_result_name,
container, broadcast=1)
apply_log(scope, sub_result_name, neg_prob_name, container)
container.add_node('ReduceSum', neg_prob_name,
sum_neg_prob_name, axes=[0],
name=scope.get_unique_operator_name('ReduceSum'))
apply_sub(scope, [feature_log_prob_name, neg_prob_name],
difference_matrix_name, container)
container.add_node(
'MatMul', [input_name, difference_matrix_name],
dot_prod_name, name=scope.get_unique_operator_name('MatMul'))
apply_add(scope, [dot_prod_name, sum_neg_prob_name],
partial_sum_result_name, container)
apply_add(scope, [partial_sum_result_name, class_log_prior_name],
sum_result_name, container)
return sum_result_name
absolute_distance_name = scope.get_unique_variable_name(
'absolute_distance')
nearest_x_index_name = scope.get_unique_variable_name(
'nearest_x_index')
nearest_y_name = scope.get_unique_variable_name('nearest_y')
container.add_initializer(
calibrator_x_name, onnx_proto.TensorProto.FLOAT,
[len(model.calibrators_[k]._X_)], model.calibrators_[k]._X_)
container.add_initializer(
calibrator_y_name, onnx_proto.TensorProto.FLOAT,
[len(model.calibrators_[k]._y_)], model.calibrators_[k]._y_)
apply_reshape(scope, T, reshaped_df_name, container,
desired_shape=(-1, 1))
apply_sub(scope, [reshaped_df_name, calibrator_x_name],
distance_name, container, broadcast=1)
apply_abs(scope, distance_name, absolute_distance_name, container)
container.add_node('ArgMin', absolute_distance_name,
nearest_x_index_name, axis=1,
name=scope.get_unique_operator_name('ArgMin'))
container.add_node(
'ArrayFeatureExtractor',
[calibrator_y_name, nearest_x_index_name],
nearest_y_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
nearest_y_name_reshaped = scope.get_unique_variable_name(
'nearest_y_name_reshaped')
apply_reshape(scope, nearest_y_name,
nearest_y_name_reshaped, container,
desired_shape=(-1, 1))