# Imports assumed from the surrounding test module (np/skm/metrics are
# used throughout these snippets; the group_* wrappers come from an
# earlier fairlearn release).
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics

# Shared fixtures: group membership labels, per-sample weights, and a
# binary grouping.
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Pair each sklearn metric with its fairlearn group_* counterpart so
# that the actual function names are visible when pytest builds the
# parametrized test IDs
supported_metrics_weighted = [
    (skm.accuracy_score, metrics.group_accuracy_score),
    (skm.confusion_matrix, metrics.group_confusion_matrix),
    (skm.zero_one_loss, metrics.group_zero_one_loss)]

# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [
    (skm.precision_score, metrics.group_precision_score),
    (skm.recall_score, metrics.group_recall_score),
    (skm.roc_auc_score, metrics.group_roc_auc_score),
    (skm.mean_squared_error, metrics.group_mean_squared_error)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted

metrics_no_sample_weights = [
    (skm.max_error, metrics.group_max_error),
    (skm.mean_absolute_error, metrics.group_mean_absolute_error),
    (skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
    (skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
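
# =======================================================
# Sketch: how these pairs are typically consumed. Assumes Y_true and
# Y_pred are defined alongside the fixtures above (the tests below use
# them) and that each group_* result exposes an .overall attribute, as
# the tests below rely on.
import pytest


@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_matches_sklearn(func_tuple):
    metric_func, group_metric_func = func_tuple
    result = group_metric_func(Y_true, Y_pred, groups)
    # The overall value should agree with the plain sklearn metric
    expected_overall = metric_func(Y_true, Y_pred)
    assert np.array_equal(result.overall, expected_overall)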
# =======================================================
def test_group_precision_score_ternary():
    result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
    expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
    assert np.array_equal(result.overall, expected_overall)


def test_group_precision_score_pos_label():
    result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
    expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
    assert np.array_equal(result.overall, expected_overall)
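
# =======================================================
# Sketch: the per-group values can be checked the same way. The
# by_group mapping (keyed by group label) is an assumption based on
# the GroupMetricResult object these wrappers return; only .overall
# is exercised above.
def test_group_precision_score_by_group():
    result = metrics.group_precision_score(Y_true, Y_pred, groups)
    for grp in np.unique(groups):
        # Recompute the metric on just this group's samples
        mask = np.asarray(groups) == grp
        expected = skm.precision_score(np.asarray(Y_true)[mask],
                                       np.asarray(Y_pred)[mask])
        assert np.isclose(result.by_group[grp], expected)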
# =======================================================
# Fragment of the dashboard constructor. FairlearnWidget and the
# group_* functions are imported in the original module; the signature
# below is reconstructed from the body and docstring fields and is an
# assumption, not the library's exact API. The docstring picks up
# mid-parameter-list in the source.
def __init__(self, sensitive_features, true_y, predicted_ys,
             sensitive_feature_names=None):
    """
    :param predicted_ys: The predictions to evaluate, one set per model
    :type predicted_ys: numpy.array or list[][]
    :param sensitive_feature_names: Feature names
    :type sensitive_feature_names: numpy.array or list[]
    """
    self._widget_instance = FairlearnWidget()
    if sensitive_features is None or true_y is None or predicted_ys is None:
        raise ValueError("Required parameters not provided")
    # Map each metric name to the model types it applies to and the
    # group metric function that computes it
    self._metric_methods = {
        "accuracy_score": {
            "model_type": ["classification"],
            "function": group_accuracy_score
        },
        "precision_score": {
            "model_type": ["classification"],
            "function": group_precision_score
        },
        "recall_score": {
            "model_type": ["classification"],
            "function": group_recall_score
        },
        "zero_one_loss": {
            "model_type": ["classification"],
            "function": group_zero_one_loss
        },
        "specificity_score": {
            "model_type": ["classification"],
            "function": group_specificity_score
        },
        "miss_rate": {
            "model_type": ["classification"],
            "function": group_miss_rate
        },
        # (closing braces added; further entries follow in the full source)
    }
# =======================================================
# A later variant of the same constructor, using y_true/y_pred naming
# and also registering balanced_accuracy_score. The signature is again
# a reconstruction and an assumption.
def __init__(self, sensitive_features, y_true, y_pred,
             sensitive_feature_names=None):
    self._widget_instance = FairlearnWidget()
    if sensitive_features is None or y_true is None or y_pred is None:
        raise ValueError("Required parameters not provided")
    self._metric_methods = {
        "accuracy_score": {
            "model_type": ["classification"],
            "function": group_accuracy_score
        },
        "balanced_accuracy_score": {
            "model_type": ["classification"],
            "function": group_roc_auc_score
        },
        "precision_score": {
            "model_type": ["classification"],
            "function": group_precision_score
        },
        "recall_score": {
            "model_type": ["classification"],
            "function": group_recall_score
        },
        "zero_one_loss": {
            "model_type": [],
            "function": group_zero_one_loss
        },
        "specificity_score": {
            "model_type": [],
            "function": group_specificity_score
        },
        "miss_rate": {
            "model_type": [],
            "function": group_miss_rate
        },
        # (closing braces added; further entries follow in the full source)
    }