How to use the fairlearn.metrics.group_recall_score function in fairlearn

To help you get started, we’ve selected a few fairlearn examples based on popular ways it is used in public projects.

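Before the project snippets, here is a minimal sketch of calling the function directly. It assumes the legacy fairlearn group-metric API used in the examples below, in which group_recall_score takes the true labels, the predicted labels, and a per-sample group membership array, and returns a result object with an overall value and a per-group breakdown (the by_group attribute is an assumption based on that legacy API):

import fairlearn.metrics as metrics

y_true = [0, 1, 1, 0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1, 1, 1, 1]
groups = [0, 0, 0, 0, 1, 1, 1, 1]  # sensitive-feature value for each sample

result = metrics.group_recall_score(y_true, y_pred, groups)
print(result.overall)   # recall over the full data set
print(result.by_group)  # recall for each group value separately

Keyword arguments such as pos_label and average are forwarded to sklearn.metrics.recall_score, as the tests below demonstrate.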

github fairlearn / fairlearn / test / unit / metrics / test_group_sklearn_wrappers.py
def test_group_recall_score_pos_label():
    result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
    expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)

    assert np.array_equal(result.overall, expected_overall)
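
The test above only checks the overall value against scikit-learn. The per-group values can be checked the same way by masking on the group array; a sketch using the same fixtures, assuming result.by_group is a dict keyed by group value:

for grp in np.unique(groups):
    mask = np.asarray(groups) == grp
    expected = skm.recall_score(np.asarray(Y_true)[mask],
                                np.asarray(Y_pred)[mask],
                                pos_label=0)
    assert result.by_group[grp] == pytest.approx(expected)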
github fairlearn / fairlearn / test / unit / metrics / test_group_sklearn_wrappers.py
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]

# =======================================================

# Define as lists of (sklearn metric, fairlearn group metric) pairs so that
# the actual names can be seen when pytest builds the parametrized tests

supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
                              (skm.confusion_matrix, metrics.group_confusion_matrix),
                              (skm.zero_one_loss, metrics.group_zero_one_loss)]

# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
                                     (skm.recall_score, metrics.group_recall_score),
                                     (skm.roc_auc_score, metrics.group_roc_auc_score),
                                     (skm.mean_squared_error, metrics.group_mean_squared_error)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted


metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
                             (skm.mean_absolute_error, metrics.group_mean_absolute_error),
                             (skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
                             (skm.median_absolute_error, metrics.group_median_absolute_error)]

supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary

# =======================================================


@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
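
The excerpt stops at the decorator, so the parametrized test body is not shown. Judging from the other tests in this file, it presumably calls each (sklearn metric, fairlearn group metric) pair on the same data and compares the overall results, roughly along these lines (illustrative name and body, not the actual test):

def test_metric_matches_sklearn(func_tuple):
    sklearn_func, group_func = func_tuple
    result = group_func(Y_true, Y_pred, groups)
    expected_overall = sklearn_func(Y_true, Y_pred)
    assert np.array_equal(result.overall, expected_overall)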
github fairlearn / fairlearn / test / unit / metrics / test_group_sklearn_wrappers.py
def test_group_recall_score_ternary():
    result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
    expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)

    assert np.array_equal(result.overall, expected_overall)
github fairlearn / fairlearn / fairlearn / widget / fairlearnDashboard.py
        self._widget_instance = FairlearnWidget()
        if sensitive_features is None or true_y is None or predicted_ys is None:
            raise ValueError("Required parameters not provided")

        self._metric_methods = {
            "accuracy_score": {
                "model_type": ["classification"],
                "function": group_accuracy_score
            },
            "precision_score": {
                "model_type": ["classification"],
                "function": group_precision_score
            },
            "recall_score": {
                "model_type": ["classification"],
                "function": group_recall_score
            },
            "zero_one_loss": {
                "model_type": ["classification"],
                "function": group_zero_one_loss
            },
            "specificity_score": {
                "model_type": ["classification"],
                "function": group_specificity_score
            },
            "miss_rate": {
                "model_type": ["classification"],
                "function": group_miss_rate
            },
            "fallout_rate": {
                "model_type": ["classification"],
                "function": group_fallout_rate
github fairlearn / fairlearn / fairlearn / widget / _fairlearn_dashboard.py
        self._metric_methods = {
            "accuracy_score": {
                "model_type": ["classification"],
                "function": group_accuracy_score
            },
            "balanced_accuracy_score": {
                "model_type": ["classification"],
                "function": group_roc_auc_score
            },
            "precision_score": {
                "model_type": ["classification"],
                "function": group_precision_score
            },
            "recall_score": {
                "model_type": ["classification"],
                "function": group_recall_score
            },
            "zero_one_loss": {
                "model_type": [],
                "function": group_zero_one_loss
            },
            "specificity_score": {
                "model_type": [],
                "function": group_specificity_score
            },
            "miss_rate": {
                "model_type": [],
                "function": group_miss_rate
            },
            "fallout_rate": {
                "model_type": [],
                "function": group_fallout_rate