How to use the fairlearn.reductions.EqualizedOdds class in fairlearn

To help you get started, we’ve selected a few fairlearn examples based on popular ways EqualizedOdds is used in public projects.

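Before the scraped test snippets, here is a minimal, self-contained sketch of the pattern they exercise: EqualizedOdds is a constraint ("moment") object that you hand to a reduction such as ExponentiatedGradient, which then trains a fairness-constrained classifier. The toy data below is invented purely for illustration, and the choice of estimator and eps is arbitrary.

import numpy as np
from sklearn.linear_model import LogisticRegression
from fairlearn.reductions import EqualizedOdds, ExponentiatedGradient

# Toy data, invented for this sketch: 100 samples, 3 features,
# binary labels and a binary sensitive feature.
rng = np.random.RandomState(0)
X = rng.normal(size=(100, 3))
y = rng.randint(2, size=100)
sensitive = rng.randint(2, size=100)

# EqualizedOdds supplies the fairness constraint; the reduction does the
# actual training by repeatedly refitting the reweighted base estimator.
mitigator = ExponentiatedGradient(
    LogisticRegression(solver='liblinear'),
    constraints=EqualizedOdds(),
    eps=0.05)
mitigator.fit(X, y, sensitive_features=sensitive)
y_pred = mitigator.predict(X)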

github fairlearn / fairlearn / test / unit / reductions / moments / test_moments_equalized_odds.py
from fairlearn.reductions import EqualizedOdds
# simple_binary_threshold_data is a helper defined alongside this test in
# fairlearn's test suite; it builds a small labelled dataset split across
# two sensitive-feature groups.


def test_construct_and_load():
    eqo = EqualizedOdds()
    assert eqo.short_name == "EqualizedOdds"

    num_samples_a0 = 10
    num_samples_a1 = 30
    num_samples = num_samples_a0 + num_samples_a1

    a0_threshold = 0.2
    a1_threshold = 0.7

    a0_label = "a0"
    a1_label = "a1"

    X, Y, A = simple_binary_threshold_data(num_samples_a0, num_samples_a1,
                                           a0_threshold, a1_threshold,
                                           a0_label, a1_label)

    # Load the generated data into the moment.
    eqo.load_data(X, Y, sensitive_features=A)
github fairlearn / fairlearn / test / unit / reductions / exponentiated_gradient / test_exponentiatedgradient_smoke.py
{"cons_class": DemographicParity, "eps": 0.010,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.010000,
                        "error": 0.354174, "n_oracle_calls": 22,
                        "n_classifiers": 5},
                       {"cons_class": DemographicParity, "eps": 0.005,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.005000,
                        "error": 0.365130, "n_oracle_calls": 22,
                        "n_classifiers": 5},
                       {"cons_class": EqualizedOdds, "eps": 0.100,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.100000,
                        "error": 0.309333, "n_oracle_calls": 21,
                        "n_classifiers": 4},
                       {"cons_class": EqualizedOdds, "eps": 0.050,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.050000,
                        "error": 0.378827, "n_oracle_calls": 19,
                           "n_classifiers": 6},
                       {"cons_class": EqualizedOdds, "eps": 0.020,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.020000,
                        "error": 0.421531, "n_oracle_calls": 19,
                           "n_classifiers": 6},
                       {"cons_class": EqualizedOdds, "eps": 0.010,
                        "best_gap": 0.000000, "last_t": 5,
                        "best_t": 5, "disp": 0.010000,
                        "error": 0.435765, "n_oracle_calls": 19,
                           "n_classifiers": 6},
                       {"cons_class": EqualizedOdds, "eps": 0.005,
                        "best_gap": 0.000000, "last_t": 5,
github fairlearn / fairlearn / test / unit / reductions / grid_search / test_grid_search_arguments.py
def setup_method(self, method):
    # Pair a scikit-learn estimator with the EqualizedOdds constraint;
    # the grid-search argument tests hand both of these to GridSearch.
    self.estimator = LogisticRegression(solver='liblinear')
    self.disparity_criterion = EqualizedOdds()
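The setup above pairs a scikit-learn LogisticRegression with an EqualizedOdds disparity criterion for fairlearn's GridSearch reduction. A minimal sketch of how those two pieces are used together, on the same kind of invented toy data as in the sketch near the top of the page (this follows the public GridSearch API, not the test file itself):

import numpy as np
from sklearn.linear_model import LogisticRegression
from fairlearn.reductions import EqualizedOdds, GridSearch

# Toy data, invented for this sketch.
rng = np.random.RandomState(0)
X = rng.normal(size=(100, 3))
y = rng.randint(2, size=100)
A = rng.randint(2, size=100)

# GridSearch takes the same (estimator, constraints) pair as
# ExponentiatedGradient and sweeps a grid of Lagrange multipliers,
# training one predictor per grid point.
sweep = GridSearch(LogisticRegression(solver='liblinear'),
                   constraints=EqualizedOdds(),
                   grid_size=10)
sweep.fit(X, y, sensitive_features=A)
y_pred = sweep.predict(X)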
github fairlearn / fairlearn / test / unit / reductions / moments / test_moments_equalized_odds.py
import pandas as pd

from fairlearn.reductions import EqualizedOdds
# _SIGN, _EVENT and _GROUP_ID are index-level name constants defined in
# fairlearn's moments module (imported at the top of the original test file).


def test_project_lambda_smoke_negatives():
    eqo = EqualizedOdds()

    events = ['label=False', 'label=True']
    signs = ['+', '-']
    labels = ['a', 'b']
    midx = pd.MultiIndex.from_product(
        [signs, events, labels],
        names=[_SIGN, _EVENT, _GROUP_ID])

    # Note that the entries under the '-' sign are larger
    df = pd.Series([1, 2, 11, 19, 1001, 1110, 1230, 1350], index=midx)

    ls = eqo.project_lambda(df)

    expected = pd.Series([0, 0, 0, 0, 1000, 1108, 1219, 1331], index=midx)
    # (the full test asserts that ls matches expected)
github fairlearn / fairlearn / test / unit / reductions / moments / test_moments_equalized_odds.py
def test_project_lambda_smoke_positives():
    # This is a repeat of the _negatives test above, but with
    # the '+' entries larger
    eqo = EqualizedOdds()

    events = ['label=False', 'label=True']
    signs = ['+', '-']
    labels = ['a', 'b']
    midx = pd.MultiIndex.from_product(
        [signs, events, labels],
        names=[_SIGN, _EVENT, _GROUP_ID])

    # Note that the entries under the '-' sign are now smaller
    df = pd.Series([200, 300, 100, 600, 4, 5, 6, 7], index=midx)

    ls = eqo.project_lambda(df)

    expected = pd.Series([196, 295, 94, 593, 0, 0, 0, 0], index=midx)
    # (the full test asserts that ls matches expected)
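Read together, the two tests above pin down what project_lambda does to a lambda vector: for each (event, group) pair only the net difference between its '+' and '-' components survives, clipped at zero, so at most one sign is non-zero per pair. A small check of that arithmetic against the first test's numbers (this reproduces the observed input/output, not fairlearn's internal implementation; the string level names are assumptions standing in for the _SIGN, _EVENT and _GROUP_ID constants):

import pandas as pd

signs = ['+', '-']
events = ['label=False', 'label=True']
labels = ['a', 'b']
midx = pd.MultiIndex.from_product(
    [signs, events, labels],
    names=['sign', 'event', 'group_id'])  # assumed constant values
lam = pd.Series([1, 2, 11, 19, 1001, 1110, 1230, 1350], index=midx)

pos, neg = lam['+'], lam['-']               # split by sign
print((pos - neg).clip(lower=0).tolist())   # [0, 0, 0, 0]
print((neg - pos).clip(lower=0).tolist())   # [1000, 1108, 1219, 1331]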
github fairlearn / fairlearn / test / unit / reductions / moments / test_moments_equalized_odds.py
def test_signed_weights():
    eqo = EqualizedOdds()
    assert eqo.short_name == "EqualizedOdds"

    num_samples_a0 = 10
    num_samples_a1 = 30
    num_samples = num_samples_a0 + num_samples_a1

    a0_threshold = 0.2
    a1_threshold = 0.7

    a0_label = "OneThing"
    a1_label = "AnotherThing"

    X, Y, A = simple_binary_threshold_data(num_samples_a0, num_samples_a1,
                                           a0_threshold, a1_threshold,
                                           a0_label, a1_label)

    # Load the generated data; the full test then builds a lambda vector
    # and checks the per-sample weights returned by eqo.signed_weights(...).
    eqo.load_data(X, Y, sensitive_features=A)