How to use the fairlearn.metrics._metrics_engine.make_group_metric function in fairlearn

To help you get started, we've selected a few fairlearn examples based on popular ways the library is used in public projects.

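To see what the factory produces, here is a minimal usage sketch before looking at the library source. It assumes the fairlearn 0.4.x interface, in which a metric built by make_group_metric is called as metric(y_true, y_pred, group_membership) and returns a GroupMetricResult whose overall and by_group attributes hold the ungrouped value and a per-group dictionary:

# Minimal sketch of using a grouped metric (fairlearn 0.4.x interface assumed).
import sklearn.metrics as skm
from fairlearn.metrics import make_group_metric

# Wrap an ordinary scikit-learn metric into a grouped one.
group_accuracy = make_group_metric(skm.accuracy_score)

y_true = [0, 1, 1, 0, 1, 0, 1, 1]
y_pred = [0, 1, 0, 0, 1, 1, 1, 0]
groups = ['a', 'a', 'a', 'b', 'b', 'b', 'b', 'b']

result = group_accuracy(y_true, y_pred, groups)
print(result.overall)   # accuracy over the full dataset
print(result.by_group)  # accuracy computed separately for groups 'a' and 'b'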

github fairlearn / fairlearn / fairlearn / metrics / __init__.py View on Github external

import sklearn.metrics as skm

from ._extra_metrics import (balanced_root_mean_squared_error,  # noqa: F401
                             fallout_rate, mean_prediction,
                             mean_overprediction, mean_underprediction,
                             miss_rate, specificity_score)
from ._selection_rate import group_selection_rate  # noqa: F401

from ._skm_wrappers import group_accuracy_score, group_confusion_matrix  # noqa: F401
from ._skm_wrappers import group_precision_score, group_recall_score  # noqa: F401
from ._skm_wrappers import group_roc_auc_score, group_zero_one_loss  # noqa: F401
from ._skm_wrappers import group_mean_squared_error  # noqa: F401
from ._skm_wrappers import group_root_mean_squared_error  # noqa: F401
from ._skm_wrappers import group_r2_score  # noqa: F401

from ._group_metric_result import GroupMetricResult  # noqa: F401
from ._metrics_engine import make_group_metric, metric_by_group  # noqa: F401

# -------------------------------------------

# Classification metrics
group_specificity_score = make_group_metric(specificity_score)
"""A grouped metric for the :py:func:`specificity_score`
"""

group_miss_rate = make_group_metric(miss_rate)
"""A grouped metric for the :py:func:`miss_rate`
"""

group_fallout_rate = make_group_metric(fallout_rate)
"""A grouped metric for the :py:func:`fallout_rate`
"""

# Regression metrics
group_max_error = make_group_metric(skm.max_error)
"""A grouped wrapper around the :py:func:`sklearn.metrics.max_error` routine
"""

group_mean_absolute_error = make_group_metric(skm.mean_absolute_error)
"""A grouped wrapper around the :py:func:`sklearn.metrics.mean_absolute_error` routine
"""

group_mean_squared_log_error = make_group_metric(skm.mean_squared_log_error)
"""A grouped wrapper around the :py:func:`sklearn.metrics.mean_squared_log_error` routine
"""

group_median_absolute_error = make_group_metric(skm.median_absolute_error)
"""A grouped wrapper around the :py:func:`sklearn.metrics.median_absolute_error` routine
"""

group_balanced_root_mean_squared_error = make_group_metric(
    balanced_root_mean_squared_error)
"""A grouped wrapper around the :py:func:`balanced_root_mean_squared_error` routine
"""

group_mean_prediction = make_group_metric(mean_prediction)
"""A grouped wrapper around the :py:func:`mean_prediction` routine
"""

group_mean_overprediction = make_group_metric(mean_overprediction)
"""A grouped wrapper around the :py:func:`mean_overprediction` routine
"""

group_mean_underprediction = make_group_metric(mean_underprediction)
"""A grouped wapper around the :py:func:`mean_underprediction` routine
"""

# -------------------------------------------

_extra_metrics = [
    "balanced_root_mean_squared_error",
    "fallout_rate",
    "mean_prediction",
    "mean_overprediction",
    "mean_underprediction",
    "miss_rate",
    "selection_rate",
    "specificity_score"
]