How to use the fairlearn.metrics._input_manipulations._convert_to_ndarray_and_squeeze function in fairlearn

To help you get started, we’ve selected a few fairlearn examples based on popular ways this function is used in public projects.

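Although the helper is private to fairlearn, its job is simple: coerce whatever array-like it receives (list, pandas Series, single-column DataFrame, numpy array) into a one-dimensional numpy array so the metric code can slice it by group. Below is a minimal sketch of such a helper, for orientation only; the real fairlearn implementation may handle edge cases differently.

import numpy as np

def _convert_to_ndarray_and_squeeze(target):
    # Coerce the input to an ndarray, then drop unit-length
    # dimensions so a column vector becomes a flat 1-D array.
    return np.squeeze(np.asarray(target))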

github fairlearn / fairlearn / fairlearn / metrics / _metrics_engine.py View on Github external
    :return: Object containing the result of applying ``metric_function`` to the entire dataset
        and to each group identified in ``group_membership``.
        If the ``metric_function`` returns a scalar, then additional fields are populated.
    :rtype: :class:`GroupMetricResult`
    """
    _check_array_sizes(y_true, y_pred, 'y_true', 'y_pred')
    _check_array_sizes(y_true, group_membership, 'y_true', 'group_membership')
    if sample_weight is not None:
        _check_array_sizes(y_true, sample_weight, 'y_true', 'sample_weight')

    result = GroupMetricResult()

    # Make everything a numpy array
    # This allows for fast slicing of the groups
    y_a = _convert_to_ndarray_and_squeeze(y_true)
    y_p = _convert_to_ndarray_and_squeeze(y_pred)
    g_d = _convert_to_ndarray_and_squeeze(group_membership)

    s_w = None
    if sample_weight is not None:
        s_w = _convert_to_ndarray_and_squeeze(sample_weight)

    # Evaluate the overall metric with the numpy arrays
    # This ensures consistency in how metric_function is called
    if s_w is not None:
        result.overall = metric_function(y_a, y_p, sample_weight=s_w)
    else:
        result.overall = metric_function(y_a, y_p)

    groups = np.unique(group_membership)
    for group in groups:
        group_indices = (group == g_d)
        group_actual = y_a[group_indices]
        group_predict = y_p[group_indices]
        group_weight = None
        if s_w is not None:
            group_weight = s_w[group_indices]
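The excerpt is cut off inside the per-group loop. A hedged sketch of how such a loop can conclude, mirroring the overall call above (the by_group mapping below is an assumption for illustration, not something shown in the excerpt):

        # Hypothetical continuation: evaluate the metric on this group's
        # slice exactly as the overall metric was evaluated above.
        if group_weight is not None:
            result.by_group[group] = metric_function(
                group_actual, group_predict, sample_weight=group_weight)
        else:
            result.by_group[group] = metric_function(group_actual, group_predict)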
github fairlearn / fairlearn / fairlearn / metrics / _balanced_root_mean_squared_error.py View on Github external
import math

import numpy as np
import sklearn.metrics as skm

from fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze

# Placeholder for the module's private error-message constant.
_Y_TRUE_NOT_0_1 = "y_true must contain only 0 and 1"

def balanced_root_mean_squared_error(y_true, y_pred, sample_weight=None):
    r"""Calculate the mean of the root mean squared error (RMSE) for the positive and negative cases.

    Used for binary logistic regression, this computes the error as

    .. math::
       \frac{\text{RMSE}(Y=0) + \text{RMSE}(Y=1)}{2}

    The classes are constrained to be :math:`\in \{0, 1\}`. The :code:`y_true` values must
    always be one of these, while :code:`y_pred` can be a continuous probability
    (which could be thresholded to get a predicted class).

    Internally, this builds on the
    :py:func:`sklearn.metrics.mean_squared_error` routine.
    """
    y_ta = _convert_to_ndarray_and_squeeze(y_true)
    y_pa = _convert_to_ndarray_and_squeeze(y_pred)
    s_w = np.ones(len(y_ta))
    if sample_weight is not None:
        s_w = _convert_to_ndarray_and_squeeze(sample_weight)

    y_ta_values = np.unique(y_ta)
    if not np.array_equal(y_ta_values, [0, 1]):
        raise ValueError(_Y_TRUE_NOT_0_1)

    errs = np.zeros(2)
    for i in range(2):
        indices = (y_ta == i)
        y_ta_s = y_ta[indices]
        y_pa_s = y_pa[indices]
        s_w_s = s_w[indices]
        errs[i] = math.sqrt(skm.mean_squared_error(y_ta_s, y_pa_s, sample_weight=s_w_s))

    return errs.mean()
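A quick hypothetical call with toy data (reusing the imports above):

y_true = np.array([0, 0, 1, 1])
y_prob = np.array([0.2, 0.4, 0.7, 0.9])
# RMSE over the y_true == 0 rows is averaged with RMSE over the
# y_true == 1 rows, so both classes count equally regardless of size.
print(balanced_root_mean_squared_error(y_true, y_prob))  # approximately 0.27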
github fairlearn / fairlearn / fairlearn / metrics / _mean_predictions.py View on Github external
import numpy as np

from fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze

def mean_overprediction(y_true, y_pred, sample_weight=None):
    """Calculate the (weighted) mean overprediction.

    This is the (weighted) mean of the error where any negative
    errors (i.e. underpredictions) are set to zero.
    """
    y_t = _convert_to_ndarray_and_squeeze(y_true)
    y_p = _convert_to_ndarray_and_squeeze(y_pred)
    s_w = np.ones(len(y_p))
    if sample_weight is not None:
        s_w = _convert_to_ndarray_and_squeeze(sample_weight)

    err = y_p - y_t
    err[err < 0] = 0

    return np.dot(err, s_w) / s_w.sum()
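For example, with hand-picked toy data:

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 1.0, 3.5])
# Errors are [0.5, -1.0, 0.5]; the underprediction is zeroed,
# leaving (0.5 + 0.0 + 0.5) / 3, i.e. about 0.333.
print(mean_overprediction(y_true, y_pred))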
github fairlearn / fairlearn / fairlearn / metrics / _mean_predictions.py View on Github external
import numpy as np

from fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze

def mean_underprediction(y_true, y_pred, sample_weight=None):
    """Calculate the (weighted) mean underprediction.

    This is the (weighted) mean of the error where any
    positive errors (i.e. overpredictions) are set to zero.
    The absolute value of the underpredictions is used, so the
    returned value is always positive.
    """
    y_t = _convert_to_ndarray_and_squeeze(y_true)
    y_p = _convert_to_ndarray_and_squeeze(y_pred)
    s_w = np.ones(len(y_p))
    if sample_weight is not None:
        s_w = _convert_to_ndarray_and_squeeze(sample_weight)

    err = y_p - y_t
    err[err > 0] = 0

    # Underpredictions are negative errors; flip the sign so the
    # reported metric is non-negative
    return -np.dot(err, s_w) / s_w.sum()
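The complementary quantity, with the same toy data as above:

# Errors are [0.5, -1.0, 0.5]; the overpredictions are zeroed and the
# sign is flipped, leaving -(-1.0) / 3, i.e. about 0.333.
print(mean_underprediction(y_true, y_pred))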
github fairlearn / fairlearn / fairlearn / metrics / _mean_predictions.py View on Github external
import numpy as np

from fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze

def mean_prediction(y_true, y_pred, sample_weight=None):
    """Calculate the (weighted) mean prediction.

    The true values are ignored, but are required as an argument in order
    to maintain a consistent interface.
    """
    y_p = _convert_to_ndarray_and_squeeze(y_pred)
    s_w = np.ones(len(y_p))
    if sample_weight is not None:
        s_w = _convert_to_ndarray_and_squeeze(sample_weight)

    return np.dot(y_p, s_w) / s_w.sum()
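Continuing the same toy data, the unweighted mean of the predictions:

print(mean_prediction(y_true, y_pred))  # (1.5 + 1.0 + 3.5) / 3 = 2.0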