How to use the fklearn.validation.evaluators.r2_evaluator function in fklearn

To help you get started, we've selected a few fklearn examples based on popular ways it is used in public projects.
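In short, r2_evaluator is a curried evaluator: it takes a DataFrame holding both the observed target and the model's predictions, and returns the R² score in a log-style dict. Below is a minimal sketch with made-up numbers, using fklearn's default column names ('target' and 'prediction'); pass target_column or prediction_column to point it at differently named columns.

import pandas as pd

from fklearn.validation.evaluators import r2_evaluator

# Made-up scores, purely for illustration.
scores = pd.DataFrame({'target': [3.0, 5.0, 7.0],
                       'prediction': [2.8, 5.1, 7.3]})

result = r2_evaluator(scores)
print(result)  # {'r2_evaluator__target': <the R² score>}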


Example from nubank/fklearn (tests/validation/test_evaluators.py):
import pandas as pd

from fklearn.validation.evaluators import r2_evaluator


def test_r2_evaluator():
    predictions = pd.DataFrame(
        {
            'target': [0, 1, 2],
            'prediction': [0.5, 0.9, 1.5]
        }
    )

    result = r2_evaluator(predictions)

    # The score is keyed as '<eval_name>__<target_column>'.
    assert result['r2_evaluator__target'] == 0.745
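The asserted value checks out by hand: the target mean is 1, so the total sum of squares is 2, and the squared residuals sum to 0.25 + 0.01 + 0.25 = 0.51, giving R² = 1 - 0.51/2 = 0.745. A quick sketch of the same arithmetic:

# Recomputing the R² from the example above, step by step.
target = [0, 1, 2]
prediction = [0.5, 0.9, 1.5]

mean = sum(target) / len(target)                                 # 1.0
ss_tot = sum((t - mean) ** 2 for t in target)                    # 2.0
ss_res = sum((t - p) ** 2 for t, p in zip(target, prediction))   # 0.51
print(1 - ss_res / ss_tot)                                       # 0.745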
Example from nubank/fklearn (tests/validation/test_evaluators.py):
import pandas as pd

from fklearn.validation.evaluators import combined_evaluators, mse_evaluator, r2_evaluator


def test_combined_evaluators():
    predictions = pd.DataFrame(
        {
            'target': [0, 1, 2],
            'prediction': [0.5, 0.9, 1.5]
        }
    )

    eval_fn1 = r2_evaluator
    eval_fn2 = mse_evaluator

    result = combined_evaluators(predictions, [eval_fn1, eval_fn2])

    # Each evaluator contributes its own key to the combined result.
    assert result['mse_evaluator__target'] == 0.17
    assert result['r2_evaluator__target'] == 0.745
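combined_evaluators applies each evaluator and merges their result dicts, which is why both keys show up in a single result. The MSE value can be verified the same way as the R² above:

# Recomputing the MSE asserted above.
target = [0, 1, 2]
prediction = [0.5, 0.9, 1.5]

mse = sum((t - p) ** 2 for t, p in zip(target, prediction)) / len(target)
print(mse)  # 0.51 / 3 = 0.17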
Example from nubank/fklearn (tests/metrics/test_pd_extractors.py):
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston  # removed in scikit-learn 1.2; requires an older version

from fklearn.training.regression import linear_regression_learner
from fklearn.validation.evaluators import (combined_evaluators, r2_evaluator, spearman_evaluator,
                                           split_evaluator, temporal_split_evaluator)
from fklearn.validation.splitters import out_of_time_and_space_splitter


def test_extract():
    boston = load_boston()
    df = pd.DataFrame(boston['data'], columns=boston['feature_names'])
    df['target'] = boston['target']
    df['time'] = pd.date_range(start='2015-01-01', periods=len(df))
    np.random.seed(42)
    df['space'] = np.random.randint(0, 100, size=len(df))

    # Define train function
    train_fn = linear_regression_learner(features=boston['feature_names'].tolist(), target="target")

    # Define evaluator function
    base_evaluator = combined_evaluators(evaluators=[
        r2_evaluator(target_column='target', prediction_column='prediction'),
        spearman_evaluator(target_column='target', prediction_column='prediction')
    ])

    # Evaluate within splits of the data: by the categorical 'RAD' feature,
    # and by week and year of the 'time' column.
    splitter = split_evaluator(eval_fn=base_evaluator, split_col='RAD', split_values=[4.0, 5.0, 24.0])
    temporal_week_splitter = temporal_split_evaluator(eval_fn=base_evaluator, time_col='time', time_format='%Y-%W')
    temporal_year_splitter = temporal_split_evaluator(eval_fn=base_evaluator, time_col='time', time_format='%Y')

    eval_fn = combined_evaluators(evaluators=[base_evaluator, splitter])
    temporal_week_eval_fn = combined_evaluators(evaluators=[base_evaluator, temporal_week_splitter])
    temporal_year_eval_fn = combined_evaluators(evaluators=[base_evaluator, temporal_year_splitter])

    # Define splitters
    cv_split_fn = out_of_time_and_space_splitter(
        n_splits=5, in_time_limit='2016-01-01', time_column='time', space_column='space'
    )
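The excerpt stops at the splitter definitions, but these pieces are the usual inputs to fklearn's validation loop. As a hedged sketch (not part of the original test), the train function, evaluator, and split function defined above would typically be fed to fklearn.validation.validator.validator, with a metric then pulled out of the resulting logs via the pd_extractors this test exercises:

from fklearn.metrics.pd_extractors import evaluator_extractor, extract
from fklearn.validation.validator import validator

# Run cross-validation with the objects defined in the test above,
# assuming validator's standard {'train_log', 'validator_log'} return shape.
logs = validator(df, cv_split_fn, train_fn, eval_fn)

# Extract a single metric from the validation logs into a DataFrame.
r2_extractor = evaluator_extractor(evaluator_name='r2_evaluator__target')
r2_scores = extract(logs['validator_log'], r2_extractor)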
Example from nubank/fklearn (tests/validation/test_evaluators.py):
import pandas as pd

from fklearn.validation.evaluators import permutation_evaluator, r2_evaluator


def test_permutation_evaluator():
    test_df = pd.DataFrame(
        {
            'a': [1, 1, 0],
            'bb': [2, 0, 0],
            'target': [0, 1, 2]
        }
    )

    base_eval = r2_evaluator

    # A stub predict function that ignores the features entirely.
    def fake_predict(df):
        return df.assign(prediction=[0.5, 0.9, 1.5])

    expected_results = {'r2_evaluator__target': 0.745}

    pimp1 = permutation_evaluator(test_df, fake_predict, base_eval, features=["a"], baseline=True,
                                  shuffle_all_at_once=False)

    assert pimp1['permutation_importance']['a'] == expected_results
    assert pimp1['permutation_importance_baseline'] == expected_results

    pimp2 = permutation_evaluator(test_df, fake_predict, base_eval, features=["a", "bb"], baseline=False,
                                  shuffle_all_at_once=False)

    assert pimp2['permutation_importance']['a'] == expected_results
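Note that fake_predict ignores the feature columns entirely, so shuffling 'a' or 'bb' leaves the predictions, and therefore the R² score, unchanged; that is why every permuted result equals the baseline here. With a real model, the scores for permuted features would typically degrade relative to the baseline, and the size of that drop is what signals a feature's importance.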