How to use the fklearn.metrics.pd_extractors.evaluator_extractor function in fklearn

To help you get started, we've selected a few fklearn examples based on popular ways the library is used in public projects.

Secure your code as it's written: use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

Example from github.com/nubank/fklearn — tests/tuning/test_utils.py (view on GitHub):
def base_extractor():
    """Return the extractor for the 'roc_auc_evaluator__target' log entry."""
    evaluator_key = 'roc_auc_evaluator__target'
    return evaluator_extractor(evaluator_name=evaluator_key)
Example from github.com/nubank/fklearn — tests/metrics/test_pd_extractors.py (view on GitHub):
# NOTE(review): excerpt of a longer test — the enclosing `def` and the
# construction of `data` / `expected_df` are not shown in this view, and the
# first line below appears to have lost its indentation during scraping.
feature3_evaluator = split_evaluator(eval_fn=mse_evaluator, split_col="feature3")
    # Nest the splits: evaluate MSE per "feature3" value, then per "date" value.
    feature3_date_evaluator = split_evaluator(eval_fn=feature3_evaluator, split_col="date")

    results = feature3_date_evaluator(data)

    # The "date" values to extract results for — presumably the distinct dates
    # present in the fixture `data` (confirm against the fixture).
    date_values = [
        np.datetime64("2015-01-06T00:00:00.000000000"),
        np.datetime64("2015-01-14T00:00:00.000000000"),
        np.datetime64("2015-01-22T00:00:00.000000000"),
        np.datetime64("2015-01-30T00:00:00.000000000"),
        np.datetime64("2015-03-08T00:00:00.000000000"),
        np.datetime64("2015-03-09T00:00:00.000000000"),
        np.datetime64("2015-04-04T00:00:00.000000000"),
    ]

    # The extractors mirror the evaluator nesting: base metric, split by
    # "feature3", then split by "date".
    base_evaluator = evaluator_extractor(evaluator_name="mse_evaluator__target")
    feature3_extractor = split_evaluator_extractor(
        base_extractor=base_evaluator, split_col="feature3", split_values=["a", "b"]
    )
    feature3_date_extractor = split_evaluator_extractor(
        base_extractor=feature3_extractor, split_col="date", split_values=date_values
    )

    # Flatten the nested evaluator logs into a DataFrame and compare against
    # the expected frame, ignoring column order (check_like=True).
    actual_df = feature3_date_extractor(results).reset_index(drop=True)
    pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
Example from github.com/nubank/fklearn — tests/tuning/test_samplers.py (view on GitHub):
def base_extractor():
    """Build and return the extractor for the 'roc_auc_evaluator__target' result."""
    return evaluator_extractor(
        evaluator_name='roc_auc_evaluator__target',
    )
Another example from github.com/nubank/fklearn — tests/metrics/test_pd_extractors.py (view on GitHub):
# Validate results
    # NOTE(review): excerpt of a longer test — the enclosing `def`, `df`, the
    # split/train/eval functions, and `extract` are defined outside this view.
    cv_results = validator(df, cv_split_fn, train_fn, eval_fn)['validator_log']
    tlc_results = validator(df, tlc_split_fn, train_fn, eval_fn)['validator_log']
    sc_results = validator(df, sc_split_fn, train_fn, eval_fn)['validator_log']
    fw_sc_results = validator(df, fw_sc_split_fn, train_fn, eval_fn)['validator_log']

    # temporal evaluation results
    predict_fn, _, _ = train_fn(df)
    temporal_week_results = temporal_week_eval_fn(predict_fn(df))
    temporal_year_results = temporal_year_eval_fn(predict_fn(df))

    # Define extractors
    # Pull both metrics (R^2 and Spearman, each keyed on "target") out of each
    # evaluator log in one pass.
    base_extractors = combined_evaluator_extractor(base_extractors=[
        evaluator_extractor(evaluator_name="r2_evaluator__target"),
        evaluator_extractor(evaluator_name="spearman_evaluator__target")
    ])

    # The same pair of metrics, broken down by the listed 'RAD' values.
    splitter_extractor = split_evaluator_extractor(split_col='RAD', split_values=[4.0, 5.0, 24.0],
                                                   base_extractor=base_extractors)

    # Time-based breakdowns of the same metrics: weekly ('%Y-%W') and yearly
    # ('%Y') buckets keyed on the 'time' column.
    temporal_week_splitter_extractor = temporal_split_evaluator_extractor(
        time_col='time', time_format='%Y-%W', base_extractor=base_extractors)

    temporal_year_splitter_extractor = temporal_split_evaluator_extractor(
        time_col='time', time_format='%Y', base_extractor=base_extractors)

    # Shape checks: with 3 'RAD' split values the row count triples (5 -> 15,
    # 12 -> 36) and one extra column is added for the split value.
    assert extract(cv_results, base_extractors).shape == (5, 9)
    assert extract(cv_results, splitter_extractor).shape == (15, 10)

    assert extract(tlc_results, base_extractors).shape == (12, 9)
    assert extract(tlc_results, splitter_extractor).shape == (36, 10)
Example from github.com/nubank/fklearn — tests/tuning/test_selectors.py (view on GitHub):
def base_extractor():
    """Produce the extractor that reads the 'roc_auc_evaluator__target' entry."""
    metric_name = 'roc_auc_evaluator__target'
    return evaluator_extractor(evaluator_name=metric_name)
Example from github.com/nubank/fklearn — tests/tuning/test_stoppers.py (view on GitHub):
def base_extractor():
    """Return an extractor targeting the 'roc_auc_evaluator__target' log key."""
    return evaluator_extractor(
        evaluator_name='roc_auc_evaluator__target')