How to use the causalml.inference.meta.XGBTRegressor function in causalml

To help you get started, we’ve selected a few causalml examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github uber / causalml / tests / test_datasets.py View on Github external
def test_get_synthetic_preds():
    """Smoke-test get_synthetic_preds: every estimator's predictions cover the same rows."""
    learners = {'S Learner (LR)': LRSRegressor(), 'T Learner (XGB)': XGBTRegressor()}
    predictions = get_synthetic_preds(synthetic_data_func=simulate_nuisance_and_easy_treatment,
                                      n=1000,
                                      estimators=learners)

    # Both learners predicted on the same synthetic sample, so row counts must match.
    assert predictions['S Learner (LR)'].shape[0] == predictions['T Learner (XGB)'].shape[0]
github uber / causalml / tests / test_datasets.py View on Github external
def test_get_synthetic_auuc():
    """Smoke-test get_synthetic_auuc on predictions from an S- and a T-learner."""
    learners = {'S Learner (LR)': LRSRegressor(), 'T Learner (XGB)': XGBTRegressor()}
    predictions = get_synthetic_preds(synthetic_data_func=simulate_nuisance_and_easy_treatment,
                                      n=1000,
                                      estimators=learners)

    # plot=False keeps the test headless; printing exercises the summary frame.
    print(get_synthetic_auuc(predictions, plot=False))
github uber / causalml / tests / test_datasets.py View on Github external
def test_get_synthetic_summary():
    """Smoke-test get_synthetic_summary with an S- and a T-learner."""
    learners = {'S Learner (LR)': LRSRegressor(), 'T Learner (XGB)': XGBTRegressor()}

    # Just verify the summary can be produced and rendered without error.
    print(get_synthetic_summary(synthetic_data_func=simulate_nuisance_and_easy_treatment,
                                estimators=learners))
github uber / causalml / tests / test_datasets.py View on Github external
def test_get_synthetic_preds_holdout():
    """Check train/validation predictions have consistent row counts across learners."""
    learners = {'S Learner (LR)': LRSRegressor(),
                'T Learner (XGB)': XGBTRegressor()}
    train_preds, valid_preds = get_synthetic_preds_holdout(
        synthetic_data_func=simulate_nuisance_and_easy_treatment,
        n=1000,
        estimators=learners)

    # Within each split, every learner predicted on the same rows.
    for split in (train_preds, valid_preds):
        assert split['S Learner (LR)'].shape[0] == split['T Learner (XGB)'].shape[0]
github uber / causalml / tests / test_meta_learners.py View on Github external
def test_XGBTRegressor(generate_regression_data):
    """XGBTRegressor sanity checks: ATE within its CI and near truth; CATE ranks well."""
    y, X, treatment, tau, b, e = generate_regression_data()

    model = XGBTRegressor()

    # ATE point estimate should lie inside its own confidence interval
    # and be close (APE below threshold) to the true mean treatment effect.
    estimate, lower, upper = model.estimate_ate(X=X, treatment=treatment, y=y)
    assert (estimate >= lower) and (estimate <= upper)
    assert ape(tau.mean(), estimate) < ERROR_THRESHOLD

    # CATE estimates with bootstrap CI should rank units well (Gini > .5 vs true tau).
    cate, _, _ = model.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)
    assert gini(tau, cate.flatten()) > .5