How to use the causalml.inference.meta.BaseRRegressor class in causalml

To help you get started, we’ve selected a few causalml examples based on popular ways BaseRRegressor is used in public projects.


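A minimal end-to-end sketch of the typical workflow is shown below. It uses causalml's built-in synthetic_data generator together with xgboost; the variable names mirror the examples that follow and are purely illustrative.

from xgboost import XGBRegressor

from causalml.dataset import synthetic_data
from causalml.inference.meta import BaseRRegressor

# Simulated data: outcome y, features X, treatment flag, true effect tau,
# baseline b, and true propensity score e
y, X, treatment, tau, b, e = synthetic_data(mode=1, n=1000, p=5, sigma=1.0)

learner = BaseRRegressor(learner=XGBRegressor())

# Average treatment effect with its confidence bounds
ate, ate_lb, ate_ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)

# Individual (CATE) estimates, one per row of X
cate = learner.fit_predict(X=X, p=e, treatment=treatment, y=y)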
From uber/causalml: tests/test_meta_learners.py (view on GitHub)
# Imports as used in the repo's tests; generate_regression_data is a pytest
# fixture and ERROR_THRESHOLD a constant defined alongside the tests.
from causalml.inference.meta import BaseRRegressor
from causalml.metrics import ape, gini
from xgboost import XGBRegressor


def test_BaseRRegressor(generate_regression_data):
    y, X, treatment, tau, b, e = generate_regression_data()

    learner = BaseRRegressor(learner=XGBRegressor())

    # check the accuracy of the ATE estimation
    ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)
    assert (ate_p >= lb) and (ate_p <= ub)
    assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD

    # check the accuracy of the CATE estimation with the bootstrap CI
    cate_p, _, _ = learner.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)
    assert gini(tau, cate_p.flatten()) > .5
From uber/causalml: causalml/dataset/synthetic.py (view on GitHub)
    # Collect the ground truth and each learner's CATE predictions in one dict
    preds_dict = {}
    preds_dict[KEY_ACTUAL] = tau
    preds_dict[KEY_GENERATED_DATA] = {'y': y, 'X': X, 'w': w, 'tau': tau, 'b': b, 'e': e}

    # Predict p_hat because e would not be directly observed in real-life
    p_model = ElasticNetPropensityModel()
    p_hat = p_model.fit_predict(X, w)

    if estimators:
        for name, learner in estimators.items():
            try:
                preds_dict[name] = learner.fit_predict(X=X, p=p_hat, treatment=w, y=y).flatten()
            except TypeError:
                preds_dict[name] = learner.fit_predict(X=X, treatment=w, y=y).flatten()
    else:
        for base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],
                                         ['S', 'T', 'X', 'R']):
            for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']):
                learner = base_learner(model())
                model_name = '{} Learner ({})'.format(label_l, label_m)
                try:
                    preds_dict[model_name] = learner.fit_predict(X=X, p=p_hat, treatment=w, y=y).flatten()
                except TypeError:
                    preds_dict[model_name] = learner.fit_predict(X=X, treatment=w, y=y).flatten()

        learner = CausalTreeRegressor(random_state=RANDOM_SEED)
        preds_dict['Causal Tree'] = learner.fit_predict(X=X, treatment=w, y=y).flatten()

    return preds_dict
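Once preds_dict is built, the predictions can be scored against the true effect. A short sketch, assuming gini is imported from causalml.metrics as in the test above and that KEY_ACTUAL and KEY_GENERATED_DATA are the module constants used in the excerpt:

from causalml.metrics import gini

# Rank learners by how well their CATE estimates order the true effect tau
scores = {
    name: gini(tau, preds)
    for name, preds in preds_dict.items()
    if name not in (KEY_ACTUAL, KEY_GENERATED_DATA)
}
for name, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print('{}: {:.3f}'.format(name, score))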
From inovex/justcause: src/justcause/learners/meta/rlearner.py (view on GitHub)
"""Setup an RLearner

        Args:
            learner: default learner for both outcome and effect
            outcome_learner: specific learner for outcome
            effect_learner: specific learner for effect
            random_state: RandomState or int to be used for K-fold splitting. NOT used
                in the learners; that has to be done by the user.
        """
        from causalml.inference.meta import BaseRRegressor

        if learner is None and (outcome_learner is None and effect_learner is None):
            learner = LinearRegression()

        self.random_state = check_random_state(random_state)
        self.model = BaseRRegressor(
            learner, outcome_learner, effect_learner, random_state=random_state
        )
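As the docstring notes, the outcome and effect models can be set independently. A minimal sketch of such a configuration; the choice of estimators is purely illustrative, and X, p_hat, w, and y are as in the synthetic.py excerpt above:

from sklearn.linear_model import LinearRegression
from xgboost import XGBRegressor

from causalml.inference.meta import BaseRRegressor

# Boosted trees for the outcome model, a linear model for the effect model
model = BaseRRegressor(
    outcome_learner=XGBRegressor(),
    effect_learner=LinearRegression(),
    random_state=42,
)
cate = model.fit_predict(X=X, p=p_hat, treatment=w, y=y)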