How to use the ngboost.distns.Normal distribution class in ngboost

To help you get started, we’ve selected a few ngboost examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github stanfordmlgroup / ngboost / figures / toy_single.py View on Github external
# Fragment of figures/toy_single.py: train one NGBoost model on synthetic
# 1-D data, then measure calibration of its predictive distributions on a
# held-out set.
# NOTE(review): this excerpt starts mid-script — the enclosing scope header
# and the imports are outside this view, which is why the first line's
# indentation differs from the rest.
argparser = ArgumentParser()
    # Command-line knobs for the boosting run.
    argparser.add_argument("--n-estimators", type=int, default=301)
    argparser.add_argument("--lr", type=float, default=0.03)
    argparser.add_argument("--minibatch-frac", type=float, default=0.1)
    argparser.add_argument("--natural", action="store_true")
    args = argparser.parse_args()

    # Small training set; gen_data is a project helper — presumably returns
    # (X, y, extra), third element unused here. TODO confirm signature.
    x_tr, y_tr, _ = gen_data(n=50)

    # Degree-1 polynomial features, i.e. just an added bias column.
    poly_transform = PolynomialFeatures(1)
    x_tr = poly_transform.fit_transform(x_tr)

    # NGBoost with a Normal predictive distribution fit by maximum
    # likelihood, using decision trees as the base learners.
    ngb = NGBoost(
        Base=default_tree_learner,
        Dist=Normal,
        Score=MLE,
        n_estimators=args.n_estimators,
        learning_rate=args.lr,
        natural_gradient=args.natural,
        minibatch_frac=args.minibatch_frac,
        verbose=True,
    )

    ngb.fit(x_tr, y_tr)

    # Larger test set over a slightly wider input range (bound=1.3) to probe
    # behavior outside the training support; must use the SAME fitted
    # polynomial transform as training.
    x_te, y_te, _ = gen_data(n=1000, bound=1.3)
    x_te = poly_transform.transform(x_te)
    preds = ngb.pred_dist(x_te)

    # Calibration curve: predicted percentiles vs. observed frequencies.
    pctles, obs, _, _ = calibration_regression(preds, y_te)
github stanfordmlgroup / ngboost / figures / toy.py View on Github external
# Fragment of figures/toy.py: same toy-data NGBoost experiment as
# toy_single.py but with an estimator count sized in blocks of 100
# (default 1 + BLK * 100; BLK is defined outside this excerpt).
# NOTE(review): excerpt starts mid-script — the enclosing scope header and
# imports are outside this view, hence the first line's odd indentation.
argparser = ArgumentParser()
    # Command-line knobs for the boosting run.
    argparser.add_argument("--n-estimators", type=int, default=(1 + BLK * 100))
    argparser.add_argument("--lr", type=float, default=0.03)
    argparser.add_argument("--minibatch-frac", type=float, default=0.1)
    argparser.add_argument("--natural", action="store_true")
    args = argparser.parse_args()

    x_tr, y_tr, _ = gen_data(n=100)

    # Degree-1 polynomial features, i.e. just an added bias column.
    poly_transform = PolynomialFeatures(1)
    x_tr = poly_transform.fit_transform(x_tr)

    # NGBoost with a Normal predictive distribution fit by maximum
    # likelihood, using decision trees as the base learners.
    ngb = NGBoost(
        Base=default_tree_learner,
        Dist=Normal,
        Score=MLE,
        n_estimators=args.n_estimators,
        learning_rate=args.lr,
        natural_gradient=args.natural,
        minibatch_frac=args.minibatch_frac,
        verbose=True,
    )

    # Block size for staged plotting; unused within this excerpt —
    # presumably consumed further down the script. TODO confirm.
    blk = int(args.n_estimators / 100)
    ngb.fit(x_tr, y_tr)

    # Test set over a slightly wider range than training (bound=1.3),
    # transformed with the SAME fitted polynomial transform.
    x_te, y_te, _ = gen_data(n=1000, bound=1.3)
    x_te = poly_transform.transform(x_te)
    preds = ngb.pred_dist(x_te)

    # Calibration curve: predicted percentiles vs. observed frequencies.
    pctles, obs, _, _ = calibration_regression(preds, y_te)
github stanfordmlgroup / ngboost / examples / visualizations / vis_crps_logscale.py View on Github external
    # CRPS objectives as functions of the distribution parameters `p`
    # (evaluated/minimized elsewhere in the script). PEP 8 (E731): use
    # `def` instead of assigning a lambda to a name — same behavior, but
    # the functions get real names for tracebacks/repr.
    def logscale_crps_fn(p):
        """Mean CRPS of a Normal(p) fit evaluated against log(rvs)."""
        return Normal(p, temp_scale=1.0).crps(np.log(rvs)).mean()

    def lognorm_crps_fn(p):
        """Mean CRPS of a LogNormal(p) fit evaluated against rvs directly."""
        return LogNormal(p, temp_scale=1.0).crps(rvs).mean()
github stanfordmlgroup / ngboost / ngboost / ngboost.py View on Github external
    def __init__(self, Dist=Normal, Score=LogScore,
                 Base=default_tree_learner, gradient='natural',
                 n_estimators=500, learning_rate=0.01, minibatch_frac=1.0,
                 verbose=True, verbose_eval=100, tol=1e-4,
                 random_state=None):
        """Configure an NGBoost model (no fitting happens here).

        Parameters
        ----------
        Dist : predictive distribution class (default Normal).
        Score : scoring rule used to fit Dist (default LogScore).
        Base : base learner factory (default a decision-tree learner).
        gradient : 'natural' or plain gradient selection. -- TODO confirm
            accepted values; only stored here, consumed elsewhere.
        n_estimators : number of boosting iterations.
        learning_rate : shrinkage applied to each boosting step.
        minibatch_frac : fraction of rows sampled per iteration (1.0 = all).
        verbose, verbose_eval : logging toggles / print frequency.
        tol, random_state : stored but not used within this method;
            presumably consumed by fit(). -- TODO confirm.
        """
        self.Dist = Dist
        self.Score = Score
        self.Base = Base
        # Pair the score with the distribution; manifold() is a project
        # helper whose exact contract is outside this view.
        self.Manifold = manifold(Score, Dist)
        self.gradient = gradient
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.minibatch_frac = minibatch_frac
        self.verbose = verbose
        self.verbose_eval = verbose_eval
        # Filled in during fitting: initial distribution parameters and the
        # list of per-iteration base models.
        self.init_params = None
        self.base_models = []
github stanfordmlgroup / ngboost / ngboost / sklearn_api.py View on Github external
def __init__(self, *args, **kwargs):
        super(NGBRegressor, self).__init__(Dist=Normal, *args, **kwargs)