How to use the pymc3.HalfCauchy function in pymc3

To help you get started, we’ve selected a few pymc3 examples based on popular ways the library is used in public projects.

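Before the project snippets, here is a minimal, self-contained sketch of the typical use of HalfCauchy: a weakly informative prior on a scale parameter. The data and prior values below are illustrative, not taken from any of the projects shown.

import numpy as np
import pymc3 as pm

# Simulated data: a noisy line.
rng = np.random.RandomState(0)
x = np.linspace(0.0, 1.0, 100)
y = 2.0 * x + rng.normal(scale=0.5, size=100)

with pm.Model():
    slope = pm.Normal("slope", mu=0.0, sd=10.0)
    # HalfCauchy is a common weakly informative prior for scale
    # parameters; beta sets the spread of its heavy tail.
    sigma = pm.HalfCauchy("sigma", beta=5.0)
    pm.Normal("obs", mu=slope * x, sd=sigma, observed=y)
    trace = pm.sample(1000, tune=1000)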

github pymc-devs / symbolic-pymc / symbolic_pymc / theano / pymc3.py
@convert_dist_to_rv.register(pm.HalfCauchy, object)
def convert_dist_to_rv_HalfCauchy(dist, rng):
    # Batch dimensions are whatever remains of the PyMC3 shape after the
    # RV's support dimensions.
    size = dist.shape.astype(int)[HalfCauchyRV.ndim_supp :]
    # HalfCauchy is located at zero, so only beta (the scale) carries over.
    res = HalfCauchyRV(np.array(0.0, dtype=dist.dtype), dist.beta, size=size, rng=rng)
    return res
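The two-type register call above follows the multiple-dispatch pattern: the converter is selected by the runtime types of both arguments. A minimal sketch of that pattern with the multipledispatch package, using hypothetical names and assuming convert_dist_to_rv is a similar dispatcher:

from multipledispatch import Dispatcher

# Hypothetical stand-in for a registry like convert_dist_to_rv.
convert = Dispatcher("convert")

@convert.register(int, object)
def _convert_int(value, ctx):
    # Selected when the first argument is an int.
    return float(value)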
github gift-surg / NiftyMIC / niftymic / utilities / robust_motion_estimator.py
def _run_robust_gaussian_process_regression_map(x, y):
        x = np.array(x)
        with pymc3.Model() as model:
            ell = pymc3.Gamma("ell", alpha=2, beta=1)
            eta = pymc3.HalfCauchy("eta", beta=5)

            cov = eta**2 * pymc3.gp.cov.Matern52(1, ell)
            gp = pymc3.gp.Latent(cov_func=cov)

            f = gp.prior("f", X=x.reshape(-1, 1))

            sigma = pymc3.HalfCauchy("sigma", beta=2)
            # sigma = pymc3.Normal("sigma")
            # sigma = 0.1
            nu = pymc3.Gamma("nu", alpha=2, beta=1)
            # sigma = 0.01
            # nu = 0.01
            # y_ = pymc3.StudentT("y", mu=f, lam=1.0/sigma, nu=nu, observed=y)
            y_ = pymc3.StudentT("y", mu=f, sd=sigma, nu=nu, observed=y)

            # res = pymc3.find_MAP(model=model, method="L-BFGS-B")
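The snippet ends before inference runs; in pymc3, a MAP estimate or posterior samples would typically be drawn inside the same model context. A hedged sketch continuing the block above:

            # Continuing inside the `with pymc3.Model()` block (sketch):
            res = pymc3.find_MAP(model=model, method="L-BFGS-B")
            # Or draw full posterior samples instead:
            # trace = pymc3.sample(1000, tune=1000)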
github kearnz / autoimpute / autoimpute / imputations / series / bayesian_regression.py
        Returns:
            self. Instance of the class.
        """
        _not_num_series(self.strategy, y)
        nc = len(X.columns)

        # Initialize the model for Bayesian linear regression. The default
        # prior values assume the data is scaled and centered; if it is not,
        # convergence can struggle or fail unless suitable prior values are
        # specified separately. Each beta is also assumed normal and
        # independent. The betas are likely not truly independent, but
        # independence is technically an assumption of OLS.
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha + beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        self.statistics_ = {"param": fit_model, "strategy": self.strategy}
        return self
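The method above only builds and stores the model; sampling happens in a later imputation step. A hedged sketch of drawing from a model built this way (argument values illustrative):

with fit_model:
    trace = pm.sample(1000, tune=1000, progressbar=False)
# Posterior mean of the noise scale, registered under the name "σ" above.
sigma_hat = trace["σ"].mean()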
github luke14free / pm-prophet / pmprophet / model.py
def generate_priors(self):
        """Set up the priors for the model."""
        with self.model:
            if "sigma" not in self.priors:
                self.priors["sigma"] = pm.HalfCauchy(
                    "sigma_%s" % self.name, 10, testval=1.0
                )

            if "seasonality" not in self.priors and self.seasonality:
                self.priors["seasonality"] = pm.Laplace(
                    "seasonality_%s" % self.name,
                    0,
                    self.seasonality_prior_scale,
                    shape=len(self.seasonality),
                )
            if "holidays" not in self.priors and self.holidays:
                self.priors["holidays"] = pm.Laplace(
                    "holidays_%s" % self.name,
                    0,
                    self.holidays_prior_scale,
                    shape=len(self.holidays),
                )
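A minimal sketch of the same fill-in-missing-priors pattern outside the class (the priors dictionary and names are illustrative):

import pymc3 as pm

priors = {}  # any user-supplied priors would already be present
with pm.Model():
    if "sigma" not in priors:
        # Default weakly informative scale prior; testval seeds the
        # sampler's starting point.
        priors["sigma"] = pm.HalfCauchy("sigma", beta=10, testval=1.0)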
github kearnz / autoimpute / autoimpute / imputations / series / lrd.py
        # means for the priors. This will greatly speed up posterior sampling
        # and help ensure that convergence occurs
        if self.am is None:
            self.am = self.lm.intercept_
        if self.bm is None:
            self.bm = self.lm.coef_

        # Initialize the model for Bayesian linear regression. The default
        # prior values assume the data is scaled and centered; if it is not,
        # convergence can struggle or fail unless suitable prior values are
        # specified separately. Each beta is also assumed normal and
        # independent. The betas are likely not truly independent, but
        # independence is technically an assumption of OLS.
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha + beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        params = {"model": fit_model, "y_obs": y_df}
        self.statistics_ = {"param": params, "strategy": self.strategy}
        return self
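Imputation strategies like this one typically draw from the posterior predictive of the stored model. A hedged sketch (draw counts illustrative; requires a pymc3 version with sample_posterior_predictive):

with fit_model:
    trace = pm.sample(500, tune=500, progressbar=False)
    # Posterior predictive draws of the response, used for imputation.
    ppc = pm.sample_posterior_predictive(trace, var_names=["score"])
draws = ppc["score"]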
github kearnz / autoimpute / autoimpute / imputations / series / pmm.py
        # means for the priors. This will greatly speed up posterior sampling
        # and help ensure that convergence occurs
        if self.am is None:
            self.am = self.lm.intercept_
        if self.bm is None:
            self.bm = self.lm.coef_

        # Initialize the model for Bayesian linear regression. The default
        # prior values assume the data is scaled and centered; if it is not,
        # convergence can struggle or fail unless suitable prior values are
        # specified separately. Each beta is also assumed normal and
        # independent. The betas are likely not truly independent, but
        # independence is technically an assumption of OLS.
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha + beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        params = {"model": fit_model, "y_obs": y_df}
        self.statistics_ = {"param": params, "strategy": self.strategy}
        return self
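pmm.py appears to implement predictive mean matching, where each missing case is imputed with an observed value whose predicted mean is close to its own. A purely illustrative sketch of that matching step, not the package's code:

import numpy as np

def pmm_match(pred_missing, pred_observed, y_observed, k=5):
    """Impute each missing case with a random draw from the k observed
    values whose predicted means are nearest its predicted mean."""
    imputed = []
    for p in np.atleast_1d(pred_missing):
        nearest = np.argsort(np.abs(pred_observed - p))[:k]
        imputed.append(np.random.choice(y_observed[nearest]))
    return np.array(imputed)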
github pymc-devs / pymc3 / benchmarks / benchmarks / benchmarks.py
def glm_hierarchical_model(random_seed=123):
    """Sample glm hierarchical model to use in benchmarks"""
    np.random.seed(random_seed)
    data = pd.read_csv(pm.get_data('radon.csv'))
    data['log_radon'] = data['log_radon'].astype(theano.config.floatX)
    county_idx = data.county_code.values

    n_counties = len(data.county.unique())
    with pm.Model() as model:
        mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
        sigma_a = pm.HalfCauchy('sigma_a', 5)
        mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
        sigma_b = pm.HalfCauchy('sigma_b', 5)
        a = pm.Normal('a', mu=0, sd=1, shape=n_counties)
        b = pm.Normal('b', mu=0, sd=1, shape=n_counties)
        # Non-centered parameterization: scale standard-normal offsets by
        # the group-level mean and spread.
        a = mu_a + sigma_a * a
        b = mu_b + sigma_b * b
        eps = pm.HalfCauchy('eps', 5)
        radon_est = a[county_idx] + b[county_idx] * data.floor.values
        pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
    return model
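A hedged sketch of using the benchmark model (draw counts illustrative):

model = glm_hierarchical_model()
with model:
    trace = pm.sample(draws=500, tune=500, chains=2)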