How to use the pymc.Lambda function in pymc

To help you get started, we’ve selected a few pymc examples based on popular ways the library is used in public projects.
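
These examples use the classic PyMC 2.x API. pymc.Lambda turns an anonymous function into a Deterministic node: each keyword argument's default value declares a parent variable, and the node recomputes whenever a parent changes. (Newer PyMC releases drop Lambda in favor of pm.Deterministic.) A minimal sketch of the pattern, assuming PyMC 2.x imported as mc:

import pymc as mc

sigma = mc.Uniform('sigma', lower=0., upper=100.)

# tau is a Deterministic that tracks sigma; 's' binds the parent via its default value
tau = mc.Lambda('tau', lambda s=sigma: s**-2)

# the same node written with the @mc.deterministic decorator
@mc.deterministic
def tau_alt(s=sigma):
    return s**-2

In the excerpts below, pl is pylab, np is numpy, and mc, pm, and pymc are all aliases for PyMC 2.x.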


From aflaxman/gbd: tests/test_rate_model.py (view on GitHub)
def test_log_normal_model_sim(N=16):
    # simulate log-normal rate data
    pi_true = 2.
    sigma_true = .1

    n = pl.array(pl.exp(mc.rnormal(10, 1**-2, size=N)), dtype=int)
    p = pl.exp(mc.rnormal(pl.log(pi_true), 1./(sigma_true**2 + 1./n), size=N))

    # create model and priors
    vars = dict(mu_age=mc.Uniform('mu_age', 0., 1000., value=.01),
                sigma=mc.Uniform('sigma', 0., 10000., value=1000.))
    vars['mu_interval'] = mc.Lambda('mu_interval', lambda mu=vars['mu_age']: mu*pl.ones(N))
    vars.update(rate_model.log_normal_model('sim', vars['mu_interval'], vars['sigma'], p, 1./pl.sqrt(n)))

    # fit model
    m = mc.MCMC(vars)
    m.sample(1)
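
Here the Lambda broadcasts the scalar mu_age over the N simulated intervals; because mu_interval tracks its parent, reassigning mu_age updates it automatically. A quick sanity check of that behavior (hypothetical code, run after building vars inside the test):

vars['mu_age'].value = .5
assert pl.all(vars['mu_interval'].value == .5 * pl.ones(N))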

From aflaxman/gbd: book/talk_splines.py (view on GitHub)
pl.plot(X, Y, 'kx', ms=15, mew=8)

pl.axis([-5, 105, 0., 1.7])
pl.savefig('/media/windows/t/prior-fig-1.png')


for params in [dict(label='$\mu(a)$ unconstrained', value=dict(increasing=dict(age_start=0, age_end=0), decreasing=dict(age_start=0, age_end=0)), color=colors[0]),
               dict(label='$\mu(a)$ decreasing for $a \leq 50$', value=dict(increasing=dict(age_start=0, age_end=0), decreasing=dict(age_start=0, age_end=50)), color=colors[1]),
               dict(label='$\mu(a)$ increasing for $a \leq 50$', value=dict(increasing=dict(age_start=0, age_end=50), decreasing=dict(age_start=0, age_end=0)), color=colors[2]),
               ]:
    #
    vars = age_pattern.age_pattern('t', ages=ages, knots=knots, smoothing=pl.inf)
    vars.update(expert_prior_model.derivative_constraints('t',
                                                          params.pop('value'),
                                                          vars['mu_age'], ages))
    vars['mu_pred'] = mc.Lambda('mu_pred', lambda mu_age=vars['mu_age'], X=X : mu_age[X])
    vars['Y'] = mc.Normal('Y', mu=vars['mu_pred'], tau=tau, value=Y, observed=True)
    #
    for i, k_i in enumerate(knots):
        vars['gamma'][i].value = Y_true[k_i]
    mc.MAP(vars).fit(method='fmin_powell', verbose=1)
    pl.plot(ages, vars['mu_age'].value, 'w-', linewidth=10, **params)

    pl.axis([-5, 105, 0., 1.7])
    pl.savefig('/media/windows/t/prior-fig-%s.png'%params['color'])

pl.show()

From aflaxman/gbd: dismod3/neg_binom_model.py (view on GitHub)
        print 'adjusting prior on zeta effect coefficients for %s' % key
        mu_zeta = dm.params[prior_key]['mean']
        sigma_zeta = dm.params[prior_key]['std']
    
    if mu_delta != 0.:
        if sigma_delta != 0.:
            log_delta = mc.Normal('log_dispersion_%s' % key, mu=mu_log_delta, tau=sigma_log_delta**-2, value=3.)
            zeta = mc.Normal('zeta_%s'%key, mu=mu_zeta, tau=sigma_zeta**-2, value=mu_zeta)
            delta = mc.Lambda('dispersion_%s' % key, lambda x=log_delta: 50. + 10.**x)
            vars.update(dispersion=delta, log_dispersion=log_delta, zeta=zeta, dispersion_step_sd=.1*log_delta.parents['tau']**-.5)
        else:
            delta = mc.Lambda('dispersion_%s' % key, lambda x=mu_delta: x)
            vars.update(dispersion=delta)
        
    else:
        delta = mc.Lambda('dispersion_%s' % key, lambda mu=mu_delta: 0)
        vars.update(dispersion=delta)

    if len(sigma_gamma) == 1:
        sigma_gamma = sigma_gamma[0]*pl.ones(len(est_mesh))

    # create variable for interpolated rate;
    # also create variable for age-specific rate function, if it does not yet exist
    if rate_stoch:
        # if the rate_stoch already exists, for example prevalence in the generic model,
        # we use it to back-calculate mu and eventually gamma
        mu = rate_stoch

        @mc.deterministic(name='age_coeffs_%s' % key)
        def gamma(mu=mu, Xa=X_region, Xb=X_study, alpha=alpha, beta=beta):
            return pl.log(pl.maximum(dismod3.settings.NEARLY_ZERO, mu)) - pl.dot(alpha, Xa) - pl.dot(beta, Xb)

From aflaxman/gbd: book/age_patterns.py (view on GitHub)
### @export 'models-of-varying-smoothness'
mesh = pl.arange(0,101,1)
data = pl.array([[10., 1, .5],
                 [50., 2.5, .5]])

scale = dict(Very=dismod3.utils.rho['very'],
             Moderately=dismod3.utils.rho['moderately'],
             Slightly=dismod3.utils.rho['slightly'])

for col, smoothness in enumerate(['Slightly', 'Moderately', 'Very']):

    ## setup lognormal prior on intercept
    gamma = mc.Normal('gamma', 0.,
                      2.**-2,
                      value=pl.log(data[:,1].mean()))
    mu = mc.Lambda('mu', lambda gamma=gamma: pl.exp(gamma))

    ## setup Gaussian Process prior on age pattern
    @mc.deterministic
    def M(mu=mu):
        return mc.gp.Mean(lambda x: mu*pl.ones(len(x)))
    C = mc.gp.FullRankCovariance(mc.gp.matern.euclidean,
                                 amp=data[:,1].max(),
                                 scale=scale[smoothness],
                                 diff_degree=2)
    sm = mc.gp.GPSubmodel('sm', M, C, mesh,
                          init_vals=mu.value*pl.ones_like(mesh))

    ## condition on rate being positive
    @mc.potential
    def positive(f=sm.f_eval):
        if pl.any(f < 0.):
            return -pl.inf
        return 0.

From YeoLab/anchor: anchor/monte_carlo.py (view on GitHub)
    def __init__(self, n_iter=1000,
                 model_params=((2, 1), (2, 2), (1, 2), (.5, .5), (1, 1))):
        self.n_iter = n_iter

        self.assignment = pm.Categorical(
            'assignment', [1. / len(model_params)] * len(model_params))

        self.alpha_var = pm.Lambda(
            'alpha_var', lambda a=self.assignment: model_params[a][0])
        self.beta_var = pm.Lambda(
            'beta_var', lambda a=self.assignment: model_params[a][1])
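
The Categorical assignment picks one (alpha, beta) pair out of model_params, and the two Lambda nodes simply index into that tuple, so alpha_var and beta_var follow assignment as the sampler moves. A hypothetical continuation would plug them into a likelihood; the name 'value' and the array data below are placeholders, not part of the original class:

        # placeholder likelihood: 'data' stands in for the observed values
        self.value = pm.Beta('value', alpha=self.alpha_var,
                             beta=self.beta_var, value=data, observed=True)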

From aflaxman/gbd: covariate_model.py (view on GitHub)
        if zero_re:
            column_map = dict([(n,i) for i,n in enumerate(U.columns)])
            # change one stoch from each set of siblings in area hierarchy to a 'sum to zero' deterministic
            for parent in model.hierarchy:
                node_names = model.hierarchy.successors(parent)
                nodes = [column_map[n] for n in node_names if n in U]
                if len(nodes) > 0:
                    i = nodes[0]
                    old_alpha_i = alpha[i]

                    # do not change if prior for this node has dist='Constant'
                    if parameters.get('random_effects', {}).get(U.columns[i], {}).get('dist') == 'Constant':
                        continue

                    alpha[i] = mc.Lambda('alpha_det_%s_%d'%(name, i),
                                                lambda other_alphas_at_this_level=[alpha[n] for n in nodes[1:]]: -sum(other_alphas_at_this_level))

                    if isinstance(old_alpha_i, mc.Stochastic):
                        @mc.potential(name='alpha_pot_%s_%s'%(name, U.columns[i]))
                        def alpha_potential(alpha=alpha[i], mu=old_alpha_i.parents['mu'], tau=old_alpha_i.parents['tau']):
                            return mc.normal_like(alpha, mu, tau)
                        alpha_potentials.append(alpha_potential)

    # make X and beta
    X = input_data.select(lambda col: col.startswith('x_'), axis=1)

    # add sex as a fixed effect (TODO: decide if this should be in data.py, when loading gbd model)
    X['x_sex'] = [sex_value[row['sex']] for i, row in input_data.T.iteritems()]

    beta = pl.array([])
    const_beta_sigma = pl.array([])

From pymc-devs/pymc3: pymc/gp/gp_submodel.py (view on GitHub)
                Cholesky factor of the covariance evaluation. The Gibbs step method
                also needs the full covariance evaluation. The mean needs a certain other
                function of the full covariance evaluation.

                All these things can be got as byproducts of Covariance.observe. Keeping the
                observed covariance and using it as the parent of f means the computations only
                get done once.
                """
                C_obs = copy.copy(C)
                try:
                    U, C_eval, Uo_Cxo = C_obs.observe(mesh, np.zeros(mesh.shape[0]), output_type='s')
                    return U.T.copy('F'), C_eval, C_obs, Uo_Cxo
                except np.linalg.LinAlgError:
                    return None

            S_eval = pm.Lambda('%s_S_eval'%name, lambda cb=covariance_bits: cb[0] if cb else None, doc="The lower triangular Cholesky factor of %s.C_eval"%name, trace=tally_all or kwds.get('tally_S_eval',False))
            C_eval = pm.Lambda('%s_C_eval'%name, lambda cb=covariance_bits: cb[1] if cb else None, doc="The evaluation %s.C(%s.mesh, %s.mesh)"%(name,name,name), trace=tally_all or kwds.get('tally_C_eval',False))
            C_obs = pm.Lambda('%s_C_obs'%name, lambda cb=covariance_bits: cb[2] if cb else None, doc="%s.C, observed on %s.mesh"%(name,name), trace=tally_all or kwds.get('tally_C_obs',False))
            Uo_Cxo = pm.Lambda('%s_Uo_Cxo'%name, lambda cb=covariance_bits: cb[3] if cb else None, doc="A byproduct of observation of %s.C that can be used by %s.M"%(name,name), trace=tally_all or kwds.get('tally_Uo_Cxo',False))
            M_eval = pm.Lambda('%s_M_eval'%name, lambda M=M, mesh=mesh, Uo_Cxo=Uo_Cxo: M(mesh, Uo_Cxo=Uo_Cxo), trace=tally_all or kwds.get('tally_M_eval',False), doc="The evaluation %s.M(%s.mesh)"%(name,name))

            @pm.potential(name = '%s_fr_check'%name)
            def fr_check(S_eval=S_eval):
                """
                Forbids non-positive-definite C_evals.
                """
                if S_eval is None:
                    return -np.inf
                else:
                    return 0
            fr_check=fr_check

From aflaxman/gbd: book/poisson_model.py (view on GitHub)
mc.MCMC([pi, delta, obs, pred]).sample(iter, burn, thin, verbose=False, progress_bar=False)

key = 'Negative Binomial'
results[key] = dict(pred=pred.stats(), pi=pi.stats())

#mc.Matplot.plot(pi)
#mc.Matplot.plot(delta)


model_keys = ['Poisson', 'Negative Binomial']
### @export 'negative-binomial_dispersion-prior-exploration'
for mu_log_10_delta in [1,2,3]:
    ### @export 'negative-binomial_dispersion-alt_prior'
    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
    delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r*n, pi*n, delta)

    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

    ### @export 'negative-binomial_dispersion-fit_alt_prior'
    mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(iter, burn, thin, verbose=False, progress_bar=False)

    key = 'Neg. Binom., $\mu_{\log\delta}=%d$'%mu_log_10_delta
    results[key] = dict(pred=pred.stats(), pi=pi.stats())
    model_keys.append(key)
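
The Lambda keeps delta bounded below by 5 while the sampler works on the unconstrained log10 scale. Since log_10_delta has a Normal prior with mean mu_log_10_delta and standard deviation .25, the prior median of 10**log_10_delta is 10**mu_log_10_delta, so the implied prior median of delta is easy to tabulate:

for m in [1, 2, 3]:
    print('mu_log_10_delta=%d gives a prior median delta of %g' % (m, 5 + 10**m))
# prints 15, 105, and 1005 respectively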

From armstrtw/pymc_radon: radon_varying_intercept_and_slope.py (view on GitHub)
def createCountyIndex(counties):
    counties_uniq = sorted(set(counties))
    counties_dict = dict()
    for i, v in enumerate(counties_uniq):
        counties_dict[v] = i
    ans = np.empty(len(counties),dtype='int')
    for i in range(0,len(counties)):
        ans[i] = counties_dict[counties[i]]
    return ans

index_c = createCountyIndex(counties)

# Priors
mu_a = pymc.Normal('mu_a', mu=0., tau=0.0001)
sigma_a = pymc.Uniform('sigma_a', lower=0, upper=100)
tau_a = pymc.Lambda('tau_a', lambda s=sigma_a: s**-2)

mu_b = pymc.Normal('mu_b', mu=0., tau=0.0001)
sigma_b = pymc.Uniform('sigma_b', lower=0, upper=100)
tau_b = pymc.Lambda('tau_b', lambda s=sigma_b: s**-2)

a = pymc.Normal('a', mu=mu_a, tau=tau_a, value=np.zeros(len(set(counties))))
b = pymc.Normal('b', mu=mu_b, tau=tau_b, value=np.zeros(len(set(counties))))

sigma_y = pymc.Uniform('sigma_y', lower=0, upper=100)
tau_y = pymc.Lambda('tau_y', lambda s=sigma_y: s**-2)

# Model
@pymc.deterministic(plot=False)
def y_hat(a=a,b=b):
    return a[index_c] + b[index_c]*x
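
The tau_a, tau_b, and tau_y nodes are the most common pymc.Lambda idiom: converting a Uniform standard deviation into the precision that pymc.Normal expects. To close the model, an observed likelihood would typically follow; a minimal sketch, assuming the log-radon measurements are held in an array y:

# hypothetical likelihood; 'y' is assumed to be defined earlier in the script
y_obs = pymc.Normal('y_obs', mu=y_hat, tau=tau_y, value=y, observed=True)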
