How to use the pymc.MAP function in pymc

To help you get started, we've selected a few pymc.MAP examples, drawn from the ways it is used in popular public projects.

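pymc.MAP wraps a model and uses SciPy's numerical optimizers to move every unobserved stochastic to its maximum a posteriori value. Here is a minimal, self-contained sketch of the basic workflow (pymc 2.x API; the toy model and variable names are ours, not taken from the projects below):

import numpy as np
import pymc

# Toy model: infer the mean of noisy observations.
data = np.random.normal(loc=2.0, scale=1.0, size=100)
mu = pymc.Normal('mu', mu=0.0, tau=1e-3)            # vague prior on the mean
obs = pymc.Normal('obs', mu=mu, tau=1.0, value=data, observed=True)

model = pymc.Model([mu, obs])

# fit() runs a numerical optimizer; afterwards each unobserved
# stochastic holds its MAP value.
map_ = pymc.MAP(model)
map_.fit()
print(mu.value)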

github erikson84 / BayesDataAnalysisWithPyMC / BayesDataAnalysisWithPymc / ANOVAOnewayPyMC.py
# The priors are all set! Now we can define the linear model.
# It could be defined more concisely with the 'Lambda()' class,
# but we will use a 'for' loop for readability.

mu = []
for i in x:
    mu.append(a0 + a[int(i - 1)])

# And the likelihood.

like_y = pymc.Normal('like_y', mu=mu, tau=tau, value=zy, observed=True)

# Now we build the model, set the MAP and sample the posterior distribution.

model = pymc.Model([like_y, a0, a, sigma, a_tau, a_sd])
map_ = pymc.MAP(model)
map_.fit()
mcmc = pymc.MCMC(model)
mcmc.sample(iter=80000, burn=20000, thin=10)

# Extract the samples.

a0_sample = mcmc.trace('a0')[:]
a_sample = mcmc.trace('a')[:]
sigma_sample = mcmc.trace('sigma')[:]
a_sd_sample = mcmc.trace('a_sd')[:]

# Convert the values.

b0_sample = convert_baseline(a0_sample, a_sample, x_levels, y)
b_sample = convert_deflection(a0_sample, a_sample, x_levels, y)
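Note the order of operations in this example: pymc.MAP(model).fit() moves every unobserved stochastic to its maximum a posteriori value before pymc.MCMC(model) is constructed, so the sampler starts near the posterior mode rather than at arbitrary initial values, and the burn-in discards what adaptation remains.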

github aflaxman / gbd / dismod3 / bayesian_models / fit_rate_function.py
def map_fit(asrf, speed='most accurate'):
    """
    The Maximum A Posteriori (MAP) fit of the model is a point
    estimate of the model parameters, found by numerical
    optimization of the posterior probability. Since the
    optimization is local, it might not find the global optimum.
    """
    initialize_model(asrf)
    
    map = mc.MAP(asrf.vars)
    iterlim, method = MAP_PARAMS[speed]
    print "searching for maximum likelihood point estimate (%s)" % method
    map.fit(verbose=1, iterlim=iterlim, method=method)

    probabilistic_utils.save_map(asrf)
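Because the optimization is local, a common mitigation (not part of this file) is to refit from several random starting points and keep the best log-probability. A sketch, assuming every unobserved stochastic in the model implements .random() so it can be re-drawn from its prior:

import pymc as mc

def best_map_fit(model_vars, restarts=5, method='fmin_powell', iterlim=500):
    """Run several MAP fits from random initial values; keep the best."""
    best_logp, best_values = float('-inf'), None
    for _ in range(restarts):
        m = mc.MAP(model_vars)
        for s in m.stochastics:
            s.random()  # re-draw the initial value from the prior
        m.fit(method=method, iterlim=iterlim)
        if m.logp > best_logp:
            # Keep values, not the MAP object itself: the underlying nodes
            # are shared, so a saved object's logp would change on refit.
            best_logp = m.logp
            best_values = {s.__name__: s.value for s in m.stochastics}
    return best_logp, best_values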

github astroML / astroML / book_figures / chapter10 / fig_matchedfilt_burst.py
def compute_MCMC_results(niter=25000, burn=4000):
    S = pymc.MCMC(model)
    S.sample(iter=niter, burn=burn)
    traces = [S.trace(s)[:] for s in ['b0', 'A', 'T', 'alpha']]

    M = pymc.MAP(model)
    M.fit()
    fit_vals = (M.b0.value, M.A.value, M.alpha.value, M.T.value)

    return traces, fit_vals
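Inside a function like this, the traces characterize the full posterior while the MAP fit supplies a single best-fit vector; the two can be reported side by side (a sketch reusing the S and M objects above):

import numpy as np

b0_samples = S.trace('b0')[:]
print('posterior mean of b0:', np.mean(b0_samples))
print('MAP estimate of b0:  ', M.b0.value)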

github hddm-devs / kabuki / kabuki / hierarchical.py
        from operator import attrgetter

        # I.S: when using MAP with a hierarchical model, the subject nodes should be
        # integrated out before computing the MAP (see Pinheiro JC, Bates DM, 1995, 2000).
        # Since we do not integrate, we get a point estimate for each
        # subject, which is not what we want.
        if self.is_group_model:
            raise NotImplementedError("""Sorry, this method is not yet implemented for group models;
            you might consider using the approximate_map method.""")

        maps = []

        for i in range(runs):
            # (Re)create nodes to get new initial values.
            # Nodes are not recreated on the first iteration if they already exist.
            self.mc = pm.MAP(self.nodes_db.node)
            if i != 0:
                self.draw_from_prior()

            self.mc.fit(method, **kwargs)
            print(self.mc.logp)
            maps.append(self.mc)

        self.mc = None

        # We want to use values of the best fitting model
        sorted_maps = sorted(maps, key=attrgetter('logp'))
        max_map = sorted_maps[-1]

        # If maximum logp values are not in the same range, there
        # could be a problem with the model.
        if runs >= 2:

github pymc-devs / pymc3 / docs / guidecode / modelfitting.py
import pymc as pm

# The enclosing function definition and the stochastic 'a' are restored
# here so the excerpt is self-contained (following the pymc user guide's
# version of this example).
def make_model(x):
    a = pm.Exponential('a', beta=x, value=0.5)

    @pm.deterministic
    def b(a=a):
        return 100-a

    @pm.stochastic
    def c(value=0.5, a=a, b=b):
        return (value-a)**2/b

    return locals()

M = pm.Model(make_model(3))

from pymc.examples import gelman_bioassay
M = pm.MAP(gelman_bioassay)
M.fit()

M.alpha.value
#array(0.8465892309923545)
M.beta.value
#array(7.7488499785334168)

M.AIC
#7.9648372671389458
M.BIC
#6.7374259893787265
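After fit() the stochastics sit at their MAP values, so the model's joint log-probability is the maximized one; AIC and BIC are derived from it, adding a penalty for the number of free parameters (and, for BIC, the number of observations). The maximized log-probability itself is also exposed:

M.logp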

github astroML / astroML / book_figures / chapter10 / fig_matchedfilt_chirp.py
def compute_MCMC_results(niter=20000, burn=2000):
    S = pymc.MCMC(model)
    S.sample(iter=niter, burn=burn)
    traces = [S.trace(s)[:] for s in ['b0', 'A', 'omega', 'beta']]

    M = pymc.MAP(model)
    M.fit()
    fit_vals = (M.b0.value, M.beta.value, M.A.value, M.omega.value)

    return traces, fit_vals

github aflaxman / gbd / fit.py
:Results:
      - returns a pymc.MCMC object created from vars, that has been fit with MCMC

    .. note::
      - `burn` must be less than `iter`
      - `thin` must be less than `iter` minus `burn`

    """
    assert burn < iter, 'burn must be less than iter'
    assert thin < iter - burn, 'thin must be less than iter-burn'

    vars = model.vars[data_type]
    
    start_time = time.time()
    map = mc.MAP(vars)
    m = mc.MCMC(vars)

    ## use MAP to generate good initial conditions
    try:
        method='fmin_powell'
        tol=.001

        fit_model.logger.info('finding initial values')
        fit_model.find_asr_initial_vals(vars, method, tol, verbose)

        fit_model.logger.info('\nfinding MAP estimate')
        map.fit(method=method, tol=tol, verbose=verbose)
        
        if verbose:
            fit_model.print_mare(vars)
        fit_model.logger.info('\nfinding step covariances estimate')
github aflaxman / gbd / book / splines.py View on Github external
fig4_data = {}
for i, params in enumerate([dict(label='$h(a)$ unconstrained', subt='(a)', value=dict(increasing=dict(age_start=0, age_end=0), decreasing=dict(age_start=0, age_end=0))),
                            dict(label='$h(a)$ decreasing for $a \leq 50$', subt='(b)', value=dict(increasing=dict(age_start=0, age_end=0), decreasing=dict(age_start=0, age_end=50))),
                            dict(label='$h(a)$ increasing for $a \leq 50$', subt='(c)', value=dict(increasing=dict(age_start=0, age_end=50), decreasing=dict(age_start=0, age_end=0)))]):

    vars = age_pattern.age_pattern('t', ages=ages, knots=knots, smoothing=pl.inf)
    vars.update(expert_prior_model.derivative_constraints('t',
                                                          params.pop('value'),
                                                          vars['mu_age'], ages))
    vars['mu_pred'] = mc.Lambda('mu_pred', lambda mu_age=vars['mu_age'], X=X : mu_age[X])
    vars['Y'] = mc.Normal('Y', mu=vars['mu_pred'], tau=tau, value=Y, observed=True)
    #
    for i, k_i in enumerate(knots):
        vars['gamma'][i].value = Y_true[k_i]
    mc.MAP(vars).fit(method='fmin_powell', tol=.00001, verbose=0)
    
    fig4_data[params['subt']] = vars['mu_age'].value[knots]

fig4 = pl.figure(figsize=(11, 9))

ax41 = fig4.add_subplot(3,1,1)
ax41.plot(X, Y, 'ks', ms=4, mew=2)
ax41.plot(ages[knots], fig4_data['(a)'], 'k-', linewidth=2)
decorate_figure()
pl.yticks([.1, .5, 1., 1.5])
book_graphics.subtitle('(a) ' + '$h(a)$ unconstrained')

ax42 = fig4.add_subplot(3,1,2,sharex=ax41)
ax42.plot(X, Y, 'ks', ms=4, mew=2)
ax42.plot(ages[knots], fig4_data['(b)'], 'k-', linewidth=2)
decorate_figure()
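Note that this splines example never runs MCMC at all: the MAP fit alone serves as a penalized curve fit, with the smoothing and monotonicity priors acting as the penalty and mc.MAP(vars).fit(...) doing the optimization.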
