How to use the pymc.logit function in pymc

To help you get started, we’ve selected a few pymc examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github aflaxman / gbd / book / forward_sim.py View on Github external
# Fragment of a Python 2 plotting script (note the `print` statement below).
# Plot the model's age patterns for all five rate types and save a figure.
book_graphics.plot_age_patterns(model, types='i r m f p'.split(),
                                yticks=dict(i=[0,.01,.02], r=[0,.05,.1], m=[0,.2,.4], f=[0,.05,.1], p=[0,.05,.1]))
pl.savefig('initial.pdf')

### @export 'more-remission'

# Increase the 'f' (excess-mortality, per the PDF name saved below) rate by
# setting each knot value on the log scale, growing with the knot age k_i.
# NOTE(review): assumes the f rate is exp of these gamma knots — confirm.
for i, k_i in enumerate(model.parameters[t]['parameter_age_mesh']):
    model.vars['f']['gamma'][i].value = pl.log(k_i*.005 + .001)
book_graphics.plot_age_patterns(model, types='i r m f p'.split(),
                                yticks=dict(i=[0,.01,.02], r=[0,.05,.1], m=[0,.2,.4], f=[0,.3,.6], p=[0,.01,.02]))
pl.savefig('more-excess-mortality.pdf')

### @export 'birth_prevalence'

# Give the model a 1.5% birth prevalence via the logit-transformed C0 value,
# then read off the resulting prevalence age pattern.
p_0 = .015
model.vars['logit_C0'].value = mc.logit(p_0)
p = model.vars['p']['mu_age'].value

# Python 2 print statement; reports the minimum of the prevalence curve and
# its value at the oldest age, all scaled to percent.
print """
 For a condition with prevalence of
  %.1f\\%% at age $0$, these rates yield a prevalence age pattern which is
  highly nonlinear, dipping to a minimum of %.1f\\%% at age %d, and then
  increasing back up to %.1f\\%% at the oldest ages.
""" % (p_0*100, p.min()*100, p.argmin(), p[-1]*100)

book_graphics.plot_age_patterns(model, types='i r m f p'.split(),
                                yticks=dict(i=[0,.01,.02], r=[0,.05,.1], m=[0,.2,.4], f=[0,.3,.6], p=[.01,.015,.02]))
pl.savefig('birth-prevalence.pdf')
github aflaxman / gbd / book / two_compartment_forward_sim_2.py View on Github external
def set_birth_prev(value):
    """Set the model's logit birth prevalence from *value*.

    The input is floored at 1e-9 before the logit transform so that a zero
    prevalence never produces logit(0) = -inf.
    """
    floored = pl.maximum(1.e-9, value)
    model.vars['logit_C0'].value = mc.logit(floored)
github aflaxman / gbd / dismod3 / beta_binomial_model.py View on Github external
# --- Fragment: begins inside a function whose def is outside this excerpt;
# the leading indent of the first line was lost in extraction. ---
# Validate the estimation age mesh has unit spacing (Python 2 raise syntax).
if np.any(np.diff(est_mesh) != 1):
        raise ValueError, 'ERROR: Gaps in estimation age mesh must all equal 1'

    # set up age-specific rate function, if it does not yet exist
    if not rate_stoch:
        param_mesh = dm.get_param_age_mesh()

        # Prefer the empirical-prior mean as the starting value when present
        # (dict.has_key is Python 2 only).
        if emp_prior.has_key('mu'):
            initial_value = emp_prior['mu']
        else:
            initial_value = dm.get_initial_value(key)

        # find the logit of the initial values, which is a little bit
        # of work because initial values are sampled from the est_mesh,
        # but the logit_initial_values are needed on the param_mesh
        logit_initial_value = mc.logit(
            interpolate(est_mesh, initial_value, param_mesh))
        
        # Weak Normal prior on the logit-scale knots: mu=-5, tau=.01 (sd=10).
        logit_rate = mc.Normal('logit(%s)' % key,
                               mu=-5.*np.ones(len(param_mesh)),
                               tau=1.e-2,
                               value=logit_initial_value)
        #logit_rate = [mc.Normal('logit(%s)_%d' % (key, a), mu=-5., tau=1.e-2) for a in param_mesh]
        vars['logit_rate'] = logit_rate

        # Deterministic node: inverse-logit the knots and interpolate the
        # resulting rates from the param_mesh back onto the full est_mesh.
        @mc.deterministic(name=key)
        def rate_stoch(logit_rate=logit_rate):
            return interpolate(param_mesh, mc.invlogit(logit_rate), est_mesh)

    # Empirical-prior potential on the rate; its body is truncated in this
    # excerpt, so only the decorator and signature are visible.
    if emp_prior.has_key('mu'):
        @mc.potential(name='empirical_prior_%s' % key)
        def emp_prior_potential(f=rate_stoch, mu=emp_prior['mu'], tau=1./np.array(emp_prior['se'])**2):
github aflaxman / gbd / generate_covariate_data.py View on Github external
# --- Fragment: begins inside a function whose def is outside this excerpt;
# the leading indent of the first line was lost in extraction. ---
d.update(condition='type_2_diabetes',
             year_start=y,
             year_end=y)

    # True rate over [a0, a1], clamped away from 0 and 1 so logit stays finite.
    p0 = dismod3.utils.rate_for_range(truth[key], range(a0, a1 + 1), np.ones(a1 + 1 - a0))
    p0 = dismod3.utils.trim(p0, 1.e-6, 1. - 1.e-6)

    # TODO: make beta dispersion study level (instead of datum level)
    # p1 = mc.rbeta(p0 * dispersion, (1 - p0) * dispersion)
    p1 = p0

    # TODO: add additional covariates
    # Flag roughly half of prevalence data as self-reported, shifting its rate
    # down by 0.5 on the logit scale to simulate reporting bias.
    if key.find('prevalence') != -1:
        if random.random() < .5:
            d['self-reported'] = True
            p1 = mc.invlogit(mc.logit(p1) - .5)
        else:
            d['self-reported'] = False
    
    # Simulated observation: binomial draw with n trials, stored as a proportion.
    p2 = mc.rbinomial(n, p1) / n
    
    d['value'] = p2
    if p2 > 0:
        # Binomial standard error sqrt(p(1-p)/n); left unset when p2 == 0.
        d['standard_error'] = np.sqrt(p2 * (1 - p2) / n)

    return d
github aflaxman / gbd / dismod3 / logit_normal_model.py View on Github external
# --- Fragment: begins inside a function whose def is outside this excerpt;
# the leading indent of the first comment line was lost in extraction. ---
# get the index vector and weight vector for the age range
    age_indices = indices_for_range(est_mesh, d['age_start'], d['age_end'])
    age_weights = d.get('age_weights', np.ones(len(age_indices)))

    # ensure all rate data is valid
    d_val = dm.value_per_1(d)
    if d_val < 0 or d_val > 1:
        debug('WARNING: data %d not in range (0,1)' % d['id'])
        raise ValueError
    elif d_val == 0.:
        # Nudge exact 0/1 off the boundary so the logit below stays finite.
        d_val = min_val / 10.  # TODO: determine if this is an acceptible way to deal with zero
    elif d_val == 1.:
        d_val = 1. - min_val / 10.

    logit_val = mc.logit(d_val)

    # Substitute max_se for missing or zero standard errors.
    d_se = dm.se_per_1(d)
    if d_se == MISSING:
        d_se = max_se #TODO: determine if this is an acceptible way to deal with missing
    elif d_se == 0.:
        d_se = max_se

    # Delta method: d/dp logit(p) = 1/p + 1/(1-p), so the logit-scale se is
    # that derivative evaluated at d_val times the natural-scale se.
    logit_se = (1/d_val + 1/(1-d_val)) * d_se

    return age_indices, age_weights, logit_val, logit_se
github aflaxman / gbd / book / forward_sim2.py View on Github external
def set_birth_prev(value):
    """Update the model's logit_C0 node from a prevalence *value*.

    Values below 1e-9 are raised to 1e-9 so the logit transform never
    receives zero.
    """
    safe_value = pl.maximum(1.e-9, value)
    model.vars['logit_C0'].value = mc.logit(safe_value)

pymc

Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with PyTensor

Apache-2.0
Latest version published 6 days ago

Package Health Score

93 / 100
Full package analysis