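# The snippets below appear to assume the following imports (PyMC 2-era DisMod
# validation code; the exact module names are assumptions based on how they are
# used further down, not confirmed import paths):
import pylab as pl
import pymc as mc
import pandas
import networkx as nx
import data_model, fit_model, graphics, rate_model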
sum_pi_wt = pl.cumsum(pi_age_true*age_weights)
sum_wt = pl.cumsum(age_weights)
p = (sum_pi_wt[age_end] - sum_pi_wt[age_start]) / (sum_wt[age_end] - sum_wt[age_start])
# correct cases where age_start == age_end
i = age_start == age_end
if pl.any(i):
    p[i] = pi_age_true[age_start[i]]
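# A minimal sanity check of the weighted interval-average trick above (this
# check is illustrative, not part of the original script): for a constant rate,
# every interval average must recover that constant.
_pi_const = .02 * pl.ones(101)     # constant rate across ages 0..100
_wt = pl.ones(101)                 # uniform age weights
_cs_pi = pl.cumsum(_pi_const * _wt)
_cs_wt = pl.cumsum(_wt)
assert pl.allclose((_cs_pi[80] - _cs_pi[20]) / (_cs_wt[80] - _cs_wt[20]), .02)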
n = mc.runiform(100, 10000, size=N)
model.input_data['age_start'] = age_start
model.input_data['age_end'] = age_end
model.input_data['effective_sample_size'] = n
model.input_data['true'] = p
model.input_data['value'] = mc.rnegative_binomial(n*p, delta_true*n*p) / n
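# Note: PyMC 2's rnegative_binomial(mu, alpha) is parameterized by the mean mu
# (here n*p) and a dispersion parameter alpha (here delta_true*n*p), so dividing
# the simulated counts by n converts them back to rates.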
## Then fit the model and compare the estimates to the truth
model.vars = {}
model.vars['p'] = data_model.data_model('p', model, 'p', 'all', 'total', 'all', None, None, None)
model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=10000, burn=5000, thin=25, tune_interval=100)
graphics.plot_one_ppc(model.vars['p'], 'p')
graphics.plot_convergence_diag(model.vars)
graphics.plot_one_type(model, model.vars['p'], {}, 'p')
pl.plot(a, pi_age_true, 'r:', label='Truth')
pl.legend(fancybox=True, shadow=True, loc='upper left')
pl.show()
model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
model.input_data['sigma_pred'] = model.vars['p']['p_pred'].stats()['standard deviation']
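# node.stats() summarizes the MCMC trace; in PyMC 2 the returned dict includes
# 'mean' and 'standard deviation' entries, used here as the posterior
# predictive point estimate and its uncertainty.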
model.input_data = pandas.DataFrame(index=range(N))
initialize_input_data(model.input_data)
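# initialize_input_data is defined elsewhere in these validation scripts.  A
# plausible stand-in (purely illustrative -- the column names and defaults
# below are assumptions, not the original implementation) would fill in the
# descriptive columns that data_model.data_model expects for each row:
def initialize_input_data_sketch(input_data):
    input_data['age_start'] = 0
    input_data['age_end'] = 1
    input_data['year_start'] = 2005.
    input_data['year_end'] = 2005.
    input_data['sex'] = 'total'
    input_data['area'] = 'all'
    input_data['data_type'] = 'p'
    input_data['standard_error'] = pl.nan
    input_data['upper_ci'] = pl.nan
    input_data['lower_ci'] = pl.nan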
# add fixed effect to simulated data
X = mc.rnormal(0., 1.**-2, size=(N,len(beta_true)))
Y_true = pl.dot(X, beta_true)
for i in range(len(beta_true)):
    model.input_data['x_%d'%i] = X[:,i]
model.input_data['true'] = pi_true * pl.exp(Y_true)
model.input_data['effective_sample_size'] = mc.runiform(100, 10000, N)
n = model.input_data['effective_sample_size']
p = model.input_data['true']
model.input_data['value'] = mc.rnegative_binomial(n*p, delta_true) / n
## Then fit the model and compare the estimates to the truth
model.vars = {}
model.vars['p'] = data_model.data_model('p', model, 'p', 'all', 'total', 'all', None, None, None)
model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=10000, burn=5000, thin=5, tune_interval=100)
graphics.plot_one_ppc(model.vars['p'], 'p')
graphics.plot_convergence_diag(model.vars)
pl.show()
model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
model.input_data['sigma_pred'] = model.vars['p']['p_pred'].stats()['standard deviation']
add_quality_metrics(model.input_data)
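# add_quality_metrics is defined elsewhere; a hypothetical version (metric
# names and choices assumed here, not the original implementation) would
# compare the posterior predictions against the simulated truth:
def add_quality_metrics_sketch(df):
    df['abs_err'] = df['mu_pred'] - df['true']                 # signed error
    df['rel_err'] = (df['mu_pred'] - df['true']) / df['true']  # relative error
    # does the truth fall inside an approximate 95% posterior interval?
    df['covered?'] = pl.absolute(df['true'] - df['mu_pred']) <= 1.96*df['sigma_pred']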
from validate_covariates import alpha_true_sim
area_list = pl.array(['all', 'super-region_3', 'north_africa_middle_east', 'EGY', 'KWT', 'IRN', 'IRQ', 'JOR', 'SYR'])
alpha = alpha_true_sim(model, area_list, sigma_true)
print alpha
model.input_data['true'] = pl.nan
model.input_data['area'] = area_list[mc.rcategorical(pl.ones(len(area_list)) / float(len(area_list)), N)]
for i, a in model.input_data['area'].iteritems():
    model.input_data['true'][i] = p[i] * pl.exp(pl.sum([alpha[n] for n in nx.shortest_path(model.hierarchy, 'all', a) if n in alpha]))
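# For example, if 'KWT' is nested under 'north_africa_middle_east' and
# 'super-region_3' in model.hierarchy (as the area_list above suggests),
# nx.shortest_path(model.hierarchy, 'all', 'KWT') is
# ['all', 'super-region_3', 'north_africa_middle_east', 'KWT'], so each row's
# rate is scaled by exp() of the sum of the random effects along its path
# from the root.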
p = model.input_data['true']
n = model.input_data['effective_sample_size']
model.input_data['value'] = mc.rnegative_binomial(n*p, delta_true*n*p) / n
## Then fit the model and compare the estimates to the truth
model.vars = {}
model.vars['p'] = data_model.data_model('p', model, 'p', 'north_africa_middle_east', 'total', 'all', None, None, None)
#model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=1005, burn=500, thin=5, tune_interval=100)
model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=10000, burn=5000, thin=25, tune_interval=100)
graphics.plot_one_ppc(model.vars['p'], 'p')
graphics.plot_convergence_diag(model.vars)
graphics.plot_one_type(model, model.vars['p'], {}, 'p')
pl.plot(range(101), pi_age_true, 'r:', label='Truth')
pl.legend(fancybox=True, shadow=True, loc='upper left')
pl.show()
model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
def test_neg_binom_model_sim(N=16):
    # simulate negative binomial data
    pi_true = .01
    delta_true = 50
    n = pl.array(pl.exp(mc.rnormal(10, 1**-2, size=N)), dtype=int)
    k = pl.array(mc.rnegative_binomial(n*pi_true, delta_true, size=N), dtype=float)
    p = k/n
    # create NB model and priors
    vars = dict(mu_age=mc.Uniform('mu_age', 0., 1000., value=.01),
                sigma=mc.Uniform('sigma', 0., 10000., value=1000.))
    vars['mu_interval'] = mc.Lambda('mu_interval', lambda mu=vars['mu_age']: mu*pl.ones(N))
    vars.update(rate_model.log_normal_model('sim', vars['mu_interval'], vars['sigma'], p, 1./pl.sqrt(n)))
    # fit NB model
    m = mc.MCMC(vars)
    m.sample(1)  # draw a single MCMC sample (smoke test that the model builds)
@mc.deterministic
def pred(pi=pi, delta=delta):
    return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)
def p_pred(pi=pi, delta=delta, n=n_nonzero):
    # the tiny 1.e-9 offset presumably guards against a zero mean/denominator
    return mc.rnegative_binomial(pi*n+1.e-9, delta) / pl.array(n+1.e-9, dtype=float)