@pymc.stochastic(name='joint_sample')
def ci_joint(value=self.mcmc_initialization):
    def logp(value):
        # unpack the flat sample vector into its x, y, and z components
        xi = [value[i] for i in range(len(x))]
        yi = [value[i + len(x)] for i in range(len(y))]
        zi = [value[i + len(x) + len(y)] for i in range(len(z))]
        if len(z) == 0:
            # empty conditioning set: the joint factorizes as p(x) * p(y)
            log_px_given_z = np.log(self.densities[0].pdf(data_predict=xi))
            log_py_given_z = np.log(self.densities[1].pdf(data_predict=yi))
            log_pz = 0.
        else:
            # factorize the joint as p(x|z) * p(y|z) * p(z)
            log_px_given_z = np.log(self.densities[0].pdf(endog_predict=xi, exog_predict=zi))
            log_py_given_z = np.log(self.densities[1].pdf(endog_predict=yi, exog_predict=zi))
            log_pz = np.log(self.densities[2].pdf(data_predict=zi))
        return log_px_given_z + log_py_given_z + log_pz

model = pymc.Model([ci_joint])
@pymc.stochastic(observed=True)
def y(value=0.001, x=x):
    return pymc.lognormal_like(value, mu=x[0], tau=1.)

return locals()
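The `return locals()` idiom hands every node defined in the enclosing factory function to PyMC as a dict. A minimal usage sketch, assuming the snippet above lives inside a factory called make_model (a hypothetical name) and that x holds a free parent stochastic:

import numpy as np
import pymc

def make_model():
    # assumed for illustration: a free parameter for the lognormal's mean
    x = [pymc.Normal('x0', mu=0., tau=1.)]

    @pymc.stochastic(observed=True)
    def y(value=0.001, x=x):
        return pymc.lognormal_like(value, mu=x[0], tau=1.)

    return locals()

# pymc.MCMC accepts the dict of nodes returned by the factory
M = pymc.MCMC(make_model())
M.sample(iter=10000, burn=5000)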
@pm.stochastic(observed=True, dtype=int)
def disasters(value=disasters_array,
              early_mean=early_mean,
              late_mean=late_mean,
              switchpoint=switchpoint):
    """Annual occurrences of coal mining disasters."""
    # piecewise-constant Poisson rate: early_mean before the
    # switchpoint, late_mean after it
    return pm.poisson_like(value[:switchpoint], early_mean) + \
           pm.poisson_like(value[switchpoint:], late_mean)
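A hedged sketch of sampling this switchpoint model: `disasters` is the stochastic above, while the priors and iteration counts here are illustrative assumptions in the style of the classic coal-mining example, not taken from the original source.

import pymc as pm

# illustrative priors (assumed, not from the snippet above)
switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=110)
early_mean = pm.Exponential('early_mean', beta=1.)
late_mean = pm.Exponential('late_mean', beta=1.)

# ... define disasters as above, then sample:
M = pm.MCMC([switchpoint, early_mean, late_mean, disasters])
M.sample(iter=10000, burn=1000)
print(M.trace('switchpoint')[:].mean())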
@mc.observed
@mc.stochastic(name='data_%s' % key)
def obs(value=value,
        S=data_sample,
        N=N,
        mu_i=rates,
        Xz=Xz,
        zeta=zeta,
        delta=delta):
    # earlier log-normal residual formulation, kept for reference:
    #zeta_i = .001
    #residual = pl.log(value[S] + zeta_i) - pl.log(mu_i*N[S] + zeta_i)
    #return mc.normal_like(residual, 0, 100. + delta)

    # negative-binomial likelihood of the sampled counts, with
    # overdispersion delta scaled by the covariate effect exp(Xz*zeta)
    logp = mc.negative_binomial_like(value[S], N[S]*mu_i, delta*pl.exp(Xz*zeta))
    return logp
@mc.observed
@mc.stochastic(name='lower_bound_data_%s' % key)
def obs_lb(value=value, N=N,
           Xa=Xa, Xb=Xb,
           alpha=alpha, beta=beta, gamma=gamma,
           bounds_func=vars['bounds_func'],
           delta=delta,
           age_indices=ai,
           age_weights=aw):
    # calculate study-specific rate function
    shifts = pl.exp(pl.dot(Xa, alpha) + pl.dot(Xb, pl.atleast_1d(beta)))
    exp_gamma = pl.exp(gamma)
    # TODO: try vectorizing this loop to increase speed
    mu_i = [pl.dot(weights, bounds_func(s_i * exp_gamma[ages], ages))
            for s_i, ages, weights in zip(shifts, age_indices, age_weights)]

    # penalize only the observations whose lower bound is violated,
    # i.e. where the predicted rate falls below the observed value
    rate_param = mu_i*N
    violated_bounds = pl.nonzero(rate_param < value)
    logp = mc.negative_binomial_like(value[violated_bounds], rate_param[violated_bounds], delta)
    return logp
@mc.observed
@mc.stochastic(name='data_%d' % d['id'])
def obs(value=logit_val, logit_se=logit_se,
        X=covariates(d),
        alpha=alpha, beta=beta, gamma=gamma, sigma=sigma,
        age_indices=age_indices,
        age_weights=age_weights):
    # calculate study-specific rate function
    mu = predict_logit_rate(X, alpha, beta, gamma)
    mu_i = rate_for_range(mu, age_indices, age_weights)
    # precision combines model dispersion (sigma) with the study's
    # sampling error on the logit scale (logit_se)
    tau_i = 1. / (sigma**2 + logit_se**2)
    logp = mc.normal_like(x=value, mu=mu_i, tau=tau_i)
    return logp
@pymc.stochastic(observed=True)
def y_i(value=y, mu=y_hat, tau=tau_y):
    return pymc.normal_like(value, mu, tau)
@mc.observed
@mc.stochastic(name='obs_%d' % d['id'])
def obs(f=vars['rate_stoch'],
        age_indices=age_indices,
        age_weights=age_weights,
        value=pl.log(dm.value_per_1(d)),
        tau=se**-2, data=d):
    # compare the log of the age-averaged rate to the log-transformed data
    f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
    return mc.normal_like(value, pl.log(f_i), tau)

vars['observed_rates'].append(obs)
@pymc.stochastic(observed=True)
def output(value=y_obs, mod_out=model_output, sigma=sigma, gamma=1.):
    # tempered Gaussian likelihood: the log-likelihood is scaled by gamma;
    # use the bound parameter `value` rather than the closed-over y_obs
    return gamma * pymc.normal_like(value, mu=mod_out, tau=1. / sigma**2)

return locals()