# Shared imports assumed by the snippets below. The distance names follow
# the recent pyabc API (older releases call them MinMaxDistanceFunction
# and PercentileDistanceFunction).
import numpy as np
import scipy.stats as st
from pyabc import (ABCSMC, Distribution, RV, SimpleModel,
                   ConstantPopulationSize, MedianEpsilon,
                   MinMaxDistance, PercentileDistance)
from pyabc.transition import GridSearchCV, MultivariateNormalTransition


# This snippet begins mid-function; the enclosing test header and the
# Gaussian model (sigma, model) are reconstructed here from context.
def test_two_competing_gaussians_multiple_population(db_path, sampler,
                                                     transition):
    sigma = .5

    def model(args):
        return {"y": st.norm(args['x'], sigma).rvs()}

    # We define two models, but they are identical so far
    models = [model, model]
    models = list(map(SimpleModel, models))
    # However, our models' priors are not the same: their means differ.
    mu_x_1, mu_x_2 = 0, 1
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(mu_x_1, sigma)),
        Distribution(x=st.norm(mu_x_2, sigma))]
    # We plug the whole ABC setup together
    nr_populations = 3
    population_size = ConstantPopulationSize(400)
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 PercentileDistance(measures_to_use=["y"]), population_size,
                 eps=MedianEpsilon(.2),
                 transitions=[transition(), transition()],
                 sampler=sampler)
    # Finally we add metadata such as model names and define where to
    # store the results.
    # y_observed is the important piece here: our actual observation.
    y_observed = 1
    abc.new(db_path, {"y": y_observed})
    # We run the ABC with at most 3 populations
    minimum_epsilon = .05
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
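
    # Illustration (not part of the original test): after the run, the
    # History object tells us how posterior mass is split across the two
    # models. Since y = 1 coincides with model 2's prior mean, model 2
    # should receive the larger weight.
    mp = history.get_model_probabilities(history.max_t)
    print(mp)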


def test_gaussian_multiple_populations(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].values
    sort_indices = np.argsort(posterior_x)
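
    # Illustration (reconstructed, not the original assertion; the
    # tolerance is ours): with a N(0, sigma_x^2) prior and a N(x, sigma_y^2)
    # likelihood, the posterior of x given y is Gaussian with
    #   mean = y * sigma_x**2 / (sigma_x**2 + sigma_y**2)
    #   var  = sigma_x**2 * sigma_y**2 / (sigma_x**2 + sigma_y**2),
    # so the weighted ABC sample can be compared against this reference CDF.
    mu_post = y_observed * sigma_x**2 / (sigma_x**2 + sigma_y**2)
    sigma_post = np.sqrt(sigma_x**2 * sigma_y**2 / (sigma_x**2 + sigma_y**2))
    empirical_cdf = np.cumsum(posterior_weight[sort_indices])
    analytic_cdf = st.norm(mu_post, sigma_post).cdf(posterior_x[sort_indices])
    assert np.max(np.abs(empirical_cdf - analytic_cdf)) < .2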


def test_gaussian_single_population(db_path, sampler):
    sigma_prior = 1
    sigma_ground_truth = 1
    observed_data = 1

    def model(args):
        return {"y": st.norm(args['x'], sigma_ground_truth).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 1
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=RV("norm", 0, sigma_prior))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"y": observed_data})
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
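
    # Illustration (not part of the original test): a quick sanity check
    # on the weighted sample. With equal prior and noise variances, the
    # conjugate posterior mean is observed_data / 2 = 0.5.
    weighted_mean = np.average(posterior_x["x"], weights=posterior_weight)
    print(weighted_mean)  # should be roughly 0.5 here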


# AllInOneModel is defined elsewhere in the original test module.
def test_all_in_one_model(db_path, sampler):
    models = [AllInOneModel() for _ in range(2)]
    population_size = ConstantPopulationSize(800)
    parameter_given_model_prior_distribution = [
        Distribution(theta=RV("beta", 1, 1)) for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})
    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    # The two models are identical, so they should share the posterior
    # probability roughly equally.
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08


def test_beta_binomial_different_priors_initial_epsilon_from_sample(db_path,
                                                                    sampler):
    binomial_n = 5

    def model(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(800)
    a1, b1 = 1, 1
    a2, b2 = 10, 1
    parameter_given_model_prior_distribution = [
        Distribution(theta=RV("beta", a1, b1)),
        Distribution(theta=RV("beta", a2, b2))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(median_multiplier=.9),
                 sampler=sampler)
    n1 = 2
    abc.new(db_path, {"result": n1})
    minimum_epsilon = -1
    history = abc.run(minimum_epsilon, max_nr_populations=5)
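
    # Illustration (reconstructed; marginal_likelihood is our helper, not
    # pyabc API): for a Beta(a, b) prior, the marginal likelihood of n1
    # successes out of binomial_n trials is the beta-binomial pmf
    #   C(binomial_n, n1) * B(a + n1, b + binomial_n - n1) / B(a, b),
    # which gives exact model probabilities to compare the run against.
    from scipy.special import binom, betaln

    def marginal_likelihood(a, b):
        return binom(binomial_n, n1) * np.exp(
            betaln(a + n1, b + binomial_n - n1) - betaln(a, b))

    ml1, ml2 = marginal_likelihood(a1, b1), marginal_likelihood(a2, b2)
    p1_expected = ml1 / (ml1 + ml2)
    mp = history.get_model_probabilities(history.max_t)
    print(mp.p[0], p1_expected)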


def test_two_competing_gaussians_single_population(db_path, sampler,
                                                   transition):
    sigma_x = .5
    sigma_y = .5
    y_observed = 1

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model, model]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(500)
    mu_x_1, mu_x_2 = 0, 1
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(mu_x_1, sigma_x)),
        Distribution(x=st.norm(mu_x_2, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["y"]),
                 population_size,
                 transitions=[transition(), transition()],
                 eps=MedianEpsilon(.02),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})
    minimum_epsilon = -1
    nr_populations = 1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
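
    # Illustration (not part of the original test): with this tight epsilon,
    # the model whose prior mean matches the observation (model 2, mean 1)
    # should end up with the larger posterior probability.
    mp = history.get_model_probabilities(history.max_t)
    assert mp.p[1] > mp.p[0]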
return {"y": st.norm(args['x'], sigma).rvs()}
# We define two models, but they are identical so far
models = [model, model]
models = list(map(SimpleModel, models))
# However, our models' priors are not the same. Their mean differs.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [
Distribution(x=RV("norm", mu_x_1, sigma)),
Distribution(x=RV("norm", mu_x_2, sigma))
]
# We plug all the ABC setup together
nr_populations = 2
pop_size = ConstantPopulationSize(23, nr_samples_per_parameter=n_sim)
abc = ABCSMC(models, parameter_given_model_prior_distribution,
PercentileDistance(measures_to_use=["y"]),
pop_size,
eps=MedianEpsilon(),
sampler=sampler)
# Finally we add meta data such as model names and
# define where to store the results
# y_observed is the important piece here: our actual observation.
y_observed = 1
abc.new(db_path, {"y": y_observed})
# We run the ABC with 3 populations max
minimum_epsilon = .05
history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
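
    # Illustration (not part of the original code): nr_samples_per_parameter
    # requests several simulations per sampled parameter; regardless, the
    # per-model posteriors can be pulled out of the history as usual.
    for m in range(len(models)):
        df, w = history.get_distribution(m=m, t=history.max_t)
        print(m, len(df))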


def test_gaussian_multiple_populations_crossval_kde(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))]
    parameter_perturbation_kernels = [
        GridSearchCV(MultivariateNormalTransition(),
                     {"scaling": np.logspace(-1, 1.5, 5)})]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["y"]),
                 population_size,
                 transitions=parameter_perturbation_kernels,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})
    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
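
    # Illustration (a minimal sketch, assuming pyabc's GridSearchCV exposes
    # an sklearn-style fit(X, w)): the cross-validated kernel fits a
    # MultivariateNormalTransition for each scaling value on the weighted
    # sample and keeps the best-scoring one.
    import pandas as pd
    cv_kernel = GridSearchCV(MultivariateNormalTransition(),
                             {"scaling": np.logspace(-1, 1.5, 5)})
    cv_kernel.fit(pd.DataFrame({"x": np.random.randn(100)}),
                  np.ones(100) / 100)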
return {"y": st.norm(args['x'], sigma).rvs()}
# We define two models, but they are identical so far
models = [model, model]
models = list(map(SimpleModel, models))
# However, our models' priors are not the same. Their mean differs.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [
Distribution(x=st.norm(mu_x_1, sigma)),
Distribution(x=st.norm(mu_x_2, sigma))
]
# We plug all the ABC setup together
nr_populations = 1
population_size = ConstantPopulationSize(20)
abc = ABCSMC(models, parameter_given_model_prior_distribution,
PercentileDistance(measures_to_use=["y"]),
population_size,
eps=MedianEpsilon(.2),
sampler=sampler)
# Finally we add meta data such as model names and
# define where to store the results
# y_observed is the important piece here: our actual observation.
y_observed = 2
abc.new(db_path, {"y": y_observed})
# We run the ABC with 3 populations max
minimum_epsilon = .05
history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
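
    # Illustration (not part of the original code): results persist in the
    # database, so they can be reloaded later without rerunning the ABC
    # (assuming the default, most recent run id is wanted).
    from pyabc import History
    stored = History(db_path)
    print(stored.get_model_probabilities(stored.max_t))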