# (tooling banner left by an external scan; kept as a comment so it does not break parsing)
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_two_competing_gaussians_single_population(db_path, sampler):
    """Run a single ABC-SMC population for two identical Gaussian models
    whose parameter priors differ only in their mean."""
    sigma_x = .5
    sigma_y = .5
    y_observed = 1

    def gauss(args):
        # Simulate one observation around the sampled location parameter.
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    wrapped_models = [SimpleModel(m) for m in (gauss, gauss)]
    # Uniform prior over the two model indices.
    model_prior = RV("randint", 0, 2)
    population_size = ConstantPopulationStrategy(500, 1)
    mu_x_1, mu_x_2 = 0, 1
    priors = [
        Distribution(x=RV("norm", mu_x_1, sigma_x)),
        Distribution(x=RV("norm", mu_x_2, sigma_x)),
    ]
    kernels = [MultivariateNormalTransition() for _ in range(2)]
    abc = ABCSMC(wrapped_models, model_prior,
                 ModelPerturbationKernel(2, probability_to_stay=.7),
                 priors, kernels,
                 PercentileDistanceFunction(measures_to_use=["y"]),
                 MedianEpsilon(.02),
                 population_size,
                 sampler=sampler)
    options = {'db_path': db_path}
    abc.set_data({"y": y_observed}, 0, {}, options)

    minimum_epsilon = -1
    nr_populations = 1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon)
    mp = history.get_model_probabilities(history.max_t)
def test_redis_catch_error():
    """The Redis sampler must tolerate model evaluations that raise.

    Fix: the started Redis server is now cleaned up in a ``finally``
    block — previously a failing ``abc.run`` would leak the server
    process and keep port 8775 occupied for subsequent tests.
    """

    def model(pars):
        # Fail stochastically (~10% of calls) to exercise the sampler's
        # error handling/retry path.
        if np.random.uniform() < 0.1:
            raise ValueError("error")
        return {'s0': pars['p0'] + 0.2 * np.random.uniform()}

    def distance(s0, s1):
        return abs(s0['s0'] - s1['s0'])

    prior = Distribution(p0=RV("uniform", 0, 10))
    sampler = RedisEvalParallelSamplerServerStarter(
        batch_size=3, workers=1, processes_per_worker=1, port=8775)
    try:
        abc = ABCSMC(model, prior, distance, sampler=sampler,
                     population_size=10)
        db_file = "sqlite:///" + os.path.join(tempfile.gettempdir(), "test.db")
        data = {'s0': 2.8}
        abc.new(db_file, data)
        abc.run(minimum_epsilon=.1, max_nr_populations=3)
    finally:
        # Always shut the server down, even when the run raised.
        sampler.cleanup()
def test_default_eps():
    """Running without an explicit minimum epsilon should work and the
    default minimum epsilon should resolve to 0.0 in the usual setting."""

    def model(par):
        return {'s0': par['p0'] + np.random.random(),
                's1': np.random.random()}

    observation = {'s0': 0.4, 's1': 0.6}
    prior = pyabc.Distribution(p0=pyabc.RV('uniform', -1, 2))

    # Usual (deterministic acceptor) setting.
    abc = pyabc.ABCSMC(model, prior, population_size=10)
    abc.new(pyabc.create_sqlite_db_id(), observation)
    abc.run(max_nr_populations=3)
    assert abc.minimum_epsilon == 0.0

    # Noisy (stochastic acceptor) setting.
    acceptor = pyabc.StochasticAcceptor()
    eps = pyabc.Temperature()
    distance = pyabc.IndependentNormalKernel(var=np.array([1, 1]))
    abc = pyabc.ABCSMC(model, prior, distance, eps=eps,
                       acceptor=acceptor, population_size=10)
def test_all_in_one_model(db_path, sampler):
    """Two AllInOneModel instances with flat Beta(1, 1) priors should end
    up with approximately equal posterior model probabilities."""
    models = [AllInOneModel() for _ in range(2)]
    population_size = ConstantPopulationSize(800)
    priors = [Distribution(theta=RV("beta", 1, 1)) for _ in range(2)]
    abc = ABCSMC(models, priors,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})
    history = abc.run(.2, max_nr_populations=3)
    probabilities = history.get_model_probabilities(history.max_t)
    # Fully symmetric setup -> both probabilities should be close to 1/2.
    assert abs(probabilities.p[0] - .5) + abs(probabilities.p[1] - .5) < .08
def test_rpy2(sampler):
    """Run the R notebook example end to end through the rpy2 interface,
    then re-load the finished run from the database."""
    # Model, distance, summary statistics and data all come from R.
    r = pyabc.external.R("doc/examples/myRModel.R")
    r.display_source_ipython()
    model = r.model("myModel")
    distance = r.distance("myDistance")
    sum_stat = r.summary_statistics("mySummaryStatistics")
    data = r.observation("mySumStatData")

    prior = pyabc.Distribution(meanX=pyabc.RV("uniform", 0, 10),
                               meanY=pyabc.RV("uniform", 0, 10))
    abc = pyabc.ABCSMC(model, prior, distance,
                       summary_statistics=sum_stat,
                       sampler=sampler, population_size=5)
    db = pyabc.create_sqlite_db_id(file_="test_external.db")
    abc.new(db, data)
    history = abc.run(minimum_epsilon=0.9, max_nr_populations=2)
    # Access a stored R data frame to check the summary-statistics
    # round-trip through the database.
    history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]["cars"].head()

    # Try loading the run again; the observed data need not be passed.
    abc = pyabc.ABCSMC(model, prior, distance,
                       summary_statistics=sum_stat,
                       sampler=sampler, population_size=6)
    abc.load(db, history.id)
def test_beta_binomial_two_identical_models(db_path, sampler):
    """Two byte-identical binomial models with identical Beta(1, 1) priors
    must receive approximately equal posterior model probabilities."""
    binomial_n = 5

    def binom_model(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [SimpleModel(binom_model) for _ in range(2)]
    # Uniform prior over the two (identical) models.
    model_prior = RV("randint", 0, 2)
    population_size = ConstantPopulationStrategy(800, 3)
    priors = [Distribution(theta=RV("beta", 1, 1)) for _ in range(2)]
    kernels = [MultivariateNormalTransition() for _ in range(2)]
    abc = ABCSMC(models, model_prior,
                 ModelPerturbationKernel(2, probability_to_stay=.8),
                 priors, kernels,
                 MinMaxDistanceFunction(measures_to_use=["result"]),
                 MedianEpsilon(.1),
                 population_size,
                 sampler=sampler)
    abc.set_data({"result": 2}, 0, {}, {'db_path': db_path})

    history = abc.run(.2)
    mp = history.get_model_probabilities(history.max_t)
    # Identical models -> both probabilities should be close to 1/2.
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08
# NOTE(review): this span appears to be a truncated test body whose enclosing
# `def` line is missing from this chunk — `sampler` is referenced but never
# defined at this level. Presumably it comes from a pytest fixture via the
# missing function signature; confirm against the full file before editing.
sigma = .5
def model(args):
    # Gaussian observation model around the sampled location `x`.
    return {"y": st.norm(args['x'], sigma).rvs()}
# We define two models, but they are identical so far
models = [model, model]
models = list(map(SimpleModel, models))
# The prior over the model classes is uniform
model_prior = RV("randint", 0, 2)
# However, our models' priors are not the same. Their mean differs.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [Distribution(x=st.norm(mu_x_1, sigma)),
                                            Distribution(x=st.norm(mu_x_2, sigma))]
# Particles are perturbed in a Gaussian fashion
parameter_perturbation_kernels = [MultivariateNormalTransition() for _ in range(2)]
# We plug all the ABC setup together; the population size adapts based on
# the mean coefficient of variation, capped at 1000 particles.
nr_populations = 3
population_size = AdaptivePopulationSize(400, mean_cv=0.05,
                                         max_population_size=1000)
abc = ABCSMC(models, parameter_given_model_prior_distribution,
             MinMaxDistance(measures_to_use=["y"]),
             population_size,
             model_prior=model_prior,
             eps=MedianEpsilon(.2),
             sampler=sampler)
# Finally we add meta data such as model names and define where to store the results
# NOTE(review): like the span above, this appears to be a truncated test body
# missing its `def` line — `transition`, `sampler` and `db_path` are referenced
# but not defined here; they presumably arrive via the missing signature or
# fixtures. Verify against the full file before restructuring.
sigma = .5
def model(args):
    # Gaussian observation model around the sampled location `x`.
    return {"y": st.norm(args['x'], sigma).rvs()}
# We define two models, but they are identical so far
models = [model, model]
models = list(map(SimpleModel, models))
# However, our models' priors are not the same. Their mean differs.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [
    Distribution(x=st.norm(mu_x_1, sigma)),
    Distribution(x=st.norm(mu_x_2, sigma))]
# We plug all the ABC setup together
nr_populations = 3
population_size = ConstantPopulationSize(400)
abc = ABCSMC(models, parameter_given_model_prior_distribution,
             PercentileDistance(measures_to_use=["y"]), population_size,
             eps=MedianEpsilon(.2),
             transitions=[transition(), transition()],
             sampler=sampler)
# Finally we add meta data such as model names and define where to store the results
# y_observed is the important piece here: our actual observation.
y_observed = 1
abc.new(db_path, {"y": y_observed})
def two_competing_gaussians_multiple_population(db_path, sampler, n_sim):
    """Build an ABC-SMC setup for two Gaussian models whose priors differ
    only in their mean.

    NOTE(review): within this chunk the function only constructs the
    setup and the observation; the actual run is not visible here.
    """
    # A simple Gaussian observation model.
    sigma = .5

    def gauss(args):
        return {"y": st.norm(args['x'], sigma).rvs()}

    # Two structurally identical models, wrapped for pyabc.
    models = [SimpleModel(m) for m in (gauss, gauss)]
    # Their priors differ in the mean.
    mu_x_1, mu_x_2 = 0, 1
    priors = [
        Distribution(x=RV("norm", mu_x_1, sigma)),
        Distribution(x=RV("norm", mu_x_2, sigma)),
    ]
    # Plug the whole setup together.
    nr_populations = 2
    pop_size = ConstantPopulationSize(23, nr_samples_per_parameter=n_sim)
    abc = ABCSMC(models, priors,
                 PercentileDistance(measures_to_use=["y"]),
                 pop_size,
                 eps=MedianEpsilon(),
                 sampler=sampler)
    # y_observed is the important piece here: our actual observation.
    y_observed = 1
def test_beta_binomial_different_priors_initial_epsilon_from_sample(db_path, sampler):
    """Two identical binomial models with *different* Beta priors; the
    initial epsilon is derived from a sample (median multiplier < 1)."""
    binomial_n = 5

    def binom_model(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [SimpleModel(binom_model) for _ in range(2)]
    # Uniform prior over the two model indices.
    model_prior = RV("randint", 0, 2)
    population_size = ConstantPopulationStrategy(800, 5)
    # The two models share the likelihood but differ in their Beta priors.
    a1, b1 = 1, 1
    a2, b2 = 10, 1
    priors = [
        Distribution(theta=RV("beta", a1, b1)),
        Distribution(theta=RV("beta", a2, b2)),
    ]
    kernels = [MultivariateNormalTransition() for _ in range(2)]
    abc = ABCSMC(models, model_prior,
                 ModelPerturbationKernel(2, probability_to_stay=.8),
                 priors, kernels,
                 MinMaxDistanceFunction(measures_to_use=["result"]),
                 MedianEpsilon(median_multiplier=.9),
                 population_size,
                 sampler=sampler)
    n1 = 2
    abc.set_data({"result": n1}, 0, {}, {'db_path': db_path})

    history = abc.run(-1)
    mp = history.get_model_probabilities(history.max_t)