def run_local_object_cache_test(self, local_store):
    sleep_time = .2
    simfn = get_sleep_simulator(sleep_time)
    sim = elfi.Simulator("sim", simfn, observed=0, store=local_store)
    run_cache_test(sim, sleep_time)
    assert local_store._read_data(sim.id, 0)[0] == 1

    # Test that nodes derived from `sim` benefit from the stored results
    summ = elfi.Summary("sum", lambda x: x, sim)
    t0 = timeit.default_timer()
    res = summ.acquire(1).compute()
    td = timeit.default_timer() - t0
    assert td < sleep_time
    assert res[0][0] == 1

    elfi.env.client().shutdown()
def test_worker_memory_cache(self):
    sleep_time = .2
    simfn = get_sleep_simulator(sleep_time)
    sim = elfi.Simulator("sim", simfn, observed=0, store=elfi.MemoryStore())
    res = run_cache_test(sim, sleep_time)
    assert res[0][0] == 1

    # Test that nodes derived from `sim` benefit from the caching
    summ = elfi.Summary("sum", lambda x: x, sim)
    t0 = timeit.default_timer()
    res = summ.acquire(1).compute()
    td = timeit.default_timer() - t0
    assert td < sleep_time
    assert res[0][0] == 1

    elfi.env.client().shutdown()
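# `get_sleep_simulator` and `run_cache_test` are defined elsewhere in the test
# module. A minimal sketch of what they might look like, inferred from how the
# two tests above use them (names and details are assumptions, not the
# project's actual helpers):
import time
import timeit

import numpy as np


def get_sleep_simulator(sleep_time):
    """Return a simulator that sleeps for `sleep_time` and then outputs 1."""
    def simfn(*args, **kwargs):
        time.sleep(sleep_time)
        return np.atleast_2d(1)
    return simfn


def run_cache_test(sim, sleep_time):
    """Acquire twice; the second acquisition should be served from the store."""
    res = sim.acquire(1).compute()
    t0 = timeit.default_timer()
    res = sim.acquire(1).compute()
    assert timeit.default_timer() - t0 < sleep_time
    return res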
# Hyperparameters
mu0, sigma0 = (10, 100)
y_obs = gauss.gauss(
    mu, sigma, n_obs=n_obs, batch_size=1, random_state=np.random.RandomState(seed))
sim_fn = partial(gauss.gauss, sigma=sigma, n_obs=n_obs)
# Posterior
n = y_obs.shape[1]
mu1 = (mu0 / sigma0**2 + y_obs.sum() / sigma**2) / (1 / sigma0**2 + n / sigma**2)
sigma1 = (1 / sigma0**2 + n / sigma**2)**(-0.5)
# Model
m = elfi.ElfiModel()
elfi.Prior('norm', mu0, sigma0, model=m, name='mu')
elfi.Simulator(sim_fn, m['mu'], observed=y_obs, name='gauss')
elfi.Summary(lambda x: x.mean(axis=1), m['gauss'], name='ss_mean')
elfi.Distance('euclidean', m['ss_mean'], name='d')
res = elfi.Rejection(m['d'], output_names=['ss_mean'], batch_size=batch_size,
                     seed=seed).sample(1000, threshold=1)
adj = elfi.adjust_posterior(model=m, sample=res, parameter_names=['mu'], summary_names=['ss_mean'])
assert np.allclose(_statistics(adj.outputs['mu']), (4.9772879640569778, 0.02058680115402544))
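# `_statistics` is also defined elsewhere in the test module. Judging from the
# two-element tuple it is compared against, it plausibly returns the sample
# mean together with a spread measure; the definition below is an assumption
# only, not the verified implementation:
def _statistics(arr):
    return arr.mean(), arr.std()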
# summary
def mock_summary(i, x):
    return np.zeros((x.shape[0],) + dims[i])


# discrepancy
def mock_discrepancy(x, y):
    assert len(x) == len(y) == n_sum
    for i in range(n_sum):
        exp_dims = dims[i]
        if len(exp_dims) == 0:
            exp_dims = (1,)
        assert y[i].shape == (1,) + exp_dims
        assert x[i].shape == (n_samples,) + exp_dims
    return np.zeros((n_samples, 1))
# model
mock = MockSimulator(ret)
si = elfi.Simulator("si", mock, None, observed=obs)
su = [elfi.Summary("su{}".format(j), partial(mock_summary, j), si) for j in range(n_sum)]
di = elfi.Discrepancy("di", mock_discrepancy, *su)
res = di.generate(n_samples).compute()
assert res.shape == (n_samples, 1)
elfi.new_inference_task()
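# `MockSimulator` is not shown on this page. A plausible stand-in, assuming it
# simply replays the pre-computed output `ret` on every call (inferred from how
# it is constructed above; this is an assumption):
class MockSimulator:
    def __init__(self, ret):
        self.ret = ret
        self.n_calls = 0

    def __call__(self, *args, **kwargs):
        self.n_calls += 1
        return self.ret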
def test_vectorized_and_external_combined():
    constant = elfi.Constant(123)
    kwargs_sim = elfi.tools.external_operation(
        'echo {seed} {batch_index} {index_in_batch} {submission_index}',
        process_result='int32')
    kwargs_sim = elfi.tools.vectorize(kwargs_sim)
    sim = elfi.Simulator(kwargs_sim, constant)

    # Generating should fail while the simulator does not receive the meta
    # information referenced by the command's placeholders
    with pytest.raises(Exception):
        sim.generate(3)

    sim['_uses_meta'] = True
    g = sim.generate(3)

    # Test uniqueness of seeds
    assert len(np.unique(g[:, 0])) == 3
    assert len(np.unique(g[:, 1])) == 1

    # Test index_in_batch
    assert np.array_equal(g[:, 2], [0, 1, 2])

    # Test submission_index (all belong to the same submission)
    assert len(np.unique(g[:, 3])) == 1
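# `elfi.tools.vectorize` wraps an operation that handles a single run so that
# it can serve a whole batch. Conceptually it behaves like the simplified
# sketch below (an illustration only, not the library's implementation;
# meta-information and random-state handling are omitted):
import numpy as np


def vectorize_sketch(operation):
    def vectorized(*inputs, batch_size=1, **kwinputs):
        runs = [operation(*(inp[i] for inp in inputs), **kwinputs)
                for i in range(batch_size)]
        return np.array(runs)
    return vectorized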
def test_dict_output():
    vsim = elfi.tools.vectorize(simulator)
    vsum = elfi.tools.vectorize(summary)

    obs = simulator([.2, .8])

    elfi.new_model()
    p = elfi.Prior('dirichlet', [2, 2])
    sim = elfi.Simulator(vsim, p, observed=obs)
    S = elfi.Summary(vsum, sim)
    d = elfi.Distance('euclidean', S)

    pool = elfi.OutputPool(['sim'])
    rej = elfi.Rejection(d, batch_size=100, pool=pool, output_names=['sim'])
    sample = rej.sample(100, n_sim=1000)
    mean = np.mean(sample.samples['p'], axis=0)

    # Crude test
    assert mean[1] > mean[0]
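# `simulator` and `summary` are defined elsewhere; the test name suggests the
# simulator returns its output wrapped in a dict, which the summary then
# unpacks. A hypothetical pair consistent with that usage (illustrative
# assumptions, not the actual test fixtures):
def simulator(p, random_state=None):
    random_state = random_state or np.random
    return dict(data=random_state.multinomial(30, p))


def summary(y):
    return y['data']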
# Hyperparameters
mu0, sigma0 = (10, 100)
y_obs = gauss.gauss(
    mu, sigma, n_obs=n_obs, batch_size=1, random_state=np.random.RandomState(seed))
sim_fn = partial(gauss.gauss, sigma=sigma, n_obs=n_obs)
# Posterior
n = y_obs.shape[1]
mu1 = (mu0 / sigma0**2 + y_obs.sum() / sigma**2) / (1 / sigma0**2 + n / sigma**2)
sigma1 = (1 / sigma0**2 + n / sigma**2)**(-0.5)
# Model
m = elfi.ElfiModel()
elfi.Prior('norm', mu0, sigma0, model=m, name='mu')
elfi.Simulator(sim_fn, m['mu'], observed=y_obs, name='gauss')
elfi.Summary(lambda x: x.mean(axis=1), m['gauss'], name='ss_mean')
elfi.Distance('euclidean', m['ss_mean'], name='d')
res = elfi.Rejection(m['d'], output_names=['ss_mean'], batch_size=batch_size,
                     seed=seed).sample(1000, threshold=1)
# Add some invalid values
res.outputs['mu'] = np.append(res.outputs['mu'], np.array([np.inf]))
res.outputs['ss_mean'] = np.append(res.outputs['ss_mean'], np.array([np.inf]))
with pytest.warns(UserWarning):
    adj = elfi.adjust_posterior(
        model=m, sample=res, parameter_names=['mu'], summary_names=['ss_mean'])
assert np.allclose(_statistics(adj.outputs['mu']), (4.9772879640569778, 0.02058680115402544))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='a2'))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b1'))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b2'))
priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g1'))
priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g2'))
priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k1'))
priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k2'))
EPS = np.finfo(float).eps
priors.append(elfi.Prior('uniform', -1 + EPS, 2 - 2 * EPS, model=m, name='rho'))
# Obtaining the observations.
y_obs = BiGNK(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed))
# Defining the simulator.
fn_simulator = partial(BiGNK, n_obs=n_obs)
elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='BiGNK')
# Initialising the default summary statistics.
default_ss = elfi.Summary(ss_robust, m['BiGNK'], name='ss_robust')
# Using the custom Euclidean distance function designed for
# the summary statistics of shape (batch_size, dim_ss, dim_ss_point).
elfi.Discrepancy(euclidean_multiss, default_ss, name='d')
return m
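# Example of how the returned model could be used (the sampler settings below
# are illustrative assumptions, and `get_model` is a placeholder name for
# whatever function the `return m` above belongs to):
# m = get_model()
# rej = elfi.Rejection(m['d'], batch_size=10000, seed=0)
# sample = rej.sample(1000, quantile=0.01)
# print(sample)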