How to use elfi.Prior in elfi

To help you get started, we've selected a few examples of how elfi.Prior is used in public projects.

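elfi.Prior declares a parameter node whose value is drawn from a prior distribution; it is typically wired into an elfi.Simulator together with elfi.Summary and a distance or discrepancy node, exactly as in the examples below. Here is a minimal sketch of that pattern using ELFI's Rejection sampler (the toy simulator, parameter values, batch size and quantile are illustrative, not taken from the projects below):

from functools import partial

import numpy as np
import scipy.stats as ss
import elfi


def simulator(mu, batch_size=1, random_state=None):
    # ELFI calls the simulator with a batch of parameter values at once.
    mu = np.atleast_1d(mu)
    return ss.norm.rvs(loc=mu[:, None], scale=1,
                       size=(batch_size, 30), random_state=random_state)


y_obs = simulator(2.0, batch_size=1, random_state=np.random.RandomState(0))

m = elfi.new_model()
mu = elfi.Prior('uniform', -5, 10, model=m, name='mu')  # scipy-style loc/scale: U(-5, 5)
sim = elfi.Simulator(simulator, mu, observed=y_obs, name='sim')
S1 = elfi.Summary(partial(np.mean, axis=1), sim, name='mean')
d = elfi.Distance('euclidean', S1, name='d')

rej = elfi.Rejection(d, batch_size=1000)
result = rej.sample(100, quantile=0.1)  # accept the closest 10% of simulations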

From elfi-dev/elfi, tests/old_unit/test_core.py:
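# Note: this legacy unit test uses an older ELFI API, in which the node name ('mu')
# is passed as the first positional argument to elfi.Prior.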
def test_generate_vs_acquire():
    mu = elfi.Prior('mu', 'uniform', 0, 4)
    ar1 = mu.acquire(10).compute()
    ar2 = mu.generate(5).compute()
    ar12 = mu.acquire(15).compute()
    assert np.array_equal(np.vstack((ar1, ar2)), ar12)
From elfi-dev/elfi, tests/functional/test_key_collisions.py:
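# Note: like the test above, this uses ELFI's older API (elfi.env, name-first node constructors).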
def test_reset_specific_scheduler_keys():
    """This test fails if keys are not different"""
    elfi.env.client(n_workers=2, threads_per_worker=1)
    N = 20
    bs = 10

    y = None
    t = None

    p1 = elfi.Prior('p', 'Uniform')
    sim1 = elfi.Simulator('sim', lambda *args, **kwargs: args[0], p1, observed=1)

    for i in range(10):
        y_prev = y
        t_prev = t

        y = sim1.acquire(N, batch_size=bs).compute()
        t = p1.acquire(N, batch_size=bs).compute()

        if y_prev is not None:
            assert np.all(y != y_prev)
            assert np.all(t != t_prev)

        p1.reset()

    elfi.env.client().shutdown()
From elfi-dev/elfi, elfi/examples/gnk.py:
    Returns
    -------
    elfi.ElfiModel

    """
    m = elfi.new_model()

    # Initialising the parameters as in Allingham et al. (2009).
    if true_params is None:
        true_params = [3, 1, 2, .5]

    # Initialising the prior settings as in Allingham et al. (2009).
    priors = []
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='A'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='B'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='g'))
    priors.append(elfi.Prior('uniform', 0, 10, model=m, name='k'))

    # Obtaining the observations.
    y_obs = GNK(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed))

    # Defining the simulator.
    fn_simulator = partial(GNK, n_obs=n_obs)
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='GNK')

    # Initialising the summary statistics as in Allingham et al. (2009).
    default_ss = elfi.Summary(ss_order, m['GNK'], name='ss_order')

    # Using the multi-dimensional Euclidean distance function as
    # the summary statistics' implementations are designed for multi-dimensional cases.
    elfi.Discrepancy(euclidean_multiss, default_ss, name='d')
    return m
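Once the function returns the ElfiModel, any ELFI inference method can be attached to its discrepancy node 'd'. A minimal sketch, assuming the enclosing function is the module's get_model() constructor (the batch size and acceptance quantile are illustrative):

m = get_model()
rej = elfi.Rejection(m['d'], batch_size=10000, seed=1)
result = rej.sample(1000, quantile=0.01)  # posterior samples for A, B, g and k
print(result)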
From elfi-dev/elfi, elfi/examples/ricker.py:
    y_obs = simulator(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))
    sim_fn = partial(simulator, n_obs=n_obs)
    sumstats = []

    if stochastic:
        elfi.Prior(ss.expon, np.e, 2, model=m, name='t1')
        elfi.Prior(ss.truncnorm, 0, 5, model=m, name='t2')
        elfi.Prior(ss.uniform, 0, 100, model=m, name='t3')
        elfi.Simulator(sim_fn, m['t1'], m['t2'], m['t3'], observed=y_obs, name='Ricker')
        sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
        sumstats.append(elfi.Summary(partial(np.var, axis=1), m['Ricker'], name='Var'))
        sumstats.append(elfi.Summary(num_zeros, m['Ricker'], name='#0'))
        elfi.Discrepancy(chi_squared, *sumstats, name='d')

    else:  # very simple deterministic case
        elfi.Prior(ss.expon, np.e, model=m, name='t1')
        elfi.Simulator(sim_fn, m['t1'], observed=y_obs, name='Ricker')
        sumstats.append(elfi.Summary(partial(np.mean, axis=1), m['Ricker'], name='Mean'))
        elfi.Distance('euclidean', *sumstats, name='d')

    return m
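Note that elfi.Prior accepts the distribution either as a scipy.stats name string (as in the g-and-k example above) or as a scipy-like distribution object (ss.expon, ss.truncnorm here); the remaining positional arguments are passed on to the distribution. A small illustrative sketch of the two equivalent forms:

import numpy as np
import scipy.stats as ss
import elfi

t1_by_name = elfi.Prior('expon', np.e, name='t1_by_name')       # distribution given by name
t1_by_object = elfi.Prior(ss.expon, np.e, name='t1_by_object')   # same distribution, given as an object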
From elfi-dev/elfi, elfi/examples/daycare.py:
    Returns
    -------
    m : elfi.ElfiModel

    """
    logger = logging.getLogger()
    if true_params is None:
        true_params = [3.6, 0.6, 0.1]

    m = elfi.ElfiModel()
    y_obs = daycare(*true_params, random_state=np.random.RandomState(seed_obs), **kwargs)
    sim_fn = partial(daycare, **kwargs)
    priors = []
    sumstats = []

    priors.append(elfi.Prior('uniform', 0, 11, model=m, name='t1'))
    priors.append(elfi.Prior('uniform', 0, 2, model=m, name='t2'))
    priors.append(elfi.Prior('uniform', 0, 1, model=m, name='t3'))

    elfi.Simulator(sim_fn, *priors, observed=y_obs, name='DCC')

    sumstats.append(elfi.Summary(ss_shannon, m['DCC'], name='Shannon'))
    sumstats.append(elfi.Summary(ss_strains, m['DCC'], name='n_strains'))
    sumstats.append(elfi.Summary(ss_prevalence, m['DCC'], name='prevalence'))
    sumstats.append(elfi.Summary(ss_prevalence_multi, m['DCC'], name='multi'))

    elfi.Discrepancy(distance, *sumstats, name='d')

    logger.info("Generated observations with true parameters "
                "t1: %.1f, t2: %.3f, t3: %.1f, ", *true_params)

    return m
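A quick way to sanity-check the priors and the simulator wiring is to draw a few values directly from the model. A sketch, assuming ElfiModel.generate and the node names defined above (get_model() stands for the enclosing constructor):

m = get_model()
out = m.generate(batch_size=3, outputs=['t1', 't2', 't3', 'Shannon'])
print(out['t1'], out['t2'], out['t3'])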
From elfi-dev/elfi, elfi/examples/bignk.py:
    Returns
    -------
    elfi.ElfiModel

    """
    m = elfi.new_model()

    # Initialising the parameters as in Drovandi & Pettitt (2011).
    if true_params is None:
        true_params = [3, 4, 1, 0.5, 1, 2, .5, .4, 0.6]

    # Initialising the prior settings as in Drovandi & Pettitt (2011).
    priors = []
    priors.append(elfi.Prior('uniform', 0, 5, model=m, name='a1'))
    priors.append(elfi.Prior('uniform', 0, 5, model=m, name='a2'))
    priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b1'))
    priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b2'))
    priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g1'))
    priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g2'))
    priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k1'))
    priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k2'))
    EPS = np.finfo(float).eps
    priors.append(elfi.Prior('uniform', -1 + EPS, 2 - 2 * EPS, model=m, name='rho'))

    # Obtaining the observations.
    y_obs = BiGNK(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed))

    # Defining the simulator.
    fn_simulator = partial(BiGNK, n_obs=n_obs)
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='BiGNK')

    # Initialising the default summary statistics.
    default_ss = elfi.Summary(ss_robust, m['BiGNK'], name='ss_robust')
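String-named distributions follow scipy.stats conventions, so the two numbers given to 'uniform' are loc and scale rather than lower and upper bounds: the g1 prior above, elfi.Prior('uniform', -5, 10, ...), is uniform on [-5, 5], and the rho prior is uniform just inside [-1, 1]. In scipy terms:

import scipy.stats as ss
ss.uniform(loc=-5, scale=10).support()  # (-5.0, 5.0)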
From elfi-dev/elfi, elfi/examples/lotka_volterra.py:
    if true_params is None:
        true_params = [1.0, 0.005, 0.6, 50, 100, 10.]

    kwargs['n_obs'] = n_obs
    y_obs = lotka_volterra(*true_params, random_state=np.random.RandomState(seed_obs), **kwargs)

    m = elfi.ElfiModel()
    sim_fn = partial(lotka_volterra, **kwargs)
    priors = []
    sumstats = []

    priors.append(elfi.Prior(ExpUniform, -2, 0, model=m, name='r1'))
    priors.append(elfi.Prior(ExpUniform, -5, -2.5, model=m, name='r2'))  # easily kills populations
    priors.append(elfi.Prior(ExpUniform, -2, 0, model=m, name='r3'))
    priors.append(elfi.Prior('poisson', 50, model=m, name='prey0'))
    priors.append(elfi.Prior('poisson', 100, model=m, name='predator0'))
    priors.append(elfi.Prior(ExpUniform, np.log(0.5), np.log(50), model=m, name='sigma'))

    elfi.Simulator(sim_fn, *priors, observed=y_obs, name='LV')
    sumstats.append(elfi.Summary(partial(pick_stock, species=0), m['LV'], name='prey'))
    sumstats.append(elfi.Summary(partial(pick_stock, species=1), m['LV'], name='predator'))
    elfi.Distance('sqeuclidean', *sumstats, name='d')

    logger.info("Generated %i observations with true parameters r1: %.1f, r2: %.3f, r3: %.1f, "
                "prey0: %i, predator0: %i, sigma: %.1f.", n_obs, *true_params)

    return m
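ExpUniform above is not a scipy.stats distribution but a custom prior defined elsewhere in the same module. A prior of your own can be any scipy-like object with an rvs method; in the ELFI examples this is typically done by subclassing elfi.Distribution. A minimal sketch of a similar log-uniform prior (the class, its pdf and the parameter values are illustrative):

import numpy as np
import scipy.stats as ss
import elfi


class LogUniform(elfi.Distribution):
    """exp(U(a, b)): a log-uniform prior supported on [exp(a), exp(b)]."""

    @classmethod
    def rvs(cls, a, b, size=1, random_state=None):
        u = ss.uniform.rvs(loc=a, scale=b - a, size=size, random_state=random_state)
        return np.exp(u)

    @classmethod
    def pdf(cls, x, a, b):
        # density of exp(U(a, b)); zero outside the support
        return np.where((x >= np.exp(a)) & (x <= np.exp(b)), 1.0 / (x * (b - a)), 0.0)


r1 = elfi.Prior(LogUniform, -2, 0)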
From elfi-dev/elfi, elfi/examples/gauss.py:
    m = elfi.new_model()
    # Initialising the priors.
    eps_prior = 5  # The longest distance from the median of an initialised prior's distribution.
    priors = []
    if nd_mean:
        n_dim = len(true_params)
        for i in range(n_dim):
            name_prior = 'mu_{}'.format(i)
            prior_mu = elfi.Prior('uniform', true_params[i] - eps_prior,
                                  2 * eps_prior, model=m, name=name_prior)
            priors.append(prior_mu)
    else:
        priors.append(elfi.Prior('uniform', true_params[0] - eps_prior,
                                 2 * eps_prior, model=m, name='mu'))
        priors.append(elfi.Prior('truncnorm', np.amax([.01, true_params[1] - eps_prior]),
                                 2 * eps_prior, model=m, name='sigma'))
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='gauss')

    # Initialising the summary statistics.
    sumstats = []
    sumstats.append(elfi.Summary(ss_mean, m['gauss'], name='ss_mean'))
    sumstats.append(elfi.Summary(ss_var, m['gauss'], name='ss_var'))

    # Choosing the discrepancy metric.
    if nd_mean:
        elfi.Discrepancy(euclidean_multidim, *sumstats, name='d')
    else:
        elfi.Distance('euclidean', *sumstats, name='d')
    return m
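The last branch shows the two ways of defining the distance node: elfi.Distance('euclidean', ...) applies a named scipy distance to the stacked summaries, while elfi.Discrepancy takes a user-defined function (euclidean_multidim here, needed for the multi-dimensional summaries). A sketch of such a function, assuming ELFI's convention of passing the observed summaries through an observed keyword, as the use of euclidean_multidim above suggests:

import numpy as np

def my_discrepancy(*simulated, observed):
    # simulated: one array per Summary node, each of shape (batch_size, ...)
    # observed:  a tuple holding the corresponding observed summaries
    sim = np.column_stack(simulated)
    obs = np.column_stack(observed)
    return np.sqrt(np.sum((sim - obs) ** 2, axis=1))

elfi.Discrepancy(my_discrepancy, *sumstats, name='d')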
From elfi-dev/elfi, elfi/examples/gauss.py:
        fn_simulator = partial(gauss_nd_mean, cov_matrix=cov_matrix, n_obs=n_obs)
    else:
        fn_simulator = partial(gauss, n_obs=n_obs)

    # Obtaining the observations.
    y_obs = fn_simulator(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))

    m = elfi.new_model()
    # Initialising the priors.
    eps_prior = 5  # The longest distance from the median of an initialised prior's distribution.
    priors = []
    if nd_mean:
        n_dim = len(true_params)
        for i in range(n_dim):
            name_prior = 'mu_{}'.format(i)
            prior_mu = elfi.Prior('uniform', true_params[i] - eps_prior,
                                  2 * eps_prior, model=m, name=name_prior)
            priors.append(prior_mu)
    else:
        priors.append(elfi.Prior('uniform', true_params[0] - eps_prior,
                                 2 * eps_prior, model=m, name='mu'))
        priors.append(elfi.Prior('truncnorm', np.amax([.01, true_params[1] - eps_prior]),
                                 2 * eps_prior, model=m, name='sigma'))
    elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='gauss')

    # Initialising the summary statistics.
    sumstats = []
    sumstats.append(elfi.Summary(ss_mean, m['gauss'], name='ss_mean'))
    sumstats.append(elfi.Summary(ss_var, m['gauss'], name='ss_var'))

    # Choosing the discrepancy metric.
    if nd_mean: