How to use the emcee.EnsembleSampler class in emcee

To help you get started, we’ve selected a few emcee examples based on popular ways it is used in public projects.

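If you just want a starting point, here is a minimal sketch of the core workflow with the current emcee 3 API; the Gaussian log-probability, walker count, and step counts are placeholder choices for illustration.

import numpy as np
import emcee

# Toy target: an isotropic Gaussian log-probability (illustrative only).
def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 32, 5                    # arbitrary example values
p0 = np.random.randn(nwalkers, ndim)      # initial walker positions

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, 1000)                # 1000 steps per walker
samples = sampler.get_chain(discard=100, flat=True)  # drop burn-in, flatten

The project snippets below build on this same pattern, adding backends, custom moves, multiprocessing pools, and explicit burn-in handling.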

github dfm / emcee / tests / unit / test_backends.py
def run_sampler(backend, nwalkers=32, ndim=3, nsteps=25, seed=1234, thin_by=1,
                dtype=None, blobs=True, lp=None):
    if lp is None:
        lp = normal_log_prob_blobs if blobs else normal_log_prob
    if seed is not None:
        np.random.seed(seed)
    coords = np.random.randn(nwalkers, ndim)
    sampler = EnsembleSampler(nwalkers, ndim, lp,
                              backend=backend, blobs_dtype=dtype)
    sampler.run_mcmc(coords, nsteps, thin_by=thin_by)
    return sampler
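
The test above exercises the backend and blob machinery of EnsembleSampler. In emcee 3 the usual way to persist a chain to disk is the HDF5 backend; a minimal sketch, assuming h5py is installed and using an arbitrary file name:

import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 32, 3
backend = emcee.backends.HDFBackend("chain.h5")   # arbitrary file name
backend.reset(nwalkers, ndim)                     # clear any previous content

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, backend=backend)
sampler.run_mcmc(np.random.randn(nwalkers, ndim), 100)

The same file can later be reopened with HDFBackend to inspect or resume the run.
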
github sblunt / orbitize / orbitize / sampler.py
thin (int): factor to thin the steps of each walker
                by to remove correlations in the walker steps
            examine_chains (boolean): Displays plots of walkers at each step by
                running `examine_chains` after `total_orbits` sampled.

        Returns:
            ``emcee.sampler`` object: the sampler used to run the MCMC
        """

        if self.use_pt:
            sampler = ptemcee.Sampler(
                self.num_walkers, self.num_params, self._logl, orbitize.priors.all_lnpriors,
                ntemps=self.num_temps, threads=self.num_threads, logpargs=[self.priors, ]
            )
        else:
            sampler = emcee.EnsembleSampler(
                self.num_walkers, self.num_params, self._logl,
                threads=self.num_threads, kwargs={'include_logp': True}
            )
        
        # we're using args because emcee < 3.0 has three return values whereas emcee > 3.0 has
        # four. We can explicitly declare 4 variables instead of args in the future. 
        for args in sampler.sample(self.curr_pos, iterations=burn_steps, thin=thin):
            pass

        sampler.reset()
        try:
            self.curr_pos = args[0]
        except UnboundLocalError:  # 0 step burn-in (pos is not defined)
            pass
        print('Burn in complete')
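
With the emcee 3 API the burn-in-then-reset pattern above becomes more compact, because run_mcmc returns a State object that can seed the production run directly. A sketch with placeholder dimensions and step counts:

import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 50, 4                    # arbitrary example values
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)

# Burn-in: keep only the final walker positions, returned as a State.
state = sampler.run_mcmc(np.random.randn(nwalkers, ndim), 100)
sampler.reset()                           # discard the stored burn-in samples

# Production run, restarted from the end of the burn-in.
sampler.run_mcmc(state, 1000)
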
github amzn / emukit / emukit / samplers / mcmc_sampler.py
def get_samples(self, n_samples, log_p_function, burn_in_steps=50, n_steps=100):
        """
        Generates samples.

        Parameters:
            n_samples - number of samples to generate
            log_p_function - a function that returns log density for a specific sample
            burn_in_steps - number of burn-in steps for sampling
            n_steps - number of sampling steps after burn-in

        Returns a tuple of two arrays: (samples, log_p_function values for samples)
        """
        X_init = RandomDesign(self.space).get_samples(n_samples)
        sampler = emcee.EnsembleSampler(n_samples, X_init.shape[1], log_p_function)

        # Burn-In
        samples, samples_log, _ = sampler.run_mcmc(X_init, burn_in_steps)

        # MCMC Sampling
        samples, samples_log, _ = sampler.run_mcmc(samples, n_steps)

        # make sure we have an array of shape (n samples, space input dim)
        if len(samples.shape) == 1:
            samples = samples.reshape(-1, 1)
        samples_log = samples_log.reshape(-1, 1)

        return samples, samples_log
github dfm / emcee / document / plots / oned.py
pid = os.getpid()
    _random = _rngs.get(
        pid, np.random.RandomState(int(int(pid) + time.time()))
    )
    _rngs[pid] = _random

    ndim = int(np.ceil(2 ** (7 * _random.rand())))
    nwalkers = 2 * ndim + 2
    # nwalkers += nwalkers % 2
    print(ndim, nwalkers)

    cov = random_cov(ndim)
    icov = np.linalg.inv(cov)

    ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov])
    ens_samp.random_state = _random.get_state()
    pos, lnprob, state = ens_samp.run_mcmc(
        np.random.randn(nwalkers * ndim).reshape([nwalkers, ndim]), nsteps
    )

    proposal = np.diag(cov.diagonal())
    mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn, args=[icov])
    mh_samp.random_state = state
    mh_samp.run_mcmc(np.random.randn(ndim), nsteps)

    f = h5py.File(outfn)
    f["data"][i, :] = np.array(
        [ndim, np.mean(ens_samp.acor), np.mean(mh_samp.acor)]
    )
    f.close()
github adammoss / nnest / nnest / ensemble.py
"""

        def log_prob(x):
            logl, der = self.loglike(x)
            return logl + self.prior(x), der

        if self.sample_prior is not None:
            x_current = self.sample_prior(num_walkers)
        else:
            raise ValueError('Prior does not have sample method')
        try:
            import emcee
        except:
            raise ImportError
        sampler = emcee.EnsembleSampler(num_walkers, self.x_dim, log_prob, moves=[(emcee.moves.KDEMove(), 1.0)])
        state = sampler.run_mcmc(x_current, bootstrap_burn_in)
        self.logger.info('Initial acceptance [%5.4f]' % (np.mean(sampler.acceptance_fraction)))
        self._chain_stats(np.transpose(sampler.get_chain(), axes=[1, 0, 2]))

        # Use state coordinates to train flow
        mean = np.mean(state.coords, axis=0)
        std = np.std(state.coords, axis=0)
        # Normalise samples
        training_samples = (state.coords - mean) / std
        self.transform = lambda x: x * std + mean
        self.trainer.train(training_samples, jitter=initial_jitter)

        init_samples = None
        init_loglikes = None
        init_derived = None
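
The moves argument above swaps the default stretch move for a KDE proposal. Weighted mixtures of moves are also accepted; a small sketch with an illustrative mix:

import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 32, 3
sampler = emcee.EnsembleSampler(
    nwalkers, ndim, log_prob,
    # 80% differential-evolution moves, 20% snooker moves (illustrative weights)
    moves=[(emcee.moves.DEMove(), 0.8), (emcee.moves.DESnookerMove(), 0.2)],
)
sampler.run_mcmc(np.random.randn(nwalkers, ndim), 500)
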
github Fluorescence-Tools / chisurf / chisurf / fitting / sample.py
:param fit: the fit to be sampled
    :param steps: the number of steps of each walker
    :param thin: an integer (only every ith step is saved)
    :param nwalkers: the number of walkers
    :param chi2max: maximum allowed chi2
    :param std: the standard deviation of the parameters used to randomize the initial set of the walkers
    :return: a list containing the chi2 and the parameter values
    """
    model = fit.model
    ndim = fit.n_free  # Number of free parameters to be sampled (number of dimensions)
    kw = {
        'bounds': fit.model.parameter_bounds,
        'chi2max': chi2max
    }
    sampler = emcee.EnsembleSampler(
        nwalkers=nwalkers,
        ndim=ndim,
        log_prob_fn=chisurf.fitting.fit.lnprob,
        args=[fit],
        kwargs=kw
    )
    std = np.array(fit.model.parameter_values) * std
    pos = [
        fit.model.parameter_values + std * np.random.randn(ndim) for i in range(nwalkers)
    ]
    sampler.run_mcmc(
        pos,
        steps,
        thin=thin
    )
    chi2 = -2. * sampler.flatlnprobability / float(model.n_points - model.n_free - 1.0)
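
The args and kwargs parameters used above are forwarded to the log-probability function on every call. A small sketch with made-up extra arguments:

import numpy as np
import emcee

# Extra positional arguments are appended after the parameter vector.
def log_prob(theta, mu, inv_cov):
    diff = theta - mu
    return -0.5 * diff @ inv_cov @ diff

nwalkers, ndim = 32, 3
mu = np.zeros(ndim)
inv_cov = np.eye(ndim)

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=[mu, inv_cov])
sampler.run_mcmc(np.random.randn(nwalkers, ndim), 500)
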
github automl / RoBO / robo / models / gaussian_process_mcmc.py
if self.normalize_output:
            # Normalize output to have zero mean and unit standard deviation
            self.y, self.y_mean, self.y_std = normalization.zero_mean_unit_var_normalization(y)
            if self.y_std == 0:
                raise ValueError("Cannot normalize output. All targets have the same value")
        else:
            self.y = y

        # Use the mean of the data as mean for the GP
        self.mean = np.mean(self.y, axis=0)
        self.gp = george.GP(self.kernel, mean=self.mean)

        if do_optimize:
            # We have one walker for each hyperparameter configuration
            sampler = emcee.EnsembleSampler(self.n_hypers,
                                            len(self.kernel.pars) + 1,
                                            self.loglikelihood)
            sampler.random_state = self.rng.get_state()
            # Do a burn-in in the first iteration
            if not self.burned:
                # Initialize the walkers by sampling from the prior
                if self.prior is None:
                    self.p0 = self.rng.rand(self.n_hypers, len(self.kernel.pars) + 1)
                else:
                    self.p0 = self.prior.sample_from_prior(self.n_hypers)
                # Run MCMC sampling
                self.p0, _, _ = sampler.run_mcmc(self.p0,
                                                 self.burnin_steps,
                                                 rstate0=self.rng)

                self.burned = True
github waqasbhatti / astrobase / astrobase / lcfit / transits.py
if verbose and isfirstrun:
            LOGINFO(
                'start {:s} MCMC with {:d} dims, {:d} steps, {:d} walkers,'.
                format(fittype, n_dim, n_mcmc_steps, n_walkers) +
                ' {:d} threads'.format(nworkers)
            )
        elif verbose and not isfirstrun:
            LOGINFO(
                'continue {:s} with {:d} dims, {:d} steps, {:d} walkers, '.
                format(fittype, n_dim, n_mcmc_steps, n_walkers) +
                '{:d} threads'.format(nworkers)
            )

        with Pool(nworkers) as pool:
            sampler = emcee.EnsembleSampler(
                n_walkers, n_dim, log_posterior_transit,
                args=(init_params, init_m, stimes,
                      smags, serrs, priorbounds),
                pool=pool,
                backend=backend
            )
            sampler.run_mcmc(starting_positions, n_mcmc_steps,
                             progress=mcmcprogressbar)

        if verbose:
            LOGINFO(
                'ended {:s} MCMC run with {:d} steps, {:d} walkers, '.format(
                    fittype, n_mcmc_steps, n_walkers
                ) + '{:d} threads'.format(nworkers)
            )
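
The pool argument used above is how emcee 3 parallelizes log-probability evaluations; anything with a map method works, including a plain multiprocessing.Pool. A minimal sketch:

import numpy as np
import emcee
from multiprocessing import Pool

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 32, 5

if __name__ == "__main__":
    # The guard matters on platforms that spawn worker processes.
    with Pool() as pool:
        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)
        sampler.run_mcmc(np.random.randn(nwalkers, ndim), 500)
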
github GalacticDynamics-Oxford / Agama / py / gc_runfit.py
if numpy.isfinite(prob):
                        ensemble[i,:] = walker
                        break
                    print('*',end='')
            self.values = ensemble
        else:
            # check that all walkers have finite likelihood
            prob = numpy.zeros((self.values.shape[0],1))
            for i in range(self.values.shape[0]):
                prob[i,0] = monteCarloSearchFnc(self.values[i,:], self)
                if not numpy.isfinite(prob[i,0]):
                    print('Invalid parameters for %i-th walker (likelihood is bogus)' % i)
                else: print('%i-th walker: logL=%g' % (i, prob[i,0]))

        nwalkers, nparams = self.values.shape
        sampler = emcee.EnsembleSampler(nwalkers, nparams, monteCarloSearchFnc, args=(self,))
        prevmaxloglike = None
        while True:  # run several passes until convergence
            print('Starting MCMC')
            sampler.run_mcmc(self.values, nsteps_mcmc)
            # restart the next pass from the latest values in the Markov chain
            self.values = sampler.chain[:,-1,:]

            # store the latest best-fit parameters and their likelihood, and the entire chain for the last nsteps_mcmc steps
            numpy.savetxt(self.filename+'.best', \
                numpy.hstack((self.values, sampler.lnprobability[:,-1].reshape(-1,1))), fmt='%.8g')
            numpy.savetxt(self.filename+".chain", \
                numpy.hstack((sampler.chain[:,-nsteps_mcmc:].reshape(-1,nparams),
                 sampler.lnprobability[:,-nsteps_mcmc:].reshape(-1,1))), fmt='%.8g')

            print("Acceptance fraction: %g" % numpy.mean(sampler.acceptance_fraction))  # should be in the range 0.2-0.5
            try:
github sibirrer / lenstronomy / lenstronomy / Sampling / sampler.py
print("Warning: All samples (including burn-in) will be saved in backup file '{}'.".format(backup_filename))
            if start_from_backup:
                initpos = None
                n_run_eff = n_run
            else:
                n_run_eff = n_burn + n_run
                backend.reset(n_walkers, num_param)
                if pool.is_master():
                    print("Warning: backup file '{}' has been reset!".format(backup_filename))
        else:
            backend = None
            n_run_eff = n_burn + n_run

        time_start = time.time()

        sampler = emcee.EnsembleSampler(n_walkers, num_param, self.chain.logL,
                                        pool=pool, backend=backend)

        sampler.run_mcmc(initpos, n_run_eff, progress=progress)
        flat_samples = sampler.get_chain(discard=n_burn, thin=1, flat=True)
        dist = sampler.get_log_prob(flat=True, discard=n_burn, thin=1)
        if pool.is_master():
            print('Computing the MCMC...')
            print('Number of walkers = ', n_walkers)
            print('Burn-in iterations: ', n_burn)
            print('Sampling iterations (in current run):', n_run_eff)
            time_end = time.time()
            print(time_end - time_start, 'time taken for MCMC sampling')
        return flat_samples, dist
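
When choosing discard and thin values like the ones above, the integrated autocorrelation time is the usual guide, and the sampler exposes an estimate directly. A sketch on a toy problem, with illustrative burn-in and thinning rules of thumb:

import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

nwalkers, ndim = 32, 3
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(np.random.randn(nwalkers, ndim), 5000)

tau = sampler.get_autocorr_time()        # one estimate per parameter
burn = int(2 * np.max(tau))              # discard a couple of autocorrelation times
thin = max(1, int(0.5 * np.min(tau)))
flat_samples = sampler.get_chain(discard=burn, thin=thin, flat=True)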