How to use the emcee.State class in emcee

To help you get started, we’ve selected a few emcee examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github adammoss / nnest / nnest / sampler.py View on Github external
output_interval=None,
            stats_interval=None,
            plot_trace=True,
            moves=None):

        self.trainer.netG.eval()

        samples = []
        latent_samples = []
        derived_samples = []
        loglikes = []

        iters = tqdm(range(1, mcmc_steps + 1)) if show_progress else range(1, mcmc_steps + 1)

        if init_samples is not None:
            if isinstance(init_samples, emcee.State):
                state = emcee.State(init_samples)
            else:
                num_walkers = init_samples.shape[0]
                z, _ = self.trainer.forward(init_samples, to_numpy=True)
                state = emcee.State(z, log_prob=init_loglikes, blobs=init_derived)
        else:
            for i in range(max_start_tries):
                z = self.trainer.get_prior_samples(num_walkers, to_numpy=True)
                x = self.trainer.get_samples(z, to_numpy=True)
                logl_prior = self.prior(x)
                if np.all(logl_prior) > -1e30:
                    break
                if i == max_start_tries - 1:
                    raise Exception('Could not find starting value')
            state = emcee.State(z)
github threeML / threeML / threeML / bayesian / emcee_sampler.py View on Github external
# If a seed is provided, set the random number seed
            if self._seed is not None:

                sampler._random.seed(self._seed)

            # Sample the burn-in
            pos, prob, state = sampler.run_mcmc(
                initial_state=p0, nsteps=self._n_burn_in, progress=loud
            )

            # Reset sampler

            sampler.reset()

            state = emcee.State(pos, prob, random_state=state)

            # Run the true sampling

            _ = sampler.run_mcmc(
                initial_state=state, nsteps=self._n_iterations, progress=loud
            )

        acc = np.mean(sampler.acceptance_fraction)

        print("\nMean acceptance fraction: %s\n" % acc)

        self._sampler = sampler
        self._raw_samples = sampler.get_chain(flat=True)

        # Compute the corresponding values of the likelihood
github adammoss / nnest / nnest / sampler.py View on Github external
if isinstance(init_samples, emcee.State):
                state = emcee.State(init_samples)
            else:
                num_walkers = init_samples.shape[0]
                z, _ = self.trainer.forward(init_samples, to_numpy=True)
                state = emcee.State(z, log_prob=init_loglikes, blobs=init_derived)
        else:
            for i in range(max_start_tries):
                z = self.trainer.get_prior_samples(num_walkers, to_numpy=True)
                x = self.trainer.get_samples(z, to_numpy=True)
                logl_prior = self.prior(x)
                if np.all(logl_prior) > -1e30:
                    break
                if i == max_start_tries - 1:
                    raise Exception('Could not find starting value')
            state = emcee.State(z)

        def transformed_loglike(z):
            """Evaluate the log-likelihood of a single latent-space point.

            Maps ``z`` back to parameter space via the trainer's inverse flow
            and returns a ``(log_prob, derived)`` pair suitable for an emcee
            log-probability function with blobs.

            Parameters
            ----------
            z : ndarray of shape (self.x_dim,)
                A single walker position in the latent space.

            Returns
            -------
            tuple
                ``(log_prob, derived)`` where ``log_prob`` is a float and
                ``derived`` is an array of derived quantities (or zeros).
            """
            assert z.shape == (self.x_dim,), z.shape
            try:
                # Inverse flow: latent z -> parameter x, with log|det J| of
                # the transform so densities stay correctly normalized.
                x, log_det_J = self.trainer.inverse(z.reshape((1, -1)), to_numpy=True)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed. An invertibility
                # failure is treated as zero likelihood.
                return -np.inf, np.zeros((1, self.num_derived))
            logl, der = self.loglike(x)
            if loglstar is not None:
                # Likelihood-constrained mode (nested-sampling style): reject
                # points below the threshold; otherwise sample the prior
                # (plus Jacobian) inside the constraint.
                if logl < loglstar:
                    return -np.inf, der
                else:
                    # NOTE(review): `logl` is intentionally absent here —
                    # presumably the constrained target is the prior, not the
                    # posterior. Confirm against the sampler's design.
                    return log_det_J + self.prior(x), der
            else:
                # Unconstrained mode: full posterior in latent coordinates.
                return logl + log_det_J + self.prior(x), np.zeros((1, self.num_derived))
github adammoss / nnest / nnest / sampler.py View on Github external
self.trainer.netG.eval()

        samples = []
        latent_samples = []
        derived_samples = []
        loglikes = []

        iters = tqdm(range(1, mcmc_steps + 1)) if show_progress else range(1, mcmc_steps + 1)

        if init_samples is not None:
            if isinstance(init_samples, emcee.State):
                state = emcee.State(init_samples)
            else:
                num_walkers = init_samples.shape[0]
                z, _ = self.trainer.forward(init_samples, to_numpy=True)
                state = emcee.State(z, log_prob=init_loglikes, blobs=init_derived)
        else:
            for i in range(max_start_tries):
                z = self.trainer.get_prior_samples(num_walkers, to_numpy=True)
                x = self.trainer.get_samples(z, to_numpy=True)
                logl_prior = self.prior(x)
                if np.all(logl_prior) > -1e30:
                    break
                if i == max_start_tries - 1:
                    raise Exception('Could not find starting value')
            state = emcee.State(z)

        def transformed_loglike(z):
            assert z.shape == (self.x_dim,), z.shape
            try:
                x, log_det_J = self.trainer.inverse(z.reshape((1, -1)), to_numpy=True)
            except:
github threeML / threeML / threeML / bayesian / emcee_sampler.py View on Github external
def sample(self, quiet=False):

        assert self._is_setup, "You forgot to setup the sampler!"

        loud = not quiet

        self._update_free_parameters()

        n_dim = len(list(self._free_parameters.keys()))

        # Get starting point

        p0 = emcee.State(self._get_starting_points(self._n_walkers))

        # Deactivate memoization in astromodels, which is useless in this case since we will never use twice the
        # same set of parameters
        with use_astromodels_memoization(False):

            if threeML_config["parallel"]["use-parallel"]:

                c = ParallelClient()
                view = c[:]

                sampler = emcee.EnsembleSampler(
                    self._n_walkers, n_dim, self.get_posterior, pool=view
                )

            else:
github adammoss / nnest / nnest / sampler.py View on Github external
stats_interval=None,
            plot_trace=True,
            moves=None):

        self.trainer.netG.eval()

        samples = []
        latent_samples = []
        derived_samples = []
        loglikes = []

        iters = tqdm(range(1, mcmc_steps + 1)) if show_progress else range(1, mcmc_steps + 1)

        if init_samples is not None:
            if isinstance(init_samples, emcee.State):
                state = emcee.State(init_samples)
            else:
                num_walkers = init_samples.shape[0]
                z, _ = self.trainer.forward(init_samples, to_numpy=True)
                state = emcee.State(z, log_prob=init_loglikes, blobs=init_derived)
        else:
            for i in range(max_start_tries):
                z = self.trainer.get_prior_samples(num_walkers, to_numpy=True)
                x = self.trainer.get_samples(z, to_numpy=True)
                logl_prior = self.prior(x)
                if np.all(logl_prior) > -1e30:
                    break
                if i == max_start_tries - 1:
                    raise Exception('Could not find starting value')
            state = emcee.State(z)

        def transformed_loglike(z):